Dev optimize std vector #6630

Merged: 22 commits into master from dev_optimize_std_vector on Nov 27, 2021
Commits (22)

Changes shown are from 11 of the 22 commits.
2ccda21  use reserve (Flowingsun007, Oct 26, 2021)
358f0be  use emplace_back (Flowingsun007, Oct 26, 2021)
83a4e4b  refine (Flowingsun007, Oct 26, 2021)
7939b67  Merge branch 'master' into dev_optimize_std_vector (Flowingsun007, Oct 29, 2021)
2ead89e  merge master (Flowingsun007, Nov 24, 2021)
afc924e  Merge branch 'master' into dev_optimize_std_vector (Flowingsun007, Nov 25, 2021)
b093e81  Merge branch 'master' into dev_optimize_std_vector (Flowingsun007, Nov 26, 2021)
d134a29  refine (Flowingsun007, Nov 26, 2021)
1854390  refine (Flowingsun007, Nov 26, 2021)
39602e3  rm useless file (Flowingsun007, Nov 26, 2021)
cebd86a  refine (Flowingsun007, Nov 26, 2021)
347957f  Merge branch 'master' into dev_optimize_std_vector (Flowingsun007, Nov 26, 2021)
43b613b  Merge branch 'master' into dev_optimize_std_vector (oneflow-ci-bot, Nov 26, 2021)
6a4279a  clang tidy add use-emplace check (Flowingsun007, Nov 26, 2021)
611e82b  Merge branch 'dev_optimize_std_vector' of github.com:Oneflow-Inc/onef… (Flowingsun007, Nov 26, 2021)
76d7b70  fix ci fail (Flowingsun007, Nov 26, 2021)
d10bf77  Merge branch 'master' into dev_optimize_std_vector (oneflow-ci-bot, Nov 26, 2021)
b9b2181  refine (Flowingsun007, Nov 26, 2021)
7182dac  Merge branch 'dev_optimize_std_vector' of github.com:Oneflow-Inc/onef… (Flowingsun007, Nov 26, 2021)
130dbb2  format (Flowingsun007, Nov 26, 2021)
b756f98  Merge branch 'master' into dev_optimize_std_vector (Flowingsun007, Nov 26, 2021)
ed57e1a  Merge branch 'master' into dev_optimize_std_vector (oneflow-ci-bot, Nov 26, 2021)
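
Commit 6a4279a turns this cleanup into an enforced rule by enabling clang-tidy's modernize-use-emplace check, which flags push_back calls that could be emplace_back. The PR page does not show the repository's actual clang-tidy configuration; a minimal sketch of what such a config file could look like:

    # hypothetical minimal .clang-tidy entry; the repo's real file likely enables more checks
    Checks: 'modernize-use-emplace'
    WarningsAsErrors: 'modernize-use-emplace'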
2 changes: 1 addition & 1 deletion oneflow/api/python/framework/tensor_tuple.cpp
@@ -47,7 +47,7 @@ struct TensorTupleUtil final {

   static void AppendTensor(std::shared_ptr<TensorTuple>& tensor_tuple,
                            const std::shared_ptr<Tensor>& tensor) {
-    tensor_tuple->push_back(tensor);
+    tensor_tuple->emplace_back(tensor);
   }
 };
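
The pattern above repeats through most of this diff: emplace_back forwards its arguments to the element's constructor and builds the element directly in the vector's storage, while push_back first constructs (or copies) an argument of the element type. For an lvalue like tensor here the two do the same work; the win appears when the element can be built from raw constructor arguments. A standalone sketch (not OneFlow code) of the difference:

    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> v;
      v.reserve(2);
      v.push_back(std::string(3, 'a'));  // constructs a temporary string, then moves it in
      v.emplace_back(3, 'b');            // forwards (3, 'b') to the string constructor in place
      return v.size() == 2 ? 0 : 1;
    }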
2 changes: 1 addition & 1 deletion oneflow/api/python/functional/indexing.cpp
@@ -134,7 +134,7 @@ Maybe<Shape> InferArraySizes(PyObject* object) {
   while (PySequence_Check(seq)) {
     int64_t length = PySequence_Length(seq);
     CHECK_GT_OR_RETURN(length, 0) << "Index should not be empty.";
-    sizes.push_back(length);
+    sizes.emplace_back(length);
     CHECK_LE_OR_RETURN(sizes.size(), /*MAX_DIMS=*/128)
         << "Too many dimensions " << Py_TYPE(seq)->tp_name;
     if (length == 0) break;
4 changes: 2 additions & 2 deletions oneflow/api/python/utils/tensor_utils.cpp
@@ -103,8 +103,8 @@ MaybeGetTensorBufferShapesAndDTypes(const std::shared_ptr<Tensor>& t) {
   const auto* tensor_buffer_ptr = blob.dptr<TensorBuffer>();
   for (int64_t i = 0; i < blob_shape.elem_cnt(); ++i) {
     const TensorBuffer* tensor_buffer = tensor_buffer_ptr + i;
-    shapes.push_back(tensor_buffer->shape());
-    dtypes.push_back(DType::Get(tensor_buffer->data_type()).GetOrThrow());
+    shapes.emplace_back(tensor_buffer->shape());
+    dtypes.emplace_back(DType::Get(tensor_buffer->data_type()).GetOrThrow());
   }
   return std::make_tuple(shapes, dtypes);
 }
2 changes: 1 addition & 1 deletion oneflow/core/autograd/autograd_engine.cpp
@@ -320,7 +320,7 @@ GraphTask::GraphTask(const TensorTuple& outputs, bool retain_graph, bool create_
   roots_.reserve(outputs.size());
   for (const auto& out_tensor : outputs) {
     FunctionNode* node = out_tensor->mut_grad_fn_node().get();
-    roots_.push_back(node);
+    roots_.emplace_back(node);
     dependencies_.insert(std::make_pair(node, 0));
   }
 }
1 change: 1 addition & 0 deletions oneflow/core/autograd/autograd_meta.cpp
@@ -34,6 +34,7 @@ Maybe<const std::vector<Symbol<cfg::SbpParallel>>&> GetSbpTuple(Symbol<cfg::NdSb
   auto iter = map.find(nd_sbp);
   if (iter == map.end()) {
     std::vector<Symbol<cfg::SbpParallel>> sbp_tuple;
+    sbp_tuple.reserve(nd_sbp->sbp_parallel().size());
     for (const auto& sbp_parallel : nd_sbp->sbp_parallel()) {
       sbp_tuple.push_back(SymbolOf(sbp_parallel));
     }
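
This hunk shows the PR's other recurring change: calling reserve() with the known final size before a fill loop, so the vector allocates once instead of growing geometrically and moving its contents on every reallocation. A minimal standalone sketch of the idiom:

    #include <vector>

    std::vector<int> Squares(int n) {
      std::vector<int> out;
      out.reserve(n);  // one allocation up front; the n appends below never reallocate
      for (int i = 0; i < n; ++i) { out.push_back(i * i); }
      return out;
    }

    int main() { return Squares(8).back() == 49 ? 0 : 1; }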
2 changes: 1 addition & 1 deletion oneflow/core/autograd/autograd_meta.h
@@ -65,7 +65,7 @@ class AutogradMeta final {
   void set_requires_grad(bool requires_grad) { requires_grad_ = requires_grad; }
   void set_retain_grad(bool retain_grad) { retain_grad_ = retain_grad; }
   void set_is_leaf(bool is_leaf) { is_leaf_ = is_leaf; }
-  void add_hook(const Hook& hook) { hooks_.push_back(hook); }
+  void add_hook(const Hook& hook) { hooks_.emplace_back(hook); }

  private:
   bool is_leaf_;
2 changes: 1 addition & 1 deletion oneflow/core/autograd/gradient_funcs/bias_add.cpp
@@ -56,7 +56,7 @@ class BiasAdd : public OpExprGradFunction<BiasAddCaptureState> {
     std::vector<int32_t> reduce_axes_vec;
     reduce_axes_vec.reserve(num_axes);
     for (int i = 0; i < num_axes; ++i) {
-      if (i != ctx->axis) { reduce_axes_vec.push_back(i); }
+      if (i != ctx->axis) { reduce_axes_vec.emplace_back(i); }
     }
     if (ctx->bias_requires_grad) {
       in_grads->at(1) = JUST(functional::ReduceSum(out_grads.at(0), reduce_axes_vec, false));
6 changes: 3 additions & 3 deletions oneflow/core/autograd/gradient_funcs/deconv.cpp
@@ -80,9 +80,9 @@ Maybe<void> DeConvolutionNd::Apply(const DeConvolutionNdCaptureState* ctx,
   const auto& x = ctx->SavedTensors().at(1);
   std::vector<int64_t> start, stop, step;
   for (int i = 0; i < x->shape()->NumAxes(); i++) {
-    start.push_back(0);
-    stop.push_back(x->shape()->At(i));
-    step.push_back(1);
+    start.emplace_back(0);
+    stop.emplace_back(x->shape()->At(i));
+    step.emplace_back(1);
   }
   const auto& weight = ctx->SavedTensors().at(0);
   if (ctx->ndims == 1) {
@@ -82,7 +82,7 @@ Maybe<void> FusedBiasAddDropout::Apply(const FusedBiasAddDropoutInterpState* ctx
     std::vector<int32_t> reduce_axes_vec;
     reduce_axes_vec.reserve(num_axes);
     for (int i = 0; i < num_axes; ++i) {
-      if (i != ctx->axis) { reduce_axes_vec.push_back(i); }
+      if (i != ctx->axis) { reduce_axes_vec.emplace_back(i); }
     }
     in_grads->at(1) = JUST(functional::ReduceSum(dropout_grad, reduce_axes_vec, false));
   }
@@ -63,7 +63,7 @@ class FusedBiasAddGelu : public OpExprGradFunction<FusedBiasAddGeluInterpState>
     std::vector<int32_t> reduce_axes_vec;
     reduce_axes_vec.reserve(num_axes);
     for (int i = 0; i < num_axes; ++i) {
-      if (i != ctx->axis) { reduce_axes_vec.push_back(i); }
+      if (i != ctx->axis) { reduce_axes_vec.emplace_back(i); }
     }
     in_grads->at(1) =
         JUST(functional::ReduceSum(fused_bias_add_gelu_grad, reduce_axes_vec, false));
4 changes: 2 additions & 2 deletions oneflow/core/autograd/gradient_funcs/normalization.cpp
@@ -139,9 +139,9 @@ class NormalizationGrad : public OpExprGradFunction<NormalizationGradCaptureStat
     DimVector dim_vec;
     for (int i = 0; i < x->shape()->NumAxes(); ++i) {
       if (i != ctx->axis) {
-        dim_vec.push_back(1);
+        dim_vec.emplace_back(1);
       } else {
-        dim_vec.push_back(x->shape()->At(ctx->axis));
+        dim_vec.emplace_back(x->shape()->At(ctx->axis));
       }
     }
     Shape shape(dim_vec);
4 changes: 2 additions & 2 deletions oneflow/core/autograd/gradient_funcs/split_like.cpp
@@ -68,10 +68,10 @@ Maybe<void> SplitLike::Apply(const SplitLikeCaptureState* ctx, const TensorTuple
   for (int i = 0; i < out_grads.size(); ++i) {
     const auto& out_grad_i = out_grads.at(i);
     if (out_grad_i.get()) {
-      inputs.push_back(out_grad_i);
+      inputs.emplace_back(out_grad_i);
     } else {
       const auto& zero_grad = JUST(functional::ZerosLike(saved_tensors.at(i)));
-      inputs.push_back(zero_grad);
+      inputs.emplace_back(zero_grad);
     }
   }
   in_grads->at(0) = JUST(functional::Concat(inputs, ctx->axis));
4 changes: 2 additions & 2 deletions oneflow/core/comm_network/comm_network.cpp
@@ -74,11 +74,11 @@ void CommNet::AddWorkToStream(void* actor_read_id, const std::function<void()>&
     ready_cbs_.Send(cb);
   } else {
     CommNetItem work_item(is_read, cb);
-    actor_read_ctx->waiting_list.push_back(work_item);
+    actor_read_ctx->waiting_list.emplace_back(work_item);
   }
   if (is_read) {
     CommNetItem empty_cb;
-    actor_read_ctx->waiting_list.push_back(empty_cb);
+    actor_read_ctx->waiting_list.emplace_back(empty_cb);
   }
 }
8 changes: 4 additions & 4 deletions oneflow/core/common/channel_test.cpp
@@ -40,14 +40,14 @@ TEST(Channel, 30sender40receiver) {
   std::vector<std::vector<int>> visits;
   for (int i = 0; i < receiver_num; ++i) {
     std::vector<int> visit_i;
-    for (int j = 0; j < range_num; j++) { visit_i.push_back(0); }
-    visits.push_back(visit_i);
+    for (int j = 0; j < range_num; j++) { visit_i.emplace_back(0); }
+    visits.emplace_back(visit_i);
   }
   for (int i = 0; i < sender_num; ++i) {
-    senders.push_back(std::thread(CallFromSenderThread, &channel, Range(0, range_num)));
+    senders.emplace_back(CallFromSenderThread, &channel, Range(0, range_num));
   }
   for (int i = 0; i < receiver_num; ++i) {
-    receivers.push_back(std::thread(CallFromReceiverThread, &visits[i], &channel));
+    receivers.emplace_back(CallFromReceiverThread, &visits[i], &channel);
   }
   for (std::thread& this_thread : senders) { this_thread.join(); }
   channel.Close();
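
The thread-pool hunk is where emplace_back pays off most clearly: instead of building a temporary std::thread and moving it into the vector, emplace_back constructs the thread in place from the callable and its arguments. A standalone sketch of the same idiom, with Work as a hypothetical stand-in for the test's callables:

    #include <cstdio>
    #include <thread>
    #include <vector>

    void Work(int id) { std::printf("worker %d\n", id); }

    int main() {
      std::vector<std::thread> pool;
      pool.reserve(4);
      for (int i = 0; i < 4; ++i) {
        // forwards (Work, i) to the std::thread constructor; no temporary thread object
        pool.emplace_back(Work, i);
      }
      for (auto& t : pool) { t.join(); }
    }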
2 changes: 1 addition & 1 deletion oneflow/core/common/fixed_vector.h
@@ -199,7 +199,7 @@ class fixed_vector final {

   void push_back(const T& value) { insert(end(), value); }
   void push_back(T&& value) { insert(end(), std::move(value)); }

-  void emplace_back(const T& value) { insert(end(), value); }
+  template<class... Args>
   void emplace_back(Args&&... args) {
     insert(end(), std::forward<Args>(args)...);
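
Since fixed_vector mirrors the std::vector API, its emplace_back is a variadic template that perfect-forwards constructor arguments. A minimal sketch of the forwarding pattern, with MiniVec as a hypothetical stand-in for fixed_vector's storage:

    #include <utility>
    #include <vector>

    template<typename T>
    struct MiniVec {
      std::vector<T> data;  // stand-in for fixed_vector's inline array storage
      template<class... Args>
      void emplace_back(Args&&... args) {
        // std::forward preserves each argument's value category,
        // so T is constructed from exactly what the caller passed
        data.emplace_back(std::forward<Args>(args)...);
      }
    };

    int main() {
      MiniVec<std::pair<int, int>> v;
      v.emplace_back(1, 2);  // no temporary pair is created
      return v.data.size() == 1 ? 0 : 1;
    }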
4 changes: 2 additions & 2 deletions oneflow/core/common/fixed_vector_test.cpp
@@ -306,15 +306,15 @@ TEST(fixed_vector, erase_1) {
 TEST(fixed_vector, push_back_0) {
   std::vector<int> vec{0, 1, 2, 3};
   FixedVec a{0, 1, 2};
-  a.push_back(3);
+  a.emplace_back(3);
   ASSERT_TRUE(std::equal(a.begin(), a.end(), vec.begin()));
 }

 TEST(fixed_vector, push_back_1) {
   std::vector<int> vec{0, 1, 2, 3};
   FixedVec a{0, 1, 2};
   int three = 3;
-  a.push_back(std::move(three));
+  a.emplace_back(std::move(three));
   ASSERT_TRUE(std::equal(a.begin(), a.end(), vec.begin()));
 }
2 changes: 1 addition & 1 deletion oneflow/core/common/flat_shape.cpp
@@ -44,7 +44,7 @@ Maybe<Shape> FlatShape::ToShape() const {

 Maybe<void> FlatShape::ToShape(Shape* shape) const {
   DimVector dim_vec;
-  for (int i = 0; i < this->dim_size(); ++i) { dim_vec.push_back(this->dim(i)); }
+  for (int i = 0; i < this->dim_size(); ++i) { dim_vec.emplace_back(this->dim(i)); }
   *shape = Shape(dim_vec);
   return Maybe<void>::Ok();
 }
2 changes: 1 addition & 1 deletion oneflow/core/common/optional_test.cpp
@@ -25,7 +25,7 @@ namespace test {
 TEST(Optional, copy_constructor) {
   Optional<int64_t> a(0);
   std::vector<Optional<int64_t>> vec;
-  vec.push_back(a);
+  vec.emplace_back(a);
   ASSERT_TRUE(vec[0].has_value());
   int64_t val = CHECK_JUST(vec[0]);
   ASSERT_EQ(val, 0);
6 changes: 3 additions & 3 deletions oneflow/core/common/shape.cpp
@@ -162,7 +162,7 @@ Shape Shape::RemoveOnes(const AxisVector& axis_vec) const {
   const AxisVector& axis_vec_shifted = ShiftNegativeAxisVec(axis_vec);
   for (int64_t i = 0; i < this->dim_vec().size(); i++) {
     if (std::find(axis_vec_shifted.begin(), axis_vec_shifted.end(), i) == axis_vec_shifted.end()) {
-      dim_vec.push_back(this->dim_vec().at(i));
+      dim_vec.emplace_back(this->dim_vec().at(i));
     } else {
       CHECK_EQ(this->dim_vec().at(i), 1);
     }
@@ -181,7 +181,7 @@ AxisVector Shape::Axes4BroadcastTo(const Shape& broadcast_shape) const {
   CHECK_EQ(broadcast_shape.NumAxes(), NumAxes());
   for (int64_t i = 0; i < NumAxes(); i++) {
     if (this->dim_vec().at(i) != broadcast_shape.dim_vec().at(i) && this->dim_vec().at(i) == 1) {
-      broadcast_axis_vec.push_back(i);
+      broadcast_axis_vec.emplace_back(i);
     } else {
       CHECK_EQ(this->dim_vec().at(i), broadcast_shape.dim_vec().at(i));
     }
@@ -204,7 +204,7 @@ Maybe<Shape> Shape::Slice(int64_t start_dim, int64_t end_dim) const {
   if (start_dim > ndims) { start_dim = ndims; }
   if (end_dim > ndims) { end_dim = ndims; }
   DimVector dim_vec;
-  for (int64_t i = start_dim; i < end_dim && i < ndims; ++i) { dim_vec.push_back(this->At(i)); }
+  for (int64_t i = start_dim; i < end_dim && i < ndims; ++i) { dim_vec.emplace_back(this->At(i)); }
   return std::make_shared<Shape>(dim_vec);
 }
5 changes: 3 additions & 2 deletions oneflow/core/control/ctrl_bootstrap.cpp
@@ -26,21 +26,22 @@ namespace oneflow {

 Maybe<void> CtrlBootstrap::InitProcessCtx(int64_t port, ProcessCtx* ret_process_ctx) {
   std::vector<WorkerProcessInfo> worker_process_info_list;
+  worker_process_info_list.reserve(world_size());
   if (rank() == 0) {
     WorkerProcessInfo worker_process_info;
     {
       worker_process_info.set_rank(rank());
       worker_process_info.set_port(port);
       JUST(SetCurrentHostByMaster(&worker_process_info));
     }
-    worker_process_info_list.push_back(worker_process_info);
+    worker_process_info_list.emplace_back(worker_process_info);
     for (int64_t world_rank = 1; world_rank < world_size(); ++world_rank) {
       std::string key = std::string("GetWorkerProcessInfo") + std::to_string(world_rank);
       WorkerProcessInfo cur_work_process_info;
       mut_bootstrap_client()->PullMasterKV(key, &cur_work_process_info);
       CHECK_EQ_OR_RETURN(world_rank, worker_process_info_list.size());
       CHECK_EQ_OR_RETURN(world_rank, cur_work_process_info.rank());
-      worker_process_info_list.push_back(cur_work_process_info);
+      worker_process_info_list.emplace_back(cur_work_process_info);
     }
   } else {
     std::string key = std::string("GetWorkerProcessInfo") + std::to_string(rank());
2 changes: 1 addition & 1 deletion oneflow/core/control/host_list_bootstrap_client.cpp
@@ -26,7 +26,7 @@ HostListBootstrapClient::HostListBootstrapClient(const EnvDesc& env_desc) {
     const Machine& mchn = env_desc.machine(i);
     port = (mchn.ctrl_port_agent() != -1) ? (mchn.ctrl_port_agent()) : env_desc.ctrl_port();
     addr = mchn.addr() + ":" + std::to_string(port);
-    stubs_.push_back(CtrlService::NewStub(addr));
+    stubs_.emplace_back(CtrlService::NewStub(addr));
     LoadServer(mchn.addr(), stubs_[i].get());
   }
 }
2 changes: 1 addition & 1 deletion oneflow/core/control/rank_info_bootstrap_client.cpp
@@ -21,7 +21,7 @@ RankInfoBootstrapClient::RankInfoBootstrapClient(const BootstrapConf& bootstrap_
   stubs_.reserve(bootstrap_conf.world_size());
   const auto& master_addr = bootstrap_conf.master_addr();
   const std::string& host = master_addr.host() + ":" + std::to_string(master_addr.port());
-  stubs_.push_back(CtrlService::NewStub(host));
+  stubs_.emplace_back(CtrlService::NewStub(host));
   LoadServerRequest request;
   request.set_addr(master_addr.host());
   request.set_rank(bootstrap_conf.rank());
2 changes: 1 addition & 1 deletion oneflow/core/control/rpc_client.h
@@ -74,7 +74,7 @@ class RpcClient {
   CtrlService::Stub* GetStubAt(int64_t i) { return stubs_[i].get(); };
   size_t GetStubSize() { return stubs_.size(); };
   void ReserveStubsOfSize(int64_t n) { stubs_.reserve(n); };
-  void AddStub(std::unique_ptr<CtrlService::Stub> s) { stubs_.push_back(std::move(s)); };
+  void AddStub(std::unique_ptr<CtrlService::Stub> s) { stubs_.emplace_back(std::move(s)); };

   std::vector<std::unique_ptr<CtrlService::Stub>> stubs_;
   std::mutex done_names_mtx_;
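
AddStub takes the unique_ptr by value and moves it into the vector; for a move-only element type like this, push_back(std::move(s)) and emplace_back(std::move(s)) do the same work, so the change here is stylistic. A standalone sketch of the move-only case, including one caveat worth knowing:

    #include <memory>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::unique_ptr<int>> v;
      auto p = std::make_unique<int>(42);
      v.emplace_back(std::move(p));  // moves the pointer in; equivalent to push_back here
      v.emplace_back(new int(7));    // compiles only with emplace_back, but the raw
                                     // pointer leaks if reallocation throws first
      return v.size() == 2 ? 0 : 1;
    }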
6 changes: 3 additions & 3 deletions oneflow/core/control/rpc_server.cpp
@@ -76,7 +76,7 @@ void RpcServer::Init() {
             .first;
       }
       CHECK_EQ(barrier_num, barrier_call_it->second.second) << barrier_name;
-      barrier_call_it->second.first.push_back(call);
+      barrier_call_it->second.first.emplace_back(call);
       if (barrier_call_it->second.first.size() == barrier_call_it->second.second) {
         for (CtrlCallIf* pending_call : barrier_call_it->second.first) {
           pending_call->SendResponse();
@@ -121,7 +121,7 @@ void RpcServer::Init() {
       void* lock_status = name2lock_status_.at(lock_name);
       if (lock_status) {
         auto waiting_calls = static_cast<std::list<CtrlCallIf*>*>(lock_status);
-        waiting_calls->push_back(call);
+        waiting_calls->emplace_back(call);
       } else {
         call->SendResponse();
       }
@@ -160,7 +160,7 @@ void RpcServer::Init() {
         call->mut_response()->set_val(kv_it->second);
         call->SendResponse();
       } else {
-        pending_kv_calls_[k].push_back(call);
+        pending_kv_calls_[k].emplace_back(call);
       }
       EnqueueRequest<CtrlMethod::kPullKV>();
     });
2 changes: 1 addition & 1 deletion oneflow/core/device/cuda_device_descriptor_class.cpp
@@ -65,7 +65,7 @@ class CudaDeviceDescriptorClass : public DeviceDescriptorClass {
       CHECK(cuda_device);
       std::string serialized_device;
       cuda_device->Serialize(&serialized_device);
-      serialized_devices.push_back(std::move(serialized_device));
+      serialized_devices.emplace_back(std::move(serialized_device));
     }
     nlohmann::json json_object;
     json_object[kJsonKeyDevices] = serialized_devices;
2 changes: 1 addition & 1 deletion oneflow/core/device/cudnn_conv_util.cpp
@@ -152,7 +152,7 @@ perf_t CudnnConvAlgoGetOrInfer(const CudnnConvParams& params,
       if (perf_it != key_it->second.cend()) { return perf_it->second; }
     }
     perf_t perf = InferFn(p);
-    (*store)[params_without_ws].push_back(std::make_pair(p.max_ws_size, perf));
+    (*store)[params_without_ws].emplace_back(std::make_pair(p.max_ws_size, perf));
     return perf;
   };
   return ThreadLocalCachedCall(cache_size, InferWithCache, params);
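
Note that this hunk keeps std::make_pair inside the emplace_back call, which still materializes a temporary pair; passing the two components directly would let emplace_back construct the pair in place. A standalone sketch of the distinction (a general observation, not a suggested patch to this file):

    #include <cstddef>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::pair<std::size_t, int>> cache;
      cache.emplace_back(std::make_pair(std::size_t{64}, 7));  // temporary pair, then move
      cache.emplace_back(std::size_t{64}, 7);                  // pair constructed in place
      return cache.size() == 2 ? 0 : 1;
    }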
4 changes: 2 additions & 2 deletions oneflow/core/device/net_ib_device_descriptor_class.cpp
@@ -58,7 +58,7 @@ class NetIBDeviceDescriptorClass : public DeviceDescriptorClass {
       for (int port = 1; port <= device_attr.phys_port_cnt; ++port) {
         auto device_desc =
             NetIBDeviceDescriptor::Query(static_cast<int32_t>(devices.size()), context, port);
-        if (device_desc) { devices.push_back(device_desc); }
+        if (device_desc) { devices.emplace_back(device_desc); }
       }
     }
     ibv::wrapper.ibv_free_device_list(device_list);
@@ -76,7 +76,7 @@ class NetIBDeviceDescriptorClass : public DeviceDescriptorClass {
       CHECK(ib_device);
       std::string serialized_device;
       ib_device->Serialize(&serialized_device);
-      serialized_devices.push_back(std::move(serialized_device));
+      serialized_devices.emplace_back(std::move(serialized_device));
     }
     nlohmann::json json_object;
     json_object[kJsonKeyDevices] = serialized_devices;
4 changes: 2 additions & 2 deletions oneflow/core/device/net_socket_device_descriptor_class.cpp
@@ -71,7 +71,7 @@ class NetSocketDeviceDescriptorClass : public DeviceDescriptorClass {
       }
       auto socket_device =
           NetSocketDeviceDescriptor::Query(static_cast<int32_t>(devices.size()), name, host);
-      if (socket_device) { devices.push_back(socket_device); }
+      if (socket_device) { devices.emplace_back(socket_device); }
     }
     freeifaddrs(interfaces);
     return std::make_shared<const BasicDeviceDescriptorList>(
@@ -90,7 +90,7 @@ class NetSocketDeviceDescriptorClass : public DeviceDescriptorClass {
       CHECK(socket_device);
       std::string serialized_device;
       socket_device->Serialize(&serialized_device);
-      serialized_devices.push_back(std::move(serialized_device));
+      serialized_devices.emplace_back(std::move(serialized_device));
     }
     nlohmann::json json_object;
     json_object[kJsonKeyDevices] = serialized_devices;
4 changes: 2 additions & 2 deletions oneflow/core/framework/arg_tuple.cpp
@@ -38,15 +38,15 @@ void InitArgName2BnIndex2TensorTupleIndex(
     // vector is auto created by [] if arg_name doesn't exist in map
     auto* bn_index2tensor_tuple_index = &(*arg_name2bn_index2tensor_tuple_index)[arg_name];
     CHECK_EQ(bn_index2tensor_tuple_index->size(), bn_index);
-    bn_index2tensor_tuple_index->push_back(i);
+    bn_index2tensor_tuple_index->emplace_back(i);
   }
 }

 }  // namespace

 ArgTuple::ArgTuple(const std::vector<std::string>& indexed_bns) : indexed_bns_(indexed_bns) {
   indexed_arg_name_and_index_.reserve(indexed_bns.size());
-  for (const auto& bn : indexed_bns) { indexed_arg_name_and_index_.push_back(GetPair(bn)); }
+  for (const auto& bn : indexed_bns) { indexed_arg_name_and_index_.emplace_back(GetPair(bn)); }
   InitArgName2BnIndex2TensorTupleIndex(indexed_arg_name_and_index_,
                                        &arg_name2bn_index2tensor_tuple_index_);
   for (int i = 0; i < indexed_bns.size(); ++i) {
1 change: 1 addition & 0 deletions oneflow/core/framework/device.cpp
@@ -191,6 +191,7 @@ std::string Device::ToString() const {
 Maybe<Symbol<Device>> Device::MakeDeviceByParallelDesc(const ParallelDesc& parallel_desc) {
   std::string type = Type4DeviceTag(parallel_desc.device_tag());
   std::vector<std::string> machine_device_ids;
+  machine_device_ids.reserve(parallel_desc.parallel_conf().device_name().size());
   for (const auto& item : parallel_desc.parallel_conf().device_name()) {
     machine_device_ids.emplace_back(item);
   }
2 changes: 1 addition & 1 deletion oneflow/core/framework/instruction_replay.cpp
@@ -46,7 +46,7 @@ void EndRecordingInstructions() { *RecordingInstructionsFlag() = false; }
 void ClearRecordedInstructions() { RecordedInstructionList()->clear(); }

 void RecordInstruction(const intrusive::shared_ptr<vm::InstructionMsg>& instruction) {
-  RecordedInstructionList()->push_back(instruction);
+  RecordedInstructionList()->emplace_back(instruction);
 }

 void ReplayInstructions() {