From c7ded2fe2fe1c4474d2d62a3af6a69964bdbb785 Mon Sep 17 00:00:00 2001 From: Megvii Engine Team Date: Thu, 17 Feb 2022 10:42:27 +0800 Subject: [PATCH] refactor(imperative): remove unnecessary reserve in small vector GitOrigin-RevId: 85c30bc828a65bc6e626ced97095ee3c68a50c31 --- imperative/src/impl/interpreter/interpreter_impl.cpp | 9 --------- imperative/src/impl/ops/tensor_manip.cpp | 1 - imperative/src/impl/proxy_graph.cpp | 3 --- imperative/src/impl/proxy_graph/mini_graph.h | 5 ----- 4 files changed, 18 deletions(-) diff --git a/imperative/src/impl/interpreter/interpreter_impl.cpp b/imperative/src/impl/interpreter/interpreter_impl.cpp index 738627bb..173ac65b 100644 --- a/imperative/src/impl/interpreter/interpreter_impl.cpp +++ b/imperative/src/impl/interpreter/interpreter_impl.cpp @@ -238,7 +238,6 @@ void ChannelImpl::dispatch_default_cpu( MGB_RECORD_EVENT(ShapeInferEvent, validated); SmallVector input_tensornds; - input_tensornds.reserve(input_descs.size()); CompNode output_cn; { MGB_LOCK_GUARD(m_mutex); @@ -261,9 +260,7 @@ void ChannelImpl::dispatch_default_cpu( } } - outputs->reserve(output_descs.size()); SmallVector output_tensornds; - output_tensornds.reserve(output_descs.size()); for (auto&& desc : output_descs) { // TODO: may conflict with condtake, which need alloc inside mgb_assert(!desc.layout.is_empty()); @@ -290,7 +287,6 @@ void ChannelImpl::dispatch_default_cpu( } SmallVector output_infos; - output_infos.reserve(output_descs.size()); for (auto&& tensornd : output_tensornds) { HostTensorND host_tensornd = HostTensorND::make_proxy(tensornd).proxy_to_comp_node(output_cn); @@ -329,9 +325,6 @@ void ChannelImpl::dispatch_kernel( ApplyOp cmd{Profiler::next_id(), std::move(op)}; cmd.inputs = std::move(input_infos); - cmd.outputs.reserve(output_descs.size()); - outputs->reserve(output_descs.size()); - for (int i = 0; i < output_descs.size(); ++i) { auto&& desc = output_descs[i]; auto info = alloc(); @@ -399,9 +392,7 @@ SmallVector ChannelImpl::apply_op_impl( 
i); } SmallVector input_infos; - input_infos.reserve(inputs.size()); SmallVector input_descs; - input_descs.reserve(inputs.size()); { MGB_LOCK_GUARD(m_mutex); for (auto i : inputs) { diff --git a/imperative/src/impl/ops/tensor_manip.cpp b/imperative/src/impl/ops/tensor_manip.cpp index ec560303..413e1d8c 100644 --- a/imperative/src/impl/ops/tensor_manip.cpp +++ b/imperative/src/impl/ops/tensor_manip.cpp @@ -87,7 +87,6 @@ void apply_on_device_tensornd( HostTensorND get_var_shape_host_tensor( const OpDef& def, const SmallVector& inputs) { SmallVector input_tensornds; - input_tensornds.reserve(inputs.size()); for (auto&& inp : inputs) { input_tensornds.push_back(inp->dev_tensor()); } diff --git a/imperative/src/impl/proxy_graph.cpp b/imperative/src/impl/proxy_graph.cpp index 4c8aac45..8248c694 100644 --- a/imperative/src/impl/proxy_graph.cpp +++ b/imperative/src/impl/proxy_graph.cpp @@ -232,7 +232,6 @@ public: // fill args for infer_func cg::static_infer::InpVal args{1}; - args.val.reserve(desc->deps.size()); auto push_shape = [&args](const TensorShape* shape) { args.val.emplace_back(); args.val.back().m_shape = shape; @@ -607,8 +606,6 @@ EncodedSubgraph ProxyGraph::make_backward_graph( } // set backward graph inputs - igraph.inputs.reserve(nr_backward_graph_inputs); - result.input_mask.reserve(nr_backward_graph_inputs); auto write_inputs = [&igraph, &var2idx, &result](const VarNodeArray& vars) { for (auto&& i : vars) { auto&& iter = var2idx.find(i); diff --git a/imperative/src/impl/proxy_graph/mini_graph.h b/imperative/src/impl/proxy_graph/mini_graph.h index 69ba85df..a6d58460 100644 --- a/imperative/src/impl/proxy_graph/mini_graph.h +++ b/imperative/src/impl/proxy_graph/mini_graph.h @@ -132,7 +132,6 @@ protected: mgb_assert(!infer_func); infer_func = func; inp_val.val.resize(dep_val.size()); - deps.reserve(dep_val.size()); for (auto&& dep : dep_val) { auto [found, i] = find_index(opr->input(), dep.dest); @@ -253,7 +252,6 @@ public: // fix permuted input: the order of 
m_opr->input() and vinputs may be // different, input_remap keeps the index map of m_opr->input() and vinputs - input_remap.reserve(m_opr->input().size()); for (auto* v : m_opr->input()) { auto [found, i] = find_index(vinputs, v); mgb_assert(found); @@ -272,7 +270,6 @@ public: } // fix permuted output - output_remap.reserve(ovars.size()); for (auto* v : ovars) { auto [found, i] = find_index(m_opr->output(), v); mgb_assert(found); @@ -784,7 +781,6 @@ public: auto sess = minigraph.infer_session(inputs); std::tuple, bool> ret; auto& [descs, noerr] = ret; - descs.reserve(minigraph.output_size()); for (size_t i = 0; i < minigraph.output_size(); ++i) { descs.emplace_back(); auto& desc = descs.back(); @@ -819,7 +815,6 @@ public: mgb_assert(shape); minigraph.opr()->output()[i]->shape(*shape); } - descs.reserve(minigraph.output_size()); for (size_t i = 0; i < minigraph.output_size(); ++i) { auto* ovar = minigraph.output_var(i); mgb_assert(ovar->dtype().valid() && ovar->comp_node().valid());