GitOrigin-RevId: 85c30bc828
tags/v1.9.0
@@ -238,7 +238,6 @@ void ChannelImpl::dispatch_default_cpu(
     MGB_RECORD_EVENT(ShapeInferEvent, validated);
     SmallVector<DeviceTensorND> input_tensornds;
-    input_tensornds.reserve(input_descs.size());
     CompNode output_cn;
     {
         MGB_LOCK_GUARD(m_mutex);
@@ -261,9 +260,7 @@ void ChannelImpl::dispatch_default_cpu(
         }
     }
-    outputs->reserve(output_descs.size());
     SmallVector<DeviceTensorND> output_tensornds;
-    output_tensornds.reserve(output_descs.size());
     for (auto&& desc : output_descs) {
         // TODO: may conflict with condtake, which need alloc inside
         mgb_assert(!desc.layout.is_empty());
@@ -290,7 +287,6 @@ void ChannelImpl::dispatch_default_cpu(
     }
     SmallVector<TensorInfo*> output_infos;
-    output_infos.reserve(output_descs.size());
    for (auto&& tensornd : output_tensornds) {
         HostTensorND host_tensornd =
                 HostTensorND::make_proxy(tensornd).proxy_to_comp_node(output_cn);
@@ -329,9 +325,6 @@ void ChannelImpl::dispatch_kernel(
     ApplyOp cmd{Profiler::next_id(), std::move(op)};
     cmd.inputs = std::move(input_infos);
-    cmd.outputs.reserve(output_descs.size());
-    outputs->reserve(output_descs.size());
     for (int i = 0; i < output_descs.size(); ++i) {
         auto&& desc = output_descs[i];
         auto info = alloc();
@@ -399,9 +392,7 @@ SmallVector<Handle> ChannelImpl::apply_op_impl(
                 i);
     }
     SmallVector<TensorInfo*> input_infos;
-    input_infos.reserve(inputs.size());
     SmallVector<LogicalTensorDesc> input_descs;
-    input_descs.reserve(inputs.size());
     {
         MGB_LOCK_GUARD(m_mutex);
         for (auto i : inputs) {
@@ -87,7 +87,6 @@ void apply_on_device_tensornd(
 HostTensorND get_var_shape_host_tensor(
         const OpDef& def, const SmallVector<TensorPtr>& inputs) {
     SmallVector<DeviceTensorND> input_tensornds;
-    input_tensornds.reserve(inputs.size());
     for (auto&& inp : inputs) {
         input_tensornds.push_back(inp->dev_tensor());
     }
@@ -232,7 +232,6 @@ public:
         // fill args for infer_func
         cg::static_infer::InpVal args{1};
-        args.val.reserve(desc->deps.size());
         auto push_shape = [&args](const TensorShape* shape) {
             args.val.emplace_back();
             args.val.back().m_shape = shape;
@@ -607,8 +606,6 @@ EncodedSubgraph ProxyGraph::make_backward_graph(
     }
     // set backward graph inputs
-    igraph.inputs.reserve(nr_backward_graph_inputs);
-    result.input_mask.reserve(nr_backward_graph_inputs);
     auto write_inputs = [&igraph, &var2idx, &result](const VarNodeArray& vars) {
         for (auto&& i : vars) {
             auto&& iter = var2idx.find(i);
@@ -132,7 +132,6 @@ protected:
         mgb_assert(!infer_func);
         infer_func = func;
         inp_val.val.resize(dep_val.size());
-        deps.reserve(dep_val.size());
         for (auto&& dep : dep_val) {
             auto [found, i] = find_index(opr->input(), dep.dest);
@@ -253,7 +252,6 @@ public:
         // fix permuted input: the order of m_opr->input() and vinputs may be
         // different, input_remap keeps the index map of m_opr->input() and vinputs
-        input_remap.reserve(m_opr->input().size());
         for (auto* v : m_opr->input()) {
             auto [found, i] = find_index(vinputs, v);
             mgb_assert(found);
@@ -272,7 +270,6 @@ public:
         }
         // fix permuted output
-        output_remap.reserve(ovars.size());
         for (auto* v : ovars) {
             auto [found, i] = find_index(m_opr->output(), v);
             mgb_assert(found);
@@ -784,7 +781,6 @@ public:
         auto sess = minigraph.infer_session(inputs);
         std::tuple<SmallVector<LogicalTensorDesc>, bool> ret;
         auto& [descs, noerr] = ret;
-        descs.reserve(minigraph.output_size());
         for (size_t i = 0; i < minigraph.output_size(); ++i) {
             descs.emplace_back();
             auto& desc = descs.back();
@@ -819,7 +815,6 @@ public:
             mgb_assert(shape);
             minigraph.opr()->output()[i]->shape(*shape);
         }
-        descs.reserve(minigraph.output_size());
         for (size_t i = 0; i < minigraph.output_size(); ++i) {
             auto* ovar = minigraph.output_var(i);
             mgb_assert(ovar->dtype().valid() && ovar->comp_node().valid());
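
Every hunk above deletes the same pattern: a reserve() call issued just before a loop that appends to the container with push_back or emplace_back. As a minimal sketch of what the removed calls did (std::vector stands in for MegEngine's SmallVector, and the data is made up), pre-sizing guarantees the appends cannot reallocate mid-loop; dropping the call simply lets the container grow on demand:

// sketch.cpp -- illustrates the reserve-before-append pattern removed above;
// std::vector stands in for SmallVector, and the data is hypothetical.
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> output_descs = {1, 2, 3, 4};

    std::vector<int> outputs;
    outputs.reserve(output_descs.size());  // the kind of call this commit drops
    for (int desc : output_descs) {
        outputs.push_back(desc * 2);  // cannot reallocate: capacity was pre-set
    }
    std::printf("size=%zu capacity=%zu\n", outputs.size(), outputs.capacity());
    return 0;
}

The commit itself does not state a rationale, but note that for a small-buffer container like SmallVector an up-front reserve only pays off when the element count regularly exceeds the inline capacity; for short tensor lists the call is largely redundant.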