diff --git a/src/jit/impl/fusion_pass.cpp b/src/jit/impl/fusion_pass.cpp
index 4a606711..1887b938 100644
--- a/src/jit/impl/fusion_pass.cpp
+++ b/src/jit/impl/fusion_pass.cpp
@@ -33,16 +33,16 @@ class JITFusionPass::Impl final {
     CompNode::UnorderedMap<size_t> m_cn2max_nr_input;
     SubGraph::Rewriter m_rewriter;
 
-    SmallVector<std::unique_ptr<InternalGraphGenrator>> m_igraph_gen_storage;
-    ThinHashMap<VarNode*, InternalGraphGenrator*> m_var2igraph_gen;
+    SmallVector<std::unique_ptr<InternalGraphGenerator>> m_igraph_gen_storage;
+    ThinHashMap<VarNode*, InternalGraphGenerator*> m_var2igraph_gen;
 
     //! map from var to its reader oprs and the corresponding dependency types
     ThinHashMap<VarNode*, SmallVector<std::pair<OperatorNodeBase*, DepType>>>
             m_var_readers;
     ThinHashSet<VarNode*> m_endpoint_set;
 
-    //! create a new InternalGraphGenrator rooted at given opr
-    InternalGraphGenrator* create_new_igraph_gen(OperatorNodeBase* opr);
+    //! create a new InternalGraphGenerator rooted at given opr
+    InternalGraphGenerator* create_new_igraph_gen(OperatorNodeBase* opr);
 
     //! process a single operator, maintaining m_var2igraph_gen
     void process_opr(OperatorNodeBase* opr);
@@ -51,11 +51,11 @@ class JITFusionPass::Impl final {
 
     //! check whether all oprs which depend on the var are in i_graph
     bool test_all_readers_in_the_graph(VarNode* var,
-                                       InternalGraphGenrator* i_graph);
+                                       InternalGraphGenerator* i_graph);
 
     //! check shape to determine whether the opr should be added to the internal
     //! graph
-    bool check_shape(cg::OperatorNodeBase* opr, InternalGraphGenrator* i_graph);
+    bool check_shape(cg::OperatorNodeBase* opr, InternalGraphGenerator* i_graph);
 
     //! use m_rewriter to update graph
     void update_graph();
@@ -155,7 +155,7 @@ void JITFusionPass::Impl::update_graph() {
 }
 
 bool JITFusionPass::Impl::test_all_readers_in_the_graph(
-        VarNode* var, InternalGraphGenrator* ig_gen) {
+        VarNode* var, InternalGraphGenerator* ig_gen) {
     for (auto&& reader : m_var_readers.at(var)) {
         if (reader.second & DepType::DEV_VALUE) {
             if (ig_gen->opr_set().count(reader.first) == 0) {
@@ -167,7 +167,7 @@ bool JITFusionPass::Impl::test_all_readers_in_the_graph(
 
 bool JITFusionPass::Impl::check_shape(cg::OperatorNodeBase* opr,
-                                      InternalGraphGenrator* ig_gen) {
+                                      InternalGraphGenerator* ig_gen) {
     if (!cg::is_static_var_shape(opr->output(0))) {
         // currently we do not handle dynamic shape in JIT
         return false;
@@ -249,9 +249,9 @@ bool JITFusionPass::Impl::check_shape(cg::OperatorNodeBase* opr,
     }
 }
 
-InternalGraphGenrator* JITFusionPass::Impl::create_new_igraph_gen(
+InternalGraphGenerator* JITFusionPass::Impl::create_new_igraph_gen(
         OperatorNodeBase* opr) {
-    auto uptr = std::make_unique<InternalGraphGenrator>(opr);
+    auto uptr = std::make_unique<InternalGraphGenerator>(opr);
     auto ptr = uptr.get();
     m_igraph_gen_storage.emplace_back(std::move(uptr));
     m_var2igraph_gen[opr->output(0)] = ptr;
@@ -267,7 +267,7 @@ void JITFusionPass::Impl::process_opr(OperatorNodeBase* opr) {
     }
     // dimshuffle should not be an endpoint, because megbrain has lazy
    // dimshuffle machanism
-    InternalGraphGenrator* ig_gen = nullptr;
+    InternalGraphGenerator* ig_gen = nullptr;
     if (m_var2igraph_gen.count(opr->output(0)) == 0) {
         // because of the reverse traversal, when an operator is being
         // processed but not in m_var2igraph_gen, means it is a endpoint of a
diff --git a/src/jit/impl/internal_graph.cpp b/src/jit/impl/internal_graph.cpp
index 6a726e4e..4c257170 100644
--- a/src/jit/impl/internal_graph.cpp
+++ b/src/jit/impl/internal_graph.cpp
@@ -81,12 +81,12 @@ InternalGraphPtr expand_executor_opr(const InternalGraphPtr& prev_igraph) {
 
 } // namespace
 
-InternalGraphGenrator::InternalGraphGenrator(cg::OperatorNodeBase* opr)
+InternalGraphGenerator::InternalGraphGenerator(cg::OperatorNodeBase* opr)
         : m_output{opr->output(0)} {
     add_opr(opr);
 }
 
-VarNode* InternalGraphGenrator::replace_graph_by_placeholder() {
+VarNode* InternalGraphGenerator::replace_graph_by_placeholder() {
     ThinHashMap<VarNode*, VarNode*> old2new;
     auto cpu_default = CompNode::default_cpu();
     auto igraph_copy_opr_shallow = [cpu_default](OperatorNodeBase* opr,
@@ -163,7 +163,7 @@ VarNode* InternalGraphGenrator::replace_graph_by_placeholder() {
     return old2new.at(m_output);
 }
 
-InternalGraphPtr InternalGraphGenrator::generate() {
+InternalGraphPtr InternalGraphGenerator::generate() {
     m_input_idx = 0;
 
     auto new_nd = replace_graph_by_placeholder();
@@ -172,7 +172,7 @@ InternalGraphPtr InternalGraphGenrator::generate() {
     return expand_executor_opr(igraph);
 }
 
-size_t InternalGraphGenrator::get_cnt_input_if_add(
+size_t InternalGraphGenerator::get_cnt_input_if_add(
         cg::OperatorNodeBase* opr) const {
     // minus 1 first because this opr should be removed from subgraph's input
     size_t new_cnt_input = m_graph_input_set.size() - 1;
@@ -183,7 +183,7 @@ size_t InternalGraphGenrator::get_cnt_input_if_add(
     return new_cnt_input;
 }
 
-void InternalGraphGenrator::add_opr(cg::OperatorNodeBase* opr) {
+void InternalGraphGenerator::add_opr(cg::OperatorNodeBase* opr) {
     if (m_opr_set.count(opr)) {
         // ignore duplicated oprs (which occur in tests)
         return;
@@ -253,7 +253,7 @@ void InternalGraphGenrator::add_opr(cg::OperatorNodeBase* opr) {
     }
 }
 
-void InternalGraphGenrator::find_reduce_opr_deps(cg::OperatorNodeBase* opr) {
+void InternalGraphGenerator::find_reduce_opr_deps(cg::OperatorNodeBase* opr) {
     mgb_assert(opr->same_type<opr::Reduce>() ||
                (opr->same_type<jit::JITExecutor>() &&
                 try_cast_as_op<jit::JITExecutor>(opr)->has_reduce()));
@@ -264,7 +264,7 @@ void InternalGraphGenrator::find_reduce_opr_deps(cg::OperatorNodeBase* opr) {
     cg::DepOprIter{cb}.add(opr);
 }
 
-void InternalGraphGenrator::find_oprs_depended_by_dimshuffle(
+void InternalGraphGenerator::find_oprs_depended_by_dimshuffle(
         cg::OperatorNodeBase* dimshuffle) {
     mgb_assert(
             dimshuffle->same_type<opr::Dimshuffle>() ||
@@ -287,7 +287,7 @@ void InternalGraphGenrator::find_oprs_depended_by_dimshuffle(
     cg::DepOprIter{cb}.add(dimshuffle);
 }
 
-PlaceholderArray InternalGraphGenrator::to_placeholder_opr_arr(
+PlaceholderArray InternalGraphGenerator::to_placeholder_opr_arr(
         const VarNodeArray& vars) {
     PlaceholderArray ret(vars.size());
     for (size_t i = 0; i < vars.size(); ++i) {
diff --git a/src/jit/include/megbrain/jit/internal_graph.h b/src/jit/include/megbrain/jit/internal_graph.h
index 489bfcd6..5b41d0df 100644
--- a/src/jit/include/megbrain/jit/internal_graph.h
+++ b/src/jit/include/megbrain/jit/internal_graph.h
@@ -76,12 +76,12 @@ private:
  * This object stores intermediate state during visiting the computing graph in
  * JITFusionPass.
  *
- * The graph is iterated in reverse topological order. InternalGraphGenrator
+ * The graph is iterated in reverse topological order. InternalGraphGenerator
 * starts with a single operator (i.e. the output node of the fused opr), and
 * new oprs are gradually added into it. Thus the process is expanding a tree
 * rooted at the output node.
 */
-class InternalGraphGenrator {
+class InternalGraphGenerator {
     //! replace oprs in the graph of m_output and populate m_orig_inps,
     //! m_placeholders
     VarNode* replace_graph_by_placeholder();
@@ -95,7 +95,7 @@ class InternalGraphGenrator {
     void find_oprs_depended_by_dimshuffle(cg::OperatorNodeBase* opr);
 
 public:
-    explicit InternalGraphGenrator(cg::OperatorNodeBase* opr);
+    explicit InternalGraphGenerator(cg::OperatorNodeBase* opr);
 
     //! generate the graph; this method can be called multiple times
     InternalGraphPtr generate();
diff --git a/src/jit/test/codegen.cpp b/src/jit/test/codegen.cpp
index 9e823089..bcafdc35 100644
--- a/src/jit/test/codegen.cpp
+++ b/src/jit/test/codegen.cpp
@@ -54,7 +54,7 @@ void run(Backend backend, CompNode cn) {
 
     VarNodeArray inputs{a.node(), b.node(), c.node()}, outputs{y.node()};
     auto ig_gen =
-            std::make_unique<InternalGraphGenrator>(y.node()->owner_opr());
+            std::make_unique<InternalGraphGenerator>(y.node()->owner_opr());
 
     for (auto i : get_rev_topo_order(y)) {
         if (!i->same_type<opr::Host2DeviceCopy>()) {
@@ -91,7 +91,7 @@ void run(Backend backend, CompNode cn) {
 
     VarNodeArray inputs{a.node(), b.node(), c.node()}, outputs{y.node()};
     auto ig_gen =
-            std::make_unique<InternalGraphGenrator>(y.node()->owner_opr());
+            std::make_unique<InternalGraphGenerator>(y.node()->owner_opr());
 
     for (auto i : get_rev_topo_order(y)) {
         if (!i->same_type<opr::Host2DeviceCopy>()) {
diff --git a/src/jit/test/fusion.cpp b/src/jit/test/fusion.cpp
index 2130bae5..60a4a7ac 100644
--- a/src/jit/test/fusion.cpp
+++ b/src/jit/test/fusion.cpp
@@ -540,7 +540,7 @@ void run(Backend backend, CompNode cn) {
 
     auto make_jit = [](SymbolVar target, const SymbolVarArray& inputs) {
         auto y = target.node();
-        auto ig_gen = std::make_unique<InternalGraphGenrator>(y->owner_opr());
+        auto ig_gen = std::make_unique<InternalGraphGenerator>(y->owner_opr());
         auto inputs_vptr = cg::to_var_node_array(inputs);
         for (auto i : get_rev_topo_order(
                      target, {inputs_vptr.begin(), inputs_vptr.end()})) {
@@ -830,9 +830,9 @@ TEST(TestJITFusionHalide, JITExecutor) {
          y = opr::reduce_sum(a + b, shape_of_b),
          z = opr::reduce_sum(a * b, shape_of_a);
     auto ig_gen_1 =
-            std::make_unique<InternalGraphGenrator>(y.node()->owner_opr());
+            std::make_unique<InternalGraphGenerator>(y.node()->owner_opr());
     auto ig_gen_2 =
-            std::make_unique<InternalGraphGenrator>(z.node()->owner_opr());
+            std::make_unique<InternalGraphGenerator>(z.node()->owner_opr());
     {
         ThinHashSet<VarNode*> nd_set;
         nd_set.insert(a.node());
diff --git a/src/jit/test/helper.cpp b/src/jit/test/helper.cpp
index e9ada456..6889c04d 100644
--- a/src/jit/test/helper.cpp
+++ b/src/jit/test/helper.cpp
@@ -85,7 +85,7 @@ void FusionChecker::ensure_init_graph() {
 
     SymbolVar jit_y;
     if (m_direct_build) {
-        auto ig_gen = std::make_unique<InternalGraphGenrator>(
+        auto ig_gen = std::make_unique<InternalGraphGenerator>(
                 m_truth_y.node()->owner_opr());
        ThinHashSet<VarNode*> endpoints_set;
         for (size_t i = 0; i < m_nr_input; ++i) {
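Note for reviewers (not part of the patch): the rename is mechanical, but since it touches every use site, here is a minimal sketch of how the renamed InternalGraphGenerator is typically driven. It is distilled from the src/jit/test/codegen.cpp hunks above; get_rev_topo_order is the existing helper from src/jit/test, and build_fused_igraph is a hypothetical wrapper name used only for illustration.

    // Build the internal graph for the fusible expression rooted at `y`.
    // Every operator reachable from `y` except Host2DeviceCopy inputs is
    // added; the inputs left uncovered later become Placeholder oprs.
    InternalGraphPtr build_fused_igraph(SymbolVar y) {
        auto ig_gen =
                std::make_unique<InternalGraphGenerator>(y.node()->owner_opr());
        // oprs must be fed in reverse topological order: the generator starts
        // at the output opr and expands a tree rooted there (see the class
        // comment in internal_graph.h)
        for (auto i : get_rev_topo_order(y)) {
            if (!i->same_type<opr::Host2DeviceCopy>()) {
                ig_gen->add_opr(i);
            }
        }
        // replace the remaining graph inputs with placeholders and return the
        // detached internal graph; generate() may be called multiple times
        return ig_gen->generate();
    }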