
build(third_party): update llvm-project

GitOrigin-RevId: 26bb606606
Branch: release-1.1
Author: Megvii Engine Team (4 years ago)
Parent commit: 0007b9e09b
7 changed files with 9 additions and 12 deletions
  1. src/jit/impl/mlir/compiler.cpp (+2 -6)
  2. src/jit/impl/mlir/executable_cpu.cpp (+1 -1)
  3. src/jit/impl/mlir/ir/lower_to_affine_pass.cpp (+2 -1)
  4. src/jit/impl/mlir/ir/lower_to_gpu_pass.cpp (+2 -1)
  5. src/jit/impl/mlir/ir/lower_to_llvm_pass.cpp (+1 -1)
  6. src/jit/test/codegen.cpp (+0 -1)
  7. third_party/llvm-project (+1 -1)

src/jit/impl/mlir/compiler.cpp (+2 -6)

@@ -130,10 +130,9 @@ void add_cpu_lowering_pass(mlir::PassManager& manager) {
         opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
     }
-
-    manager.addPass(create_lower_to_affine_pass());
     {
         mlir::OpPassManager& opt_pm = manager.nest<mlir::FuncOp>();
+        opt_pm.addPass(create_lower_to_affine_pass());
         opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
         opt_pm.addPass(mlir::createLoopFusionPass());
@@ -150,9 +149,9 @@ void add_cuda_lowering_pass(mlir::PassManager& manager,
         opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
     }
-    manager.addPass(create_lower_to_gpu_pass());
     {
         mlir::OpPassManager& opt_pm = manager.nest<mlir::FuncOp>();
+        opt_pm.addPass(create_lower_to_gpu_pass());
         opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
         opt_pm.addPass(mlir::createLoopFusionPass());
@@ -179,9 +178,6 @@ thread_local mlir::MLIRContext MLIRCompiler::sm_ctx;

 MLIRCompiler::MLIRCompiler(CompNode::DeviceType device_type)
         : m_device_type{device_type} {
-    mlir::registerAllDialects();
-    mlir::registerDialect<MgbDialect>();
-
 #if MGB_CUDA
     if (m_device_type == CompNode::DeviceType::CUDA) {
         LLVMInitializeNVPTXTarget();
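
Note on the two pipeline hunks above: after this LLVM bump, the lowering
pass is added to a pass manager nested under mlir::FuncOp instead of the
top-level module pass manager, so it runs per function alongside the other
function-level cleanups. A minimal sketch of the new shape, assuming a
hypothetical function-pass factory create_my_lowering_pass():

    #include "mlir/IR/Function.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Transforms/Passes.h"

    void build_pipeline(mlir::PassManager& manager) {
        // Function-level passes hang off a pass manager nested under FuncOp.
        mlir::OpPassManager& opt_pm = manager.nest<mlir::FuncOp>();
        opt_pm.addPass(create_my_lowering_pass());  // hypothetical factory
        opt_pm.addPass(mlir::createCanonicalizerPass());
        opt_pm.addPass(mlir::createCSEPass());
    }

The third hunk drops the global mlir::registerAllDialects() and
mlir::registerDialect<MgbDialect>() calls, in line with upstream MLIR's move
away from global dialect registration toward registering dialects on the
MLIRContext itself.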


src/jit/impl/mlir/executable_cpu.cpp (+1 -1)

@@ -81,7 +81,7 @@ MLIRCPUExecutable::MLIRCPUExecutable(mlir::OwningModuleRef& module,
     auto opt_pipeline = mlir::makeOptimizingTransformer(3, 3, 0);
     std::vector<std::string> libs;
     auto&& engine = mlir::ExecutionEngine::create(
-            *module, opt_pipeline, llvm::None,
+            *module, nullptr, opt_pipeline, llvm::None,
             std::vector<llvm::StringRef>(libs.begin(), libs.end()), true,
             false);
     mgb_assert(engine);
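
Note: the extra nullptr above is the new second parameter that
mlir::ExecutionEngine::create gained in this LLVM revision, a callback for
customizing how the MLIR module is translated to an llvm::Module; passing
nullptr keeps the default translation. A sketch of the updated call, reusing
module, opt_pipeline and libs from the surrounding code (the commented
parameter names are assumptions based on upstream MLIR, for orientation only):

    auto engine = mlir::ExecutionEngine::create(
            *module, /*llvmModuleBuilder=*/nullptr, opt_pipeline,
            /*jitCodeGenOptLevel=*/llvm::None,
            std::vector<llvm::StringRef>(libs.begin(), libs.end()),
            /*enableObjectCache=*/true,
            /*enableGDBNotificationListener=*/false);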


src/jit/impl/mlir/ir/lower_to_affine_pass.cpp (+2 -1)

@@ -176,7 +176,8 @@ public:
                 AssignOpLowering, ConstantScalarOpLowering>(
                 &getContext());
 
-        if (failed(applyPartialConversion(getFunction(), target, patterns))) {
+        if (failed(applyPartialConversion(getFunction(), target,
+                                          std::move(patterns)))) {
             signalPassFailure();
         }
     }
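
Note: this hunk, together with the matching ones in lower_to_gpu_pass.cpp and
lower_to_llvm_pass.cpp below, adapts to an upstream signature change: the
dialect-conversion drivers now consume the pattern list by value, so the
caller hands over ownership with std::move. A minimal sketch of the pattern,
written as pass-internal code like the file above and assuming a hypothetical
pattern class MyOpLowering:

    mlir::OwningRewritePatternList patterns;
    patterns.insert<MyOpLowering>(&getContext());  // hypothetical pattern

    // Patterns are moved into the driver rather than passed by reference.
    if (failed(applyPartialConversion(getFunction(), target,
                                      std::move(patterns)))) {
        signalPassFailure();
    }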


src/jit/impl/mlir/ir/lower_to_gpu_pass.cpp (+2 -1)

@@ -279,7 +279,8 @@ public:
                 ConstantScalarOpLowering, AssignOpLowering>(
                 &getContext(), launch_op);
 
-        if (failed(applyPartialConversion(func_op, target, patterns))) {
+        if (failed(applyPartialConversion(func_op, target,
+                                          std::move(patterns)))) {
             signalPassFailure();
         }
     }


src/jit/impl/mlir/ir/lower_to_llvm_pass.cpp (+1 -1)

@@ -51,7 +51,7 @@ public:
         populateExpandTanhPattern(patterns, &getContext());
 
         auto module = getOperation();
-        if (failed(applyFullConversion(module, target, patterns)))
+        if (failed(applyFullConversion(module, target, std::move(patterns))))
             signalPassFailure();
     }
 };
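
For context: applyFullConversion (used here) fails if any operation reachable
from the root is still illegal after rewriting, while applyPartialConversion
(used in the affine and GPU passes above) tolerates operations that the
ConversionTarget neither legalizes nor explicitly marks illegal. A brief
sketch of how a target is typically set up for a full lowering to the LLVM
dialect at this LLVM revision:

    mlir::ConversionTarget target(getContext());
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    target.addLegalOp<mlir::ModuleOp, mlir::ModuleTerminatorOp>();

    // Every remaining op must now be legal, or the conversion fails.
    if (failed(applyFullConversion(module, target, std::move(patterns))))
        signalPassFailure();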


src/jit/test/codegen.cpp (+0 -1)

@@ -366,7 +366,6 @@ TYPED_TEST(TestJITMlirUnaryElemwise, runGpu) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
 
-    SKIP_MODE(SIN);
     SKIP_MODE(ROUND);
 
     run_mlir_mode<TypeParam, 1>(cn);


third_party/llvm-project (+1 -1)

@@ -1 +1 @@
-Subproject commit fc031d29bea856f2b91a250fd81c5f9fb79dbe07
+Subproject commit c30ab6c2a307cfdce8323ed94c3d70eb2d26bc14
