diff --git a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
index 72aacd0e8e89..23a8764db1d2 100644
--- a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
+++ b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
@@ -72,13 +72,15 @@ public:
   /// Creates an execution engine for the given module. If `transformer` is
   /// provided, it will be called on the LLVM module during JIT-compilation and
-  /// can be used, e.g., for reporting or optimization.
-  /// If `sharedLibPaths` are provided, the underlying JIT-compilation will open
-  /// and link the shared libraries for symbol resolution.
-  /// If `objectCache` is provided, JIT compiler will use it to store the object
-  /// generated for the given module.
+  /// can be used, e.g., for reporting or optimization. `jitCodeGenOptLevel`,
+  /// when provided, is used as the optimization level for target code
+  /// generation. If `sharedLibPaths` are provided, the underlying
+  /// JIT-compilation will open and link the shared libraries for symbol
+  /// resolution. If `objectCache` is provided, JIT compiler will use it to
+  /// store the object generated for the given module.
   static llvm::Expected<std::unique_ptr<ExecutionEngine>> create(
       ModuleOp m, std::function<llvm::Error(llvm::Module *)> transformer = {},
+      Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = llvm::None,
       ArrayRef<StringRef> sharedLibPaths = {}, bool enableObjectCache = false);
 
   /// Looks up a packed-argument function with the given name and returns a
diff --git a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
index 2ba50544e512..cc0979a8a17a 100644
--- a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -198,6 +198,7 @@ ExecutionEngine::ExecutionEngine(bool enableObjectCache)
 Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
     ModuleOp m, std::function<llvm::Error(llvm::Module *)> transformer,
+    Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel,
     ArrayRef<StringRef> sharedLibPaths, bool enableObjectCache) {
   auto engine = std::make_unique<ExecutionEngine>(enableObjectCache);
@@ -264,6 +265,8 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
   // LLJITWithObjectCache example.
   auto compileFunctionCreator = [&](JITTargetMachineBuilder JTMB)
       -> Expected<IRCompileLayer::CompileFunction> {
+    if (jitCodeGenOptLevel)
+      JTMB.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
     auto TM = JTMB.createTargetMachine();
     if (!TM)
       return TM.takeError();
diff --git a/mlir/lib/Support/JitRunner.cpp b/mlir/lib/Support/JitRunner.cpp
index 549b1ad479ae..f87664d621a6 100644
--- a/mlir/lib/Support/JitRunner.cpp
+++ b/mlir/lib/Support/JitRunner.cpp
@@ -81,14 +81,18 @@ static llvm::cl::list<const llvm::PassInfo *, bool, llvm::PassNameParser>
                llvm::cl::cat(optFlags));
 
 // CLI variables for -On options.
-static llvm::cl::opt<bool> optO0("O0", llvm::cl::desc("Run opt O0 passes"),
-                                 llvm::cl::cat(optFlags));
-static llvm::cl::opt<bool> optO1("O1", llvm::cl::desc("Run opt O1 passes"),
-                                 llvm::cl::cat(optFlags));
-static llvm::cl::opt<bool> optO2("O2", llvm::cl::desc("Run opt O2 passes"),
-                                 llvm::cl::cat(optFlags));
-static llvm::cl::opt<bool> optO3("O3", llvm::cl::desc("Run opt O3 passes"),
-                                 llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO0("O0", llvm::cl::desc("Run opt passes and codegen at O0"),
+          llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO1("O1", llvm::cl::desc("Run opt passes and codegen at O1"),
+          llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO2("O2", llvm::cl::desc("Run opt passes and codegen at O2"),
+          llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO3("O3", llvm::cl::desc("Run opt passes and codegen at O3"),
+          llvm::cl::cat(optFlags));
 
 static llvm::cl::OptionCategory clOptionsCategory("linking options");
 static llvm::cl::list<std::string>
@@ -178,14 +182,34 @@ static LogicalResult convertAffineStandardToLLVMIR(ModuleOp module) {
   return manager.run(module);
 }
 
+static llvm::Optional<unsigned> getCommandLineOptLevel() {
+  llvm::Optional<unsigned> optLevel;
+  llvm::SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
+      optO0, optO1, optO2, optO3};
+
+  // Determine if there is an optimization flag present.
+  for (unsigned j = 0; j < 4; ++j) {
+    auto &flag = optFlags[j].get();
+    if (flag) {
+      optLevel = j;
+      break;
+    }
+  }
+  return optLevel;
+}
+
 // JIT-compile the given module and run "entryPoint" with "args" as arguments.
 static Error compileAndExecute(ModuleOp module, StringRef entryPoint,
                                std::function<llvm::Error(llvm::Module *)> transformer,
                                void **args) {
+  Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel;
+  if (auto clOptLevel = getCommandLineOptLevel())
+    jitCodeGenOptLevel =
+        static_cast<llvm::CodeGenOpt::Level>(clOptLevel.getValue());
   SmallVector<StringRef, 4> libs(clSharedLibs.begin(), clSharedLibs.end());
-  auto expectedEngine =
-      mlir::ExecutionEngine::create(module, transformer, libs);
+  auto expectedEngine = mlir::ExecutionEngine::create(module, transformer,
+                                                      jitCodeGenOptLevel, libs);
   if (!expectedEngine)
     return expectedEngine.takeError();
@@ -296,26 +320,24 @@ int mlir::JitRunnerMain(
   initializeLLVM();
   mlir::initializeLLVMPasses();
 
-  llvm::SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
-      optO0, optO1, optO2, optO3};
-
   llvm::cl::ParseCommandLineOptions(argc, argv, "MLIR CPU execution driver\n");
-  llvm::SmallVector<const llvm::PassInfo *, 4> passes;
-  llvm::Optional<unsigned> optLevel;
+  llvm::Optional<unsigned> optLevel = getCommandLineOptLevel();
+  llvm::SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
+      optO0, optO1, optO2, optO3};
   unsigned optCLIPosition = 0;
   // Determine if there is an optimization flag present, and its CLI position
   // (optCLIPosition).
   for (unsigned j = 0; j < 4; ++j) {
     auto &flag = optFlags[j].get();
     if (flag) {
-      optLevel = j;
       optCLIPosition = flag.getPosition();
       break;
     }
   }
 
   // Generate vector of pass information, plus the index at which we should
   // insert any optimization passes in that vector (optPosition).
+  llvm::SmallVector<const llvm::PassInfo *, 4> passes;
   unsigned optPosition = 0;
   for (unsigned i = 0, e = llvmPasses.size(); i < e; ++i) {
     passes.push_back(llvmPasses[i]);