Set mlir-cpu-runner JIT codegen opt level correctly

- the JIT codegen was being run at the default -O0 level; instead,
  propagate the opt level from the cmd line.

Signed-off-by: Uday Bondhugula <uday@polymagelabs.com>

Closes tensorflow/mlir#123

COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/mlir/pull/123 from bondhugula:jit-runner 3b055e47f94c9a48bf487f6400787478738cda02
PiperOrigin-RevId: 267778586
This commit is contained in:
Uday Bondhugula 2019-09-07 09:59:47 -07:00 committed by A. Unique TensorFlower
parent 53bb528b19
commit 713ab0dde7
3 changed files with 48 additions and 21 deletions

View File

@@ -72,13 +72,15 @@ public:
   /// Creates an execution engine for the given module. If `transformer` is
   /// provided, it will be called on the LLVM module during JIT-compilation and
-  /// can be used, e.g., for reporting or optimization.
-  /// If `sharedLibPaths` are provided, the underlying JIT-compilation will open
-  /// and link the shared libraries for symbol resolution.
-  /// If `objectCache` is provided, JIT compiler will use it to store the object
-  /// generated for the given module.
+  /// can be used, e.g., for reporting or optimization. `jitCodeGenOptLevel`,
+  /// when provided, is used as the optimization level for target code
+  /// generation. If `sharedLibPaths` are provided, the underlying
+  /// JIT-compilation will open and link the shared libraries for symbol
+  /// resolution. If `objectCache` is provided, JIT compiler will use it to
+  /// store the object generated for the given module.
   static llvm::Expected<std::unique_ptr<ExecutionEngine>> create(
       ModuleOp m, std::function<llvm::Error(llvm::Module *)> transformer = {},
+      Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = llvm::None,
       ArrayRef<StringRef> sharedLibPaths = {}, bool enableObjectCache = false);

   /// Looks up a packed-argument function with the given name and returns a

View File

@@ -198,6 +198,7 @@ ExecutionEngine::ExecutionEngine(bool enableObjectCache)
 Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
     ModuleOp m, std::function<Error(llvm::Module *)> transformer,
+    Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel,
     ArrayRef<StringRef> sharedLibPaths, bool enableObjectCache) {
   auto engine = std::make_unique<ExecutionEngine>(enableObjectCache);
@@ -264,6 +265,8 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
   // LLJITWithObjectCache example.
   auto compileFunctionCreator = [&](JITTargetMachineBuilder JTMB)
       -> Expected<IRCompileLayer::CompileFunction> {
+    if (jitCodeGenOptLevel)
+      JTMB.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
     auto TM = JTMB.createTargetMachine();
     if (!TM)
       return TM.takeError();

View File

@@ -81,13 +81,17 @@ static llvm::cl::list<const llvm::PassInfo *, bool, llvm::PassNameParser>
                llvm::cl::cat(optFlags));

 // CLI variables for -On options.
-static llvm::cl::opt<bool> optO0("O0", llvm::cl::desc("Run opt O0 passes"),
-                                 llvm::cl::cat(optFlags));
-static llvm::cl::opt<bool> optO1("O1", llvm::cl::desc("Run opt O1 passes"),
-                                 llvm::cl::cat(optFlags));
-static llvm::cl::opt<bool> optO2("O2", llvm::cl::desc("Run opt O2 passes"),
-                                 llvm::cl::cat(optFlags));
-static llvm::cl::opt<bool> optO3("O3", llvm::cl::desc("Run opt O3 passes"),
-                                 llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO0("O0", llvm::cl::desc("Run opt passes and codegen at O0"),
+          llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO1("O1", llvm::cl::desc("Run opt passes and codegen at O1"),
+          llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO2("O2", llvm::cl::desc("Run opt passes and codegen at O2"),
+          llvm::cl::cat(optFlags));
+static llvm::cl::opt<bool>
+    optO3("O3", llvm::cl::desc("Run opt passes and codegen at O3"),
+          llvm::cl::cat(optFlags));

 static llvm::cl::OptionCategory clOptionsCategory("linking options");
@@ -178,14 +182,34 @@ static LogicalResult convertAffineStandardToLLVMIR(ModuleOp module) {
   return manager.run(module);
 }

+static llvm::Optional<unsigned> getCommandLineOptLevel() {
+  llvm::Optional<unsigned> optLevel;
+  llvm::SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
+      optO0, optO1, optO2, optO3};
+
+  // Determine if there is an optimization flag present.
+  for (unsigned j = 0; j < 4; ++j) {
+    auto &flag = optFlags[j].get();
+    if (flag) {
+      optLevel = j;
+      break;
+    }
+  }
+  return optLevel;
+}
+
 // JIT-compile the given module and run "entryPoint" with "args" as arguments.
 static Error
 compileAndExecute(ModuleOp module, StringRef entryPoint,
                   std::function<llvm::Error(llvm::Module *)> transformer,
                   void **args) {
+  Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel;
+  if (auto clOptLevel = getCommandLineOptLevel())
+    jitCodeGenOptLevel =
+        static_cast<llvm::CodeGenOpt::Level>(clOptLevel.getValue());
   SmallVector<StringRef, 4> libs(clSharedLibs.begin(), clSharedLibs.end());
-  auto expectedEngine =
-      mlir::ExecutionEngine::create(module, transformer, libs);
+  auto expectedEngine = mlir::ExecutionEngine::create(module, transformer,
+                                                      jitCodeGenOptLevel, libs);
   if (!expectedEngine)
     return expectedEngine.takeError();
@@ -296,26 +320,24 @@ int mlir::JitRunnerMain(
   initializeLLVM();
   mlir::initializeLLVMPasses();

-  llvm::SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
-      optO0, optO1, optO2, optO3};
   llvm::cl::ParseCommandLineOptions(argc, argv, "MLIR CPU execution driver\n");

-  llvm::SmallVector<const llvm::PassInfo *, 4> passes;
-  llvm::Optional<unsigned> optLevel;
+  llvm::Optional<unsigned> optLevel = getCommandLineOptLevel();
+  llvm::SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
+      optO0, optO1, optO2, optO3};
   unsigned optCLIPosition = 0;
   // Determine if there is an optimization flag present, and its CLI position
   // (optCLIPosition).
   for (unsigned j = 0; j < 4; ++j) {
     auto &flag = optFlags[j].get();
     if (flag) {
-      optLevel = j;
       optCLIPosition = flag.getPosition();
       break;
     }
   }
   // Generate vector of pass information, plus the index at which we should
   // insert any optimization passes in that vector (optPosition).
+  llvm::SmallVector<const llvm::PassInfo *, 4> passes;
   unsigned optPosition = 0;
   for (unsigned i = 0, e = llvmPasses.size(); i < e; ++i) {
     passes.push_back(llvmPasses[i]);