[llvm][NFC] expose LLVM_HAVE_TF_API through llvm-config.h

Summary:
This allows users of the LLVM library to discover whether LLVM was built
with the TensorFlow C API dependency, which helps if using the TFUtils
wrapper, for example.

We don't do the same for the LLVM_HAVE_TF_AOT flag, because that does
not expose any API.

Reviewers: mehdi_amini, davidxl

Subscribers: mgorny, aaron.ballman, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D83746
This commit is contained in:
Mircea Trofin 2020-07-13 22:30:25 -07:00
parent c6e8bf7287
commit c1e2f73c39
4 changed files with 29 additions and 18 deletions

View File

@ -832,6 +832,21 @@ configure_file(
${LLVM_INCLUDE_DIR}/llvm/Config/Targets.def
)
# For up-to-date instructions for installing the Tensorflow dependency, refer to
# the bot setup script: https://github.com/google/ml-compiler-opt/blob/master/buildbot/buildbot_init.sh
# In this case, the latest C API library is available for download from
# https://www.tensorflow.org/install/lang_c.
# We will expose the conditional compilation variable,
# LLVM_HAVE_TF_API, through llvm-config.h, so that a user of the LLVM library may
# also leverage the dependency.
# TENSORFLOW_C_LIB_PATH is expected to point at a TensorFlow C API install
# tree (containing lib/ and include/); see the instructions referenced above.
set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
# Probe for libtensorflow under the user-provided path.
find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib)
if (tensorflow_c_api)
# LLVM_HAVE_TF_API is cached so it can flow into the configured
# llvm-config.h header (see the configure_file() calls below).
set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
# NOTE(review): directory-scoped include_directories() matches this file's
# existing convention; a target-scoped form is not applicable here since no
# single target owns the dependency.
include_directories(${TENSORFLOW_C_LIB_PATH}/include)
endif()
# Configure the three LLVM configuration header files.
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/config.h.cmake
@ -972,27 +987,18 @@ set(TENSORFLOW_AOT_PATH "" CACHE PATH "Path to TensorFlow pip install dir")
# Enable TensorFlow AOT ("saved model") support when TENSORFLOW_AOT_PATH
# points at a TensorFlow pip install tree.
if (NOT TENSORFLOW_AOT_PATH STREQUAL "")
set(LLVM_HAVE_TF_AOT "ON" CACHE BOOL "Tensorflow AOT available")
# saved_model_cli ships alongside the pip package; the relative path walks
# up from the package directory to the environment's bin/ directory.
# (A verbatim duplicate of this set() appeared here; a second
# set(... CACHE ...) of the same variable is a no-op, so it was removed.)
set(TENSORFLOW_AOT_COMPILER
"${TENSORFLOW_AOT_PATH}/../../../../bin/saved_model_cli"
CACHE PATH "Path to the Tensorflow AOT compiler")
# Unlike the LLVM_HAVE_TF_API case, we don't need to expose this through
# llvm-config.h, because it's an internal implementation detail. A user of
# the llvm library that wants to also use the TF AOT compiler may do so
# through their custom build step.
add_definitions("-DLLVM_HAVE_TF_AOT")
include_directories(${TENSORFLOW_AOT_PATH}/include)
# Build the XLA AOT runtime sources into the main build tree.
add_subdirectory(${TENSORFLOW_AOT_PATH}/xla_aot_runtime_src
${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/tf_runtime)
endif()
# NOTE(review): this block duplicates the TENSORFLOW_C_LIB_PATH /
# tensorflow_c_api detection earlier in this file; the cache set() and
# find_library() re-runs are redundant — consider keeping a single copy.
set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib)
# Similar to the above Tensorflow dependency, please refer to the same script.
# In this case, the latest C API library is available for download from
# https://www.tensorflow.org/install/lang_c
if (tensorflow_c_api)
set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
# Defined globally (directory scope) so every target compiled below sees
# the LLVM_HAVE_TF_API macro.
add_definitions("-DLLVM_HAVE_TF_API")
include_directories(${TENSORFLOW_C_LIB_PATH}/include)
endif()
# Put this before tblgen. Else we have a circular dependence.
add_subdirectory(lib/Demangle)
add_subdirectory(lib/Support)

View File

@ -9,6 +9,8 @@
#ifndef LLVM_ANALYSIS_UTILS_TFUTILS_H
#define LLVM_ANALYSIS_UTILS_TFUTILS_H
#include "llvm/Config/config.h"
#ifdef LLVM_HAVE_TF_API
#include "tensorflow/c/c_api.h"
#include "llvm/IR/LLVMContext.h"

View File

@ -79,4 +79,7 @@
*/
#cmakedefine01 LLVM_FORCE_ENABLE_STATS
/* Define if LLVM was built with a dependency to the libtensorflow dynamic library */
#cmakedefine LLVM_HAVE_TF_API
#endif

View File

@ -26,7 +26,7 @@ using namespace llvm;
extern const char *TestMainArgv0;
extern cl::opt<std::string> TFIR2NativeModelPath;
#if LLVM_HAVE_TF_API
#ifdef LLVM_HAVE_TF_API
static std::string getModelPath() {
SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
@ -87,13 +87,13 @@ define internal i32 @top() {
)IR");
FunctionAnalysisManager FAM = buildFAM();
#if LLVM_HAVE_TF_API
#ifdef LLVM_HAVE_TF_API
TFIR2NativeModelPath = getModelPath();
#endif
InlineSizeEstimatorAnalysis FA;
auto SizeEstimate = FA.run(*M->getFunction("branches"), FAM);
#if LLVM_HAVE_TF_API
#ifdef LLVM_HAVE_TF_API
EXPECT_GT(*SizeEstimate, 0);
#else
EXPECT_FALSE(SizeEstimate.hasValue());