get_errc_messages(LLVM_LIT_ERRC_MESSAGES)
endif()
-# For up-to-date instructions for installing the Tensorflow dependency, refer to
+# For up-to-date instructions for installing the TFLite dependency, refer to
# the bot setup script: https://github.com/google/ml-compiler-opt/blob/main/buildbot/buildbot_init.sh
-# In this case, the latest C API library is available for download from
-# https://www.tensorflow.org/install/lang_c.
-# We will expose the conditional compilation variable,
-# LLVM_HAVE_TF_API, through llvm-config.h, so that a user of the LLVM library may
-# also leverage the dependency.
set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
-if (TENSORFLOW_C_LIB_PATH)
+set(LLVM_HAVE_TFLITE OFF CACHE BOOL "Use tflite")
+if (LLVM_HAVE_TFLITE)
+ find_package(protobuf REQUIRED)
+ find_package(tensorflow-lite REQUIRED)
+ set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
+ set(LLVM_PROTOBUF_OUT_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/protobuf_gen)
+ include_directories(${LLVM_PROTOBUF_OUT_DIR})
+elseif (TENSORFLOW_C_LIB_PATH)
find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib NO_DEFAULT_PATH REQUIRED)
# Currently, the protobuf headers are distributed with the pip package that corresponds to the version
# of the C API library.
set(TENSORFLOW_AOT_COMPILER
"${TENSORFLOW_AOT_PATH}/../../../../bin/saved_model_cli"
CACHE PATH "Path to the Tensorflow AOT compiler")
- include_directories(${TENSORFLOW_AOT_PATH}/include)
+  # Copy the AOT headers, excluding the checked-in protobuf-generated ones, to
+  # avoid clashing protobuf codegen when building both AOT and development mode.
+  # We plan to replace protobuf with a simpler alternative, so this will go away.
+ file(COPY ${TENSORFLOW_AOT_PATH}/include DESTINATION ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/tensorflow
+ PATTERN "google/*" EXCLUDE
+ PATTERN "*.pb.h" EXCLUDE)
+ include_directories(${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/tensorflow/include)
add_subdirectory(${TENSORFLOW_AOT_PATH}/xla_aot_runtime_src
${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/tf_runtime)
install(TARGETS tf_xla_runtime EXPORT LLVMExports
set(MLLinkDeps ${MLLinkDeps} tf_xla_runtime PARENT_SCOPE)
add_definitions(-DLLVM_HAVE_TF_AOT_${fname_allcaps})
endfunction()
+
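+# build_proto generates C++ protobuf sources/headers for each listed proto
+# (paths given relative to TENSORFLOW_SRC_DIR, without the .proto extension)
+# under LLVM_PROTOBUF_OUT_DIR, and appends them to GeneratedMLSources in the
+# parent scope.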
+function(build_proto)
+ foreach (P ${ARGV})
+ set(PB_SRCS ${PB_SRCS} ${LLVM_PROTOBUF_OUT_DIR}/${P}.pb.cc)
+ set(PB_HDRS ${PB_HDRS} ${LLVM_PROTOBUF_OUT_DIR}/${P}.pb.h)
+ set(PBS ${PBS} ${TENSORFLOW_SRC_DIR}/${P}.proto)
+ endforeach()
+  add_custom_command(OUTPUT ${PB_SRCS} ${PB_HDRS}
+    COMMAND protobuf::protoc
+    ARGS --proto_path=${TENSORFLOW_SRC_DIR} --cpp_out=${LLVM_PROTOBUF_OUT_DIR} ${PBS}
+    DEPENDS ${PBS})
+ set_source_files_properties(${PB_SRCS} PROPERTIES
+ GENERATED 1)
+ set(GeneratedMLSources ${GeneratedMLSources} ${PB_SRCS} PARENT_SCOPE)
+  set(MLDeps ${MLDeps} PARENT_SCOPE)
+endfunction()
/* Define if LLVM was built with a dependency to the libtensorflow dynamic library */
#cmakedefine LLVM_HAVE_TF_API
+/* Define if LLVM is using tflite instead of libtensorflow */
+#cmakedefine LLVM_HAVE_TFLITE
+
/* Define to 1 if you have the <sysexits.h> header file. */
#cmakedefine HAVE_SYSEXITS_H ${HAVE_SYSEXITS_H}
endif()
if (DEFINED LLVM_HAVE_TF_API)
- list(APPEND MLLinkDeps ${tensorflow_c_api} ${tensorflow_fx})
+ if (DEFINED LLVM_HAVE_TFLITE)
+ build_proto(
+ tensorflow/core/protobuf/error_codes
+ tensorflow/core/example/feature
+ tensorflow/core/example/example)
+ list(APPEND MLLinkDeps
+ tensorflow-lite::tensorflow-lite
+ protobuf::libprotobuf)
+ else()
+ list(APPEND MLLinkDeps ${tensorflow_c_api} ${tensorflow_fx})
+ endif()
endif()
endif()
SyncDependenceAnalysis.cpp
SyntheticCountsUtils.cpp
TFUtils.cpp
+ TFLiteUtils.cpp
TargetLibraryInfo.cpp
TargetTransformInfo.cpp
TensorSpec.cpp
--- /dev/null
+++ b/llvm/lib/Analysis/TFLiteUtils.cpp
+//===- TFLiteUtils.cpp - tflite evaluation utilities ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements utilities for interfacing with tflite.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Config/config.h"
+#if defined(LLVM_HAVE_TFLITE)
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Support/Base64.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+#include "tensorflow/lite/model_builder.h"
+#include "tensorflow/lite/op_resolver.h"
+
+#include <cassert>
+#include <numeric>
+
+using namespace llvm;
+
+namespace llvm {
+class EvaluationResultImpl {
+public:
+  EvaluationResultImpl(const std::vector<const TfLiteTensor *> &Outputs)
+      : Outputs(Outputs) {}
+
+ const TfLiteTensor *getOutput(size_t I) { return Outputs[I]; }
+
+ EvaluationResultImpl(const EvaluationResultImpl &) = delete;
+ EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
+
+private:
+ const std::vector<const TfLiteTensor *> Outputs;
+};
+
+class TFModelEvaluatorImpl {
+public:
+ TFModelEvaluatorImpl(StringRef SavedModelPath,
+ const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs,
+ size_t OutputSpecsSize, const char *Tags);
+
+ bool isValid() const { return IsValid; }
+ size_t outputSize() const { return Output.size(); }
+
+ std::unique_ptr<EvaluationResultImpl> evaluate() {
+ Interpreter->Invoke();
+ return std::make_unique<EvaluationResultImpl>(Output);
+ }
+
+ const std::vector<TfLiteTensor *> &getInput() const { return Input; }
+
+ ~TFModelEvaluatorImpl();
+
+private:
+ std::unique_ptr<tflite::FlatBufferModel> Model;
+
+  /// The objects necessary for carrying out an evaluation of the model.
+  /// They are expensive to set up, and we maintain them across all the
+  /// evaluations of the model.
+ std::unique_ptr<tflite::Interpreter> Interpreter;
+
+  /// The input tensors. We set up the tensors once and just mutate their
+  /// scalars before each evaluation. The input tensors keep their value after
+  /// an evaluation.
+ std::vector<TfLiteTensor *> Input;
+
+ /// The output nodes.
+ std::vector<const TfLiteTensor *> Output;
+
+ void invalidate() { IsValid = false; }
+
+ bool IsValid = true;
+
+  /// Reusable utility for ensuring we can bind the requested Name to a tensor
+  /// in the loaded model.
+ bool checkReportAndInvalidate(const TfLiteTensor *Tensor,
+ const TensorSpec &Spec);
+};
+
+} // namespace llvm
+
+TFModelEvaluatorImpl::TFModelEvaluatorImpl(
+ StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
+ const char *Tags = "serve")
+ : Input(InputSpecs.size()), Output(OutputSpecsSize) {
+ // FIXME: make ErrorReporter a member (may also need subclassing
+ // StatefulErrorReporter) to easily get the latest error status, for
+ // debugging.
+ tflite::StderrReporter ErrorReporter;
+ SmallVector<char, 128> TFLitePathBuff;
+ llvm::sys::path::append(TFLitePathBuff, SavedModelPath, "model.tflite");
+ StringRef TFLitePath(TFLitePathBuff.data(), TFLitePathBuff.size());
+ Model = tflite::FlatBufferModel::BuildFromFile(TFLitePath.str().c_str(),
+ &ErrorReporter);
+ if (!Model) {
+ invalidate();
+ return;
+ }
+
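+  // Construct the interpreter over the flatbuffer model; only tflite builtin
+  // ops are resolvable.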
+ tflite::ops::builtin::BuiltinOpResolver Resolver;
+ tflite::InterpreterBuilder Builder(*Model, Resolver);
+ Builder(&Interpreter);
+
+ if (!Interpreter ||
+ Interpreter->AllocateTensors() != TfLiteStatus::kTfLiteOk) {
+ invalidate();
+ return;
+ }
+ // Known inputs and outputs
+ StringMap<int> InputsMap;
+ StringMap<int> OutputsMap;
+ for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
+ InputsMap[Interpreter->GetInputName(I)] = I;
+ for (size_t I = 0; I < Interpreter->outputs().size(); ++I)
+ OutputsMap[Interpreter->GetOutputName(I)] = I;
+
+ for (size_t I = 0; I < InputSpecs.size(); ++I) {
+ auto &InputSpec = InputSpecs[I];
+ auto MapI = InputsMap.find(InputSpec.name() + ":" +
+ std::to_string(InputSpec.port()));
+ if (MapI == InputsMap.end()) {
+ Input[I] = nullptr;
+ continue;
+ }
+    Input[I] = Interpreter->input_tensor(MapI->second);
+ if (!checkReportAndInvalidate(Input[I], InputSpec))
+ return;
+ std::memset(Input[I]->data.data, 0,
+ InputSpecs[I].getTotalTensorBufferSize());
+ }
+
+ for (size_t I = 0; I < OutputSpecsSize; ++I) {
+ auto OutputSpec = GetOutputSpecs(I);
+ Output[I] = Interpreter->output_tensor(
+ OutputsMap[OutputSpec.name() + ":" +
+ std::to_string(OutputSpec.port())]);
+ if (!checkReportAndInvalidate(Output[I], OutputSpec))
+ return;
+ }
+}
+
+TFModelEvaluator::TFModelEvaluator(
+ StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
+ const char *Tags)
+ : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, GetOutputSpecs,
+ OutputSpecsSize, Tags)) {
+ if (!Impl->isValid())
+ Impl.reset();
+}
+
+TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
+ const std::vector<TensorSpec> &InputSpecs,
+ const std::vector<TensorSpec> &OutputSpecs,
+ const char *Tags)
+ : TFModelEvaluator(
+ SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
+ OutputSpecs.size(), Tags) {}
+
+TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {}
+
+bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TfLiteTensor *Tensor,
+ const TensorSpec &Spec) {
+  if (!Tensor) {
+    errs() << "Could not find TF_Output named: " + Spec.name() + "\n";
+    IsValid = false;
+  } else if (Spec.getTotalTensorBufferSize() != Tensor->bytes)
+    IsValid = false;
+
+ // If the total sizes match, there could still be a mismatch in the shape.
+ // We ignore that for now.
+
+ return IsValid;
+}
+
+Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
+ if (!isValid())
+ return None;
+ return EvaluationResult(Impl->evaluate());
+}
+
+void *TFModelEvaluator::getUntypedInput(size_t Index) {
+ TfLiteTensor *T = Impl->getInput()[Index];
+ if (!T)
+ return nullptr;
+ return T->data.data;
+}
+
+TFModelEvaluator::EvaluationResult::EvaluationResult(
+ std::unique_ptr<EvaluationResultImpl> Impl)
+ : Impl(std::move(Impl)) {}
+
+TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
+ : Impl(std::move(Other.Impl)) {}
+
+TFModelEvaluator::EvaluationResult &
+TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
+ Impl = std::move(Other.Impl);
+ return *this;
+}
+
+void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
+ return Impl->getOutput(Index)->data.data;
+}
+
+const void *
+TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
+ return Impl->getOutput(Index)->data.data;
+}
+
+TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
+TFModelEvaluator::~TFModelEvaluator() {}
+
+#endif // defined(LLVM_HAVE_TFLITE)
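For context, here is a minimal sketch of how a client drives the TFModelEvaluator API implemented above, mirroring the usage in the unittests; the model directory and the tensor names/shapes are hypothetical and must match the converted model:

#include "llvm/Analysis/Utils/TFUtils.h"

#include <algorithm>
#include <cstdint>
#include <vector>

using namespace llvm;

bool runOnce(const char *ModelDir) {
  // Hypothetical tensor names/shapes.
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int32_t>("serving_default_input_1", {1, 4})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
  TFModelEvaluator Evaluator(ModelDir, InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return false;
  // Input buffers persist across evaluations; fill them before each call.
  int32_t *V = Evaluator.getInput<int32_t>(0);
  std::fill(V, V + 4, 1);
  auto ER = Evaluator.evaluate();
  if (!ER.hasValue())
    return false;
  return *ER->getTensorValue<float>(0) >= 0.0f;
}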
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
+#if defined(LLVM_HAVE_TF_API) && !defined(LLVM_HAVE_TFLITE)
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
--- /dev/null
+++ b/llvm/lib/Analysis/models/saved-model-to-tflite.py
+"""Convert a saved model to tflite model.
+
+Usage: python3 saved-model-to-tflite.py <mlgo saved_model_dir> <tflite dest_dir>
+
+The <tflite dest_dir> will contain:
+ model.tflite: this is the converted saved model
+ output_spec.json: the output spec, copied from the saved_model dir.
+"""
+
+import tensorflow as tf
+import os
+import sys
+from tf_agents.policies import greedy_policy
+
+
+def main(argv):
+ assert len(argv) == 3
+ sm_dir = argv[1]
+ tfl_dir = argv[2]
+ tf.io.gfile.makedirs(tfl_dir)
+ tfl_path = os.path.join(tfl_dir, 'model.tflite')
+ converter = tf.lite.TFLiteConverter.from_saved_model(sm_dir)
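+  # Restrict conversion to tflite builtin ops (no TF select ops).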
+ converter.target_spec.supported_ops = [
+ tf.lite.OpsSet.TFLITE_BUILTINS,
+ ]
+ tfl_model = converter.convert()
+ with tf.io.gfile.GFile(tfl_path, 'wb') as f:
+ f.write(tfl_model)
+
+ json_file = 'output_spec.json'
+ src_json = os.path.join(sm_dir, json_file)
+ if tf.io.gfile.exists(src_json):
+ tf.io.gfile.copy(src_json,
+ os.path.join(tfl_dir, json_file))
+
+if __name__ == '__main__':
+ main(sys.argv)
; RUN: sed -i 's/\\n/ /g' %t1
; RUN: FileCheck --input-file %t1 %s
-; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t
+; RUN: rm -rf %t %t_savedmodel
+; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel
+; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=development \
; RUN: -regalloc-training-log=%t2 -tfutils-text-log -regalloc-model=%t < %s
; RUN: sed -i 's/ \+/ /g' %t2
; RUN: FileCheck --input-file %t1 %s --check-prefixes=CHECK,NOML
; RUN: diff %t1 %S/Inputs/reference-log-noml.txt
-; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t
+; RUN: rm -rf %t_savedmodel %t
+; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel
+; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=development \
; RUN: -regalloc-training-log=%t2 -tfutils-text-log -regalloc-model=%t < %S/Inputs/input.ll
; RUN: sed -i 's/ \+/ /g' %t2
; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=release \
; RUN: %S/Inputs/input.ll -o %t.release
-; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t
+; RUN: rm -rf %t %t_savedmodel
+; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel
+; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=development \
; RUN: -regalloc-model=%t %S/Inputs/input.ll -o %t.development
;
; Generate mock model
; RUN: rm -rf %t
-; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
+; RUN: rm -rf %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
;
; When the bounds are very wide ("no bounds"), all inlinings happen.
; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -tfutils-text-log -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
; Test that we can produce a log if we have or do not have a model, in development mode.
; REQUIRES: have_tf_api
; Generate mock model
-; RUN: rm -rf %t
-; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
+; RUN: rm -rf %t_savedmodel %t
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
;
; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
; for the 'release' mode.
;
; REQUIRES: have_tf_api
-; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
+; RUN: rm -rf %t %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=default -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=DEFAULT
; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=development -ml-inliner-model-under-training=%t -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK
TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
- EXPECT_TRUE(Evaluator.isValid());
-
- int32_t *V = Evaluator.getInput<int32_t>(0);
- // Fill it up with 1's, we know the output.
- for (auto I = 0; I < KnownSize; ++I) {
- V[I] = 1;
- }
- auto ER = Evaluator.evaluate();
- EXPECT_FALSE(ER.hasValue());
EXPECT_FALSE(Evaluator.isValid());
}