From 5fd51fcba6a5d675e60a59b4ed6c449efe70f41b Mon Sep 17 00:00:00 2001 From: Mircea Trofin Date: Fri, 3 Feb 2023 17:03:11 -0800 Subject: [PATCH] Reland "[mlgo] Hook up the interactive runner to the mlgo-ed passes" This reverts commit a772f0bb920a4957fb94dd8dbe45943809fd0ec3. The main problem was related to how we handled `dbgs()` from the hosted compiler. Using explicit `subprocess.communicate`, and not relying on dbgs() being flushed until the end appears to address the problem. Also some fixes due to some bots running older pythons, so we can't have nice things like `int | float` and such. --- .../include/llvm/Analysis/InlineModelFeatureMaps.h | 3 +- .../include/llvm/Analysis/InteractiveModelRunner.h | 2 +- llvm/include/llvm/Analysis/MLModelRunner.h | 1 + .../include/llvm/Analysis/ReleaseModeModelRunner.h | 6 ++ llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp | 2 +- llvm/lib/Analysis/InlineAdvisor.cpp | 2 - llvm/lib/Analysis/MLInlineAdvisor.cpp | 36 ++++++++-- llvm/lib/Analysis/models/interactive_host.py | 84 ++++++++++++++++++++++ llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp | 36 +++++++--- llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp | 40 ++++++++--- llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp | 2 - llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp | 2 - .../CodeGen/MLRegalloc/Inputs/interactive_main.py | 28 ++++++++ llvm/test/CodeGen/MLRegalloc/interactive-mode.ll | 22 ++++++ .../Inline/ML/Inputs/interactive_main.py | 21 ++++++ llvm/test/Transforms/Inline/ML/interactive-mode.ll | 27 +++++++ 16 files changed, 281 insertions(+), 33 deletions(-) create mode 100644 llvm/lib/Analysis/models/interactive_host.py create mode 100644 llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py create mode 100644 llvm/test/CodeGen/MLRegalloc/interactive-mode.ll create mode 100644 llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py create mode 100644 llvm/test/Transforms/Inline/ML/interactive-mode.ll diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h 
b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h index fb8236c..0418a2b 100644 --- a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h +++ b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h @@ -129,9 +129,10 @@ inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature) { constexpr size_t NumberOfFeatures = static_cast(FeatureIndex::NumberOfFeatures); -extern const std::array FeatureMap; +extern const std::vector FeatureMap; extern const char *const DecisionName; +extern const TensorSpec InlineDecisionSpec; extern const char *const DefaultDecisionName; extern const char *const RewardName; diff --git a/llvm/include/llvm/Analysis/InteractiveModelRunner.h b/llvm/include/llvm/Analysis/InteractiveModelRunner.h index a35e06d..680dc42 100644 --- a/llvm/include/llvm/Analysis/InteractiveModelRunner.h +++ b/llvm/include/llvm/Analysis/InteractiveModelRunner.h @@ -48,7 +48,7 @@ public: static bool classof(const MLModelRunner *R) { return R->getKind() == MLModelRunner::Kind::Interactive; } - void switchContext(StringRef Name) { + void switchContext(StringRef Name) override { Log->switchContext(Name); Log->flush(); } diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h index 6fcccf7..903411f 100644 --- a/llvm/include/llvm/Analysis/MLModelRunner.h +++ b/llvm/include/llvm/Analysis/MLModelRunner.h @@ -49,6 +49,7 @@ public: enum class Kind : int { Unknown, Release, Development, NoOp, Interactive }; Kind getKind() const { return Type; } + virtual void switchContext(StringRef Name) {} protected: MLModelRunner(LLVMContext &Ctx, Kind Type, size_t NrInputs) diff --git a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h index bf1aaca..9185513 100644 --- a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h +++ b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h @@ -85,6 +85,12 @@ public: void *arg_data(int) { llvm_unreachable(NOOP_MODEL_ERRMSG); } #undef NOOP_MODEL_ERRMSG 
}; + +template bool isEmbeddedModelEvaluatorValid() { return true; } + +template <> inline bool isEmbeddedModelEvaluatorValid() { + return false; +} } // namespace llvm #endif // LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp index a91d2ff..605e590 100644 --- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp +++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp @@ -283,7 +283,7 @@ TrainingLogger::TrainingLogger(StringRef LogFileName, FT.push_back(TensorSpec::createSpec(DefaultDecisionName, {1})); DecisionPos = FT.size(); - FT.push_back(TensorSpec::createSpec(DecisionName, {1})); + FT.push_back(InlineDecisionSpec); std::error_code EC; auto OS = std::make_unique(TrainingLog, EC); if (EC) diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp index 540aad7..16de1cf 100644 --- a/llvm/lib/Analysis/InlineAdvisor.cpp +++ b/llvm/lib/Analysis/InlineAdvisor.cpp @@ -231,10 +231,8 @@ bool InlineAdvisorAnalysis::Result::tryCreate( #endif break; case InliningAdvisorMode::Release: -#ifdef LLVM_HAVE_TF_AOT LLVM_DEBUG(dbgs() << "Using release-mode inliner policy.\n"); Advisor = llvm::getReleaseModeAdvisor(M, MAM); -#endif break; } diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp index a20c052..0a23f2e 100644 --- a/llvm/lib/Analysis/MLInlineAdvisor.cpp +++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp @@ -18,10 +18,12 @@ #include "llvm/Analysis/FunctionPropertiesAnalysis.h" #include "llvm/Analysis/InlineCost.h" #include "llvm/Analysis/InlineModelFeatureMaps.h" +#include "llvm/Analysis/InteractiveModelRunner.h" #include "llvm/Analysis/LazyCallGraph.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/MLModelRunner.h" #include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/ReleaseModeModelRunner.h" #include "llvm/Analysis/TargetTransformInfo.h" #include 
"llvm/IR/Dominators.h" #include "llvm/IR/InstIterator.h" @@ -30,19 +32,37 @@ using namespace llvm; +static cl::opt InteractiveChannelBaseName( + "inliner-interactive-channel-base", cl::Hidden, + cl::desc( + "Base file path for the interactive mode. The incoming filename should " + "have the name .in, while the " + "outgoing name should be .out")); + #if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL) -#include "llvm/Analysis/ReleaseModeModelRunner.h" // codegen-ed file #include "InlinerSizeModel.h" // NOLINT +using CompiledModelType = llvm::InlinerSizeModel; +#else +using CompiledModelType = NoopSavedModelImpl; +#endif std::unique_ptr llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) { - auto AOTRunner = - std::make_unique>( - M.getContext(), FeatureMap, DecisionName); + if (!llvm::isEmbeddedModelEvaluatorValid() && + InteractiveChannelBaseName.empty()) + return nullptr; + std::unique_ptr AOTRunner; + if (InteractiveChannelBaseName.empty()) + AOTRunner = std::make_unique>( + M.getContext(), FeatureMap, DecisionName); + else + AOTRunner = std::make_unique( + M.getContext(), FeatureMap, InlineDecisionSpec, + InteractiveChannelBaseName + ".out", + InteractiveChannelBaseName + ".in"); return std::make_unique(M, MAM, std::move(AOTRunner)); } -#endif #define DEBUG_TYPE "inline-ml" @@ -59,7 +79,7 @@ static cl::opt KeepFPICache( cl::init(false)); // clang-format off -const std::array llvm::FeatureMap{ +const std::vector llvm::FeatureMap{ #define POPULATE_NAMES(_, NAME) TensorSpec::createSpec(NAME, {1} ), // InlineCost features - these must come first INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES) @@ -73,6 +93,8 @@ const std::array llvm::FeatureMap{ // clang-format on const char *const llvm::DecisionName = "inlining_decision"; +const TensorSpec llvm::InlineDecisionSpec = + TensorSpec::createSpec(DecisionName, {1}); const char *const llvm::DefaultDecisionName = "inlining_default"; const char *const llvm::RewardName = "delta_size"; @@ -94,7 +116,7 @@ 
MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM, CG(MAM.getResult(M)), InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize) { assert(ModelRunner); - + ModelRunner->switchContext(""); // Extract the 'call site height' feature - the position of a call site // relative to the farthest statically reachable SCC node. We don't mutate // this value while inlining happens. Empirically, this feature proved diff --git a/llvm/lib/Analysis/models/interactive_host.py b/llvm/lib/Analysis/models/interactive_host.py new file mode 100644 index 0000000..5a14c47 --- /dev/null +++ b/llvm/lib/Analysis/models/interactive_host.py @@ -0,0 +1,84 @@ +"""Utility for testing InteractiveModelRunner. + +Use it from pass-specific tests by providing a main .py which calls this library's +`run_interactive` with an appropriate callback to provide advice. + +From .ll tests, just call the above-mentioned main as a prefix to the opt/llc +invocation (with the appropriate flags enabling the interactive mode) + +Examples: +test/Transforms/Inline/ML/interactive-mode.ll +test/CodeGen/MLRegalloc/interactive-mode.ll +""" + +import ctypes +import log_reader +import io +import math +import os +import subprocess +from typing import BinaryIO, Callable, Union + + +def send(f: io.BufferedWriter, value: Union[int, float], + spec: log_reader.TensorSpec): + """Send the `value` - currently just a scalar - formatted as per `spec`.""" + + # just int64 for now + assert (spec.element_type == ctypes.c_int64) + to_send = ctypes.c_int64(int(value)) + assert f.write(bytes(to_send)) == ctypes.sizeof( + spec.element_type) * math.prod(spec.shape) + f.flush() + + +def run_interactive(temp_rootname: str, + make_response: Callable[[list[log_reader.TensorValue]], + Union[int, float]], + process_and_args: list[str]): + """Host the compiler. + Args: + temp_rootname: the base file name from which to construct the 2 pipes for + communicating with the compiler. 
+ make_response: a function that, given the current tensor values, provides a + response. + process_and_args: the full commandline for the compiler. It is assumed it + contains a flag pointing to `temp_rootname` so that the InteractiveModelRunner + would attempt communication on the same pair as this function opens. + + This function sets up the communication with the compiler - via 2 files named + `temp_rootname`.in and `temp_rootname`.out - prints out the received features, + and sends back to the compiler an advice (which it gets from `make_response`). + It's used for testing, and also to showcase how to set up communication in an + interactive ML ("gym") environment. + """ + to_compiler = temp_rootname + ".in" + from_compiler = temp_rootname + ".out" + try: + os.mkfifo(to_compiler, 0o666) + os.mkfifo(from_compiler, 0o666) + compiler_proc = subprocess.Popen( + process_and_args, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL) + with io.BufferedWriter(io.FileIO(to_compiler, 'wb')) as tc: + with io.BufferedReader(io.FileIO(from_compiler, 'rb')) as fc: + tensor_specs, _, advice_spec = log_reader.read_header(fc) + context = None + while compiler_proc.poll() is None and (next_event := fc.readline()): + last_context, observation_id, features, _ = log_reader.read_one_observation( + context, next_event, fc, tensor_specs, None) + if last_context != context: + print(f'context: {last_context}') + context = last_context + print(f'observation: {observation_id}') + tensor_values = [] + for fv in features: + log_reader.pretty_print_tensor_value(fv) + tensor_values.append(fv) + send(tc, make_response(tensor_values), advice_spec) + _, err = compiler_proc.communicate() + print(err.decode('utf-8')) + compiler_proc.wait() + + finally: + os.unlink(to_compiler) + os.unlink(from_compiler) diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp index 5cc8ad3..0064e85d 100644 --- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp +++
b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp @@ -13,6 +13,7 @@ #include "AllocationOrder.h" #include "RegAllocEvictionAdvisor.h" #include "RegAllocGreedy.h" +#include "llvm/Analysis/InteractiveModelRunner.h" #include "llvm/Analysis/MLModelRunner.h" #include "llvm/Analysis/TensorSpec.h" #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TFLITE) @@ -52,6 +53,14 @@ using CompiledModelType = RegallocEvictModel; using CompiledModelType = NoopSavedModelImpl; #endif +static cl::opt InteractiveChannelBaseName( + "regalloc-evict-interactive-channel-base", cl::Hidden, + cl::desc( + "Base file path for the interactive mode. The incoming filename should " + "have the name .in, while the " + "outgoing name should be " + ".out")); + // Options that only make sense in development mode #ifdef LLVM_HAVE_TFLITE #include "RegAllocScore.h" @@ -213,6 +222,8 @@ static const std::vector PerLiveRangeShape{1, NumberOfInterferences}; // will be guaranteed to be to a mask == 1 position. Using a macro here to // avoid 'not used' warnings (and keep cond compilation to a minimum) #define DecisionName "index_to_evict" +static const TensorSpec DecisionSpec = + TensorSpec::createSpec(DecisionName, {1}); // Named features index. 
enum FeatureIDs { @@ -382,14 +393,21 @@ private: std::unique_ptr getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override { - if (!Runner) - Runner = std::make_unique>( - MF.getFunction().getContext(), InputFeatures, DecisionName); + if (!Runner) { + if (InteractiveChannelBaseName.empty()) + Runner = std::make_unique>( + MF.getFunction().getContext(), InputFeatures, DecisionName); + else + Runner = std::make_unique( + MF.getFunction().getContext(), InputFeatures, DecisionSpec, + InteractiveChannelBaseName + ".out", + InteractiveChannelBaseName + ".in"); + } return std::make_unique( MF, RA, Runner.get(), getAnalysis(), getAnalysis()); } - std::unique_ptr> Runner; + std::unique_ptr Runner; }; // =================================== @@ -398,8 +416,6 @@ private: // // Features we log #ifdef LLVM_HAVE_TFLITE -static const TensorSpec Output = - TensorSpec::createSpec(DecisionName, {1}); static const TensorSpec Reward = TensorSpec::createSpec("reward", {1}); // Features we bind on the model. The tensor names have a prefix, and we also @@ -512,7 +528,7 @@ private: // We always log the output; in particular, if we're not evaluating, we // don't have an output spec json file. That's why we handle the // 'normal' output separately. 
- LFS.push_back(Output); + LFS.push_back(DecisionSpec); Log = std::make_unique(std::move(OS), LFS, Reward, /*IncludeReward*/ true); @@ -557,6 +573,7 @@ MLEvictAdvisor::MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA, Runner(std::move(Runner)), MBFI(MBFI), Loops(Loops), InitialQSize(MLEvictAdvisor::getInitialQueueSize(MF)) { assert(this->Runner); + Runner->switchContext(MF.getName()); DoNotNormalize.set(FeatureIDs::mask); DoNotNormalize.set(FeatureIDs::is_free); DoNotNormalize.set(FeatureIDs::is_hint); @@ -1134,7 +1151,10 @@ bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) { #endif // #ifdef LLVM_HAVE_TFLITE RegAllocEvictionAdvisorAnalysis *llvm::createReleaseModeAdvisor() { - return new ReleaseModeEvictionAdvisorAnalysis(); + return llvm::isEmbeddedModelEvaluatorValid() || + !InteractiveChannelBaseName.empty() + ? new ReleaseModeEvictionAdvisorAnalysis() + : nullptr; } // In all cases except development mode, we don't need scoring. diff --git a/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp index 320a184..7a5345e 100644 --- a/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp +++ b/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp @@ -14,6 +14,7 @@ #include "RegAllocGreedy.h" #include "RegAllocPriorityAdvisor.h" #include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/InteractiveModelRunner.h" #include "llvm/Analysis/MLModelRunner.h" #include "llvm/Analysis/ReleaseModeModelRunner.h" #include "llvm/Analysis/TensorSpec.h" @@ -40,6 +41,16 @@ using namespace llvm; +static cl::opt InteractiveChannelBaseName( + "regalloc-priority-interactive-channel-base", cl::Hidden, + cl::desc( + "Base file path for the interactive mode. 
The incoming filename should " + "have the name .in, while " + "the outgoing name should be " + ".out")); + +using CompiledModelType = NoopSavedModelImpl; + // Options that only make sense in development mode #ifdef LLVM_HAVE_TFLITE #include "RegAllocScore.h" @@ -65,6 +76,9 @@ static const std::vector PerLiveRangeShape{1}; M(float, weight, PerLiveRangeShape, "weight") #define DecisionName "priority" +static const TensorSpec DecisionSpec = + TensorSpec::createSpec(DecisionName, {1}); + // Named features index. enum FeatureIDs { @@ -125,13 +139,20 @@ private: std::unique_ptr getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override { - if (!Runner) - Runner = std::make_unique>( - MF.getFunction().getContext(), InputFeatures, DecisionName); + if (!Runner) { + if (InteractiveChannelBaseName.empty()) + Runner = std::make_unique>( + MF.getFunction().getContext(), InputFeatures, DecisionName); + else + Runner = std::make_unique( + MF.getFunction().getContext(), InputFeatures, DecisionSpec, + InteractiveChannelBaseName + ".out", + InteractiveChannelBaseName + ".in"); + } return std::make_unique( MF, RA, &getAnalysis(), Runner.get()); } - std::unique_ptr> Runner; + std::unique_ptr Runner; }; // =================================== @@ -140,9 +161,6 @@ private: // // Features we log #ifdef LLVM_HAVE_TFLITE - -static const TensorSpec Output = - TensorSpec::createSpec(DecisionName, {1}); static const TensorSpec Reward = TensorSpec::createSpec("reward", {1}); #define _DECL_TRAIN_FEATURES(type, name, shape, _) \ @@ -231,7 +249,7 @@ private: // We always log the output; in particular, if we're not evaluating, we // don't have an output spec json file. That's why we handle the // 'normal' output separately. 
- LFS.push_back(Output); + LFS.push_back(DecisionSpec); Log = std::make_unique(std::move(OS), LFS, Reward, /*IncludeReward*/ true); @@ -258,7 +276,10 @@ private: } // namespace llvm RegAllocPriorityAdvisorAnalysis *llvm::createReleaseModePriorityAdvisor() { - return new ReleaseModePriorityAdvisorAnalysis(); + return llvm::isEmbeddedModelEvaluatorValid() || + !InteractiveChannelBaseName.empty() + ? new ReleaseModePriorityAdvisorAnalysis() + : nullptr; } MLPriorityAdvisor::MLPriorityAdvisor(const MachineFunction &MF, @@ -268,6 +289,7 @@ MLPriorityAdvisor::MLPriorityAdvisor(const MachineFunction &MF, : RegAllocPriorityAdvisor(MF, RA, Indexes), DefaultAdvisor(MF, RA, Indexes), Runner(std::move(Runner)) { assert(this->Runner); + Runner->switchContext(MF.getName()); } float MLPriorityAdvisor::getPriorityImpl(const LiveInterval &LI) const { diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp index b1743d3..048496f 100644 --- a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp +++ b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp @@ -100,9 +100,7 @@ template <> Pass *llvm::callDefaultCtor() { #endif break; case RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release: -#if defined(LLVM_HAVE_TF_AOT) Ret = createReleaseModeAdvisor(); -#endif break; } if (Ret) diff --git a/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp b/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp index b3a13cc..e031019 100644 --- a/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp +++ b/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp @@ -81,9 +81,7 @@ template <> Pass *llvm::callDefaultCtor() { #endif break; case RegAllocPriorityAdvisorAnalysis::AdvisorMode::Release: -#if defined(LLVM_HAVE_TF_AOT_REGALLOCPRIORITYMODEL) Ret = createReleaseModePriorityAdvisor(); -#endif break; } if (Ret) diff --git a/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py b/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py new file mode 100644 index 0000000..dc96804 --- /dev/null +++ 
b/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py @@ -0,0 +1,28 @@ +import log_reader +import interactive_host +import sys + + +def main(args): + # this advisor just picks the first legal register to evict, which is + # identifiable by the "mask" feature + class Advisor: + to_return = False + + def advice(self, tensor_values: list[log_reader.TensorValue]): + for tv in tensor_values: + if tv.spec().name != 'mask': + continue + for i, v in enumerate(tv): + if v == 1: + return i + # i.e. invalid: + return -1 + + + a = Advisor() + interactive_host.run_interactive(args[0], a.advice, args[1:]) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/llvm/test/CodeGen/MLRegalloc/interactive-mode.ll b/llvm/test/CodeGen/MLRegalloc/interactive-mode.ll new file mode 100644 index 0000000..141c24d --- /dev/null +++ b/llvm/test/CodeGen/MLRegalloc/interactive-mode.ll @@ -0,0 +1,22 @@ +; RUN: rm -rf %t.rundir +; RUN: rm -rf %t.channel-basename.* +; RUN: mkdir %t.rundir +; RUN: cp %S/../../../lib/Analysis/models/log_reader.py %t.rundir +; RUN: cp %S/../../../lib/Analysis/models/interactive_host.py %t.rundir +; RUN: cp %S/Inputs/interactive_main.py %t.rundir +; RUN: %python %t.rundir/interactive_main.py %t.channel-basename \ +; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=release -interactive-model-runner-echo-reply \ +; RUN: -regalloc-evict-interactive-channel-base=%t.channel-basename %S/Inputs/two-large-fcts.ll -o /dev/null | FileCheck %s + +;; Make sure we see both contexts. 
Also sanity-check that the advice is the +;; expected one - the index of the first legal register +; CHECK: context: SyFgets +; CHECK-NEXT: observation: 0 +; CHECK-NEXT: mask: 0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 +; CHECK: observation: 1 +; CHECK-NEXT: mask: 0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 +; CHECK: context: SyFgetsCopy +; CHECK-NEXT: observation: 0 + +; CHECK: index_to_evict: 9 +; CHECK-NEXT: index_to_evict: 10 diff --git a/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py b/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py new file mode 100644 index 0000000..2f80845 --- /dev/null +++ b/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py @@ -0,0 +1,21 @@ +import interactive_host +import sys + + +def main(args): + + class Advisor: + to_return = False + + def advice(self, _): + # The advice will be a sequence of yes/no/yes/no/... + # see ../interactive-mode.ll + self.to_return = not self.to_return + return int(self.to_return) + + a = Advisor() + interactive_host.run_interactive(args[0], a.advice, args[1:]) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/llvm/test/Transforms/Inline/ML/interactive-mode.ll b/llvm/test/Transforms/Inline/ML/interactive-mode.ll new file mode 100644 index 0000000..55bde78 --- /dev/null +++ b/llvm/test/Transforms/Inline/ML/interactive-mode.ll @@ -0,0 +1,27 @@ +; RUN: rm -rf %t.rundir +; RUN: rm -rf %t.channel-basename.* +; RUN: mkdir %t.rundir +; RUN: cp %S/../../../../lib/Analysis/models/log_reader.py %t.rundir +; RUN: cp %S/../../../../lib/Analysis/models/interactive_host.py %t.rundir +; RUN: cp %S/Inputs/interactive_main.py %t.rundir +; RUN: %python %t.rundir/interactive_main.py %t.channel-basename \ +; RUN: opt -passes=scc-oz-module-inliner -interactive-model-runner-echo-reply \ +; RUN: -enable-ml-inliner=release --inliner-interactive-channel-base=%t.channel-basename %S/Inputs/test-module.ll -S -o /dev/null | FileCheck %s + +;; It'd be
nice if we had stdout and stderr interleaved, but we don't, so +;; let's just check the features have non-zero values, and that we see as many +;; advices as observations, and that the advices flip-flop as intended. +; CHECK: context: +; CHECK-NEXT: observation: 0 +; CHECK-NEXT: sroa_savings: 0 +; CHECK: unsimplified_common_instructions: 5 +; CHECK: callee_users: 3 +; CHECK: observation: 5 +; CHECK-NOT: observation: 6 + +; CHECK: inlining_decision: 1 +; CHECK-NEXT: inlining_decision: 0 +; CHECK-NEXT: inlining_decision: 1 +; CHECK-NEXT: inlining_decision: 0 +; CHECK-NEXT: inlining_decision: 1 +; CHECK-NEXT: inlining_decision: 0 -- 2.7.4