Remove legacy way of exposing caffe2 operators to PyTorch (#17742)
author Sebastian Messmer <messmer@fb.com>
Fri, 8 Mar 2019 18:19:49 +0000 (10:19 -0800)
committer Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Fri, 8 Mar 2019 18:22:41 +0000 (10:22 -0800)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17742

This path is no longer used and is incompatible with the changes stacked on top of this diff, so it is being removed.
cc bwasti to check and confirm these can really be deleted

Reviewed By: ezyang

Differential Revision: D14362426

fbshipit-source-id: 32cdc19f28c2a981ae1e204901420998367ee588

caffe2/core/operator.cc
caffe2/core/operator.h
caffe2/core/operator_test.cc
tools/build_variables.py
torch/CMakeLists.txt
torch/csrc/jit/caffe2_operator.cpp [deleted file]
torch/csrc/jit/caffe2_operator.h [deleted file]
torch/csrc/jit/custom_operator.h
torch/csrc/jit/register_caffe2_ops.cpp [deleted file]

index 30a4d50..a9f7862 100644
@@ -354,23 +354,6 @@ unique_ptr<OperatorBase> CreateOperator(
   }
 }
 
-void RunOperator(
-    c10::Symbol name,
-    const std::vector<c10::IValue>& inputs,
-    const std::vector<c10::IValue*>& outputs) {
-  auto fn_wrap =
-      caffe2::FunctionSchemaRegistry()->Create(name.toUnqualString());
-  CAFFE_ENFORCE(
-      fn_wrap,
-      "Operator not registered with FunctionSchema constructor.",
-      name.toUnqualString());
-  auto fn = fn_wrap->getSchema();
-  auto op = caffe2::FunctionSchemaOperatorRegistry()->Create(
-      name.toUnqualString(), fn, inputs, outputs);
-
-  op->Run();
-}
-
 std::map<DeviceType, OperatorRegistry*>* gDeviceTypeRegistry() {
   static std::map<DeviceType, OperatorRegistry*> g_device_type_registry;
   return &g_device_type_registry;
@@ -403,15 +386,6 @@ C10_DEFINE_REGISTRY(
     const OperatorDef&,
     const vector<GradientWrapper>&);
 
-C10_DEFINE_REGISTRY(
-    FunctionSchemaOperatorRegistry,
-    OperatorBase,
-    const c10::FunctionSchema,
-    std::vector<c10::IValue>,
-    std::vector<c10::IValue*>);
-
-C10_DEFINE_REGISTRY(FunctionSchemaRegistry, FunctionSchemaStorageBase);
-
 GradientOpsMeta GetGradientForOp(
     const OperatorDef& def, const vector<GradientWrapper>& g_output) {
   std::unique_ptr<GradientMakerBase> maker(
@@ -757,11 +731,6 @@ std::set<std::string> GetRegisteredOperators() {
     all_keys.emplace(name);
   }
 
-  // FunctionSchema registered operators
-  for (const auto& name : FunctionSchemaOperatorRegistry()->Keys()) {
-    all_keys.emplace(name);
-  }
-
   return all_keys;
 }
 
index d832dec..dceb918 100644
@@ -1239,52 +1239,6 @@ C10_DECLARE_REGISTRY(
   REGISTER_HIP_OPERATOR_WITH_ENGINE(name, MIOPEN, __VA_ARGS__) \
   REGISTER_HIP_OPERATOR_WITH_ENGINE(name, CUDNN, __VA_ARGS__) // Make CUDNN an alias of MIOPEN for HIP ops
 
-C10_DECLARE_REGISTRY(
-    FunctionSchemaOperatorRegistry,
-    OperatorBase,
-    const c10::FunctionSchema,
-    std::vector<c10::IValue>,
-    std::vector<c10::IValue*>);
-
-struct FunctionSchemaStorageBase {
-  FunctionSchemaStorageBase() {}
-  virtual c10::FunctionSchema getSchema() = 0;
-  virtual ~FunctionSchemaStorageBase() {}
-};
-
-C10_DECLARE_REGISTRY(FunctionSchemaRegistry, FunctionSchemaStorageBase);
-
-// Prefer to use the {DECLARE,DEFINE}_FUNCTION_SCHEMA_OPERATOR macros,
-// as they wrap it all in a Meyer's singleton accessible from Torch.
-
-#define REGISTER_FUNCTION_SCHEMA_OPERATOR(name, inputs, outputs, impl)        \
-  C10_REGISTER_CLASS(FunctionSchemaOperatorRegistry, name, impl)              \
-  struct FunctionSchemaStorageBase##name : public FunctionSchemaStorageBase { \
-    c10::FunctionSchema getSchema() override {                                \
-      return c10::FunctionSchema("_caffe2::" #name, inputs, outputs);         \
-    }                                                                         \
-  };                                                                          \
-  C10_REGISTER_CLASS(                                                         \
-      FunctionSchemaRegistry, name, FunctionSchemaStorageBase##name)
-
-#define DEFINE_FUNCTION_SCHEMA_OPERATOR(name, inputs, outputs, impl) \
-  void CAFFE2_MEYERS_OP_REGISTRATION_##name() {                      \
-    REGISTER_FUNCTION_SCHEMA_OPERATOR(name, inputs, outputs, impl);  \
-  }                                                                  \
-  static CAFFE2_STRUCT_OP_REGISTRATION_##name                        \
-      CAFFE2_STRUCT_OP_REGISTRATION_DEFN_##name;
-
-#define DECLARE_FUNCTION_SCHEMA_OPERATOR(name)             \
-  CAFFE2_API void CAFFE2_MEYERS_OP_REGISTRATION_##name();  \
-  struct CAFFE2_API CAFFE2_STRUCT_OP_REGISTRATION_##name { \
-    CAFFE2_STRUCT_OP_REGISTRATION_##name() {               \
-      CAFFE2_MEYERS_OP_REGISTRATION_##name();              \
-    }                                                      \
-  };
-
-#define GET_FUNCTION_SCHEMA(name) \
-  FunctionSchemaRegistry()->Create(name)->getSchema()
-
 // StaticLinkingProtector is a helper class that ensures that the Caffe2
 // library is linked correctly with whole archives (in the case of static
 // linking). What happens is that when CreateOperator is called for the first
@@ -1342,13 +1296,6 @@ CAFFE2_API unique_ptr<OperatorBase> CreateOperator(
     Workspace* ws,
     int net_position = OperatorBase::kNoNetPositionSet);
 
-// Using the new C10 interface and FunctionSchema registry,
-// instantiate and run the operator.
-CAFFE2_API void RunOperator(
-    c10::Symbol name,
-    const std::vector<c10::IValue>& inputs,
-    const std::vector<c10::IValue*>& outputs);
-
 CAFFE2_API const std::string OpRegistryKey(
     const std::string& op_type,
     const std::string& engine = "");
index c813e04..1ce881d 100644
@@ -595,90 +595,4 @@ TEST(IsTestArg, non_standard) {
       "JustTestWithNonStandardIsTestArg");
 }
 
-class TestOperatorWithFunctionSchema final : public Operator<CPUContext> {
- public:
-  TestOperatorWithFunctionSchema(const OperatorDef& def, Workspace* ws)
-      : Operator<CPUContext>(def, ws) {}
-
-  TestOperatorWithFunctionSchema(
-      const c10::FunctionSchema& f,
-      const std::vector<c10::IValue>& i,
-      const std::vector<c10::IValue*>& o)
-      : Operator<CPUContext>(f, i, o) {
-    if (HasArgument("test_arg")) {
-      test_arg_ =
-          static_cast<float>(this->GetSingleArgument<float>("test_arg", 0.01));
-    }
-  }
-
-  bool RunOnDevice() override {
-    auto out =
-        OutputTensor(0, {1, 1}, at::TensorOptions(TypeMeta::Make<float>()));
-    out->mutable_data<float>()[0] = test_arg_;
-    return true;
-  }
-
- private:
-  float test_arg_ = 0;
-};
-
-REGISTER_CPU_OPERATOR(
-    TestOperatorWithFunctionSchema,
-    TestOperatorWithFunctionSchema);
-OPERATOR_SCHEMA(TestOperatorWithFunctionSchema)
-    .NumInputs(0, 1)
-    .NumOutputs(0, 1)
-    .Arg("test_arg", "this arg is required", true);
-
-// The new way combines OPERATOR_SCHEMA and REGISTER_OPERATOR
-REGISTER_FUNCTION_SCHEMA_OPERATOR(
-    TestOperatorWithFunctionSchema,
-    {c10::Argument("test_arg")},
-    {c10::Argument("output")},
-    TestOperatorWithFunctionSchema)
-
-TEST(FunctionSchema, Creation) {
-  std::vector<c10::IValue> inputs;
-  float test_val = 1337.0f;
-  inputs.emplace_back(test_val);
-
-  caffe2::Tensor out = TensorCPUFromValues<float>({1, 1}, {123.0f});
-  std::vector<c10::IValue*> outputs;
-  auto t = at::Tensor(std::move(out.getIntrusivePtr()));
-  auto out_val = c10::IValue(t);
-  outputs.emplace_back(&out_val);
-
-  auto fn = FunctionSchemaRegistry()
-                ->Create("TestOperatorWithFunctionSchema")
-                ->getSchema();
-  auto op = FunctionSchemaOperatorRegistry()->Create(
-      "TestOperatorWithFunctionSchema", fn, inputs, outputs);
-
-  op->Run();
-  EXPECT_EQ(out.data<float>()[0], test_val);
-}
-
-TEST(FunctionSchema, OutputChange) {
-  std::vector<c10::IValue> inputs;
-  float test_val = 1337.0f;
-  inputs.emplace_back(test_val);
-
-  // Wrong type
-  caffe2::Tensor out = TensorCPUFromValues<int>({1, 1}, {123});
-  std::vector<c10::IValue*> outputs;
-  auto t = at::Tensor(std::move(out.getIntrusivePtr()));
-  auto out_val = c10::IValue(t);
-  outputs.emplace_back(&out_val);
-
-  auto fn = FunctionSchemaRegistry()
-                ->Create("TestOperatorWithFunctionSchema")
-                ->getSchema();
-  auto op = FunctionSchemaOperatorRegistry()->Create(
-      "TestOperatorWithFunctionSchema", fn, inputs, outputs);
-
-  op->Run();
-  out = caffe2::Tensor(out_val.toTensor());
-  EXPECT_EQ(out.data<float>()[0], test_val);
-}
-
 }  // namespace caffe2
index 52acd27..adba461 100644
@@ -59,8 +59,6 @@ libtorch_sources = [
     "torch/csrc/jit/ir.cpp",
     "torch/csrc/jit/irparser.cpp",
     "torch/csrc/jit/netdef_converter.cpp",
-    "torch/csrc/jit/caffe2_operator.cpp",
-    "torch/csrc/jit/register_caffe2_ops.cpp",
     "torch/csrc/jit/register_c10_ops.cpp",
     "torch/csrc/jit/symbolic_script.cpp",
     "torch/csrc/jit/operator.cpp",
index a06ac07..5bcd344 100644
@@ -136,7 +136,6 @@ set(TORCH_SRCS
   ${TORCH_SRC_DIR}/csrc/jit/irparser.cpp
   ${TORCH_SRC_DIR}/csrc/jit/netdef_converter.cpp
   ${TORCH_SRC_DIR}/csrc/jit/operator.cpp
-  ${TORCH_SRC_DIR}/csrc/jit/caffe2_operator.cpp
   ${TORCH_SRC_DIR}/csrc/jit/register_c10_ops.cpp
   ${TORCH_SRC_DIR}/csrc/jit/symbolic_script.cpp
   ${TORCH_SRC_DIR}/csrc/jit/passes/alias_analysis.cpp
@@ -195,11 +194,6 @@ set(TORCH_SRCS
   ${TORCH_ROOT}/test/cpp/jit/no-gtest.cpp
   )
 
-if (BUILD_CAFFE2_OPS)
-  list(APPEND TORCH_SRCS
-    ${TORCH_SRC_DIR}/csrc/jit/register_caffe2_ops.cpp)
-endif()
-
 if (WIN32)
   list(APPEND TORCH_SRCS
     ${TORCH_SRC_DIR}/csrc/jit/fuser/cpu/dynamic_library_win.cpp
diff --git a/torch/csrc/jit/caffe2_operator.cpp b/torch/csrc/jit/caffe2_operator.cpp
deleted file mode 100644
index b32d31c..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-#include <jit/caffe2_operator.h>
-#include <caffe2/core/operator.h>
-
-namespace torch {
-namespace jit {
-
-Operator createOperatorFromCaffe2(const std::string& name) {
-  auto symbolic_name = c10::Symbol::fromQualString("caffe2::" + name);
-  auto fn_wrap = caffe2::FunctionSchemaRegistry()->Create(symbolic_name.toUnqualString());
-  CAFFE_ENFORCE(
-      fn_wrap,
-      "Operator not registered with FunctionSchema constructor:",
-      name);
-  auto fn = fn_wrap->getSchema();
-
-  return Operator(fn, [symbolic_name, fn](Stack& stack) {
-      const auto input_size = fn.arguments().size();
-      const auto output_size = fn.returns().size();
-      std::vector<c10::IValue> inputs;
-      for (size_t i = 0; i < input_size; ++i) {
-        auto input = pop(stack);
-        // Tensors come in as variables but need to be unwrapped
-        if (input.isTensor()) {
-          input = torch::autograd::Variable(input.toTensor()).data();
-        }
-        inputs.emplace(inputs.begin(), std::move(input));
-      }
-
-      // We use a temporary stack for arguments passed into RunOperator
-      std::list<c10::IValue> outputs_real;
-      std::vector<c10::IValue*> outputs;
-      for (size_t i = 0; i < output_size; ++i) {
-        if (DimensionedTensorType::get() == fn.returns()[i].type()) {
-          outputs_real.emplace_back(c10::IValue(at::Tensor()));
-        } else {
-          outputs_real.emplace_back(c10::IValue());
-        }
-        outputs.emplace_back(&outputs_real.back());
-      }
-
-      caffe2::RunOperator(symbolic_name, inputs, outputs);
-
-      // We need to convert tensors back into variables
-      for (auto& t : outputs_real) {
-        if (t.isTensor()) {
-            push(stack, c10::IValue(torch::autograd::make_variable(t.toTensor())));
-        } else {
-            push(stack, std::move(t));
-        }
-      }
-
-      return 0;
-  });
-}
-
-}} // torch::jit
diff --git a/torch/csrc/jit/caffe2_operator.h b/torch/csrc/jit/caffe2_operator.h
deleted file mode 100644
index 36d3bbe..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#pragma once
-
-#include <torch/csrc/jit/operator.h>
-
-namespace torch {
-namespace jit {
-
-Operator createOperatorFromCaffe2(const std::string& name);
-
-}} // torch::jit
index 0b2fc27..e787dc8 100644
@@ -1,6 +1,5 @@
 #pragma once
 
-#include <torch/csrc/jit/caffe2_operator.h>
 #include <torch/csrc/jit/operator.h>
 #include <ATen/core/stack.h>
 #include <torch/csrc/jit/tracer.h>
@@ -280,14 +279,6 @@ struct TORCH_API RegisterOperators {
     op(name, std::forward<Implementation>(implementation));
   }
 
-  /// Requires declaration of the FunctionSchema with
-  /// REGISTER_FUNCTION_SCHEMA_OPERATOR(name, ...)
-  static RegisterOperators Caffe2Operator(const std::string& name) {
-    auto r = RegisterOperators();
-    registerOperator(createOperatorFromCaffe2(name));
-    return r;
-  }
-
   /// Creates a new operator from a name and implementation function (function
   /// pointer or function object/lambda) using `torch::jit::createOperator`, and
   /// then registers the operator.
diff --git a/torch/csrc/jit/register_caffe2_ops.cpp b/torch/csrc/jit/register_caffe2_ops.cpp
deleted file mode 100644
index ea6295c..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <jit/custom_operator.h>
-
-#define REGISTER_CAFFE2_OP(name) \
-  static caffe2::CAFFE2_STRUCT_OP_REGISTRATION_##name CAFFE2_STRUCT_OP_REGISTRATION_DEFN_TORCH_##name; \
-  static auto CAFFE2_OP_EXPORT_##name = torch::jit::RegisterOperators::Caffe2Operator(#name);