[PyTorch] Reduce code size of register_prim_ops.cpp (#61494)
author Scott Wolchok <swolchok@fb.com>
Fri, 27 Aug 2021 19:55:26 +0000 (12:55 -0700)
committer Facebook GitHub Bot <facebook-github-bot@users.noreply.github.com>
Fri, 27 Aug 2021 19:56:35 +0000 (12:56 -0700)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/61494

Creating a constexpr array of operator-registration arguments and then looping over it is much cheaper, in terms of generated code size, than emitting a separate registration call per operator.
ghstack-source-id: 136639302
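
For illustration, here is a minimal self-contained sketch of the pattern this change adopts (the names OpArgs, addInts, and createOperators are illustrative stand-ins, not the exact identifiers introduced in this diff): a constexpr table of plain argument structs replaces a long chain of per-operator OperatorGenerator(...) calls, and a single loop turns the table into Operator objects at registration time.

#include <iterator>
#include <vector>

// Stand-ins for the real torch::jit types, just enough to keep the sketch compilable.
struct Stack;
using Operation = void (*)(Stack*);
enum class AliasAnalysisKind { FROM_SCHEMA, CONSERVATIVE, INTERNAL_SPECIAL_CASE };

struct Operator {
  Operator(const char* /*schema*/, Operation /*op*/, AliasAnalysisKind /*aa*/) {}
};

// Plain aggregate describing one operator; cheap to place in a constexpr table.
struct OpArgs {
  const char* schema_str;  // nullptr when selective build drops the schema
  Operation op;
  AliasAnalysisKind alias_analysis;
};

void addInts(Stack* /*stack*/) { /* pop two ints, push their sum */ }

// One table entry per operator instead of one OperatorGenerator(...) call per operator.
constexpr OpArgs kOps[] = {
    {"aten::example_add.int(int a, int b) -> int", &addInts,
     AliasAnalysisKind::FROM_SCHEMA},
    // ... many more entries ...
};

// A single loop builds the vector that RegisterOperators consumes at startup,
// skipping entries that selective build nulled out.
inline std::vector<Operator> createOperators() {
  std::vector<Operator> ops;
  ops.reserve(std::size(kOps));
  for (const OpArgs& a : kOps) {
    if (a.schema_str != nullptr) {
      ops.emplace_back(a.schema_str, a.op, a.alias_analysis);
    }
  }
  return ops;
}

The real change additionally distinguishes plain Operation pointers from OperationCreator callbacks inside OperatorGeneratorArgs; the sketch above omits that detail.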

Test Plan:
fitsships

Ran Buildsizebot on some mobile apps to check the size impact.

Reviewed By: dhruvbird, iseeyuan

Differential Revision: D29646977

fbshipit-source-id: 6144999f6acfc4e5dcd659845859702051344d88

torch/csrc/jit/runtime/custom_operator.h
torch/csrc/jit/runtime/operator.h
torch/csrc/jit/runtime/register_ops_utils.h
torch/csrc/jit/runtime/register_prim_ops.cpp

diff --git a/torch/csrc/jit/runtime/custom_operator.h b/torch/csrc/jit/runtime/custom_operator.h
index 45ad667..e39789b 100644
@@ -19,7 +19,7 @@ struct TORCH_API RegisterOperators {
   /// Registers a vector of already created `Operator`s.
   /// The operator element is now optional to filter null ops. It's backward
   /// compatible and works for selective operator registration.
-  RegisterOperators(std::vector<c10::optional<Operator>> operators) {
+  explicit RegisterOperators(std::vector<c10::optional<Operator>> operators) {
     for (c10::optional<Operator>& o : operators) {
       if (o) {
         registerOperator(std::move(o.value()));
diff --git a/torch/csrc/jit/runtime/operator.h b/torch/csrc/jit/runtime/operator.h
index e243e8f..ccdbfa0 100644
@@ -220,7 +220,7 @@ TORCH_API bool aliasAnalysisHasSpecialCaseFor(c10::Symbol sym);
 // string.
 template <typename Func>
 c10::optional<Operator> OperatorGenerator(
-    torch::detail::SelectiveStr<true> schema_str,
+    const char* schema_str,
     Func&& op,
     AliasAnalysisKind alias_analysis) {
   return c10::optional<Operator>(Operator(
@@ -229,6 +229,17 @@ c10::optional<Operator> OperatorGenerator(
 
 template <typename Func>
 c10::optional<Operator> OperatorGenerator(
+    torch::detail::SelectiveStr<true> schema_str,
+    Func&& op,
+    AliasAnalysisKind alias_analysis) {
+  return OperatorGenerator(
+      static_cast<const char*>(schema_str),
+      std::forward<Func>(op),
+      alias_analysis);
+}
+
+template <typename Func>
+c10::optional<Operator> OperatorGenerator(
     torch::detail::SelectiveStr<false> schema_str,
     Func&& op,
     AliasAnalysisKind alias_analysis) {
diff --git a/torch/csrc/jit/runtime/register_ops_utils.h b/torch/csrc/jit/runtime/register_ops_utils.h
index e068b78..5d00872 100644
 
 namespace torch {
 namespace jit {
-inline c10::AliasAnalysisKind aliasAnalysisFromSchema() {
+constexpr inline c10::AliasAnalysisKind aliasAnalysisFromSchema() {
   return c10::AliasAnalysisKind::FROM_SCHEMA;
 }
 
-inline c10::AliasAnalysisKind aliasAnalysisConservative() {
+constexpr inline c10::AliasAnalysisKind aliasAnalysisConservative() {
   return c10::AliasAnalysisKind::CONSERVATIVE;
 }
 
-inline c10::AliasAnalysisKind aliasAnalysisSpecialCase() {
+constexpr inline c10::AliasAnalysisKind aliasAnalysisSpecialCase() {
   return c10::AliasAnalysisKind::INTERNAL_SPECIAL_CASE;
 }
 
@@ -430,9 +430,46 @@ void listCopyAndSort<at::Tensor>(Stack* stack);
 
 void listSetItem(Stack* stack);
 
+struct OperatorGeneratorArgs {
+  const char* schema_str;
+  bool isOperationCreator;
+  union {
+    void (*operation)(Stack*);
+    OperationCreator operationCreator;
+  };
+  AliasAnalysisKind aliasAnalysis;
+
+  explicit constexpr OperatorGeneratorArgs(
+      torch::detail::SelectiveStr<true> schema_str,
+      void (*op)(Stack*),
+      AliasAnalysisKind aa)
+      : schema_str(schema_str),
+        isOperationCreator(false),
+        operation(op),
+        aliasAnalysis(aa) {}
+
+  explicit constexpr OperatorGeneratorArgs(
+      torch::detail::SelectiveStr<true> schema_str,
+      OperationCreator opCreator,
+      AliasAnalysisKind aa)
+      : schema_str(schema_str),
+        isOperationCreator(true),
+        operationCreator(opCreator),
+        aliasAnalysis(aa) {}
+
+  template <typename... Args>
+  explicit constexpr OperatorGeneratorArgs(
+      torch::detail::SelectiveStr<false>,
+      Args...)
+      : schema_str(nullptr),
+        isOperationCreator(false),
+        operation(nullptr),
+        aliasAnalysis(AliasAnalysisKind::INTERNAL_SPECIAL_CASE) {}
+};
+
 #define DEFINE_GENERIC_BINARY_OP(                                             \
     aten_op, op, int_float_result, complex_result)                            \
-  OperatorGenerator(                                                          \
+  OperatorGeneratorArgs(                                                      \
       TORCH_SELECTIVE_SCHEMA(#aten_op                                         \
                              ".int_int(int a, int b) -> " #int_float_result), \
       [](Stack* stack) {                                                      \
@@ -441,7 +478,7 @@ void listSetItem(Stack* stack);
         push(stack, op);                                                      \
       },                                                                      \
       aliasAnalysisFromSchema()),                                             \
-      OperatorGenerator(                                                      \
+      OperatorGeneratorArgs(                                                  \
           TORCH_SELECTIVE_SCHEMA(                                             \
               #aten_op                                                        \
               ".float_float(float a, float b) -> " #int_float_result),        \
@@ -451,7 +488,7 @@ void listSetItem(Stack* stack);
             push(stack, op);                                                  \
           },                                                                  \
           aliasAnalysisFromSchema()),                                         \
-      OperatorGenerator(                                                      \
+      OperatorGeneratorArgs(                                                  \
           TORCH_SELECTIVE_SCHEMA(                                             \
               #aten_op                                                        \
               ".complex_complex(complex a, complex b) -> " #complex_result),  \
@@ -464,7 +501,7 @@ void listSetItem(Stack* stack);
 
 // define implementations for primitive number ops
 #define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \
-  OperatorGenerator(                                                           \
+  OperatorGeneratorArgs(                                                       \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result),   \
       [](Stack* stack) {                                                       \
         int64_t a, b;                                                          \
@@ -472,7 +509,7 @@ void listSetItem(Stack* stack);
         push(stack, int_op);                                                   \
       },                                                                       \
       aliasAnalysisFromSchema()),                                              \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA(                                              \
               #aten_op ".float(float a, float b) -> " #float_result),          \
           [](Stack* stack) {                                                   \
@@ -483,7 +520,7 @@ void listSetItem(Stack* stack);
           aliasAnalysisFromSchema())
 
 #define DEFINE_INT_FLOAT_OP(aten_op, op, result)                            \
-  OperatorGenerator(                                                        \
+  OperatorGeneratorArgs(                                                    \
       TORCH_SELECTIVE_SCHEMA(#aten_op                                       \
                              ".int_float(int a, float b) -> " #result),     \
       [](Stack* stack) {                                                    \
@@ -493,7 +530,7 @@ void listSetItem(Stack* stack);
         push(stack, op);                                                    \
       },                                                                    \
       aliasAnalysisFromSchema()),                                           \
-      OperatorGenerator(                                                    \
+      OperatorGeneratorArgs(                                                \
           TORCH_SELECTIVE_SCHEMA(#aten_op                                   \
                                  ".float_int(float a, int b) -> " #result), \
           [](Stack* stack) {                                                \
@@ -505,7 +542,7 @@ void listSetItem(Stack* stack);
           aliasAnalysisFromSchema())
 
 #define DEFINE_INT_OP(aten_op, op)                                  \
-  OperatorGenerator(                                                \
+  OperatorGeneratorArgs(                                            \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> int"), \
       [](Stack* stack) {                                            \
         int64_t a, b;                                               \
@@ -515,7 +552,7 @@ void listSetItem(Stack* stack);
       aliasAnalysisFromSchema())
 
 #define DEFINE_STR_CMP_OP(aten_op, op)                               \
-  OperatorGenerator(                                                 \
+  OperatorGeneratorArgs(                                             \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".str(str a, str b) -> bool"), \
       [](Stack* stack) {                                             \
         auto b = pop(stack).toStringRef();                           \
@@ -530,7 +567,7 @@ void listSetItem(Stack* stack);
 // in unintended implicit conversions
 #define DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC(          \
     aten_op, int_op, float_op, result, string_val)                \
-  OperatorGenerator(                                              \
+  OperatorGeneratorArgs(                                          \
       TORCH_SELECTIVE_SCHEMA(#aten_op string_val                  \
                              "(Scalar a, Scalar b) -> " #result), \
       [](Stack* stack) {                                          \
@@ -586,7 +623,7 @@ void listSetItem(Stack* stack);
       DEFINE_STR_CMP_OP(aten_op, op)
 
 #define DEFINE_UNARY_INT_OP(aten_op, op, result)                  \
-  OperatorGenerator(                                              \
+  OperatorGeneratorArgs(                                          \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a) -> " #result), \
       [](Stack* stack) {                                          \
         int64_t a;                                                \
@@ -596,7 +633,7 @@ void listSetItem(Stack* stack);
       aliasAnalysisFromSchema())
 
 #define DEFINE_UNARY_FLOAT_OP(aten_op, op, result)                    \
-  OperatorGenerator(                                                  \
+  OperatorGeneratorArgs(                                              \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".float(float a) -> " #result), \
       [](Stack* stack) {                                              \
         double a;                                                     \
@@ -608,7 +645,7 @@ void listSetItem(Stack* stack);
 #define DEFINE_UNARY_OP(aten_op, op, int_result, float_result)            \
   DEFINE_UNARY_INT_OP(aten_op, op, int_result),                           \
       DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result),                   \
-      OperatorGenerator(                                                  \
+      OperatorGeneratorArgs(                                              \
           TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
           [](Stack* stack) {                                              \
             IValue x;                                                     \
@@ -623,7 +660,7 @@ void listSetItem(Stack* stack);
           },                                                              \
           aliasAnalysisFromSchema())
 #define DEFINE_BOOL_OP(aten_op, op)                                     \
-  OperatorGenerator(                                                    \
+  OperatorGeneratorArgs(                                                \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".bool(bool a, bool b) -> bool"), \
       [](Stack* stack) {                                                \
         bool a, b;                                                      \
@@ -632,7 +669,7 @@ void listSetItem(Stack* stack);
       },                                                                \
       aliasAnalysisFromSchema())
 #define DEFINE_STRING_OP(op_name, string_op, result)                    \
-  OperatorGenerator(                                                    \
+  OperatorGeneratorArgs(                                                \
       TORCH_SELECTIVE_SCHEMA(#op_name ".str(str a, str b) ->" #result), \
       [](Stack* stack) {                                                \
         auto b = pop(stack).toStringRef();                              \
@@ -646,7 +683,7 @@ void listSetItem(Stack* stack);
 //-----------------------------------------------------------------------------
 //-----------------------------------------------------------------------------
 #define DEFINE_UNARY_COMPLEX_OP(aten_op, op, result)                      \
-  OperatorGenerator(                                                      \
+  OperatorGeneratorArgs(                                                  \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".complex(complex a) -> " #result), \
       [](Stack* stack) {                                                  \
         c10::complex<double> a;                                           \
@@ -670,7 +707,7 @@ void listSetItem(Stack* stack);
   DEFINE_UNARY_INT_OP(aten_op, op, int_result),                           \
       DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result),                   \
       DEFINE_UNARY_COMPLEX_OP(aten_op, op, complex_result),               \
-      OperatorGenerator(                                                  \
+      OperatorGeneratorArgs(                                              \
           TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
           [](Stack* stack) {                                              \
             IValue x;                                                     \
@@ -700,7 +737,7 @@ void listSetItem(Stack* stack);
     int_result,                                                               \
     float_result,                                                             \
     complex_result)                                                           \
-  OperatorGenerator(                                                          \
+  OperatorGeneratorArgs(                                                      \
       TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result),  \
       [](Stack* stack) {                                                      \
         int64_t a, b;                                                         \
@@ -708,7 +745,7 @@ void listSetItem(Stack* stack);
         push(stack, int_op);                                                  \
       },                                                                      \
       aliasAnalysisFromSchema()),                                             \
-      OperatorGenerator(                                                      \
+      OperatorGeneratorArgs(                                                  \
           TORCH_SELECTIVE_SCHEMA(                                             \
               #aten_op ".complex(complex a, complex b) -> " #complex_result), \
           [](Stack* stack) {                                                  \
@@ -717,7 +754,7 @@ void listSetItem(Stack* stack);
             push(stack, complex_op);                                          \
           },                                                                  \
           aliasAnalysisFromSchema()),                                         \
-      OperatorGenerator(                                                      \
+      OperatorGeneratorArgs(                                                  \
           TORCH_SELECTIVE_SCHEMA(                                             \
               #aten_op ".float(float a, float b) -> " #float_result),         \
           [](Stack* stack) {                                                  \
@@ -728,7 +765,7 @@ void listSetItem(Stack* stack);
           aliasAnalysisFromSchema())
 
 #define DEFINE_INT_COMPLEX_OP(aten_op, op, result)                          \
-  OperatorGenerator(                                                        \
+  OperatorGeneratorArgs(                                                    \
       TORCH_SELECTIVE_SCHEMA(#aten_op                                       \
                              ".int_complex(int a, complex b) -> " #result), \
       [](Stack* stack) {                                                    \
@@ -738,7 +775,7 @@ void listSetItem(Stack* stack);
         push(stack, op);                                                    \
       },                                                                    \
       aliasAnalysisFromSchema()),                                           \
-      OperatorGenerator(                                                    \
+      OperatorGeneratorArgs(                                                \
           TORCH_SELECTIVE_SCHEMA(                                           \
               #aten_op ".complex_int(complex a, int b) -> " #result),       \
           [](Stack* stack) {                                                \
@@ -750,7 +787,7 @@ void listSetItem(Stack* stack);
           aliasAnalysisFromSchema())
 
 #define DEFINE_FLOAT_COMPLEX_OP(aten_op, op, result)                      \
-  OperatorGenerator(                                                      \
+  OperatorGeneratorArgs(                                                  \
       TORCH_SELECTIVE_SCHEMA(                                             \
           #aten_op ".float_complex(float a, complex b) -> " #result),     \
       [](Stack* stack) {                                                  \
@@ -760,7 +797,7 @@ void listSetItem(Stack* stack);
         push(stack, op);                                                  \
       },                                                                  \
       aliasAnalysisFromSchema()),                                         \
-      OperatorGenerator(                                                  \
+      OperatorGeneratorArgs(                                              \
           TORCH_SELECTIVE_SCHEMA(                                         \
               #aten_op ".complex_float(complex a, float b) -> " #result), \
           [](Stack* stack) {                                              \
@@ -773,7 +810,7 @@ void listSetItem(Stack* stack);
 
 #define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_AVOID_COLLISION_GENERIC( \
     aten_op, int_op, float_op, complex_op, result, string_val)        \
-  OperatorGenerator(                                                  \
+  OperatorGeneratorArgs(                                              \
       TORCH_SELECTIVE_SCHEMA(#aten_op string_val                      \
                              "(Scalar a, Scalar b) -> " #result),     \
       [](Stack* stack) {                                              \
@@ -821,7 +858,7 @@ void listSetItem(Stack* stack);
 
 #define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_WITHOUT_INT_COMPLEX_PAIR(     \
     aten_op, int_op, float_op, complex_op, result)                         \
-  OperatorGenerator(                                                       \
+  OperatorGeneratorArgs(                                                   \
       TORCH_SELECTIVE_SCHEMA(#aten_op "(Scalar a, Scalar b) -> " #result), \
       [](Stack* stack) {                                                   \
         IValue x, y;                                                       \
diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp
index 60458a0..2953b68 100644
@@ -86,943 +86,858 @@ auto powWrapper(T a, U b) {
   return pow(a, b);
 }
 
-RegisterOperators reg(
-    {OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::str(t elem) -> str"),
-         [](Stack* stack) {
-           std::stringstream ss;
-           ss << pop(stack);
-           push(stack, ss.str());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::list(str t) -> str[]"),
-         [](Stack* stack) {
-           auto str = pop(stack).toStringRef();
-           c10::List<std::string> chars;
-           chars.reserve(str.size());
-           for (auto c : str) {
-             chars.push_back(std::string(1, c));
-           }
-           push(stack, std::move(chars));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::cpu(Tensor(a) self) -> Tensor(a|b)"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.cpu());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::layout(Tensor a) -> int"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.layout());
-         },
-         aliasAnalysisFromSchema()),
-     Operator(
-         prim::tolist,
-         // This operator has to be unschematized because the return type
-         // depends on the type hint and input. The implementation of this
-         // operator below is intended to be as close to the Python
-         // implementation in torch/csrc/utils/tensor_list.cpp as possible.
-         [](const Node* /*node*/) -> Operation {
-           return [](Stack* stack) {
-             // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-             int elem_ty_val;
-             // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-             int dim_val;
-             at::Tensor t;
-
-             pop(stack, elem_ty_val);
-             pop(stack, dim_val);
-             pop(stack, t);
-
-             // If the Tensor is not on the CPU, transfer it.
-             if (!t.device().is_cpu()) {
-               t = t.cpu();
-             }
-
-             // Rebuild the output type using elem_ty_val and dim_val. Start
-             // with the element type corresponding to elem_ty_val.
-             TypePtr out_ty;
-             if (elem_ty_val == 0) {
-               out_ty = IntType::get();
-             } else if (elem_ty_val == 1) {
-               out_ty = FloatType::get();
-             } else if (elem_ty_val == 2) {
-               out_ty = BoolType::get();
-             } else if (elem_ty_val == 3) {
-               out_ty = ComplexType::get();
-             } else {
-               TORCH_CHECK(
-                   false,
-                   "Unsupported element type for tolist; only int, float, complex and bool are supported");
-             }
-
-             // Check that type of the Tensor matches that of the annotation.
-             // Make an exception for the case in which the annotated type is
-             // float/complex and the Tensor data type is also float/complex;
-             // the elements will be casted to double/c10::complex<double>
-             // later.
-             TORCH_CHECK(
-                 (out_ty == FloatType::get() && t.is_floating_point()) ||
-                     (out_ty == ComplexType::get() && t.is_complex()) ||
-                     tryScalarTypeFromJitType(out_ty) == t.scalar_type(),
-                 "Output annotation element type and runtime tensor element type must match for tolist()");
-
-             // Check that the dimension of the Tensor matches that of the
-             // annotation.
-             TORCH_CHECK(
-                 dim_val == t.dim(),
-                 "Output annotation list dimension and runtime tensor dimension must match for tolist()");
-
-             // Wrap out_ty in a ListType dim times.
-             for (const auto i : c10::irange(dim_val)) {
-               (void)i; // Suppress unused variable warning
-               out_ty = ListType::create(out_ty);
-             }
-
-             int64_t dim = t.dim();
-             auto sizes = t.sizes();
-             auto strides = t.strides();
-             size_t element_size = t.element_size();
-             char* data = static_cast<char*>(t.data_ptr());
-             auto result = tensorToListRecursive(
-                 data,
-                 0,
-                 dim,
-                 out_ty,
-                 t.scalar_type(),
-                 sizes,
-                 strides,
-                 element_size);
-             push(stack, std::move(result));
-           };
-         },
-         aliasAnalysisSpecialCase()),
-     // only used internally in range() translation
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::__range_length(int lo, int hi, int step) -> int"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           int64_t lo, hi, step;
-           pop(stack, lo, hi, step);
-           // error handling when step_val = 0 during runtime
-           if (step == 0) {
-             throw std::runtime_error("range() arg 3 must not be zero");
-           }
-           if (step > 0 && lo < hi) {
-             push(stack, 1 + (hi - 1 - lo) / step);
-           } else if (step < 0 && lo > hi) {
-             push(stack, 1 + (lo - 1 - hi) / (0 - step));
-           } else {
-             push(stack, 0);
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::__derive_index(int index, int start, int step) -> int"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           int64_t index, start, step;
-           pop(stack, index, start, step);
-           push(stack, start + index * step);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::TupleUnpack(Any tup) -> ..."),
-         [](Stack* stack) { tupleUnpack(*stack); },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::unchecked_cast(t x) -> t"),
-         noop,
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::IntImplicit(Tensor a) -> int"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           checkImplicitTensorToNum(a, /*to int*/ true);
-           push(stack, a.item<int64_t>());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::ComplexImplicit(Tensor a) -> complex"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           checkImplicitTensorToNum(a, /*to int*/ false);
-           push(stack, a.item<c10::complex<double>>());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::FloatImplicit(Tensor a) -> float"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           checkImplicitTensorToNum(a, /*to int*/ false);
-           push(stack, a.item<double>());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::ScalarImplicit(Tensor a) -> Scalar"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           checkImplicitTensorToNum(a, /*to int*/ false);
-           push(stack, a.item());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Bool.Tensor(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_nonzero());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Bool.int(int a) -> bool"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           int64_t i;
-           pop(stack, i);
-           push(stack, (bool)i);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Bool.float(float a) -> bool"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           double d;
-           pop(stack, d);
-           push(stack, (bool)d);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Int.Tensor(Tensor a) -> int"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.item<int64_t>());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Int.bool(bool a) -> int"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool b;
-           pop(stack, b);
-           push(stack, static_cast<int64_t>(b));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Int.float(float a) -> int"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           double d;
-           pop(stack, d);
-           push(stack, static_cast<int64_t>(d));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Int.Scalar(Scalar a) -> int"),
-         [](Stack* stack) {
-           IValue scalar;
-           pop(stack, scalar);
-           if (scalar.isInt()) {
-             push(stack, std::move(scalar));
-           } else {
-             // toScalar() needed to avoid strict type check in IValue::toInt.
-             push(stack, static_cast<int64_t>(scalar.toScalar().toInt()));
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Int.str(str a) -> int"),
-         [](Stack* stack) {
-           auto s = pop(stack).toString();
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           std::string::size_type sz;
-           int64_t val = static_cast<int64_t>(c10::stoll(s->string(), &sz));
-           if (sz == s->string().size()) {
-             push(stack, val);
-           } else {
-             std::stringstream error_str;
-             error_str << "invalid literal for int() "
-                       << "with base 10: '" << s->string() << "'";
-             throw std::runtime_error(error_str.str());
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Float.Tensor(Tensor a) -> float"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.item<double>());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Float.Scalar(Scalar a) -> float"),
-         [](Stack* stack) {
-           IValue scalar;
-           pop(stack, scalar);
-           if (scalar.isDouble()) {
-             push(stack, std::move(scalar));
-           } else if (scalar.isComplexDouble()) {
-             push(stack, scalar.toComplexDouble().real());
-           } else {
-             push(stack, static_cast<double>(scalar.toInt()));
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Float.int(int a) -> float"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           int64_t i;
-           pop(stack, i);
-           push(stack, (float)i);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Float.bool(bool a) -> float"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool b;
-           pop(stack, b);
-           push(stack, (float)b);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Float.str(str a) -> float"),
-         [](Stack* stack) {
-           auto s = pop(stack).toString();
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           std::string::size_type sz;
-           double b = c10::stod(s->string(), &sz);
-           if (sz == s->string().size()) {
-             push(stack, b);
-           } else {
-             std::stringstream error_str;
-             error_str << "could not convert string "
-                       << "to float: '" << s->string() << "'";
-             throw std::runtime_error(error_str.str());
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Complex.Scalar(Scalar a) -> complex"),
-         [](Stack* stack) {
-           IValue scalar;
-           pop(stack, scalar);
-           if (scalar.isComplexDouble()) {
-             push(stack, std::move(scalar));
-           } else if (scalar.isDouble()) {
-             push(stack, c10::complex<double>(scalar.toDouble(), 0));
-           } else {
-             push(stack, c10::complex<double>(scalar.toInt(), 0));
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"),
-         [](Stack* stack) {
-           at::Tensor a, b;
-           pop(stack, a, b);
-           push(
-               stack, c10::complex<double>(a.item<double>(), b.item<double>()));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::format(str self, ...) -> str"),
-         [](Stack* stack) {
-           size_t num_inputs = pop(stack).toInt();
-           format(*stack, num_inputs);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::einsum.sublist(Tensor a, ...) -> Tensor"),
-         [](Stack* stack) {
-           size_t num_inputs = pop(stack).toInt();
-           einsum(*stack, num_inputs);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.Scalar(Scalar a) -> Tensor"),
-         [](Stack* stack) {
-           at::Scalar s;
-           pop(stack, s);
-           push(stack, at::scalar_to_tensor(s));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::RaiseException(str msg) -> ()"),
-         [](Stack* stack) { throw JITException(pop(stack).toStringRef()); },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Size(int[] sizes) -> int[]"),
-         [](Stack* stack) {},
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::size(Tensor self) -> int[]"),
-         [](Stack* stack) {
-           auto t = std::move(pop(stack)).toTensor();
-           pack(stack, t.sizes().vec());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::EnumName(AnyEnumType enum) -> str"),
-         [](Stack* stack) {
-           IValue e = pop(stack);
-           push(stack, e.toEnumHolder()->name());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::EnumValue.int(AnyEnumType enum) -> int"),
-         [](Stack* stack) {
-           IValue e = pop(stack);
-           push(stack, e.toEnumHolder()->value());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "prim::EnumValue.float(AnyEnumType enum) -> float"),
-         [](Stack* stack) {
-           IValue e = pop(stack);
-           push(stack, e.toEnumHolder()->value());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::EnumValue.str(AnyEnumType enum) -> str"),
-         [](Stack* stack) {
-           IValue e = pop(stack);
-           push(stack, e.toEnumHolder()->value());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         // note the compiler knows to type TupleIndex more accurately than it
-         // is listed here.
-         TORCH_SELECTIVE_SCHEMA("prim::TupleIndex(Any tup, int i) -> Any"),
-         [](Stack* stack) {
-           int64_t index = pop(stack).toInt();
-           auto tuple = pop(stack).toTuple();
-           auto norm_index = normalizeIndex(index, tuple->elements().size());
-           if (norm_index < 0 ||
-               norm_index > static_cast<int64_t>(tuple->elements().size())) {
-             throw std::out_of_range("Tuple list index out of range");
-           }
-           stack->emplace_back(tuple->elements()[norm_index]);
-         },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::ne.int_list(int[] a, int[] b) -> bool"),
-         listNe<int64_t>,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)"),
-         noop,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::device(Tensor a) -> Device"),
-         [](Stack* stack) { push(stack, pop(stack).toTensor().device()); },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::dtype(Tensor a) -> int"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, static_cast<int64_t>(a.scalar_type()));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::__not__(bool self) -> bool"),
-         [](Stack* stack) { push(stack, !pop(stack).toBool()); },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::__is__(t1 self, t2 obj) -> bool"),
-         [](Stack* stack) {
-           IValue self, obj;
-           pop(stack, self, obj);
-           push(stack, self.is(obj));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::__isnot__(t1 self, t2 obj) -> bool"),
-         [](Stack* stack) {
-           IValue self, obj;
-           pop(stack, self, obj);
-           push(stack, !self.is(obj));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::element_size(Tensor self) -> int"),
-         [](Stack* stack) {
-           at::Tensor arg = pop(stack).toTensor();
-           push(stack, arg.element_size());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::numel(Tensor self) -> int"),
-         [](Stack* stack) {
-           at::Tensor arg = pop(stack).toTensor();
-           push(stack, arg.numel());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::dim(Tensor self) -> int"),
-         [](Stack* stack) {
-           at::Tensor arg = pop(stack).toTensor();
-           push(stack, arg.dim());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::get_device(Tensor self) -> int"),
-         [](Stack* stack) {
-           RECORD_FUNCTION("get_device", std::vector<c10::IValue>());
-           auto result =
-               at::get_device((std::move(peek(stack, 0, 1))).toTensor());
-           drop(stack, 1);
-           pack(stack, result);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::storage_offset(Tensor self) -> int"),
-         [](Stack* stack) {
-           RECORD_FUNCTION("storage_offset", std::vector<c10::IValue>());
-           auto result =
-               ((std::move(peek(stack, 0, 1))).toTensor()).storage_offset();
-           drop(stack, 1);
-           pack(stack, result);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::is_contiguous(Tensor self) -> bool"),
-         [](Stack* stack) {
-           RECORD_FUNCTION("is_contiguous", std::vector<c10::IValue>());
-           auto result =
-               ((std::move(peek(stack, 0, 1))).toTensor()).is_contiguous();
-           drop(stack, 1);
-           pack(stack, result);
-         },
-         aliasAnalysisFromSchema()),
-     // these ops are generic over the list element type.
-     // CREATING GENERIC_LIST_OPS
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::select.t(t[](a) list, int idx) -> t(*)"),
-         listSelect,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::__getitem__.t(t[](a) list, int idx) -> t(*)"),
-         listSelect,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)"),
-         listAppend,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::reverse.t(t[](a!) self) -> ()"),
-         listReverse,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::extend.t(t[](a!) self, t[] other) -> ()"),
-         listExtend,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::copy.t(t[](a) self) -> t[]"),
-         listCopy,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::_set_item.t(t [](a!) l, int idx, t(b -> *) el) -> t[](a!)"),
-         listSetItem,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::clear.t(t[](a!) self) -> ()"),
-         listClear,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::Delete.t(t[](a!) self, int idx) -> ()"),
-         listDelete,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::insert.t(t[](a!) self, int idx, t(b -> *) el) -> ()"),
-         listInsert,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::pop.t(t[](a!) self, int idx=-1) -> t(*)"),
-         listPop,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::add.t(t[] a, t[] b) -> t[]"),
-         listAdd,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::add_.t(t[](a!) self, t[] b) -> t[]"),
-         listInplaceAdd,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]"),
-         listSlice,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::list.t(t[] l) -> t[]"),
-         listList,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::mul.left_t(t[] l, int n) -> t[]"),
-         listMulIntLeft,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::mul.right_(int n, t[] l) -> t[]"),
-         listMulIntRight,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::mul_.t(t[](a!) l, int n) -> t[](a!)"),
-         listMulIntLeftInPlace,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::len.t(t[] a) -> int"),
-         listLen,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::eq.int_list(int[] a, int[] b) -> bool"),
-         listEq<int64_t>,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::eq.device(Device a, Device b) -> bool"),
-         [](Stack* stack) {
-           auto a = pop(stack).toDevice();
-           auto b = pop(stack).toDevice();
-           push(stack, a == b);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::ne.device(Device a, Device b) -> bool"),
-         [](Stack* stack) {
-           auto a = pop(stack).toDevice();
-           auto b = pop(stack).toDevice();
-           push(stack, a != b);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::eq.bool(bool a, bool b) -> bool"),
-         [](Stack* stack) {
-           auto a = pop(stack);
-           auto b = pop(stack);
-           push(stack, a == b);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::ne.bool(bool a, bool b) -> bool"),
-         [](Stack* stack) {
-           auto a = pop(stack);
-           auto b = pop(stack);
-           push(stack, a != b);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::Uninitialized() -> Any"),
-         [](Stack* stack) { push(stack, IValue::uninitialized()); },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::Print(...) -> ()"),
-         [](Stack* stack) {
-           auto num_inputs = pop(stack).toInt();
-           std::stringstream ss;
-           bool first = true;
-           for (const IValue& i : last(stack, num_inputs)) {
-             if (!first)
-               ss << " ";
-             first = false;
-             ss << i;
-           }
-           drop(stack, num_inputs);
-           ss << std::endl;
-           auto* handler = getPrintHandler();
-           TORCH_INTERNAL_ASSERT(handler);
-           handler(ss.str());
-         },
-         aliasAnalysisSpecialCase()),
-     // This is an alternative to aten::cat op that takes variable number of
-     // parameters as input.
-     // Format:
-     //    prim::VarConcat(Tensors..., dim) -> Tensor
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::VarConcat(...) -> Tensor"),
-         [](Stack* stack) {
-           auto num_inputs = pop(stack).toInt();
-           auto dim = pop(stack).toInt();
-           std::vector<at::Tensor> inputs(num_inputs - 1);
-           for (int i = 0; i < num_inputs - 1; ++i) {
-             inputs[num_inputs - 2 - i] = pop(stack).toTensor();
-           }
-           push(stack, at::cat(inputs, dim));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::VarStack(...) -> Tensor"),
-         [](Stack* stack) {
-           auto num_inputs = pop(stack).toInt();
-           auto dim = pop(stack).toInt();
-           std::vector<at::Tensor> inputs(num_inputs - 1);
-           for (int i = 0; i < num_inputs - 1; ++i) {
-             inputs[num_inputs - 2 - i] = pop(stack).toTensor();
-           }
-           push(stack, at::stack(inputs, dim));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"),
-         [](Stack* stack) {
-           IValue x = pop(stack);
-           IValue y = pop(stack);
-           push(stack, x == y);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"),
-         [](Stack* stack) {
-           IValue x = pop(stack);
-           IValue y = pop(stack);
-           push(stack, x != y);
-         },
-         aliasAnalysisFromSchema()),
-     // We define aten::dequantize in both native_functions.yaml and here,
-     // however, aten::dequantize.any defined here overrides
-     // aten::dequantize.tensors in native_functions.yaml. The variants here
-     // are only for graph mode quantization, and they should be removed once
-     // we deprecate graph mode quantization, and use the variants in
-     // native_functions.yaml.
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::dequantize.tensor(Tensor qtensor) -> Tensor"),
-         [](Stack* stack) {
-           at::Tensor qtensor;
-           pop(stack, qtensor);
-           push(stack, at::dequantize(qtensor));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]"),
-         [](Stack* stack) {
-           auto qtensors = pop(stack).toTensorVector();
-           push(stack, at::dequantize(qtensors));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::dequantize.any(Any tensors) -> Any"),
-         [](Stack* stack) { dequantize(*stack); },
-         aliasAnalysisFromSchema()),
-     DEFINE_UNARY_OP_WITH_COMPLEX(aten::log, std::log(a), float, float),
-     DEFINE_STRING_OP(aten::add, a + b, str),
-     DEFINE_COMPARISON_OP_WITH_COMPLEX(aten::eq, a == b),
-     DEFINE_COMPARISON_OP_WITH_COMPLEX(aten::ne, a != b),
-     DEFINE_GENERIC_OP(
-         aten::polar,
-         c10::polar(static_cast<double>(a), static_cast<double>(b)),
-         c10::polar(static_cast<double>(a), static_cast<double>(b)),
-         complex,
-         complex),
-     DEFINE_INT_FLOAT_OP(
-         aten::polar,
-         c10::polar(static_cast<double>(a), static_cast<double>(b)),
-         complex),
-     DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
-         aten::polar,
-         c10::polar(static_cast<double>(a), static_cast<double>(b)),
-         c10::polar(static_cast<double>(a), static_cast<double>(b)),
-         Scalar),
-     DEFINE_COMPARISON_OP(aten::lt, a < b),
-     DEFINE_COMPARISON_OP(aten::gt, a > b),
-     DEFINE_COMPARISON_OP(aten::le, a <= b),
-     DEFINE_COMPARISON_OP(aten::ge, a >= b),
-     DEFINE_BINARY_OP_WITH_COMPLEX(aten::add, a + b),
-     DEFINE_BINARY_OP_WITH_COMPLEX(aten::sub, a - b),
-     DEFINE_BINARY_OP_WITH_COMPLEX(aten::mul, a* b),
-     DEFINE_BOOL_OP(aten::__and__, a&& b),
-     DEFINE_BOOL_OP(aten::__or__, a || b),
-     DEFINE_BOOL_OP(aten::__xor__, a != b),
-     DEFINE_UNARY_OP(aten::round, round_to_even(a), float, float),
-     DEFINE_UNARY_OP(aten::floor, floor(a), int, int),
-     DEFINE_UNARY_OP(aten::ceil, ceil(a), int, int),
-     DEFINE_UNARY_OP_WITH_COMPLEX(aten::neg, -a, int, float),
-     DEFINE_UNARY_OP_WITH_COMPLEX(aten::exp, std::exp(a), float, float),
-     // Pass in two ops for handling int and float separately as % in C++ only
-     // works for int The modulus calculation is different between C++ and
-     // Python (on negative), we preserve the python behavior as it's more
-     // common and match python syntax, hence the conversion.
-     DEFINE_GENERIC_OP(
-         aten::remainder,
-         (b + (a % b)) % b,
-         fmod((b + fmod(a, b)), b),
-         int,
-         float),
-     DEFINE_INT_FLOAT_OP(aten::remainder, fmod((b + fmod(a, b)), b), float),
-     DEFINE_SCALAR_BINARY_OP(
-         aten::remainder,
-         (b + (a % b)) % b,
-         fmod((b + fmod(a, b)), b),
-         Scalar),
-     // NB: This is the python truediv operation
-     DEFINE_GENERIC_OP_WITH_COMPLEX(
-         aten::div,
-         static_cast<double>(a) / static_cast<double>(b),
-         a / b,
-         a / b,
-         float,
-         float,
-         complex),
-     DEFINE_SCALAR_BINARY_OP(
-         aten::div,
-         static_cast<double>(a) / static_cast<double>(b),
-         a / b,
-         float),
-     DEFINE_GENERIC_OP(
-         aten::floordiv,
-         floordiv(a, b),
-         std::floor(a / b),
-         int,
-         float),
-     DEFINE_INT_FLOAT_OP(aten::floordiv, std::floor(a / b), float),
-     DEFINE_SCALAR_BINARY_OP(
-         aten::floordiv,
-         floordiv(a, b),
-         std::floor(a / b),
-         Scalar),
-     // int ** int produces a float, because negative exponents produce float
-     // results
-     DEFINE_GENERIC_OP_WITH_COMPLEX(
-         aten::pow,
-         static_cast<double>(powWrapper(a, b)),
-         static_cast<double>(powWrapper(a, b)),
-         static_cast<c10::complex<double>>(pow(a, b)),
-         float,
-         float,
-         complex),
-     DEFINE_INT_FLOAT_OP(
-         aten::pow,
-         static_cast<double>(powWrapper(a, b)),
-         float),
-     DEFINE_FLOAT_COMPLEX_OP(aten::pow, pow(a, b), complex),
-     DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
-         aten::pow,
-         static_cast<double>(pow(a, b)),
-         static_cast<double>(pow(a, b)),
-         float),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::pow.int_to_int(int a, int b) -> int"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           int64_t a, b;
-           pop(stack, a, b);
-           push(stack, powWrapper(a, b));
-         },
-         aliasAnalysisFromSchema()),
-     // min and max are in prim:: because there is a difference between
-     // the python builtin 'min' and 'torch.min'
-     DEFINE_BINARY_OP(prim::min, a < b ? a : b),
-     DEFINE_BINARY_OP(prim::max, a > b ? a : b),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::type(Device self) -> str"),
-         [](Stack* stack) {
-           auto d = pop(stack);
-           push(
-               stack,
-               DeviceTypeName(d.toDevice().type(), /* lower_case=*/true));
-         },
-         aliasAnalysisFromSchema()),
-     // tensor length op (size of 1st dimension)
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::len.Tensor(Tensor t) -> int"),
-         [](Stack* stack) {
-           at::Tensor t = pop(stack).toTensor();
-           if (t.dim() == 0) {
-             AT_ERROR("len() of a 0-d tensor");
-           }
-           push(stack, t.sizes()[0]);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::ord(str string) -> int"),
-         [](Stack* stack) {
-           auto string = pop(stack).toStringRef();
-           TORCH_CHECK(
-               string.size() == 1,
-               "String for ord() must be 1 character, found ",
-               string.size());
-           uint8_t ord = string.at(0);
-           push(stack, int64_t(ord));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::lower(str self) -> str"),
-         [](Stack* stack) {
-           auto string = pop(stack).toStringRef();
-           std::stringstream ss;
-           for (char c : string) {
-             ss << static_cast<char>(::tolower(c));
-           }
-           push(stack, ss.str());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::__contains__.int_list(int[] l, int item) -> bool"),
-         listContains<int64_t>,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::__contains__.str_list(str[] l, str item) -> bool"),
-         listContains<std::string>,
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::len.str(str s) -> int"),
-         [](Stack* stack) {
-           auto string = pop(stack).toStringRef();
-           push(stack, static_cast<int64_t>(string.size()));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::dict() -> Dict(str, Tensor)"),
-         [](Stack* stack) {
-           auto dict =
-               c10::impl::GenericDict(StringType::get(), TensorType::get());
-           push(stack, dict);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::__getitem__.str(str s, int index) -> str"),
-         [](Stack* stack) {
-           auto index = pop(stack).toInt();
-           auto string = pop(stack).toStringRef();
-           auto norm_index = normalizeIndex(index, string.size());
-           char c = string.at(norm_index);
-           push(stack, std::string(&c, 1));
-         },
-         aliasAnalysisFromSchema()),
+static const OperatorGeneratorArgs opGenArgs[] = {
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::str(t elem) -> str"),
+        [](Stack* stack) {
+          std::stringstream ss;
+          ss << pop(stack);
+          push(stack, ss.str());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::list(str t) -> str[]"),
+        [](Stack* stack) {
+          auto str = pop(stack).toStringRef();
+          c10::List<std::string> chars;
+          chars.reserve(str.size());
+          for (auto c : str) {
+            chars.push_back(std::string(1, c));
+          }
+          push(stack, std::move(chars));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::cpu(Tensor(a) self) -> Tensor(a|b)"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.cpu());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::layout(Tensor a) -> int"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.layout());
+        },
+        aliasAnalysisFromSchema()),
+
+    // only used internally in range() translation
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::__range_length(int lo, int hi, int step) -> int"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int64_t lo, hi, step;
+          pop(stack, lo, hi, step);
+          // error handling when step == 0 at runtime
+          if (step == 0) {
+            throw std::runtime_error("range() arg 3 must not be zero");
+          }
+          if (step > 0 && lo < hi) {
+            push(stack, 1 + (hi - 1 - lo) / step);
+          } else if (step < 0 && lo > hi) {
+            push(stack, 1 + (lo - 1 - hi) / (0 - step));
+          } else {
+            push(stack, 0);
+          }
+        },
+        aliasAnalysisFromSchema()),
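For reference, a minimal standalone sketch (plain C++; not part of this file) of the closed-form length computed by the aten::__range_length lambda above. It should agree with Python's len(range(lo, hi, step)) for the same arguments:

#include <cassert>
#include <cstdint>
#include <stdexcept>

// Same arithmetic as the lambda above, expressed as a free function.
int64_t range_length(int64_t lo, int64_t hi, int64_t step) {
  if (step == 0) {
    throw std::runtime_error("range() arg 3 must not be zero");
  }
  if (step > 0 && lo < hi) {
    return 1 + (hi - 1 - lo) / step;
  }
  if (step < 0 && lo > hi) {
    return 1 + (lo - 1 - hi) / (-step);
  }
  return 0;
}

int main() {
  assert(range_length(0, 10, 3) == 4);  // 0, 3, 6, 9
  assert(range_length(10, 0, -2) == 5); // 10, 8, 6, 4, 2
  assert(range_length(5, 5, 1) == 0);   // empty range
  return 0;
}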
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::__derive_index(int index, int start, int step) -> int"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int64_t index, start, step;
+          pop(stack, index, start, step);
+          push(stack, start + index * step);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::TupleUnpack(Any tup) -> ..."),
+        [](Stack* stack) { tupleUnpack(*stack); },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::unchecked_cast(t x) -> t"),
+        noop,
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::IntImplicit(Tensor a) -> int"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          checkImplicitTensorToNum(a, /*to int*/ true);
+          push(stack, a.item<int64_t>());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::ComplexImplicit(Tensor a) -> complex"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          checkImplicitTensorToNum(a, /*to int*/ false);
+          push(stack, a.item<c10::complex<double>>());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::FloatImplicit(Tensor a) -> float"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          checkImplicitTensorToNum(a, /*to int*/ false);
+          push(stack, a.item<double>());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::ScalarImplicit(Tensor a) -> Scalar"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          checkImplicitTensorToNum(a, /*to int*/ false);
+          push(stack, a.item());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Bool.Tensor(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_nonzero());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Bool.int(int a) -> bool"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int64_t i;
+          pop(stack, i);
+          push(stack, (bool)i);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Bool.float(float a) -> bool"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          double d;
+          pop(stack, d);
+          push(stack, (bool)d);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Int.Tensor(Tensor a) -> int"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.item<int64_t>());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Int.bool(bool a) -> int"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool b;
+          pop(stack, b);
+          push(stack, static_cast<int64_t>(b));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Int.float(float a) -> int"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          double d;
+          pop(stack, d);
+          push(stack, static_cast<int64_t>(d));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Int.Scalar(Scalar a) -> int"),
+        [](Stack* stack) {
+          IValue scalar;
+          pop(stack, scalar);
+          if (scalar.isInt()) {
+            push(stack, std::move(scalar));
+          } else {
+            // toScalar() needed to avoid strict type check in IValue::toInt.
+            push(stack, static_cast<int64_t>(scalar.toScalar().toInt()));
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Int.str(str a) -> int"),
+        [](Stack* stack) {
+          auto s = pop(stack).toString();
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          std::string::size_type sz;
+          int64_t val = static_cast<int64_t>(c10::stoll(s->string(), &sz));
+          if (sz == s->string().size()) {
+            push(stack, val);
+          } else {
+            std::stringstream error_str;
+            error_str << "invalid literal for int() "
+                      << "with base 10: '" << s->string() << "'";
+            throw std::runtime_error(error_str.str());
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Float.Tensor(Tensor a) -> float"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.item<double>());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Float.Scalar(Scalar a) -> float"),
+        [](Stack* stack) {
+          IValue scalar;
+          pop(stack, scalar);
+          if (scalar.isDouble()) {
+            push(stack, std::move(scalar));
+          } else if (scalar.isComplexDouble()) {
+            push(stack, scalar.toComplexDouble().real());
+          } else {
+            push(stack, static_cast<double>(scalar.toInt()));
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Float.int(int a) -> float"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int64_t i;
+          pop(stack, i);
+          push(stack, (float)i);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Float.bool(bool a) -> float"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool b;
+          pop(stack, b);
+          push(stack, (float)b);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Float.str(str a) -> float"),
+        [](Stack* stack) {
+          auto s = pop(stack).toString();
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          std::string::size_type sz;
+          double b = c10::stod(s->string(), &sz);
+          if (sz == s->string().size()) {
+            push(stack, b);
+          } else {
+            std::stringstream error_str;
+            error_str << "could not convert string "
+                      << "to float: '" << s->string() << "'";
+            throw std::runtime_error(error_str.str());
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Complex.Scalar(Scalar a) -> complex"),
+        [](Stack* stack) {
+          IValue scalar;
+          pop(stack, scalar);
+          if (scalar.isComplexDouble()) {
+            push(stack, std::move(scalar));
+          } else if (scalar.isDouble()) {
+            push(stack, c10::complex<double>(scalar.toDouble(), 0));
+          } else {
+            push(stack, c10::complex<double>(scalar.toInt(), 0));
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"),
+        [](Stack* stack) {
+          at::Tensor a, b;
+          pop(stack, a, b);
+          push(stack, c10::complex<double>(a.item<double>(), b.item<double>()));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::format(str self, ...) -> str"),
+        [](Stack* stack) {
+          size_t num_inputs = pop(stack).toInt();
+          format(*stack, num_inputs);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::einsum.sublist(Tensor a, ...) -> Tensor"),
+        [](Stack* stack) {
+          size_t num_inputs = pop(stack).toInt();
+          einsum(*stack, num_inputs);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.Scalar(Scalar a) -> Tensor"),
+        [](Stack* stack) {
+          at::Scalar s;
+          pop(stack, s);
+          push(stack, at::scalar_to_tensor(s));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::RaiseException(str msg) -> ()"),
+        [](Stack* stack) { throw JITException(pop(stack).toStringRef()); },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Size(int[] sizes) -> int[]"),
+        [](Stack* stack) {},
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::size(Tensor self) -> int[]"),
+        [](Stack* stack) {
+          auto t = std::move(pop(stack)).toTensor();
+          pack(stack, t.sizes().vec());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::EnumName(AnyEnumType enum) -> str"),
+        [](Stack* stack) {
+          IValue e = pop(stack);
+          push(stack, e.toEnumHolder()->name());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::EnumValue.int(AnyEnumType enum) -> int"),
+        [](Stack* stack) {
+          IValue e = pop(stack);
+          push(stack, e.toEnumHolder()->value());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "prim::EnumValue.float(AnyEnumType enum) -> float"),
+        [](Stack* stack) {
+          IValue e = pop(stack);
+          push(stack, e.toEnumHolder()->value());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::EnumValue.str(AnyEnumType enum) -> str"),
+        [](Stack* stack) {
+          IValue e = pop(stack);
+          push(stack, e.toEnumHolder()->value());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        // note the compiler knows to type TupleIndex more accurately than it
+        // is listed here.
+        TORCH_SELECTIVE_SCHEMA("prim::TupleIndex(Any tup, int i) -> Any"),
+        [](Stack* stack) {
+          int64_t index = pop(stack).toInt();
+          auto tuple = pop(stack).toTuple();
+          auto norm_index = normalizeIndex(index, tuple->elements().size());
+          if (norm_index < 0 ||
+              norm_index > static_cast<int64_t>(tuple->elements().size())) {
+            throw std::out_of_range("Tuple list index out of range");
+          }
+          stack->emplace_back(tuple->elements()[norm_index]);
+        },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::ne.int_list(int[] a, int[] b) -> bool"),
+        listNe<int64_t>,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)"),
+        noop,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::device(Tensor a) -> Device"),
+        [](Stack* stack) { push(stack, pop(stack).toTensor().device()); },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::dtype(Tensor a) -> int"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, static_cast<int64_t>(a.scalar_type()));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::__not__(bool self) -> bool"),
+        [](Stack* stack) { push(stack, !pop(stack).toBool()); },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::__is__(t1 self, t2 obj) -> bool"),
+        [](Stack* stack) {
+          IValue self, obj;
+          pop(stack, self, obj);
+          push(stack, self.is(obj));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::__isnot__(t1 self, t2 obj) -> bool"),
+        [](Stack* stack) {
+          IValue self, obj;
+          pop(stack, self, obj);
+          push(stack, !self.is(obj));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::element_size(Tensor self) -> int"),
+        [](Stack* stack) {
+          at::Tensor arg = pop(stack).toTensor();
+          push(stack, arg.element_size());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::numel(Tensor self) -> int"),
+        [](Stack* stack) {
+          at::Tensor arg = pop(stack).toTensor();
+          push(stack, arg.numel());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::dim(Tensor self) -> int"),
+        [](Stack* stack) {
+          at::Tensor arg = pop(stack).toTensor();
+          push(stack, arg.dim());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::get_device(Tensor self) -> int"),
+        [](Stack* stack) {
+          RECORD_FUNCTION("get_device", std::vector<c10::IValue>());
+          auto result =
+              at::get_device((std::move(peek(stack, 0, 1))).toTensor());
+          drop(stack, 1);
+          pack(stack, result);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::storage_offset(Tensor self) -> int"),
+        [](Stack* stack) {
+          RECORD_FUNCTION("storage_offset", std::vector<c10::IValue>());
+          auto result =
+              ((std::move(peek(stack, 0, 1))).toTensor()).storage_offset();
+          drop(stack, 1);
+          pack(stack, result);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::is_contiguous(Tensor self) -> bool"),
+        [](Stack* stack) {
+          RECORD_FUNCTION("is_contiguous", std::vector<c10::IValue>());
+          auto result =
+              ((std::move(peek(stack, 0, 1))).toTensor()).is_contiguous();
+          drop(stack, 1);
+          pack(stack, result);
+        },
+        aliasAnalysisFromSchema()),
+    // these ops are generic over the list element type.
+    // CREATING GENERIC_LIST_OPS
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::select.t(t[](a) list, int idx) -> t(*)"),
+        listSelect,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::__getitem__.t(t[](a) list, int idx) -> t(*)"),
+        listSelect,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)"),
+        listAppend,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::reverse.t(t[](a!) self) -> ()"),
+        listReverse,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::extend.t(t[](a!) self, t[] other) -> ()"),
+        listExtend,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::copy.t(t[](a) self) -> t[]"),
+        listCopy,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::_set_item.t(t [](a!) l, int idx, t(b -> *) el) -> t[](a!)"),
+        listSetItem,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::clear.t(t[](a!) self) -> ()"),
+        listClear,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::Delete.t(t[](a!) self, int idx) -> ()"),
+        listDelete,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::insert.t(t[](a!) self, int idx, t(b -> *) el) -> ()"),
+        listInsert,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::pop.t(t[](a!) self, int idx=-1) -> t(*)"),
+        listPop,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::add.t(t[] a, t[] b) -> t[]"),
+        listAdd,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::add_.t(t[](a!) self, t[] b) -> t[]"),
+        listInplaceAdd,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]"),
+        listSlice,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::list.t(t[] l) -> t[]"),
+        listList,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::mul.left_t(t[] l, int n) -> t[]"),
+        listMulIntLeft,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::mul.right_(int n, t[] l) -> t[]"),
+        listMulIntRight,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::mul_.t(t[](a!) l, int n) -> t[](a!)"),
+        listMulIntLeftInPlace,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::len.t(t[] a) -> int"),
+        listLen,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::eq.int_list(int[] a, int[] b) -> bool"),
+        listEq<int64_t>,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::eq.device(Device a, Device b) -> bool"),
+        [](Stack* stack) {
+          auto a = pop(stack).toDevice();
+          auto b = pop(stack).toDevice();
+          push(stack, a == b);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::ne.device(Device a, Device b) -> bool"),
+        [](Stack* stack) {
+          auto a = pop(stack).toDevice();
+          auto b = pop(stack).toDevice();
+          push(stack, a != b);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::eq.bool(bool a, bool b) -> bool"),
+        [](Stack* stack) {
+          auto a = pop(stack);
+          auto b = pop(stack);
+          push(stack, a == b);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::ne.bool(bool a, bool b) -> bool"),
+        [](Stack* stack) {
+          auto a = pop(stack);
+          auto b = pop(stack);
+          push(stack, a != b);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::Uninitialized() -> Any"),
+        [](Stack* stack) { push(stack, IValue::uninitialized()); },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::Print(...) -> ()"),
+        [](Stack* stack) {
+          auto num_inputs = pop(stack).toInt();
+          std::stringstream ss;
+          bool first = true;
+          for (const IValue& i : last(stack, num_inputs)) {
+            if (!first)
+              ss << " ";
+            first = false;
+            ss << i;
+          }
+          drop(stack, num_inputs);
+          ss << std::endl;
+          auto* handler = getPrintHandler();
+          TORCH_INTERNAL_ASSERT(handler);
+          handler(ss.str());
+        },
+        aliasAnalysisSpecialCase()),
+    // This is an alternative to the aten::cat op that takes a variable number
+    // of parameters as input.
+    // Format:
+    //    prim::VarConcat(Tensors..., dim) -> Tensor
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::VarConcat(...) -> Tensor"),
+        [](Stack* stack) {
+          auto num_inputs = pop(stack).toInt();
+          auto dim = pop(stack).toInt();
+          std::vector<at::Tensor> inputs(num_inputs - 1);
+          for (int i = 0; i < num_inputs - 1; ++i) {
+            inputs[num_inputs - 2 - i] = pop(stack).toTensor();
+          }
+          push(stack, at::cat(inputs, dim));
+        },
+        aliasAnalysisFromSchema()),
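A small standalone sketch (plain C++; the int values are stand-ins for tensors and are not part of this file) of the stack layout the prim::VarConcat lambda above assumes: the tensors are pushed first, then dim, then the total input count, so popping yields them in reverse and the inputs vector is filled back to front:

#include <cassert>
#include <vector>

int main() {
  // Stand-in stack for VarConcat(t0, t1, t2, dim=1): 10, 11, 12 mimic the
  // three tensors, then dim, then num_inputs (three tensors + dim = 4).
  std::vector<int> stack = {10, 11, 12, /*dim=*/1, /*num_inputs=*/4};
  int num_inputs = stack.back(); stack.pop_back();
  int dim = stack.back(); stack.pop_back();
  std::vector<int> inputs(num_inputs - 1);
  for (int i = 0; i < num_inputs - 1; ++i) {
    inputs[num_inputs - 2 - i] = stack.back(); // fill back to front
    stack.pop_back();
  }
  assert(dim == 1);
  assert(inputs[0] == 10 && inputs[1] == 11 && inputs[2] == 12);
  return 0;
}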
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::VarStack(...) -> Tensor"),
+        [](Stack* stack) {
+          auto num_inputs = pop(stack).toInt();
+          auto dim = pop(stack).toInt();
+          std::vector<at::Tensor> inputs(num_inputs - 1);
+          for (int i = 0; i < num_inputs - 1; ++i) {
+            inputs[num_inputs - 2 - i] = pop(stack).toTensor();
+          }
+          push(stack, at::stack(inputs, dim));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"),
+        [](Stack* stack) {
+          IValue x = pop(stack);
+          IValue y = pop(stack);
+          push(stack, x == y);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"),
+        [](Stack* stack) {
+          IValue x = pop(stack);
+          IValue y = pop(stack);
+          push(stack, x != y);
+        },
+        aliasAnalysisFromSchema()),
+    // We define aten::dequantize both in native_functions.yaml and here;
+    // aten::dequantize.any defined here overrides aten::dequantize.tensors in
+    // native_functions.yaml. The variants here exist only for graph mode
+    // quantization and should be removed, in favor of the variants in
+    // native_functions.yaml, once graph mode quantization is deprecated.
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::dequantize.tensor(Tensor qtensor) -> Tensor"),
+        [](Stack* stack) {
+          at::Tensor qtensor;
+          pop(stack, qtensor);
+          push(stack, at::dequantize(qtensor));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]"),
+        [](Stack* stack) {
+          auto qtensors = pop(stack).toTensorVector();
+          push(stack, at::dequantize(qtensors));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::dequantize.any(Any tensors) -> Any"),
+        [](Stack* stack) { dequantize(*stack); },
+        aliasAnalysisFromSchema()),
+    DEFINE_UNARY_OP_WITH_COMPLEX(aten::log, std::log(a), float, float),
+    DEFINE_STRING_OP(aten::add, a + b, str),
+    DEFINE_COMPARISON_OP_WITH_COMPLEX(aten::eq, a == b),
+    DEFINE_COMPARISON_OP_WITH_COMPLEX(aten::ne, a != b),
+    DEFINE_GENERIC_OP(
+        aten::polar,
+        c10::polar(static_cast<double>(a), static_cast<double>(b)),
+        c10::polar(static_cast<double>(a), static_cast<double>(b)),
+        complex,
+        complex),
+    DEFINE_INT_FLOAT_OP(
+        aten::polar,
+        c10::polar(static_cast<double>(a), static_cast<double>(b)),
+        complex),
+    DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
+        aten::polar,
+        c10::polar(static_cast<double>(a), static_cast<double>(b)),
+        c10::polar(static_cast<double>(a), static_cast<double>(b)),
+        Scalar),
+    DEFINE_COMPARISON_OP(aten::lt, a < b),
+    DEFINE_COMPARISON_OP(aten::gt, a > b),
+    DEFINE_COMPARISON_OP(aten::le, a <= b),
+    DEFINE_COMPARISON_OP(aten::ge, a >= b),
+    DEFINE_BINARY_OP_WITH_COMPLEX(aten::add, a + b),
+    DEFINE_BINARY_OP_WITH_COMPLEX(aten::sub, a - b),
+    DEFINE_BINARY_OP_WITH_COMPLEX(aten::mul, a* b),
+    DEFINE_BOOL_OP(aten::__and__, a&& b),
+    DEFINE_BOOL_OP(aten::__or__, a || b),
+    DEFINE_BOOL_OP(aten::__xor__, a != b),
+    DEFINE_UNARY_OP(aten::round, round_to_even(a), float, float),
+    DEFINE_UNARY_OP(aten::floor, floor(a), int, int),
+    DEFINE_UNARY_OP(aten::ceil, ceil(a), int, int),
+    DEFINE_UNARY_OP_WITH_COMPLEX(aten::neg, -a, int, float),
+    DEFINE_UNARY_OP_WITH_COMPLEX(aten::exp, std::exp(a), float, float),
+    // Pass in two ops to handle int and float separately, since % in C++ only
+    // works for int. The modulus calculation differs between C++ and Python
+    // for negative operands; we preserve the Python behavior, since it is more
+    // common and matches Python syntax, hence the conversion.
+    DEFINE_GENERIC_OP(
+        aten::remainder,
+        (b + (a % b)) % b,
+        fmod((b + fmod(a, b)), b),
+        int,
+        float),
+    DEFINE_INT_FLOAT_OP(aten::remainder, fmod((b + fmod(a, b)), b), float),
+    DEFINE_SCALAR_BINARY_OP(
+        aten::remainder,
+        (b + (a % b)) % b,
+        fmod((b + fmod(a, b)), b),
+        Scalar),
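A minimal sketch (plain C++; not part of this file) contrasting the built-in '%' with the (b + (a % b)) % b form used by the aten::remainder overloads above; the latter gives the result the sign of the divisor, matching Python:

#include <cassert>
#include <cstdint>

// Python-style modulus: the result takes the sign of the divisor b.
int64_t python_mod(int64_t a, int64_t b) {
  return (b + (a % b)) % b;
}

int main() {
  assert(-7 % 3 == -1);            // C++ '%' truncates toward zero
  assert(python_mod(-7, 3) == 2);  // Python: -7 % 3 == 2
  assert(python_mod(7, -3) == -2); // Python: 7 % -3 == -2
  return 0;
}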
+    // NB: This is the python truediv operation
+    DEFINE_GENERIC_OP_WITH_COMPLEX(
+        aten::div,
+        static_cast<double>(a) / static_cast<double>(b),
+        a / b,
+        a / b,
+        float,
+        float,
+        complex),
+    DEFINE_SCALAR_BINARY_OP(
+        aten::div,
+        static_cast<double>(a) / static_cast<double>(b),
+        a / b,
+        float),
+    DEFINE_GENERIC_OP(
+        aten::floordiv,
+        floordiv(a, b),
+        std::floor(a / b),
+        int,
+        float),
+    DEFINE_INT_FLOAT_OP(aten::floordiv, std::floor(a / b), float),
+    DEFINE_SCALAR_BINARY_OP(
+        aten::floordiv,
+        floordiv(a, b),
+        std::floor(a / b),
+        Scalar),
+    // int ** int produces a float, because negative exponents produce float
+    // results
+    DEFINE_GENERIC_OP_WITH_COMPLEX(
+        aten::pow,
+        static_cast<double>(powWrapper(a, b)),
+        static_cast<double>(powWrapper(a, b)),
+        static_cast<c10::complex<double>>(pow(a, b)),
+        float,
+        float,
+        complex),
+    DEFINE_INT_FLOAT_OP(
+        aten::pow,
+        static_cast<double>(powWrapper(a, b)),
+        float),
+    DEFINE_FLOAT_COMPLEX_OP(aten::pow, pow(a, b), complex),
+    DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
+        aten::pow,
+        static_cast<double>(pow(a, b)),
+        static_cast<double>(pow(a, b)),
+        float),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::pow.int_to_int(int a, int b) -> int"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int64_t a, b;
+          pop(stack, a, b);
+          push(stack, powWrapper(a, b));
+        },
+        aliasAnalysisFromSchema()),
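A short sketch (plain C++; not part of this file) of the behavior described by the comment above: the generic aten::pow overloads promote int ** int to double so that negative exponents have a well-defined result, while aten::pow.int_to_int keeps the int -> int case:

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  // Float path: 2 ** -1 must be 0.5, which has no integer representation.
  double r = std::pow(2.0, -1.0);
  assert(r > 0.49 && r < 0.51);

  // Int path, as in pow.int_to_int: non-negative exponent, integer result.
  int64_t base = 2, exp = 10, result = 1;
  for (int64_t i = 0; i < exp; ++i) {
    result *= base;
  }
  assert(result == 1024);
  return 0;
}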
+    // min and max are in prim:: because there is a difference between
+    // the python builtin 'min' and 'torch.min'
+    DEFINE_BINARY_OP(prim::min, a < b ? a : b),
+    DEFINE_BINARY_OP(prim::max, a > b ? a : b),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::type(Device self) -> str"),
+        [](Stack* stack) {
+          auto d = pop(stack);
+          push(
+              stack, DeviceTypeName(d.toDevice().type(), /* lower_case=*/true));
+        },
+        aliasAnalysisFromSchema()),
+    // tensor length op (size of 1st dimension)
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::len.Tensor(Tensor t) -> int"),
+        [](Stack* stack) {
+          at::Tensor t = pop(stack).toTensor();
+          if (t.dim() == 0) {
+            AT_ERROR("len() of a 0-d tensor");
+          }
+          push(stack, t.sizes()[0]);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::ord(str string) -> int"),
+        [](Stack* stack) {
+          auto string = pop(stack).toStringRef();
+          TORCH_CHECK(
+              string.size() == 1,
+              "String for ord() must be 1 character, found ",
+              string.size());
+          uint8_t ord = string.at(0);
+          push(stack, int64_t(ord));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::lower(str self) -> str"),
+        [](Stack* stack) {
+          auto string = pop(stack).toStringRef();
+          std::stringstream ss;
+          for (char c : string) {
+            ss << static_cast<char>(::tolower(c));
+          }
+          push(stack, ss.str());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::__contains__.int_list(int[] l, int item) -> bool"),
+        listContains<int64_t>,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::__contains__.str_list(str[] l, str item) -> bool"),
+        listContains<std::string>,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::len.str(str s) -> int"),
+        [](Stack* stack) {
+          auto string = pop(stack).toStringRef();
+          push(stack, static_cast<int64_t>(string.size()));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::dict() -> Dict(str, Tensor)"),
+        [](Stack* stack) {
+          auto dict =
+              c10::impl::GenericDict(StringType::get(), TensorType::get());
+          push(stack, dict);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::__getitem__.str(str s, int index) -> str"),
+        [](Stack* stack) {
+          auto index = pop(stack).toInt();
+          auto string = pop(stack).toStringRef();
+          auto norm_index = normalizeIndex(index, string.size());
+          char c = string.at(norm_index);
+          push(stack, std::string(&c, 1));
+        },
+        aliasAnalysisFromSchema()),
 #define CREATE_COPY_OP(other_type, c_type)                               \
-  OperatorGenerator(                                                     \
+  OperatorGeneratorArgs(                                                 \
       TORCH_SELECTIVE_SCHEMA("aten::copy_." #other_type                  \
                              "(Tensor(a!) self, " #other_type            \
                              " other) -> Tensor(a!)"),                   \
@@ -1035,170 +950,168 @@ RegisterOperators reg(
       },                                                                 \
       aliasAnalysisFromSchema())
 
-     CREATE_COPY_OP(Tensor, at::Tensor),
-     CREATE_COPY_OP(int, int64_t),
-     CREATE_COPY_OP(float, double),
+    CREATE_COPY_OP(Tensor, at::Tensor),
+    CREATE_COPY_OP(int, int64_t),
+    CREATE_COPY_OP(float, double),
 #undef CREATE_COPY_OP
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::backward(Tensor self, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()"),
-         [](Stack* stack) {
-           bool create_graph = pop(stack).toBool();
-           auto retain_graph = pop(stack).toOptional<bool>();
-           IValue gradient_ivalue = pop(stack);
-           at::Tensor gradient = gradient_ivalue.isNone()
-               ? at::Tensor()
-               : gradient_ivalue.toTensor();
-           at::Tensor self = pop(stack).toTensor();
-           bool keep_graph = retain_graph ? retain_graph.value() : create_graph;
-           self.backward(gradient, keep_graph, create_graph);
-         },
-         aliasAnalysisConservative()),
-     //
-     // create a clone of these declarations with a _hacked_twin overload name
-     // and nullability scrubbed from TensorList arg types
-     // TODO find out why this exists and how to do it without the hack
-     //
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"),
-         [](Stack* stack) {
-           auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
-           auto self = pop(stack).toTensor();
-           auto result = at::index(self, indices);
-           push(stack, std::move(result));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::_index_put_impl_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)"),
-         [](Stack* stack) {
-           auto unsafe = pop(stack).toBool();
-           auto accumulate = pop(stack).toBool();
-           auto values = pop(stack).toTensor();
-           auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
-           auto self = pop(stack).toTensor();
-           auto result =
-               at::_index_put_impl_(self, indices, values, accumulate, unsafe);
-           push(stack, std::move(result));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"),
-         [](Stack* stack) {
-           auto accumulate = pop(stack).toBool();
-           auto values = pop(stack).toTensor();
-           auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
-           auto self = pop(stack).toTensor();
-           auto result = at::index_put_(self, indices, values, accumulate);
-           push(stack, std::move(result));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"),
-         [](Stack* stack) {
-           auto accumulate = pop(stack).toBool();
-           auto values = pop(stack).toTensor();
-           auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
-           auto self = pop(stack).toTensor();
-           auto result = at::index_put_(self, indices, values, accumulate);
-           push(stack, std::move(result));
-         },
-         aliasAnalysisFromSchema()),
-     // reference function parse_to_conversion in python_arg_parsing.h
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool non_blocking;
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool copy;
-           pop(stack, non_blocking, copy);
-           c10::optional<at::ScalarType> scalarType =
-               pop(stack).toOptional<at::ScalarType>();
-           c10::optional<c10::Device> device =
-               pop(stack).toOptional<c10::Device>();
-           at::Tensor self = pop(stack).toTensor();
-           push(
-               stack,
-               to_dispatch(self, device, scalarType, non_blocking, copy));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool non_blocking;
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool copy;
-           pop(stack, non_blocking, copy);
-           c10::optional<at::ScalarType> scalarType =
-               pop(stack).toOptional<at::ScalarType>();
-           c10::optional<c10::Device> device = c10::nullopt;
-           at::Tensor self = pop(stack).toTensor();
-           push(
-               stack,
-               to_dispatch(self, device, scalarType, non_blocking, copy));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_cuda(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_cuda());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_xpu(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_xpu());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::data(Tensor(a) a) -> Tensor(a)"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, autograd::Variable(a).variable_data());
-         },
-         aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::backward(Tensor self, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()"),
+        [](Stack* stack) {
+          bool create_graph = pop(stack).toBool();
+          auto retain_graph = pop(stack).toOptional<bool>();
+          IValue gradient_ivalue = pop(stack);
+          at::Tensor gradient = gradient_ivalue.isNone()
+              ? at::Tensor()
+              : gradient_ivalue.toTensor();
+          at::Tensor self = pop(stack).toTensor();
+          bool keep_graph = retain_graph ? retain_graph.value() : create_graph;
+          self.backward(gradient, keep_graph, create_graph);
+        },
+        aliasAnalysisConservative()),
+    //
+    // create a clone of these declarations with a _hacked_twin overload name
+    // and nullability scrubbed from TensorList arg types
+    // TODO find out why this exists and how to do it without the hack
+    //
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"),
+        [](Stack* stack) {
+          auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
+          auto self = pop(stack).toTensor();
+          auto result = at::index(self, indices);
+          push(stack, std::move(result));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::_index_put_impl_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)"),
+        [](Stack* stack) {
+          auto unsafe = pop(stack).toBool();
+          auto accumulate = pop(stack).toBool();
+          auto values = pop(stack).toTensor();
+          auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
+          auto self = pop(stack).toTensor();
+          auto result =
+              at::_index_put_impl_(self, indices, values, accumulate, unsafe);
+          push(stack, std::move(result));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"),
+        [](Stack* stack) {
+          auto accumulate = pop(stack).toBool();
+          auto values = pop(stack).toTensor();
+          auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
+          auto self = pop(stack).toTensor();
+          auto result = at::index_put_(self, indices, values, accumulate);
+          push(stack, std::move(result));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"),
+        [](Stack* stack) {
+          auto accumulate = pop(stack).toBool();
+          auto values = pop(stack).toTensor();
+          auto indices = pop(stack).to<List<c10::optional<at::Tensor>>>();
+          auto self = pop(stack).toTensor();
+          auto result = at::index_put_(self, indices, values, accumulate);
+          push(stack, std::move(result));
+        },
+        aliasAnalysisFromSchema()),
+    // reference function parse_to_conversion in python_arg_parsing.h
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool non_blocking;
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool copy;
+          pop(stack, non_blocking, copy);
+          c10::optional<at::ScalarType> scalarType =
+              pop(stack).toOptional<at::ScalarType>();
+          c10::optional<c10::Device> device =
+              pop(stack).toOptional<c10::Device>();
+          at::Tensor self = pop(stack).toTensor();
+          push(
+              stack, to_dispatch(self, device, scalarType, non_blocking, copy));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool non_blocking;
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool copy;
+          pop(stack, non_blocking, copy);
+          c10::optional<at::ScalarType> scalarType =
+              pop(stack).toOptional<at::ScalarType>();
+          c10::optional<c10::Device> device = c10::nullopt;
+          at::Tensor self = pop(stack).toTensor();
+          push(
+              stack, to_dispatch(self, device, scalarType, non_blocking, copy));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_cuda(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_cuda());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_xpu(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_xpu());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::data(Tensor(a) a) -> Tensor(a)"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, autograd::Variable(a).variable_data());
+        },
+        aliasAnalysisFromSchema()),
 // these ops are not defined for Tensor
 #define CREATE_COMPARATOR_LIST_OPS_SPECIALIZED(decl_type, value_type)        \
-  OperatorGenerator(                                                         \
+  OperatorGeneratorArgs(                                                     \
       TORCH_SELECTIVE_SCHEMA("prim::min." decl_type "_list(" decl_type       \
                              "[] l, " decl_type "[] r) -> " decl_type "[]"), \
       minList<value_type>,                                                   \
       aliasAnalysisFromSchema()),                                            \
-      OperatorGenerator(                                                     \
+      OperatorGeneratorArgs(                                                 \
           TORCH_SELECTIVE_SCHEMA("prim::max." decl_type "_list(" decl_type   \
                                  "[] l, " decl_type "[] r) -> " decl_type    \
                                  "[]"),                                      \
           maxList<value_type>,                                               \
           aliasAnalysisFromSchema()),                                        \
-      OperatorGenerator(                                                     \
+      OperatorGeneratorArgs(                                                 \
           TORCH_SELECTIVE_SCHEMA("prim::min.self_" decl_type "(" decl_type   \
                                  "[] self) -> " decl_type),                  \
           listMin<value_type>,                                               \
           aliasAnalysisFromSchema()),                                        \
-      OperatorGenerator(                                                     \
+      OperatorGeneratorArgs(                                                 \
           TORCH_SELECTIVE_SCHEMA("prim::max.self_" decl_type "(" decl_type   \
                                  "[] self) -> " decl_type),                  \
           listMax<value_type>,                                               \
           aliasAnalysisFromSchema()),
-     CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("int", int64_t)
-         CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("float", double)
-             CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("bool", bool)
+    CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("int", int64_t)
+        CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("float", double)
+            CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("bool", bool)
 #undef CREATE_COMPARATOR_LIST_OPS_SPECIALIZED
 // python string is methods return false if empty
 #define DEFINE_STRING_IS_OP(op_name, char_op)                          \
-  OperatorGenerator(                                                   \
+  OperatorGeneratorArgs(                                               \
       TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> bool"),           \
       [](Stack* stack) {                                               \
         auto string = pop(stack).toStringRef();                        \
@@ -1211,15 +1124,15 @@ RegisterOperators reg(
       },                                                               \
       aliasAnalysisFromSchema())
 
-                 DEFINE_STRING_IS_OP(aten::isdigit, ::isdigit),
-     DEFINE_STRING_IS_OP(aten::isspace, ::isspace),
-     DEFINE_STRING_IS_OP(aten::isalnum, ::isalnum),
-     DEFINE_STRING_IS_OP(aten::isalpha, ::isalpha),
-     DEFINE_STRING_IS_OP(aten::isdecimal, ::isdigit),
-     DEFINE_STRING_IS_OP(aten::isnumeric, ::isdigit),
+                DEFINE_STRING_IS_OP(aten::isdigit, ::isdigit),
+    DEFINE_STRING_IS_OP(aten::isspace, ::isspace),
+    DEFINE_STRING_IS_OP(aten::isalnum, ::isalnum),
+    DEFINE_STRING_IS_OP(aten::isalpha, ::isalpha),
+    DEFINE_STRING_IS_OP(aten::isdecimal, ::isdigit),
+    DEFINE_STRING_IS_OP(aten::isnumeric, ::isdigit),
 
 #define DEFINE_STRING_CHAR_MAP_OP(op_name, char_op)         \
-  OperatorGenerator(                                        \
+  OperatorGeneratorArgs(                                    \
       TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> str"), \
       [](Stack* stack) {                                    \
         auto string = pop(stack).toStringRef();             \
@@ -1231,14 +1144,121 @@ RegisterOperators reg(
       },                                                    \
       aliasAnalysisFromSchema())
 
-     DEFINE_STRING_CHAR_MAP_OP(aten::upper, ::toupper),
-     DEFINE_STRING_CHAR_MAP_OP(aten::swapcase, ([](char c) {
-                                 if (c == static_cast<char>(::toupper(c))) {
-                                   return static_cast<char>(::tolower(c));
-                                 } else {
-                                   return static_cast<char>(::toupper(c));
-                                 }
-                               }))});
+    DEFINE_STRING_CHAR_MAP_OP(aten::upper, ::toupper),
+    DEFINE_STRING_CHAR_MAP_OP(aten::swapcase, ([](char c) {
+                                if (c == static_cast<char>(::toupper(c))) {
+                                  return static_cast<char>(::tolower(c));
+                                } else {
+                                  return static_cast<char>(::toupper(c));
+                                }
+                              }))};
+
+static std::vector<c10::optional<Operator>> createOperators(
+    const OperatorGeneratorArgs* args,
+    int length) {
+  std::vector<c10::optional<Operator>> result;
+  result.reserve(length);
+  for (int ii = 0; ii < length; ++ii) {
+    if (args[ii].schema_str) {
+      if (args[ii].isOperationCreator) {
+        result.push_back(OperatorGenerator(
+            args[ii].schema_str,
+            args[ii].operationCreator,
+            args[ii].aliasAnalysis));
+      } else {
+        result.push_back(OperatorGenerator(
+            args[ii].schema_str, args[ii].operation, args[ii].aliasAnalysis));
+      }
+    }
+  }
+  return result;
+}
+
+RegisterOperators reg(([]() {
+  auto v = createOperators(opGenArgs, sizeof(opGenArgs) / sizeof(opGenArgs[0]));
+  v.push_back(Operator(
+      prim::tolist,
+      // This operator has to be unschematized because the return type
+      // depends on the type hint and input. The implementation of this
+      // operator below is intended to be as close to the Python
+      // implementation in torch/csrc/utils/tensor_list.cpp as possible.
+      [](const Node* /*node*/) -> Operation {
+        return [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int elem_ty_val;
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int dim_val;
+          at::Tensor t;
+
+          pop(stack, elem_ty_val);
+          pop(stack, dim_val);
+          pop(stack, t);
+
+          // If the Tensor is not on the CPU, transfer it.
+          if (!t.device().is_cpu()) {
+            t = t.cpu();
+          }
+
+          // Rebuild the output type using elem_ty_val and dim_val. Start
+          // with the element type corresponding to elem_ty_val.
+          TypePtr out_ty;
+          if (elem_ty_val == 0) {
+            out_ty = IntType::get();
+          } else if (elem_ty_val == 1) {
+            out_ty = FloatType::get();
+          } else if (elem_ty_val == 2) {
+            out_ty = BoolType::get();
+          } else if (elem_ty_val == 3) {
+            out_ty = ComplexType::get();
+          } else {
+            TORCH_CHECK(
+                false,
+                "Unsupported element type for tolist; only int, float, complex and bool are supported");
+          }
+
+          // Check that the type of the Tensor matches that of the annotation.
+          // Make an exception for the case in which the annotated type is
+          // float/complex and the Tensor data type is also float/complex;
+          // the elements will be cast to double/c10::complex<double>
+          // later.
+          TORCH_CHECK(
+              (out_ty == FloatType::get() && t.is_floating_point()) ||
+                  (out_ty == ComplexType::get() && t.is_complex()) ||
+                  tryScalarTypeFromJitType(out_ty) == t.scalar_type(),
+              "Output annotation element type and runtime tensor element type must match for tolist()");
+
+          // Check that the dimension of the Tensor matches that of the
+          // annotation.
+          TORCH_CHECK(
+              dim_val == t.dim(),
+              "Output annotation list dimension and runtime tensor dimension must match for tolist()");
+
+          // Wrap out_ty in a ListType dim times.
+          for (const auto i : c10::irange(dim_val)) {
+            (void)i; // Suppress unused variable warning
+            out_ty = ListType::create(out_ty);
+          }
+
+          int64_t dim = t.dim();
+          auto sizes = t.sizes();
+          auto strides = t.strides();
+          size_t element_size = t.element_size();
+          char* data = static_cast<char*>(t.data_ptr());
+          auto result = tensorToListRecursive(
+              data,
+              0,
+              dim,
+              out_ty,
+              t.scalar_type(),
+              sizes,
+              strides,
+              element_size);
+          push(stack, std::move(result));
+        };
+      },
+      aliasAnalysisSpecialCase()));
+  return v;
+})());
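A standalone sketch of the registration shape used above, with hypothetical names (Op, kTable, collect) that are not part of the real file: a single static table of trivially constructible entries is turned into registered operators by one loop, and entries whose schema string is null (e.g. filtered out by a selective build) are skipped, mirroring the schema_str check in createOperators:

#include <cstddef>
#include <vector>

struct Op {
  const char* schema;
  void (*fn)();
};

static void noopImpl() {}

// One line of data per operator instead of one registration call per operator.
static constexpr Op kTable[] = {
    {"aten::example_a() -> ()", noopImpl}, // hypothetical schema strings
    {"aten::example_b() -> ()", noopImpl},
    {nullptr, nullptr},                    // stands in for a deselected op
};

std::vector<Op> collect(const Op* table, std::size_t n) {
  std::vector<Op> out;
  out.reserve(n);
  for (std::size_t i = 0; i < n; ++i) {
    if (table[i].schema) { // skip entries removed by selective build
      out.push_back(table[i]);
    }
  }
  return out;
}

int main() {
  auto ops = collect(kTable, sizeof(kTable) / sizeof(kTable[0]));
  return ops.size() == 2 ? 0 : 1;
}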
 
 void dictSetItem(Stack* stack) {
   auto value = pop(stack);
@@ -1408,123 +1428,125 @@ void dictConstructFromList(Stack* stack) {
 }
 
 #define CREATE_DICT_OPS(key_type)                                              \
-  OperatorGenerator(                                                           \
+  OperatorGeneratorArgs(                                                       \
       TORCH_SELECTIVE_SCHEMA("aten::len.Dict_" key_type "(Dict(" key_type      \
                              ", t) self) -> int"),                             \
       dictLen,                                                                 \
       aliasAnalysisFromSchema()),                                              \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::keys." key_type "(Dict(" key_type      \
                                  ", t) self) -> " key_type "[](*)"),           \
           dictKeys,                                                            \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::values." key_type "(Dict(" key_type    \
                                  ", t) self) -> t[](*)"),                      \
           dictValues,                                                          \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::__getitem__.Dict_" key_type            \
                                  "(Dict(" key_type ", t) self, " key_type      \
                                  " key) -> t(*)"),                             \
           dictIndex,                                                           \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::get." key_type "(Dict(" key_type       \
                                  ", t) self, " key_type " key) -> t(*)?"),     \
           dictGet<false>,                                                      \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::get.default_" key_type                 \
                                  "(Dict(" key_type ", t) self, " key_type      \
                                  " key, t default_value) -> t(*)"),            \
           dictGet<true>,                                                       \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA(                                              \
               "aten::setdefault." key_type "(Dict(" key_type                   \
               ", t)(a!) self, " key_type                                       \
               "(b -> *) key, t(c -> *) default_value) -> t(*)"),               \
           dictSetDefault,                                                      \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::Delete.Dict_" key_type                 \
                                  "(Dict(" key_type ", t)(a!) self, " key_type  \
                                  " key) -> ()"),                               \
           dictDelete,                                                          \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::pop.Dict_" key_type "(Dict(" key_type  \
                                  ", t)(a!) self, " key_type " key) -> t(*)"),  \
           dictPop<false>,                                                      \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::pop.Dict_default_" key_type            \
                                  "(Dict(" key_type ", t)(a!) self, " key_type  \
                                  " key, t default_value) -> t(*)"),            \
           dictPop<true>,                                                       \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::popitem." key_type "(Dict(" key_type   \
                                  ", t)(a!) self) -> ((" key_type ", t))"),     \
           dictPopItem,                                                         \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::clear." key_type "(Dict(" key_type     \
                                  ", t)(a!) self) -> ()"),                      \
           dictClear,                                                           \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::update." key_type "(Dict(" key_type    \
                                  ", t)(a!) self, Dict(" key_type               \
                                  ", t)(a!) to_add) -> ()"),                    \
           dictUpdate,                                                          \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::items." key_type "(Dict(" key_type     \
                                  ", t) self) -> ((" key_type ", t)[])"),       \
           dictItems,                                                           \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::copy.Dict_" key_type "(Dict(" key_type \
                                  ", t)(a) self) -> Dict(" key_type ", t)"),    \
           dictCopy,                                                            \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::__contains__." key_type                \
                                  "(Dict(" key_type ", t) dict, " key_type      \
                                  " key) -> bool"),                             \
           dictContains,                                                        \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::_set_item." key_type "(Dict(" key_type \
                                  ", t)(a!) l, " key_type                       \
                                  "(b -> *) idx, t(c -> *) v) -> ()"),          \
           dictSetItem,                                                         \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::dict." key_type "((" key_type          \
                                  ", tVal)[] inputs) -> Dict(" key_type         \
                                  ", tVal)"),                                   \
           dictConstructFromList,                                               \
           aliasAnalysisFromSchema()),                                          \
-      OperatorGenerator(                                                       \
+      OperatorGeneratorArgs(                                                   \
           TORCH_SELECTIVE_SCHEMA("aten::dict.Dict_" key_type "(Dict(" key_type \
                                  ", t)(a) self) -> Dict(" key_type ", t)"),    \
           dictCopy,                                                            \
           aliasAnalysisFromSchema())
 
-RegisterOperators reg_dict_ops({
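+// Table of dict operator registrations; reg_dict_ops below builds the
+// actual Operators from it via createOperators.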
+static const OperatorGeneratorArgs dict_ops[] = {
     CREATE_DICT_OPS("str"),
     CREATE_DICT_OPS("int"),
     CREATE_DICT_OPS("bool"),
     CREATE_DICT_OPS("float"),
     CREATE_DICT_OPS("complex"),
     CREATE_DICT_OPS("Tensor"),
-});
+};
+RegisterOperators reg_dict_ops(
+    createOperators(dict_ops, sizeof(dict_ops) / sizeof(dict_ops[0])));
 
 // NOLINTNEXTLINE(clang-diagnostic-unused-function)
-c10::AliasAnalysisKind aliasAnalysisFromSchema() {
+constexpr c10::AliasAnalysisKind aliasAnalysisFromSchema() {
   return c10::AliasAnalysisKind::FROM_SCHEMA;
 }
 
@@ -2095,393 +2117,394 @@ TORCH_LIBRARY_IMPL(aten, CatchAll, m) {
       });
 }
 
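+// Each OperatorGeneratorArgs entry pairs a selective schema string with a
+// Stack* implementation and an alias analysis kind; reg1 below registers
+// the whole table through createOperators.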
+static const OperatorGeneratorArgs opGenArgs1[] = {
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::rangelist(int n) -> int[]"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          int64_t n;
+          pop(stack, n);
+          c10::List<int64_t> elems;
+          elems.reserve(n);
+          for (const auto i : c10::irange(n)) {
+            elems.push_back(i);
+          }
+          push(stack, std::move(elems));
+        },
+        aliasAnalysisFromSchema()),
+    // note: this op needs to share a name with the Scalar -> Tensor conversion
+    // because all _to_tensor conversions have to have the same operator name
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.bool(bool a) -> Tensor"),
+        [](Stack* stack) {
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool b;
+          pop(stack, b);
+          push(stack, at::scalar_to_tensor(b));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::device(str a) -> Device"),
+        [](Stack* stack) {
+          push(stack, c10::Device(pop(stack).toStringRef()));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::percentFormat(str self, ...) -> str"),
+        [](Stack* stack) {
+          size_t num_inputs = pop(stack).toInt();
+          percentFormat(*stack, num_inputs);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
+        [](Stack* stack) {
+          at::Tensor self;
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool non_blocking;
+          // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+          bool copy;
+          pop(stack, self, non_blocking, copy);
+          c10::optional<c10::Device> device = c10::nullopt;
+          c10::optional<at::ScalarType> scalarType = c10::nullopt;
+          push(
+              stack, to_dispatch(self, device, scalarType, non_blocking, copy));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::requires_grad(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.requires_grad());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::grad(Tensor a) -> Tensor(*)"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.grad());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_sparse(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_sparse());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_sparse_csr(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_sparse_csr());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_mkldnn(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_mkldnn());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_mlc(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_mlc());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_vulkan(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_vulkan());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_quantized(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_quantized());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_meta(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_meta());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_ort(Tensor a) -> bool"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_ort());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::name(Tensor a) -> str?"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          if (a.name() == "") {
+            push(stack, IValue());
+          } else {
+            push(stack, a.name());
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::index(Device self) -> int?"),
+        [](Stack* stack) {
+          auto d = pop(stack).toDevice();
+          if (d.has_index()) {
+            push(stack, d.index());
+          } else {
+            push(stack, IValue());
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        // TODO return generator object when torchscript supports RNG
+        // first-class
+        TORCH_SELECTIVE_SCHEMA("aten::manual_seed(int seed) -> ()"),
+        [](Stack* stack) { at::manual_seed(pop(stack).toInt()); },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::cuda(Tensor(a) self) -> Tensor(a|b)"),
+        [](Stack* stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.cuda());
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::AutogradZero() -> Tensor"),
+        [](Stack* stack) { stack->emplace_back(at::Tensor()); },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim = False) -> int[]"),
+        [](Stack* stack) {
+          bool keepdim = pop(stack).toBool();
+          c10::List<int64_t> axes = pop(stack).toIntList();
+          c10::List<int64_t> size = pop(stack).toIntList();
+          if (keepdim) {
+            for (const auto& axis : axes) {
+              size.set(axis, 1);
+            }
+          } else {
+            int64_t index = 0;
+            auto iter = size.begin();
+            std::sort(axes.begin(), axes.end());
+            for (const auto& axis : axes) {
+              // move iter to the next axis
+              iter += axis - index;
+
+              // input iter points to axis and is updated to axis + 1
+              iter = size.erase(iter);
+
+              // update current index for iter
+              index = axis + 1;
+            }
+          }
+          push(stack, IValue(std::move(size)));
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::BroadcastSizes(...) -> int[]"),
+        [](Stack* stack) {
+          auto num_inputs = pop(stack).toInt();
+          std::vector<int64_t> size;
+          size.reserve(8);
+          for (const auto i : c10::irange(num_inputs)) {
+            size =
+                at::infer_size(size, peek(stack, i, num_inputs).toIntVector());
+          }
+          drop(stack, num_inputs);
+          push(stack, IValue(size));
+        },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::warn(str message, int stacklevel=2) -> ()"),
+        [](Stack* stack) {
+          TORCH_CHECK(false, "warn is implemented directly in the interpreter");
+        },
+        aliasAnalysisFromSchema()),
+
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "onnx::Reshape(Tensor input, Tensor shape) -> Tensor"),
+        [](Stack* stack) {
+          at::Tensor input, shape;
+          pop(stack, input, shape);
+          shape = shape.contiguous();
+          AT_ASSERT(shape.ndimension() == 1);
+          at::IntArrayRef shape_list(shape.data_ptr<int64_t>(), shape.size(0));
+          push(stack, input.reshape(shape_list));
+        },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("onnx::Shape(Tensor t) -> Tensor"),
+        [](Stack* stack) {
+          auto t = pop(stack).toTensor();
+          at::IntArrayRef sizes = t.sizes();
+          auto sizes_tensor = torch::empty(
+              {static_cast<int64_t>(sizes.size())}, at::dtype(at::kLong));
+          auto accessor = sizes_tensor.accessor<int64_t, 1>();
+          for (const auto i : c10::irange(sizes.size())) {
+            accessor[i] = sizes[i];
+          }
+          stack->emplace_back(sizes_tensor);
+        },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::AutogradAnyNonZero(...) -> bool"),
+        [](Stack* stack) {
+          auto num_inputs = pop(stack).toInt();
+          bool result = false;
+          for (const IValue& v : last(stack, num_inputs)) {
+            if (v.isTensor()) {
+              if (v.toTensor().defined()) {
+                result = true;
+                break;
+              }
+            } else if (v.isTensorList()) {
+              for (const at::Tensor& t : v.toTensorVector()) {
+                if (t.defined()) {
+                  result = true;
+                }
+              }
+              if (result) {
+                break;
+              }
+            } else {
+              TORCH_INTERNAL_ASSERT(false);
+            }
+          }
+          drop(stack, num_inputs);
+          stack->emplace_back(result);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::AutogradAllZero(...) -> bool"),
+        [](Stack* stack) {
+          auto num_inputs = pop(stack).toInt();
+          bool result = true;
+          for (const IValue& v : last(stack, num_inputs)) {
+            TORCH_INTERNAL_ASSERT(v.isTensor());
+            if (v.toTensor().defined()) {
+              result = false;
+              break;
+            }
+          }
+          drop(stack, num_inputs);
+          stack->emplace_back(result);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::AutogradAllNonZero(...) -> bool"),
+        [](Stack* stack) {
+          auto num_inputs = pop(stack).toInt();
+          bool result = true;
+          for (const IValue& v : last(stack, num_inputs)) {
+            TORCH_INTERNAL_ASSERT(v.isTensor());
+            if (!v.toTensor().defined()) {
+              result = false;
+              break;
+            }
+          }
+          drop(stack, num_inputs);
+          stack->emplace_back(result);
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::AutogradAdd(Any a, Any b) -> Any"),
+        [](Stack* stack) {
+          at::Tensor a, b;
+          pop(stack, a, b);
+          // NOLINTNEXTLINE(bugprone-branch-clone)
+          if (!a.defined() && !b.defined()) {
+            // undef + undef == undef
+            stack->emplace_back(a);
+          } else if (!a.defined()) {
+            stack->emplace_back(b);
+          } else if (!b.defined()) {
+            stack->emplace_back(a);
+          } else {
+            stack->emplace_back(a + b);
+          }
+        },
+        aliasAnalysisSpecialCase()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::_size_if_not_equal(int[] self_size, int[] other_size) -> int[]?"),
+        [](Stack* stack) {
+          IValue self_size, other_size;
+          pop(stack, self_size, other_size);
+          auto s = self_size.toIntVector();
+          auto o = other_size.toIntVector();
+          if (s == o) {
+            push(stack, IValue());
+          } else {
+            push(stack, s);
+          }
+        },
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::_unwrap_optional(t(a)? optional) -> t(a)"),
+        [](Stack* stack) {
+          auto val = pop(stack);
+          TORCH_CHECK(!val.isNone(), "Unwrapping null optional");
+          push(stack, std::move(val));
+        },
+        aliasAnalysisFromSchema())};
+
 RegisterOperators reg1(
-    {OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::rangelist(int n) -> int[]"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           int64_t n;
-           pop(stack, n);
-           c10::List<int64_t> elems;
-           elems.reserve(n);
-           for (const auto i : c10::irange(n)) {
-             elems.push_back(i);
-           }
-           push(stack, std::move(elems));
-         },
-         aliasAnalysisFromSchema()),
-     // note: this op needs to share a name with the Scalar -> Tensor conversion
-     // because all _to_tensor conversion have to have the same operator namet
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.bool(bool a) -> Tensor"),
-         [](Stack* stack) {
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool b;
-           pop(stack, b);
-           push(stack, at::scalar_to_tensor(b));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::device(str a) -> Device"),
-         [](Stack* stack) {
-           push(stack, c10::Device(pop(stack).toStringRef()));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::percentFormat(str self, ...) -> str"),
-         [](Stack* stack) {
-           size_t num_inputs = pop(stack).toInt();
-           percentFormat(*stack, num_inputs);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
-         [](Stack* stack) {
-           at::Tensor self;
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool non_blocking;
-           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-           bool copy;
-           pop(stack, self, non_blocking, copy);
-           c10::optional<c10::Device> device = c10::nullopt;
-           c10::optional<at::ScalarType> scalarType = c10::nullopt;
-           push(
-               stack,
-               to_dispatch(self, device, scalarType, non_blocking, copy));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::requires_grad(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.requires_grad());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::grad(Tensor a) -> Tensor(*)"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.grad());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_sparse(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_sparse());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_sparse_csr(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_sparse_csr());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_mkldnn(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_mkldnn());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_mlc(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_mlc());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_vulkan(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_vulkan());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_quantized(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_quantized());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_meta(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_meta());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::is_ort(Tensor a) -> bool"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.is_ort());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::name(Tensor a) -> str?"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           if (a.name() == "") {
-             push(stack, IValue());
-           } else {
-             push(stack, a.name());
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::index(Device self) -> int?"),
-         [](Stack* stack) {
-           auto d = pop(stack).toDevice();
-           if (d.has_index()) {
-             push(stack, d.index());
-           } else {
-             push(stack, IValue());
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         // TODO return generator object when torchscript supports RNG
-         // first-class
-         TORCH_SELECTIVE_SCHEMA("aten::manual_seed(int seed) -> ()"),
-         [](Stack* stack) { at::manual_seed(pop(stack).toInt()); },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("aten::cuda(Tensor(a) self) -> Tensor(a|b)"),
-         [](Stack* stack) {
-           at::Tensor a;
-           pop(stack, a);
-           push(stack, a.cuda());
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::AutogradZero() -> Tensor"),
-         [](Stack* stack) { stack->emplace_back(at::Tensor()); },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim = False) -> int[]"),
-         [](Stack* stack) {
-           bool keepdim = pop(stack).toBool();
-           c10::List<int64_t> axes = pop(stack).toIntList();
-           c10::List<int64_t> size = pop(stack).toIntList();
-           if (keepdim) {
-             for (const auto& axis : axes) {
-               size.set(axis, 1);
-             }
-           } else {
-             int64_t index = 0;
-             auto iter = size.begin();
-             std::sort(axes.begin(), axes.end());
-             for (const auto& axis : axes) {
-               // move iter to the next axis
-               iter += axis - index;
-
-               // input iter points to axis and is updated to axis + 1
-               iter = size.erase(iter);
-
-               // update current index for iter
-               index = axis + 1;
-             }
-           }
-           push(stack, IValue(std::move(size)));
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::BroadcastSizes(...) -> int[]"),
-         [](Stack* stack) {
-           auto num_inputs = pop(stack).toInt();
-           std::vector<int64_t> size;
-           size.reserve(8);
-           for (const auto i : c10::irange(num_inputs)) {
-             size =
-                 at::infer_size(size, peek(stack, i, num_inputs).toIntVector());
-           }
-           drop(stack, num_inputs);
-           push(stack, IValue(size));
-         },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::warn(str message, int stacklevel=2) -> ()"),
-         [](Stack* stack) {
-           TORCH_CHECK(
-               false, "warn is implemented directly in the interpreter");
-         },
-         aliasAnalysisFromSchema()),
-
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "onnx::Reshape(Tensor input, Tensor shape) -> Tensor"),
-         [](Stack* stack) {
-           at::Tensor input, shape;
-           pop(stack, input, shape);
-           shape = shape.contiguous();
-           AT_ASSERT(shape.ndimension() == 1);
-           at::IntArrayRef shape_list(shape.data_ptr<int64_t>(), shape.size(0));
-           push(stack, input.reshape(shape_list));
-         },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("onnx::Shape(Tensor t) -> Tensor"),
-         [](Stack* stack) {
-           auto t = pop(stack).toTensor();
-           at::IntArrayRef sizes = t.sizes();
-           auto sizes_tensor = torch::empty(
-               {static_cast<int64_t>(sizes.size())}, at::dtype(at::kLong));
-           auto accessor = sizes_tensor.accessor<int64_t, 1>();
-           for (const auto i : c10::irange(sizes.size())) {
-             accessor[i] = sizes[i];
-           }
-           stack->emplace_back(sizes_tensor);
-         },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::AutogradAnyNonZero(...) -> bool"),
-         [](Stack* stack) {
-           auto num_inputs = pop(stack).toInt();
-           bool result = false;
-           for (const IValue& v : last(stack, num_inputs)) {
-             if (v.isTensor()) {
-               if (v.toTensor().defined()) {
-                 result = true;
-                 break;
-               }
-             } else if (v.isTensorList()) {
-               for (const at::Tensor& t : v.toTensorVector()) {
-                 if (t.defined()) {
-                   result = true;
-                 }
-               }
-               if (result) {
-                 break;
-               }
-             } else {
-               TORCH_INTERNAL_ASSERT(false);
-             }
-           }
-           drop(stack, num_inputs);
-           stack->emplace_back(result);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::AutogradAllZero(...) -> bool"),
-         [](Stack* stack) {
-           auto num_inputs = pop(stack).toInt();
-           bool result = true;
-           for (const IValue& v : last(stack, num_inputs)) {
-             TORCH_INTERNAL_ASSERT(v.isTensor());
-             if (v.toTensor().defined()) {
-               result = false;
-               break;
-             }
-           }
-           drop(stack, num_inputs);
-           stack->emplace_back(result);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::AutogradAllNonZero(...) -> bool"),
-         [](Stack* stack) {
-           auto num_inputs = pop(stack).toInt();
-           bool result = true;
-           for (const IValue& v : last(stack, num_inputs)) {
-             TORCH_INTERNAL_ASSERT(v.isTensor());
-             if (!v.toTensor().defined()) {
-               result = false;
-               break;
-             }
-           }
-           drop(stack, num_inputs);
-           stack->emplace_back(result);
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA("prim::AutogradAdd(Any a, Any b) -> Any"),
-         [](Stack* stack) {
-           at::Tensor a, b;
-           pop(stack, a, b);
-           // NOLINTNEXTLINE(bugprone-branch-clone)
-           if (!a.defined() && !b.defined()) {
-             // undef + undef == undef
-             stack->emplace_back(a);
-           } else if (!a.defined()) {
-             stack->emplace_back(b);
-           } else if (!b.defined()) {
-             stack->emplace_back(a);
-           } else {
-             stack->emplace_back(a + b);
-           }
-         },
-         aliasAnalysisSpecialCase()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::_size_if_not_equal(int[] self_size, int[] other_size) -> int[]?"),
-         [](Stack* stack) {
-           IValue self_size, other_size;
-           pop(stack, self_size, other_size);
-           auto s = self_size.toIntVector();
-           auto o = other_size.toIntVector();
-           if (s == o) {
-             push(stack, IValue());
-           } else {
-             push(stack, s);
-           }
-         },
-         aliasAnalysisFromSchema()),
-     OperatorGenerator(
-         TORCH_SELECTIVE_SCHEMA(
-             "aten::_unwrap_optional(t(a)? optional) -> t(a)"),
-         [](Stack* stack) {
-           auto val = pop(stack);
-           TORCH_CHECK(!val.isNone(), "Unwrapping null optional");
-           push(stack, std::move(val));
-         },
-         aliasAnalysisFromSchema())});
+    createOperators(opGenArgs1, sizeof(opGenArgs1) / sizeof(opGenArgs1[0])));
 
 void hashValue(Stack* stack) {
   auto value = pop(stack);
   push(stack, value.hash());
 }
 
-RegisterOperators reg2({
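+// Second operator table (list, string, math, and complex helpers),
+// registered as reg2 after the table definition.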
+static const OperatorGeneratorArgs opGenArgs2[] = {
     // registered as Any[] so that heterogenous tuples can be called with len()
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::len.any(Any[] a) -> int"),
         listLen,
         aliasAnalysisFromSchema()),
 
 // these ops have a specialized implementation for the list element type
 #define CREATE_SPECIALIZED_LIST_OPS(decl_type, value_type) \
-  OperatorGenerator(                                       \
+  OperatorGeneratorArgs(                                   \
       TORCH_SELECTIVE_SCHEMA(                              \
           "aten::remove." decl_type "(" decl_type          \
           "[](a!) self,                                                           \
         " decl_type " el) -> ()"),                         \
       listRemove<value_type>,                              \
       aliasAnalysisFromSchema()),                          \
-      OperatorGenerator(                                   \
+      OperatorGeneratorArgs(                               \
           TORCH_SELECTIVE_SCHEMA(                          \
               "aten::index.list_" decl_type "(" decl_type  \
               "[] self,                                                               \
         " decl_type " el) -> int"),                        \
           listIndex<value_type>,                           \
           aliasAnalysisFromSchema()),                      \
-      OperatorGenerator(                                   \
+      OperatorGeneratorArgs(                               \
           TORCH_SELECTIVE_SCHEMA(                          \
               "aten::count." decl_type "(" decl_type       \
               "[] self,                                                               \
@@ -2500,100 +2523,100 @@ RegisterOperators reg2({
 
     // `listContains<T>` is not implemented for non-primitive types
     // TODO: Add List[bool] once .to<c10::List<bool>> doesn't throw an error
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::__contains__.float_list(float[] l, float item) -> bool"),
         listContains<double>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sort.int(int[](a!) self, bool reverse=False) -> ()"),
         listSort<int64_t>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sort.float(float[](a!) self, bool reverse=False) -> ()"),
         listSort<double>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sort.Tensor(Tensor[](a!) self, bool reverse=False) -> ()"),
         listSort<at::Tensor>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sort.bool(bool[](a!) self, bool reverse=False) -> ()"),
         listSort<bool>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sort.str(str[](a!) self, bool reverse=False) -> ()"),
         listSort<std::string>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sorted.int(int[](a) input) -> (int[])"),
         listCopyAndSort<int64_t>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sorted.float(float[](a) input) -> (float[])"),
         listCopyAndSort<double>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sorted.Tensor(Tensor[](a) input) -> (Tensor[])"),
         listCopyAndSort<at::Tensor>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::sorted.bool(bool[](a) input) -> (bool[])"),
         listCopyAndSort<bool>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sorted.str(str[](a) input) -> (str[])"),
         listCopyAndSort<std::string>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::eq.float_list(float[] a, float[] b) -> bool"),
         listEq<double>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool"),
         listEq<at::Tensor>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::eq.bool_list(bool[] a, bool[] b) -> bool"),
         listEq<bool>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::eq.str_list(str[] a, str[] b) -> bool"),
         listEq<std::string>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::ne.float_list(float[] a, float[] b) -> bool"),
         listNe<double>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool"),
         listNe<at::Tensor>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::ne.bool_list(bool[] a, bool[] b) -> bool"),
         listNe<bool>,
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::ne.str_list(str[] a, str[] b) -> bool"),
         listNe<std::string>,
         aliasAnalysisFromSchema()),
 
 #define DEFINE_CONVERT_BASE_OP(op_name, prefix, char_op) \
-  OperatorGenerator(                                     \
+  OperatorGeneratorArgs(                                 \
       TORCH_SELECTIVE_SCHEMA(#op_name "(int i) -> str"), \
       [](Stack* stack) {                                 \
         auto i = pop(stack).toInt();                     \
@@ -2610,7 +2633,7 @@ RegisterOperators reg2({
     DEFINE_CONVERT_BASE_OP(aten::hex, "x", std::hex),
     DEFINE_CONVERT_BASE_OP(aten::oct, "o", std::oct),
 
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::bin(int i) -> str"),
         [](Stack* stack) {
           auto i = pop(stack).toInt();
@@ -2630,7 +2653,7 @@ RegisterOperators reg2({
         },
         aliasAnalysisFromSchema()),
     // TODO: deprecate this in favor of aten::getelem
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "prim::StringIndex(str string, int index) -> str"),
         [](Stack* stack) {
@@ -2641,7 +2664,7 @@ RegisterOperators reg2({
           push(stack, std::string(&c, 1));
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::chr(int i) -> str"),
         [](Stack* stack) {
           auto i = pop(stack).toInt();
@@ -2659,7 +2682,7 @@ RegisterOperators reg2({
     // only used in loop unrolling, not exposed to end users
     DEFINE_INT_OP(aten::__round_to_zero_floordiv, a / b),
 
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::modf(float a) -> (float, float)"),
         [](Stack* stack) {
           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -2671,7 +2694,7 @@ RegisterOperators reg2({
           push(stack, b, c);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::frexp(float a) -> (float, int)"),
         [](Stack* stack) {
           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -2685,7 +2708,7 @@ RegisterOperators reg2({
           push(stack, m, e);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::ldexp(float x, int i) -> float"),
         [](Stack* stack) {
           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -2785,7 +2808,7 @@ RegisterOperators reg2({
         float,
         float,
         float),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("prim::abs(Tensor x) -> Tensor"),
         [](Stack* stack) {
           at::Tensor x;
@@ -2808,7 +2831,7 @@ RegisterOperators reg2({
         std::copysign(a, b),
         std::copysign(a, b),
         float),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::_tensor_to_list(Tensor self) -> int[]"),
         [](Stack* stack) {
           at::Tensor t;
@@ -2821,7 +2844,7 @@ RegisterOperators reg2({
           push(stack, std::move(elems));
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::_list_to_tensor(int[] self) -> Tensor"),
         [](Stack* stack) {
           c10::List<int64_t> l = pop(stack).toIntList();
@@ -2833,7 +2856,7 @@ RegisterOperators reg2({
           push(stack, std::move(t));
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sum.int(int[] self) -> int"),
         [](Stack* stack) {
           c10::List<int64_t> l = pop(stack).toIntList();
@@ -2844,7 +2867,7 @@ RegisterOperators reg2({
           push(stack, sum);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sum.float(float[] self) -> float"),
         [](Stack* stack) {
           c10::List<double> l = pop(stack).toDoubleList();
@@ -2855,7 +2878,7 @@ RegisterOperators reg2({
           push(stack, sum);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sum.complex(complex[] self) -> complex"),
         [](Stack* stack) {
           c10::List<c10::complex<double>> l = pop(stack).toComplexDoubleList();
@@ -2866,7 +2889,7 @@ RegisterOperators reg2({
           push(stack, sum);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sum.bool(bool[] self) -> int"),
         [](Stack* stack) {
           c10::List<bool> l = pop(stack).toBoolList();
@@ -2879,7 +2902,7 @@ RegisterOperators reg2({
           push(stack, sum);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::any.str(str[] self) -> bool"),
         [](Stack* stack) {
           auto l = pop(stack).toList();
@@ -2892,7 +2915,7 @@ RegisterOperators reg2({
           push(stack, false);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::any.int(int[] self) -> bool"),
         [](Stack* stack) {
           c10::List<int64_t> l = pop(stack).toIntList();
@@ -2905,7 +2928,7 @@ RegisterOperators reg2({
           push(stack, false);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::any.float(float[] self) -> bool"),
         [](Stack* stack) {
           c10::List<double> l = pop(stack).toDoubleList();
@@ -2918,7 +2941,7 @@ RegisterOperators reg2({
           push(stack, false);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::any.bool(bool[] self) -> bool"),
         [](Stack* stack) {
           c10::List<bool> l = pop(stack).toBoolList();
@@ -2931,7 +2954,7 @@ RegisterOperators reg2({
           push(stack, false);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::all.int(int[] self) -> bool"),
         [](Stack* stack) {
           c10::List<int64_t> l = pop(stack).toIntList();
@@ -2944,7 +2967,7 @@ RegisterOperators reg2({
           push(stack, true);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::all.float(float[] self) -> bool"),
         [](Stack* stack) {
           c10::List<double> l = pop(stack).toDoubleList();
@@ -2957,7 +2980,7 @@ RegisterOperators reg2({
           push(stack, true);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::all.bool(bool[] self) -> bool"),
         [](Stack* stack) {
           c10::List<bool> l = pop(stack).toBoolList();
@@ -2970,7 +2993,7 @@ RegisterOperators reg2({
           push(stack, true);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::divmod.int(int x, int y) -> (int, int)"),
         [](Stack* stack) {
           // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@@ -2992,7 +3015,7 @@ RegisterOperators reg2({
               static_cast<int64_t>(divresult.rem));
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA(
             "aten::divmod.float(float x, float y) -> (float, float)"),
         [](Stack* stack) {
@@ -3010,7 +3033,7 @@ RegisterOperators reg2({
           push(stack, (a - rem) / b, rem);
         },
         aliasAnalysisFromSchema()),
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("prim::id(AnyClassType? x) -> int"),
         [](Stack* stack) {
           IValue a;
@@ -3024,7 +3047,7 @@ RegisterOperators reg2({
         aliasAnalysisFromSchema()),
 
 #define DEFINE_DIVMOD_MIXED_OP(type_a, type_b)                               \
-  OperatorGenerator(                                                         \
+  OperatorGeneratorArgs(                                                     \
       TORCH_SELECTIVE_SCHEMA("aten::divmod." #type_a "_" #type_b "(" #type_a \
                              " x," #type_b " y) -> (float, float)"),         \
       [](Stack* stack) {                                                     \
@@ -3044,13 +3067,13 @@ RegisterOperators reg2({
     DEFINE_DIVMOD_MIXED_OP(float, int),
 
 #undef DEFINE_DIVMOD_MIXED_OP
-    OperatorGenerator(
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::hash.generic(t value) -> int"),
         hashValue,
         aliasAnalysisFromSchema()),
 
 #define DEFINE_COMPLEX_OP(type_a, type_b, actual_type_a, actual_type_b)       \
-  OperatorGenerator(                                                          \
+  OperatorGeneratorArgs(                                                      \
       TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
                              " x," #type_b " y) -> complex"),                 \
       [](Stack* stack) {                                                      \
@@ -3064,7 +3087,7 @@ RegisterOperators reg2({
 
 #define DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(                                    \
     type_a, type_b, actual_type_a, actual_type_b)                             \
-  OperatorGenerator(                                                          \
+  OperatorGeneratorArgs(                                                      \
       TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
                              " x," #type_b " y) -> complex"),                 \
       [](Stack* stack) {                                                      \
@@ -3075,7 +3098,7 @@ RegisterOperators reg2({
         push(stack, comp);                                                    \
       },                                                                      \
       aliasAnalysisFromSchema()),                                             \
-      OperatorGenerator(                                                      \
+      OperatorGeneratorArgs(                                                  \
           TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_b "_" #type_a         \
                                  "(" #type_b " x," #type_a " y) -> complex"), \
           [](Stack* stack) {                                                  \
@@ -3099,7 +3122,10 @@ RegisterOperators reg2({
     DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(Tensor, float, at::Tensor, double),
     DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(Tensor, int, at::Tensor, int),
     DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(Tensor, bool, at::Tensor, bool),
-});
+};
+
+RegisterOperators reg2(
+    createOperators(opGenArgs2, sizeof(opGenArgs2) / sizeof(opGenArgs2[0])));
 
 } // namespace
 } // namespace jit