--- /dev/null
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_OPERATION_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_OPERATION_IMPL_
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+#include "nnc/core/linalg/Tensor.h"
+#include "nnc/core/linalg/TensorVariant.h"
+
+#include "nncc/core/ADT/tensor/Shape.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+using namespace nncc::contrib::core::data;
+using nncc::contrib::core::ADT::TensorVariant;
+
+using nncc::core::ADT::tensor::Shape;
+
+/// Base class for interpreter operation implementations.
+///
+/// Derived classes implement operator() to run the operation and
+/// return its output tensors. T is the element type of tensors
+/// allocated through allocate_tensor().
+template <typename T> class OperationImpl
+{
+public:
+  // Polymorphic base: a virtual destructor is required so deleting a
+  // derived operation through an OperationImpl<T>* is well-defined.
+  virtual ~OperationImpl() = default;
+
+  /// Execute the operation and return its output tensors.
+  virtual std::vector<TensorVariant> operator()() = 0;
+
+protected:
+  /// Allocate a zero-initialized tensor with the given shape.
+  ///
+  /// The buffer size is the product of all dimensions; a rank-0 shape
+  /// therefore yields a single-element allocation.
+  TensorVariant allocate_tensor(const Shape &shape)
+  {
+    std::size_t data_size = 1;
+    for (uint32_t i = 0; i < shape.rank(); ++i)
+    {
+      data_size *= shape.dim(i);
+    }
+
+    // new T[...]() value-initializes (zeroes) the buffer. The array
+    // deleter is mandatory: shared_ptr<T> would otherwise call scalar
+    // `delete` on an array allocation (UB).
+    std::shared_ptr<T> data(new T[data_size](), std::default_delete<T[]>());
+
+    // Use hardcoded DTYPE for now, since there's no support for
+    // operations on types other than floats.
+    return TensorVariant(shape, data, TensorVariant::DTYPE::FLOAT);
+  }
+};
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif //_NNC_CORE_BACKEND_INTERPRETER_OPERATION_IMPL_