--- /dev/null
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_FULLYCONNECTED_
+#define _NNC_CORE_BACKEND_INTERPRETER_FULLYCONNECTED_
+
+#include "nnc/core/linalg/ShapeRange.h"
+#include "nnc/core/IR/model/operations/fully_connected_op.h"
+#include "interpreter/ops/OperationImpl.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+using nncc::contrib::core::IR::model::ops::FullyConnectedOp;
+
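+/**
+ * @brief Fully connected (matrix multiplication) operation for the interpreter backend.
+ *
+ * Multiplies the input tensor by the operation's weights over their two innermost
+ * dimensions: out[..., m, n] = sum_k(input[..., m, k] * weights[..., k, n]).
+ */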
+template<typename T>
+class FullyConnected : public OperationImpl<T>
+{
+public:
+  FullyConnected(const TensorVariant &input, const FullyConnectedOp &op) : _op(op), _input(input) {}
+
+ std::vector<TensorVariant> operator()() override
+ {
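+    // Allocate the output tensor using the shape inferred for this operation.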
+ TensorVariant res = OperationImpl<T>::allocate_tensor(_op.getOutputShape(0));
+ Tensor<T> accessor(res);
+
+ ShapeRange outRange(res.getShape());
+
+ Tensor<T> weights(_op.getWeights());
+ const Shape &wShape = weights.getShape();
+ uint32_t wRank = wShape.rank();
+
+ const Shape &inShape = _input.getShape();
+ uint32_t inRank = inShape.rank();
+
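+    // Shape contract: the input's innermost dimension is contracted with the
+    // weights' second-to-last dimension, so the two must match.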
+ assert(wShape.dim(wRank - 2) == inShape.dim(inRank - 1));
+
+ const uint32_t len = wShape.dim(wRank - 2);
+
+    for (auto &outIdx : outRange)
+    {
+      Index tIdx = outIdx;
+      const uint32_t col = tIdx.at(wRank - 1);
+      const uint32_t row = tIdx.at(wRank - 2);
+      // Accumulate locally so the result does not depend on the output
+      // buffer being zero-initialized.
+      T sum = T();
+      for (uint32_t i = 0u; i < len; ++i)
+      {
+        // Read input[..., row, i] by temporarily rewriting the innermost index.
+        tIdx.at(wRank - 1) = i;
+        const T& in = _input.at(tIdx);
+        tIdx.at(wRank - 1) = col;
+        // Read weights[..., i, col] by temporarily rewriting the second-to-last index.
+        tIdx.at(wRank - 2) = i;
+        const T& w = weights.at(tIdx);
+        tIdx.at(wRank - 2) = row;
+        sum += in * w;
+      }
+      accessor.at(outIdx) = sum;
+    }
+
+ return {res};
+ }
+
+private:
+ const FullyConnectedOp &_op;
+ const Tensor<T> _input;
+};
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif //_NNC_CORE_BACKEND_INTERPRETER_FULLYCONNECTED_