#include "nnc/core/linalg/Tensor.h"
#include "nnc/core/linalg/TensorVariant.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
using namespace nncc::contrib::core::data;
using nncc::contrib::core::ADT::TensorVariant;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
template <typename T> class OperationImpl
{
#include <functional>
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "nnc/core/linalg/Tensor.h"
#include "interpreter/ops/OperationImpl.h"
#pragma once
-#include "nncc/core/ADT/tensor/Index.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Index.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
namespace impl
{
-using nncc::core::ADT::tensor::Index;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Index;
+using nncc::contrib::core::data::Shape;
///
namespace impl
{
-using namespace nncc::core::ADT::tensor;
+using namespace nncc::contrib::core::data;
Index reduce(const Index &idx)
{
namespace impl
{
-using namespace nncc::core::ADT::tensor;
+using namespace nncc::contrib::core::data;
// Mostly compatible with tensorflow implementation
// Assuming input is in NHWC format with batch omitted( [in_height, in_width, in_channels] )
#include "ConfigException.h"
#include "nnc/core/linalg/TensorVariant.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
virtual ~InterpreterPlugin();
private:
- nncc::contrib::core::ADT::TensorVariant loadInput(const nncc::core::ADT::tensor::Shape &);
+ nncc::contrib::core::ADT::TensorVariant loadInput(const nncc::contrib::core::data::Shape &);
nncc::contrib::core::ADT::TensorVariant *_out;
};
#include "PluginInstance.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "interpreter/core/Interpreter.h"
{
using namespace nncc::contrib;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
using namespace nncc::contrib::core::IR::model;
using nncc::contrib::backend::interpreter::core::NNInterpreter;
using nncc::contrib::plugin::BackendPlugin;
#include "nnc/core/linalg/Tensor.h"
#include "nnc/core/linalg/ShapeRange.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "op_info_util.h"
std::shared_ptr<TensorVariant> getTensor(const opinfo::Tensor* t)
#define _NNC_SOFT_BACKEND_MODEL_ANALYZER_H_
#include "nnc/core/IR/model/visitor/visitor.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "nnc/core/linalg/TensorVariant.h"
#include <vector>
#define _NNC_SOFT_BACKEND_SERIALIZER_H_
#include "nnc/core/IR/model/visitor/visitor.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "nnc/core/linalg/TensorVariant.h"
#include "model_analyzer.h"
void packData(const void *data, size_t size);
template <typename T>
void serializeT(const T &obj);
- void serializeShape(const nncc::core::ADT::tensor::Shape &s);
+ void serializeShape(const nncc::contrib::core::data::Shape &s);
void serializeTensor(const contrib::core::ADT::TensorVariant &t);
template <class Op>
void serializePads(const Op &op, int32_t padsRank);
#include "model_analyzer.h"
#include "nnc/core/IR/model/graph/ir_node.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "nnc/core/linalg/ShapeRange.h"
#include "nnc/core/IR/model/operations/concat_op.h"
namespace soft
{
-using nncc::core::ADT::tensor::Shape;
-using nncc::core::ADT::tensor::Index;
+using nncc::contrib::core::data::Shape;
+using nncc::contrib::core::data::Index;
using nncc::contrib::core::data::ShapeRange;
using nncc::contrib::core::ADT::TensorVariant;
using namespace std;
-using nncc::core::ADT::tensor::Shape;
-using nncc::core::ADT::tensor::Index;
+using nncc::contrib::core::data::Shape;
+using nncc::contrib::core::data::Index;
using nncc::contrib::core::data::ShapeRange;
using nncc::contrib::core::data::util::transposeTensor;
using nncc::contrib::core::ADT::TensorVariant;
#include "nnc/core/IR/model/operations/common.h"
#include "nnc/core/linalg/TensorVariant.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
namespace ops
{
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
class Conv2DOp : public OpDescription
{
#include "nnc/core/linalg/TensorVariant.h"
#include "nnc/core/IR/model/operations/common.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
namespace ops
{
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
class DepthwiseConv2DOp : public OpDescription
{
#include <map>
#include <nnc/core/linalg/TensorVariant.h>
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
size_t getNumInputs() const;
size_t getNumOutputs() const;
- const nncc::core::ADT::tensor::Shape &getInputShape(const size_t index) const;
- virtual void setInputShape(const size_t index, const nncc::core::ADT::tensor::Shape &shape);
+ const nncc::contrib::core::data::Shape &getInputShape(const size_t index) const;
+ virtual void setInputShape(const size_t index, const nncc::contrib::core::data::Shape &shape);
- virtual const nncc::core::ADT::tensor::Shape &getOutputShape(const size_t index) const;
- void setOutputShape(const size_t index, const nncc::core::ADT::tensor::Shape &shape);
+ virtual const nncc::contrib::core::data::Shape &getOutputShape(const size_t index) const;
+ void setOutputShape(const size_t index, const nncc::contrib::core::data::Shape &shape);
private:
size_t _max_inputs;
size_t _max_outputs;
- std::map<size_t, nncc::core::ADT::tensor::Shape> _inputShapes;
- std::map<size_t, nncc::core::ADT::tensor::Shape> _outputShapes;
+ std::map<size_t, nncc::contrib::core::data::Shape> _inputShapes;
+ std::map<size_t, nncc::contrib::core::data::Shape> _outputShapes;
};
} // namespace model
#include "nnc/core/IR/model/operations/operation.h"
#include "nnc/core/IR/model/operations/common.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
namespace ops
{
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
class PoolOp : public OpDescription
{
--- /dev/null
+#ifndef _NNC_CORE_LINALG_ACCESSOR_H_
+#define _NNC_CORE_LINALG_ACCESSOR_H_
+
+#include "nnc/core/linalg/Index.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Mutable element-access interface: implementations expose a writable
+// element looked up by multi-dimensional Index.  Read-only counterpart
+// is Reader<T>.
+template <typename T> struct Accessor
+{
+  virtual ~Accessor() = default;
+
+  virtual T &at(const Index &) = 0;
+};
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_ACCESSOR_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_EXTERNAL_REGION_H_
+#define _NNC_CORE_LINALG_EXTERNAL_REGION_H_
+
+#include "nnc/core/linalg/Region.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Region view over caller-owned storage: holds a base pointer and an
+// element count, but never allocates or frees the buffer itself.
+// The caller must keep the buffer alive for the region's lifetime.
+template <typename T> class ExternalRegion final : public Region<T>
+{
+public:
+  ExternalRegion(T *base, uint32_t size) : _base{base}, _size{size}
+  {
+    // DO NOTHING
+  }
+
+public:
+  T *base(void) override { return _base; }
+  uint32_t size(void) const override { return _size; }
+
+private:
+  T *const _base;
+  uint32_t const _size;
+};
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_EXTERNAL_REGION_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_INDEX_H_
+#define _NNC_CORE_LINALG_INDEX_H_
+
+#include <initializer_list>
+#include <vector>
+#include <cstdint>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Multi-dimensional coordinate into a tensor: one uint32_t component
+// per axis.  A default-constructed Index has rank 0.
+class Index
+{
+public:
+  Index() = default;
+  Index(std::initializer_list<uint32_t> &&l);
+
+public:
+  // Number of axes currently held.
+  uint32_t rank(void) const;
+
+public:
+  // Grow/shrink to 'size' axes; returns *this for chaining.
+  Index &resize(uint32_t size);
+
+public:
+  // Set every component to 'index'; returns *this for chaining.
+  Index &fill(uint32_t index);
+
+public:
+  // Bounds-checked component access (std::vector::at underneath).
+  uint32_t &at(uint32_t axis);
+  uint32_t at(uint32_t axis) const;
+
+private:
+  std::vector<uint32_t> _indices;
+};
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_INDEX_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_INDEX_ENUMERATOR_H_
+#define _NNC_CORE_LINALG_INDEX_ENUMERATOR_H_
+
+#include "nnc/core/linalg/Index.h"
+#include "nnc/core/linalg/Shape.h"
+
+#include <cstdint>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Enumerates every valid Index inside a Shape, with the last axis
+// varying fastest.  Usage:
+//   for (IndexEnumerator e{shape}; e.valid(); e.advance()) use(e.current());
+class IndexEnumerator
+{
+public:
+  explicit IndexEnumerator(const Shape &shape);
+
+public:
+  // Non-copyable/non-movable: _index and _cursor form live iteration state.
+  IndexEnumerator(IndexEnumerator &&) = delete;
+  IndexEnumerator(const IndexEnumerator &) = delete;
+
+public:
+  // True while there is a current index left to visit.
+  bool valid(void) const { return _cursor < _shape.rank(); }
+
+public:
+  // The index currently pointed at; only meaningful while valid().
+  const Index &current(void) const { return _index; }
+
+public:
+  // Step to the next index in enumeration order.
+  void advance(void);
+
+private:
+  const Shape _shape;
+  Index _index;
+
+private:
+  uint32_t _cursor;
+};
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_INDEX_ENUMERATOR_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_INDEX_RANGE_H_
+#define _NNC_CORE_LINALG_INDEX_RANGE_H_
+
+#include "nnc/core/linalg/Index.h"
+#include "nnc/core/linalg/Shape.h"
+
+#include "nnc/core/linalg/Iterable.h"
+
+#include <functional>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// The set of all valid indices of a given Shape.  Supports membership
+// testing and whole-range iteration.
+class IndexRange
+{
+public:
+  explicit IndexRange(const Shape &shape);
+
+public:
+  // True iff 'index' has the same rank as the shape and every
+  // component is strictly below the corresponding dimension.
+  bool member(const Index &index) const;
+
+public:
+  // Invoke the callback once for every index in the range.
+  void iterate(const std::function<void(const Index &)> &) const;
+
+public:
+  // Adapter enabling the 'range.iterate() << callback' notation.
+  Iterable<IndexRange> iterate(void) const { return Iterable<IndexRange>{this}; }
+
+private:
+  const Shape _shape;
+};
+
+// Convenience factory: range(shape).iterate(...)
+IndexRange range(const Shape &shape);
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_INDEX_RANGE_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_ITERABLE_H_
+#define _NNC_CORE_LINALG_ITERABLE_H_
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Non-owning wrapper around a 'T' exposing iterate(cb); exists to
+// support the 'obj.iterate() << callback' notation defined below.
+// The wrapped object must outlive the Iterable.
+template<typename T>
+class Iterable
+{
+public:
+  Iterable(const T *ptr) : _ptr{ptr}
+  {
+    // DO NOTHING
+  }
+
+public:
+  const T &get(void) const
+  { return *_ptr; }
+
+private:
+  const T *const _ptr;
+};
+
+// Feed a callback to the wrapped object's iterate(); returns the
+// iterable so callbacks can be chained.
+template<typename T, typename Callable>
+const Iterable<T> &operator<<(const Iterable<T> &it, Callable cb)
+{
+  it.get().iterate(cb);
+  return it;
+}
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_ITERABLE_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_READER_H_
+#define _NNC_CORE_LINALG_READER_H_
+
+#include "nnc/core/linalg/Index.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Read-only element-access interface: elements are looked up by
+// multi-dimensional Index and returned by value.  Writable
+// counterpart is Accessor<T>.
+template <typename T> struct Reader
+{
+  virtual ~Reader() = default;
+
+  virtual T at(const Index &) const = 0;
+};
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_READER_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_REGION_H_
+#define _NNC_CORE_LINALG_REGION_H_
+
+#include <cstdint>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Abstract contiguous buffer of T: a base pointer plus an element
+// count.  Ownership of the underlying storage is implementation-defined
+// (see e.g. ExternalRegion for a non-owning variant).
+template<typename T>
+struct Region
+{
+  virtual ~Region() = default;
+
+  virtual T *base(void) = 0;
+
+  virtual uint32_t size(void) const = 0;
+};
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_REGION_H_
--- /dev/null
+#ifndef _NNC_CORE_LINALG_SHAPE_H_
+#define _NNC_CORE_LINALG_SHAPE_H_
+
+#include <initializer_list>
+#include <vector>
+#include <cstdint>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+// Tensor dimensions: one uint32_t extent per axis.  A
+// default-constructed Shape has rank 0.
+class Shape
+{
+public:
+  Shape() = default;
+  Shape(std::initializer_list<uint32_t> &&l);
+
+public:
+  // Number of axes.
+  uint32_t rank(void) const;
+
+public:
+  // Grow/shrink to 'size' axes; returns *this for chaining.
+  Shape &resize(uint32_t size);
+
+public:
+  // Bounds-checked extent access (std::vector::at underneath).
+  uint32_t &dim(uint32_t axis);
+  uint32_t dim(uint32_t axis) const;
+
+public:
+  // Drop every axis whose extent is 1, in place; returns *this.
+  Shape &squeeze(void);
+
+private:
+  std::vector<uint32_t> _dims;
+};
+
+// Product of all extents (note: returns 0, not 1, for a rank-0 shape).
+uint64_t num_elements(const Shape &);
+
+// Copy of the argument with all size-1 axes removed.
+Shape squeeze(const Shape &);
+
+// True iff ranks match and every extent matches.
+bool operator==(const Shape &, const Shape &);
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_SHAPE_H_
#include <cassert>
-#include "nncc/core/ADT/tensor/Shape.h"
-#include "nncc/core/ADT/tensor/Index.h"
+#include "nnc/core/linalg/Shape.h"
+#include "nnc/core/linalg/Index.h"
namespace nncc
{
namespace data
{
-using nncc::core::ADT::tensor::Shape;
-using nncc::core::ADT::tensor::Index;
+using nncc::contrib::core::data::Shape;
+using nncc::contrib::core::data::Index;
class ShapeIter :
public std::iterator<std::forward_iterator_tag, Index, std::size_t, Index*, Index&> {
#pragma once
-#include "nncc/core/ADT/tensor/Shape.h"
-#include "nncc/core/ADT/tensor/Accessor.h"
-#include "nncc/core/ADT/tensor/Reader.h"
+#include "nnc/core/linalg/Shape.h"
+#include "nnc/core/linalg/Accessor.h"
+#include "nnc/core/linalg/Reader.h"
-#include "nncc/foundation/ExternalRegion.h"
+#include "nnc/core/linalg/ExternalRegion.h"
#include "nnc/core/linalg/TensorVariant.h"
namespace data
{
-using nncc::core::ADT::tensor::Shape;
-using nncc::core::ADT::tensor::Index;
-using nncc::foundation::ExternalRegion;
-using nncc::core::ADT::tensor::Accessor;
-using nncc::core::ADT::tensor::Reader;
-
template<typename T>
class Tensor final : public Accessor<T>, public Reader<T> {
public:
#include <memory>
#include "nnc/core/linalg/TensorVariant.h"
-#include "nncc/core/ADT/tensor/Shape.h"
-#include "nncc/core/ADT/tensor/Index.h"
-#include "nncc/core/ADT/tensor/IndexRange.h"
+#include "nnc/core/linalg/Shape.h"
+#include "nnc/core/linalg/Index.h"
+#include "nnc/core/linalg/IndexRange.h"
namespace nncc
{
namespace util
{
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
using namespace nncc::contrib::core::ADT;
using namespace nncc::contrib::core::data;
static std::shared_ptr <TensorVariant>
transposeTensor(std::shared_ptr <TensorVariant> tensor)
{
- using nncc::core::ADT::tensor::Index;
- using nncc::core::ADT::tensor::IndexRange;
+ using nncc::contrib::core::data::Index;
+ using nncc::contrib::core::data::IndexRange;
const Shape &inShape = tensor->getShape();
Shape targetShape{inShape.dim(Ints)...};
#include <memory>
#include <cassert>
-#include "nncc/core/ADT/tensor/Index.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Index.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc {
namespace contrib {
namespace core {
namespace ADT {
-using nncc::core::ADT::tensor::Shape;
-using nncc::core::ADT::tensor::Index;
+using nncc::contrib::core::data::Shape;
+using nncc::contrib::core::data::Index;
constexpr int MAX_DIMENSIONS = 32;
namespace model
{
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
std::vector<int> calculate2DPaddings(ops::PaddingType paddingType, const Shape& inShape,
const Shape& windowShape, const Shape& strides, Shape& outShape)
namespace IR {
namespace model {
-using namespace nncc::core::ADT;
+using namespace nncc::contrib::core::data;
-const tensor::Shape &OpDescription::getInputShape(const size_t index) const {
+const Shape &OpDescription::getInputShape(const size_t index) const {
assert(index < getNumInputs());
return _inputShapes.at(index);
}
-void OpDescription::setInputShape(const size_t index, const tensor::Shape &shape) {
+void OpDescription::setInputShape(const size_t index, const Shape &shape) {
assert(index < getNumInputs());
_inputShapes[index] = shape;
}
-const tensor::Shape &OpDescription::getOutputShape(const size_t index) const {
+const Shape &OpDescription::getOutputShape(const size_t index) const {
assert(index < getNumOutputs());
return _outputShapes.at(index);
}
-void OpDescription::setOutputShape(const size_t index, const tensor::Shape &shape) {
+void OpDescription::setOutputShape(const size_t index, const Shape &shape) {
assert(index < getNumOutputs());
_outputShapes[index] = shape;
}
--- /dev/null
+#include "nnc/core/linalg/Index.h"
+
+#include <algorithm>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+Index::Index(std::initializer_list<uint32_t> &&l) : _indices{l}
+{
+  // DO NOTHING
+}
+
+uint32_t Index::rank(void) const { return _indices.size(); }
+// Resize preserves existing components; components added by growth are
+// value-initialized to 0 by std::vector::resize.
+Index &Index::resize(uint32_t size)
+{
+  _indices.resize(size);
+  return *this;
+}
+
+// Overwrite every component with 'index'.
+Index &Index::fill(uint32_t index)
+{
+  std::fill(_indices.begin(), _indices.end(), index);
+  return (*this);
+}
+
+// Bounds-checked: std::vector::at throws std::out_of_range on a bad axis.
+uint32_t &Index::at(uint32_t axis) { return _indices.at(axis); }
+uint32_t Index::at(uint32_t axis) const { return _indices.at(axis); }
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
--- /dev/null
+#include "nnc/core/linalg/IndexEnumerator.h"
+
+#include <cassert>
+
+using nncc::contrib::core::data::Shape;
+
+// Map a cursor position onto an axis, counting from the innermost
+// (last) axis: cursor 0 is the last axis, cursor rank-1 the first.
+// Enumeration therefore advances the last axis fastest.
+inline uint32_t axis_of(const Shape &shape, uint32_t cursor)
+{
+  const uint32_t rank = shape.rank();
+  assert(cursor < rank);
+  return rank - cursor - 1;
+}
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+IndexEnumerator::IndexEnumerator(const Shape &shape) : _shape{shape}, _cursor(0)
+{
+  const uint32_t rank = _shape.rank();
+
+  // Initialize _index: start at the all-zero coordinate
+  _index.resize(rank);
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    _index.at(axis) = 0;
+  }
+
+  // Initialize _cursor: position it on the first (innermost-first) axis
+  // with a non-zero extent; if none exists, _cursor == rank and the
+  // enumerator starts out invalid
+  for (_cursor = 0; _cursor < rank; ++_cursor)
+  {
+    const auto axis = axis_of(_shape, _cursor);
+
+    if (_index.at(axis) < _shape.dim(axis))
+    {
+      break;
+    }
+  }
+}
+
+// Odometer-style increment: bump the lowest-order axis that still has
+// room, zero every lower-order axis, then reset the cursor.  When no
+// axis can be advanced, _cursor stays at rank and valid() turns false.
+void IndexEnumerator::advance(void)
+{
+  const uint32_t rank = _shape.rank();
+
+  // Find axis to be updated
+  while (_cursor < rank)
+  {
+    const auto axis = axis_of(_shape, _cursor);
+
+    if ((_index.at(axis)) + 1 < _shape.dim(axis))
+    {
+      break;
+    }
+
+    ++_cursor;
+  }
+
+  if (_cursor == rank)
+  {
+    return;
+  }
+
+  // Update index
+  _index.at(axis_of(_shape, _cursor)) += 1;
+
+  for (uint32_t pos = 0; pos < _cursor; ++pos)
+  {
+    const auto axis = axis_of(_shape, pos);
+    _index.at(axis) = 0;
+  }
+
+  // Reset cursor
+  _cursor = 0;
+}
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
--- /dev/null
+#include "nnc/core/linalg/IndexRange.h"
+#include "nnc/core/linalg/IndexEnumerator.h"
+
+#include <cassert>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+IndexRange::IndexRange(const Shape &shape) : _shape(shape)
+{
+  // DO NOTHING
+}
+
+// An index is a member iff its rank matches the shape's and every
+// component is strictly below the corresponding dimension.
+bool IndexRange::member(const Index &index) const
+{
+  if (index.rank() != _shape.rank())
+  {
+    return false;
+  }
+
+  const auto rank = _shape.rank();
+
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    if (!(index.at(axis) < _shape.dim(axis)))
+    {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Visit every index in the range, delegating the traversal order to
+// IndexEnumerator (last axis varies fastest).
+void IndexRange::iterate(const std::function<void(const Index &)> &f) const
+{
+  for (IndexEnumerator e{_shape}; e.valid(); e.advance())
+  {
+    f(e.current());
+  }
+}
+
+IndexRange range(const Shape &shape) { return IndexRange{shape}; }
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
--- /dev/null
+#include "nnc/core/linalg/Reader.h"
+
+// DO NOT REMOVE THIS FILE
+//
+// This file is introduced to check the self-completeness of 'Reader.h'
--- /dev/null
+#include "nnc/core/linalg/Shape.h"
+
+#include <algorithm>
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+
+Shape::Shape(std::initializer_list<uint32_t> &&l) : _dims{l}
+{
+  // DO NOTHING
+}
+
+uint32_t Shape::rank(void) const { return _dims.size(); }
+// Resize preserves existing extents; extents added by growth are
+// value-initialized to 0 by std::vector::resize.
+Shape &Shape::resize(uint32_t size)
+{
+  _dims.resize(size);
+  return *this;
+}
+
+// Bounds-checked: std::vector::at throws std::out_of_range on a bad axis.
+uint32_t &Shape::dim(uint32_t axis) { return _dims.at(axis); }
+uint32_t Shape::dim(uint32_t axis) const { return _dims.at(axis); }
+
+// Drop every axis whose extent is 1 via the erase-remove idiom.
+Shape &Shape::squeeze(void)
+{
+  _dims.erase(std::remove(_dims.begin(), _dims.end(), 1), _dims.end());
+  return *this;
+}
+
+// Product of all extents.
+// NOTE(review): a rank-0 (scalar) shape yields 0 here, not 1 — confirm
+// callers rely on this convention before changing it.
+uint64_t num_elements(const Shape &shape)
+{
+  if (shape.rank() == 0)
+  {
+    return 0;
+  }
+
+  uint64_t res = 1;
+
+  for (uint32_t axis = 0; axis < shape.rank(); ++axis)
+  {
+    res *= shape.dim(axis);
+  }
+
+  return res;
+}
+
+// Non-mutating variant of Shape::squeeze.
+Shape squeeze(const Shape &shape)
+{
+  Shape res{shape};
+  res.squeeze();
+  return res;
+}
+
+// Shapes are equal iff ranks match and every extent matches.
+bool operator==(const Shape &lhs, const Shape &rhs)
+{
+  if (lhs.rank() != rhs.rank())
+  {
+    return false;
+  }
+
+  for (uint32_t axis = 0; axis < lhs.rank(); ++axis)
+  {
+    if (lhs.dim(axis) != rhs.dim(axis))
+    {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
using IrTensor = nncc::contrib::core::ADT::TensorVariant;
using nncc::contrib::core::IR::model::Graph;
using nncc::contrib::core::IR::model::ADT::INode;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
class ModelVisitor : public Visitor
{
#include "nnc/core/IR/model/graph/ir_node.h"
#include "nnc/core/linalg/TensorVariant.h"
#include "nnc/core/IR/model/operations/common.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "caffe/proto/caffe.pb.h"
using nncc::contrib::core::IR::model::Graph;
using nncc::contrib::core::IR::model::ADT::INode;
using IrTensor = nncc::contrib::core::ADT::TensorVariant;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
class OpCreator
{
#include <vector>
#include <cassert>
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "nnc/core/IR/model/operations/variable_op.h"
#include "nnc/core/linalg/TensorUtil.h"
#include "PluginException.h"
{
using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
using nncc::contrib::core::data::util::transposeTensor;
void ModelVisitor::visit(const NetParameter& np)
#ifndef NNCC_SHAPE_HELPER_H
#define NNCC_SHAPE_HELPER_H
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
namespace nncc
{
namespace common
{
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
class ShapeHelper
{
#include "nnc/core/IR/model/graph/graph.h"
#include "nnc/core/IR/model/graph/ir_node.h"
#include "nnc/core/linalg/TensorVariant.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "nnc/core/IR/model/operations/common.h"
using nncc::contrib::core::IR::model::Graph;
using nncc::contrib::core::IR::model::ADT::INode;
using IrTensor = nncc::contrib::core::ADT::TensorVariant;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
class OpCreator
{
#include "schema_v3.h"
#include "PluginException.h"
-#include "nncc/core/ADT/tensor/Shape.h"
-#include "nncc/core/ADT/tensor/Index.h"
-#include "nncc/core/ADT/tensor/IndexRange.h"
+#include "nnc/core/linalg/Shape.h"
+#include "nnc/core/linalg/Index.h"
+#include "nnc/core/linalg/IndexRange.h"
#include "nnc/core/linalg/TensorUtil.h"
#include "nnc/core/IR/model/operations/variable_op.h"
namespace tflite
{
-using nncc::core::ADT::tensor::Index;
-using nncc::core::ADT::tensor::IndexRange;
+using nncc::contrib::core::data::Index;
+using nncc::contrib::core::data::IndexRange;
using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
using nncc::contrib::core::data::util::transposeTensor;
IrVisitor::IrVisitor()
#include "nnc/core/IR/model/graph/graph.h"
#include "nnc/core/IR/model/actions/ShapeInference.h"
#include "nnc/core/IR/model/operations/reshape_op.h"
-#include "nncc/core/ADT/tensor/Shape.h"
+#include "nnc/core/linalg/Shape.h"
#include "gtest/gtest.h"
using namespace nncc::contrib::core::IR::model;
-using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::Shape;
TEST(ShapeInferenceTest, ReshapeAutoDimension) {
Graph g;
#include "nnc/core/linalg/TensorVariant.h"
-#include "nncc/core/ADT/feature/Shape.h"
#include <gtest/gtest.h>
using namespace nncc::contrib::core::ADT;
-using namespace nncc::core::ADT::tensor;
+using namespace nncc::contrib::core::data;
TEST(TensorVariant, BasicTest) {
Shape shape{2,2};
#include "nnc/core/IR/model/operations/operation.h"
#include "nnc/core/IR/model/operations/reshape_op.h"
#include "nnc/core/IR/model/graph/ir_node.h"
-#include "nncc/core/ADT/feature/Shape.h"
#include <gtest/gtest.h>
#include "nnc/core/IR/model/operations/operation.h"
-#include "nncc/core/ADT/feature/Shape.h"
#include <gtest/gtest.h>
using namespace nncc::contrib::core::IR::model;
-using namespace nncc::core::ADT::tensor;
+using namespace nncc::contrib::core::data;
TEST(OpDescription, InputOutputShapeTest) {
Shape inShape{1,2,3};
using namespace std;
using namespace nncc::contrib::backend::soft;
using namespace nncc::contrib::core::IR::model; // ShapeInference and Graph
-using namespace nncc::contrib::core; // data namespace for Tensor
-using namespace nncc::core::ADT; // for INode and TensorVariant tensor namespace for Index and Shape
+using namespace nncc::contrib::core; // data namespace for TensorVariant, Tensor, Index and Shape
using namespace nncc::contrib::backend::interpreter;
/*
}
/** Fills NNC Shape object with data from src container*/
-void fillNShape(tensor::Shape &nshape, const vector<int> &rawShapeData)
+void fillNShape(data::Shape &nshape, const vector<int> &rawShapeData)
{
int shapeRank = rawShapeData.size();
nshape.resize(shapeRank);
}
/** Converts NNC Shape to artifact Shape*/
-void copyAShapeFromNShape(Shape &ashape, const tensor::Shape &src)
+void copyAShapeFromNShape(Shape &ashape, const data::Shape &src)
{
int shapeRank = src.rank();
ashape.setDims(shapeRank);
}
/** Fills NNC and artifact Shape objects with data from rawShapeData*/
-void fillShapes(tensor::Shape &nshape, Shape &ashape, const vector<int> &rawShapeData)
+void fillShapes(data::Shape &nshape, Shape &ashape, const vector<int> &rawShapeData)
{
fillNShape(nshape, rawShapeData);
copyAShapeFromNShape(ashape, nshape);
{
float t = start;
data::Tensor<float> wrapper(dst);
- for (tensor::Index idx: data::ShapeRange(dst.getShape()))
+ for (data::Index idx: data::ShapeRange(dst.getShape()))
{
wrapper.at(idx) = sin(t) * 2.0f;
t += 1.0f;
}
}
-TensorVariant createNTensor(tensor::Shape &shape, float start)
+TensorVariant createNTensor(data::Shape &shape, float start)
{
shared_ptr<char> dataBuf(
- new char[sizeof(float)*tensor::num_elements(shape)], default_delete<char[]>());
+ new char[sizeof(float)*data::num_elements(shape)], default_delete<char[]>());
TensorVariant tensor(shape, dataBuf, TensorVariant::DTYPE::FLOAT, sizeof(float));
fillNTensor(tensor, start);
return tensor;
Index artIdx;
int rank = src.getShape().rank();
artIdx.setDims(rank);
- for (tensor::Index idx: data::ShapeRange(src.getShape()))
+ for (data::Index idx: data::ShapeRange(src.getShape()))
{
for (int i = 0; i < rank; ++i)
{
void fillTensors(unique_ptr<TensorVariant> &nTensor, Tensor &aTensor, const vector<int> &shape, float start)
{
Shape aShape;
- tensor::Shape nShape;
+ data::Shape nShape;
fillShapes(nShape, aShape, shape);
aTensor.reShape(aShape);
shared_ptr<char> dataBuf(
- new char[sizeof(float)*tensor::num_elements(nShape)], default_delete<char[]>());
+ new char[sizeof(float)*data::num_elements(nShape)], default_delete<char[]>());
nTensor.reset(new TensorVariant(nShape, dataBuf, TensorVariant::DTYPE::FLOAT, sizeof(float)));
fillNTensor(*nTensor, start);
copyATensorFromNTensor(aTensor, *nTensor);
/** Compares nnc TensorVariant and artifact Tensor objects*/
void compareResults(const TensorVariant &refNTensor, const Tensor &testATensor)
{
- const tensor::Shape &tvShape = refNTensor.getShape();
+ const data::Shape &tvShape = refNTensor.getShape();
const Shape &atShape = testATensor.getShape();
ASSERT_EQ(tvShape.rank(), atShape.getDims());
int rank = tvShape.rank();
}
Index artifactIdx;
artifactIdx.setDims(rank);
- for (tensor::Index tvIdx: data::ShapeRange(tvShape))
+ for (data::Index tvIdx: data::ShapeRange(tvShape))
{
for (int i = 0; i < rank; ++i)
{
TEST(cpp_operations_test, bias)
{
vector<int> inputShapeData{2, 3, 4, 5};
- tensor::Shape weightsShape{5};
+ data::Shape weightsShape{5};
vector<unique_ptr<TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
for (iT strideW = 1; strideW <= 3; ++strideW)
{
vector<int> inputShapeData{5, 7, static_cast<int>(inputC)}; // HWC
- tensor::Shape kernelShape{kernelH, kernelW, inputC, outputC}; // HWCN
- tensor::Shape strides{strideH, strideW, 1};
+ data::Shape kernelShape{kernelH, kernelW, inputC, outputC}; // HWCN
+ data::Shape strides{strideH, strideW, 1};
vector<unique_ptr<TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
for (iT multiplier = 1; multiplier <= 2; ++multiplier)
{
vector<int> inputShapeData{5, 7, static_cast<int>(channels)}; // HWC
- tensor::Shape kernelShape{kernelH, kernelW, channels, multiplier}; // HWCN
- tensor::Shape strides{strideH, strideW, 1};
+ data::Shape kernelShape{kernelH, kernelW, channels, multiplier}; // HWCN
+ data::Shape strides{strideH, strideW, 1};
vector<unique_ptr<TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
TEST(cpp_operations_test, fully_connected)
{
vector<int> inputShapeData{1, 13};
- tensor::Shape weightsShape{13, 7};
+ data::Shape weightsShape{13, 7};
vector<unique_ptr<TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
for (iT strideW = 1; strideW <= 3; ++strideW)
{
vector<int> shapeData{5, 7, static_cast<int>(channels)};
- tensor::Shape windowShape{windowH, windowW, 1};
- tensor::Shape strides{strideH, strideW, 1};
+ data::Shape windowShape{windowH, windowW, 1};
+ data::Shape strides{strideH, strideW, 1};
auto padT = IR::model::ops::PaddingType::Valid;
Tensor aInputTensor;
vector<unique_ptr<TensorVariant>> inputNTensors(1);
// test prerequisites
vector<int> inputShapeData{2,3,4,5};
vector<int> outputShapeData{1,1,1,120};
- tensor::Shape nOutputShape;
+ data::Shape nOutputShape;
fillNShape(nOutputShape, outputShapeData);
Tensor aInputTensor;
vector<unique_ptr<TensorVariant>> inputNTensors(1);