From: Pavel Iliutchenko/AI Tools Lab/SRR/Assistant Engineer/Samsung Electronics
Date: Thu, 22 Nov 2018 11:30:58 +0000 (+0300)
Subject: [nnc] Support Pad operation on TFLite Importer and Interpreter (#2330)
X-Git-Tag: nncc_backup~1268
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=14b63a54e01bc37acf5c38a0bb7f3b633bf7f7dc;p=platform%2Fcore%2Fml%2Fnnfw.git

[nnc] Support Pad operation on TFLite Importer and Interpreter (#2330)

* Support Pad op in TFLite importer and interpreter

Signed-off-by: Pavel Iliutchenko
---

diff --git a/contrib/nnc/core/modelIR/Deserializer.cpp b/contrib/nnc/core/modelIR/Deserializer.cpp
index 2a99c90..82e1a6a 100644
--- a/contrib/nnc/core/modelIR/Deserializer.cpp
+++ b/contrib/nnc/core/modelIR/Deserializer.cpp
@@ -76,22 +76,22 @@ static TensorVariant deserializeFromMessage(const proto::TensorProto& object_as_
   auto raw_data = new char[raw_data_size];
   tensor_content.copy(raw_data, raw_data_size);
 
-  TensorVariant::DTYPE tv_dtype;
+  DTYPE tv_dtype;
   size_t element_size;
 
   switch (dt) {
     case proto::DataType::DT_INT32:
       element_size = sizeof(int32_t);
-      tv_dtype = TensorVariant::DTYPE::INT;
+      tv_dtype = DTYPE::INT32;
       break;
     case proto::DataType::DT_FLOAT:
       element_size = sizeof(float);
-      tv_dtype = TensorVariant::DTYPE::FLOAT;
+      tv_dtype = DTYPE::FLOAT32;
       break;
     case proto::DataType::DT_DOUBLE:
       element_size = sizeof(double);
-      tv_dtype = TensorVariant::DTYPE::FLOAT;
+      tv_dtype = DTYPE::FLOAT32;
       break;
     default:
       throw std::logic_error("Deserializer: received unsupported data type");
diff --git a/contrib/nnc/core/modelIR/Index.cpp b/contrib/nnc/core/modelIR/Index.cpp
index 78f2ae2..d6df7b5 100644
--- a/contrib/nnc/core/modelIR/Index.cpp
+++ b/contrib/nnc/core/modelIR/Index.cpp
@@ -23,10 +23,9 @@ namespace nnc
 namespace mir
 {
 
-Index::Index(std::initializer_list<int32_t> &&l) : _indices{l}
-{
-  // DO NOTHING
-}
+Index::Index(std::initializer_list<int32_t>&& l) : _indices{l} {}
+
+Index::Index(std::vector<int32_t>&& vec) : _indices(vec) {}
 
 int32_t Index::rank(void) const { return _indices.size(); }
 
 Index &Index::resize(int32_t size)
diff --git a/contrib/nnc/core/modelIR/TensorVariant.cpp b/contrib/nnc/core/modelIR/TensorVariant.cpp
index 0ec27b1..35bb440 100644
--- a/contrib/nnc/core/modelIR/TensorVariant.cpp
+++ b/contrib/nnc/core/modelIR/TensorVariant.cpp
@@ -21,8 +21,10 @@ namespace nnc
 namespace mir
 {
 
-TensorVariant::TensorVariant(const Shape& shape, const std::shared_ptr<char>& data, TensorVariant::DTYPE dtype, size_t element_size)
-  : _dtype(dtype), _data(data), _strides{0}, _rank(shape.rank()), _shape(shape), _element_size(element_size)
+TensorVariant::TensorVariant(const Shape& shape, const std::shared_ptr<char>& data,
+                             DTYPE dtype, size_t element_size)
+  : _dtype(dtype), _data(data), _strides{0}, _rank(shape.rank()),
+    _shape(shape), _element_size(element_size)
 {
   int stride = 1;
   for (int d = _rank - 1; d >= 0; --d)
diff --git a/contrib/nnc/include/core/modelIR/DataType.h b/contrib/nnc/include/core/modelIR/DataType.h
new file mode 100644
index 0000000..22d6f90
--- /dev/null
+++ b/contrib/nnc/include/core/modelIR/DataType.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NCC_CORE_DATA_TYPE_H_
+#define _NCC_CORE_DATA_TYPE_H_
+
+namespace nnc {
+namespace mir {
+
+enum class DTYPE {
+  UNKNOWN,
+  FLOAT32,
+  INT32
+};
+
+} // namespace mir
+} // namespace nnc
+
+#endif //_NCC_CORE_DATA_TYPE_H_
diff --git a/contrib/nnc/include/core/modelIR/Index.h b/contrib/nnc/include/core/modelIR/Index.h
index cf39585..931e11a 100644
--- a/contrib/nnc/include/core/modelIR/Index.h
+++ b/contrib/nnc/include/core/modelIR/Index.h
@@ -31,7 +31,8 @@ class Index {
 public:
   Index() = default;
 
-  Index(std::initializer_list<int32_t> &&l);
+  Index(std::initializer_list<int32_t>&& l);
+  Index(std::vector<int32_t>&& vec);
 
   int32_t rank(void) const;
 
diff --git a/contrib/nnc/include/core/modelIR/Scalar.h b/contrib/nnc/include/core/modelIR/Scalar.h
new file mode 100644
index 0000000..7a5c59b
--- /dev/null
+++ b/contrib/nnc/include/core/modelIR/Scalar.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_SCALAR_H_
+#define _NNC_CORE_SCALAR_H_
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+
+#include "core/modelIR/DataType.h"
+
+namespace nnc {
+namespace mir {
+
+/**
+ * @brief Scalar class
+ */
+class Scalar {
+public:
+  /**
+   * @brief Constructs a Scalar value for modelIR
+   * @param data Data pointer
+   * @param dtype Data type
+   * @param data_size Data size
+   */
+  explicit Scalar(const char* data, DTYPE dtype, unsigned data_size) {
+    assert(data_size <= maxScalarLength);
+    _dataType = dtype;
+    memcpy(_data, data, data_size);
+  }
+
+  /**
+   * @return Pointer to data
+   */
+  char* getRawData() { return _data; }
+
+  /**
+   * @return Data type
+   */
+  DTYPE getDataType() { return _dataType; }
+
+  /**
+   * @return Data size in bytes, or -1 for an unknown type
+   */
+  int getDataSize() const {
+    switch (_dataType) {
+      case DTYPE::UNKNOWN:
+        return -1;
+      case DTYPE::FLOAT32:
+      case DTYPE::INT32:
+        return 4;
+    }
+  }
+
+  /**
+   * @tparam T Class of returned object
+   * @return Object of T type
+   */
+  template <typename T>
+  T get() const {
+    assert(sizeof(T) <= getDataSize());
+    T result;
+    memcpy(&result, _data, sizeof(T));
+    return result;
+  }
+
+private:
+  static const unsigned int maxScalarLength = 8;
+  DTYPE _dataType;
+  char _data[maxScalarLength];
+};
+
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_SCALAR_H_
diff --git a/contrib/nnc/include/core/modelIR/TensorVariant.h b/contrib/nnc/include/core/modelIR/TensorVariant.h
index b53431c..ba574e4 100644
--- a/contrib/nnc/include/core/modelIR/TensorVariant.h
+++ b/contrib/nnc/include/core/modelIR/TensorVariant.h
@@ -23,6 +23,7 @@
 
 #include "core/modelIR/Index.h"
 #include "core/modelIR/Shape.h"
+#include "core/modelIR/DataType.h"
 
 namespace nnc
 {
@@ -33,11 +34,6 @@ constexpr int MAX_DIMENSIONS = 32;
 
 class TensorVariant
 {
 public:
-  enum class DTYPE {
-    UNKNOWN,
-    FLOAT,
-    INT
-  };
 
   explicit TensorVariant(const Shape& shape, const std::shared_ptr<char>& data,
                          DTYPE dtype, size_t element_size);
diff --git a/contrib/nnc/include/core/modelIR/operations/PadOp.h b/contrib/nnc/include/core/modelIR/operations/PadOp.h
index 2040dba..83dba33 100644
--- a/contrib/nnc/include/core/modelIR/operations/PadOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/PadOp.h
@@ -18,45 +18,51 @@
 #define _NCC_CORE_IR_MODEL_PAD_H_
 
 #include "core/modelIR/Operation.h"
-#include "core/modelIR/TensorVariant.h"
-#include <cassert>
+
+#include "core/modelIR/Scalar.h"
 
 namespace nnc {
 namespace mir {
 namespace ops {
 
+/**
+ * @brief Pad operation class
+ */
 class PadOp : public Operation {
 public:
-  enum class PaddingMode {
-    CONST,
-    REFLECT,
-    SYMMETRIC
-  };
-
-  PadOp(const IODescriptor& arg, PaddingMode paddingMode, int numDims,
-        const TensorVariant& constant_value)
-      : Operation(Type::pad, {arg}), _paddingMode(paddingMode), _numDims(numDims),
-        _constant_value(constant_value) {
-    _paddings.resize(_numDims);
-  }
+  /**
+   * @brief Constructs a Pad operation in modelIR
+   * @param arg IODescriptor
+   * @param numDims Number of dimensions
+   * @param paddings Vector with pairs of paddings (left, right)
+   * @param scalar_value Constant value filling the padded region
+   */
+  PadOp(const IODescriptor& arg, int32_t numDims,
+        const std::vector<std::pair<int32_t, int32_t>>& paddings,
+        const Scalar& scalar_value)
+      : Operation(Type::pad, {arg}), _numDims(numDims),
+        _paddings(paddings), _scalarValue(scalar_value) {}
 
-  PaddingMode getPaddingMode() const { return _paddingMode; }
-
-  void setPadding(int dim, int32_t front_pad, int32_t back_pad) {
-    assert(dim < _numDims);
-    _paddings[dim].first = front_pad;
-    _paddings[dim].second = back_pad;
-  }
-
-  std::pair<int32_t, int32_t> getPaddingForDim(int dim) { return _paddings[dim]; }
+  /**
+   * @param dim Dimension number
+   * @return Pair of paddings for dimension
+   */
+  std::pair<int32_t, int32_t> getPaddingForDim(int dim) const { return _paddings[dim]; }
 
+  /**
+   * @return Number of dimensions
+   */
   int getNumDim() const { return _numDims; }
 
+  /**
+   * @return Scalar value
+   */
+  Scalar getScalar() const { return _scalarValue; }
+
 private:
-  PaddingMode _paddingMode;
   std::vector<std::pair<int32_t, int32_t>> _paddings;
-  int _numDims;
-  TensorVariant _constant_value;
+  int32_t _numDims;
+  Scalar _scalarValue;
 };
 
 } // namespace ops
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
index f34d699..c8cd6fe 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
+++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
@@ -200,7 +200,7 @@ void CaffeImporter::processDeprecatedInput() {
 }
 
 std::shared_ptr<IrTensor> CaffeImporter::createTensor(const BlobProto& bp) {
-  auto type = IrTensor::DTYPE::FLOAT;
+  auto type = DTYPE::FLOAT32;
   size_t element_size;
   const char* src_data;
 
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
index 99979f3..145aaee 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
+++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
@@ -206,7 +206,7 @@ fixGroupedKernel(int groups, std::shared_ptr<IrTensor> folded_kernel) {
                          unfold_kernel->at(idx));
     } else {
       // fill element of output kernel with zero element
-      assert(folded_kernel->getDataType() == IrTensor::DTYPE::FLOAT &&
+      assert(folded_kernel->getDataType() == DTYPE::FLOAT32 &&
              "unsupported data type, add appropriate zero element creation");
       float* elem = reinterpret_cast<float*>(unfold_kernel->at(idx));
       *elem = 0.0f;
diff --git a/contrib/nnc/passes/interpreter/Interpreter.cpp b/contrib/nnc/passes/interpreter/Interpreter.cpp
index 8942abc..fe89352 100644
--- a/contrib/nnc/passes/interpreter/Interpreter.cpp
+++ b/contrib/nnc/passes/interpreter/Interpreter.cpp
@@ -39,6 +39,7 @@
 #include "core/modelIR/operations/TanhOp.h"
 #include "core/modelIR/operations/ElementwiseOp.h"
 #include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/PadOp.h"
 
 #include "ops/Bias.h"
 #include "ops/Concat.h"
@@ -53,6 +54,7 @@
 #include "ops/Scale.h"
 #include "ops/Dropout.h"
 #include "ops/BatchNorm.h"
+#include "ops/Pad.h"
 
 namespace nnc
 {
@@ -270,7 +272,10 @@ void NNInterpreter::visit(ops::SqueezeOp& op) {
 }
 
 void NNInterpreter::visit(ops::PadOp& op) {
-  throw PassException("Not implemented yet");
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  auto& input = var(operand.op->getId())[operand.index];
+  var(op.getId()) = Pad(input, op)();
 }
 
 } // namespace nnc
diff --git a/contrib/nnc/passes/interpreter/interpreter_pass.cpp b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
index 4433423..43a2aa8 100644
--- a/contrib/nnc/passes/interpreter/interpreter_pass.cpp
+++ b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
@@ -170,7 +170,8 @@ TensorVariant InterpreterPass::loadInput(const Shape &shape)
   assert(is_error != EOF && "Can not close file!");
   (void)is_error;
 
-  return TensorVariant(shape, std::shared_ptr<char>(data, [](const char* d) { delete[] d; }), TensorVariant::DTYPE::FLOAT, sizeof(float));
+  return TensorVariant(shape, std::shared_ptr<char>(data, std::default_delete<char[]>()),
+                       DTYPE::FLOAT32, sizeof(float));
 }
 
 InterpreterPass::~InterpreterPass()
diff --git a/contrib/nnc/passes/interpreter/ops/OperationImpl.h b/contrib/nnc/passes/interpreter/ops/OperationImpl.h
index bf91d2f..7846dbb 100644
--- a/contrib/nnc/passes/interpreter/ops/OperationImpl.h
+++ b/contrib/nnc/passes/interpreter/ops/OperationImpl.h
@@ -46,7 +46,7 @@ protected:
     std::shared_ptr<T> data(od, [](const T* d) { delete[] d; });
     // Use hardcoded DTYPE for now, since there's no support for operations on types other than
    // floats
-    mir::TensorVariant t(shape, data, mir::TensorVariant::DTYPE::FLOAT);
+    mir::TensorVariant t(shape, data, mir::DTYPE::FLOAT32);
     return t;
   }
 
diff --git a/contrib/nnc/passes/interpreter/ops/Pad.cpp b/contrib/nnc/passes/interpreter/ops/Pad.cpp
new file mode 100644
index 0000000..072e987
--- /dev/null
+++ b/contrib/nnc/passes/interpreter/ops/Pad.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "core/modelIR/ShapeRange.h"
+
+#include "Pad.h"
+
+namespace nnc {
+
+using namespace mir;
+
+std::vector<TensorVariant> Pad::operator()() {
+  auto result = allocate_tensor(_op.getOutputShape(0));
+  Tensor<float> result_accessor(result);
+
+  Shape out_shape = result_accessor.getShape();
+
+  ShapeRange out_range(out_shape);
+  int32_t rank = _op.getNumDim();
+
+  Index temp_index;
+  temp_index.resize(rank);
+
+  bool index_on_padding(false);
+  for (const Index& ind : out_range) {
+    index_on_padding = false;
+
+    for (int32_t i = 0; i < rank; i++) {
+      // the index falls on input values
+      if (ind.at(i) >= _op.getPaddingForDim(i).first &&
+          ind.at(i) < out_shape.dim(i) - _op.getPaddingForDim(i).second) {
+        temp_index.at(i) = ind.at(i) - _op.getPaddingForDim(i).first;
+      } else { // not inside the input
+        index_on_padding = true;
+        break;
+      }
+    }
+    if (index_on_padding) {
+      result_accessor.at(ind) = _op.getScalar().get<float>();
+    } else {
+      result_accessor.at(ind) = _input.at(temp_index);
+    }
+  }
+
+  return {result};
+}
+
+} // namespace nnc
diff --git a/contrib/nnc/passes/interpreter/ops/Pad.h b/contrib/nnc/passes/interpreter/ops/Pad.h
new file mode 100644
index 0000000..1f4ed32
--- /dev/null
+++ b/contrib/nnc/passes/interpreter/ops/Pad.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_PAD_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_PAD_IMPL_
+
+#include "OperationImpl.h"
+#include "core/modelIR/operations/PadOp.h"
+
+namespace nnc {
+
+/**
+ * @brief Implements PadOp for the interpreter backend
+ *
+ * This operation pads a tensor according to the specified paddings:
+ * for each dimension of the input, values are added before and after
+ * the contents.
+ */
+class Pad : public OperationImpl<float> {
+public:
+  /**
+   * @param input The input tensor
+   * @param op The Pad operation object
+   */
+  Pad(const mir::TensorVariant& input, const mir::ops::PadOp& op)
+      : _input(input), _op(op) {
+    assert(_input.getShape().rank() == _op.getNumDim());
+  }
+
+  /**
+   * @brief Computes the result of applying the operation
+   * @return Vector of all outputs from this node
+   */
+  std::vector<mir::TensorVariant> operator()() override;
+
+private:
+  const mir::Tensor<float> _input;
+  const mir::ops::PadOp& _op;
+};
+
+} // namespace nnc
+
+#endif // _NNC_CORE_BACKEND_INTERPRETER_PAD_IMPL_
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
index 14ccd66..0edd532 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
+++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
@@ -93,6 +93,7 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
     case BuiltinOperator_SOFTMAX:
     case BuiltinOperator_RESHAPE:
     case BuiltinOperator_SQUEEZE:
+    case BuiltinOperator_PAD:
     case BuiltinOperator_ADD:
     case BuiltinOperator_MUL:
     case BuiltinOperator_MAXIMUM:
@@ -200,6 +201,9 @@ void TfliteImporter::walkOperator(const Operator* op) {
       outputs = _opCreator->createTransposeConv(
          inputs, params, op->builtin_options_as<TransposeConvOptions>());
       break;
+    case BuiltinOperator_PAD:
+      outputs = _opCreator->createPad(inputs, params, op->builtin_options_as<PadOptions>());
+      break;
     default:
       assert(false && "All unsupported types should have been found before this pass.");
   }
@@ -269,27 +273,27 @@ std::shared_ptr<IrTensor> TfliteImporter::createTensor(const Tensor* t, const Buffer* b)
   std::copy(b->data()->begin(), b->data()->end(), tensor_buffer_copy.get());
 
   size_t elementSize;
-  IrTensor::DTYPE type;
+  mir::DTYPE type;
   switch (t->type()) {
     case TensorType_UINT8:
       elementSize = sizeof(uint8_t);
-      type = IrTensor::DTYPE::INT;
+      type = mir::DTYPE::INT32; // TODO
       break;
     case TensorType_FLOAT16:
       elementSize = sizeof(uint16_t);
-      type = IrTensor::DTYPE::FLOAT;
+      type = mir::DTYPE::FLOAT32; // TODO
       break;
     case TensorType_INT32:
       elementSize = sizeof(uint32_t);
-      type = IrTensor::DTYPE::INT;
+      type = mir::DTYPE::INT32;
       break;
     case TensorType_FLOAT32:
       elementSize = sizeof(uint32_t);
-      type = IrTensor::DTYPE::FLOAT;
+      type = mir::DTYPE::FLOAT32;
       break;
     case TensorType_INT64:
       elementSize = sizeof(uint64_t);
-      type = IrTensor::DTYPE::INT;
+      type = mir::DTYPE::INT32; // TODO
       break;
     default:
       throw PassException(
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
index a1bb93e..46af639 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
+++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
@@ -31,6 +31,8 @@
 #include "core/modelIR/operations/BiasAddOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/PadOp.h"
+#include "core/modelIR/Tensor.h"
 
 #include "pass/PassException.h"
 
 using namespace nnc::mir;
@@ -233,4 +235,26 @@ std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(InputOps inputs, InputParams params,
                          squeeze_dims);
 }
 
+std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps inputs, InputParams params,
+                                                        const ::tflite::PadOptions *opts) {
+  assert(params.size() == 1); // only Pad with a single paddings parameter is supported
+  std::vector<std::pair<int32_t, int32_t>> paddings;
+
+  auto paddings_tensor = mir::Tensor<int32_t>(*params[0].get());
+  // check that the paddings tensor has the expected [num_dims, 2] structure
+  assert(paddings_tensor.getShape().dim(1) == 2);
+
+  int32_t num_dims = paddings_tensor.getShape().dim(0);
+  // build the paddings structure
+  for (int i = 0; i < num_dims; i++)
+    paddings.emplace_back(paddings_tensor.at(Index({i, 0})), paddings_tensor.at(Index({i, 1})));
+
+  // the constant value is float because the input type is not visible here
+  float const_value = 0.0; // different constant values are not supported
+  // create a scalar with the constant value
+  Scalar constant_value(reinterpret_cast<char*>(&const_value), DTYPE::FLOAT32, sizeof(float));
+
+  return createOp<ops::PadOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
+                              num_dims, paddings, constant_value);
+}
+
 } // namespace nnc
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
index 65f040b..bbe43ef 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
+++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
@@ -25,6 +25,7 @@
 
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/TensorVariant.h"
+#include "core/modelIR/Scalar.h"
 #include "core/modelIR/Shape.h"
 
 #include "core/modelIR/operations/common.h"
@@ -86,6 +87,16 @@ public:
                          InputOps&, InputParams&,
                          const ::tflite::TransposeConvOptions*);
 
+  /**
+   * @brief Create a Pad operation
+   * @param inputs Operations vector
+   * @param params Tensor with paddings for each dimension
+   * @param opts TFLite PadOptions
+   * @return Operations vector
+   */
+  std::vector<mir::Operation*> createPad(InputOps& inputs, InputParams& params,
+                                         const ::tflite::PadOptions* opts);
+
   void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
 
   void checkConcatenation(const ::tflite::ConcatenationOptions*, std::set<std::string>&);
diff --git a/contrib/nnc/tests/interpreter/op_info_util.cpp b/contrib/nnc/tests/interpreter/op_info_util.cpp
index 7c49d83..f633933 100644
--- a/contrib/nnc/tests/interpreter/op_info_util.cpp
+++ b/contrib/nnc/tests/interpreter/op_info_util.cpp
@@ -34,7 +34,7 @@ std::shared_ptr<TensorVariant> getTensor(const opinfo::Tensor* t)
   std::copy(t->data()->begin(), t->data()->end(), reinterpret_cast<char*>(tensorBufferCopy.get()));
 
   size_t elementSize = sizeof(float);
-  TensorVariant::DTYPE type = TensorVariant::DTYPE::FLOAT;
+  DTYPE type = DTYPE::FLOAT32;
 
   Shape tensorShape = ShapeHelper::createShape(*t->shape()->dims(), t->shape()->dims()->size());
 
diff --git a/contrib/nnc/unittests/core/TensorVariant.cpp b/contrib/nnc/unittests/core/TensorVariant.cpp
index 2a33428..ed9d7b9 100644
--- a/contrib/nnc/unittests/core/TensorVariant.cpp
+++ b/contrib/nnc/unittests/core/TensorVariant.cpp
@@ -25,7 +25,7 @@ TEST(TensorVariant, BasicTest) {
   char* ptr = (char*)(new float[4]);
   std::shared_ptr<char> mem(ptr, [](char* d){ delete[] (float*)d; });
 
-  TensorVariant t(shape, mem, TensorVariant::DTYPE::FLOAT, sizeof(float));
+  TensorVariant t(shape, mem, DTYPE::FLOAT32, sizeof(float));
 
   ASSERT_EQ(t.getShape(), shape);
   ASSERT_EQ(t.getOffset({0,0}), 0u);
@@ -36,7 +36,7 @@ TEST(TensorVariant, ElementSizeDeductionTest) {
 
   std::shared_ptr<float> mem(new float[8], [](float* f){ delete[] f; });
 
-  TensorVariant t(shape, mem, TensorVariant::DTYPE::FLOAT);
+  TensorVariant t(shape, mem, DTYPE::FLOAT32);
 
   ASSERT_EQ(t.getElementSize(), sizeof(float));
   ASSERT_EQ((float*)t.at({1,1,1}), mem.get() + 7);
@@ -59,7 +59,7 @@ TEST(TensorVariant, DeletionTest) {
 {
   Shape shape{1,1};
   auto mem = std::shared_ptr<Indicator>(raw_indicator, [](Indicator*& p){ p[0].reset(); });
-  t = new TensorVariant(shape, mem, TensorVariant::DTYPE::UNKNOWN);
+  t = new TensorVariant(shape, mem, DTYPE::UNKNOWN);
   //mem gets destroyed here
 }
 
diff --git a/contrib/nnc/unittests/core/serializer.cpp b/contrib/nnc/unittests/core/serializer.cpp
index 54f84cb..cd5058c 100644
--- a/contrib/nnc/unittests/core/serializer.cpp
+++ b/contrib/nnc/unittests/core/serializer.cpp
@@ -62,7 +62,7 @@ static std::shared_ptr<char> allocateTensorContent(const Shape &shape)
 static TensorVariant allocateIntTensor(const Shape &shape)
 {
   std::shared_ptr<char> data = allocateTensorContent(shape);
-  return TensorVariant(shape, data, TensorVariant::DTYPE::INT);
+  return TensorVariant(shape, data, DTYPE::INT32);
 }
 
 static void checkIntTensor(const Tensor<int32_t>& tensor, const proto::TensorProto& proto_tensor)
@@ -74,7 +74,7 @@ static void checkIntTensor(const Tensor<int32_t>& tensor, const proto::TensorProto&
 static TensorVariant allocateFloatTensor(const Shape &shape)
 {
   std::shared_ptr<char> data = allocateTensorContent(shape);
-  return TensorVariant(shape, data, TensorVariant::DTYPE::FLOAT);
+  return TensorVariant(shape, data, DTYPE::FLOAT32);
 }
 
 static void checkFloatTensor(const Tensor<float>& tensor, const proto::TensorProto& proto_tensor)
@@ -86,7 +86,7 @@ static void checkFloatTensor(const Tensor<float>& tensor, const proto::TensorProto&
 static TensorVariant allocateDoubleTensor(const Shape &shape)
 {
   std::shared_ptr<char> data = allocateTensorContent(shape);
-  return TensorVariant(shape, data, TensorVariant::DTYPE::FLOAT);
+  return TensorVariant(shape, data, DTYPE::FLOAT32);
 }
 
 static void checkDoubleTensor(const Tensor<double>& tensor, const proto::TensorProto& proto_tensor)
diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
index c1d112d..5ca14a6 100644
--- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
+++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -170,7 +170,7 @@ mir::TensorVariant createNTensor(mir::Shape &shape, float start)
 {
   shared_ptr<char> dataBuf(
       new char[sizeof(float) * shape.numElements()], default_delete<char[]>());
-  mir::TensorVariant tensor(shape, dataBuf, mir::TensorVariant::DTYPE::FLOAT, sizeof(float));
+  mir::TensorVariant tensor(shape, dataBuf, mir::DTYPE::FLOAT32, sizeof(float));
   fillNTensor(tensor, start);
   return tensor;
 }
@@ -201,7 +201,7 @@ void fillTensors(unique_ptr<mir::TensorVariant> &nTensor, Tensor &aTensor, const
   aTensor.reShape(aShape);
   shared_ptr<char> dataBuf(
      new char[sizeof(float) * nShape.numElements()], default_delete<char[]>());
-  nTensor.reset(new mir::TensorVariant(nShape, dataBuf, mir::TensorVariant::DTYPE::FLOAT, sizeof(float)));
+  nTensor.reset(new mir::TensorVariant(nShape, dataBuf, mir::DTYPE::FLOAT32, sizeof(float)));
   fillNTensor(*nTensor, start);
   copyATensorFromNTensor(aTensor, *nTensor);
 }
@@ -272,7 +272,7 @@ void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test
     artifact_idx[i] = nnc_idx.at(i);
   }
   assert(ref_nnc_tensor.getElementSize() == 4L &&
-         ref_nnc_tensor.getDataType() == mir::TensorVariant::DTYPE::FLOAT);
+         ref_nnc_tensor.getDataType() == mir::DTYPE::FLOAT32);
   // Input and output data lies in range of [-10, 10],
   // chosen epsilon lies near the edge of float type computational precision
   float ref_data = mir::Tensor<float>(ref_nnc_tensor).at(nnc_idx);
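
Editor's note: the interpreter kernel in Pad.cpp above maps every output index back to an input index by subtracting the per-dimension front padding, and writes the scalar constant wherever any coordinate falls outside the input. Below is a minimal standalone C++ sketch of that mapping for the one-dimensional case; the pad1d helper and the main() driver are illustrative names, not part of the patch or the nnc API.

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Pads a 1-D sequence the way Pad.cpp handles each dimension:
// output positions in [front, input_size + front) copy the input,
// everything else receives the constant value.
static std::vector<float> pad1d(const std::vector<float>& input,
                                std::pair<int32_t, int32_t> padding, // (front, back)
                                float constant) {
  std::vector<float> output(input.size() + padding.first + padding.second, constant);
  for (std::size_t i = 0; i < input.size(); ++i)
    output[i + padding.first] = input[i];
  return output;
}

int main() {
  // paddings = {1, 2}: one element before, two after, matching one row of
  // the [num_dims, 2] paddings tensor that createPad() reads from TFLite.
  std::vector<float> out = pad1d({1.0f, 2.0f, 3.0f}, {1, 2}, 0.0f);
  for (float v : out)
    std::cout << v << ' '; // prints: 0 1 2 3 0 0
  std::cout << '\n';
}

The N-dimensional kernel applies exactly this interval test per dimension: an output index is copied from the input only if every coordinate lands inside its [front, dim_size - back) window, which is why a single failing dimension is enough for Pad.cpp to fall back to the scalar value.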