From: Sangmin Seo/Motion Control Lab (SR)/Staff Engineer/Samsung Electronics
Date: Fri, 8 Jun 2018 08:06:56 +0000 (+0900)
Subject: [PureCL] Support TENSOR_QUANT8_ASYMM type operand (#1623)
X-Git-Tag: 0.2~648
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ab56911301c425fd5b5912249c844410fe7bcd53;p=platform%2Fcore%2Fml%2Fnnfw.git

[PureCL] Support TENSOR_QUANT8_ASYMM type operand (#1623)

For issue #1481

This patch revises `internal::tflite::operand::Object` to keep `scale` and
`zeroPoint` so that operands of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM type are
supported, and modifies `ANeuralNetworksModel_addOperand` to pass the scale
and zeroPoint values as well when appending a new operand.

Signed-off-by: Sangmin Seo
---

diff --git a/runtimes/pure_arm_compute/src/internal/Model.cc b/runtimes/pure_arm_compute/src/internal/Model.cc
index 57cb3d3..87d84f8 100644
--- a/runtimes/pure_arm_compute/src/internal/Model.cc
+++ b/runtimes/pure_arm_compute/src/internal/Model.cc
@@ -67,11 +67,11 @@ namespace tflite
 namespace operand
 {
 
-Index Set::append(const Shape &shape, int32_t type)
+Index Set::append(const Shape &shape, int32_t type, float scale, int32_t zeroPoint)
 {
   int32_t index = _objects.size();
 
-  _objects.emplace_back(new Object{shape, type});
+  _objects.emplace_back(new Object{shape, type, scale, zeroPoint});
 
   return Index{index};
 }
diff --git a/runtimes/pure_arm_compute/src/internal/Model.h b/runtimes/pure_arm_compute/src/internal/Model.h
index 4c2fc5d..746339f 100644
--- a/runtimes/pure_arm_compute/src/internal/Model.h
+++ b/runtimes/pure_arm_compute/src/internal/Model.h
@@ -128,7 +128,9 @@ namespace operand
 class Object
 {
 public:
-  explicit Object(const Shape &shape, const int32_t type) : _shape{shape}, _type{type}
+  explicit Object(const Shape &shape, const int32_t type, const float scale,
+                  const int32_t zeroPoint)
+      : _shape{shape}, _type{type}, _scale{scale}, _zeroPoint{zeroPoint}
   {
     // DO NOTHING
   }
@@ -136,6 +138,8 @@ public:
   const Shape &shape(void) const { return _shape; }
   const int32_t type(void) const { return _type; }
+  const float scale(void) const { return _scale; }
+  const int32_t zeroPoint(void) const { return _zeroPoint; }
 
 private:
   void data(std::unique_ptr &&data) { _data = std::move(data); }
 
@@ -163,6 +167,8 @@ public:
 private:
   const Shape _shape;
   const int32_t _type;
+  const float _scale;
+  const int32_t _zeroPoint;
 
   std::unique_ptr _data;
 };
@@ -192,7 +198,7 @@ public:
 }
 
 public:
-  Index append(const Shape &, int32_t type);
+  Index append(const Shape &, int32_t type, float scale, int32_t zeroPoint);
 
 public:
   const Object &at(const Index &) const;
diff --git a/runtimes/pure_arm_compute/src/model.cc b/runtimes/pure_arm_compute/src/model.cc
index 74f8107..c1e65ea 100644
--- a/runtimes/pure_arm_compute/src/model.cc
+++ b/runtimes/pure_arm_compute/src/model.cc
@@ -21,8 +21,9 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
 {
   // ASSUME A tensor operand should consists of fp32 or int32 values.
   // NOTE We do not care about scala operands.
-  assert((type->dimensionCount == 0) || (type->type == 3 /* ANEURALNETWORKS_TENSOR_FLOAT32 */ ||
-                                         type->type == 4 /* ANEURALNETWORKS_TENSOR_INT32 */));
+  assert((type->dimensionCount == 0) || (type->type == ANEURALNETWORKS_TENSOR_FLOAT32 ||
+                                         type->type == ANEURALNETWORKS_TENSOR_INT32 ||
+                                         type->type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM));
 
   internal::tflite::operand::Shape shape(type->dimensionCount);
 
@@ -31,7 +32,7 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
     shape.dim(axis) = type->dimensions[axis];
   }
 
-  model->deref().operands().append(shape, type->type);
+  model->deref().operands().append(shape, type->type, type->scale, type->zeroPoint);
 
   // NOTE We do NOT allocate CLTensor here as we do not how to interpret this one.
   // TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
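
For context, here is a minimal caller-side sketch of what this patch enables. It is not part of the commit; it assumes the NNAPI-style declarations (ANeuralNetworksOperandType, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANeuralNetworksModel_addOperand) from the NeuralNetworks.h header used by this runtime, and the shape and quantization values are made up for illustration. A quant8 asymmetric tensor carries quantization parameters (real_value = scale * (quantized_value - zeroPoint)), which the runtime now stores in internal::tflite::operand::Object instead of dropping them.

// Hypothetical usage sketch (not part of this commit): add a
// TENSOR_QUANT8_ASYMM operand whose scale/zeroPoint now reach the runtime.
#include <cstdint>
#include "NeuralNetworks.h" // assumed header providing the NNAPI declarations

static int addQuant8Operand(ANeuralNetworksModel *model)
{
  static const uint32_t dims[4] = {1, 8, 8, 3}; // example NHWC shape

  ANeuralNetworksOperandType operand;
  operand.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
  operand.dimensionCount = 4;
  operand.dimensions = dims;
  operand.scale = 0.5f;    // example quantization parameters
  operand.zeroPoint = 127;

  // With this patch, scale and zeroPoint are forwarded through
  // Set::append() into internal::tflite::operand::Object.
  return ANeuralNetworksModel_addOperand(model, &operand);
}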