}
}
+ float quantizationScale = tensorPtr->quantizationScale();
+ int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::Scalar))
{
- float quantizationScale = tensorPtr->quantizationScale();
- int32_t quantizationOffset = tensorPtr->quantizationOffset();
-
- return armnn::TensorInfo(armnn::TensorShape{armnn::Dimensionality::Scalar},
+ return armnn::TensorInfo(TensorShape{armnn::Dimensionality::Scalar},
type,
quantizationScale,
quantizationOffset);
}
+ else if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::NotSpecified))
+ {
+ armnn::TensorInfo result(TensorShape{Dimensionality::NotSpecified},
+ type,
+ quantizationScale,
+ quantizationOffset);
+ return result;
+ }
auto dimensions = tensorPtr->dimensions();
unsigned int size = dimensions->size();
std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
+ bool dimensionsSpecificity[armnn::MaxNumOfTensorDimensions];
+ std::fill_n(dimensionsSpecificity, armnn::MaxNumOfTensorDimensions, true);
+ // For backwards compatibility, check whether the dimensionSpecificity vector is present first.
+ // The default is to have dimensionSpecificity set to all true values anyway.
+ if (tensorPtr->dimensionSpecificity() != nullptr)
+ {
+ auto dimensionSpecificity = tensorPtr->dimensionSpecificity();
+ size = dimensionSpecificity->size();
+ for (unsigned int i = 0; i < size; ++i)
+ {
+ dimensionsSpecificity[i] = dimensionSpecificity->Get(i);
+ }
+ }
+ // Construct a TensorShape
+ TensorShape shape(size, outputDims.data(), dimensionsSpecificity);
auto quantizationScales = tensorPtr->quantizationScales();
-
if (quantizationScales)
{
unsigned int quantizationScalesSize = quantizationScales->size();
std::vector<float> scales(quantizationScales->begin(), quantizationScales->begin() + quantizationScalesSize);
unsigned int quantizationDim = tensorPtr->quantizationDim();
- armnn::TensorInfo result(size,
- outputDims.data(),
+ armnn::TensorInfo result(shape,
type,
scales,
quantizationDim);
return result;
}
- float quantizationScale = tensorPtr->quantizationScale();
- int32_t quantizationOffset = tensorPtr->quantizationOffset();
-
// two statements (on purpose) for easier debugging:
- armnn::TensorInfo result(size,
- outputDims.data(),
+ armnn::TensorInfo result(shape,
type,
quantizationScale,
quantizationOffset);
+
return result;
}
quantizationScales:[float];
quantizationDim:uint;
dimensionality:uint = 1;
+ dimensionSpecificity:[bool];
}
struct Connection {
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// automatically generated by the FlatBuffers compiler, do not modify
VT_QUANTIZATIONOFFSET = 10,
VT_QUANTIZATIONSCALES = 12,
VT_QUANTIZATIONDIM = 14,
- VT_DIMENSIONALITY = 16
+ VT_DIMENSIONALITY = 16,
+ VT_DIMENSIONSPECIFICITY = 18
};
const flatbuffers::Vector<uint32_t> *dimensions() const {
return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_DIMENSIONS);
uint32_t dimensionality() const {
return GetField<uint32_t>(VT_DIMENSIONALITY, 1);
}
+ const flatbuffers::Vector<uint8_t> *dimensionSpecificity() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DIMENSIONSPECIFICITY);
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_DIMENSIONS) &&
verifier.VerifyVector(quantizationScales()) &&
VerifyField<uint32_t>(verifier, VT_QUANTIZATIONDIM) &&
VerifyField<uint32_t>(verifier, VT_DIMENSIONALITY) &&
+ VerifyOffset(verifier, VT_DIMENSIONSPECIFICITY) &&
+ verifier.VerifyVector(dimensionSpecificity()) &&
verifier.EndTable();
}
};
void add_dimensionality(uint32_t dimensionality) {
fbb_.AddElement<uint32_t>(TensorInfo::VT_DIMENSIONALITY, dimensionality, 1);
}
+ void add_dimensionSpecificity(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity) {
+ fbb_.AddOffset(TensorInfo::VT_DIMENSIONSPECIFICITY, dimensionSpecificity);
+ }
explicit TensorInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
int32_t quantizationOffset = 0,
flatbuffers::Offset<flatbuffers::Vector<float>> quantizationScales = 0,
uint32_t quantizationDim = 0,
- uint32_t dimensionality = 1) {
+ uint32_t dimensionality = 1,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity = 0) {
TensorInfoBuilder builder_(_fbb);
+ builder_.add_dimensionSpecificity(dimensionSpecificity);
builder_.add_dimensionality(dimensionality);
builder_.add_quantizationDim(quantizationDim);
builder_.add_quantizationScales(quantizationScales);
int32_t quantizationOffset = 0,
const std::vector<float> *quantizationScales = nullptr,
uint32_t quantizationDim = 0,
- uint32_t dimensionality = 1) {
+ uint32_t dimensionality = 1,
+ const std::vector<uint8_t> *dimensionSpecificity = nullptr) {
auto dimensions__ = dimensions ? _fbb.CreateVector<uint32_t>(*dimensions) : 0;
auto quantizationScales__ = quantizationScales ? _fbb.CreateVector<float>(*quantizationScales) : 0;
+ auto dimensionSpecificity__ = dimensionSpecificity ? _fbb.CreateVector<uint8_t>(*dimensionSpecificity) : 0;
return armnnSerializer::CreateTensorInfo(
_fbb,
dimensions__,
quantizationOffset,
quantizationScales__,
quantizationDim,
- dimensionality);
+ dimensionality,
+ dimensionSpecificity__);
}
struct ByteData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
shape.push_back(tensorInfo.GetShape()[dim]);
}
+ std::vector<bool> specificity;
+ // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
+ // matches the size of dimensions.
+ for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
+ {
+ specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
+ }
+
if (tensorInfo.HasPerAxisQuantization())
{
// Create FlatBuffer TensorInfo
m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
tensorInfo.GetQuantizationDim().value(),
static_cast<unsigned int>
- (tensorInfo.GetShape().GetDimensionality()));
+ (tensorInfo.GetShape().GetDimensionality()),
+ m_flatBufferBuilder.CreateVector(specificity));
return flatBufferTensorInfo;
}
0,
0,
static_cast<unsigned int>
- (tensorInfo.GetShape().GetDimensionality()));
+ (tensorInfo.GetShape().GetDimensionality()),
+ m_flatBufferBuilder.CreateVector(specificity));
return flatBufferTensorInfo;
}