using MinMaxRanges   = std::vector<MinMaxRange>;
using MinMaxRangeMap = std::unordered_map<LayerGuid, MinMaxRanges>;

// Shared expected values for the quantization tests below:
// 255 is the number of representable steps of an asymmetric 8-bit quantized value,
// and g_TestTolerance is the relative tolerance used with BOOST_CHECK_CLOSE.
const float g_QuantizationBase = 255.0f;
const float g_TestTolerance    = 0.000001f;

BOOST_AUTO_TEST_SUITE(Quantizer)
// Base visitor for the quantizer tests: checks the quantization parameters that
// the quantizer assigns to the network's input, and accepts output layers as-is.
// Derived test visitors add checks for the specific layer type under test.
class TestQuantization : public LayerVisitorBase<VisitorThrowingPolicy>
{
public:
    void VisitInputLayer(const IConnectableLayer* layer,
                         LayerBindingId id,
                         const char* name = nullptr) override
    {
        TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
        BOOST_TEST((info.GetQuantizationOffset() == 128));
        // Based off current default [-15.0f, 15.0f]
        BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / g_QuantizationBase, g_TestTolerance);
    }

    void VisitOutputLayer(const IConnectableLayer* layer,
                          LayerBindingId id,
                          const char* name = nullptr) override
    {}
};
VisitLayers(graph, visitor);
}
-BOOST_AUTO_TEST_CASE(QuantizeAddition)
+class TestAdditionQuantization : public TestQuantization
{
- class TestAdditionQuantization : public TestQuantization
+public:
+ void VisitAdditionLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) override
{
- public:
- virtual void VisitAdditionLayer(const IConnectableLayer* layer,
- const char* name = nullptr)
- {
- TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((info.GetQuantizationOffset() == 128));
+ BOOST_TEST((info.GetQuantizationOffset() == 128));
- // Based off current static value [-20.0f, 20.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 40.0f/255.0f, 0.000001f);
- }
- };
+ // Based off current static value [-20.0f, 20.0f]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 40.0f / g_QuantizationBase, g_TestTolerance);
+ }
+};
+BOOST_AUTO_TEST_CASE(QuantizeAddition)
+{
auto network = INetwork::Create();
// Add the layers
input1->GetOutputSlot(0).Connect(addition->GetInputSlot(1));
addition->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- //Set TensorInfo
+ // Set TensorInfo
TensorShape shape{1U};
TensorInfo info(shape, DataType::Float32);
input0->GetOutputSlot(0).SetTensorInfo(info);
class TestActivationQuantization : public TestQuantization
{
public:
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& descriptor,
- const char* name = nullptr)
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& descriptor,
+ const char* name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST((info.GetQuantizationOffset() == 0));
// Based off current static value [-20.0f, 20.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 15.0f/255.0f, 0.000001f);
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 15.0f / g_QuantizationBase, g_TestTolerance);
}
};
INetworkPtr CreateNetworkWithActivationLayer(const ActivationDescriptor& descriptor)
{
auto network = INetwork::Create();
+
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* activation = network->AddActivationLayer(descriptor);
input0->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- //Set TensorInfo
+ // Set TensorInfo
TensorShape shape{1U};
TensorInfo info(shape, DataType::Float32);
input0->GetOutputSlot(0).SetTensorInfo(info);
VisitLayersTopologically(quantizedNetwork.get(), validator);
}
-BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
+class TestBoundedReluActivationQuantization : public TestQuantization
{
- class TestBoundedReluActivationQuantization : public TestQuantization
+public:
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& descriptor,
+ const char* name = nullptr) override
{
- public:
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& descriptor,
- const char* name = nullptr)
- {
- TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((info.GetQuantizationOffset() == 0));
+ BOOST_TEST((info.GetQuantizationOffset() == 0));
- // Based off current static value [0.0f, 3.5f(<-layer upper bound)]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 3.5f/255.0f, 0.000001f);
- }
- };
+ // Based off current static value [0.0f, 3.5f(<-layer upper bound)]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 3.5f / g_QuantizationBase, g_TestTolerance);
+ }
+};
+BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
+{
ActivationDescriptor descriptor;
descriptor.m_Function = ActivationFunction::BoundedReLu;
descriptor.m_A = 3.5f;
VisitLayersTopologically(quantizedNetwork.get(), validator);
}
-BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
+class TestTanHActivationQuantization : public TestQuantization
{
- class TestTanHActivationQuantization : public TestQuantization
+public:
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& descriptor,
+ const char* name = nullptr) override
{
- public:
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& descriptor,
- const char* name = nullptr)
- {
- TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((info.GetQuantizationOffset() == 128));
+ BOOST_TEST((info.GetQuantizationOffset() == 128));
- // Based off current static value [-1.0f, 1.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 2.0f/255.0f, 0.000001f);
- }
- };
+ // Based off current static value [-1.0f, 1.0f]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 2.0f / g_QuantizationBase, g_TestTolerance);
+ }
+};
+BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
+{
ActivationDescriptor descriptor;
descriptor.m_Function = ActivationFunction::TanH;
descriptor.m_A = 3.5f;
class TestLeakyReLuActivationQuantization : public TestQuantization
{
public:
- virtual void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& descriptor,
- const char* name = nullptr)
+ void VisitActivationLayer(const IConnectableLayer* layer,
+ const ActivationDescriptor& descriptor,
+ const char* name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST((info.GetQuantizationOffset() == 64));
// Based off current static value [-5.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f / g_QuantizationBase, g_TestTolerance);
}
protected:
// used by the descendant classes which test layers
VisitLayersTopologically(quantizedNetwork.get(), validator);
}
-BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
+class TestBatchNormalizationQuantization : public TestQuantization
{
-
- class TestQuantization : public LayerVisitorBase<VisitorThrowingPolicy>
+public:
+ void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
+ const BatchNormalizationDescriptor& desc,
+ const ConstTensor& mean,
+ const ConstTensor& variance,
+ const ConstTensor& beta,
+ const ConstTensor& gamma,
+ const char* name = nullptr) override
{
- public:
- virtual void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& desc,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr)
- {
- TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
-
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
-
- BOOST_TEST((info.GetQuantizationOffset() == 128));
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
- // Based off current static value [-15.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f);
+ BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
- //Test constants
- BOOST_TEST((mean.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((variance.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((beta.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_TEST((gamma.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((info.GetQuantizationOffset() == 128));
- BOOST_CHECK_CLOSE(mean.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
- BOOST_CHECK_CLOSE(variance.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
- BOOST_CHECK_CLOSE(beta.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
- BOOST_CHECK_CLOSE(gamma.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
+ // Based off current static value [-15.0f, 15.0f]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / g_QuantizationBase, g_TestTolerance);
- BOOST_TEST((mean.GetInfo().GetQuantizationOffset() == 85));
- }
+ // Test constants
+ BOOST_TEST((mean.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((variance.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((beta.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
+ BOOST_TEST((gamma.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- virtual void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr)
- {}
+ float expectedQuantizationScale = 3.0f / g_QuantizationBase;
+ BOOST_CHECK_CLOSE(mean.GetInfo().GetQuantizationScale(), expectedQuantizationScale, g_TestTolerance);
+ BOOST_CHECK_CLOSE(variance.GetInfo().GetQuantizationScale(), expectedQuantizationScale, g_TestTolerance);
+ BOOST_CHECK_CLOSE(beta.GetInfo().GetQuantizationScale(), expectedQuantizationScale, g_TestTolerance);
+ BOOST_CHECK_CLOSE(gamma.GetInfo().GetQuantizationScale(), expectedQuantizationScale, g_TestTolerance);
- virtual void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr)
- {}
- };
+ BOOST_TEST((mean.GetInfo().GetQuantizationOffset() == 85));
+ }
+};
+BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
+{
auto network = INetwork::Create();
TensorShape shape{3U};
input0->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- //Set TensorInfo
+ // Set TensorInfo
input0->GetOutputSlot(0).SetTensorInfo(info);
batchNorm->GetOutputSlot(0).SetTensorInfo(info);
auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
- TestQuantization validator;
+ TestBatchNormalizationQuantization validator;
VisitLayersTopologically(quantizedNetwork.get(), validator);
}
input0->GetOutputSlot(0).Connect(fullyConnected->GetInputSlot(0));
fullyConnected->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- //Set TensorInfo
+ // Set TensorInfo
input0->GetOutputSlot(0).SetTensorInfo(info);
fullyConnected->GetOutputSlot(0).SetTensorInfo(info);
BOOST_TEST((info.GetQuantizationOffset() == 128));
// Based off current static value [-15.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f/255.0f, 0.000001f );
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / g_QuantizationBase, g_TestTolerance );
- //Test weights
+ // Test weights
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f/255.0f, 0.000001f);
+ BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), 3.0f / g_QuantizationBase, g_TestTolerance);
BOOST_TEST((weights.GetInfo().GetQuantizationOffset() == 85));
// Test biases
if (biases.has_value())
{
BOOST_TEST((biases.value().GetInfo().GetDataType() == DataType::QuantisedAsymm8));
- BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(), 30.0f/255.0f, 0.000001f);
+ BOOST_CHECK_CLOSE(biases.value().GetInfo().GetQuantizationScale(),
+ 30.0f / g_QuantizationBase,
+ g_TestTolerance);
}
}
};
class TestConv2dQuantization : public TestQuantization
{
public:
- virtual void VisitConvolution2dLayer(const IConnectableLayer *layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char *name = nullptr)
+ void VisitConvolution2dLayer(const IConnectableLayer *layer,
+ const Convolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char *name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
BOOST_TEST((info.GetQuantizationOffset() == 128));
// Based off current static value [-15.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / 255.0f, 0.000001f);
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / g_QuantizationBase, g_TestTolerance);
// Test weights
// Instantiate expected values
- const float quantizationScale = 3.0f / 255.0f;
- const float tolerance = 3.0f / 255.0f;
+ const float quantizationScale = 3.0f / g_QuantizationBase;
+ const float tolerance = 3.0f / g_QuantizationBase;
const int quantizationOffset = 85;
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), quantizationScale, tolerance);
input0->GetOutputSlot(0).Connect(conv2d->GetInputSlot(0));
conv2d->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- //Set TensorInfo
+ // Set TensorInfo
input0->GetOutputSlot(0).SetTensorInfo(info);
conv2d->GetOutputSlot(0).SetTensorInfo(info);
class TestDepthwiseConv2dQuantization : public TestQuantization
{
public:
- virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
- const DepthwiseConvolution2dDescriptor& desc,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char *name = nullptr)
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
+ const DepthwiseConvolution2dDescriptor& desc,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char *name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
BOOST_TEST((info.GetQuantizationOffset() == 128));
// Based off current static value [-15.0f, 15.0f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / 255.0f, 0.000001f);
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 30.0f / g_QuantizationBase, g_TestTolerance);
// Test weights
// Instantiate expected values
- const float quantizationScale = 3.0f / 255.0f;
- const float tolerance = 3.0f / 255.0f;
+ const float quantizationScale = 3.0f / g_QuantizationBase;
+ const float tolerance = 3.0f / g_QuantizationBase;
const int quantizationOffset = 85;
BOOST_TEST((weights.GetInfo().GetDataType() == DataType::QuantisedAsymm8));
BOOST_CHECK_CLOSE(weights.GetInfo().GetQuantizationScale(), quantizationScale, tolerance);
class TestSoftmaxQuantization : public TestQuantization
{
public:
- virtual void VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& descriptor,
- const char* name = nullptr)
+ void VisitSoftmaxLayer(const IConnectableLayer* layer,
+ const SoftmaxDescriptor& descriptor,
+ const char* name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST((info.GetQuantizationOffset() == 0));
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 1.0f/255.0f, 0.000001f );
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 1.0f / g_QuantizationBase, g_TestTolerance );
}
};
INetworkPtr CreateNetworkWithSoftmaxLayer(const SoftmaxDescriptor& descriptor)
{
auto network = INetwork::Create();
+
// Add the layers
IConnectableLayer* input0 = network->AddInputLayer(0);
IConnectableLayer* softmax = network->AddSoftmaxLayer(descriptor);
input0->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- //Set TensorInfo
+ // Set TensorInfo
TensorShape shape{1U};
TensorInfo info(shape, DataType::Float32);
input0->GetOutputSlot(0).SetTensorInfo(info);
class TestPermuteQuantization : public TestLeakyReLuActivationQuantization
{
public:
- virtual void VisitPermuteLayer(const IConnectableLayer* layer,
- const PermuteDescriptor& desc,
- const char* name = nullptr)
+ void VisitPermuteLayer(const IConnectableLayer* layer,
+ const PermuteDescriptor& desc,
+ const char* name = nullptr) override
{
CheckForwardedQuantizationSettings(layer);
}
class TestSpaceToBatchQuantization : public TestLeakyReLuActivationQuantization
{
public:
- virtual void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
- const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) override
+ void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
+ const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+ const char* name = nullptr) override
{
CheckForwardedQuantizationSettings(layer);
}
class TestPooling2dQuantization : public TestLeakyReLuActivationQuantization
{
public:
- virtual void VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& desc,
- const char* name = nullptr)
+ void VisitPooling2dLayer(const IConnectableLayer* layer,
+ const Pooling2dDescriptor& desc,
+ const char* name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST((info.GetQuantizationOffset() == 64));
// Based off parent LeakyReLu [-5.f, 15.f]
- BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f/255.0f, 0.000001f);
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 20.0f / g_QuantizationBase, g_TestTolerance);
}
};
VisitLayersTopologically(quantizedNetwork.get(), validator);
}
+class TestConstantQuantization : public TestAdditionQuantization
+{
+public:
+ void VisitConstantLayer(const IConnectableLayer* layer,
+ const ConstTensor& input,
+ const char* name = nullptr) override
+ {
+ BOOST_CHECK(std::string(name) == "ConstantLayer");
+
+ TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
+ BOOST_CHECK(info.GetDataType() == DataType::QuantisedAsymm8);
+ BOOST_CHECK(info.GetQuantizationOffset() == 64);
+
+ // Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
+ BOOST_CHECK_CLOSE(info.GetQuantizationScale(), 8.0f / g_QuantizationBase, g_TestTolerance);
+ }
+};
+
+BOOST_AUTO_TEST_CASE(QuantizeConstant)
+{
+ auto network = INetwork::Create();
+
+ // Constant layer data
+ const char* name = "ConstantLayer";
+ std::vector<float> data = {-2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
+ std::vector<unsigned int> dimensions = {1, 1, 3, 3};
+ TensorInfo tensorInfo(4, dimensions.data(), DataType::Float32);
+ ConstTensor constantTensor(tensorInfo, data);
+
+ // Add the layers
+ IConnectableLayer* input = network->AddInputLayer(0);
+ IConnectableLayer* constant = network->AddConstantLayer(constantTensor, name);
+ IConnectableLayer* addition = network->AddAdditionLayer();
+ IConnectableLayer* output = network->AddOutputLayer(1);
+
+ // Establish connections
+ input->GetOutputSlot(0).Connect(addition->GetInputSlot(0));
+ constant->GetOutputSlot(0).Connect(addition->GetInputSlot(1));
+ addition->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Set TensorInfo in the remaining layers
+ input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ addition->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ auto quantizedNetwork = INetworkQuantizer::Create(network.get())->ExportNetwork();
+ TestConstantQuantization validator;
+ VisitLayersTopologically(quantizedNetwork.get(), validator);
+}
+
BOOST_AUTO_TEST_SUITE_END()
} // namespace armnn