*/
#include "kernels/L2Normalize.h"
#include "kernels/TestUtils.h"
#include "luci_interpreter/TestMemoryManager.h"

#include <memory>
namespace luci_interpreter
{
void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> output_shape,
std::initializer_list<float> input_data, std::initializer_list<float> output_data)
{
- Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data);
+ std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
+ Tensor input_tensor =
+ makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
L2NormParams params{};
L2Normalize kernel(&input_tensor, &output_tensor, params);
kernel.configure();
+ memory_manager->allocate_memory(output_tensor);
kernel.execute();
EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(output_data));
std::initializer_list<float> input_data,
std::initializer_list<float> output_data)
{
+ std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
std::pair<float, int32_t> quant_param =
quantizationParams<uint8_t>(std::min(input_data) < 0 ? std::min(input_data) : 0.f,
std::max(input_data) > 0 ? std::max(input_data) : 0.f);
- Tensor input_tensor =
- makeInputTensor<DataType::U8>(input_shape, quant_param.first, quant_param.second, input_data);
+ Tensor input_tensor = makeInputTensor<DataType::U8>(
+ input_shape, quant_param.first, quant_param.second, input_data, memory_manager.get());
Tensor output_tensor = makeOutputTensor(DataType::U8, 1. / 128., 128);
L2NormParams params{};
L2Normalize kernel(&input_tensor, &output_tensor, params);
kernel.configure();
+ memory_manager->allocate_memory(output_tensor);
kernel.execute();
EXPECT_THAT(dequantizeTensorData(output_tensor),
// Negative test: L2Normalize accepts only Activation::NONE, so configure()
// must reject any fused activation.
// NOTE(review): the assertion tail was elided in the diff — reconstructed as
// EXPECT_ANY_THROW(kernel.configure()); confirm the exact activation value used.
TEST(L2NormalizeTest, ActivationType_NEG)
{
  std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
  std::vector<float> input_data = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>({1, 1, 1, 6}, input_data, memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  L2NormParams params{};
  // Any activation other than NONE is unsupported and must fail at configure time.
  params.activation = Activation::RELU6;

  L2Normalize kernel(&input_tensor, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}
// Negative test: quantized L2Normalize requires the canonical output quantization
// (scale = 1/128, zero_point = 128); configure() must reject any other params.
// NOTE(review): the assertion tail was elided in the diff — reconstructed as
// EXPECT_ANY_THROW(kernel.configure()); confirm against the full file.
TEST(L2NormalizeTest, InvalidOutputQuantParam_NEG)
{
  std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
  std::vector<float> input_data = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};

  Tensor input_tensor =
    makeInputTensor<DataType::U8>({1, 1, 1, 6}, 1. / 64., 127, input_data, memory_manager.get());
  // Deliberately wrong output quantization (1/64, 127) — not the required (1/128, 128).
  Tensor output_tensor = makeOutputTensor(DataType::U8, 1. / 64., 127);

  L2NormParams params{};

  L2Normalize kernel(&input_tensor, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}