return m_Graph->AddLayer<MaximumLayer>(name);
}
// Adds an element-wise Minimum layer (min of two input tensors) to the graph.
IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}
+
IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
const char* name)
{
return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}
-IConnectableLayer* Network::AddMinimumLayer(const char* name)
-{
- return m_Graph->AddLayer<MinimumLayer>(name);
-}
-
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph))
{
RefMaximumWorkload_Execute,
RefMultiplicationWorkload_Execute,
RefDivisionWorkload_Execute,
+ RefMinimumWorkload_Execute,
MAX_STRING_ID
};
m_Strings[RefMaximumWorkload_Execute] = "RefMaximumWorkload_Execute";
m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
m_Strings[RefDivisionWorkload_Execute] = "RefDivisionWorkload_Execute";
+ m_Strings[RefMinimumWorkload_Execute] = "RefMinimumWorkload_Execute";
}
StringMapping(const StringMapping &) = delete;
void Validate(const WorkloadInfo& workloadInfo) const;
};
+// Minimum layer workload data.
struct MinimumQueueDescriptor : QueueDescriptor
{
void Validate(const WorkloadInfo& workloadInfo) const;
virtual std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const = 0;
+
virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
- virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
-
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
};
return workloadFactory.CreateMaximum(descriptor, info);
}
// Template specialization: routes a MinimumQueueDescriptor to the workload
// factory's CreateMinimum() entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
+
namespace {
template <typename Descriptor, typename dataType>
LayerTestResult<dataType, 4> ElementwiseTestHelper
std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
7, 10, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
(workloadFactory,
memoryManager,
shape0,
0);
}
// Minimum of a [1,2,2,2] float tensor against a single broadcast scalar (2).
LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});

    // Single-element tensor broadcast across every element of input0.
    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    // Expected: element-wise min(input0[i], 2).
    std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
                                                                      memoryManager,
                                                                      shape0,
                                                                      input0,
                                                                      shape1,
                                                                      input1,
                                                                      shape0,
                                                                      output);
}
+
+
// Minimum of a [1,2,2,2] float tensor against a single broadcast scalar (5),
// with values both above and below the broadcast element.
LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});

    // Single-element tensor broadcast across every element of input0.
    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 5 });

    // Expected: element-wise min(input0[i], 5).
    std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
                                                                      memoryManager,
                                                                      shape0,
                                                                      input0,
                                                                      shape1,
                                                                      input1,
                                                                      shape0,
                                                                      output);
}
+
+LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
+ 7, 1, 2, 3, 4, 5 });
+
+ std::vector<uint8_t> input1({ 1, 2, 3});
+
+ std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
+ 1, 1, 2, 1, 2, 3 });
+
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
namespace {
LayerTestResult<float,4> MultiplicationTestHelper(
armnn::IWorkloadFactory& workloadFactory,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager);
+
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
&TrueFunc<>);
}
// Minimum is supported on the reference backend for the same data types as the
// other element-wise ops. Only input0's data type is checked; input1 and output
// are ignored here — NOTE(review): presumably workload validation enforces that
// the other tensors match; confirm against MinimumQueueDescriptor::Validate.
bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input1);
    ignore_unused(output);
    return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                     input0.GetDataType(),
                                     &TrueFunc<>,
                                     &TrueFunc<>);
}
+
bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsMeanSupported(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
return MakeWorkload<RefMeanFloat32Workload, RefMeanUint8Workload>(descriptor, info);
}
// Creates a reference Minimum workload; MakeWorkload selects the Float32 or
// Uint8 variant based on the tensors described in the workload info.
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMinimum(
    const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<RefMinimumFloat32Workload, RefMinimumUint8Workload>(descriptor, info);
}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
return MakeWorkload<RefStridedSliceFloat32Workload, RefStridedSliceUint8Workload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
#include <backendsCommon/test/MergerTestImpl.hpp>
#include <boost/test/unit_test.hpp>
+#include <boost/test/execution_monitor.hpp>
BOOST_AUTO_TEST_SUITE(RefEndToEnd)
BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}
+BOOST_AUTO_TEST_CASE(TrivialMin)
+{
+ using namespace armnn;
+
+ // Create runtime in which test will run
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // Builds up the structure of the network.
+ armnn::INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input1 = net->AddInputLayer(0);
+ IConnectableLayer* input2 = net->AddInputLayer(1);
+ IConnectableLayer* min = net->AddMinimumLayer();
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
+ input2->GetOutputSlot(0).Connect(min->GetInputSlot(1));
+ min->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Sets the tensors in the network.
+ TensorInfo tensorInfo(TensorShape({1, 1, 1, 4}), DataType::Float32);
+ input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ min->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ // optimize the network
+ IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output - matching android nn test.
+ std::vector<float> input1Data
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+ std::vector<float> input2Data
+ {
+ 2.0f, 1.0f, 5.0f, 2.0f
+ };
+ std::vector<float> outputData(4);
+
+ InputTensors inputTensors
+ {
+ {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
+ {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
+ };
+ OutputTensors outputTensors
+ {
+ {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+ };
+
+ // Does the inference.
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Checks the results
+ BOOST_TEST(outputData[0] == 1);
+ BOOST_TEST(outputData[1] == 1);
+ BOOST_TEST(outputData[2] == 3);
+ BOOST_TEST(outputData[3] == 2);
+}
+
+
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
{
MergerDim0EndToEnd<float>(defaultBackends);
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1Element, DivisionBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1DVector, DivisionBroadcast1DVectorUint8Test)
-//Max
+// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
+// Min
+ARMNN_AUTO_TEST_CASE(SimpleMinimum1, MinimumBroadcast1ElementTest1)
+ARMNN_AUTO_TEST_CASE(SimpleMinimum2, MinimumBroadcast1ElementTest2)
+ARMNN_AUTO_TEST_CASE(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
+
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
FullyConnected.hpp
Maximum.hpp
Merger.hpp
+ Minimum.hpp
Pad.cpp
Pad.hpp
Pooling2d.cpp
#include "ElementwiseFunction.hpp"
#include "Broadcast.hpp"
#include <functional>
+#include "Minimum.hpp"
#include "Maximum.hpp"
template struct armnn::ElementwiseFunction<std::minus<float>>;
template struct armnn::ElementwiseFunction<std::multiplies<float>>;
template struct armnn::ElementwiseFunction<std::divides<float>>;
-template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
\ No newline at end of file
+template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
+template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
\ No newline at end of file
--- /dev/null
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

// Fix: std::min requires <algorithm>; the original header relied on a
// transitive include, which breaks in translation units that do not pull
// it in elsewhere.
#include <algorithm>

namespace armnn
{

/// Element-wise minimum functor, used to instantiate ElementwiseFunction
/// and the reference Minimum workloads.
template<typename T>
struct minimum
{
    T operator()(const T& input1, const T& input2) const
    {
        return std::min(input1, input2);
    }
};

} //namespace armnn

+
template class armnn::BaseFloat32ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
template class armnn::BaseUint8ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include "Maximum.hpp"
+#include "Minimum.hpp"
+
+
namespace armnn
{
MaximumQueueDescriptor,
StringMapping::RefMaximumWorkload_Execute>;
// Reference Minimum workload aliases. Both variants instantiate
// RefElementwiseWorkload with the minimum<float> functor — NOTE(review): the
// Uint8 variant also computes in float, presumably after dequantization,
// matching the pattern of the other Ref element-wise workloads; confirm in
// RefElementwiseWorkload.
using RefMinimumFloat32Workload =
    RefElementwiseWorkload<minimum<float>,
                           DataType::Float32,
                           MinimumQueueDescriptor,
                           StringMapping::RefMinimumWorkload_Execute>;

using RefMinimumUint8Workload =
    RefElementwiseWorkload<minimum<float>,
                           DataType::QuantisedAsymm8,
                           MinimumQueueDescriptor,
                           StringMapping::RefMinimumWorkload_Execute>;
} // armnn