The `ReduceMean` operation is intended to eventually replace the generic `Reduce` operation.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
void visit(mir::ops::PadOp &op) override;
void visit(mir::ops::PoolOp &op) override;
void visit(mir::ops::ReduceOp &op) override;
+ void visit(mir::ops::ReduceMeanOp &op) override;
void visit(mir::ops::ReluOp &op) override;
void visit(mir::ops::ReshapeOp &op) override;
void visit(mir::ops::ResizeOp &op) override;
#include "ops/Pad.h"
#include "ops/Pool.h"
#include "ops/Reduce.h"
+#include "ops/ReduceMean.h"
#include "ops/Reshape.h"
#include "ops/Softmax.h"
#include "ops/Sub.h"
setOutputTensors(op, std::move(outputs));
}
+// Interprets a ReduceMeanOp: runs the ReduceMean<float> kernel on the op's
+// single input and publishes the result as the op's output tensors.
+// Only float tensors are handled here, matching the other interpreter visitors.
+void NNInterpreter::visit(ops::ReduceMeanOp &op)
+{
+  auto inputs = getInputTensors(op);
+  // The functor is constructed and invoked immediately; `op` carries the
+  // reduction dimensions and the keep_dims flag.
+  auto outputs = ReduceMean<float>(inputs[0], op)();
+  setOutputTensors(op, std::move(outputs));
+}
+
void NNInterpreter::visit(ops::TransposeOp &op)
{
auto inputs = getInputTensors(op);
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_REDUCE_MEAN_
+#define _NNC_CORE_BACKEND_INTERPRETER_REDUCE_MEAN_
+
+#include "OperationImpl.h"
+#include "mir/ops/ReduceMeanOp.h"
+#include "mir/Tensor.h"
+#include "mir/ShapeRange.h"
+
+namespace nnc
+{
+
+// Interpreter-backend implementation of mir::ops::ReduceMeanOp: computes the
+// arithmetic mean of the input tensor over the op's reduction dimensions,
+// optionally keeping the reduced dimensions as size-1 axes (keep_dims).
+template <typename T> class ReduceMean : public OperationImpl<T>
+{
+public:
+  // `input` is the tensor to reduce; `op` supplies the reduction dimensions
+  // and the keep_dims flag.
+  // NOTE(review): `op` is stored by reference — it must outlive this object.
+  ReduceMean(const mir::TensorVariant &input, const mir::ops::ReduceMeanOp &op)
+      : _input(input), _op(op)
+  {
+  }
+
+  // Performs the reduction and returns the result as a one-element vector
+  // (one output tensor), per the OperationImpl contract.
+  std::vector<mir::TensorVariant> operator()() override
+  {
+    const auto &input_shape = _op.getInputShape(0);
+    const auto &output_shape = _op.getOutputShape(0);
+    const auto &reduction_dims = _op.getReductionDims();
+    const bool keep_dims = _op.getKeepDims();
+
+    // Accumulation step: plain sum; the division by the reduction factor
+    // below turns the sums into means.
+    const auto reductor = [](T result, T x) { return result + x; };
+
+    // NOTE(review): the accumulation below relies on allocate_tensor
+    // zero-initializing the buffer — confirm against OperationImpl.
+    auto res = OperationImpl<T>::allocate_tensor(output_shape);
+    mir::Tensor<T> res_accessor(res);
+
+    // This mask contains 'true' for dimensions that should be reduced. For example, if we want
+    // to reduce dimensions 1 and 3 with total number of dimensions of 4, the mask will be
+    // [false, true, false, true].
+    // NOTE(review): assumes reduction dims are already normalized to
+    // [0, rank) — negative axes would index out of bounds here.
+    std::vector<bool> reduction_dims_mask(input_shape.rank(), false);
+    for (const int dim : reduction_dims)
+    {
+      reduction_dims_mask[dim] = true;
+    }
+
+    // Single pass over the input: each input element is added into the output
+    // element obtained by collapsing (keep_dims: zeroing) the reduced axes.
+    mir::Index out_index(output_shape.rank());
+    for (const mir::Index &in_index : mir::ShapeRange(input_shape))
+    {
+      int out_index_dim = 0;
+      for (int dim = 0; dim < input_shape.rank(); ++dim)
+      {
+        if (keep_dims)
+        {
+          // Reduced axes stay in the output with extent 1, so map them to 0.
+          out_index.at(out_index_dim++) = reduction_dims_mask[dim] ? 0 : in_index.at(dim);
+        }
+        else
+        {
+          // Reduced axes are dropped from the output index entirely.
+          if (!reduction_dims_mask[dim])
+          {
+            out_index.at(out_index_dim++) = in_index.at(dim);
+          }
+        }
+      }
+      res_accessor.at(out_index) = reductor(res_accessor.at(out_index), _input.at(in_index));
+    }
+
+    // Every output element accumulates exactly this many input elements
+    // (the product of the reduced extents), so one factor serves all of them.
+    const std::int32_t reduction_factor = input_shape.numElements() / output_shape.numElements();
+
+    for (const auto &index : mir::ShapeRange(output_shape))
+    {
+      res_accessor.at(index) /= reduction_factor;
+    }
+
+    return {res};
+  }
+
+private:
+  const mir::Tensor<T> _input; // accessor over the input TensorVariant
+  const mir::ops::ReduceMeanOp &_op; // non-owning; see lifetime note above
+};
+
+} // namespace nnc
+
+#endif //_NNC_CORE_BACKEND_INTERPRETER_REDUCE_MEAN_
}
}
+// Schedules a ReduceMeanOp for code generation under the artifact function
+// name "reduceMean", mirroring the other single-call visitors in this file.
+void ModelAnalyzer::visit(mir::ops::ReduceMeanOp &op)
+{
+  appendOperationToInference(&op, "reduceMean");
+}
+
void ModelAnalyzer::visit(mir::ops::TransposeOp &op)
{
appendOperationToInference(&op, "transpose");
void visit(mir::ops::PadOp &op) override;
void visit(mir::ops::PoolOp &op) override;
void visit(mir::ops::ReduceOp &op) override;
+ void visit(mir::ops::ReduceMeanOp &op) override;
void visit(mir::ops::ReluOp &op) override;
void visit(mir::ops::ReshapeOp &op) override;
void visit(mir::ops::ResizeOp &op) override;
serializeShape(op.getOutputShape(0));
}
+// Serializes ReduceMeanOp parameters into the binary buffer, in order:
+// reduction dimensions (packed as a Shape), keep_dims flag (as int32),
+// then the output shape. The deserializer must read them in the same order.
+void Serializer::visit(mir::ops::ReduceMeanOp &op)
+{
+  _curOp->paramStartOffset = _buffer.size();
+  serializeShape(Shape(op.getReductionDims())); // reuse shape serialization
+  serializeT<int32_t>(op.getKeepDims());
+  serializeShape(op.getOutputShape(0));
+}
+
void Serializer::visit(mir::ops::TransposeOp &op)
{
_curOp->paramStartOffset = _buffer.size();
void visit(mir::ops::PadOp &op) override;
void visit(mir::ops::PoolOp &op) override;
void visit(mir::ops::ReduceOp &op) override;
+ void visit(mir::ops::ReduceMeanOp &op) override;
void visit(mir::ops::ReluOp &op) override;
void visit(mir::ops::ReshapeOp &op) override;
void visit(mir::ops::ResizeOp &op) override;
#include "mir/ops/OutputOp.h"
#include "mir/ops/PadOp.h"
#include "mir/ops/PoolOp.h"
-#include "mir/ops/ReduceOp.h"
+#include "mir/ops/ReduceMeanOp.h"
#include "mir/ops/ReluOp.h"
#include "mir/ops/ReshapeOp.h"
#include "mir/ops/SigmoidOp.h"
#include "mir/ops/OutputOp.h"
#include "mir/ops/PadOp.h"
#include "mir/ops/PoolOp.h"
-#include "mir/ops/ReduceOp.h"
+#include "mir/ops/ReduceMeanOp.h"
#include "mir/ops/ReluOp.h"
#include "mir/ops/ReshapeOp.h"
#include "mir/ops/ResizeOp.h"
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
auto op_generator = [&axis_list, keep_dims](
mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- auto op = g.create<mir::ops::ReduceOp>(inputs[0], axis_list, keep_dims,
- mir::ops::ReduceOp::FuncType::mean);
+ auto op = g.create<mir::ops::ReduceMeanOp>(inputs[0], axis_list, keep_dims);
return op;
};