"modelIR/operations/PadOp.cpp"
"modelIR/operations/PoolOp.cpp"
"modelIR/operations/SqueezeOp.cpp"
+ "modelIR/operations/TransposeOp.cpp"
"modelIR/Graph.cpp"
"modelIR/Index.cpp"
"modelIR/ir_dot_builder.cpp"
dotBuilder.updateWithOp(&op, node_info);
}
+void IrDotDumper::visit(ops::TransposeOp& op) {
+ auto node_info = DotIrNodeInfo().withType("TransposeOp", op.getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op));
+
+ dotBuilder.updateWithOp(&op, node_info);
+}
+
} // namespace mir
} // namespace nnc
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
+#include "core/modelIR/operations/TransposeOp.h"
namespace nnc {
namespace mir {
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "core/modelIR/operations/TransposeOp.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
+TransposeOp::TransposeOp(const IODescriptor& arg, const std::vector<std::size_t>& axis_order)
+ : Operation(Type::transpose, {arg}), _axisOrder(axis_order) {
+ assert(_axisOrder.size() == static_cast<std::size_t>(getInputShape(0).rank()));
+ inferOutputShapes();
+}
+
+void TransposeOp::inferOutputShapes() {
+ auto& input_shape = getInputShape(0);
+ Shape output_shape;
+ output_shape.resize(input_shape.rank());
+ for (std::size_t i = 0; i < _axisOrder.size(); ++i)
+ output_shape.dim(i) = input_shape.dim(static_cast<int32_t>(_axisOrder.at(i)));
+ setOutputShape(0, output_shape);
+}
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
+#include "core/modelIR/operations/TransposeOp.h"
#include "core/modelIR/ir_dot_builder.h"
void visit(ops::SqueezeOp& op) override;
void visit(ops::PadOp& op) override;
void visit(ops::ReduceFOp& op) override;
+ void visit(ops::TransposeOp& op) override;
void writeDot(std::ostream &os) { dotBuilder.writeDot(os); };
const std::vector<int32_t>& reduce_dims,
bool keep_dims,
FuncType func_type)
- : Operation(Type::reduceFOp, {arg}), _reduceDims(reduce_dims), _keepDims(keep_dims),
+ : Operation(Type::reduceF, {arg}), _reduceDims(reduce_dims), _keepDims(keep_dims),
_funcType(func_type) {
// Infer output shapes.
const auto& input_shape = getInputShape(0);
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_TRANSPOSE_H_
+#define _NNC_CORE_IR_MODEL_TRANSPOSE_H_
+
+#include "core/modelIR/Operation.h"
+#include <vector>
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
/**
 * @brief Tensor transpose operation.
 *
 * Rearranges the axes of its single input according to a given axis order:
 * output axis i takes the size (and data) of input axis axis_order[i].
 */
class TransposeOp : public Operation {
public:
  // `axis_order` must have one entry per input dimension (checked via assert
  // in the constructor); entry i selects which input axis becomes output axis i.
  TransposeOp(const IODescriptor& arg, const std::vector<std::size_t>& axis_order);

  // Returns the permutation applied to the input axes.
  const std::vector<std::size_t>& getAxisOrder() const { return _axisOrder; }

private:
  // Computes and sets the output shape by permuting the input shape;
  // called from the constructor.
  void inferOutputShapes();

  std::vector<std::size_t> _axisOrder;  // permutation of the input axes
};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_TRANSPOSE_H_
HANDLE_OP(ELU, EluOp)
HANDLE_OP(squeeze, SqueezeOp)
HANDLE_OP(pad, PadOp)
-HANDLE_OP(reduceFOp, ReduceFOp)
+HANDLE_OP(reduceF, ReduceFOp)
+HANDLE_OP(transpose, TransposeOp)
void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::ReduceFOp& op) override;
+ void visit(mir::ops::TransposeOp& op) override;
private:
using AF = ArtifactFactory;
void visit(ops::SqueezeOp& op) override;
void visit(ops::PadOp& op) override;
void visit(ops::ReduceFOp& op) override;
+ void visit(ops::TransposeOp& op) override;
void setInput(const std::string &name, const TensorVariant& data);
std::vector<TensorVariant> &getResult(Operation* op);
assert(false && "Unimplemented operation: ReduceFOp");
}
// TODO: TransposeOp code generation is not supported by the ACL C++ backend yet;
// reaching this visit aborts in debug builds, mirroring the other unimplemented ops.
void AclCppOpGenerator::visit(mir::ops::TransposeOp& op) {
  assert(false && "Unimplemented operation: TransposeOp");
}
+
}
// namespace nnc
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/PadOp.h"
+#include "core/modelIR/operations/TransposeOp.h"
#include "ops/Bias.h"
#include "ops/Concat.h"
#include "ops/Reshape.h"
#include "ops/Softmax.h"
#include "ops/Scale.h"
+#include "ops/Transpose.h"
#include "ops/Dropout.h"
#include "ops/BatchNorm.h"
#include "ops/Pad.h"
}
}
+void NNInterpreter::visit(ops::TransposeOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ auto& input = var(operand.op->getId())[operand.index];
+ var(op.getId()) = Transpose(input, op)();
+}
+
} // namespace nnc
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Transpose.h"
+#include "core/modelIR/Tensor.h"
+#include "core/modelIR/ShapeRange.h"
+
+namespace nnc {
+
+using namespace mir;
+
// Stores the op (source of axis order and shapes) by reference — it must
// outlive this object — and wraps `input` in a float accessor (presumably a
// non-owning view; confirm TensorVariant lifetime requirements).
Transpose::Transpose(const mir::TensorVariant& input,
                     const mir::ops::TransposeOp& op) : _op(op), _input(input) {}
+
+std::vector<mir::TensorVariant> Transpose::operator()() {
+ auto res = allocate_tensor(_op.getOutputShape(0));
+ Tensor<float> res_accessor(res);
+
+ auto& input_shape = _op.getInputShape(0);
+ auto& axis_order = _op.getAxisOrder();
+ std::size_t num_axes = axis_order.size();
+
+ ShapeRange in_range(input_shape);
+ Index out_index;
+ out_index.resize(input_shape.rank());
+
+ for (auto& in_index : in_range) {
+ for (std::size_t i = 0; i < num_axes; ++i)
+ out_index.at(static_cast<int32_t>(i)) = in_index.at(static_cast<int32_t>(axis_order.at(i)));
+ res_accessor.at(out_index) = _input.at(in_index);
+ }
+
+ return {res};
+}
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_TRANSPOSE_
+#define _NNC_CORE_BACKEND_INTERPRETER_TRANSPOSE_
+
+#include "OperationImpl.h"
+#include "core/modelIR/operations/TransposeOp.h"
+
+namespace nnc {
+
// Interpreter kernel for the transpose operation: produces a tensor whose
// axes are a permutation of the input's axes, as specified by the op.
class Transpose : public OperationImpl<float> {
public:
  // Performs the transposition; returns a single-element result vector.
  std::vector<mir::TensorVariant> operator()() override;

  // `op` is stored by reference and must outlive this object.
  Transpose(const mir::TensorVariant& input, const mir::ops::TransposeOp& op);

private:
  const mir::ops::TransposeOp& _op;  // source of axis order and shapes
  const mir::Tensor<float> _input;   // typed accessor over the input tensor
};
+
+}
+
+#endif //_NNC_CORE_BACKEND_INTERPRETER_TRANSPOSE_
#include "cpp_tanh.generated.h"
#include "cpp_elementwise.generated.h"
#include "cpp_pad.generated.h"
+#include "cpp_transpose.generated.h"
namespace nnc
{
out.write(cpp_tanh, sizeof(cpp_tanh));
out.write(cpp_pad, sizeof(cpp_pad));
out.write(cpp_conv_transpose, sizeof(cpp_conv_transpose));
+ out.write(cpp_transpose, sizeof(cpp_transpose));
out.write(cpp_operations, sizeof(cpp_operations));
out.write(cpp_scale, sizeof(cpp_scale));
out.write(cpp_dropout, sizeof(cpp_dropout));
#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
+#include "core/modelIR/operations/TransposeOp.h"
using namespace std;
addOpDescr(&op, "ReduceMean");
}
// Adds a descriptor for this op to the inference sequence under the artifact
// function name "transpose".
void ModelAnalyzer::visit(mir::ops::TransposeOp& op) {
  addOpDescr(&op, "transpose");
}
+
} // namespace nnc
void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::ReduceFOp& op) override;
+ void visit(mir::ops::TransposeOp& op) override;
/**
* @return vector of id's of network input tensors
#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
+#include "core/modelIR/operations/TransposeOp.h"
#include "pass/PassException.h"
#include <algorithm>
serializeShape(op.getOutputShape(0));
}
+void Serializer::visit(mir::ops::TransposeOp& op) {
+ _curOp->_paramStartOffset = _buffer.size();
+ // serializer parameters
+ auto& axis_order = op.getAxisOrder();
+ serializeT(static_cast<int32_t>(axis_order.size()));
+ for (std::size_t i = 0; i < axis_order.size(); ++i) {
+ serializeT(static_cast<int32_t>(axis_order.at(i)));
+ }
+ // serialize output shape
+ serializeShape(op.getOutputShape(0));
+}
+
} // namespace nnc
void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::ReduceFOp& op) override;
+ void visit(mir::ops::TransposeOp& op) override;
void serialize(std::list<OpDescr> &inferenceSequence);
// Convenience overload: flat buffer offset for an index given as an array.
// Assumes exactly four dimensions — index[0..3] are all read unconditionally.
inline int Offset(const RuntimeShape& shape, int* index) {
  return Offset(shape, index[0], index[1], index[2], index[3]);
}
+
// Permutation descriptor for the Transpose kernel: output axis i takes its
// data from input axis perm[i]. At most 4 dimensions are supported.
struct TransposeParams {
  int8 perm_count;  // number of valid entries in perm (must be <= 4)
  int32 perm[4];
};
Pad(input, input_dims, left_paddings, right_paddings, output, output_dims);
}
+
+void transpose(Tensor &out, const char *params, const Tensor &in) {
+ TransposeParams transpose_params;
+ transpose_params.perm_count = deserializeT<int32_t>(params);
+ for (int i = 0; i < transpose_params.perm_count; ++i)
+ transpose_params.perm[i] = deserializeT<int32_t>(params);
+
+ Shape out_s = deserializeShape(params);
+ assert(out_s.getNumElems() == in.getShape().getNumElems());
+ out.reShape(out_s);
+
+ Transpose(transpose_params,
+ shapeToRuntimeShape(in.getShape()), in.getData(),
+ shapeToRuntimeShape(out.getShape()), out.getData());
+}
--- /dev/null
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
// Naive reference transpose kernel (from the TensorFlow Lite reference
// implementation). Permutes the axes of `input_data` into `output_data`
// according to params.perm: output axis k takes its extent and data from input
// axis perm[k]. Shapes of rank < 4 are implicitly left-padded to rank 4, and
// the permutation is extended to match.
template <typename T>
void Transpose(const TransposeParams& params,
               const RuntimeShape& unextended_input_shape, const T* input_data,
               const RuntimeShape& unextended_output_shape, T* output_data) {
  const int unextended_output_size = unextended_output_shape.DimensionsCount();
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_size, 4);
  TFLITE_DCHECK_EQ(unextended_output_size, params.perm_count);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int input_ext_size = 4 - unextended_input_shape.DimensionsCount();
  const int output_ext_size = 4 - unextended_output_size;

  // The perm data is extended to match the output, each index incremented by
  // the amount of front padding of the input shape.
  int extended_perm[4];
  for (int i = 0; i < output_ext_size; ++i) {
    extended_perm[i] = i;
  }
  for (int i = 0; i < unextended_output_size; ++i) {
    extended_perm[i + output_ext_size] = params.perm[i] + input_ext_size;
  }

  int out_sizes[4];
  // Compute the inverse permutation array so we can do an output centered
  // transpose. Also, check to make sure output_dims is matching input_dims.
  for (int k = 0; k < 4; k++) {
    out_sizes[k] = MatchingDim(input_shape, extended_perm[k], output_shape, k);
  }

  // Naive transpose loop (iterate on output index and compute input index).
  int o[4]; // loop index (on output).
  int i[4];
  for (o[3] = 0; o[3] < out_sizes[3]; o[3]++) {
    i[extended_perm[3]] = o[3];
    for (o[2] = 0; o[2] < out_sizes[2]; o[2]++) {
      i[extended_perm[2]] = o[2];
      for (o[1] = 0; o[1] < out_sizes[1]; o[1]++) {
        i[extended_perm[1]] = o[1];
        for (o[0] = 0; o[0] < out_sizes[0]; o[0]++) {
          i[extended_perm[0]] = o[0];
          output_data[Offset(output_shape, o)] =
              input_data[Offset(input_shape, i)];
        }
      }
    }
  }
}
#include "code_snippets/cpp_elementwise.def"
#include "code_snippets/cpp_tanh.def"
#include "code_snippets/cpp_pad.def"
+#include "code_snippets/cpp_transpose.def"
#include "CommonData.def"
#include "code_snippets/cpp_header_types.def"