From 442b3168ac1137f52a918aae2066bdbcfbc4ddde Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Mon, 4 Feb 2019 14:40:44 +0300
Subject: [PATCH] [nnc] Use getShape instead of directly accessing IODescriptor
 fields (#2999)

Use the `getShape` method of `IODescriptor` instead of accessing its fields.

Signed-off-by: Sergei Barannikov
---
 contrib/nnc/core/modelIR/Operation.cpp             |  4 +-
 contrib/nnc/include/core/modelIR/Operation.h       |  2 +-
 .../passes/caffe2_frontend/caffe2_op_creator.cpp   | 10 ++---
 .../nnc/passes/caffe_frontend/caffe_op_creator.cpp | 12 +++---
 .../nnc/passes/onnx_frontend/ONNXImporterImpl.cpp  |  2 +-
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp | 44 +++++++++++-----------
 6 files changed, 35 insertions(+), 39 deletions(-)
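
For reviewers, a before/after sketch of the call-site idiom this patch adopts.
The accessor definition below is a plausible reconstruction, assuming
IODescriptor keeps its public `op` and `index` fields and simply forwards to
the producing operation; it is illustrative, not a quote of the tree:

    // Before: call sites chased the producing operation by hand.
    const Shape& shape = descriptor.op->getOutputShape(descriptor.index);

    // After: the descriptor answers directly.
    const Shape& shape = descriptor.getShape();

    // Assumed implementation of the accessor (forwarding only):
    const Shape& IODescriptor::getShape() const {
      return op->getOutputShape(index);
    }
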
tensors is supported"); scales[0] = 1; // default to noop scales[1] = getSingleArgument(op, "height_scale", 1.0f); @@ -454,9 +454,9 @@ Caffe2OpCreator::convertSpatialBN(const std::vector& inputs, } std::vector Caffe2OpCreator::convertSum(const std::vector& inputs) { - auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index); + const auto& input_shape = inputs[0].getShape(); for (auto& in : inputs) - assert(input_shape == in.op->getOutputShape(inputs[0].index) && "All Sum inputs must have same shape"); + assert(input_shape == in.getShape() && "All Sum inputs must have same shape"); auto op = createOp("Elementwise_Add", inputs, ops::ElementwiseOp::OpType::add); return {op->getOutput(0)}; diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp index c9bedbd..32bd548 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp @@ -85,7 +85,7 @@ mir::IODescriptor CaffeOpCreator::createMul(mir::IODescriptor arg1, mir::IODescr /// @brief Split arg into @p num_parts equal parts along @p axis axis. std::vector CaffeOpCreator::createSplit(mir::IODescriptor arg, int32_t num_parts, int32_t axis) { - const auto& arg_shape = arg.op->getOutputShape(arg.index); + const auto& arg_shape = arg.getShape(); assert(axis >= 0 && axis < arg_shape.rank()); int32_t part_size = arg_shape.dim(axis) / num_parts; @@ -109,8 +109,8 @@ IODescriptor CaffeOpCreator::createFullyConnected(const mir::IODescriptor& input, const mir::IODescriptor& weights, int32_t axis) { - const auto& input_shape = input.op->getOutputShape(input.index); - const auto& weights_shape = weights.op->getOutputShape(weights.index); + const auto& input_shape = input.getShape(); + const auto& weights_shape = weights.getShape(); assert(axis >= 0 && axis < input_shape.rank()); assert(weights_shape.rank() == 2); @@ -405,7 +405,7 @@ CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer, Shape strides; std::vector padding_before, padding_after; - const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index); + const auto& input_shape = inputs[0].getShape(); convertPoolingParam(opts, input_shape, window_shape, strides, padding_before, padding_after); ops::PoolOp::PoolingType pool_type = getPoolingType(opts); @@ -434,7 +434,7 @@ CaffeOpCreator::convertSoftmax(const caffe::LayerParameter& layer, // CPP and ACL backends are able to perform Softmax only along the last axis. // FIXME Do it in backends. - if (inputs[0].op->getOutputShape(inputs[0].index).rank() == 4) { + if (inputs[0].getShape().rank() == 4) { // For now, we only account for the most common case. 
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
index c9bedbd..32bd548 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
+++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
@@ -85,7 +85,7 @@ mir::IODescriptor CaffeOpCreator::createMul(mir::IODescriptor arg1, mir::IODescr
 /// @brief Split arg into @p num_parts equal parts along @p axis axis.
 std::vector<mir::IODescriptor> CaffeOpCreator::createSplit(mir::IODescriptor arg, int32_t num_parts,
                                                            int32_t axis) {
-  const auto& arg_shape = arg.op->getOutputShape(arg.index);
+  const auto& arg_shape = arg.getShape();
 
   assert(axis >= 0 && axis < arg_shape.rank());
   int32_t part_size = arg_shape.dim(axis) / num_parts;
@@ -109,8 +109,8 @@ IODescriptor
 CaffeOpCreator::createFullyConnected(const mir::IODescriptor& input,
                                      const mir::IODescriptor& weights,
                                      int32_t axis) {
-  const auto& input_shape = input.op->getOutputShape(input.index);
-  const auto& weights_shape = weights.op->getOutputShape(weights.index);
+  const auto& input_shape = input.getShape();
+  const auto& weights_shape = weights.getShape();
 
   assert(axis >= 0 && axis < input_shape.rank());
   assert(weights_shape.rank() == 2);
@@ -405,7 +405,7 @@ CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer,
 
   Shape strides;
   std::vector<int32_t> padding_before, padding_after;
-  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  const auto& input_shape = inputs[0].getShape();
   convertPoolingParam(opts, input_shape, window_shape, strides, padding_before, padding_after);
 
   ops::PoolOp::PoolingType pool_type = getPoolingType(opts);
@@ -434,7 +434,7 @@ CaffeOpCreator::convertSoftmax(const caffe::LayerParameter& layer,
 
   // CPP and ACL backends are able to perform Softmax only along the last axis.
   // FIXME Do it in backends.
-  if (inputs[0].op->getOutputShape(inputs[0].index).rank() == 4) {
+  if (inputs[0].getShape().rank() == 4) {
     // For now, we only account for the most common case.
     if (params.axis() != 1)
       throw PassException("Softmax: unsupported axis");
@@ -729,7 +729,7 @@ CaffeOpCreator::convertLSTM(const caffe::LayerParameter& layer,
   auto cont = inputs[1];
   assert(inputs.size() == 2);
 
-  const auto& x_shape = x.op->getOutputShape(x.index);
+  const auto& x_shape = x.getShape();
   const int32_t seq_length = x_shape.dim(0);
   const int32_t batch_size = x_shape.dim(1);
   const int32_t hidden_size = params.num_output();
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index ce9de6b..6929292 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
@@ -173,7 +173,7 @@ void ONNXImporterImpl::dump(const std::vector<mir::IODescriptor>& input_descrs,
     auto op = out_descr.op;
     std::cout << onnx_node.op_type() << " '" << op->getName() << "'";
     if (input_descrs[0].op->getNumInputs() > 0) {
-      std::cout << "Input Shape: " << input_descrs[0].op->getOutputShape(input_descrs[0].index);
+      std::cout << "Input Shape: " << input_descrs[0].getShape();
     }
     std::cout << " Output Shape: " << op->getOutputShape(0);
     auto* onnx_op_type = ONNXPerfectHash::getONNXOpType(onnx_node.op_type().c_str(), onnx_node.op_type().size());
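
As a worked check of the size arithmetic in the createSplit hunk above (a
hypothetical standalone sketch, not code from the tree): each part keeps every
dimension except `axis`, which shrinks to dim(axis) / num_parts; the integer
division assumes the dimension divides evenly.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Shape of one part produced by an even split along 'axis'.
    std::vector<std::int32_t> partShape(std::vector<std::int32_t> shape,
                                        std::int32_t num_parts, std::int32_t axis) {
      assert(shape[axis] % num_parts == 0 && "split must be even");
      shape[axis] /= num_parts;
      return shape; // e.g. {4, 12, 8} split 3 ways on axis 1 -> {4, 4, 8}
    }
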
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index 584cc3d..9c547d2 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -222,7 +222,7 @@ ONNXOpCreator::convertPad(const std::vector<IODescriptor>& inputs,
     vec[i] = pair;
   }
   auto result =
-      createOp<ops::PadOp>(inputs[0], inputs[0].op->getOutputShape(0).rank(), vec, scalar);
+      createOp<ops::PadOp>(inputs[0], inputs[0].getShape().rank(), vec, scalar);
   return {result->getOutput(0)};
 }
 
@@ -243,9 +243,8 @@ ONNXOpCreator::convertPool(const std::vector<IODescriptor>& inputs,
       pool_type = ops::PoolOp::PoolingType::AVG;
       // GlobalAveragePool is equivalent to AveragePool with kernel size equal
       // to the spatial dimension of input tensor
-      cdata.kernel_shape = {t_input.op->getOutputShape(0).dim(1),
-                            t_input.op->getOutputShape(0).dim(2)};
-      cdata.strides_shape = Shape{1, 1};
+      cdata.kernel_shape = {t_input.getShape().dim(1), t_input.getShape().dim(2)};
+      cdata.strides_shape = {1, 1};
       break;
     }
     case ONNXOpCode::opAveragePool:
@@ -316,7 +315,7 @@ ONNXOpCreator::convertUnsqueeze(const std::vector<IODescriptor>& inputs,
                                 const onnx::NodeProto& onnx_node) {
   auto* axes = findAttribute(onnx_node, "axes");
   assert(axes && axes->ints_size());
-  const Shape& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  const Shape& input_shape = inputs[0].getShape();
   const int out_rank = input_shape.rank() + axes->ints_size();
   Shape out_shape(out_rank);
   auto ints_iterator = axes->ints().begin();
@@ -369,7 +368,7 @@ ONNXOpCreator::convertUpsample(const std::vector<IODescriptor>& inputs,
   auto* scales = dynamic_cast<ops::ConstantOp*>(inputs[1].op);
   assert(scales && "Weights could be a constant tensor only");
   auto scales_tensor = Tensor<float>(scales->getValue());
-  int rank = inputs[0].op->getOutputShape(0).rank();
+  int rank = inputs[0].getShape().rank();
   assert(scales_tensor.getShape().numElements() == rank &&
          "The number of elements of 'scales' should be the same as the rank of input 'X'"
   );
@@ -442,7 +441,7 @@ ONNXOpCreator::convertScale(const std::vector<IODescriptor>& inputs,
   float value;
   std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
   float scale_val = found ? value : 1.0;
-  const auto& shape = inputs[0].op->getOutputShape(inputs[0].index);
+  const auto& shape = inputs[0].getShape();
   auto scale_tensor = createTensor(scale_val, shape);
   auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
   auto result = createOp<ops::ScaleOp>(inputs[0], scale);
@@ -451,11 +450,11 @@ ONNXOpCreator::convertScale(const std::vector<IODescriptor>& inputs,
 
 std::vector<IODescriptor>
 ONNXOpCreator::convertShape(const std::vector<IODescriptor>& inputs) {
-  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  const auto& input_shape = inputs[0].getShape();
   int size = input_shape.rank();
   Shape output_shape{size};
   std::vector<float> data(static_cast<std::size_t>(size));
-  for (int i; i < size; i++) {
+  for (int i = 0; i < size; i++) {
     data[i] = input_shape.dim(i);
   }
   TensorVariant tensor(DTYPE::FLOAT32, output_shape, data.data());
@@ -521,15 +520,15 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
 
   // 1. Prepare input matrix A
   // Flatten the shape by dim(0)
-  const auto& in_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  const auto& in_shape = inputs[0].getShape();
   mir::Shape shape0{in_shape.dim(0), in_shape.numElements() / in_shape.dim(0)};
-  auto input_a = createOp<ops::ReshapeOp>(inputs[0], shape0);
+  auto input_a = createOp<ops::ReshapeOp>(inputs[0], shape0)->getOutput(0);
   if (trans_a)
-    input_a = createOp<ops::TransposeOp>(input_a->getOutput(0), std::vector<std::size_t>{1, 0});
+    input_a = createOp<ops::TransposeOp>(input_a, std::vector<std::size_t>{1, 0})->getOutput(0);
   if (alpha_val != 1.0) {
-    auto alpha_tensor = createTensor(alpha_val, input_a->getOutputShape(0));
+    auto alpha_tensor = createTensor(alpha_val, input_a.getShape());
     auto alpha = createOp<ops::ConstantOp>(alpha_tensor)->getOutput(0);
-    input_a = createOp<ops::ScaleOp>(input_a->getOutput(0), alpha);
+    input_a = createOp<ops::ScaleOp>(input_a, alpha)->getOutput(0);
   }
 
   // 2. Prepare input matrix B
@@ -538,24 +537,23 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   if (trans_b)
     input_b = createOp<ops::TransposeOp>(input_b, std::vector<std::size_t>{1, 0})->getOutput(0);
   // Number of cols in tensor A must be equal to number of rows in tensor B
-  assert(input_a->getOutput(0).op->getOutputShape(0).dim(1) ==
-         input_b.op->getOutputShape(0).dim(0));
-  Shape mult_a_b({input_a->getOutputShape(0).dim(0),
-                  input_b.op->getOutputShape(input_b.index).dim(1)});
+  assert(input_a.getShape().dim(1) == input_b.getShape().dim(0));
+  Shape mult_a_b{input_a.getShape().dim(0), input_b.getShape().dim(1)};
 
   // 3. Prepare input matrix C
   //
   auto input_c = inputs[2];
-  auto beta_tensor = createTensor(beta_val, input_c.op->getOutputShape(0));
+  auto beta_tensor = createTensor(beta_val, input_c.getShape());
   // TODO: check 'broadcast' attribute here
-  if ((mult_a_b.rank() == 2) && (input_c.op->getOutputShape(0).rank() == 1)) {
+  if ((mult_a_b.rank() == 2) && (input_c.getShape().rank() == 1)) {
     beta_tensor = TensorVariant(beta_tensor, mult_a_b);
   }
   auto beta = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
   std::vector<IODescriptor> descriptors = {beta, input_c};
-  auto c_mult = createOp<ops::ElementwiseOp>(descriptors, ops::ElementwiseOp::OpType::mul);
-  assert(c_mult->getOutputShape(0) == mult_a_b);
-  auto result = createOp<ops::GemmOp>(input_a->getOutput(0), input_b, c_mult->getOutput(0));
+  auto c_mult = createOp<ops::ElementwiseOp>(descriptors,
+                                             ops::ElementwiseOp::OpType::mul)->getOutput(0);
+  assert(c_mult.getShape() == mult_a_b);
+  auto result = createOp<ops::GemmOp>(input_a, input_b, c_mult);
   return {result->getOutput(0)};
 }
-- 
2.7.4
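
One behavioural fix rides along in the convertShape hunk: the old
`for (int i; i < size; i++)` read an uninitialized `i`, which is undefined
behaviour, so the loop could copy garbage or nothing at all. A minimal
standalone sketch of the corrected loop (the function name and float payload
are illustrative, mirroring the DTYPE::FLOAT32 tensor built above):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Materialize a shape's dimensions as the payload of a 1-D float tensor.
    std::vector<float> shapeToData(const std::vector<std::int32_t>& dims) {
      std::vector<float> data(dims.size());
      for (std::size_t i = 0; i < dims.size(); i++) // i starts at 0
        data[i] = static_cast<float>(dims[i]);
      return data;
    }
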