{
public:
int reduceType;
- std::vector<size_t> reduceDims;
+ // reduceDims contains the dimensions that need to be reduced; targetDims is the target output shape.
+ std::vector<size_t> reduceDims, targetDims;
static Ptr<ReduceLayer> create(const LayerParams& params);
};
{
reduceDims[i] = tempDims.get<int>(i);
}
+
+ CV_Assert(params.has("target_dims"));
+ tempDims = params.get("target_dims");
+ n = tempDims.size();
+ targetDims.resize(n);
+ for (i = 0; i < n; i++)
+ {
+ targetDims[i] = tempDims.get<int>(i);
+ }
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
- CV_Assert(reduceDims.size() != 0 && inputs[0].size() >= reduceDims.size());
+ CV_Assert( reduceDims.size() !=0 && targetDims.size() != 0 && inputs[0].size() >= reduceDims.size());
- std::vector<int> outShape;
+ // outShapeTmp holds the reduced shape so that `total(outShapeTmp)` yields the correct element count; outShape is used as the final output shape.
+ std::vector<int> outShapeTmp, outShape;
+ outShape.assign(targetDims.begin(), targetDims.end());
if (inputs[0].size() == reduceDims.size())
- outShape.push_back(1);
+ outShapeTmp.push_back(1);
else
{
for (int i = 0; i < inputs[0].size() - reduceDims.size(); i++)
{
- outShape.push_back(inputs[0][i]);
+ outShapeTmp.push_back(inputs[0][i]);
}
}
+
+ // Support a dynamic batch size.
+ // Note: if more than one input dimension is dynamic, the assertion below will fail.
+ if (total(outShape) != total(outShapeTmp))
+ {
+ if (outShape[0] != outShapeTmp[0])
+ outShape[0] = outShapeTmp[0];
+ }
+
+ CV_Assert(total(outShape) == total(outShapeTmp));
outputs.assign(1, outShape);
return false;
{
reduceDims[i] = tempDims.get<int>(i);
}
+
+ CV_Assert(params.has("target_dims"));
+ tempDims = params.get("target_dims");
+ n = tempDims.size();
+ targetDims.resize(n);
+ for (i = 0; i < n; i++)
+ {
+ targetDims[i] = tempDims.get<int>(i);
+ }
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
- CV_Assert(reduceDims.size() != 0 && inputs[0].size() >= reduceDims.size());
+ CV_Assert( reduceDims.size() !=0 && targetDims.size() != 0 && inputs[0].size() >= reduceDims.size());
- std::vector<int> outShape;
+ // outShapeTmp holds the reduced shape so that `total(outShapeTmp)` yields the correct element count; outShape is used as the final output shape.
+ std::vector<int> outShapeTmp, outShape;
+ outShape.assign(targetDims.begin(), targetDims.end());
if (inputs[0].size() == reduceDims.size())
- outShape.push_back(1);
+ outShapeTmp.push_back(1);
else
{
for (int i = 0; i < inputs[0].size() - reduceDims.size(); i++)
{
- outShape.push_back(inputs[0][i]);
+ outShapeTmp.push_back(inputs[0][i]);
}
}
+
+ // Support a dynamic batch size.
+ // Note: if more than one input dimension is dynamic, the assertion below will fail.
+ if (total(outShape) != total(outShapeTmp) && outShape[0] != outShapeTmp[0])
+ {
+ outShape[0] = outShapeTmp[0];
+ }
+
+ CV_Assert(total(outShape) == total(outShapeTmp));
outputs.assign(1, outShape);
return false;
int axesNum = axesMat.total();
for (int i = 0; i < axesNum; i++)
{
- int axis = normalize_axis(static_cast<int>(axesMat.at<float>(i)), inpShape.size());
+ int axis = normalize_axis(axesMat.at<int>(i), inpShape.size());
shouldDelete[axis] = true;
}
}
}
}
- MatShape targetShape;
+ std::vector<int> targetShape;
for (int i = 0; i < inpShape.size(); ++i)
{
if (!shouldDelete[i])
}
}
- LayerParams reduceLp = layerParams;
- reduceLp.name = layerParams.name + "/reduce";
- CV_Assert(layer_id.find(reduceLp.name) == layer_id.end());
- reduceLp.set("deleted_dims", DictValue::arrayInt(&deletedDims[0], deletedDims.size()));
+ layerParams.set("deleted_dims", DictValue::arrayInt(&deletedDims[0], deletedDims.size()));
+ layerParams.set("target_dims", DictValue::arrayInt(&targetShape[0], targetShape.size()));
node_proto.set_input(0, inputString);
- node_proto.set_output(0, reduceLp.name);
- addLayer(reduceLp, node_proto);
-
- layerParams.type = (depth == CV_8S) ? "ReshapeInt8" : "Reshape";
- layerParams.set("dim", DictValue::arrayInt(&targetShape[0], targetShape.size()));
-
- // Set batchsize dim as dynamic to be compatible with batch size >= 2.
- if (targetShape.size() > 1)
- {
- std::vector<int> dynamicAxes = {0}; // The index of batchsize dim is 0.
- std::vector<int> inputIndices = {0};
-
- layerParams.set("has_dynamic_shapes", true);
- layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size()));
- layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size()));
- }
-
- node_proto.set_input(0, node_proto.output(0));
node_proto.set_output(0, output_name);
addLayer(layerParams, node_proto);
TEST_P(Test_ONNX_layers, ReduceSum)
{
testONNXModels("reduce_sum");
- testONNXModels("reduce_sum_axis");
testONNXModels("reduce_sum_axis_dynamic_batch");
}