SliceLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
+ hasSteps = false;
axis = params.get<int>("axis", 1);
num_split = params.get<int>("num_split", 0);
hasDynamicShapes = params.get<bool>("has_dynamic_shapes", false);
sliceRanges[0][i].end = end; // We'll finalize a negative value later.
}
}
+
+ if (params.has("steps"))
+ {
+ const DictValue &steps = params.get("steps");
+ sliceSteps.resize(1);
+ sliceSteps[0].resize(steps.size());
+
+ for (int i = 0; i < steps.size(); ++i)
+ {
+ int step = steps.get<int>(i);
+ CV_Assert(step >= 1);
+ if (step > 1)
+ hasSteps = true;
+ sliceSteps[0][i] = step;
+ }
+ }
}
}
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
- sliceRanges.size() == 1 && sliceRanges[0].size() == 4;
+ sliceRanges.size() == 1 && sliceRanges[0].size() == 4 && !hasSteps;
#endif
#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- return sliceRanges.size() == 1;
+ return sliceRanges.size() == 1 && !hasSteps;
#endif
return backendId == DNN_BACKEND_OPENCV;
}
{
if (shapesInitialized || inpShape[j] > 0)
outputs[i][j] = normalize_axis_range(sliceRanges[i][j], inpShape[j]).size();
+
+ if (!sliceSteps.empty() && (i < sliceSteps.size()) && (j < sliceSteps[i].size()) && (sliceSteps[i][j] > 1))
+ outputs[i][j] = (outputs[i][j] + sliceSteps[i][j] - 1) / sliceSteps[i][j];
}
}
}
const MatSize& inpShape = inputs[0].size;
finalSliceRanges = sliceRanges;
+
if (sliceRanges.empty())
{
// Divide input blob on equal parts by axis.
}
}
+ if (!sliceSteps.empty() && sliceSteps[0].size() != inputs[0].dims)
+ sliceSteps[0].resize(inputs[0].dims, 1);
+
#if 0
std::cout << "DEBUG: DNN/Slice: " << outputs.size() << " inpShape=" << inpShape << std::endl;
for (int i = 0; i < outputs.size(); ++i)
{
CV_TRACE_FUNCTION();
+ if (hasSteps)
+ return false; // TODO not implemented yet: https://github.com/opencv/opencv/pull/19546
+
std::vector<UMat> inputs;
std::vector<UMat> outputs;
const Mat& inpMat = inputs[0];
CV_Assert(outputs.size() == finalSliceRanges.size());
- for (size_t i = 0; i < outputs.size(); i++)
+
+ if (!hasSteps)
{
- inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+ for (size_t i = 0; i < outputs.size(); i++)
+ {
+ inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+ }
+ }
+ else
+ {
+ int dimsNum = inpMat.dims;
+
+ for (size_t i = 0; i < outputs.size(); i++)
+ {
+ std::vector<int> inpIdx(dimsNum, 0);
+ std::vector<int> outIdx(dimsNum, 0);
+ getSliceRecursive(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
+ }
}
}
}
#endif // HAVE_DNN_NGRAPH
+private:
+ // Copies an N-dimensional strided slice element by element.
+ // inpMat  : source blob.
+ // inpIdx  : scratch index into inpMat, mutated in place while recursing over dims.
+ // sliceRanges[dim] : the [start, end) interval to copy along each axis.
+ // sliceSteps[dim]  : stride along each axis; an empty vector means step = 1 everywhere.
+ // dim / dimsNum    : current axis and total number of axes (recursion bottoms out at dimsNum-1).
+ // outputs : destination blob (assumed pre-sized to the strided-slice shape — TODO confirm via caller).
+ // outIdx  : scratch index into outputs, kept in lockstep with inpIdx.
+ void getSliceRecursive(const Mat &inpMat, std::vector<int> &inpIdx,
+ const std::vector<Range> &sliceRanges,
+ const std::vector<int> &sliceSteps, int dim, int dimsNum,
+ Mat &outputs, std::vector<int> &outIdx)
+ {
+ int begin = sliceRanges[dim].start;
+ int end = sliceRanges[dim].end;
+ // Missing steps degrade to a dense (step 1) copy on this axis.
+ int step = !sliceSteps.empty() ? sliceSteps[dim] : 1;
+
+ const bool is32F = inpMat.depth() == CV_32F;
+
+ // TODO optimization is required (for 2D tail case at least)
+ // k walks the source along this axis with the requested stride; j is the
+ // densely-packed destination index that pairs with it.
+ for (int k = begin, j = 0; k < end; k += step, j++)
+ {
+ inpIdx[dim] = k;
+ outIdx[dim] = j;
+
+ // Recurse until every axis index is fixed, then copy one scalar.
+ if (dim + 1 < dimsNum)
+ getSliceRecursive(inpMat, inpIdx, sliceRanges, sliceSteps, dim + 1, dimsNum, outputs, outIdx);
+ else
+ {
+ if (is32F)
+ outputs.at<float>(outIdx.data()) = inpMat.at<float>(inpIdx.data());
+ else
+ // NOTE(review): non-CV_32F data is moved via 16-bit loads/stores —
+ // assumes 2-byte elements (CV_16S/FP16); verify for other depths.
+ outputs.at<short>(outIdx.data()) = inpMat.at<short>(inpIdx.data()); // 16F emulation
+ }
+ }
+ }
+
protected:
// The actual non-negative values determined from @p sliceRanges depends on input size.
std::vector<std::vector<Range> > finalSliceRanges;
bool hasDynamicShapes;
bool shapesInitialized;
+ bool hasSteps;
};
class CropLayerImpl CV_FINAL : public SliceLayerImpl
int axis = 0;
std::vector<int> begin;
std::vector<int> end;
+ std::vector<int> steps;
int inp_size = node_proto.input_size();
if (inp_size == 1)
{
- if (layerParams.has("steps"))
- {
- DictValue steps = layerParams.get("steps");
- for (int i = 0; i < steps.size(); ++i)
- {
- if (steps.get<int>(i) != 1)
- CV_Error(Error::StsNotImplemented,
- "Slice layer only supports steps = 1");
- }
- }
if (layerParams.has("axes")) {
DictValue axes = layerParams.get("axes");
for (int i = 1; i < axes.size(); ++i) {
int finish = ends.get<int>(i);
end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
}
- } else {
+ } else { // inp_size > 1
CV_Assert(inp_size >= 3);
for (int i = 1; i < inp_size; i++) {
CV_Assert(constBlobs.find(node_proto.input(i)) != constBlobs.end());
if (inp_size == 5) {
CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
Mat step_blob = getBlob(node_proto, 4);
+ const int* steps_ptr = step_blob.ptr<int>();
+
+ if (axis > 0)
+ steps.resize(axis, 1);
+
+ std::copy(steps_ptr, steps_ptr + step_blob.total(), std::back_inserter(steps));
// Very strange application for Slice op with tensor reversing.
// We just workaround it for 2d constants.
return;
}
}
- CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1");
}
}
layerParams.set("begin", DictValue::arrayInt(&begin[0], begin.size()));
layerParams.set("end", DictValue::arrayInt(&end[0], end.size()));
layerParams.set("axis", axis);
+ if (!steps.empty())
+ layerParams.set("steps", DictValue::arrayInt(&steps[0], steps.size()));
+
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
{
Mat inp = getBlob(node_proto, 0);