using nncc::contrib::core::data::Shape;
-std::vector<int> calculate2DPaddings(ops::PaddingType paddingType, const Shape& inShape,
- const Shape& windowShape, const Shape& strides, Shape& outShape)
+template<class Op>
+void fillHWShapesForPaddedOperations(Op &op, const Shape &windowShape, Shape &outShape)
{
+ auto &strides = op.getStrides();
+ auto &inShape = op.getInputShape(0);
auto inRank = inShape.rank();
- // Assuming input tensor is 3-dimensional. Will support more general cases as needed.
- assert(inRank == 3);
- std::vector<int> paddings(3);
+ outShape.resize(inRank);
- if (paddingType == ops::PaddingType::Same)
+ ops::PaddingType pType = op.getPaddingType();
+ switch (pType)
{
+ case ops::PaddingType::Same:
for (uint32_t d = 0; d < inRank - 1; ++d)
{
outShape.dim(d) = (inShape.dim(d) - 1) / strides.dim(d) + 1;
(int)windowShape.dim(d) - (int)inShape.dim(d),
0);
}
- paddings[d] = pad_along_axis / 2;
+ op.setPadding(d, pad_along_axis / 2);
}
- }
- else
- {
+ break;
+ case ops::PaddingType::Valid:
for (uint32_t d = 0; d < inRank - 1; ++d)
{
- outShape.dim(d) = (inShape.dim(d) - windowShape.dim(d)) / strides.dim(d) + 1;
- paddings[d] = 0;
+ op.setPadding(d, 0);
}
+ // FALLTHROUGH
+ case ops::PaddingType::Custom:
+ for (uint32_t d = 0; d < inRank - 1; ++d)
+ {
+ outShape.dim(d) = (inShape.dim(d) + 2*op.getPadding(d) - windowShape.dim(d)) / strides.dim(d) + 1;
+ }
+ break;
+ default:
+ assert(false && "invalid padding type");
+ break;
}
-
- return paddings;
+ // For now padding for channels is not supported, initialize it with zero
+ op.setPadding(inRank - 1, 0);
}
/**
 * @brief Infer the output shape of a kernel-based (convolution-like) operation.
 *
 * Spatial dims and pads are delegated to fillHWShapesForPaddedOperations();
 * the channel dim of the output is taken from the kernel's last dimension.
 *
 * NOTE(review): the signature in this chunk says ops::ConcatOp, but the body
 * performs kernel-based shape inference (getKernel()) — this context line is
 * likely mangled and should read ops::Conv2DOp; confirm against the full file.
 */
void ShapeInference::visit(ADT::INode::Ref node, ops::ConcatOp &op)
{
  fillInputShapes(node, op);
  Shape outShape;
  auto &kernel = op.getKernel();
  auto &kernelShape = kernel.getShape();
  fillHWShapesForPaddedOperations(op, kernelShape, outShape);
  // Output channel count comes from the kernel's last dimension.
  outShape.dim(outShape.rank() - 1) = kernelShape.dim(kernelShape.rank() - 1);
  op.setOutputShape(0, outShape);
}
fillInputShapes(node, op);
Shape outShape;
- outShape.resize(3);
- auto &strides = op.getStrides();
auto &windowShape = op.getWindowShape();
auto &inShape = op.getInputShape(0);
const uint32_t inRank = inShape.rank();
-
// Assuming input tensor is 3-dimensional. Will support more general cases when needed.
assert(inRank == 3);
- auto pads = calculate2DPaddings(op.getPaddingType(), inShape, windowShape, strides, outShape);
- for (uint32_t d = 0; d < inShape.rank(); ++d)
- {
- op.setPadding(d, pads[d]);
- }
+ fillHWShapesForPaddedOperations(op, windowShape, outShape);
+
outShape.dim(inRank - 1) = inShape.dim(inRank - 1);
op.setOutputShape(0, outShape);
}
fillInputShapes(node, op);
Shape outShape;
- outShape.resize(3);
- auto &strides = op.getStrides();
auto &kernelShape = op.getKernel().getShape();
auto &inShape = op.getInputShape(0);
int inRank = inShape.rank();
assert(inRank == 3);
assert(inShape.dim(2) == kernelShape.dim(2));
- auto pads = calculate2DPaddings(op.getPaddingType(), inShape, kernelShape, strides, outShape);
- for (uint32_t d = 0; d < inShape.rank(); ++d)
- {
- op.setPadding(d, pads[d]);
- }
+ fillHWShapesForPaddedOperations(op, kernelShape, outShape);
outShape.dim(inRank - 1) = inShape.dim(inRank - 1) * kernelShape.dim(kernelRank - 1);
op.setOutputShape(0, outShape);
}
}
-template <typename OptsType>
-static inline bool has2DPad(const OptsType& opts)
-{
- return opts.has_pad_h() || opts.has_pad_w();
-}
-
-static ops::PaddingType getPadTypeFromOneValue(bool hasPad, unsigned int pad)
-{
- if (hasPad)
- {
- if (pad == 0)
- {
- return ops::PaddingType::Valid;
- }
- else
- {
- return ops::PaddingType::Same;
- }
- }
- else
- {
- return ops::PaddingType::Valid;
- }
-}
-
-/**
- * @brief Determine padding type (SAME/VALID) from two numeric values - height and width padding.
- * In general, knowing the input tensor shape is required to properly determine the padding type.
- * Also, these two padding values might not map to SAME or VALID padding.
- * @todo Change cout to logging call.
- */
-static ops::PaddingType getPadTypeFromTwoValues(unsigned int pad1, unsigned int pad2)
-{
- bool areBothPadsZero = pad1 == 0 && pad2 == 0;
- if (areBothPadsZero)
- {
- return ops::PaddingType::Valid;
- }
- else
- {
- std::string pads = "[" + std::to_string(pad1) + ", " + std::to_string(pad2) + "]";
- std::cout << "WARNING! Encountered padding " << pads
- << ", assuming padding SAME, but it is not guaranteed to be correct." << std::endl;
- return ops::PaddingType::Same;
- }
-}
-
-template <typename OptsType>
-static inline ops::PaddingType getPadTypeFromTwoValues(const OptsType &opts)
-{
- unsigned int pad1, pad2;
- pad1 = opts.has_pad_h() ? opts.pad_h() : 0;
- pad2 = opts.has_pad_w() ? opts.pad_w() : 0;
-
- return getPadTypeFromTwoValues(pad1, pad2);
-}
-
-template <typename OptsType>
-static ops::PaddingType getPadType(const OptsType &opts)
-{
- if (opts.pad_size() == 0)
- {
- return ops::PaddingType::Valid;
- }
- else if (opts.pad_size() == 1)
- {
- return getPadTypeFromOneValue(true, opts.pad(0));
- }
- else
- {
- return getPadTypeFromTwoValues(opts.pad(0), opts.pad(1));
- }
-}
-
-/**
- * @brief Tries to determine whether padding is SAME or VALID given
- * numeric pad values from Caffe layer options. Only 2D convolutions/pools
- * are supported currently.
- * @todo Currently, pad_h and pad_w options take precedence if they are present,
- * but maybe it is not correct logic. Check how it really is done.
- * @todo Specific calculations are required to check that padding is indeed SAME,
- * currently we just return SAME if padding is non-zero.
- */
-__attribute__ ((unused)) static ops::PaddingType getConvPadType(const ConvolutionParameter& opts)
-{
- if (has2DPad(opts))
- return getPadTypeFromTwoValues(opts);
- else
- return getPadType(opts);
-}
-
-__attribute__ ((unused)) static ops::PaddingType getPoolPadType(const PoolingParameter& opts)
-{
- if (has2DPad(opts))
- return getPadTypeFromTwoValues(opts);
- else
- return getPadTypeFromOneValue(opts.has_pad(), opts.pad());
-}
-
/**
* @brief Determines stride values from Caffe layer options.
* Only 2D convolutions/pools are supported currently.
/**
 * @brief Create a Conv2D node (plus optional bias-add) from Caffe layer options.
 *
 * Numeric pad values from the Caffe options are stored directly on the op with
 * PaddingType::Custom, instead of trying to guess SAME/VALID from them.
 *
 * @param inputs  input nodes of the convolution
 * @param params  params[0] is the (unfolded) weight tensor; params[1] the bias
 * @param opts    Caffe ConvolutionParameter describing strides/pads/bias
 * @throws PassException on conflicting or unsupported padding properties
 */
std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams params,
                                                const caffe::ConvolutionParameter& opts)
{
  assert(opts.stride_size() <= 2);
  ops::PaddingType padType = ops::PaddingType::Custom;
  Shape strideShape = util::getConvStride(opts);
  std::shared_ptr<IrTensor> unfoldedTensor = params[0];
  auto outputs = createOp<ops::Conv2DOp>(inputs, std::move(*unfoldedTensor),
                                         strideShape, padType);

  // Transfer the numeric Caffe pad values onto the created op.
  auto *op = static_cast<ops::Conv2DOp *>(outputs[0]->getOperation());

  // Caffe allows either the repeated `pad` field or the explicit
  // pad_h/pad_w pair — never both at once.
  if (opts.pad_size() != 0 && (opts.has_pad_h() || opts.has_pad_w()))
    throw PassException("Conflicting padding properties in convolution");

  int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
  int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
  switch (opts.pad_size())
  {
    case 0:
      // No common padding property set; keep pad_h/pad_w from above.
      break;
    case 1:
      // A single `pad` value applies to both spatial dimensions.
      pad_h = pad_w = opts.pad(0);
      break;
    default:
      throw PassException("Unsupported number of pads");
  }
  op->setPadding(0, pad_h);
  op->setPadding(1, pad_w);
  // Channel padding is not supported.
  op->setPadding(2, 0);

  // bias_term is optional (so might not be present) and defaults to true.
  if (!opts.has_bias_term() || opts.bias_term())
    return createOp<ops::BiasAddOp>(outputs, std::move(*params[1]));
Shape windowShape = util::getPoolWindowShape(opts);
ops::PoolOp::PoolingType poolType = util::getPoolingType(opts);
- ops::PaddingType padType = util::getPoolPadType(opts);
+ ops::PaddingType padType = ops::PaddingType::Custom;
Shape stride = util::getPoolStride(opts);
- return createOp<ops::PoolOp>(inputs, windowShape, stride, poolType, padType);
+ auto pooling = createOp<ops::PoolOp>(inputs, windowShape, stride, poolType, padType);
+
+ // Set pads
+ auto op = static_cast<ops::PoolOp *>(pooling[0]->getOperation());
+ if (opts.has_pad() && (opts.has_pad_h() || opts.has_pad_w()))
+ throw PassException("Conflicting padding properties in pooling");
+
+ int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
+ int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
+ if (opts.has_pad())
+ {
+ pad_h = pad_w = opts.pad();
+ }
+ op->setPadding(0, pad_h);
+ op->setPadding(1, pad_w);
+ op->setPadding(2, 0);
+
+ return pooling;
}
std::vector<INode::Ref> OpCreator::createSoftmax(InputOps inputs, InputParams params,