From: Efimov Alexander/AI Tools Lab/./Samsung Electronics
Date: Thu, 13 Sep 2018 17:49:03 +0000 (+0300)
Subject: Support new caffe operations in soft backend (#1476)
X-Git-Tag: nncc_backup~1821
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=cd02e70ac08bf57cdeb00dab87206c33f4f4f775;p=platform%2Fcore%2Fml%2Fnnfw.git

Support new caffe operations in soft backend (#1476)

Support new operations:
+ Scale (works on channel axis only)
+ Dropout (passes tensor without changes)
+ BatchNorm (passes tensor without changes)

Signed-off-by: Efimov Alexander
---

diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_batchnorm.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_batchnorm.def
new file mode 100644
index 0000000..66c60f3
--- /dev/null
+++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_batchnorm.def
@@ -0,0 +1,11 @@
+void batchNorm(Tensor &out, const char *params, const Tensor &in)
+{
+  out.reShape(in.getShape());
+  float eps = deserializeT<float>(params);
+  (void)eps;
+  float avgFraction = deserializeT<float>(params);
+  (void)avgFraction;
+  bool spatial = deserializeT<bool>(params);
+  (void)spatial;
+  out.fillData(in.getData());
+}
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_dropout.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_dropout.def
new file mode 100644
index 0000000..9fcb51a
--- /dev/null
+++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_dropout.def
@@ -0,0 +1,7 @@
+void dropout(Tensor &out, const char *params, const Tensor &in)
+{
+  out.reShape(in.getShape());
+  float rate = deserializeT<float>(params);
+  (void)rate;
+  out.fillData(in.getData());
+}
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_scale.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_scale.def
new file mode 100644
index 0000000..828c6d4
--- /dev/null
+++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_scale.def
@@ -0,0 +1,25 @@
+void scale(Tensor &out, const char *params, const Tensor &in)
+{
+  out.reShape(in.getShape());
+  Kernel weights = deserializeKernel(params);
+  Shape inShape = in.getShape();
+  const int wSize = weights.dims.sizes[0];
+  assert(wSize == inShape[inShape.getDims() - 1]);
+  assert(weights.dims.sizes[1] == 1);
+  assert(weights.dims.sizes[2] == 1);
+  assert(weights.dims.sizes[3] == 1);
+
+  const float *wData = weights.data;
+  const float *inData = in.getData();
+  float *outData = out.getData();
+  int32_t dataSize = inShape.getNumElems();
+
+  assert(dataSize % wSize == 0);
+  for (int32_t sliceOffset = 0; sliceOffset < dataSize; sliceOffset += wSize)
+  {
+    for (int i = 0; i < wSize; i++)
+    {
+      outData[sliceOffset + i] = inData[sliceOffset + i] * wData[i];
+    }
+  }
+}
diff --git a/contrib/nnc/passes/soft_backend/cpp_generator.cpp b/contrib/nnc/passes/soft_backend/cpp_generator.cpp
index 698a0bc..8ce0e4b 100644
--- a/contrib/nnc/passes/soft_backend/cpp_generator.cpp
+++ b/contrib/nnc/passes/soft_backend/cpp_generator.cpp
@@ -23,6 +23,9 @@ using namespace nncc::contrib::core::IR::model;
 #include "cpp_pool.generated.h"
 #include "cpp_relu.generated.h"
 #include "cpp_softmax.generated.h"
+#include "cpp_scale.generated.h"
+#include "cpp_dropout.generated.h"
+#include "cpp_batchnorm.generated.h"
 
 namespace nncc
 {
@@ -251,6 +254,9 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co
 
   out.write(param_constants, sizeof(param_constants));
   out.write(cpp_operations, sizeof(cpp_operations));
+  out.write(cpp_scale, sizeof(cpp_scale));
+  out.write(cpp_dropout, sizeof(cpp_dropout));
+  out.write(cpp_batchnorm, sizeof(cpp_batchnorm));
 
   // gen NN constructor
   out << className << "::" << className << "(const string &parametersPath)\n"
diff --git a/contrib/nnc/passes/soft_backend/serializer.cpp b/contrib/nnc/passes/soft_backend/serializer.cpp
index 425437f..278bf84 100644
--- a/contrib/nnc/passes/soft_backend/serializer.cpp
+++ b/contrib/nnc/passes/soft_backend/serializer.cpp
@@ -236,7 +236,7 @@ void Serializer::visit(ADT::INode *node, ops::BatchNormOp &op)
   _curOp->_paramStartOffset = _buffer.size();
   serializeT(op.getEps());
   serializeT(op.getMovingAvgFraction());
-  serializeT(op.getSpatial());
+  serializeT(op.getSpatial());
 }
 
 void Serializer::visit(ADT::INode *node, ops::ScaleOp &op)
diff --git a/contrib/nnc/unittests/soft_backend/cpp_operations.cpp b/contrib/nnc/unittests/soft_backend/cpp_operations.cpp
index 66d4ae2..e812863 100644
--- a/contrib/nnc/unittests/soft_backend/cpp_operations.cpp
+++ b/contrib/nnc/unittests/soft_backend/cpp_operations.cpp
@@ -21,6 +21,7 @@
 #include "param_constants.def"
 #include "code_snippets/cpp_header_types.def"
 #include "code_snippets/cpp_operations.def"
+#include "code_snippets/cpp_scale.def"
 
 // soft backend part
 
@@ -39,6 +40,7 @@
 #include "core/modelIR/operations/concat_op.h"
 #include "core/modelIR/operations/bias_add_op.h"
 #include "core/modelIR/operations/softmax_op.h"
+#include "core/modelIR/operations/scale_op.h"
 
 // various headers
 #include "core/modelIR/TensorVariant.h"
@@ -265,6 +267,20 @@ TEST(cpp_operations_test, bias)
   createAndRunTestGraph(opGenerator, biasAdd, inputNTensors, aInputTensor);
 }
 
+TEST(cpp_operations_test, scale)
+{
+  vector<int> inputShapeData{2, 3, 4, 5};
+  data::Shape weightsShape{5};
+  vector<unique_ptr<TensorVariant>> inputNTensors(1);
+  Tensor aInputTensor;
+  fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
+
+  TensorVariant weights = createNTensor(weightsShape, 1.0f);
+  auto opGenerator = [weights](Graph &g){return g.create<ops::ScaleOp>("y", weights);};
+
+  createAndRunTestGraph(opGenerator, scale, inputNTensors, aInputTensor);
+}
+
 TEST(cpp_operations_test, capped_relu)
 {
   // test prerequisites