--- /dev/null
+// Inference-time batch-normalization stub: deserializes the op parameters
+// (in the exact order the serializer wrote them) and copies input to output
+// unchanged. NOTE(review): normalization itself is not applied here yet.
+void batchNorm(Tensor &out, const char *params, const Tensor &in)
+{
+ out.reShape(in.getShape());
+ float eps = deserializeT<float>(params);
+ // (void) cast is the portable way to mark a value intentionally unused;
+ // the previous (float)eps; was a no-op that still triggers warnings.
+ (void)eps;
+ float avgFraction = deserializeT<float>(params);
+ (void)avgFraction;
+ // spatial is serialized as int32_t for a stable on-disk layout.
+ bool spatial = deserializeT<int32_t>(params);
+ (void)spatial;
+ out.fillData(in.getData());
+}
--- /dev/null
+// Dropout is an identity operation at inference time: the serialized drop
+// rate is consumed (to keep the parameter stream in sync) and ignored.
+void dropout(Tensor &out, const char *params, const Tensor &in)
+{
+ float dropRate = deserializeT<float>(params);
+ (void)dropRate;
+ out.reShape(in.getShape());
+ out.fillData(in.getData());
+}
--- /dev/null
+// Per-channel scale: multiplies every element of the input by the weight
+// of its innermost-dimension (channel) index and writes the result to out.
+void scale(Tensor &out, const char *params, const Tensor &in)
+{
+ out.reShape(in.getShape());
+ Kernel weights = deserializeKernel(params);
+ Shape inShape = in.getShape();
+ const int wSize = weights.dims.sizes[0];
+ // Weights must be a plain vector whose length equals the last input dim.
+ assert(wSize == inShape[inShape.getDims() - 1]);
+ assert(weights.dims.sizes[1] == 1);
+ assert(weights.dims.sizes[2] == 1);
+ assert(weights.dims.sizes[3] == 1);
+
+ const float *wData = weights.data;
+ const float *inData = in.getData();
+ float *outData = out.getData();
+ int32_t dataSize = inShape.getNumElems();
+
+ assert(dataSize % wSize == 0);
+ // Flat walk over the tensor; the channel index repeats with period wSize.
+ for (int32_t idx = 0; idx < dataSize; idx++)
+ {
+ outData[idx] = inData[idx] * wData[idx % wSize];
+ }
+}
#include "cpp_pool.generated.h"
#include "cpp_relu.generated.h"
#include "cpp_softmax.generated.h"
+#include "cpp_scale.generated.h"
+#include "cpp_dropout.generated.h"
+#include "cpp_batchnorm.generated.h"
namespace nncc
{
out.write(param_constants, sizeof(param_constants));
out.write(cpp_operations, sizeof(cpp_operations));
+ out.write(cpp_scale, sizeof(cpp_scale));
+ out.write(cpp_dropout, sizeof(cpp_dropout));
+ out.write(cpp_batchnorm, sizeof(cpp_batchnorm));
// gen NN constructor
out << className << "::" << className << "(const string ¶metersPath)\n"
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getEps());
serializeT<float>(op.getMovingAvgFraction());
- serializeT<bool>(op.getSpatial());
+ serializeT<int32_t>(op.getSpatial());
}
void Serializer::visit(ADT::INode *node, ops::ScaleOp &op)
#include "param_constants.def"
#include "code_snippets/cpp_header_types.def"
#include "code_snippets/cpp_operations.def"
+#include "code_snippets/cpp_scale.def"
// soft backend part
#include "core/modelIR/operations/concat_op.h"
#include "core/modelIR/operations/bias_add_op.h"
#include "core/modelIR/operations/softmax_op.h"
+#include "core/modelIR/operations/scale_op.h"
// various headers
#include "core/modelIR/TensorVariant.h"
createAndRunTestGraph(opGenerator, biasAdd, inputNTensors, aInputTensor);
}
+TEST(cpp_operations_test, scale)
+{
+ // Constant-filled 4d input; weight vector sized to the last dimension.
+ vector<int> shapeData{2, 3, 4, 5};
+ data::Shape scaleShape{5};
+ vector<unique_ptr<TensorVariant>> nTensors(1);
+ Tensor inputATensor;
+ fillTensors(nTensors[0], inputATensor, shapeData, 1.0f);
+
+ TensorVariant scaleWeights = createNTensor(scaleShape, 1.0f);
+ auto makeOp = [scaleWeights](Graph &g) { return g.create<IR::model::ops::ScaleOp>("y", scaleWeights); };
+
+ createAndRunTestGraph(makeOp, scale, nTensors, inputATensor);
+}
+
TEST(cpp_operations_test, capped_relu)
{
// test prerequisites