From: Vladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자 Date: Fri, 1 Feb 2019 09:58:03 +0000 (+0300) Subject: [nnc] Fixed all integer overflow/underflow issues in cpu artifact snippets (#2981) X-Git-Tag: nncc_backup~886 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=72be3119df8b0d448677c05a102a44fddf8ffb03;p=platform%2Fcore%2Fml%2Fnnfw.git [nnc] Fixed all integer overflow/underflow issues in cpu artifact snippets (#2981) Added explicit static casts where converting between different external types( Shape, RuntimeShape, Dims<> ) Signed-off-by: Vladimir Plazun --- diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def index f3e88a4..2d77cb9 100644 --- a/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def +++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def @@ -42,7 +42,7 @@ void readParameters(char *&data, size_t &len, const string &path, int statRes = fstat(fd, &st); assert(statRes != -1); UNUSED(statRes); - len = st.st_size; + len = static_cast<size_t>(st.st_size); assert(len >= params::HEADER_LEN); // check magic correctness @@ -94,10 +94,10 @@ size_t volume(Dims<rank> d) } RuntimeShape shapeToRuntimeShape(const Shape& s) { - const int rank = s.getDims(); + const int rank = static_cast<int>(s.getDims()); RuntimeShape sh(rank); for (int i = 0; i < rank; i++) { - sh.SetDim(i,s[i]); + sh.SetDim(i, static_cast<int32_t>(s[i])); } return sh; } @@ -105,12 +105,12 @@ RuntimeShape shapeToRuntimeShape(const Shape& s) { Dims<4> shapeToDims(const Shape &s) { Dims<4> dims; - const int rank = s.getDims(); + const int rank = static_cast<int>(s.getDims()); assert(rank >= 1 && rank <= 4); int stride = 1; for (int i = 0; i < rank; ++i) { - dims.sizes[i] = s[rank - 1 - i]; + dims.sizes[i] = static_cast<int>(s[rank - 1 - i]); dims.strides[i] = stride; stride *= s[rank - 1 - i]; } @@ -174,7 +174,7 @@ void concat(Tensor &out, const char *params, const Args &...inputs) int axis = 
deserializeT<int32_t>(params); Shape out_s = deserializeShape(params); // because inner functions accepts axis in reverse order - axis = out_s.getDims() - 1 - axis; + axis = static_cast<int>(out_s.getDims()) - 1 - axis; int inputs_count = sizeof(input)/sizeof(input[0]); out.reshape(out_s); @@ -235,7 +235,7 @@ void convTransposed2d(Tensor& out, const char* params, const Tensor& input, cons const RuntimeShape out_rt_shape = shapeToRuntimeShape(out_shape); // Transpose the kernel from HWOI to OHWI format. - const Shape kernel_shape = kernel.getShape(); + const Shape& kernel_shape = kernel.getShape(); const RuntimeShape kernel_rt_shape = {static_cast<int32_t>(kernel_shape[2]), static_cast<int32_t>(kernel_shape[0]), static_cast<int32_t>(kernel_shape[1]), @@ -330,14 +330,14 @@ static inline void genericPool(Executor executor, Tensor &out, Shape out_s = deserializeShape(params); assert(window.getDims() == 2); - const int window_w = window[1]; - const int window_h = window[0]; + const int window_w = static_cast<int>(window[1]); + const int window_h = static_cast<int>(window[0]); assert(strides.getDims() == 2); - const int stride_w = strides[1]; - const int stride_h = strides[0]; + const int stride_w = static_cast<int>(strides[1]); + const int stride_h = static_cast<int>(strides[0]); assert(pads.getDims() == 2); - const int pad_w = pads[1]; - const int pad_h = pads[0]; + const int pad_w = static_cast<int>(pads[1]); + const int pad_h = static_cast<int>(pads[0]); out.reshape(out_s); @@ -398,7 +398,7 @@ void resize(Tensor& out, const char* params, const Tensor& in) { ResizeNearestNeighbor( in_shape, input, - out_shape[1], out_shape[2], + static_cast<int>(out_shape[1]), static_cast<int>(out_shape[2]), out_runtime, out.getData()); } @@ -429,16 +429,16 @@ void slice(Tensor& out, const char* params, const Tensor& in) { out.reshape(out_s); SliceParams slice_params; - slice_params.begin_count = starts.getDims(); - slice_params.size_count = sizes.getDims(); + slice_params.begin_count = static_cast<int8_t>(starts.getDims()); + slice_params.size_count = 
static_cast<int8_t>(sizes.getDims()); assert(slice_params.begin_count <= 4); assert(slice_params.size_count <= 4); assert(starts.getDims() == sizes.getDims()); for (int i = 0; i < slice_params.begin_count; i++) { - slice_params.begin[i] = starts[i]; - slice_params.size[i] = sizes[i]; + slice_params.begin[i] = static_cast<int32_t>(starts[i]); + slice_params.size[i] = static_cast<int32_t>(sizes[i]); } Slice( slice_params, @@ -511,7 +511,7 @@ void ElementWise(Tensor &out, const char* params, const Args& ...inputs) { } } else { auto running_shape = RuntimeShape::ExtendedShape(4, in_runtime_shapes[0]); - std::vector<float> inp_tmp(out_shape.getNumElems()); + std::vector<float> inp_tmp(static_cast<size_t>(out_shape.getNumElems())); for (int32_t i = 1; i < num_inputs; ++i) { assert(running_shape.FlatSize() <= out_shape.getNumElems()); @@ -540,31 +540,31 @@ void reshape(Tensor& out, const char* params, const Tensor& in) { void reduceMean(Tensor& out, const char* params, const Tensor& in) { Shape tmp_reduction_dims = deserializeShape(params); - bool keep_dims = deserializeT<int32_t>(params); + bool keep_dims = static_cast<bool>(deserializeT<int32_t>(params)); Shape out_s = deserializeShape(params); out.reshape(out_s); - const int32_t rank_inp = in.getShape().getDims(); - const int32_t rank_out = out_s.getDims(); - const int32_t rank_axis = tmp_reduction_dims.getDims(); + const int32_t rank_inp = static_cast<int32_t>(in.getShape().getDims()); + const int32_t rank_out = static_cast<int32_t>(out_s.getDims()); + const int32_t rank_axis = static_cast<int32_t>(tmp_reduction_dims.getDims()); int32_t in_dim[8]; int32_t tmp_index[8]; // input iterator storage assert(rank_inp < 8); for (int i = 0; i < rank_inp; i++) { - in_dim[i] = in.getShape()[i]; + in_dim[i] = static_cast<int32_t>(in.getShape()[i]); } int32_t out_dim[8]; assert(rank_out <= 8); for (int i = 0; i < rank_out; i++) { - out_dim[i] = out.getShape()[i]; + out_dim[i] = static_cast<int32_t>(out.getShape()[i]); } int32_t axis[8]; int32_t resolved_axis[8]; // in case there are negative or duplicate indexes assert(rank_axis <= 8); for (int 
i = 0; i < rank_axis; i++) { - axis[i] = tmp_reduction_dims[i]; + axis[i] = static_cast<int32_t>(tmp_reduction_dims[i]); } float* temp_sum = new float[out_s.getNumElems()]; @@ -619,7 +619,7 @@ void sqrtFN(Tensor& out, const char* params, const Tensor& in) { void transpose(Tensor &out, const char *params, const Tensor &in) { TransposeParams transpose_params; - transpose_params.perm_count = deserializeT<int32_t>(params); + transpose_params.perm_count = static_cast<int8_t>(deserializeT<int32_t>(params)); for (int i = 0; i < transpose_params.perm_count; ++i) transpose_params.perm[i] = deserializeT<int32_t>(params); @@ -634,7 +634,7 @@ void transpose(Tensor &out, const char *params, const Tensor &in) { void gather(Tensor &out, const char *params, const Tensor &data, const Tensor &indices) { GatherParams gather_params; - gather_params.axis = deserializeT<int32_t>(params); + gather_params.axis = static_cast<int16_t>(deserializeT<int32_t>(params)); Shape out_s = deserializeShape(params); out.reshape(out_s);