void cappedRelu(Tensor &out, const char *params, const Tensor &in)
{
  // Applies a "capped ReLU" elementwise: clamps every input value to [0, cap].
  // The cap is the single float serialized into `params`.
  const float cap = deserializeT<float>(params);
  const Dims<4> dims = shapeToDims(in.getShape());

  // Output takes the input's shape, so one Dims<4> describes both buffers.
  out.reShape(in.getShape());

  CappedRelu(in.getData(), dims, cap, out.getData(), dims);
}
void biasAdd(Tensor &out, const char *params, const Tensor &in)
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+inline void CappedRelu(const float* input_data, const Dims<4>& input_dims,
+ float cap, float* output_data, const Dims<4>& output_dims) {
+ const int flat_size = MatchingFlatSize(input_dims, output_dims);
+ for (int i = 0; i < flat_size; ++i) {
+ const float val = input_data[i];
+ const float lower = 0;
+ const float clamped = val > cap ? cap : val < lower ? lower : val;
+ output_data[i] = clamped;
+ }
+}