#include <vector>
#include <iostream>
+#include <cmath>
#include <runtime/pwl.h>
#include <gna_slope_scale.h>
y_upper = tmp;
}
- int64_t x_lower_new = FLOAT_TO_INT32((x_lower / in_scale) / abs(pow_scale) * in_scale);
- int64_t x_upper_new = FLOAT_TO_INT32((x_upper / in_scale) / abs(pow_scale) * in_scale);
+ int64_t x_lower_new = FLOAT_TO_INT32((x_lower / in_scale) / std::fabs(pow_scale) * in_scale);
+ int64_t x_upper_new = FLOAT_TO_INT32((x_upper / in_scale) / std::fabs(pow_scale) * in_scale);
x_lower = static_cast<int32_t>(x_lower_new);
x_upper = static_cast<int32_t>(x_upper_new);
if (x_lower_new < INT32_MIN) {
- int16_t offset_lower = abs(x_lower_new - INT32_MIN) / in_scale * out_scale;
+ int16_t offset_lower = std::abs(x_lower_new - INT32_MIN) / in_scale * out_scale;
x_lower = INT32_MIN;
y_lower = y_lower + offset_lower;
}
#include <cstdint>
-#define FLOAT_TO_INT16(a) static_cast<int16_t>(((a) < 0)?((a) - 0.5):((a) + 0.5))
-#define FLOAT_TO_INT32(a) static_cast<int32_t>(((a) < 0)?((a)-0.5):((a)+0.5))
+#define FLOAT_TO_INT16(a) static_cast<int16_t>(((a) < 0)?((a) - 0.5f):((a) + 0.5f))
+#define FLOAT_TO_INT32(a) static_cast<int32_t>(((a) < 0)?((a)-0.5f):((a)+0.5f))
// Returns the preallocated buffer; the 'size' argument is ignored.
// NOTE(review): assumes 'ptr' is a member initialized elsewhere in the
// enclosing class (not visible here) — confirm it is set before any
// alloc() call and that reuse of one buffer for all requests is intended.
void * alloc(size_t size) noexcept override {
return ptr;
}
- virtual bool free(void* handle) noexcept {
+ bool free(void* handle) noexcept override {
return true;
}
- virtual void Release() noexcept {
+ void Release() noexcept override {
delete this;
}
};
};
}
- JitConstants GetJitConstants(const eltwise_params& params) const;
+ JitConstants GetJitConstants(const eltwise_params& params) const override;
protected:
bool Validate(const Params& p, const optional_params& o) const override;
public:
ReduceKernel_b_fs_yx_fsv16() : ReduceKernelBase("reduce_gpu_b_fs_yx_fsv16") {}
virtual ~ReduceKernel_b_fs_yx_fsv16() {}
- virtual CommonDispatchData SetDefault(const reduce_params& params, const optional_params&) const;
+ CommonDispatchData SetDefault(const reduce_params& params, const optional_params&) const override;
JitConstants GetJitConstants(const reduce_params& params) const override;
KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
public:
ReduceKernelRef() : ReduceKernelBase("reduce_ref") {}
virtual ~ReduceKernelRef() {}
- virtual CommonDispatchData SetDefault(const reduce_params& params, const optional_params&) const;
+ CommonDispatchData SetDefault(const reduce_params& params, const optional_params&) const override;
KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
JitConstants GetJitConstants(const reduce_params& params) const override;
protected:
virtual CommonDispatchData SetDefault(const space_to_depth_params& params, const optional_params&) const;
virtual JitConstants GetJitConstants(const space_to_depth_params& params) const;
- virtual bool Validate(const Params& p, const optional_params& o) const;
+ bool Validate(const Params& p, const optional_params& o) const override;
std::vector<FusedOpType> GetSupportedFusedOps() const override {
return { FusedOpType::ELTWISE,
FusedOpType::QUANTIZE,
}
std::shared_ptr<gpu_toolkit> get_context() const { return _ctx; }
- cl::Event get() { return _event; }
+ cl::Event get() override { return _event; }
private:
std::shared_ptr<gpu_toolkit> _ctx;
_attached = true;
}
- cl::Event get() { return _last_ocl_event; }
+ cl::Event get() override { return _last_ocl_event; }
std::shared_ptr<gpu_toolkit> get_context() const { return _ctx; }
private: