|| (md.data.format == bwd_weights_qvnni);
const int ndims = md.data.ndims;
- const int *dims = md.data.dims;
- const int *pdims = md.data.layout_desc.blocking.padding_dims;
- const int *optd = md.data.layout_desc.blocking.offset_padding_to_data;
+ const ptrdiff_t *dims = md.data.dims;
+ const ptrdiff_t *pdims = md.data.layout_desc.blocking.padding_dims;
+ const ptrdiff_t *optd = md.data.layout_desc.blocking.offset_padding_to_data;
auto *strides_block = md.data.layout_desc.blocking.strides[0];
auto *strides_within_block = md.data.layout_desc.blocking.strides[1];
const mkldnn::memory::desc src_d = src.get_primitive_desc().desc();
const int ndims = src_d.data.ndims;
- const int *dims = src_d.data.dims;
- const int *pdims = src_d.data.layout_desc.blocking.padding_dims;
+ const ptrdiff_t *dims = src_d.data.dims;
+ const ptrdiff_t *pdims = src_d.data.layout_desc.blocking.padding_dims;
size_t idx[MAX_NDIMS] = {}, str[MAX_NDIMS] = {};
size_t nelems = 1;
case f::nChw16c:
case f::oihw:
case f::hwio:
+ case f::iohw:
case f::oIhw8i:
case f::oIhw16i:
case f::OIhw8i8o:
case f::Ohwi8o:
case f::Ohwi16o:
case f::OhIw8o4i:
+ case f::OIhw4i16o4i_s8s8:
+ case f::OhIw8o4i_s8s8:
+ case f::OhIw8o32i:
+ case f::OhIw16o32i:
ndims = 4; break;
case f::ncdhw:
case f::ndhwc:
case f::oidhw:
case f::goihw:
case f::hwigo:
+ case f::giohw:
case f::oIdhw8i:
case f::oIdhw16i:
case f::OIdhw8i8o:
case f::gOhwi8o:
case f::Goihw8g:
case f::Goihw16g:
+ case f::gOhwi16o:
case f::gOIhw8i8o:
case f::gOIhw16i16o:
case f::gOIhw8i16o2i:
case f::gOIhw16o16i:
case f::gIOhw16o16i:
case f::gOhIw8o4i:
+ case f::Goihw16g_s8s8:
ndims = 5; break;
case f::gOIdhw8i8o:
case f::gOIdhw16i16o:
});
}
+// Integer ceiling division: smallest integer >= a/b.
+// NOTE(review): assumes a >= 0 and b > 0 — negative operands would round
+// toward zero incorrectly; callers here pass element counts, so this holds.
+int div_up(const int a, const int b) {
+ return (a + b - 1) / b;
+}
+
template <typename data_t>
static void compare_data(mkldnn::memory& ref, mkldnn::memory& dst,
- data_t threshold = (data_t)1e-4)
+ data_t threshold = (data_t)1e-4, bool isBinary = false)
{
using data_type = mkldnn::memory::data_type;
ASSERT_TRUE(data_traits<data_t>::data_type == data_type::f32 ||
- data_traits<data_t>::data_type == data_type::s32);
+ data_traits<data_t>::data_type == data_type::s32 ||
+ data_traits<data_t>::data_type == data_type::u8);
/* Note: size_t incompatible with MSVC++ */
auto ref_desc = ref.get_primitive_desc().desc();
ptrdiff_t num = 1;
for (auto d = 0; d < ndims; ++d) {
- num *= dims[d];
+ if (isBinary && d == 1) {
+ num *= div_up(dims[d], 8);
+ } else {
+ num *= dims[d];
+ }
}
data_t *ref_data = (data_t *)ref.get_data_handle();
data_t *dst_data = (data_t *)dst.get_data_handle();
mkldnn::impl::parallel_nd(num, [&](ptrdiff_t i) {
- data_t ref = ref_data[map_index(ref_desc, i)];
- data_t got = dst_data[map_index(dst_desc, i)];
+ int divider = isBinary ? 8 : 1;
+
+ data_t ref = ref_data[map_index(ref_desc, i) / divider];
+ data_t got = dst_data[map_index(dst_desc, i) / divider];
if (data_traits<data_t>::data_type == data_type::f32) {
data_t diff = got - ref;
data_t e = (std::abs(ref) > threshold) ? diff / ref : diff;
- EXPECT_NEAR(e, (data_t)0.0, threshold)
- << "Index: " << i << " Total: " << num;
+ EXPECT_NEAR(e, (data_t) 0.0, threshold)
+ << "Index: " << i << " Total: " << num;
} else {
EXPECT_EQ(ref, got) << "Index: " << i << " Total: " << num;
}
struct test_convolution_params_t {
const mkldnn::engine::kind engine_kind;
mkldnn::algorithm aalgorithm;
- const float relu_negative_slope;
test_convolution_formats_t formats;
test_convolution_attr_t attr;
test_convolution_sizes_t sizes;
struct test_convolution_params_t_3d {
const mkldnn::engine::kind engine_kind;
mkldnn::algorithm aalgorithm;
- const float relu_negative_slope;
+// NOTE(review): relu_negative_slope removed — presumably ReLU is now fused
+// via the attr/post-ops mechanism instead of a dedicated field; confirm
+// against the convolution test drivers that consumed it.
test_convolution_formats_t formats;
test_convolution_attr_t attr;
test_convolution_sizes_t_3d sizes;
test_roi_pool_desc_t test_pd;
};
+// Describes one binary (1-bit) convolution test case: the engine and
+// convolution algorithm under test, optional fused post-ops (eltwise,
+// depthwise, sum, binarization), and the memory formats/sizes to run.
+struct test_binary_convolution_params_t {
+ const mkldnn::engine::kind engine_kind;
+ mkldnn::algorithm aalgorithm;
+ float pad_value; // presumably the value assumed for padded (out-of-bounds) inputs — TODO confirm
+ mkldnn::algorithm eltwise_algorithm; // fused eltwise post-op (undef => none)
+ const float eltwise_alpha;
+ const float eltwise_beta;
+ mkldnn::algorithm depthwise_algorithm; // fused depthwise post-op (undef => none)
+ bool with_sum; // whether a sum post-op is fused
+ mkldnn::algorithm binarization_algorithm; // fused output binarization (undef => none)
+ test_convolution_formats_t formats;
+ test_convolution_sizes_t sizes;
+};
+
+// Variant of test_binary_convolution_params_t for a binary convolution
+// fused with a following depthwise convolution (conv -> dw-conv chain):
+// same post-op knobs, but uses the dw-conv format/size descriptors and
+// carries no pad_value.
+struct test_binary_convolution_dw_conv_params_t {
+ const mkldnn::engine::kind engine_kind;
+ mkldnn::algorithm aalgorithm;
+ mkldnn::algorithm eltwise_algorithm; // fused eltwise post-op (undef => none)
+ const float eltwise_alpha;
+ const float eltwise_beta;
+ mkldnn::algorithm depthwise_algorithm; // fused depthwise post-op (undef => none)
+ bool with_sum; // whether a sum post-op is fused
+ mkldnn::algorithm binarization_algorithm; // fused output binarization (undef => none)
+ test_convolution_dw_conv_formats_t formats;
+ test_convolution_dw_conv_sizes_t sizes;
+};
+
std::ostream &operator<<(std::ostream &stream,
const roi_pool_test_params &tp)
{
}
template<typename F> bool catch_expected_failures(const F &f,
- bool expect_to_fail, mkldnn_status_t expected_status)
+ bool expect_to_fail, mkldnn_status_t expected_status, bool ignore_unimplemented = true)
{
try {
f();
// not match.
if (!(expect_to_fail) || e.status != (expected_status)) {
// Ignore unimplemented
- if (e.status == mkldnn_unimplemented)
+ if ( ignore_unimplemented && (e.status == mkldnn_unimplemented))
return true;
else
throw e;