reinterpret_cast<float*>(output.buffer),
outShape);
} else if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
success = depthwiseConvPrepare(input.shape(), filter.shape(), bias.shape(),
padding_left, padding_right,
padding_top, padding_bottom,
depth_multiplier, activation,
reinterpret_cast<uint8_t*>(output.buffer),
outShape);
+#else // REF-ANN
+ LOG(ERROR) << getOperationName(operation.type) << " failed.";
+ NYI("We don't support TENSOR_QUANT8_ASYMM yet.");
+#endif // REF-ANN
}
} break;
int32_t stride_width, int32_t stride_height,
int32_t depth_multiplier, int32_t activation,
float* outputData, const Shape& outputShape);
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
const uint8_t* filterData, const Shape& filterShape,
const int32_t* biasData, const Shape& biasShape,
int32_t stride_width, int32_t stride_height,
int32_t depth_multiplier, int32_t activation,
uint8_t* outputData, const Shape& outputShape);
+#endif // REF-ANN
bool convFloat32(const float* inputData, const Shape& inputShape,
const float* filterData, const Shape& filterShape,
#include "OperationsUtils.h"
#include "internal/optimized/depthwiseconv_float.h"
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
#include "internal/optimized/depthwiseconv_uint8.h"
+#endif // REF-ANN
namespace android {
namespace nn {
}
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
const uint8_t* filterData, const Shape& filterShape,
const int32_t* biasData, const Shape& biasShape,
return true;
}
+#endif // REF-ANN
#undef ANDROID_NN_DEPTHWISE_CONV_PARAMETERS
} // namespace nn
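Note: the NYI macro invoked in the disabled TENSOR_QUANT8_ASYMM branch is defined elsewhere in this tree and is not part of this patch. A minimal sketch of such a not-yet-implemented helper, assuming it only logs the message and aborts (an illustrative guess, not the repository's actual definition):

// Hypothetical sketch only; the real NYI macro in this tree may differ.
// Requires <cstdlib> for abort() and the same LOG(ERROR) facility used above.
#define NYI(msg)                                            \
    do {                                                    \
        LOG(ERROR) << "Not yet implemented: " << (msg);     \
        abort();                                            \
    } while (0)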