}
static void InferOutputSize64(
- const at::IntList& input_dims,
+ const at::IntArrayRef& input_dims,
const int output_channel,
const StorageOrder order,
const bool global_pooling,
return &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
}
// NOTE(review): this hunk is part of the at::IntList -> at::IntArrayRef API
// rename; IntArrayRef is the replacement alias for the deprecated IntList
// (both are c10::ArrayRef<int64_t>), so the call sites below are unaffected.
- Tensor* OutputTensorCPU_(int idx, at::IntList dims, at::TensorOptions options) {
+ Tensor* OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
   // Quantized (int8) path: fetch the idx-th output blob's Int8TensorCPU
   // wrapper and expose its inner Tensor `t`.
   auto* t = &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
   // Resize/reallocate the tensor to `dims`, pinned to the CPU device
   // regardless of what `options` carried in.
   ReinitializeTensor(t, dims, options.device(CPU));
   return t;
 }
}
- Tensor* OutputTensorCPU_(int idx, at::IntList dims, at::TensorOptions options) {
+ Tensor* OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
if (dequantize_output_) {
return Output(idx, dims, options.device(CPU));
} else {