#include <ATen/TensorUtils.h>
#include <THC/THCAtomics.cuh>
#include <ATen/cuda/CUDAContext.h>
+#include <c10/macros/Macros.h>
#include <math.h>
// Threads per block for our apply kernel
// FIXME: use occupancy calculator instead
-#define AT_APPLY_THREADS_PER_BLOCK 32 * 16
-#define AT_APPLY_BLOCKS_PER_SM 4
+constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
+constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
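// Typed constexpr constants avoid the precedence pitfall of the unparenthesized
// "#define AT_APPLY_THREADS_PER_BLOCK 32 * 16" they replace; a minimal
// illustration (hypothetical names, not from this file):
//   #define N 32 * 16
//   constexpr uint32_t M = 512;
//   int a = 2048 / N;   // expands to 2048 / 32 * 16 == 1024, not the intended 4
//   int b = 2048 / M;   // 4, as intended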
// The `remaining_steps` argument is used to support an Op that operates on
// multiple elements at the same time. Generally, the strategy of ApplyOpN is to
int ADims,
int step>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
+C10_LAUNCH_BOUNDS(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void kernelPointwiseApply1(detail::TensorInfo<scalar, IndexType> a,
IndexType totalElements, const Op op) {
int ADims, int BDims,
int step>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
+C10_LAUNCH_BOUNDS(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernelPointwiseApply2(detail::TensorInfo<scalar1, IndexType> a,
int ADims, int BDims, int CDims,
int step>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
+C10_LAUNCH_BOUNDS(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernelPointwiseApply3(detail::TensorInfo<scalar1, IndexType> a,
int ADims, int BDims, int CDims, int DDims,
int step>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
+C10_LAUNCH_BOUNDS(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernelPointwiseApply4(detail::TensorInfo<scalar1, IndexType> a,
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
+#include <c10/macros/Macros.h>
#include <curand_kernel.h>
#include <THC/THCGeneral.h>
typename IndexType,
int ADims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(256,8)
+C10_LAUNCH_BOUNDS(256, 8)
#endif
__global__ void
fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a,
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
+#include <c10/macros/Macros.h>
namespace at { namespace native {
}
template <typename scalar_t>
- __launch_bounds__(1024)
+ C10_LAUNCH_BOUNDS(1024)
__global__ void grid_sampler_2d_kernel(
const int nthreads,
TensorInfo<scalar_t, int> input,
}
template <typename scalar_t>
- __launch_bounds__(1024)
+ C10_LAUNCH_BOUNDS(1024)
__global__ void grid_sampler_3d_kernel(
const int nthreads,
TensorInfo<scalar_t, int> input,
}
template <typename scalar_t>
- __launch_bounds__(1024)
+ C10_LAUNCH_BOUNDS(1024)
__global__ void grid_sampler_2d_backward_kernel(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
}
template <typename scalar_t>
- __launch_bounds__(1024)
+ C10_LAUNCH_BOUNDS(1024)
__global__ void grid_sampler_3d_backward_kernel(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/detail/FunctionTraits.h>
#include <ATen/native/TensorIterator.h>
-
+#include <c10/macros/Macros.h>
// Marks a lambda as executable on both the host and device. The __host__
// attribute is important so that we can access static type information from
namespace at { namespace native {
template<int nt, int vt, typename func_t>
-__launch_bounds__(nt, 4)
+C10_LAUNCH_BOUNDS(nt, 4)
__global__ void elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
+#include <c10/macros/Macros.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
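// The launch bounds in the CTC kernels below depend on the scalar_t template
// argument (1024 threads per block when scalar_t is float, 896 otherwise);
// template-dependent uses like this are why C10_MAX_THREADS_PER_BLOCK and
// C10_MIN_BLOCKS_PER_SM are kept as macros rather than constexpr values
// (see the NOTE in the c10/macros/Macros.h hunk further down).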
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
-__launch_bounds__((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
+C10_LAUNCH_BOUNDS((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
// alpha kernel above. (As mentioned above, it might make sense do the calculation in the alpha kernel.)
template<typename scalar_t, typename target_t>
__global__ void
-__launch_bounds__((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
+C10_LAUNCH_BOUNDS((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data,
const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
-__launch_bounds__((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
+C10_LAUNCH_BOUNDS((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_backward_collect_nonblank_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
-__launch_bounds__((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
+C10_LAUNCH_BOUNDS((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
+#include <c10/macros/Macros.h>
namespace at { namespace native {
template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(32 * 16, 4)
+C10_LAUNCH_BOUNDS(512, 4)
#endif
__global__ void lstm_cell_forward(
TensorInfo<scalar_t, index_type> input,
template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(32 * 16, 4)
+C10_LAUNCH_BOUNDS(512, 4)
#endif
__global__ void lstm_cell_backward(
TensorInfo<scalar_t, index_type> storage,
template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(32 * 16, 4)
+C10_LAUNCH_BOUNDS(512, 4)
#endif
__global__ void gru_cell_forward(
TensorInfo<scalar_t, index_type> Input,
template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(32 * 16, 4)
+C10_LAUNCH_BOUNDS(512, 4)
#endif
__global__ void gru_cell_backward(
TensorInfo<scalar_t, index_type> gradInInput,
#include <THC/THCGeneral.hpp>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
+#include <c10/macros/Macros.h>
#include <functional>
#include <iosfwd>
#include <tuple>
std::ostream& operator<<(std::ostream& out, const ReduceConfig& config);
template<int nt, typename R>
-__launch_bounds__(nt, 4)
+C10_LAUNCH_BOUNDS(nt, 4)
__global__ void reduce_kernel(R reduction) {
reduction.run();
}
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
+#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
-#define AT_APPLY_THREADS_PER_BLOCK 32 * 16
-#define AT_APPLY_BLOCKS_PER_SM 4
+constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
+constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
+C10_LAUNCH_BOUNDS(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
#include <THC/THCTensorTypeUtils.cuh>
#include <THC/THCReduceApplyUtils.cuh>
#include <THC/THCNumerics.cuh>
+#include <c10/macros/Macros.h>
// Threads per thread block
#define THC_NONCONTIG_REDUCE_BLOCK_SIZE 32 * 16
typename FinalizeOp,
int ADims, int BDims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(32 * 16, 4)
+C10_LAUNCH_BOUNDS(512, 4)
#endif
__global__ void kernelReduceNoncontigDim_shared
(TensorInfo<T, IndexType> out,
typename FinalizeOp,
int ADims, int BDims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(32 * 16, 4)
+C10_LAUNCH_BOUNDS(512, 4)
#endif
__global__ void
kernelReduceNoncontigDim(TensorInfo<T, IndexType> out,
//
#include <THC/THCReduceApplyUtils.cuh>
+#include <c10/macros/Macros.h>
// Size per each reduction block
#define THC_REDUCE_ALL_BLOCK_SIZE 1024L
int ADims>
__global__ void
#if defined(__HIP_PLATFORM_HCC__)
-__launch_bounds__(THC_REDUCE_ALL_BLOCK_SIZE)
+C10_LAUNCH_BOUNDS(THC_REDUCE_ALL_BLOCK_SIZE)
#endif
kernelReduceAll(TensorInfo<T, IndexType> in,
IndexType totalElements,
#include <THC/THCReduceApplyUtils.cuh>
#include <THC/THCTensorTypeUtils.cuh>
#include <THC/THCNumerics.cuh>
+#include <c10/macros/Macros.h>
// Collection of kernel sort routines
template <typename T>
template <typename K, typename V,
int KeyDims, int ValueDims,
typename Comparator, typename IndexType, int Power2SortSize>
-__launch_bounds__(1024)
+C10_LAUNCH_BOUNDS(1024)
__global__ void
bitonicSortKVInPlace(TensorInfo<K, IndexType> keys,
IndexType keySlices,
#include <THC/THCReduceApplyUtils.cuh>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
+#include <c10/macros/Macros.h>
#include <thrust/functional.h>
template <typename Dtype, typename Acctype>
#if defined(__HIP_PLATFORM_HCC__)
-__launch_bounds__(MULTILABELMARGIN_THREADS)
+C10_LAUNCH_BOUNDS(MULTILABELMARGIN_THREADS)
#endif
__global__ void cunn_MultiLabelMarginCriterion_updateOutput_kernel(Dtype *output,
Dtype *input,
template <typename Dtype, typename Acctype>
#if defined(__HIP_PLATFORM_HCC__)
-__launch_bounds__(MULTILABELMARGIN_THREADS)
+C10_LAUNCH_BOUNDS(MULTILABELMARGIN_THREADS)
#endif
__global__ void cunn_MultiLabelMarginCriterion_updateGradInput_kernel(Dtype *gradInput,
Dtype *gradOutput,
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCApply.cuh>
+#include <c10/macros/Macros.h>
#include <thrust/functional.h>
template <typename T, typename AccumT>
#if defined(__HIP_PLATFORM_HCC__)
-__launch_bounds__(1024)
+C10_LAUNCH_BOUNDS(1024)
#endif
__global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel(
T *output,
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <THCUNN/common.h>
+#include <c10/macros/Macros.h>
template <typename Dtype, typename Acctype>
__global__ void
#if __CUDA_ARCH__ >= 320 || defined __HIP_PLATFORM_HCC__
-__launch_bounds__(CUDA_NUM_THREADS)
+C10_LAUNCH_BOUNDS(CUDA_NUM_THREADS)
#endif
LRNFillScale(const int nthreads, const Dtype* const in,
const int num, const int channels, const int height,
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCNumerics.cuh>
#include <THCUNN/common.h>
+#include <c10/macros/Macros.h>
// kernels borrowed from Caffe
template <typename Dtype, typename AccType>
const int BACKWARD_THREADS = 256;
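// With BACKWARD_THREADS == 256, the old bound 2048 / BACKWARD_THREADS equals 8,
// so C10_LAUNCH_BOUNDS(BACKWARD_THREADS, 8) below requests the same occupancy
// while letting C10_MIN_BLOCKS_PER_SM clamp it on architectures with a lower per-SM limit.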
template <typename Dtype, typename AccType>
-__launch_bounds__(BACKWARD_THREADS,2048/BACKWARD_THREADS)
+C10_LAUNCH_BOUNDS(BACKWARD_THREADS, 8)
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int64_t* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
#include <THCUNN/common.h>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
+#include <c10/macros/Macros.h>
// Kernel for fast unfold+copy
// Borrowed from Theano
// Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas
template <typename Dtype>
-__global__ void __launch_bounds__(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident
+__global__ void C10_LAUNCH_BOUNDS(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident
im3d2col_kernel(const int64_t n, const Dtype* data_im,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d,
}
template <typename Dtype, typename Acctype>
-__global__ void __launch_bounds__(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident
+__global__ void C10_LAUNCH_BOUNDS(CUDA_NUM_THREADS) // ensure that at least 1 block can be resident
col2im3d_kernel(const int64_t n, const Dtype* data_col,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t channels,
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCAtomics.cuh>
+#include <c10/macros/Macros.h>
template<typename Dtype, typename Acctype>
-__launch_bounds__(1024)
+C10_LAUNCH_BOUNDS(1024)
__global__ void caffe_gpu_interp2_kernel(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
const THCDeviceTensor<Dtype, 5> data1, THCDeviceTensor<Dtype, 5> data2) {
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, typename Acctype>
-__launch_bounds__(1024)
+C10_LAUNCH_BOUNDS(1024)
__global__ void caffe_gpu_interp2_kernel_backward(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
THCDeviceTensor<Dtype, 5> data1, const THCDeviceTensor<Dtype, 5> data2){
#include <THCUNN/common.h>
#include <THC/THCNumerics.cuh>
+#include <c10/macros/Macros.h>
// Kernel for fast unfold+copy
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu)
template <typename Dtype>
-__launch_bounds__(CUDA_NUM_THREADS)
+C10_LAUNCH_BOUNDS(CUDA_NUM_THREADS)
__global__ void im2col_kernel(const int64_t n, const Dtype* data_im,
const int64_t height, const int64_t width,
const int64_t ksize_h, const int64_t ksize_w,
}
template <typename Dtype, typename Acctype>
-__launch_bounds__(CUDA_NUM_THREADS)
+C10_LAUNCH_BOUNDS(CUDA_NUM_THREADS)
__global__ void col2im_kernel(const int64_t n, const Dtype* data_col,
const int64_t height, const int64_t width, const int64_t channels,
const int64_t kernel_h, const int64_t kernel_w,
#define C10_HOST_DEVICE __host__ __device__
#define C10_DEVICE __device__
#define C10_HOST __host__
+// Constants from the CUDA programming guide:
+// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications
+// The maximum number of threads per multiprocessor is 1024 for the Turing
+// architecture (compute capability 7.5), but 2048 for earlier architectures.
+// The compiler warns when launch bounds exceed these limits, so the macros
+// below adjust user-supplied values to stay within them.
+#if __CUDA_ARCH__ >= 750
+constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024;
+#else
+constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048;
+#endif
+// CUDA_MAX_THREADS_PER_BLOCK is currently the same for all architectures.
+constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024;
+// CUDA_THREADS_PER_BLOCK_FALLBACK is the "canonical fallback" choice of block size.
+// 256 is a good number for this fallback and should give good occupancy and
+// versatility across all architectures.
+constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256;
+// NOTE: if you are thinking of constexpr-ifying the inputs to launch bounds, it
+// turns out that although __launch_bounds__ can take constexpr, it
+// can't take a constexpr that has anything to do with templates.
+// Currently we use launch_bounds that depend on template arguments in
+// Loops.cuh, Reduce.cuh and LossCTC.cuh. Hence, C10_MAX_THREADS_PER_BLOCK and
+// C10_MIN_BLOCKS_PER_SM are kept as macros.
+// Suppose you were planning to write __launch_bounds__(a, b), based on your performance tuning on a modern GPU.
+// Instead, you should write __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(a), C10_MIN_BLOCKS_PER_SM(a, b)),
+// which will also properly respect the limits on other architectures.
+#define C10_MAX_THREADS_PER_BLOCK(val) (((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) : CUDA_THREADS_PER_BLOCK_FALLBACK)
+#define C10_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) ((((threads_per_block)*(blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) ? (blocks_per_sm) : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block) - 1) / (threads_per_block))))
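// Worked example (values only, using the definitions above): on Turing, where
// CUDA_MAX_THREADS_PER_SM == 1024,
//   C10_MAX_THREADS_PER_BLOCK(1024)  -> 1024 (within the per-block limit)
//   C10_MAX_THREADS_PER_BLOCK(2048)  -> 256  (falls back; 2048 exceeds the limit)
//   C10_MIN_BLOCKS_PER_SM(1024, 4)   -> 1    ((1024 + 1023) / 1024, since 1024 * 4 > 1024)
// On earlier architectures (limit 2048) the same C10_MIN_BLOCKS_PER_SM(1024, 4) yields 2.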
+// C10_LAUNCH_BOUNDS is analogous to __launch_bounds__
+// https://stackoverflow.com/a/8814003 trick for a macro with an optional argument
+#define C10_LAUNCH_BOUNDS_0 __launch_bounds__(256, 4) // default launch bounds that should give good occupancy and versatility across all architectures.
+#define C10_LAUNCH_BOUNDS_1(max_threads_per_block) __launch_bounds__((C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))))
+#define C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) __launch_bounds__((C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))), (C10_MIN_BLOCKS_PER_SM((max_threads_per_block), (min_blocks_per_sm))))
+#define C10_LAUNCH_BOUNDS_X(x,max_threads_per_block,min_blocks_per_sm,FUNC, ...) FUNC
+#define C10_LAUNCH_BOUNDS(...) C10_LAUNCH_BOUNDS_X(,##__VA_ARGS__, C10_LAUNCH_BOUNDS_2(__VA_ARGS__), C10_LAUNCH_BOUNDS_1(__VA_ARGS__), C10_LAUNCH_BOUNDS_0(__VA_ARGS__))
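// How the optional-argument dispatch resolves (hand expansions, assuming the
// GNU ",##__VA_ARGS__" extension):
//   C10_LAUNCH_BOUNDS(512)    -> __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(512))
//   C10_LAUNCH_BOUNDS(512, 4) -> __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(512),
//                                                  C10_MIN_BLOCKS_PER_SM(512, 4))
// A usage sketch with a hypothetical kernel, following the guidance above:
//   template <typename scalar_t>
//   C10_LAUNCH_BOUNDS(512, 4)  // 512 threads, 4 blocks/SM; clamped to 2 blocks/SM on Turing
//   __global__ void example_axpy(int n, scalar_t a, const scalar_t* x, scalar_t* y) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n) y[i] = a * x[i] + y[i];
//   }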
#else
#define C10_HOST_DEVICE
#define C10_HOST
cudaGetLastError();
Py_RETURN_FALSE;
}
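  // cudaPointerAttributes::memoryType was deprecated in CUDA 10.0 in favor of
  // the ::type field, hence the CUDA_VERSION guard below.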
- return PyBool_FromLong(attr.memoryType == cudaMemoryTypeHost);
+ #if CUDA_VERSION >= 10000
+ return PyBool_FromLong(attr.type == cudaMemoryTypeHost);
+ #else
+ return PyBool_FromLong(attr.memoryType == cudaMemoryTypeHost);
+ #endif
#else
Py_RETURN_FALSE;
#endif