Fix typo in embedding_renorm_cuda_ (#64542)
authorHojin Lee <leo.lab001@gmail.com>
Wed, 8 Sep 2021 16:33:23 +0000 (09:33 -0700)
committerFacebook GitHub Bot <facebook-github-bot@users.noreply.github.com>
Wed, 8 Sep 2021 16:36:24 +0000 (09:36 -0700)
Summary:
Corrects the dispatch-name string passed to AT_DISPATCH_FLOATING_TYPES_AND2 in embedding_renorm_cuda_: it previously read "embedding_backward" (a copy-paste leftover) and now reads "embedding_renorm_cuda_", so error messages and profiler traces report the correct operator name. No functional change.

cc ezyang albanD zou3519 gqchen pearu nikitaved soulitzer Lezcano Varal7 ngimel

Pull Request resolved: https://github.com/pytorch/pytorch/pull/64542

Reviewed By: mrshenli

Differential Revision: D30792842

Pulled By: ngimel

fbshipit-source-id: c9a548256d02b3ce6fb77dd9fb058084f2c91608

aten/src/ATen/native/cuda/Embedding.cu

index ba79fa1..155b389 100644 (file)
@@ -318,7 +318,7 @@ Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
     dim3 block = 128;
     int dim = self.stride(0);
 
-    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_backward", [&] {
+    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_renorm_cuda_", [&] {
       using accscalar_t = acc_type<scalar_t, true>;
       renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
         self.data_ptr<scalar_t>(),