From: Animesh Jain
Date: Wed, 8 Sep 2021 17:48:09 +0000 (-0700)
Subject: [NNC] Add Softplus operator (#64589)
X-Git-Tag: accepted/tizen/8.0/unified/20231005.095509~374
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=18d24bb537bbe567fdb9f7b268c51478987319b8;p=platform%2Fupstream%2Fpytorch.git

[NNC] Add Softplus operator (#64589)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64589

Adding softplus operator lowering for NNC. Enabling element wise fusion as well.

Test Plan: Added a test in test_jit_fuser.py

Reviewed By: bertmaher

Differential Revision: D30736449

fbshipit-source-id: 6c5fc3bceb5cef2322ecd4449f827e4af018ea93
---

diff --git a/test/test_jit_fuser_te.py b/test/test_jit_fuser_te.py
index a082ce5..b7830f9 100644
--- a/test/test_jit_fuser_te.py
+++ b/test/test_jit_fuser_te.py
@@ -1255,6 +1255,7 @@ class TestTEFuser(JitTestCase):
             F.hardtanh,
             F.hardsigmoid,
             F.hardswish,
+            F.softplus,
             torch.sqrt,
             torch.rsqrt,
             F.gelu,
@@ -2015,6 +2016,7 @@ works_list = [
     'nn.functional.hardshrink',
     'nn.functional.hardsigmoid',
     'nn.functional.hardswish',
+    'nn.functional.softplus',
     'nn.functional.hardtanh',
     'nn.functional.leaky_relu',
     'nn.functional.relu',
diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
index 75305d6..b505ce8 100644
--- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp
+++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
@@ -140,6 +140,7 @@ const OperatorSet& supported_eltwise_set() {
       "aten::sigmoid(Tensor self) -> Tensor",
       "aten::relu(Tensor self) -> Tensor",
       "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor",
+      "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor",
       "aten::relu6(Tensor self) -> Tensor",
       "aten::gelu(Tensor self) -> Tensor",
       "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor",
diff --git a/torch/csrc/jit/tensorexpr/kernel.cpp b/torch/csrc/jit/tensorexpr/kernel.cpp
index 8a8aee7..c5f7e99 100644
--- a/torch/csrc/jit/tensorexpr/kernel.cpp
+++ b/torch/csrc/jit/tensorexpr/kernel.cpp
@@ -734,6 +734,7 @@ std::vector<ExprHandle> TensorExprKernel::inferSizesForValue(
     case aten::hardtanh:
     case aten::hardsigmoid:
     case aten::hardswish:
+    case aten::softplus:
     case aten::sqrt:
     case aten::rsqrt:
     case aten::abs:
@@ -2028,6 +2029,27 @@ Tensor tensorexpr::computeOperandValue(
           });
     } break;
+    case aten::softplus: {
+      return computeThreeOperand(
+          "aten_softplus",
+          inputs,
+          outputShape,
+          outputType,
+          [](const ExprHandle& a,
+             const ExprHandle& beta,
+             const ExprHandle& threshold) {
+            auto beta_promoted = Cast::make(a.dtype(), beta);
+            auto threshold_promoted = Cast::make(a.dtype(), threshold);
+            auto beta_a = beta_promoted * a;
+            return CompareSelect::make(
+                beta_a,
+                threshold_promoted,
+                a,
+                log1p(exp(beta_a)) / beta_promoted,
+                kGT);
+          });
+    } break;
+
     case aten::hardsigmoid: {
       return computeOneOperand(
           "aten_hardsigmoid",
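
Note: the lowering added to kernel.cpp computes softplus as log1p(exp(beta * x)) / beta and falls back to the linear input x once beta * x exceeds the threshold, matching the aten::softplus schema registered in supported_eltwise_set(). Below is a minimal sketch of how the fused path could be exercised from TorchScript; the function name, input shape, warm-up count, and the extra sigmoid in the chain are illustrative assumptions, not part of this commit.

    # Sketch: script an element-wise chain containing F.softplus so the
    # TensorExpr fuser can pick it up, then check the result against eager mode.
    import torch
    import torch.nn.functional as F

    @torch.jit.script
    def softplus_chain(x: torch.Tensor) -> torch.Tensor:
        # A second element-wise op gives the fuser a chain to fuse with softplus.
        return F.softplus(x) * torch.sigmoid(x)

    x = torch.randn(1024)
    for _ in range(3):  # warm-up runs let the profiling executor specialize and fuse
        y = softplus_chain(x)
    assert torch.allclose(y, F.softplus(x) * torch.sigmoid(x))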