From: Yinghai Lu
Date: Thu, 9 Sep 2021 07:58:39 +0000 (-0700)
Subject: Fix log1p lowering bug (#64724)
X-Git-Tag: accepted/tizen/8.0/unified/20231005.095509~348
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=233e3e5bb499b97e0a68ba93b6928c2e96096777;p=platform%2Fupstream%2Fpytorch.git

Fix log1p lowering bug (#64724)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64724

Using `1` introduces an int tensor instead of a float tensor, which doesn't work well with downstream elementwise operators. The error looks like:
```
[TensorRT] WARNING: IElementWiseLayer with inputs (Unnamed Layer* 1) [Unary]_output and (Unnamed Layer* 2) [Constant]_output: first input has type Float but second input has type Int32.
```

Changing the constant to a float fixes this.

Reviewed By: 842974287

Differential Revision: D30796959

fbshipit-source-id: 0538e4dd960df9ce87a2d4cafe8f1a0c061b6bad
---

diff --git a/torch/fx/experimental/fx_acc/acc_ops.py b/torch/fx/experimental/fx_acc/acc_ops.py
index ccd8ec8..22de9ee 100644
--- a/torch/fx/experimental/fx_acc/acc_ops.py
+++ b/torch/fx/experimental/fx_acc/acc_ops.py
@@ -529,7 +529,7 @@ def relu(*, input, inplace=False):
 )
 def torch_log1p_mapper(node: torch.fx.Node, _: torch.nn.Module) -> torch.fx.Node:
     with node.graph.inserting_before(node):
-        add_kwargs = {"input": node.kwargs["input"], "other": 1}
+        add_kwargs = {"input": node.kwargs["input"], "other": 1.0}
         add_node = node.graph.call_function(add, kwargs=add_kwargs)
         add_node.meta = node.meta.copy()
         log_kwargs = {"input": add_node}
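
As a quick sanity check of the identity the mapper relies on (not part of the commit itself), here is a minimal sketch in plain PyTorch, using only stock `torch` ops:

```python
import torch

# Sketch (assumption, not from the patch): the acc_ops mapper decomposes
# log1p(x) into log(x + 1.0). Using the float constant 1.0 keeps the add in
# floating point, so downstream elementwise layers (e.g. TensorRT's
# IElementWiseLayer) see two Float inputs rather than a Float/Int32 mismatch.
x = torch.rand(4)
assert torch.allclose(torch.log1p(x), torch.log(x + 1.0))
```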