From: David Riazati
Date: Sat, 30 Mar 2019 01:23:28 +0000 (-0700)
Subject: Add hash() global (#18258)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~540
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7b0ef317806178b5c3e4135ef53b78f60f767991;p=platform%2Fupstream%2Fpytorch.git

Add hash() global (#18258)

Summary:
This adds a `hash()` builtin which supports `int`, `str`, and `float`. It
relies on `std::hash`, which is implementation-defined, so the result of
`hash()` in TorchScript is not the same as in Python, but it should satisfy
the same properties (equal values hash to equal results).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/18258

Differential Revision: D14692317

Pulled By: driazati

fbshipit-source-id: 909df5d024bb3feea157d5a203b7de53c72261c9
---

diff --git a/aten/src/ATen/core/interned_strings.h b/aten/src/ATen/core/interned_strings.h
index 25783d1..dfc6905 100644
--- a/aten/src/ATen/core/interned_strings.h
+++ b/aten/src/ATen/core/interned_strings.h
@@ -98,6 +98,7 @@ namespace c10 {
   _(aten, _set_item)         \
   _(aten, index_put_)        \
   _(aten, device)            \
+  _(aten, hash)              \
   _(aten, len)               \
   _(aten, list)              \
   _(aten, wait)              \
diff --git a/test/test_jit.py b/test/test_jit.py
index 7641070..4599c96 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -9989,6 +9989,34 @@ a")
 
         self.checkScript(fn, (torch.ones(2, 4, 2), torch.ones(2, 4, 2)))
 
+    def test_hash(self):
+        def tester(fn, inputs):
+            for x in inputs:
+                for y in inputs:
+                    if x == y:
+                        self.assertEqual(fn(x), fn(y))
+                    else:
+                        self.assertNotEqual(fn(x), fn(y))
+
+        @torch.jit.script
+        def int_hash(x):
+            # type: (int) -> int
+            return hash(x)
+
+        @torch.jit.script
+        def float_hash(x):
+            # type: (float) -> int
+            return hash(x)
+
+        @torch.jit.script
+        def str_hash(x):
+            # type: (str) -> int
+            return hash(x)
+
+        tester(int_hash, (20, 21, 22))
+        tester(float_hash, (20.0, 21.00001, 22.443))
+        tester(str_hash, ("", "hello", "a"))
+
     def test_mutable_dce(self):
         @torch.jit.script
         def foo():
diff --git a/torch/csrc/jit/register_prim_ops.cpp b/torch/csrc/jit/register_prim_ops.cpp
index 9e22d24..d000db1 100644
--- a/torch/csrc/jit/register_prim_ops.cpp
+++ b/torch/csrc/jit/register_prim_ops.cpp
@@ -1552,6 +1552,14 @@ int dictGetDefault(Stack& stack) {
   return 0;
 }
 
+template <typename T>
+int hashValue(Stack& stack) {
+  auto value = pop(stack);
+  auto hash = std::hash<T>()(value.to<T>());
+  push(stack, int64_t(hash));
+  return 0;
+}
+
 RegisterOperators reg2({
 
 #define DEFINE_STRING_OP(op_name, string_op, result) \
@@ -1914,6 +1922,11 @@ RegisterOperators reg2({
     CREATE_DICT_OPS("int"),
     CREATE_DICT_OPS("float"),
 #undef CREATE_DICT_OPS
+
+
+    Operator("aten::hash(str t) -> int", hashValue<std::string>),
+    Operator("aten::hash(int t) -> int", hashValue<int64_t>),
+    Operator("aten::hash(float t) -> int", hashValue<double>),
 });
 
 // reference: _output_size in torch/nn/functional.py
diff --git a/torch/csrc/jit/script/compiler.cpp b/torch/csrc/jit/script/compiler.cpp
index 1a06161..f87e066 100644
--- a/torch/csrc/jit/script/compiler.cpp
+++ b/torch/csrc/jit/script/compiler.cpp
@@ -399,6 +399,7 @@ struct Environment {
         {"_to_tensor",
          std::make_shared<CastValue>(TensorType::get(), prim::NumToTensor)},
         {"len", std::make_shared<BuiltinFunction>(aten::len, at::nullopt)},
+        {"hash", std::make_shared<BuiltinFunction>(aten::hash, at::nullopt)},
         {"min", std::make_shared<BuiltinFunction>(prim::min, at::nullopt)},
         {"max", std::make_shared<BuiltinFunction>(prim::max, at::nullopt)},
         {"list", std::make_shared<BuiltinFunction>(aten::list, at::nullopt)},
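
Usage sketch (not part of the commit): based on the tests added above, this is
how the new hash() builtin would be called from TorchScript. The comparison
against Python's built-in hash() at the end only illustrates the point in the
summary that the two need not agree, since the scripted version delegates to
std::hash.

    import torch

    # Assumption: running against a PyTorch build that includes this change.
    # str_hash is a scripted wrapper around the new hash() builtin.
    @torch.jit.script
    def str_hash(x):
        # type: (str) -> int
        return hash(x)

    # Equal inputs must hash to equal results...
    assert str_hash("hello") == str_hash("hello")

    # ...but the value need not match CPython's hash(), because the
    # TorchScript implementation is backed by std::hash, which is
    # implementation-defined.
    print(str_hash("hello"), hash("hello"))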