From 3bd69d30204df7d5d2f1254247c3e917aca8ce93 Mon Sep 17 00:00:00 2001
From: Wenliang Zhao
Date: Tue, 7 Sep 2021 14:09:31 -0700
Subject: [PATCH] add bundled input into AIBench (#64557)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64557

MaskRCNN speed depends on how many people are detected in the detection
stage, so a random input from the dataloader does not give comparable
timings. To standardize the benchmark, we use two standard images,
containing 2 and 3 people respectively.

Test Plan: AIBench result: https://www.internalfb.com/intern/aibench/details/945883114818980

Reviewed By: axitkhurana

Differential Revision: D30446049

fbshipit-source-id: a2826fdb69e9f840c0afc566c4cbbcde1c2fba89
---
 torch/utils/bundled_inputs.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/torch/utils/bundled_inputs.py b/torch/utils/bundled_inputs.py
index 8a6d466..e074aae 100644
--- a/torch/utils/bundled_inputs.py
+++ b/torch/utils/bundled_inputs.py
@@ -127,6 +127,7 @@ def augment_model_with_bundled_inputs(
     inputs: Optional[Sequence[Tuple[Any, ...]]] = None,
     _receive_inflate_expr: Optional[List[str]] = None,  # For debugging.
     info: Optional[List[str]] = None,  # Optional argument to provide info about forward or its inputs
+    skip_size_check=False,
 ) -> None:
     """
     Add bundled sample inputs to a model for the forward function.
@@ -172,6 +173,7 @@ def augment_model_with_bundled_inputs(
         inputs={forward : inputs},
         _receive_inflate_expr=_receive_inflate_expr,
         info={forward : info} if info else None,
+        skip_size_check=skip_size_check,
     )
 
 
@@ -180,6 +182,7 @@ def augment_many_model_functions_with_bundled_inputs(
     inputs: Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]],
     _receive_inflate_expr: Optional[List[str]] = None,  # For debugging.
     info: Optional[Dict[Callable, List[str]]] = None,  # Optional argument to provide info about the function or its inputs
+    skip_size_check=False,
 ) -> None:
     """Add bundled sample inputs to a model for an arbitrary list of public functions.
 
@@ -289,6 +292,7 @@ def augment_many_model_functions_with_bundled_inputs(
                     arg,
                     f"deflated[{inp_idx}][{arg_idx}]",
                     inflate_helper_fn_name,
+                    skip_size_check=skip_size_check,
                 )
                 deflated_args.append(deflated)
                 parts.append(f"    {inflater},")
@@ -354,7 +358,7 @@ def augment_many_model_functions_with_bundled_inputs(
         """.format(template=get_bundled_inputs_functions_and_info_template)))
 
 def _inflate_expr(
-    arg: T, ref: str, inflate_helper_fn_name: str
+    arg: T, ref: str, inflate_helper_fn_name: str, skip_size_check: bool = False
 ) -> Tuple[Union[T, torch.Tensor], str, Optional[str]]:
     # Allow custom inflation expressions any object.
     # For example, calling custom image-decoding ops.
@@ -379,7 +383,7 @@ def _inflate_expr(
 
     if isinstance(arg, torch.Tensor):
         # Small-storage tensors can just be saved directly.
-        if arg.storage().size() <= MAX_RAW_TENSOR_SIZE:
+        if arg.storage().size() <= MAX_RAW_TENSOR_SIZE or skip_size_check:
             return arg, ref, None
         # Small contiguous tensors can be cloned to have small storage.
         # TODO: Should we do this even for non-contiguous tensors?
-- 
2.7.4
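
A minimal sketch of how the new skip_size_check flag might be exercised
(not part of the patch). The Doubler module and the tensor shape are
illustrative only; get_all_bundled_inputs is the accessor that the
augmentation defines on the module:

    import torch
    import torch.utils.bundled_inputs

    class Doubler(torch.nn.Module):
        def forward(self, x):
            return x * 2

    # Bundled inputs can only be attached to a ScriptModule.
    scripted = torch.jit.script(Doubler())

    # A large random tensor is neither small nor compactly representable,
    # so bundling it would normally raise. With skip_size_check=True it is
    # saved into the model as-is, e.g. a fixed benchmark image.
    big = torch.randn(3, 800, 800)

    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        scripted,
        inputs=[(big,)],
        skip_size_check=True,
    )

    # Run the model on each bundled input, as a benchmark harness would.
    for args in scripted.get_all_bundled_inputs():
        scripted(*args)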