From 44b12ba8624ee9f0d5c77babc0d2c4b9012fc1b5 Mon Sep 17 00:00:00 2001 From: Kimish Patel Date: Fri, 13 Aug 2021 21:37:57 -0700 Subject: [PATCH] [Pytorch Profiler] Move start timestamp to end of start callback (#62191) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/62191 This moves start timestamping to end of callback. This way we don't account for callstack/module hierarchy related overhead in op runtime. Test Plan: CI Imported from OSS Reviewed By: ilia-cher Differential Revision: D29910519 fbshipit-source-id: f462031a81ae12b3db7993cf482e5ad93a35e096 --- torch/csrc/autograd/profiler_kineto.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch/csrc/autograd/profiler_kineto.cpp b/torch/csrc/autograd/profiler_kineto.cpp index 3b5b511..9995237 100644 --- a/torch/csrc/autograd/profiler_kineto.cpp +++ b/torch/csrc/autograd/profiler_kineto.cpp @@ -304,7 +304,6 @@ void pushProfilingCallbacks() { #endif // USE_KINETO auto ctx_ptr = std::make_unique<KinetoObserverContext>(); - ctx_ptr->startUs = getTimeUs(); ctx_ptr->correlationId = corr_id; ctx_ptr->startThreadId = at::RecordFunction::currentThreadId(); @@ -337,6 +336,7 @@ void pushProfilingCallbacks() { ctx_ptr->module_hierarchy = jit::currentModuleHierarchy(); } #endif + ctx_ptr->startUs = getTimeUs(); if (config.state == ProfilerState::KINETO_GPU_FALLBACK) { try { cudaStubs()->record(nullptr, &ctx_ptr->cuda_event_start_, nullptr); -- 2.7.4