@@ -172,7 +172,7 @@ def self_cpu_time_total(self):
    def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False):
        """Prints an EventList as a nicely formatted table.

-        Arguments:
+        Args:
            sort_by (str, optional): Attribute used to sort entries. By default
                they are printed in the same order as they were registered.
                Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``,
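For reference, a minimal sketch of the `table()` call this docstring documents (the tensor size, sort key, and row limit are illustrative, not part of this diff):

```python
import torch
from torch.autograd import profiler

x = torch.randn(64, 64)
with profiler.profile() as prof:  # record CPU-side autograd events
    y = x.mm(x)

# Sort rows by total CPU time and keep the output short.
print(prof.table(sort_by="cpu_time_total", row_limit=5))
```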
@@ -203,7 +203,7 @@ def export_chrome_trace(self, path):

        The checkpoint can be later loaded and inspected under ``chrome://tracing`` URL.

-        Arguments:
+        Args:
            path (str): Path where the trace will be written.
        """
        import os
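A quick sketch of exporting a trace with this API; the output file name is a placeholder:

```python
import torch
from torch.autograd import profiler

x = torch.randn(64, 64)
with profiler.profile() as prof:
    y = x.mm(x)

# Writes a JSON trace that chrome://tracing can load and display.
prof.export_chrome_trace("trace.json")
```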
@@ -288,7 +288,7 @@ def export_stacks(self, path: str, metric: str):
    def key_averages(self, group_by_input_shapes=False, group_by_stack_n=0):
        """Averages all function events over their keys.

-        Arguments:
+        Args:
            group_by_input_shapes: group entries by
                (event name, input shapes) rather than just event name.
                This is useful to see which input shapes contribute to the runtime
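As a sketch, grouping by input shapes only yields distinct rows when shape recording was enabled during profiling; the two matmuls below are chosen so their shapes differ:

```python
import torch
from torch.autograd import profiler

a, b = torch.randn(32, 32), torch.randn(128, 128)
with profiler.profile(record_shapes=True) as prof:  # shapes are needed for shape grouping
    a.mm(a)
    b.mm(b)

# The two mm calls appear as separate rows because their input shapes differ.
print(prof.key_averages(group_by_input_shapes=True).table(sort_by="cpu_time_total"))
```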
@@ -345,7 +345,7 @@ class profile(object):
    only report runtime of PyTorch functions.
    Note: profiler is thread local and is automatically propagated into the async tasks

-    Arguments:
+    Args:
        enabled (bool, optional): Setting this to False makes this context manager a no-op.
            Default: ``True``.
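A minimal sketch of the `enabled` flag in use; the `PROFILING` gating variable is made up for illustration:

```python
import torch
from torch.autograd import profiler

PROFILING = True  # hypothetical flag; set False to make the context a no-op

x = torch.randn(16, 16)
with profiler.profile(enabled=PROFILING) as prof:
    y = x.mm(x)

if PROFILING:
    print(prof.key_averages().table(sort_by="cpu_time_total"))
```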
@@ -574,7 +574,7 @@ class record_function(ContextDecorator):
    Python code (or function) when running autograd profiler. It is
    useful when tracing the code profile.

-    Arguments:
+    Args:
        name (str): Label assigned to the block of code.
        node_id (int): ID of node, for distributed profiling. Unset in
        non-distributed cases.
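Sketch of labeling a region with `record_function`; the label text is arbitrary:

```python
import torch
from torch.autograd import profiler

x = torch.randn(32, 32)
with profiler.profile() as prof:
    with profiler.record_function("my_block"):  # shows up as its own row
        y = x * 2 + x

print(prof.table(sort_by="cpu_time_total"))
```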
@@ -628,7 +628,7 @@ def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:
        once to attach the callback onto the future, and will throw if called multiple
        times.

-        Arguments:
+        Args:
            fut: (torch._C.Future): future for which to schedule
            callback for.
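Since `_call_end_callbacks_on_future` is an internal helper, the following is only a rough sketch of the pattern it appears intended for (keeping a `record_function` range open until an async result lands); the future and its result here are stand-ins:

```python
import torch
from torch.autograd import profiler

with profiler.profile() as prof:
    with profiler.record_function("async_work") as rf:
        fut = torch.futures.Future()  # stand-in for e.g. an RPC future
        # Keep the "async_work" range open until fut completes; calling this
        # a second time on the same record_function raises.
        end_fut = rf._call_end_callbacks_on_future(fut)
    fut.set_result(42)  # hypothetical async completion; closes the range
```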
@@ -666,7 +666,7 @@ class emit_nvtx(object):
    This context manager should not be called recursively, i.e. at most one
    instance should be enabled at any given time.

-    Arguments:
+    Args:
        enabled (bool, optional, default=True): Setting ``enabled=False`` makes this context manager a no-op.
            Default: ``True``.
        record_shapes (bool, optional, default=False): If ``record_shapes=True``, the nvtx range wrapping
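The usual pattern for this context manager, as a sketch; it requires a CUDA-capable GPU, and the script is expected to run under an external profiler such as nvprof (the command line in the comment is illustrative):

```python
import torch

x = torch.randn(64, 64, device="cuda")  # requires a CUDA-capable GPU

# Typically launched under the external profiler, e.g.:
#   nvprof --profile-from-start off -o trace.prof -- python script.py
with torch.cuda.profiler.profile():
    with torch.autograd.profiler.emit_nvtx(record_shapes=True):
        y = x.mm(x)
```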
@@ -761,7 +761,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def load_nvprof(path):
    """Opens an nvprof trace file and parses autograd annotations.

-    Arguments:
+    Args:
        path (str): path to nvprof trace
    """
    return EventList(parse_nvprof_trace(path))
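Closing the loop on the nvprof workflow, a sketch of loading a trace back in; the file name is a placeholder for output from an earlier nvprof + `emit_nvtx` run:

```python
import torch

# "trace.prof" is a placeholder for a file written by a prior
# `nvprof -o trace.prof` run of an emit_nvtx-instrumented script.
events = torch.autograd.profiler.load_nvprof("trace.prof")
print(events.table(sort_by="cuda_time_total"))
```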