Skip to content

Commit

Permalink
Remove benchmark of inductor internal launch latency
Browse files Browse the repository at this point in the history
Summary:
This launch path apparently changes frequently (see D56642231,
pytorch/pytorch#124592). Any real regression would still be caught by the
other benchmarks, so remove this one to avoid the ongoing maintenance burden.

Reviewed By: masnesral

Differential Revision: D56671375

fbshipit-source-id: 509a39544f7750e0197da0c1cbaec307cba9cd75
  • Loading branch information
bertmaher authored and facebook-github-bot committed Apr 29, 2024
1 parent fc0c752 commit 60ed63b
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 69 deletions.
51 changes: 0 additions & 51 deletions torchbenchmark/operators/launch_latency/async_compilation.py

This file was deleted.

18 changes: 0 additions & 18 deletions torchbenchmark/operators/launch_latency/operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,16 +8,8 @@
register_benchmark,
register_metric,
)

from .async_compilation import inductor_nop, inductor_nop_args
from .kernels import nop_kernel, nop_with_args_kernel, trivial_add_kernel

try:
from torch._inductor.runtime import triton_heuristics
except ImportError:
# TODO(jansel): delete this case once D56408511 lands
from torch._inductor import triton_heuristics


class Operator(BenchmarkOperator):
DEFAULT_METRICS = ["walltime"]
Expand Down Expand Up @@ -59,16 +51,6 @@ def nop_triton_compiled_kernel_run(self, *args):
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, function, None, None, metadata, *args
)

@register_benchmark()
def nop_inductor_kernel_run(self, *args):
stream = get_raw_stream(0)
grid = triton_heuristics.grid(1)

if len(args) == 0:
return lambda: inductor_nop.run(1, grid=grid, stream=stream)
args = args[:-5]
return lambda: inductor_nop_args.run(*args, grid=grid, stream=stream)

@register_benchmark()
def nop_inductor_kernel(self, *args):
return lambda: trivial_add_kernel(*args)
Expand Down

0 comments on commit 60ed63b

Please sign in to comment.