Fix intmm benchmark script #141

Merged
12 commits merged on Apr 16, 2024
benchmarks/intmm.py (13 changes: 11 additions & 2 deletions)
@@ -2,12 +2,21 @@
 import csv
 import itertools
 import math
+import sys
+import pathlib
 
 import torch
+from torchao.quantization.utils import TORCH_VERSION_AFTER_2_4, TORCH_VERSION_AFTER_2_2
+
+
+# Check if CUDA is available, if not, exit the script
+if not torch.cuda.is_available():
+    print("CUDA is not available. Exiting the script.")
+    sys.exit(0)
 
 import torch.nn.functional as F
 import torch.utils.benchmark as benchmark
-from torchao.kernel.intmm_triton import int_matmul, int_scaled_matmul
+from torchao.kernel.intmm import int_matmul, int_scaled_matmul
 
 torch._dynamo.config.cache_size_limit = 128
 torch._dynamo.config.accumulated_cache_size_limit = 128
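
For orientation (not part of the diff): a minimal sketch of timing the relocated int_matmul entry point with torch.utils.benchmark, roughly in the spirit of what run_benchmarks does. The int_matmul(a, b) signature and the int8 shapes below are assumptions for illustration only, and the snippet needs a CUDA device, matching the guard added above.

# Minimal sketch, not from the PR. Assumes int_matmul(a, b) multiplies two int8
# tensors; shapes are illustrative only. Requires a CUDA device.
import torch
import torch.utils.benchmark as benchmark
from torchao.kernel.intmm import int_matmul

a = torch.randint(-128, 127, (1024, 1024), dtype=torch.int8, device="cuda")
b = torch.randint(-128, 127, (1024, 1024), dtype=torch.int8, device="cuda")

timer = benchmark.Timer(
    stmt="int_matmul(a, b)",
    globals={"int_matmul": int_matmul, "a": a, "b": b},
)
print(timer.blocked_autorange())  # prints a timing summary for the kernel call
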
@@ -81,7 +90,7 @@ def run_benchmarks(shapes):

 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="integer matmul benchmarks")
-    parser.add_argument("file_path", type=str, help="Path to csv file with shapes")
+    parser.add_argument("--file_path", type=str, required=True, help="Path to csv file with shapes")
     args = parser.parse_args()
     # Access the file path provided as an argument
     file_path = args.file_path
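
Usage note on the argparse change: the shapes file is now passed as a required named flag rather than a positional argument, so an invocation looks like

python benchmarks/intmm.py --file_path path/to/shapes.csv

where path/to/shapes.csv is only a placeholder; this PR does not name a specific shapes file.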