
Commit e40d183

Changed ipex from ipex-xpu -> ipex-cpu (#90)
1 parent 28b0e5d commit e40d183

File tree

  dl_bench/llm.py
  dl_bench/utils.py
  tests/conda-envs/ipex-xpu.yaml
  tests/conda-envs/ipex.yaml

4 files changed: +33 -8 lines changed

dl_bench/llm.py
Lines changed: 4 additions & 3 deletions

@@ -26,7 +26,7 @@ def get_llm(name, dtype):

     kwargs = {}
     if name.startswith("llama2") and "HF_TOKEN" in os.environ:
-        kwargs = {"HF_TOKEN": os.environ.get("HF_TOKEN")}
+        kwargs = {"token": os.environ.get("HF_TOKEN")}

     model_name, M, T = name2params[name]

@@ -75,14 +75,15 @@ def inference(self, backend):
         # self.flops_per_sample = get_macs(self.model, self.in_shape, backend) * 2
         self.model = backend.prepare_eval_transformer(self.model)

-        self.model.eval()
         enabled = backend.dtype != torch.float32

         n_items = 0
         outputs = []
         fw_times = []

-        self.model.eval()
+
+        # Ipex gives error with eval, other backends have no effect
+        # self.model.eval()
         for i in range(self.n_iter):
             print(f"Epoch {i+1}/{self.n_iter}")
             cast = torch.autocast(enabled=enabled, device_type=backend.device_name)
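The first hunk is a straight bug fix: from_pretrained has no keyword named HF_TOKEN, so the token was never applied and gated llama2 checkpoints could not authenticate; the Hugging Face API takes the credential under the token keyword. A minimal sketch of the corrected flow (the checkpoint name below is a hypothetical stand-in; the benchmark resolves the real one through name2params):

import os
from transformers import AutoModelForCausalLM

model_name = "meta-llama/Llama-2-7b-hf"  # hypothetical stand-in checkpoint

kwargs = {}
if "HF_TOKEN" in os.environ:
    # from_pretrained expects the credential under "token"; a kwarg named
    # "HF_TOKEN" is not an auth parameter it understands.
    kwargs = {"token": os.environ.get("HF_TOKEN")}

model = AutoModelForCausalLM.from_pretrained(model_name, **kwargs)

The second hunk removes both stray self.model.eval() calls from the inference path: per the new comment, IPEX errors out when eval() is called on the already-prepared model, while the other backends get an eval-mode model from prepare_eval_transformer anyway, so the call had no effect there.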

dl_bench/utils.py
Lines changed: 4 additions & 2 deletions

@@ -132,7 +132,7 @@ def prepare_eval_transformer(self, model):
         model = model.to(memory_format=torch.channels_last)

         model.to(self.device)
-        with torch.inference_mode():
+        with torch.no_grad():
             model.eval()
         return self._compile_transformer_model(
             self.compile_mode, model, dtype=self.dtype

@@ -160,7 +160,9 @@ def _compile_transformer_model(compile_mode, model, dtype=torch.bfloat16):
             import intel_extension_for_pytorch as ipex

             params = {} if dtype != torch.bfloat16 else {"dtype": torch.bfloat16}
-            compiled_model = ipex.optimize_transformers(model, **params)
+            # compiled_model = ipex.llm.optimize(model, **params, inplace=True, deployment_mode=True)
+            compiled_model = ipex.llm.optimize(model, **params)
+            # compiled_model = ipex.optimize_transformers(model, **params)
             print("Compiled with ipex")
         elif compile_mode == "ipex_onednn_graph":
             raise NotImplementedError()
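Two independent fixes land here. The switch from torch.inference_mode() to torch.no_grad() matters because tensors created under inference_mode carry a permanent flag that later graph transformations can reject, while no_grad still disables autograd without that restriction (this rationale is inferred; the commit does not state it). The optimizer call moves from ipex.optimize_transformers to ipex.llm.optimize, its replacement in newer IPEX releases. A sketch of the new path under the same dtype convention, assuming an IPEX CPU build that ships the ipex.llm namespace and using a small stand-in model:

import torch
import intel_extension_for_pytorch as ipex
from transformers import AutoModelForCausalLM

# Stand-in model for illustration; the benchmark passes in its own model.
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

dtype = torch.bfloat16
params = {} if dtype != torch.bfloat16 else {"dtype": torch.bfloat16}

with torch.no_grad():
    # Without inplace=True, ipex.llm.optimize returns an optimized copy;
    # the commented-out variant in the diff also sets deployment_mode=True.
    compiled_model = ipex.llm.optimize(model, **params)

Keeping the old ipex.optimize_transformers call as a comment documents the fallback for environments pinned to an older IPEX.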

tests/conda-envs/ipex-xpu.yaml
Lines changed: 14 additions & 0 deletions

@@ -0,0 +1,14 @@
+name: ipex
+channels:
+  - intel
+  - conda-forge
+dependencies:
+  - intel-aikit-pytorch
+  - pytorch>=2.0.1=*_xpu_*
+  - intel-extension-for-pytorch
+  - datasets
+  - accelerate
+  - sentencepiece
+  # The following packages are required to run benchmarks
+  - sqlalchemy>=2.0.0
+  - pytest
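This new file preserves the previous XPU environment under an explicit name. A quick post-install sanity check for it (a sketch, assuming the XPU build of intel-extension-for-pytorch, whose import registers the torch.xpu namespace):

import torch
import intel_extension_for_pytorch as ipex  # import registers XPU support

print(torch.__version__, ipex.__version__)
# True only with a supported Intel GPU and driver stack present.
print(torch.xpu.is_available())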

tests/conda-envs/ipex.yaml
Lines changed: 11 additions & 3 deletions

@@ -3,12 +3,20 @@ channels:
   - intel
   - conda-forge
 dependencies:
-  - intel-aikit-pytorch
-  - pytorch>=2.0.1=*_xpu_*
-  - intel-extension-for-pytorch
+  - python=3.11
   - datasets
   - accelerate
   - sentencepiece
   # The following packages are required to run benchmarks
   - sqlalchemy>=2.0.0
   - pytest
+  - pip
+  - pip:
+    - --extra-index-url https://download.pytorch.org/whl/cpu
+    - torch
+    - torchvision
+    - torchaudio
+    - transformers==4.35.2
+    - intel-extension-for-pytorch
+    - --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
+    - oneccl_bind_pt
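The default ipex environment is now CPU-only: the conda-side XPU packages are dropped in favor of pip wheels pulled from the CPU indexes. After conda env create -f tests/conda-envs/ipex.yaml, a minimal check that the right wheels landed (the expected outputs in the comments are assumptions based on how those indexes tag their builds):

import torch
import intel_extension_for_pytorch as ipex

print(torch.__version__)          # CPU wheels usually carry a +cpu suffix
print(ipex.__version__)           # CPU build from the Intel extension index
print(torch.cuda.is_available())  # expected False on this stack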
