diff --git a/llm/llama3/xpu/_sources/index.md.txt b/llm/llama3/xpu/_sources/index.md.txt
index 4b3429720..8cf6babda 100644
--- a/llm/llama3/xpu/_sources/index.md.txt
+++ b/llm/llama3/xpu/_sources/index.md.txt
@@ -51,7 +51,7 @@ pip install accelerate datasets diffusers
 git clone https://github.com/intel/intel-extension-for-pytorch.git
 cd intel-extension-for-pytorch
 git checkout dev/llama-int4
-cd intel-extension-for-pytorch/examples/gpu/inference/python/llm
+cd examples/gpu/inference/python/llm
 ```

 | Key args of run_generation_gpu_woq_for_llama.py | Notes |
diff --git a/llm/llama3/xpu/genindex.html b/llm/llama3/xpu/genindex.html
index c9b5c14a4..8ba6f073c 100644
--- a/llm/llama3/xpu/genindex.html
+++ b/llm/llama3/xpu/genindex.html
@@ -95,7 +95,7 @@
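For reference, the patched quick-start commands read as follows when run end to end. Every command is copied from the hunk above; the only assumption is that they are run from the directory where the repository is cloned.

```bash
# Clone the repo, switch to the LLaMA int4 dev branch, and enter the LLM
# example directory via a path relative to the repo root (the fix above).
git clone https://github.com/intel/intel-extension-for-pytorch.git
cd intel-extension-for-pytorch
git checkout dev/llama-int4
cd examples/gpu/inference/python/llm
```

The old instruction repeated `intel-extension-for-pytorch/` in the final `cd`, which fails because the previous `cd` has already entered the repository root; the relative path is the correct one.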