Merge branch 'habana-main' into 2.3.0
yuanwu2017 authored Oct 23, 2024
2 parents 67ee45a + c5e3881 commit 8686a0f
Showing 7 changed files with 93 additions and 99 deletions.
1 change: 1 addition & 0 deletions Dockerfile
@@ -73,6 +73,7 @@ RUN cd server && \
pip install -r requirements.txt && \
bash ./dill-0.3.8-patch.sh && \
pip install git+https://github.com/HabanaAI/DeepSpeed.git@1.17.0 && \
BUILD_CUDA_EXT=0 pip install git+https://github.com/AutoGPTQ/AutoGPTQ.git@097dd04e --no-build-isolation && \
pip install . --no-cache-dir

# Install benchmarker
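A note on this Dockerfile change: `BUILD_CUDA_EXT=0` tells AutoGPTQ's build to skip compiling its CUDA extension, which is not usable on Gaudi (HPU) hardware. A minimal sanity check one might run inside the built image (a sketch, not part of this commit; the `--entrypoint python` override, e.g. `docker run --rm --entrypoint python tgi_gaudi ...`, is an assumption):

```python
# Hypothetical check, run inside the tgi_gaudi image, that AutoGPTQ installed
# and imports even though its CUDA extension was skipped via BUILD_CUDA_EXT=0.
import auto_gptq

print(auto_gptq.__version__)
```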
54 changes: 29 additions & 25 deletions README.md
@@ -40,36 +40,40 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
> ```bash
> docker build -t tgi_gaudi .
> ```
2. Launch a local server instance:
i. On 1 Gaudi card
```bash
model=meta-llama/Llama-2-7b-hf
hf_token=YOUR_ACCESS_TOKEN
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
```
> For gated models such as [StarCoder](https://huggingface.co/bigcode/starcoder), you will have to pass `-e HUGGING_FACE_HUB_TOKEN=<token>` to the `docker run` command above with a valid Hugging Face Hub read token.
2. Use one of the following snippets to launch a local server instance:
> [!NOTE]
> For gated models such as [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf), you will have to pass `-e HF_TOKEN=<token>` to the `docker run` commands below with a valid Hugging Face Hub read token.
ii. On 1 Gaudi card using PyTorch eager mode with torch compile:
i. On 1 Gaudi card
```bash
model=meta-llama/Llama-2-7b-hf
hf_token=YOUR_ACCESS_TOKEN
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e PT_HPU_LAZY_MODE=0 -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HF_TOKEN=$hf_token \
-e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true \
-e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host \
ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 1024 \
--max-total-tokens 2048
```
iii. On 8 Gaudi cards:
ii. On 8 Gaudi cards:
```bash
model=meta-llama/Llama-2-70b-hf
hf_token=YOUR_ACCESS_TOKEN
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

docker run -p 8080:80 -v $volume:/data --runtime=habana -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --sharded true --num-shard 8 --max-input-tokens 1024 --max-total-tokens 2048
docker run -p 8080:80 -v $volume:/data --runtime=habana -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \
-e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e HF_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true \
-e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice \
--ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --sharded true \
--num-shard 8 --max-input-tokens 1024 --max-total-tokens 2048
```
3. You can then send a simple request:
3. Wait for the TGI-Gaudi server to come online. You will see a log line like the following:
> 2024-05-22T19:31:48.302239Z INFO text_generation_router: router/src/main.rs:378: Connected
You can then send a simple request to the server from a separate terminal:
```bash
curl 127.0.0.1:8080/generate \
-X POST \
@@ -124,7 +128,7 @@ docker run -p 8080:80 \
--runtime=habana \
-v $volume:/data \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e MAX_TOTAL_TOKENS=2048 \
@@ -155,7 +159,7 @@ docker run -p 8080:80 \
--runtime=habana \
-v $volume:/data \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \
@@ -188,7 +192,7 @@ docker run -p 8080:80 \
--runtime=habana \
-v $volume:/data \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e MAX_TOTAL_TOKENS=2048 \
@@ -219,7 +223,7 @@ docker run -p 8080:80 \
--runtime=habana \
-v $volume:/data \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \
@@ -281,7 +285,7 @@ curl -N 127.0.0.1:8080/generate_stream \

## Running TGI with FP8 Precision

TGI-Gaudi supports FP8 precision inference with INC (Intel Neural Compressor) and HQT (Habana Quantization Toolkit). FP8 inference can be run by setting QUANT_CONFIG environment variable in the docker command. From TGI-Gaudi 2.0.4 release, INC is used by default for quantization. HQT will be removed in future releases. To use HQT, disable INC by setting `-e USE_INC=0` in docker command.
TGI-Gaudi supports FP8 precision inference with [Intel Neural Compressor (INC)](https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_FP8.html). FP8 inference can be run by setting the `QUANT_CONFIG` environment variable in the Docker command.

To run FP8 Inference:

@@ -303,7 +307,7 @@ docker run -p 8080:80 \
-v $PWD/hqt_output:/usr/src/hqt_output \
-e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e MAX_TOTAL_TOKENS=2048 \
@@ -337,7 +341,7 @@ docker run -p 8080:80 \
-v $PWD/hqt_output:/usr/src/hqt_output \
-e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \
@@ -374,7 +378,7 @@ docker run -p 8080:80 \
-v $PWD/hqt_output:/usr/src/hqt_output \
-e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e MAX_TOTAL_TOKENS=2048 \
@@ -408,7 +412,7 @@ docker run -p 8080:80 \
-v $PWD/hqt_output:/usr/src/hqt_output \
-e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
-e HABANA_VISIBLE_DEVICES=all \
-e HUGGING_FACE_HUB_TOKEN=$hf_token \
-e HF_TOKEN=$hf_token \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
-e TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN=true \
-e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \
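As an aside to the README changes above: besides `curl`, the running server can also be queried from Python. The sketch below is not part of this commit; it assumes the `huggingface_hub` client library is installed on the host and that the server launched with the commands above is listening on `127.0.0.1:8080`.

```python
# Minimal Python client for the locally running TGI-Gaudi server (a sketch,
# not part of this commit). Install the client first: pip install huggingface_hub
from huggingface_hub import InferenceClient

client = InferenceClient("http://127.0.0.1:8080")  # base URL of the local TGI server
output = client.text_generation("What is Deep Learning?", max_new_tokens=32)
print(output)
```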
4 changes: 2 additions & 2 deletions examples/README.md
@@ -7,7 +7,7 @@ This example provide a simple way of usage of `tgi-gaudi` with continuous batchi
### Install

```
pip install -r requirements
pip install -r requirements.txt
```

### Setup TGI server
@@ -36,4 +36,4 @@ All possible parameters are described in the below table:
| TOTAL_SAMPLE_COUNT | 2048 | Number of samples to run. |
| MAX_CONCURRENT_REQUESTS | 256 | The number of requests sent simultaneously to the TGI server. |

</div>
</div>
38 changes: 28 additions & 10 deletions server/text_generation_server/habana_quantization_env.py
@@ -1,6 +1,7 @@
# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.

import os
import habana_frameworks.torch as htorch

quant_config = os.getenv("QUANT_CONFIG", "")
is_quantization_enabled = quant_config != ""
@@ -10,18 +11,35 @@
os.environ.setdefault("USE_DEFAULT_QUANT_PARAM", "true")
os.environ.setdefault("UPDATE_GRAPH_OUTPUT_MME", "false")
os.environ.setdefault("ENABLE_CALC_DYNAMIC_RANGE", "false")
os.environ.setdefault(
"UPDATE_MME_OUTPUT_PRECISION_FILTER", "v_proj,matmul_av")
os.environ.setdefault("UPDATE_MME_OUTPUT_PRECISION_FILTER", "v_proj,matmul_av")
os.environ.setdefault("EXPERIMENTAL_WEIGHT_SHARING", "FALSE")


def patch_scoped_linear_all_reduce(model):
from deepspeed.module_inject.layers import LinearAllreduce
from optimum.habana.transformers.models.modeling_all_models import ScopedLinearAllReduce

for name, module in model.named_children():
if type(module) is LinearAllreduce:
SL = ScopedLinearAllReduce(mod=module)
setattr(model, name, SL)
patch_scoped_linear_all_reduce(module)


def setup_quantization(model):
if is_quantization_enabled:
htorch.core.quantization._mark_params_as_const(model)
htorch.core.quantization._check_params_as_const(model)
htorch.core.hpu_initialize(model)
return model


def prepare_model_for_quantization(model):
if is_quantization_enabled:
if os.getenv("USE_INC", "1") != "0":
from neural_compressor.torch.quantization import FP8Config, convert
config = FP8Config.from_json_file(quant_config)
model = convert(model, config)
else:
import habana_quantization_toolkit
habana_quantization_toolkit.prep_model(model)
return model
if model.config.model_type in ["llama", "falcon", "qwen2", "starcoder2", "gemma"]:
patch_scoped_linear_all_reduce(model)
from neural_compressor.torch.quantization import FP8Config, convert

config = FP8Config.from_json_file(quant_config)
model = convert(model, config)
return model
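For context, the two helpers this commit moves into `habana_quantization_env.py` (`prepare_model_for_quantization` and `setup_quantization`) are consumed from `causal_lm.py` further down in this diff. A condensed sketch of that call order (illustrative only; it compresses the actual `CausalLM.__init__`, uses a placeholder model id, and only runs on a Gaudi machine since the module imports `habana_frameworks.torch`):

```python
# Illustrative ordering only -- a compressed view of how causal_lm.py uses the
# relocated helpers in this commit; model id and device handling are placeholders.
import text_generation_server.habana_quantization_env as hq_env
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
model = hq_env.prepare_model_for_quantization(model)  # INC FP8 convert when QUANT_CONFIG is set
model = model.eval().to("hpu")
# ... wrap_in_hpu_graph() / torch.compile() happen here in causal_lm.py ...
model = hq_env.setup_quantization(model)  # mark params const + hpu_initialize when quantization is enabled
```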
53 changes: 20 additions & 33 deletions server/text_generation_server/models/causal_lm.py
@@ -665,7 +665,7 @@ def __init__(
model = self.get_deepspeed_model(
model_id, dtype, revision
)
model = self.prepare_model_for_quantization(model)
model = hq_env.prepare_model_for_quantization(model)
else:
get_repo_root(model_id)

@@ -684,12 +684,15 @@ def __init__(
trust_remote_code=trust_remote_code,
**model_kwargs
)
model = self.prepare_model_for_quantization(model)
model = hq_env.prepare_model_for_quantization(model)
model = model.eval().to(device)

self.enable_hpu_graph = os.getenv("ENABLE_HPU_GRAPH", "true").lower() == "true" and LAZY_MODE == 1
self.limit_hpu_graph = os.getenv("LIMIT_HPU_GRAPH", "false").lower() == "true"
model = remove_kv_cache_from_output(model)

if model.config.model_type not in ["gpt_bigcode"]: # gpt_bigcode/starcoderbase-3b skips remove_kv_cache_from_output()
model = remove_kv_cache_from_output(model)

if self.enable_hpu_graph:
from habana_frameworks.torch.hpu import wrap_in_hpu_graph
model = wrap_in_hpu_graph(model, disable_tensor_cache=True)
@@ -700,7 +703,7 @@ def __init__(
"TORCH COMPILE", f'Torch compiling of model')
model.model = torch.compile(model.model, backend="hpu_backend", options={"keep_input_mutations": True})

model = self.setup_quantization(model)
model = hq_env.setup_quantization(model)

if model.config.model_type not in MODELS_OPTIMIZED_WITH_STATIC_SHAPES:
raise ValueError(f"Model type {model.config.model_type} is not supported!")
@@ -727,10 +730,13 @@ def __init__(
"return_dict": True,
}

if model.config.model_type in ["llama", "mistral", "starcoder2", "qwen2"]:

if model.config.model_type in ["llama", "mistral", "qwen2"]:

if model.config.model_type in ["llama", "mistral", "starcoder2", "qwen2", "falcon", "gemma"]:

if model.config.model_type not in ["falcon"]:
self.kwargs["attn_softmax_bf16"] = True

if model.config.model_type not in ["gemma"]:
self.kwargs["trim_logits"] = True

if os.getenv("USE_FLASH_ATTENTION", "false").lower() == "true":
@@ -832,29 +838,6 @@ def get_rope_scaling(self) -> Optional[Dict]:
'type': rope_scaling, 'factor': float(rope_factor)
}

def setup_quantization(self, model):
if hq_env.is_quantization_enabled:
htorch.core.quantization._mark_params_as_const(model)
htorch.core.quantization._check_params_as_const(model)
htorch.core.hpu_initialize(model)
return model

def prepare_model_for_quantization(self, model):
if hq_env.is_quantization_enabled:
if model.config.model_type == "llama":
self.patch_scoped_linear_all_reduce(model)
model = hq_env.prepare_model_for_quantization(model)
return model

def patch_scoped_linear_all_reduce(self, model):
from deepspeed.module_inject.layers import LinearAllreduce
from optimum.habana.transformers.models.modeling_all_models import ScopedLinearAllReduce
for name, module in model.named_children():
if type(module) is LinearAllreduce:
SL = ScopedLinearAllReduce(mod=module)
setattr(model, name, SL)
self.patch_scoped_linear_all_reduce(module)

@property
def batch_type(self) -> Type[CausalLMBatch]:
return CausalLMBatch
@@ -903,7 +886,7 @@ def forward(

kwargs.update(self.kwargs)

if past_key_values is not None:
if past_key_values is not None and self.model.config.model_type not in ["gpt_bigcode"]:
return self.model.forward(**kwargs)
else:
outputs = self.model.forward(**kwargs)
@@ -988,7 +971,7 @@ def generate_token(
else:
batch.position_ids += 1
# Update past key values
if prefill:
if prefill or self.model.config.model_type in ["gpt_bigcode"]:
batch.past_key_values = past

htorch.core.mark_step()
@@ -1032,14 +1015,18 @@ def generate_token(
else:
token_idx = torch.tensor(batch.attention_mask.shape[-1] - batch.right_padding).to(self.device)
input_ids = torch.index_select(batch.input_ids, 1, token_idx - 1)
batch.logits = self.forward(
logits = self.forward(
input_ids,
batch.attention_mask,
batch.position_ids,
token_idx,
batch.past_key_values,
bypass_hpu_graph=prefill and self.limit_hpu_graph if self.enable_hpu_graph else None,
)
if self.model.config.model_type in ["gpt_bigcode"]:
batch.logits, batch.past = logits
else:
batch.logits = logits

htorch.core.mark_step()

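Taken together, the `gpt_bigcode` branches added to `causal_lm.py` above mean that StarCoder-family models keep their KV cache in the model outputs (they skip `remove_kv_cache_from_output()`), so `forward()` yields a `(logits, past)` pair and the cache is refreshed on every decode step rather than only at prefill. A condensed restatement of that branching (illustrative, not the literal code):

```python
from typing import Any, Tuple, Union

def store_forward_output(batch: Any, model_type: str,
                         output: Union[Any, Tuple[Any, Any]]) -> None:
    """Condensed restatement of the branching added in generate_token():
    gpt_bigcode models return (logits, kv_cache) because
    remove_kv_cache_from_output() is skipped for them; every other model
    type returns logits only, with the cache handled inside the graph."""
    if model_type == "gpt_bigcode":
        batch.logits, batch.past = output   # model still returns its KV cache
    else:
        batch.logits = output               # cache was stripped from the outputs
```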
