From b87244e3475623784006a98fb76dc1cd8aa40d89 Mon Sep 17 00:00:00 2001
From: mgoin
Date: Mon, 10 Jun 2024 18:14:45 +0000
Subject: [PATCH 1/4] Add documentation for FP8 W8A8

---
 docs/source/quantization/fp8.rst | 165 +++++++++++++++++++++++++++++++
 1 file changed, 165 insertions(+)
 create mode 100644 docs/source/quantization/fp8.rst

diff --git a/docs/source/quantization/fp8.rst b/docs/source/quantization/fp8.rst
new file mode 100644
index 0000000000000..813d224d3cdbd
--- /dev/null
+++ b/docs/source/quantization/fp8.rst
@@ -0,0 +1,165 @@
+.. _fp8:
+
+FP8 Weight and Activation Quantization
+======================================
+
+Introduction to FP8
+-------------------
+
+vLLM supports FP8 (8-bit floating point) computation using hardware acceleration on GPUs such as Nvidia H100 and AMD MI300x. Currently, only Hopper and Ada Lovelace GPUs are supported. Quantization of models with FP8 allows for a 2x reduction in model memory requirements and up to a 1.6x improvement in throughput with minimal impact on accuracy.
+
+The FP8 types typically supported in hardware have two distinct representations, each useful in different scenarios:
+
+- **E4M3**: Consists of 1 sign bit, 4 exponent bits, and 3 bits of mantissa. It can store values up to +/-448 and `nan`.
+- **E5M2**: Consists of 1 sign bit, 5 exponent bits, and 2 bits of mantissa. It can store values up to +/-57344, +/- `inf`, and `nan`. The tradeoff for the increased dynamic range is lower precision of the stored values.
+
+Quick Start with Online Dynamic Quantization
+--------------------------------------------
+
+Dynamic quantization of an original precision BF16/FP16 model to FP8 can be achieved with vLLM without any calibration data required. You can enable the feature by specifying `--quantization="fp8"` in the command line or setting `quantization="fp8"` in the LLM constructor.
+
+In this mode, all Linear modules (except for the final `lm_head`) have their weights quantized down to FP8_E4M3 precision with a per-tensor scale. Activations have their minimum and maximum values calculated during each forward pass to provide a dynamic per-tensor scale for high accuracy. As a result, latency improvements are limited in this mode.
+
+.. code-block:: python
+
+    from vllm import LLM
+    model = LLM("facebook/opt-125m", quantization="fp8")
+    # INFO 06-10 17:55:42 model_runner.py:157] Loading model weights took 0.1550 GB
+
+.. note::
+
+    Currently, we load the model at original precision before quantizing down to 8-bits, so you need enough memory to load the whole model.
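+
+As an illustration of the per-tensor scaling described above, here is a small sketch. It is not part of vLLM and assumes a PyTorch build (2.1 or newer) that exposes the `torch.float8_e4m3fn` and `torch.float8_e5m2` dtypes:
+
+.. code-block:: python
+
+    import torch
+
+    # Dynamic range of the two FP8 formats discussed above.
+    print(torch.finfo(torch.float8_e4m3fn).max)  # 448.0
+    print(torch.finfo(torch.float8_e5m2).max)    # 57344.0
+
+    # Per-tensor quantization of a BF16 tensor to FP8-E4M3 with a single scale.
+    x = torch.randn(16, 16, dtype=torch.bfloat16)
+    scale = x.abs().max().float() / torch.finfo(torch.float8_e4m3fn).max
+    x_fp8 = (x.float() / scale).clamp(-448.0, 448.0).to(torch.float8_e4m3fn)
+
+    # Dequantize to inspect the rounding error introduced by FP8 storage.
+    x_dequant = x_fp8.float() * scale
+    print((x.float() - x_dequant).abs().max())
+
+vLLM and AutoFP8 compute these scales for you; the sketch is only meant to show what a per-tensor scale is.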
+
+Offline Quantization
+--------------------
+
+For offline quantization to FP8, please install the `AutoFP8 library <https://github.com/neuralmagic/AutoFP8>`_.
+
+.. code-block:: bash
+
+    git clone https://github.com/neuralmagic/AutoFP8.git
+    pip install -e AutoFP8
+
+This package introduces the `AutoFP8ForCausalLM` and `BaseQuantizeConfig` objects for managing how your model will be compressed.
+
+Offline Quantization with Dynamic Activation Scaling Factors
+------------------------------------------------------------
+
+You can use AutoFP8 to produce checkpoints with their weights quantized to FP8 ahead of time and let vLLM handle calculating dynamic scales for the activations at runtime for maximum accuracy. You can enable this with the `activation_scheme="dynamic"` argument.
+
+.. code-block:: python
+
+    from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig
+
+    pretrained_model_dir = "meta-llama/Meta-Llama-3-8B-Instruct"
+    quantized_model_dir = "Meta-Llama-3-8B-Instruct-FP8-Dynamic"
+
+    examples = []
+    quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="dynamic")
+
+    model = AutoFP8ForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)
+    model.quantize(examples)
+    model.save_quantized(quantized_model_dir)
+
+In the output of the above script, you should be able to see the quantized Linear modules replaced in the model definition. Note that the `lm_head` Linear module at the end is currently skipped by default.
+
+.. code-block:: text
+
+    LlamaForCausalLM(
+      (model): LlamaModel(
+        (embed_tokens): Embedding(128256, 4096)
+        (layers): ModuleList(
+          (0-31): 32 x LlamaDecoderLayer(
+            (self_attn): LlamaSdpaAttention(
+              (q_proj): FP8DynamicLinear()
+              (k_proj): FP8DynamicLinear()
+              (v_proj): FP8DynamicLinear()
+              (o_proj): FP8DynamicLinear()
+              (rotary_emb): LlamaRotaryEmbedding()
+            )
+            (mlp): LlamaMLP(
+              (gate_proj): FP8DynamicLinear()
+              (up_proj): FP8DynamicLinear()
+              (down_proj): FP8DynamicLinear()
+              (act_fn): SiLU()
+            )
+            (input_layernorm): LlamaRMSNorm()
+            (post_attention_layernorm): LlamaRMSNorm()
+          )
+        )
+        (norm): LlamaRMSNorm()
+      )
+      (lm_head): Linear(in_features=4096, out_features=128256, bias=False)
+    )
+    Saving the model to Meta-Llama-3-8B-Instruct-FP8-Dynamic
+
+Your model checkpoint with quantized weights should be available at `quantized_model_dir`. We can see that the weights are smaller than the original BF16 precision.
+
+.. code-block:: bash
+
+    ls -lh Meta-Llama-3-8B-Instruct-FP8-Dynamic/
+    total 8.5G
+    -rw-rw-r-- 1 user user  869 Jun  7 14:43 config.json
+    -rw-rw-r-- 1 user user  194 Jun  7 14:43 generation_config.json
+    -rw-rw-r-- 1 user user 4.7G Jun  7 14:43 model-00001-of-00002.safetensors
+    -rw-rw-r-- 1 user user 3.9G Jun  7 14:43 model-00002-of-00002.safetensors
+    -rw-rw-r-- 1 user user  43K Jun  7 14:43 model.safetensors.index.json
+    -rw-rw-r-- 1 user user  296 Jun  7 14:43 special_tokens_map.json
+    -rw-rw-r-- 1 user user  50K Jun  7 14:43 tokenizer_config.json
+    -rw-rw-r-- 1 user user 8.7M Jun  7 14:43 tokenizer.json
+
+Finally, you can load the quantized model checkpoint directly in vLLM.
+
+.. code-block:: python
+
+    from vllm import LLM
+    model = LLM(model="Meta-Llama-3-8B-Instruct-FP8-Dynamic/")
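+
+As a quick sanity check of the loaded checkpoint, you can run a short generation. This is only a sketch; the prompt and sampling settings below are arbitrary examples:
+
+.. code-block:: python
+
+    from vllm import LLM, SamplingParams
+
+    # Load the dynamic-scheme FP8 checkpoint produced above.
+    model = LLM(model="Meta-Llama-3-8B-Instruct-FP8-Dynamic/")
+
+    # Greedy decoding keeps the smoke test deterministic.
+    params = SamplingParams(temperature=0.0, max_tokens=32)
+    outputs = model.generate(["What is FP8 quantization?"], params)
+    print(outputs[0].outputs[0].text)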
+
+Offline Quantization with Static Activation Scaling Factors
+-----------------------------------------------------------
+
+For the best inference performance, you can use AutoFP8 with calibration data to produce per-tensor static scales for both the weights and activations by enabling the `activation_scheme="static"` argument.
+
+.. code-block:: python
+
+    from datasets import load_dataset
+    from transformers import AutoTokenizer
+    from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig
+
+    pretrained_model_dir = "meta-llama/Meta-Llama-3-8B-Instruct"
+    quantized_model_dir = "Meta-Llama-3-8B-Instruct-FP8"
+
+    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
+    tokenizer.pad_token = tokenizer.eos_token
+
+    ds = load_dataset("mgoin/ultrachat_2k", split="train_sft").select(range(512))
+    examples = [tokenizer.apply_chat_template(batch["messages"], tokenize=False) for batch in ds]
+    examples = tokenizer(examples, padding=True, truncation=True, return_tensors="pt").to("cuda")
+
+    quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")
+
+    model = AutoFP8ForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)
+    model.quantize(examples)
+    model.save_quantized(quantized_model_dir)
+
+Your model checkpoint with quantized weights should be available at `Meta-Llama-3-8B-Instruct-FP8/`. We can see that the weights are smaller than the original BF16 precision.
+
+.. code-block:: bash
+
+    ls -lh Meta-Llama-3-8B-Instruct-FP8/
+    total 8.5G
+    -rw-rw-r-- 1 user user  869 Jun  7 14:43 config.json
+    -rw-rw-r-- 1 user user  194 Jun  7 14:43 generation_config.json
+    -rw-rw-r-- 1 user user 4.7G Jun  7 14:43 model-00001-of-00002.safetensors
+    -rw-rw-r-- 1 user user 3.9G Jun  7 14:43 model-00002-of-00002.safetensors
+    -rw-rw-r-- 1 user user  43K Jun  7 14:43 model.safetensors.index.json
+    -rw-rw-r-- 1 user user  296 Jun  7 14:43 special_tokens_map.json
+    -rw-rw-r-- 1 user user  50K Jun  7 14:43 tokenizer_config.json
+    -rw-rw-r-- 1 user user 8.7M Jun  7 14:43 tokenizer.json
+
+Finally, you can load the quantized model checkpoint directly in vLLM.
+
+.. code-block:: python
+
+    from vllm import LLM
+    model = LLM(model="Meta-Llama-3-8B-Instruct-FP8/")
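+
+If you want to peek at the quantization parameters stored in the static checkpoint, a sketch like the following can help. It assumes the `safetensors` package is installed and that AutoFP8 stores its per-tensor scales in tensors whose names end with `_scale`:
+
+.. code-block:: python
+
+    from safetensors import safe_open
+
+    # List the FP8 scale tensors saved in the first checkpoint shard.
+    path = "Meta-Llama-3-8B-Instruct-FP8/model-00001-of-00002.safetensors"
+    with safe_open(path, framework="pt") as f:
+        for name in f.keys():
+            if name.endswith("_scale"):
+                print(name, f.get_tensor(name).dtype)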

From 09488ab85f8bcc6be1b8a95465a296c2a2f69688 Mon Sep 17 00:00:00 2001
From: Michael Goin
Date: Mon, 10 Jun 2024 15:16:22 -0400
Subject: [PATCH 2/4] Update fp8.rst

---
 docs/source/quantization/fp8.rst | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/docs/source/quantization/fp8.rst b/docs/source/quantization/fp8.rst
index 813d224d3cdbd..a7aaae8a28050 100644
--- a/docs/source/quantization/fp8.rst
+++ b/docs/source/quantization/fp8.rst
@@ -1,13 +1,12 @@
 .. _fp8:
 
-FP8 Weight and Activation Quantization
-======================================
-
-Introduction to FP8
--------------------
+FP8
+==================
 
 vLLM supports FP8 (8-bit floating point) computation using hardware acceleration on GPUs such as Nvidia H100 and AMD MI300x. Currently, only Hopper and Ada Lovelace GPUs are supported. Quantization of models with FP8 allows for a 2x reduction in model memory requirements and up to a 1.6x improvement in throughput with minimal impact on accuracy.
 
+Please visit the HF collection of `quantized FP8 checkpoints of popular LLMs ready to use with vLLM `_.
+
 The FP8 types typically supported in hardware have two distinct representations, each useful in different scenarios:
 
 - **E4M3**: Consists of 1 sign bit, 4 exponent bits, and 3 bits of mantissa. It can store values up to +/-448 and `nan`.
@@ -26,7 +25,7 @@ In this mode, all Linear modules (except for the final `lm_head`) have their wei
     model = LLM("facebook/opt-125m", quantization="fp8")
     # INFO 06-10 17:55:42 model_runner.py:157] Loading model weights took 0.1550 GB
 
-.. note::
+.. warning::
 
     Currently, we load the model at original precision before quantizing down to 8-bits, so you need enough memory to load the whole model.
 
@@ -47,6 +46,10 @@ Offline Quantization with Dynamic Activation Scaling Factors
 You can use AutoFP8 to produce checkpoints with their weights quantized to FP8 ahead of time and let vLLM handle calculating dynamic scales for the activations at runtime for maximum accuracy. You can enable this with the `activation_scheme="dynamic"` argument.
 
+.. warning::
+
+    Please note that this mode does not improve performance over online dynamic quantization, but it does reduce the memory footprint since the checkpoint already stores the weights in FP8.
+
 .. code-block:: python
 
     from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig
 
     pretrained_model_dir = "meta-llama/Meta-Llama-3-8B-Instruct"
     quantized_model_dir = "Meta-Llama-3-8B-Instruct-FP8-Dynamic"
 
+    # For dynamic activation scales, there is no need for calibration examples
     examples = []
     quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="dynamic")

From cb78405d0e4c98c19c9a2c558d497c5f1ca05d8e Mon Sep 17 00:00:00 2001
From: Michael Goin
Date: Mon, 10 Jun 2024 15:30:04 -0400
Subject: [PATCH 3/4] Add FP8 to toctree

---
 docs/source/index.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/source/index.rst b/docs/source/index.rst
index fad3c3b05b0c0..0ebd1fb7a7dec 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -96,6 +96,7 @@ Documentation
    :caption: Quantization
 
    quantization/auto_awq
+   quantization/fp8
    quantization/fp8_e5m2_kvcache
    quantization/fp8_e4m3_kvcache
 

From 555334dec6d708da1f9601cd0ee0084585beb768 Mon Sep 17 00:00:00 2001
From: mgoin
Date: Mon, 10 Jun 2024 21:36:54 +0000
Subject: [PATCH 4/4] Fix inline code and add checkpoint format

---
 docs/source/quantization/fp8.rst | 83 ++++++++++++++++++++++----------
 1 file changed, 58 insertions(+), 25 deletions(-)

diff --git a/docs/source/quantization/fp8.rst b/docs/source/quantization/fp8.rst
index a7aaae8a28050..0c88d8d715096 100644
--- a/docs/source/quantization/fp8.rst
+++ b/docs/source/quantization/fp8.rst
@@ -9,21 +9,22 @@ Please visit the HF collection of `quantized FP8 checkpoints of popular LLMs rea
 
 The FP8 types typically supported in hardware have two distinct representations, each useful in different scenarios:
 
-- **E4M3**: Consists of 1 sign bit, 4 exponent bits, and 3 bits of mantissa. It can store values up to +/-448 and `nan`.
-- **E5M2**: Consists of 1 sign bit, 5 exponent bits, and 2 bits of mantissa. It can store values up to +/-57344, +/- `inf`, and `nan`. The tradeoff for the increased dynamic range is lower precision of the stored values.
+- **E4M3**: Consists of 1 sign bit, 4 exponent bits, and 3 bits of mantissa. It can store values up to +/-448 and ``nan``.
+- **E5M2**: Consists of 1 sign bit, 5 exponent bits, and 2 bits of mantissa. It can store values up to +/-57344, +/- ``inf``, and ``nan``. The tradeoff for the increased dynamic range is lower precision of the stored values.
 
 Quick Start with Online Dynamic Quantization
 --------------------------------------------
 
-Dynamic quantization of an original precision BF16/FP16 model to FP8 can be achieved with vLLM without any calibration data required. You can enable the feature by specifying `--quantization="fp8"` in the command line or setting `quantization="fp8"` in the LLM constructor.
+Dynamic quantization of an original precision BF16/FP16 model to FP8 can be achieved with vLLM without any calibration data required. You can enable the feature by specifying ``--quantization="fp8"`` in the command line or setting ``quantization="fp8"`` in the LLM constructor. -In this mode, all Linear modules (except for the final `lm_head`) have their weights quantized down to FP8_E4M3 precision with a per-tensor scale. Activations have their minimum and maximum values calculated during each forward pass to provide a dynamic per-tensor scale for high accuracy. As a result, latency improvements are limited in this mode. +In this mode, all Linear modules (except for the final ``lm_head``) have their weights quantized down to FP8_E4M3 precision with a per-tensor scale. Activations have their minimum and maximum values calculated during each forward pass to provide a dynamic per-tensor scale for high accuracy. As a result, latency improvements are limited in this mode. .. code-block:: python from vllm import LLM model = LLM("facebook/opt-125m", quantization="fp8") # INFO 06-10 17:55:42 model_runner.py:157] Loading model weights took 0.1550 GB + result = model.generate("Hello, my name is") .. warning:: @@ -39,12 +40,12 @@ For offline quantization to FP8, please install the `AutoFP8 library `_ contained within quantized checkpoints specified through the ``.kv_scale`` parameter present on the Attention Module, such as: + +.. code-block:: text + model.layers.0.self_attn.kv_scale < F32 \ No newline at end of file