We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 2427791 · commit ae9e713 — Copy full SHA for ae9e713
vllm/model_executor/models/chatglm.py
@@ -10,6 +10,7 @@
10
from torch.nn import LayerNorm
11
12
from vllm.attention import Attention
13
+from vllm.compilation.decorators import support_torch_compile
14
from vllm.config import CacheConfig, VllmConfig
15
from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
16
from vllm.model_executor.layers.activation import SiluAndMul
@@ -293,6 +294,7 @@ def forward(
293
294
return hidden_states
295
296
297
+@support_torch_compile
298
class ChatGLMModel(nn.Module):
299
300
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
0 commit comments