We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 901e13c commit 33f6e95 — Copy full SHA for 33f6e95
vllm/model_executor/models/vision.py
@@ -169,9 +169,10 @@ def resolve_visual_encoder_outputs(
169
]
170
171
# Apply post-norm on the final hidden state if we are using it
172
- uses_last_layer = select_layers[-1] in (len(hs_pool) - 1, -1)
+ uses_last_layer = select_layers[-1] in (max_possible_layers - 1, -1)
173
if post_layer_norm is not None and uses_last_layer:
174
- hs_pool[-1] = post_layer_norm(encoder_outputs)
+ hs_pool[-1] = post_layer_norm(hs_pool[-1])
175
+
176
return torch.cat(hs_pool, dim=-1)
177
178
0 commit comments