From c1dba83a216c8de298d814f91293d9a4f3a1d8b1 Mon Sep 17 00:00:00 2001
From: denis-ismailaj
Date: Sat, 2 Dec 2023 14:51:29 +0100
Subject: [PATCH] Replace _expand_mask with _prepare_4d_attention_mask

---
 lavis/models/blip_diffusion_models/modeling_ctx_clip.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lavis/models/blip_diffusion_models/modeling_ctx_clip.py b/lavis/models/blip_diffusion_models/modeling_ctx_clip.py
index 737b77d3f..2e8c6efe0 100644
--- a/lavis/models/blip_diffusion_models/modeling_ctx_clip.py
+++ b/lavis/models/blip_diffusion_models/modeling_ctx_clip.py
@@ -13,8 +13,8 @@
 from transformers.models.clip.modeling_clip import (
     CLIPEncoder,
     CLIPPreTrainedModel,
-    _expand_mask,
 )
+from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
 
 
 class CtxCLIPTextModel(CLIPPreTrainedModel):
@@ -136,7 +136,7 @@ def forward(
         # expand attention_mask
         if attention_mask is not None:
             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-            attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
+            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
 
         encoder_outputs = self.encoder(
             inputs_embeds=hidden_states,
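
A note on compatibility (not part of the patch): _prepare_4d_attention_mask
is only exposed from transformers.modeling_attn_mask_utils in newer
transformers releases (around 4.35, when the mask helpers were moved to a
shared module), while older releases expose _expand_mask from
transformers.models.clip.modeling_clip instead. If LAVIS needs to keep
supporting both, a try/except import shim is one option; a minimal sketch,
with the version boundary approximate:

    try:
        # newer transformers: helper lives in a shared attn-mask utils module
        from transformers.modeling_attn_mask_utils import (
            _prepare_4d_attention_mask,
        )
    except ImportError:
        # older transformers: fall back to the CLIP-local helper, which has
        # the same (mask, dtype) call signature
        from transformers.models.clip.modeling_clip import (
            _expand_mask as _prepare_4d_attention_mask,
        )

Either helper maps a [bsz, seq_len] padding mask to the inverted additive
[bsz, 1, tgt_seq_len, src_seq_len] float mask the encoder expects, e.g.:

    import torch

    mask = torch.ones(2, 7)                   # [bsz, seq_len]
    mask4d = _prepare_4d_attention_mask(mask, torch.float32)
    print(mask4d.shape)                       # torch.Size([2, 1, 7, 7])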