--- ldm/modules/attention.py	2022-09-04 12:27:32.000000000 +0100
+++ ldm/modules/attention.py.opt	2022-09-04 12:27:03.000000000 +0100
@@ -179,25 +179,29 @@
         context = default(context, x)
         k = self.to_k(context)
         v = self.to_v(context)
+        del context, x
 
         q, k, v = map(
             lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)
         )
 
         sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+        del q, k
 
         if exists(mask):
             mask = rearrange(mask, 'b ... -> b (...)')
             max_neg_value = -torch.finfo(sim.dtype).max
             mask = repeat(mask, 'b j -> (b h) () j', h=h)
             sim.masked_fill_(~mask, max_neg_value)
+            del mask
 
         # attention, what we cannot get enough of
-        attn = sim.softmax(dim=-1)
+        sim[4:] = sim[4:].softmax(dim=-1)
+        sim[:4] = sim[:4].softmax(dim=-1)
 
-        out = einsum('b i j, b j d -> b i d', attn, v)
-        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
-        return self.to_out(out)
+        sim = einsum('b i j, b j d -> b i d', sim, v)
+        sim = rearrange(sim, '(b h) n d -> b n (h d)', h=h)
+        return self.to_out(sim)
 
 
 class BasicTransformerBlock(nn.Module):
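
The del statements drop references to intermediates (context, x, q, k, mask) as soon as they are no longer needed, so the allocator can reuse that memory before the next large allocation. The other change relies on softmax being row-wise independent: applying it slice by slice along the (b h) dimension and writing each result back into sim gives the same attention weights as sim.softmax(dim=-1), while only a slice-sized temporary is ever materialised instead of a second full-size attn tensor. The following standalone sketch (not part of the patch; the tensor sizes are arbitrary demo values, only the split point 4 matches the diff) checks that equivalence:

import torch

# Toy logits standing in for `sim` after the einsum: shape (b*h, n_q, n_k).
# These sizes are assumptions for the demo, not taken from the model.
b_h, n_q, n_k = 16, 64, 77
sim = torch.randn(b_h, n_q, n_k)

# What the unpatched code computes (allocates a second full-size tensor).
reference = sim.softmax(dim=-1)

# The patched version: softmax each slice of the (b*h) dimension and write
# it back into the same buffer, so only a slice-sized temporary is live.
sim[4:] = sim[4:].softmax(dim=-1)
sim[:4] = sim[:4].softmax(dim=-1)

assert torch.allclose(sim, reference)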