Unverified Commit 280ed8f0 authored by FNSpd's avatar FNSpd Committed by GitHub

Update sd_hijack_optimizations.py

parent beb7dda5
+1 −1
@@ -372,7 +372,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):

     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()
 
     # the output of sdp = (batch, num_heads, seq_len, head_dim)
     hidden_states = torch.nn.functional.scaled_dot_product_attention(
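Context for the one-line change above (not part of the commit): torch.nn.functional.scaled_dot_product_attention expects query, key, and value to share a dtype, so upcasting only q and k to float32 while v stays in half precision leads to a dtype mismatch under upcast_attn. The sketch below uses made-up tensor shapes and a local upcast_attn flag standing in for shared.opts.upcast_attn; it shows the corrected upcast of all three tensors followed by a cast back to the saved dtype, mirroring the `dtype = q.dtype` bookkeeping visible in the hunk.

```python
import torch

# Hypothetical stand-ins for the tensors and option used in the real forward pass:
# (batch, num_heads, seq_len, head_dim) half-precision attention inputs.
upcast_attn = True  # stands in for shared.opts.upcast_attn
q = torch.randn(1, 8, 77, 64).half()
k = torch.randn(1, 8, 77, 64).half()
v = torch.randn(1, 8, 77, 64).half()

dtype = q.dtype
if upcast_attn:
    # The fix: upcast v together with q and k so all three inputs are float32.
    q, k, v = q.float(), k.float(), v.float()

# the output of sdp = (batch, num_heads, seq_len, head_dim)
hidden_states = torch.nn.functional.scaled_dot_product_attention(q, k, v)

# Cast back to the original dtype, as implied by the saved `dtype` above.
hidden_states = hidden_states.to(dtype)
print(hidden_states.dtype)  # torch.float16
```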