use softmax_last_dim (metal and cuda kernel) in llama attention layer (#2572)
parent 7c09215ef4
commit a2e9d41b20
```diff
@@ -341,7 +341,8 @@ impl CausalSelfAttention {
                 let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
                 masked_fill(&att, &mask, f32::NEG_INFINITY)?
             };
-            let att = candle_nn::ops::softmax(&att, D::Minus1)?;
+
+            let att = candle_nn::ops::softmax_last_dim(&att)?;
             // Convert to contiguous as matmul doesn't support strided vs for now.
             att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?
         };
```
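For context: `softmax_last_dim` computes the same result as `softmax(&att, D::Minus1)` but, per the commit title, can dispatch to dedicated Metal and CUDA kernels rather than composing generic tensor ops. Below is a minimal sketch of that equivalence, assuming `candle-core` and `candle-nn` as dependencies; the tensor shape and `Device::Cpu` are illustrative only (on a CUDA or Metal device the fused kernel path is taken).

```rust
use candle_core::{D, Device, Result, Tensor};

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Illustrative attention-score shape: (batch, heads, seq_len, seq_len).
    let att = Tensor::randn(0f32, 1f32, (1, 2, 4, 4), &device)?;

    // Generic softmax along an arbitrary dimension.
    let a = candle_nn::ops::softmax(&att, D::Minus1)?;
    // Specialized last-dim softmax; on CUDA/Metal this uses a fused kernel.
    let b = candle_nn::ops::softmax_last_dim(&att)?;

    // The two results should agree up to floating-point rounding.
    let max_diff = (&a - &b)?.abs()?.flatten_all()?.max(0)?.to_scalar::<f32>()?;
    println!("max abs diff: {max_diff}");
    Ok(())
}
```

Since attention always normalizes scores over the last dimension, the specialized call is a drop-in replacement here.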