mirror of https://github.com/InternLM/InternLM
Fit to flash attention 1.0.5.
parent b38ba5dad2
commit 66eba48c9f
@@ -10,15 +10,15 @@ import torch.nn.functional as F
 from einops import rearrange

 try:
-    from flash_attn import flash_attn_unpadded_func
+    from flash_attn.flash_attn_interface import flash_attn_unpadded_func
 except ImportError:
     try:
-        from flash_attn import (
+        from flash_attn.flash_attn_interface import (
             flash_attn_unpadded_kvpacked_func as flash_attn_unpadded_func,
         )
     except ImportError:
         try:
-            from flash_attn import (
+            from flash_attn.flash_attn_interface import (
                 flash_attn_varlen_kvpacked_func as flash_attn_unpadded_func,
             )
         except ImportError:
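For reference, the import chain introduced by this hunk can be exercised as a standalone snippet. The sketch below is illustrative only: the release annotations in the comments and the final fallback branch are assumptions, since the hunk is truncated at the last except ImportError: and does not show how that case is handled in the file.

# Minimal, self-contained sketch of the fallback chain this commit installs.
# The release annotations and the final branch are assumptions, not part of
# the commit itself.
import warnings

try:
    # flash-attn 1.0.x: the variable-length ("unpadded") kernel is exported
    # from flash_attn.flash_attn_interface rather than the package top level.
    from flash_attn.flash_attn_interface import flash_attn_unpadded_func
except ImportError:
    try:
        # Builds that only expose the kv-packed variant under the old name.
        from flash_attn.flash_attn_interface import (
            flash_attn_unpadded_kvpacked_func as flash_attn_unpadded_func,
        )
    except ImportError:
        try:
            # Later releases rename "unpadded" to "varlen".
            from flash_attn.flash_attn_interface import (
                flash_attn_varlen_kvpacked_func as flash_attn_unpadded_func,
            )
        except ImportError:
            # Assumed handling: fall back to a sentinel so the module imports.
            flash_attn_unpadded_func = None

if flash_attn_unpadded_func is None:
    warnings.warn("no compatible flash-attn build found; flash attention is unavailable")

One consequence of this pattern worth noting: the second and third branches alias kv-packed functions to the unpadded name, and those variants take a packed kv tensor rather than separate k and v arguments, so call sites have to account for whichever variant actually resolved.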