commit 01864e4 (parent: 26403fa)
src/fairseq2/models/transformer/_sdpa/_flex.py
@@ -8,7 +8,6 @@

 from typing import Callable, TypeAlias, final

-import torch
 from torch import Tensor
 from torch.nn.attention.flex_attention import flex_attention
 from typing_extensions import override
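
For context, the flex_attention function imported in this diff is PyTorch's programmable attention entry point, which takes a score_mod callback to rewrite attention scores. Below is a minimal, hypothetical sketch of how it is typically invoked with a causal score_mod; the causal helper and the tensor shapes are illustrative assumptions, not fairseq2's actual SDPA implementation.

# Illustrative sketch only; not fairseq2's _flex.py code.
import torch
from torch.nn.attention.flex_attention import flex_attention

def causal(score, b, h, q_idx, kv_idx):
    # Keep the score where the query position can attend to the key
    # position; mask out future positions with -inf.
    return torch.where(q_idx >= kv_idx, score, -float("inf"))

q = torch.randn(1, 4, 16, 8)  # (batch, num_heads, seq_len, head_dim)
k = torch.randn(1, 4, 16, 8)
v = torch.randn(1, 4, 16, 8)

out = flex_attention(q, k, v, score_mod=causal)  # -> (1, 4, 16, 8)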