optimize lib dependencies

zxy 2025-02-19 13:46:33 +08:00
parent 32d92585ed
commit cf21982aa5
3 changed files with 2 additions and 3 deletions

@@ -1,4 +1,3 @@
-from attrdict import AttrDict
 from dataclasses import dataclass
 import logging
 import gc
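
The first hunk drops the now-unused attrdict import while keeping dataclasses, which suggests AttrDict-style config objects were replaced with plain dataclasses. A minimal sketch of that swap, assuming a hypothetical Config class with made-up fields (the real fields are not shown in the diff):

from dataclasses import dataclass

@dataclass
class Config:
    # Hypothetical fields for illustration; not taken from the repository.
    num_heads: int = 8
    embed_dim: int = 768

cfg = Config(num_heads=12)
print(cfg.num_heads)  # same attribute-style access AttrDict gave, with no extra dependency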

@@ -13,7 +13,6 @@ from timm.layers import (
 )
 from timm.models._manipulate import named_apply, checkpoint_seq, adapt_input_conv
 from transformers.modeling_utils import is_flash_attn_2_available
-from xformers.ops import memory_efficient_attention
 from functools import partial
@@ -134,6 +133,8 @@ class Attention(nn.Module):
         self.proj_drop = nn.Dropout(proj_drop) if proj_drop > 0. else nn.Identity()
     def forward(self, x: torch.Tensor) -> torch.Tensor:
+        from xformers.ops import memory_efficient_attention
         B, N, C = x.shape
         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim)
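
The second file defers the xformers import from module load time into Attention.forward, so merely importing the model file no longer hard-requires xformers. A self-contained sketch of the pattern; the simplified Attention class and the PyTorch-SDPA fallback are assumptions for illustration, not the repository's actual code:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Attention(nn.Module):
    # Simplified stand-in; the real class takes more parameters (drop rates, qk-norm, ...).
    def __init__(self, dim: int, num_heads: int = 8) -> None:
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.qkv = nn.Linear(dim, dim * 3)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim)
        q, k, v = qkv.unbind(2)  # each [B, N, heads, head_dim]
        try:
            # Imported here, not at module top: the dependency is only
            # required when this code path actually runs.
            from xformers.ops import memory_efficient_attention
            out = memory_efficient_attention(q, k, v)  # expects [B, M, H, K]
        except ImportError:
            # Fallback (an assumption, not part of the commit): PyTorch SDPA,
            # which wants the [B, H, M, K] layout.
            q, k, v = (t.transpose(1, 2) for t in (q, k, v))
            out = F.scaled_dot_product_attention(q, k, v).transpose(1, 2)
        return self.proj(out.reshape(B, N, C))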

@@ -4,7 +4,6 @@ xformers>=0.0.21
 timm>=0.9.16
 accelerate
 sentencepiece
-attrdict
 einops
 # for gradio demo
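
With the import now lazy, callers can probe for xformers at runtime before choosing an attention backend. A small sketch using importlib; the flag name is an assumption:

import importlib.util

# True if xformers is installed; checked without importing it eagerly.
HAS_XFORMERS = importlib.util.find_spec("xformers") is not None

if not HAS_XFORMERS:
    print("xformers unavailable; falling back to plain PyTorch attention")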