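# Project dependencies. A minimal usage note, assuming this file is saved
# as requirements.txt in the repository root:
#   pip install -r requirements.txt
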
transformers==4.31.0
accelerate
tiktoken
einops

# flash-attention (optional; build from source as follows)
# git clone -b v1.0.8 https://github.com/Dao-AILab/flash-attention
# cd flash-attention && pip install .
# pip install csrc/layer_norm
# pip install csrc/rotary
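# A quick sanity check that the optional flash-attention build succeeded
# (assuming the package imports as flash_attn, per the repository above):
#   python -c "import flash_attn; print(flash_attn.__version__)"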

torch  # 2.0.1 recommended
safetensors
bitsandbytes
transformers_stream_generator
scipy

loguru
about-time