hans00 committed
Commit 26829c7 · unverified · 1 Parent(s): 646604a

Do not use flash-attn

Files changed (2):
  1. app.py +0 -1
  2. requirements.txt +0 -1
app.py CHANGED
@@ -71,7 +71,6 @@ def get_interface(model_name: str):
         backend=outetts.Backend.HF,
         additional_model_config={
             "device_map": "auto" if has_cuda else "cpu",
-            "attn_implementation": "flash_attention_2" if has_cuda else "eager",
             "quantization_config": BitsAndBytesConfig(
                 load_in_8bit=True
             ) if has_cuda else None,
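With the attn_implementation key dropped, transformers no longer needs the flash-attn package and falls back to its default attention implementation. A minimal sketch of the effective load call after this change, assuming additional_model_config is forwarded to from_pretrained() (the model id below is a placeholder, not taken from this commit):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

has_cuda = torch.cuda.is_available()

model = AutoModelForCausalLM.from_pretrained(
    "OuteAI/OuteTTS-0.3-500M",  # placeholder model id, not from this commit
    device_map="auto" if has_cuda else "cpu",
    # attn_implementation is omitted, so transformers selects its default
    # attention backend instead of requiring flash-attn to be installed
    quantization_config=BitsAndBytesConfig(load_in_8bit=True) if has_cuda else None,
)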
requirements.txt CHANGED
@@ -3,7 +3,6 @@
 llama-cpp-python
 outetts@git+https://github.com/mybigday/OuteTTS.git@fix
 accelerate
-flash-attn
 absl-py==2.3.1
 aiofiles==24.1.0
 aiohappyeyeballs==2.6.1