hans00 committed
Commit a6b9820 · unverified · 1 Parent(s): e72b0a5

Fix work for models < 1.0

Files changed (1)
  1. app.py +43 -8
app.py CHANGED

@@ -8,6 +8,9 @@ import hashlib
 import os
 from functools import lru_cache
 from typing import Optional
+from outetts.models.info import MODEL_INFO
+from outetts.utils import helpers
+from huggingface_hub import hf_hub_download
 
 # Available OuteTTS models based on the documentation
 MODELS = {v.value: v for _, v in outetts.Models.__members__.items()}
@@ -23,17 +26,48 @@ def get_file_hash(file_path):
             hash_md5.update(chunk)
     return hash_md5.hexdigest()
 
+def try_auto_model_config(model: outetts.Models, backend: outetts.Backend, quantization: outetts.LlamaCppQuantization):
+    model_config = MODEL_INFO[model]
+    try:
+        repo = f"OuteAI/{model.value}-GGUF"
+        filename = f"{model.value}-{quantization.value}.gguf"
+        model_path = hf_hub_download(
+            repo_id=repo,
+            filename=filename,
+            local_dir=os.path.join(helpers.get_cache_dir(), "gguf"),
+            local_files_only=False
+        )
+        return outetts.ModelConfig(
+            model_path=model_path,
+            tokenizer_path=f"OuteAI/{model.value}",
+            backend=backend,
+            n_gpu_layers=99,
+            verbose=False,
+            device=None,
+            dtype=None,
+            additional_model_config={},
+            audio_codec_path=None,
+            **model_config
+        )
+    except Exception as e:
+        print(f"Error: {e}")
+        return None
+
 @lru_cache(maxsize=5)
 def get_cached_interface(model_name: str):
     """Get cached interface instance for the model."""
     model = MODELS[model_name]
-
-    # Configure the model
-    config = outetts.ModelConfig.auto_config(
-        model=model,
-        backend=outetts.Backend.LLAMACPP,
-        quantization=outetts.LlamaCppQuantization.Q6_K,
-    )
+
+    config = try_auto_model_config(model, outetts.Backend.LLAMACPP, outetts.LlamaCppQuantization.Q6_K)
+    if not config:
+        # Fallback to HF model
+        model_config = MODEL_INFO[model]
+        config = outetts.ModelConfig(
+            model_path=f"OuteAI/{model_name}",
+            tokenizer_path=f"OuteAI/{model_name}",
+            backend=outetts.Backend.HF,
+            **model_config
+        )
 
     # Initialize the interface
     interface = outetts.Interface(config=config)
@@ -84,7 +118,8 @@ def create_speaker_and_generate(model_name, audio_file, test_text: Optional[str]
                 speaker=speaker,
                 sampler_config=outetts.SamplerConfig(
                     temperature=temperature
-                )
+                ),
+                max_length=MODEL_INFO[MODELS[model_name]]["max_seq_length"]
             )
         )
 
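
A minimal usage sketch of the patched loader, assuming the outetts API exactly as it appears in this diff; the call below is illustrative and not part of the commit:

# Hypothetical usage, not part of the commit: get_cached_interface() now tries
# the GGUF/llama.cpp path via try_auto_model_config() first, and falls back to
# the HF backend (built from MODEL_INFO) when no "OuteAI/<model>-GGUF" repo can
# be downloaded, as with models < 1.0.
model_name = next(iter(MODELS))               # any key from MODELS; illustrative
interface = get_cached_interface(model_name)  # result cached per model (lru_cache)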