ramimu committed on
Commit
d52290c
·
verified ·
1 Parent(s): b32e924

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -46
app.py CHANGED
@@ -112,79 +112,87 @@ if chatterbox_available:
112
  device = "cuda" if torch.cuda.is_available() else "cpu"
113
  print(f"Using device: {device}")
114
 
115
- # Try different loading methods based on the ChatterboxTTS API
 
 
 
116
  try:
117
- # Method 1: Try from_local with just the path (let it handle device internally)
118
- model = ChatterboxTTS.from_local(LOCAL_MODEL_PATH)
119
  print("Chatterbox model loaded successfully using from_local method.")
120
  except Exception as e1:
121
  print(f"from_local attempt failed: {e1}")
122
  try:
123
- # Method 2: Try from_pretrained with just the path
124
- model = ChatterboxTTS.from_pretrained(LOCAL_MODEL_PATH)
125
  print("Chatterbox model loaded successfully with from_pretrained.")
126
  except Exception as e2:
127
  print(f"from_pretrained failed: {e2}")
128
  try:
129
- # Method 3: Try with explicit CPU mapping in from_local
 
130
  import pathlib
 
 
131
  model_path = pathlib.Path(LOCAL_MODEL_PATH)
132
 
133
- # Load model components with explicit CPU mapping
 
 
134
  s3gen_path = model_path / "s3gen.pt"
135
  ve_path = model_path / "ve.pt"
136
  tokenizer_path = model_path / "tokenizer.json"
137
  t3_cfg_path = model_path / "t3_cfg.pt"
138
 
139
- print(f"Loading components with CPU mapping...")
140
- print(f" s3gen: {s3gen_path}")
141
- print(f" ve: {ve_path}")
142
- print(f" tokenizer: {tokenizer_path}")
143
- print(f" t3_cfg: {t3_cfg_path}")
144
-
145
- # Load components with CPU mapping
146
  s3gen = torch.load(s3gen_path, map_location=torch.device('cpu'))
 
 
147
  ve = torch.load(ve_path, map_location=torch.device('cpu'))
 
 
148
  t3_cfg = torch.load(t3_cfg_path, map_location=torch.device('cpu'))
149
 
150
- # Load tokenizer
151
- import json
152
  with open(tokenizer_path, 'r') as f:
153
- tokenizer = json.load(f)
154
-
155
- print("Components loaded, creating ChatterboxTTS instance...")
156
 
157
- # Try different constructor signatures
 
158
  try:
159
- # Try with all components
160
- model = ChatterboxTTS(s3gen, ve, tokenizer, t3_cfg)
161
- print("Chatterbox model loaded with 4-argument constructor.")
162
- except Exception as constructor_error:
163
- print(f"4-arg constructor failed: {constructor_error}")
164
- try:
165
- # Try with device parameter
166
- model = ChatterboxTTS(s3gen, ve, tokenizer, device=device)
167
- print("Chatterbox model loaded with device parameter.")
168
- except Exception as device_error:
169
- print(f"Device constructor failed: {device_error}")
170
- # Try creating with from_pretrained but patch the loading
171
- print("Attempting to patch model loading...")
172
-
173
- # Temporarily patch torch.load to always use CPU
174
- original_load = torch.load
175
- def patched_load(f, map_location=None, *args, **kwargs):
176
- return original_load(f, map_location=torch.device('cpu'), *args, **kwargs)
177
-
178
- torch.load = patched_load
179
- try:
180
- model = ChatterboxTTS.from_local(LOCAL_MODEL_PATH)
181
- print("Chatterbox model loaded with patched torch.load.")
182
- finally:
183
- torch.load = original_load
184
 
185
  except Exception as e3:
186
  print(f"Manual loading failed: {e3}")
187
- raise e3
 
 
 
 
 
 
 
 
 
 
 
188
 
189
  except Exception as e:
190
  print(f"ERROR: Failed to load Chatterbox model from local directory: {e}")
 
112
  device = "cuda" if torch.cuda.is_available() else "cpu"
113
  print(f"Using device: {device}")
114
 
115
+ # Based on API inspection:
116
+ # ChatterboxTTS.from_local signature: (ckpt_dir, device) -> 'ChatterboxTTS'
117
+ # ChatterboxTTS.from_pretrained signature: (device) -> 'ChatterboxTTS'
118
+
119
  try:
120
+ # Method 1: Use from_local with correct signature (ckpt_dir, device)
121
+ model = ChatterboxTTS.from_local(LOCAL_MODEL_PATH, device)
122
  print("Chatterbox model loaded successfully using from_local method.")
123
  except Exception as e1:
124
  print(f"from_local attempt failed: {e1}")
125
  try:
126
+ # Method 2: Use from_pretrained with device only
127
+ model = ChatterboxTTS.from_pretrained(device)
128
  print("Chatterbox model loaded successfully with from_pretrained.")
129
  except Exception as e2:
130
  print(f"from_pretrained failed: {e2}")
131
  try:
132
+ # Method 3: Manual loading with correct constructor signature
133
+ # ChatterboxTTS.__init__ signature: (self, t3, s3gen, ve, tokenizer, device, conds=None)
134
  import pathlib
135
+ import json
136
+
137
  model_path = pathlib.Path(LOCAL_MODEL_PATH)
138
 
139
+ print(f"Manual loading with correct constructor signature...")
140
+
141
+ # Load all components
142
  s3gen_path = model_path / "s3gen.pt"
143
  ve_path = model_path / "ve.pt"
144
  tokenizer_path = model_path / "tokenizer.json"
145
  t3_cfg_path = model_path / "t3_cfg.pt"
146
 
147
+ print(f" Loading s3gen from: {s3gen_path}")
 
 
 
 
 
 
148
  s3gen = torch.load(s3gen_path, map_location=torch.device('cpu'))
149
+
150
+ print(f" Loading ve from: {ve_path}")
151
  ve = torch.load(ve_path, map_location=torch.device('cpu'))
152
+
153
+ print(f" Loading t3_cfg from: {t3_cfg_path}")
154
  t3_cfg = torch.load(t3_cfg_path, map_location=torch.device('cpu'))
155
 
156
+ print(f" Loading tokenizer from: {tokenizer_path}")
 
157
  with open(tokenizer_path, 'r') as f:
158
+ tokenizer_data = json.load(f)
 
 
159
 
160
+ # The tokenizer might need to be instantiated as a proper object
161
+ # Let's try to use the ChatterboxTTS internal tokenizer class
162
  try:
163
+ from chatterbox.models.tokenizers.tokenizer import EnTokenizer
164
+ tokenizer = EnTokenizer.from_dict(tokenizer_data)
165
+ print(" Created EnTokenizer from JSON data")
166
+ except Exception as tok_error:
167
+ print(f" Could not create EnTokenizer: {tok_error}")
168
+ tokenizer = tokenizer_data # Use raw data as fallback
169
+
170
+ print(" Creating ChatterboxTTS instance with correct signature...")
171
+
172
+ # Constructor signature: (self, t3, s3gen, ve, tokenizer, device, conds=None)
173
+ model = ChatterboxTTS(
174
+ t3=t3_cfg,
175
+ s3gen=s3gen,
176
+ ve=ve,
177
+ tokenizer=tokenizer,
178
+ device=device
179
+ )
180
+ print("Chatterbox model loaded successfully with manual constructor.")
 
 
 
 
 
 
 
181
 
182
  except Exception as e3:
183
  print(f"Manual loading failed: {e3}")
184
+ print(f"Detailed error: {str(e3)}")
185
+
186
+ # Last resort: try with different parameter orders
187
+ try:
188
+ print("Trying alternative parameter order...")
189
+ model = ChatterboxTTS(
190
+ s3gen, ve, tokenizer, t3_cfg, device
191
+ )
192
+ print("Chatterbox model loaded with alternative parameter order.")
193
+ except Exception as e4:
194
+ print(f"Alternative parameter order failed: {e4}")
195
+ raise e3
196
 
197
  except Exception as e:
198
  print(f"ERROR: Failed to load Chatterbox model from local directory: {e}")