Update app.py
app.py CHANGED
@@ -19,6 +19,34 @@ try:
     from chatterbox.tts import ChatterboxTTS
     chatterbox_available = True
     print("Chatterbox TTS imported successfully")
+
+    # Inspect the ChatterboxTTS class to understand its API
+    import inspect
+    print(f"ChatterboxTTS methods: {[method for method in dir(ChatterboxTTS) if not method.startswith('_')]}")
+
+    # Check constructor signature
+    try:
+        sig = inspect.signature(ChatterboxTTS.__init__)
+        print(f"ChatterboxTTS.__init__ signature: {sig}")
+    except:
+        pass
+
+    # Check from_local signature if it exists
+    if hasattr(ChatterboxTTS, 'from_local'):
+        try:
+            sig = inspect.signature(ChatterboxTTS.from_local)
+            print(f"ChatterboxTTS.from_local signature: {sig}")
+        except:
+            pass
+
+    # Check from_pretrained signature if it exists
+    if hasattr(ChatterboxTTS, 'from_pretrained'):
+        try:
+            sig = inspect.signature(ChatterboxTTS.from_pretrained)
+            print(f"ChatterboxTTS.from_pretrained signature: {sig}")
+        except:
+            pass
+
 except ImportError as e:
     print(f"Failed to import ChatterboxTTS: {e}")
     print("Trying alternative import...")
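The block added above probes the ChatterboxTTS API at import time with the standard-library inspect module. The same idea, factored into a reusable helper, might look like the sketch below; describe_api and the list of probed method names are illustrative choices, not part of this commit.

import inspect

def describe_api(cls, candidates=('__init__', 'from_local', 'from_pretrained')):
    """Print a class's public methods and, where retrievable, their signatures."""
    public = [name for name in dir(cls) if not name.startswith('_')]
    print(f"{cls.__name__} methods: {public}")
    for name in candidates:
        fn = getattr(cls, name, None)
        if fn is None:
            continue
        try:
            print(f"{cls.__name__}.{name} signature: {inspect.signature(fn)}")
        except (TypeError, ValueError):
            # Some C-implemented callables expose no retrievable signature
            pass

# In app.py this would be called as: describe_api(ChatterboxTTS)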
@@ -84,46 +112,76 @@ if chatterbox_available:
     device = "cuda" if torch.cuda.is_available() else "cpu"
     print(f"Using device: {device}")
 
-    #
-    # based on the error message showing from_local is called internally
+    # Try different loading methods based on the ChatterboxTTS API
     try:
+        # Method 1: Try from_local with just the path (let it handle device internally)
+        model = ChatterboxTTS.from_local(LOCAL_MODEL_PATH)
         print("Chatterbox model loaded successfully using from_local method.")
     except Exception as e1:
         print(f"from_local attempt failed: {e1}")
         try:
-            #
-            model
-            print("Chatterbox model loaded successfully with corrected from_pretrained.")
+            # Method 2: Try from_pretrained with just the path
+            model = ChatterboxTTS.from_pretrained(LOCAL_MODEL_PATH)
+            print("Chatterbox model loaded successfully with from_pretrained.")
         except Exception as e2:
-            print(f"
+            print(f"from_pretrained failed: {e2}")
             try:
-                # Try
-                import
-                ve_path = os.path.join(LOCAL_MODEL_PATH, "ve.pt")
-                tokenizer_path = os.path.join(LOCAL_MODEL_PATH, "tokenizer.json")
-                t3_cfg_path = os.path.join(LOCAL_MODEL_PATH, "t3_cfg.pt")
+                # Method 3: Try with explicit CPU mapping in from_local
+                import pathlib
+                model_path = pathlib.Path(LOCAL_MODEL_PATH)
 
+                # Load model components with explicit CPU mapping
+                s3gen_path = model_path / "s3gen.pt"
+                ve_path = model_path / "ve.pt"
+                tokenizer_path = model_path / "tokenizer.json"
+                t3_cfg_path = model_path / "t3_cfg.pt"
+
+                print(f"Loading components with CPU mapping...")
                 print(f" s3gen: {s3gen_path}")
                 print(f" ve: {ve_path}")
                 print(f" tokenizer: {tokenizer_path}")
                 print(f" t3_cfg: {t3_cfg_path}")
 
-                # Load
-                s3gen = torch.load(s3gen_path, map_location=device)
-                ve = torch.load(ve_path, map_location=device)
+                # Load components with CPU mapping
+                s3gen = torch.load(s3gen_path, map_location=torch.device('cpu'))
+                ve = torch.load(ve_path, map_location=torch.device('cpu'))
+                t3_cfg = torch.load(t3_cfg_path, map_location=torch.device('cpu'))
 
                 # Load tokenizer
                 import json
                 with open(tokenizer_path, 'r') as f:
                     tokenizer = json.load(f)
 
+                print("Components loaded, creating ChatterboxTTS instance...")
+
+                # Try different constructor signatures
+                try:
+                    # Try with all components
+                    model = ChatterboxTTS(s3gen, ve, tokenizer, t3_cfg)
+                    print("Chatterbox model loaded with 4-argument constructor.")
+                except Exception as constructor_error:
+                    print(f"4-arg constructor failed: {constructor_error}")
+                    try:
+                        # Try with device parameter
+                        model = ChatterboxTTS(s3gen, ve, tokenizer, device=device)
+                        print("Chatterbox model loaded with device parameter.")
+                    except Exception as device_error:
+                        print(f"Device constructor failed: {device_error}")
+                        # Try creating with from_pretrained but patch the loading
+                        print("Attempting to patch model loading...")
+
+                        # Temporarily patch torch.load to always use CPU
+                        original_load = torch.load
+                        def patched_load(f, map_location=None, *args, **kwargs):
+                            return original_load(f, map_location=torch.device('cpu'), *args, **kwargs)
+
+                        torch.load = patched_load
+                        try:
+                            model = ChatterboxTTS.from_local(LOCAL_MODEL_PATH)
+                            print("Chatterbox model loaded with patched torch.load.")
+                        finally:
+                            torch.load = original_load
+
             except Exception as e3:
                 print(f"Manual loading failed: {e3}")
                 raise e3
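The last fallback inside Method 3 temporarily swaps out torch.load so that every checkpoint is mapped to the CPU. The same patch can be packaged as a context manager so the swap and the restore always travel together; this is only a sketch under the same assumptions as app.py (ChatterboxTTS, LOCAL_MODEL_PATH), and force_cpu_load is a hypothetical name.

import contextlib
import torch

@contextlib.contextmanager
def force_cpu_load():
    """Temporarily force torch.load to map all tensors to the CPU, then restore it."""
    original_load = torch.load
    def patched_load(f, map_location=None, *args, **kwargs):
        # Ignore any caller-supplied map_location and pin everything to CPU
        return original_load(f, map_location=torch.device('cpu'), *args, **kwargs)
    torch.load = patched_load
    try:
        yield
    finally:
        torch.load = original_load

# Usage:
# with force_cpu_load():
#     model = ChatterboxTTS.from_local(LOCAL_MODEL_PATH)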
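More generally, the nested try/except ladder in this hunk can be flattened into a loop over candidate loaders, which keeps the per-strategy logging in one place. A sketch under the same assumptions; load_with_fallbacks is a hypothetical helper, not part of this commit.

def load_with_fallbacks(loaders):
    """Try each (name, callable) pair in order and return the first model that loads."""
    errors = []
    for name, loader in loaders:
        try:
            model = loader()
            print(f"Chatterbox model loaded via {name}.")
            return model
        except Exception as e:
            print(f"{name} failed: {e}")
            errors.append((name, e))
    raise RuntimeError(f"All loading strategies failed: {errors}")

# Usage:
# model = load_with_fallbacks([
#     ("from_local", lambda: ChatterboxTTS.from_local(LOCAL_MODEL_PATH)),
#     ("from_pretrained", lambda: ChatterboxTTS.from_pretrained(LOCAL_MODEL_PATH)),
# ])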