Update app.py
app.py CHANGED
@@ -8,6 +8,21 @@ import json
 import asyncio
 import logging
 
+# ===== MODEL CONFIGURATION =====
+# Switch easily between the German models:
+USE_KARTOFFEL_MODEL = True  # True = Kartoffel, False = Canopy German
+
+if USE_KARTOFFEL_MODEL:
+    MODEL_NAME = "SebastianBodza/Kartoffel_Orpheus-3B_german_natural-v0.1"
+    TOKENIZER_NAME = "SebastianBodza/Kartoffel_Orpheus-3B_german_natural-v0.1"
+    DEFAULT_VOICE = "Jakob"
+    print("🥔 Using Kartoffel German Model")
+else:
+    MODEL_NAME = "canopylabs/3b-de-ft-research_release"
+    TOKENIZER_NAME = "canopylabs/3b-de-ft-research_release"
+    DEFAULT_VOICE = "thomas"
+    print("🇩🇪 Using Canopy German Model")
+
 # Add the orpheus-tts module to the path
 sys.path.append(os.path.join(os.path.dirname(__file__), 'orpheus-tts'))
@@ -39,10 +54,10 @@ async def startup_event():
     global engine
     try:
         engine = OrpheusModel(
-            model_name=
-            tokenizer=
+            model_name=MODEL_NAME,
+            tokenizer=TOKENIZER_NAME
         )
-        logger.info("Orpheus model loaded successfully")
+        logger.info(f"Orpheus model loaded successfully: {MODEL_NAME}")
     except Exception as e:
         logger.error(f"Error loading Orpheus model: {e}")
         raise e
@@ -79,7 +94,7 @@ async def health_check():
 @app.get("/tts")
 async def tts_stream(
     prompt: str = Query(..., description="Text to synthesize"),
-    voice: str = Query(
+    voice: str = Query(DEFAULT_VOICE, description="Voice to use"),
     temperature: float = Query(0.4, description="Temperature for generation"),
     top_p: float = Query(0.9, description="Top-p for generation"),
     max_tokens: int = Query(2000, description="Maximum tokens"),
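For reference, a minimal client for the updated /tts endpoint could look like the sketch below. The host, port (localhost:8000), output filename, and audio format are assumptions for illustration, not part of this commit.

import requests

params = {
    "prompt": "Hallo, wie geht es dir?",
    "voice": "Jakob",  # Kartoffel default; omit to use the server's DEFAULT_VOICE
    "temperature": 0.4,
    "top_p": 0.9,
    "max_tokens": 2000,
}

# Stream the response to disk; the actual audio container/format depends on the server.
with requests.get("http://localhost:8000/tts", params=params, stream=True) as resp:
    resp.raise_for_status()
    with open("tts_output.wav", "wb") as f:
        for chunk in resp.iter_content(chunk_size=4096):
            f.write(chunk)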
@@ -139,7 +154,7 @@ async def websocket_tts(websocket: WebSocket):
             request = json.loads(data)
 
             prompt = request.get("prompt", "")
-            voice = request.get("voice", 
+            voice = request.get("voice", DEFAULT_VOICE)
             temperature = request.get("temperature", 0.4)
             top_p = request.get("top_p", 0.9)
             max_tokens = request.get("max_tokens", 2000)
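A matching WebSocket client sketch, mirroring the request fields read by the handler above. The diff does not show what the server sends back (binary audio frames or JSON), so the receive loop simply reports each incoming message; the URL and port are again assumed.

import asyncio
import json
import websockets

async def main():
    async with websockets.connect("ws://localhost:8000/ws/tts") as ws:
        # Fields match the request.get(...) calls in the handler;
        # "voice" may be omitted to fall back to DEFAULT_VOICE.
        await ws.send(json.dumps({
            "prompt": "Guten Morgen!",
            "temperature": 0.4,
            "top_p": 0.9,
            "max_tokens": 2000,
        }))
        async for message in ws:
            print(type(message).__name__, len(message))

asyncio.run(main())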
@@ -211,7 +226,7 @@ async def root():
         "message": "Orpheus TTS Server",
         "endpoints": {
             "health": "/health",
-            "tts_http": "/tts?prompt=your_text&voice=
+            "tts_http": f"/tts?prompt=your_text&voice={DEFAULT_VOICE}",
             "tts_websocket": "/ws/tts",
             "voices": "/voices"
         },
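Because the root endpoint now interpolates DEFAULT_VOICE into its example URL, it doubles as a quick check of which model configuration is active (localhost:8000 assumed):

import requests

info = requests.get("http://localhost:8000/").json()
print(info["endpoints"]["tts_http"])
# e.g. "/tts?prompt=your_text&voice=Jakob" when USE_KARTOFFEL_MODEL is True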