#!/usr/bin/env python3
"""
Hugging Face Space 首页 - MOSS-TTSD
参考 fnlp/MOSS-TTSD Space 的实现,并结合本仓 UI 与文档做了增强:
- 默认中文界面,保留简洁工作流
- 提供场景选择与一键加载
- 支持文本规范化选项
- 右侧提供简明的使用说明与文档链接
如需在本地运行本 Space 脚本:
python hf_space/app.py
"""
import os
import json
import tempfile
from typing import Optional, Tuple
import gradio as gr
import torch
import torchaudio
# HF Spaces GPU 调度
try:
    import spaces  # Available on HF Spaces; its absence locally is harmless
except Exception:  # noqa: BLE001
    class _DummySpaces:  # Fallback stub so the script also runs locally
        def GPU(self, *args, **kwargs):  # type: ignore[override]
            def deco(fn):
                return fn
            return deco
    spaces = _DummySpaces()  # type: ignore
from huggingface_hub import hf_hub_download
# Reuse the shared inference utilities from this repo
from generation_utils import load_model, process_batch
# =========================
# Configuration
# =========================
SYSTEM_PROMPT = (
"You are a speech synthesizer that generates natural, realistic, and human-like conversational audio from dialogue text."
)
# Scenario configuration map
SCENARIO_CONFIG = {
"科技播客_AI发展": {
"title": "🤖 科技播客 - AI发展趋势",
"description": "探讨人工智能的最新发展与未来趋势",
"file": "scenarios/科技播客_AI发展.jsonl"
},
"教育播客_学习方法": {
"title": "📚 教育播客 - 高效学习方法",
"description": "分享科学的学习方法与技巧",
"file": "scenarios/教育播客_学习方法.jsonl"
},
"生活播客_美食文化": {
"title": "🍜 生活播客 - 美食文化探索",
"description": "品味各地美食文化的魅力",
"file": "scenarios/生活播客_美食文化.jsonl"
},
"商业播客_创业经验": {
"title": "💼 商业播客 - 创业经验分享",
"description": "创业路上的经验教训与心得",
"file": "scenarios/商业播客_创业经验.jsonl"
},
"健康播客_运动健身": {
"title": "🏃 健康播客 - 运动健身指南",
"description": "科学健身与健康生活方式",
"file": "scenarios/健康播客_运动健身.jsonl"
},
"心理播客_情绪管理": {
"title": "🧠 心理播客 - 情绪管理技巧",
"description": "探索情绪管理与心理健康",
"file": "scenarios/心理播客_情绪管理.jsonl"
}
}
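# For reference, a sketch of the scenario-file layout the entries above point
# at, inferred from the keys that get_scenario_examples() reads below; the
# on-disk files ship separately, so treat this as an assumed example:
#
# {
#     "text": "[S1]...[S2]...",
#     "prompt_audio_speaker1": "speaker1.wav",
#     "prompt_text_speaker1": "...",
#     "prompt_audio_speaker2": "speaker2.wav",
#     "prompt_text_speaker2": "...",
#     "base_path": "scenarios"
# }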
# Default prompt-audio configuration
DEFAULT_AUDIO_CONFIG = {
"speaker1": {
"audio": "examples/zh_spk1_moon.wav",
"text": "周一到周五,每天早晨七点半到九点半的直播片段。言下之意呢,就是废话有点多,大家也别嫌弃,因为这都是直播间最真实的状态了。"
},
"speaker2": {
"audio": "examples/zh_spk2_moon.wav",
"text": "如果大家想听到更丰富更及时的直播内容,记得在周一到周五准时进入直播间,和大家一起畅聊新消费新科技新趋势。"
}
}
MODEL_PATH = "fnlp/MOSS-TTSD-v0.5"
SPT_CONFIG_PATH = "XY_Tokenizer/config/xy_tokenizer_config.yaml"
# Auto-download the XY_Tokenizer checkpoint into a local cache (HF Spaces reuses it)
os.makedirs("XY_Tokenizer/weights", exist_ok=True)
try:
SPT_CHECKPOINT_PATH = hf_hub_download(
repo_id="fnlp/XY_Tokenizer_TTSD_V0",
filename="xy_tokenizer.ckpt",
cache_dir="XY_Tokenizer/weights",
)
except Exception as e: # noqa: BLE001
    # On failure, keep the placeholder path; model initialization raises a clearer error later
print(f"⚠️ XY_Tokenizer 权重下载失败: {e}")
SPT_CHECKPOINT_PATH = "XY_Tokenizer/weights/xy_tokenizer.ckpt"
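# Offline alternative (assumption: the standard huggingface_hub CLI is
# available): fetch the checkpoint manually and place it at the flat path the
# fallback above expects, e.g.
#   huggingface-cli download fnlp/XY_Tokenizer_TTSD_V0 xy_tokenizer.ckpt \
#       --local-dir XY_Tokenizer/weights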
# Global caches
tokenizer = None
model = None
spt = None
device = None
# =========================
# Utility functions
# =========================
def get_scenario_examples():
"""获取所有可用的场景示例,整合 JSON 文件和默认配置"""
scenarios = {}
    # Load scenarios from JSON files (each file is expected to hold a single JSON object)
for key, config in SCENARIO_CONFIG.items():
try:
file_path = config["file"]
print(f"🔍 检查场景文件: {file_path}")
if os.path.exists(file_path):
with open(file_path, "r", encoding="utf-8") as f:
data = json.load(f)
scenarios[config["title"]] = {
"text": data.get("text", ""),
"description": config["description"],
"audio1": data.get("prompt_audio_speaker1", ""),
"text1": data.get("prompt_text_speaker1", ""),
"audio2": data.get("prompt_audio_speaker2", ""),
"text2": data.get("prompt_text_speaker2", ""),
"base_path": data.get("base_path", ""),
}
print(f"✅ 成功加载场景: {config['title']}")
else:
print(f"❌ 场景文件不存在: {file_path}")
except Exception as e:
print(f"⚠️ 加载场景 {key} 失败: {e}")
    # Add the default example (guarantees at least one usable scenario)
scenarios["🎧 默认示例"] = {
"text": (
"[S1]大家好,欢迎收听今天的节目,我是主播小雨。"
"[S2]大家好,我是嘉宾阿明,很高兴和大家见面。"
"[S1]今天我们要聊的话题非常有趣,相信大家会喜欢的。"
"[S2]是的,让我们开始今天的精彩内容吧!"
),
"description": "默认的示例对话,适合快速体验",
"audio1": DEFAULT_AUDIO_CONFIG["speaker1"]["audio"],
"text1": DEFAULT_AUDIO_CONFIG["speaker1"]["text"],
"audio2": DEFAULT_AUDIO_CONFIG["speaker2"]["audio"],
"text2": DEFAULT_AUDIO_CONFIG["speaker2"]["text"],
"base_path": "",
}
print(f"📊 总共加载了 {len(scenarios)} 个场景")
return scenarios
def load_scenario_data(scenario_key: str):
"""加载场景数据,确保音频和文本一一对应"""
if scenario_key not in SCENARIO_CONFIG:
return None, None, None, None, None
try:
scenario_file = SCENARIO_CONFIG[scenario_key]["file"]
if not os.path.exists(scenario_file):
return None, None, None, None, None
with open(scenario_file, "r", encoding="utf-8") as f:
data = json.load(f)
        # Resolve prompt-audio paths relative to base_path
audio1_path = data.get("prompt_audio_speaker1", "")
audio2_path = data.get("prompt_audio_speaker2", "")
if audio1_path and not audio1_path.startswith("/"):
audio1_path = os.path.join(data.get("base_path", ""), audio1_path)
if audio2_path and not audio2_path.startswith("/"):
audio2_path = os.path.join(data.get("base_path", ""), audio2_path)
return (
data.get("text", ""),
audio1_path if os.path.exists(audio1_path) else None,
data.get("prompt_text_speaker1", ""),
audio2_path if os.path.exists(audio2_path) else None,
data.get("prompt_text_speaker2", "")
)
except Exception as e:
print(f"❌ 加载场景失败: {e}")
return None, None, None, None, None
def load_default_audio():
"""加载默认音频和文本"""
audio1 = DEFAULT_AUDIO_CONFIG["speaker1"]["audio"]
text1 = DEFAULT_AUDIO_CONFIG["speaker1"]["text"]
audio2 = DEFAULT_AUDIO_CONFIG["speaker2"]["audio"]
text2 = DEFAULT_AUDIO_CONFIG["speaker2"]["text"]
    # Default dialogue text
default_text = (
"[S1]大家好,欢迎收听今天的节目,我是主播小雨。"
"[S2]大家好,我是嘉宾阿明,很高兴和大家见面。"
"[S1]今天我们要聊的话题非常有趣,相信大家会喜欢的。"
"[S2]是的,让我们开始今天的精彩内容吧!"
)
return (
default_text,
audio1 if os.path.exists(audio1) else None,
text1,
audio2 if os.path.exists(audio2) else None,
text2
)
def initialize_model():
global tokenizer, model, spt, device
if tokenizer is not None:
return tokenizer, model, spt, device
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"🔧 初始化模型,设备: {device}")
if not os.path.exists(SPT_CHECKPOINT_PATH):
raise FileNotFoundError(
"未找到 XY_Tokenizer 权重,请检查网络或手动放置到 XY_Tokenizer/weights/xy_tokenizer.ckpt"
)
tokenizer, model, spt = load_model(
MODEL_PATH,
SPT_CONFIG_PATH,
SPT_CHECKPOINT_PATH,
)
model = model.to(device)
spt = spt.to(device)
    # Cap the generation length to avoid Space timeouts
try:
model.generation_config.max_new_tokens = min(
getattr(model.generation_config, "max_new_tokens", 4096), 4096
)
except Exception: # noqa: BLE001
pass
print("✅ 模型初始化完成!")
return tokenizer, model, spt, device
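# Local-debugging note (an assumption, not part of the Space workflow): because
# initialize_model() falls back to "cpu" whenever torch.cuda.is_available() is
# False, the script can be exercised without a GPU by hiding CUDA devices:
#
#   CUDA_VISIBLE_DEVICES="" python hf_space/app.py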
# =========================
# Inference function (called by the UI)
# =========================
@spaces.GPU(duration=150)
def generate_dialogue_audio(
dialogue_text: str,
speaker1_audio: Optional[str],
speaker1_text: str,
speaker2_audio: Optional[str],
speaker2_text: str,
use_normalize: bool,
) -> Tuple[Optional[str], str]:
try:
if not dialogue_text or not dialogue_text.strip():
return None, "❌ 请输入对话文本"
        # A single prompt audio is allowed: synthesis falls back to single-audio mode
if not speaker1_audio and not speaker2_audio:
return None, "❌ 请上传至少一个参考音频文件"
        # Initialize the model
tokenizer, model, spt, device = initialize_model()
        # Assemble the item from the inputs (process_batch supports one or two speakers)
item = {"text": dialogue_text}
if speaker1_audio and speaker2_audio:
item.update(
{
"prompt_audio_speaker1": speaker1_audio,
"prompt_text_speaker1": speaker1_text or "",
"prompt_audio_speaker2": speaker2_audio,
"prompt_text_speaker2": speaker2_text or "",
}
)
else:
            # Single-audio mode
single_audio = speaker1_audio or speaker2_audio
single_text = speaker1_text or speaker2_text or ""
item.update({"prompt_audio": single_audio, "prompt_text": single_text})
        # Run synthesis
actual_texts_data, audio_results = process_batch(
batch_items=[item],
tokenizer=tokenizer,
model=model,
spt=spt,
device=device,
system_prompt=SYSTEM_PROMPT,
start_idx=0,
use_normalize=use_normalize,
)
if not audio_results or audio_results[0] is None:
return None, "❌ 音频生成失败"
audio_result = audio_results[0]
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            out_path = tmp.name  # close the handle before torchaudio writes to the path
        torchaudio.save(out_path, audio_result["audio_data"], audio_result["sample_rate"])
status = (
f"✅ 生成成功!\n\n"
f"📊 音频信息:\n"
f"- 采样率: {audio_result['sample_rate']} Hz\n"
f"- 时长: {audio_result['audio_data'].shape[-1] / audio_result['sample_rate']:.2f} 秒\n"
f"- 通道数: {audio_result['audio_data'].shape[0]}\n\n"
f"📝 文本处理:\n"
f"- 是否规范化: {use_normalize}\n"
)
return out_path, status
except Exception as e: # noqa: BLE001
import traceback
return None, f"❌ 生成出错: {e}\n\n{traceback.format_exc()}"
# =========================
# UI construction
# =========================
def create_space_ui() -> gr.Blocks:
custom_css = """
.gradio-container {
max-width: 1400px !important;
margin: 0 auto !important;
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.header {
text-align: center;
margin-bottom: 2rem;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
padding: 2.5rem;
border-radius: 20px;
color: white;
box-shadow: 0 10px 30px rgba(0,0,0,0.2);
}
.header h1 {
font-size: 2.5rem;
margin-bottom: 0.5rem;
font-weight: 700;
}
.header p {
font-size: 1.2rem;
opacity: 0.9;
margin: 0;
}
.section {
background: #f8fafc;
padding: 1.5rem;
border-radius: 15px;
border: 1px solid #e2e8f0;
margin-bottom: 1rem;
box-shadow: 0 2px 10px rgba(0,0,0,0.05);
}
.quick-btn {
background: linear-gradient(45deg, #FF6B6B, #4ECDC4) !important;
border: none !important;
color: white !important;
font-weight: 600 !important;
border-radius: 10px !important;
transition: all 0.3s ease !important;
}
.quick-btn:hover {
transform: translateY(-2px) !important;
box-shadow: 0 5px 15px rgba(0,0,0,0.2) !important;
}
.generate-btn {
background: linear-gradient(45deg, #667eea, #764ba2) !important;
border: none !important;
color: white !important;
font-weight: 700 !important;
font-size: 1.1rem !important;
border-radius: 15px !important;
padding: 1rem 2rem !important;
width: 100% !important;
transition: all 0.3s ease !important;
}
.generate-btn:hover {
transform: translateY(-3px) !important;
box-shadow: 0 8px 25px rgba(102, 126, 234, 0.4) !important;
}
.speaker-section {
background: linear-gradient(135deg, #667eea15, #764ba215);
padding: 1.5rem;
border-radius: 15px;
border: 2px solid #667eea20;
}
"""
with gr.Blocks(css=custom_css, title="🎙️ MOSS-TTSD | Hugging Face Space", theme=gr.themes.Soft()) as demo:
gr.HTML(
"""
<div class="header">
<h1>🎙️ MOSS-TTSD 对话语音合成</h1>
<p>零样本双说话者对话合成 · 默认中文界面 · 一键加载场景</p>
</div>
"""
)
with gr.Row():
            # Left column: inputs
with gr.Column(scale=3):
with gr.Group():
gr.Markdown("### 📝 对话文本")
dialogue_text = gr.TextArea(
label="",
lines=6,
placeholder="请输入对话内容,使用[S1]/[S2]标记不同说话者...",
value=(
"[S1]大家好,欢迎收听今天的《AI前沿》播客。"
"[S2]你好,我是嘉宾阿明。"
"[S1]今天我们来聊聊最新的语音合成技术,特别是MOSS-TTSD这个项目。"
"[S2]是的,这个开源项目确实很有意思,它能生成非常自然的对话音频。"
),
)
with gr.Group():
gr.Markdown("### 🚀 快速操作")
                    # Fetch scenario options and default to the first one
scenario_choices = list(get_scenario_examples().keys())
default_scenario = scenario_choices[0] if scenario_choices else None
scenario_dropdown = gr.Dropdown(
choices=scenario_choices,
value=default_scenario,
label="🎭 选择场景",
info="选择一个预设场景,自动填充对话文本和参考音频"
)
with gr.Row():
btn_load_scenario = gr.Button("📥 加载场景", variant="secondary")
btn_load_default = gr.Button("🎧 默认音频", variant="secondary")
with gr.Row():
with gr.Group():
gr.Markdown("### 🎵 说话者1 (女声)")
speaker1_audio = gr.Audio(label="参考音频", type="filepath")
speaker1_text = gr.TextArea(
label="参考文本",
lines=2,
placeholder="请输入与参考音频内容完全匹配的文本..."
)
with gr.Group():
gr.Markdown("### 🎵 说话者2 (男声)")
speaker2_audio = gr.Audio(label="参考音频", type="filepath")
speaker2_text = gr.TextArea(
label="参考文本",
lines=2,
placeholder="请输入与参考音频内容完全匹配的文本..."
)
with gr.Group():
gr.Markdown("### ⚙️ 设置")
with gr.Row():
use_normalize = gr.Checkbox(label="✅ 文本标准化(推荐)", value=True)
btn_generate = gr.Button("🎬 开始合成", variant="primary")
            # Right column: output and usage notes
with gr.Column(scale=2):
with gr.Group():
gr.Markdown("### 🎧 生成结果")
output_audio = gr.Audio(label="生成的音频", type="filepath")
status_info = gr.TextArea(label="状态信息", lines=12, interactive=False)
with gr.Group():
gr.Markdown("### 📚 使用说明")
gr.Markdown(
"""
**🎯 快速开始:**
1. 选择场景并点击"加载场景",或自己输入对话文本
2. 上传两个参考音频(分别对应说话者1和说话者2)
3. 输入与参考音频完全匹配的参考文本
4. 勾选"文本标准化"(推荐)
5. 点击"开始合成"
**📝 格式要求:**
- 使用 `[S1]`/`[S2]` 标记不同说话者
- 参考文本需与参考音频内容完全匹配
- 支持上传两个参考音频(双说话者)或一个(单说话者)
**🎵 音频建议:**
- 格式: WAV, MP3, FLAC
- 时长: 10-30秒最佳
- 质量: 清晰无背景噪音
- 语速: 自然正常语速
**💡 提示:**
- 文本标准化开启可提升质量(数字、标点等处理更稳定)
- 文本尽量短句、自然口语化
- 生成时间根据文本长度而定,请耐心等待
"""
)
        # ===== Interaction logic =====
def on_load_scenario(name: str):
"""加载选中的场景,包括文本和音频"""
if not name or name.strip() == "":
gr.Warning("⚠️ 请先选择一个场景")
return gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
scenarios = get_scenario_examples()
            if name not in scenarios:
                # gr.Error must be raised (not just constructed) to surface in the UI
                raise gr.Error(f"❌ 场景不存在: {name}")
try:
scenario = scenarios[name]
                # Resolve the prompt-audio paths
audio1_path = None
audio2_path = None
if scenario.get("audio1"):
audio1_full = scenario["audio1"]
if scenario.get("base_path") and not audio1_full.startswith("/"):
audio1_full = os.path.join(scenario["base_path"], audio1_full)
if os.path.exists(audio1_full):
audio1_path = audio1_full
else:
print(f"⚠️ 音频文件不存在: {audio1_full}")
if scenario.get("audio2"):
audio2_full = scenario["audio2"]
if scenario.get("base_path") and not audio2_full.startswith("/"):
audio2_full = os.path.join(scenario["base_path"], audio2_full)
if os.path.exists(audio2_full):
audio2_path = audio2_full
else:
print(f"⚠️ 音频文件不存在: {audio2_full}")
gr.Info(f"✅ 成功加载场景: {name}")
return (
scenario.get("text", ""),
audio1_path,
scenario.get("text1", ""),
audio2_path,
scenario.get("text2", "")
)
            except Exception as e:  # noqa: BLE001
                raise gr.Error(f"❌ 加载场景时出错: {e}") from e
def on_load_default():
"""加载默认音频和文本"""
try:
result = load_default_audio()
gr.Info("✅ 成功加载默认音频和文本")
return result
            except Exception as e:  # noqa: BLE001
                raise gr.Error(f"❌ 加载默认音频时出错: {e}") from e
btn_load_scenario.click(
fn=on_load_scenario,
inputs=[scenario_dropdown],
outputs=[dialogue_text, speaker1_audio, speaker1_text, speaker2_audio, speaker2_text],
)
btn_load_default.click(
fn=on_load_default,
outputs=[dialogue_text, speaker1_audio, speaker1_text, speaker2_audio, speaker2_text],
)
btn_generate.click(
fn=generate_dialogue_audio,
inputs=[dialogue_text, speaker1_audio, speaker1_text, speaker2_audio, speaker2_text, use_normalize],
outputs=[output_audio, status_info],
show_progress=True,
)
return demo
# Exposed at module level so HF Spaces can load the demo directly
demo = create_space_ui()
def main():
demo.queue(max_size=16).launch()
if __name__ == "__main__":
main()