Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,41 +1,76 @@
|
|
1 |
-
# app.py
|
2 |
import streamlit as st
|
3 |
import tempfile
|
|
|
|
|
4 |
from pydub import AudioSegment
|
5 |
from pyannote.audio import Pipeline
|
6 |
from faster_whisper import WhisperModel
|
7 |
from docx import Document
|
8 |
from io import BytesIO
|
9 |
|
10 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
st.set_page_config(page_title="話者分離付き文字起こし", layout="centered")
|
12 |
st.title("🎤 話者分離付き文字起こしアプリ(Hugging Face対応)")
|
13 |
|
14 |
-
|
15 |
-
|
|
|
|
|
|
|
|
|
16 |
if uploaded_file:
|
17 |
st.audio(uploaded_file)
|
18 |
|
19 |
if st.button("▶️ 文字起こしスタート"):
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
status = st.info("準備中…")
|
21 |
progress = st.progress(0)
|
22 |
|
|
|
23 |
with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
|
24 |
sound = AudioSegment.from_file(uploaded_file)
|
25 |
sound.export(tmp.name, format="wav")
|
26 |
audio_path = tmp.name
|
27 |
|
28 |
progress.progress(20)
|
|
|
|
|
29 |
status.info("話者分離中…")
|
30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
progress.progress(50)
|
32 |
|
|
|
33 |
status.info("Whisperモデルで文字起こし中…")
|
34 |
model = WhisperModel("small", compute_type="int8")
|
35 |
segments, _ = model.transcribe(audio_path, vad_filter=True, language="ja")
|
|
|
36 |
progress.progress(70)
|
37 |
|
38 |
-
#
|
39 |
transcript = ""
|
40 |
word_blocks = []
|
41 |
for segment in segments:
|
@@ -52,7 +87,7 @@ if uploaded_file:
|
|
52 |
progress.progress(90)
|
53 |
status.success("完了!")
|
54 |
|
55 |
-
#
|
56 |
st.subheader("📝 話者ごとの文字起こし結果")
|
57 |
colors = ["#E6F7FF", "#FFFAE6", "#E6FFEA", "#F9E6FF"]
|
58 |
speakers = list(sorted(set(s for s, _ in word_blocks)))
|
@@ -66,7 +101,7 @@ if uploaded_file:
|
|
66 |
unsafe_allow_html=True
|
67 |
)
|
68 |
|
69 |
-
# Word
|
70 |
doc = Document()
|
71 |
for speaker, text in word_blocks:
|
72 |
doc.add_paragraph(f"{speaker}: {text}")
|
|
|
|
|
1 |
import streamlit as st
|
2 |
import tempfile
|
3 |
+
import requests
|
4 |
+
import os
|
5 |
from pydub import AudioSegment
|
6 |
from pyannote.audio import Pipeline
|
7 |
from faster_whisper import WhisperModel
|
8 |
from docx import Document
|
9 |
from io import BytesIO
|
10 |
|
11 |
+
# ------------------------------------------
|
12 |
+
# ✅ トークン検証関数
|
13 |
+
# ------------------------------------------
|
14 |
+
def is_token_valid(token: str) -> bool:
    """Return True if *token* is a valid Hugging Face access token.

    Validity is checked by calling the ``whoami-v2`` endpoint with the token
    as a Bearer credential; any non-200 response or network failure is
    treated as invalid.

    Args:
        token: Hugging Face access token entered by the user.

    Returns:
        True if the API accepts the token, False otherwise.
    """
    try:
        headers = {"Authorization": f"Bearer {token}"}
        response = requests.get(
            "https://huggingface.co/api/whoami-v2",
            headers=headers,
            # A missing timeout would hang the Streamlit UI indefinitely
            # if huggingface.co is slow or unreachable.
            timeout=10,
        )
        return response.status_code == 200
    except requests.RequestException:
        # Only swallow network/HTTP errors; a bare `except:` would also
        # capture KeyboardInterrupt and SystemExit, which must propagate.
        return False
|
21 |
+
|
22 |
+
# ------------------------------------------
|
23 |
+
# ✅ Streamlit UI
|
24 |
+
# ------------------------------------------
|
25 |
st.set_page_config(page_title="話者分離付き文字起こし", layout="centered")
|
26 |
st.title("🎤 話者分離付き文字起こしアプリ(Hugging Face対応)")
|
27 |
|
28 |
+
st.markdown("このアプリは、音声ファイルをアップロードすると話者分離と文字起こしを行い、話者ごとに色分けして表示し、Wordファイルでダウンロードできます。")
|
29 |
+
|
30 |
+
# Hugging Face トークンの入力(安全な入力)
|
31 |
+
token = st.text_input("🔑 Hugging Face アクセストークンを入力してください", type="password")
|
32 |
+
|
33 |
+
uploaded_file = st.file_uploader("🎵 音声ファイルをアップロード(mp3, wav, m4a)", type=["mp3", "wav", "m4a"])
|
34 |
if uploaded_file:
|
35 |
st.audio(uploaded_file)
|
36 |
|
37 |
if st.button("▶️ 文字起こしスタート"):
|
38 |
+
|
39 |
+
# トークンチェック
|
40 |
+
if not token or not is_token_valid(token):
|
41 |
+
st.error("❌ 有効な Hugging Face トークンを入力してください。")
|
42 |
+
st.stop()
|
43 |
+
|
44 |
status = st.info("準備中…")
|
45 |
progress = st.progress(0)
|
46 |
|
47 |
+
# 一時ファイルに保存
|
48 |
with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
|
49 |
sound = AudioSegment.from_file(uploaded_file)
|
50 |
sound.export(tmp.name, format="wav")
|
51 |
audio_path = tmp.name
|
52 |
|
53 |
progress.progress(20)
|
54 |
+
|
55 |
+
# 話者分離
|
56 |
status.info("話者分離中…")
|
57 |
+
try:
|
58 |
+
pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=token)
|
59 |
+
diarization = pipeline(audio_path)
|
60 |
+
except Exception as e:
|
61 |
+
st.error(f"❌ 話者分離エラー: {e}")
|
62 |
+
st.stop()
|
63 |
+
|
64 |
progress.progress(50)
|
65 |
|
66 |
+
# Whisperで文字起こし
|
67 |
status.info("Whisperモデルで文字起こし中…")
|
68 |
model = WhisperModel("small", compute_type="int8")
|
69 |
segments, _ = model.transcribe(audio_path, vad_filter=True, language="ja")
|
70 |
+
|
71 |
progress.progress(70)
|
72 |
|
73 |
+
# 話者ごとのテキスト作成
|
74 |
transcript = ""
|
75 |
word_blocks = []
|
76 |
for segment in segments:
|
|
|
87 |
progress.progress(90)
|
88 |
status.success("完了!")
|
89 |
|
90 |
+
# 表示(色分け)
|
91 |
st.subheader("📝 話者ごとの文字起こし結果")
|
92 |
colors = ["#E6F7FF", "#FFFAE6", "#E6FFEA", "#F9E6FF"]
|
93 |
speakers = list(sorted(set(s for s, _ in word_blocks)))
|
|
|
101 |
unsafe_allow_html=True
|
102 |
)
|
103 |
|
104 |
+
# Wordファイル出力
|
105 |
doc = Document()
|
106 |
for speaker, text in word_blocks:
|
107 |
doc.add_paragraph(f"{speaker}: {text}")
|