syamashita committed on
Commit
f2ee535
·
verified ·
1 Parent(s): eb44092

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -0
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py
#
# Streamlit app: speaker-diarized transcription.
# Flow: upload audio -> normalize to WAV (pydub) -> speaker diarization
# (pyannote.audio) -> transcription (faster-whisper) -> color-coded display
# and a downloadable .docx.
import os
import tempfile
from io import BytesIO

import streamlit as st
from docx import Document
from faster_whisper import WhisperModel
from pyannote.audio import Pipeline
from pydub import AudioSegment


@st.cache_resource
def _load_diarization_pipeline():
    """Load the pyannote speaker-diarization pipeline once per process.

    NOTE(review): "pyannote/speaker-diarization" is a gated model — this
    presumably relies on a Hugging Face token being configured in the
    environment; confirm for the deployment target.
    """
    return Pipeline.from_pretrained("pyannote/speaker-diarization")


@st.cache_resource
def _load_whisper_model():
    """Load the faster-whisper "small" model once per process (int8 to keep memory low)."""
    return WhisperModel("small", compute_type="int8")


# Streamlit page setup (Japanese UI strings are user-facing; kept verbatim).
st.set_page_config(page_title="話者分離付き文字起こし", layout="centered")
st.title("🎤 話者分離付き文字起こしアプリ(Hugging Face対応)")

# Audio upload
uploaded_file = st.file_uploader("音声ファイルをアップロード(mp3, wav, m4a)", type=["mp3", "wav", "m4a"])
if uploaded_file:
    st.audio(uploaded_file)

    if st.button("▶️ 文字起こしスタート"):
        status = st.info("準備中…")
        progress = st.progress(0)

        # Normalize any supported input format to WAV for both models.
        # delete=False so the file survives the `with`; removed in `finally`.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
            sound = AudioSegment.from_file(uploaded_file)
            sound.export(tmp.name, format="wav")
            audio_path = tmp.name

        try:
            progress.progress(20)
            status.info("話者分離中…")
            diarization = _load_diarization_pipeline()(audio_path)
            progress.progress(50)

            status.info("Whisperモデルで文字起こし中…")
            model = _load_whisper_model()
            segments, _ = model.transcribe(audio_path, vad_filter=True, language="ja")
            progress.progress(70)

            # Attach a speaker label to each transcribed segment by matching
            # the segment's start time against the diarization turns; fall
            # back to "unknown" when no turn covers it.
            word_blocks = []
            for segment in segments:
                start = segment.start
                speaker = "unknown"
                for turn, _track, label in diarization.itertracks(yield_label=True):
                    if turn.start <= start <= turn.end:
                        speaker = label
                        break
                word_blocks.append((speaker, segment.text.strip()))
        finally:
            # Clean up the temp WAV — delete=False would otherwise leak it.
            os.remove(audio_path)

        progress.progress(90)
        status.success("完了!")

        # Color-coded per-speaker display (cycle colors if >4 speakers).
        st.subheader("📝 話者ごとの文字起こし結果")
        colors = ["#E6F7FF", "#FFFAE6", "#E6FFEA", "#F9E6FF"]
        speakers = sorted({s for s, _ in word_blocks})
        color_map = {s: colors[i % len(colors)] for i, s in enumerate(speakers)}

        for speaker, text in word_blocks:
            st.markdown(
                f"<div style='background-color:{color_map[speaker]}; padding:8px; border-radius:5px; margin-bottom:4px;'>"
                f"<b>{speaker}</b>: {text}"
                f"</div>",
                unsafe_allow_html=True
            )

        # Export the labeled transcript as a Word (.docx) file in memory.
        doc = Document()
        for speaker, text in word_blocks:
            doc.add_paragraph(f"{speaker}: {text}")
        docx_io = BytesIO()
        doc.save(docx_io)
        docx_io.seek(0)

        st.download_button("💾 Wordファイルでダウンロード", docx_io, file_name="transcription.docx", mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document")
        progress.progress(100)