File size: 3,052 Bytes
f2ee535
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# app.py
import html
import os
import tempfile
from io import BytesIO

import streamlit as st
from docx import Document
from faster_whisper import WhisperModel
from pyannote.audio import Pipeline
from pydub import AudioSegment

# Streamlit page setup
st.set_page_config(page_title="話者分離付き文字起こし", layout="centered")
st.title("🎤 話者分離付き文字起こしアプリ(Hugging Face対応)")


@st.cache_resource
def _load_diarization_pipeline():
    """Load the pyannote speaker-diarization pipeline once per process.

    NOTE(review): pyannote models are gated on Hugging Face — this likely
    needs an auth token (``use_auth_token=...``); confirm the deployment
    has credentials configured.
    """
    return Pipeline.from_pretrained("pyannote/speaker-diarization")


@st.cache_resource
def _load_whisper_model():
    """Load the faster-whisper "small" model once per process (int8 quantization)."""
    return WhisperModel("small", compute_type="int8")


# Audio upload UI
uploaded_file = st.file_uploader("音声ファイルをアップロード(mp3, wav, m4a)", type=["mp3", "wav", "m4a"])
if uploaded_file:
    st.audio(uploaded_file)

    if st.button("▶️ 文字起こしスタート"):
        status = st.info("準備中…")
        progress = st.progress(0)

        # Convert the upload to WAV in a temp file. delete=False keeps the
        # path valid after the handle closes (required on Windows); the file
        # is removed in the finally block below.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
            sound = AudioSegment.from_file(uploaded_file)
            sound.export(tmp.name, format="wav")
            audio_path = tmp.name

        try:
            progress.progress(20)
            status.info("話者分離中…")
            diarization = _load_diarization_pipeline()(audio_path)
            progress.progress(50)

            status.info("Whisperモデルで文字起こし中…")
            model = _load_whisper_model()
            segments, _ = model.transcribe(audio_path, vad_filter=True, language="ja")
            progress.progress(70)

            # Materialize the diarization turns once, instead of re-creating
            # the itertracks generator for every transcription segment.
            turns = [(turn, label) for turn, _, label in diarization.itertracks(yield_label=True)]

            # Attach a speaker label to each segment by matching the
            # segment's start time against the diarization turn intervals.
            transcript = ""
            word_blocks = []
            for segment in segments:
                start = segment.start
                speaker = "unknown"
                for turn, label in turns:
                    if turn.start <= start <= turn.end:
                        speaker = label
                        break
                text = segment.text.strip()
                word_blocks.append((speaker, text))
                transcript += f"[{speaker}] {text}\n"

            progress.progress(90)
            status.success("完了!")

            # Color-coded per-speaker display. Escape the transcribed text
            # and label so transcript content cannot inject HTML through
            # unsafe_allow_html.
            st.subheader("📝 話者ごとの文字起こし結果")
            colors = ["#E6F7FF", "#FFFAE6", "#E6FFEA", "#F9E6FF"]
            speakers = sorted({s for s, _ in word_blocks})
            color_map = {s: colors[i % len(colors)] for i, s in enumerate(speakers)}

            for speaker, text in word_blocks:
                st.markdown(
                    f"<div style='background-color:{color_map[speaker]}; padding:8px; border-radius:5px; margin-bottom:4px;'>"
                    f"<b>{html.escape(speaker)}</b>: {html.escape(text)}"
                    f"</div>",
                    unsafe_allow_html=True
                )

            # Offer the same transcript as a .docx download.
            doc = Document()
            for speaker, text in word_blocks:
                doc.add_paragraph(f"{speaker}: {text}")
            docx_io = BytesIO()
            doc.save(docx_io)
            docx_io.seek(0)

            st.download_button("💾 Wordファイルでダウンロード", docx_io, file_name="transcription.docx", mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document")
            progress.progress(100)
        finally:
            # Always remove the temp WAV — the original leaked one file per run.
            os.remove(audio_path)