import gradio as gr
import torch
import librosa
from transformers import Wav2Vec2Processor, AutoModelForCTC
import zipfile
import os
import firebase_admin
from firebase_admin import credentials, firestore
from datetime import datetime
import json
import tempfile

# Initialize Firebase
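# The service-account JSON is expected in the 'firebase_creds' environment
# variable; json.loads raises if it is missing or malformed.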
firebase_config = json.loads(os.environ.get('firebase_creds'))
cred = credentials.Certificate(firebase_config)
firebase_admin.initialize_app(cred)
db = firestore.client()

# Load the ASR model and processor
MODEL_NAME = "eleferrand/xlsr53_Amis"
processor = Wav2Vec2Processor.from_pretrained(MODEL_NAME)
model = AutoModelForCTC.from_pretrained(MODEL_NAME)

def transcribe(audio_file):
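    """Load the audio at 16 kHz and greedily decode it with the XLSR-53 Amis CTC model."""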
    try:
        audio, rate = librosa.load(audio_file, sr=16000)
        input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        transcription = processor.batch_decode(predicted_ids)[0]
        return transcription.replace("[UNK]", "")
    except Exception as e:
        return f"處理文件錯誤: {e}"

def transcribe_both(audio_file):
    """Transcribe once and use the result for both the read-only original box and the editable corrected box."""
    start_time = datetime.now()
    transcription = transcribe(audio_file)
    processing_time = (datetime.now() - start_time).total_seconds()
    print(f"Processing time: {processing_time:.2f} s")
    # Return exactly two values to match the two output components (original_text, corrected_text).
    return transcription, transcription

def toggle_language(switch):
    """Switch UI text between English and Traditional Chinese."""
    # Markdown components take the new text directly as their value; for the other
    # components gr.update() is used so only the label/button text changes and any
    # user-entered values are left untouched.
    if switch:
        return (
            "阿美語轉錄與修正系統",
            "步驟 1:音訊上傳與轉錄",
            "步驟 2:審閱與編輯轉錄",
            "步驟 3:使用者資訊",
            "步驟 4:儲存與下載",
            gr.update(label="音訊輸入"), gr.update(value="轉錄音訊"),
            gr.update(label="原始轉錄"), gr.update(label="更正轉錄"),
            gr.update(label="年齡"), gr.update(label="以阿美語為母語?"),
            gr.update(value="儲存更正"), gr.update(label="儲存狀態"),
            gr.update(value="下載 ZIP 檔案")
        )
    else:
        return (
            "Amis ASR Transcription & Correction System",
            "Step 1: Audio Upload & Transcription",
            "Step 2: Review & Edit Transcription",
            "Step 3: User Information",
            "Step 4: Save & Download",
            gr.update(label="Audio Input"), gr.update(value="Transcribe Audio"),
            gr.update(label="Original Transcription"), gr.update(label="Corrected Transcription"),
            gr.update(label="Age"), gr.update(label="Native Amis Speaker?"),
            gr.update(value="Save Correction"), gr.update(label="Save Status"),
            gr.update(value="Download ZIP File")
        )

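# NOTE: store_correction and prepare_download are wired to the buttons below but
# were not defined in this file. The versions here are minimal sketches (assumed
# Firestore collection name "transcriptions" and assumed ZIP layout), not the
# original implementations.
def store_correction(original_transcription, corrected_transcription, audio_file, age, native_speaker):
    """Persist the original/corrected transcription pair plus basic speaker metadata to Firestore."""
    try:
        db.collection("transcriptions").add({
            "original_text": original_transcription,
            "corrected_text": corrected_transcription,
            "audio_file": os.path.basename(audio_file) if audio_file else None,
            "age": age,
            "native_amis_speaker": native_speaker,
            "timestamp": datetime.now().isoformat(),
        })
        return "Correction saved successfully!"
    except Exception as e:
        return f"Error saving correction: {e}"

def prepare_download(audio_file, original_transcription, corrected_transcription):
    """Bundle the audio file and both transcriptions into a ZIP and return its path."""
    if not audio_file:
        return None
    try:
        tmp = tempfile.NamedTemporaryFile(suffix=".zip", delete=False)
        tmp.close()
        with zipfile.ZipFile(tmp.name, "w") as zf:
            zf.write(audio_file, arcname=os.path.basename(audio_file))
            zf.writestr("original_transcription.txt", original_transcription or "")
            zf.writestr("corrected_transcription.txt", corrected_transcription or "")
        return tmp.name
    except Exception as e:
        print(f"Error preparing download: {e}")
        return None
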
# Interface
with gr.Blocks() as demo:
    lang_switch = gr.Checkbox(label="切換到繁體中文 (Switch to Traditional Chinese)")
    
    title = gr.Markdown("Amis ASR Transcription & Correction System")
    step1 = gr.Markdown("Step 1: Audio Upload & Transcription")
    step2 = gr.Markdown("Step 2: Review & Edit Transcription")
    step3 = gr.Markdown("Step 3: User Information")
    step4 = gr.Markdown("Step 4: Save & Download")

    with gr.Row():
        audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio Input")
        transcribe_button = gr.Button("Transcribe Audio")
    
    original_text = gr.Textbox(label="Original Transcription", interactive=False, lines=5)
    corrected_text = gr.Textbox(label="Corrected Transcription", interactive=True, lines=5)

    age_input = gr.Slider(minimum=0, maximum=100, step=1, label="Age", value=25)
    native_speaker_input = gr.Checkbox(label="Native Amis Speaker?", value=True)

    save_button = gr.Button("Save Correction")
    save_status = gr.Textbox(label="Save Status", interactive=False)
    download_button = gr.Button("Download ZIP File")
    download_output = gr.File()

    # Toggle language dynamically
    lang_switch.change(
        toggle_language, 
        inputs=lang_switch, 
        outputs=[title, step1, step2, step3, step4, audio_input, transcribe_button,
                 original_text, corrected_text, age_input, native_speaker_input,
                 save_button, save_status, download_button]
    )

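    # Run the ASR model once and use its output to fill both transcription boxes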
    transcribe_button.click(transcribe_both, inputs=audio_input, outputs=[original_text, corrected_text])

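    # Persist the reviewed correction and speaker metadata to Firestore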
    save_button.click(
        store_correction,
        inputs=[original_text, corrected_text, audio_input, age_input, native_speaker_input],
        outputs=save_status
    )

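    # Package the audio plus both transcriptions into a downloadable ZIP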
    download_button.click(
        prepare_download,
        inputs=[audio_input, original_text, corrected_text],
        outputs=download_output
    )

demo.launch()