import gradio as gr
import torch
import whisper
from transformers import BertTokenizer, EncoderDecoderModel

# Load the fine-tuned BERT2BERT summarization model and its tokenizer
model = EncoderDecoderModel.from_pretrained("Imsachinsingh00/bert2bert-mts-summary")
tokenizer = BertTokenizer.from_pretrained("Imsachinsingh00/bert2bert-mts-summary")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Load Whisper once at startup so it is not reloaded on every button click
whisper_model = whisper.load_model("base")

def summarize_text(dialogue):
    # Tokenize the dialogue and generate a short summary with the encoder-decoder model
    inputs = tokenizer(dialogue, return_tensors="pt", padding=True, truncation=True, max_length=512)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(**inputs, max_length=64)
    summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return summary

# Gradio app
with gr.Blocks() as demo:
    gr.Markdown("## 🎀 Medical Dialogue Summarization App")

    with gr.Row():
        mic_input = gr.Microphone(label="πŸŽ™οΈ Record Dialogue", type="filepath")  # filepath so Whisper can transcribe the recording
        text_input = gr.Textbox(label="πŸ“ Or Paste Dialogue", lines=10, placeholder="Paste or speak a conversation here...")

    summarize_btn = gr.Button("Summarize")
    summary_output = gr.Textbox(label="πŸ“„ Summary", interactive=False)

    def transcribe_and_summarize(audio, text):
        # Use pasted text if provided; otherwise transcribe the recorded audio with Whisper
        if not text and audio is not None:
            text = whisper_model.transcribe(audio)["text"]
        if not text:
            return "", "Please record or paste a dialogue first."
        summary = summarize_text(text)
        return text, summary

    summarize_btn.click(fn=transcribe_and_summarize, inputs=[mic_input, text_input], outputs=[text_input, summary_output])

demo.launch()
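
# Note (assumption, not part of the original file): running this app requires
# gradio, torch, transformers, and openai-whisper to be installed, e.g.
#   pip install gradio torch transformers openai-whisper
# Gradio serves the interface at http://127.0.0.1:7860 by default.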