# Med_bot / app.py
# Hugging Face Space by Imsachinsingh00 (commit cd2b0ba) — medical dialogue summarizer.
import gradio as gr
from transformers import BertTokenizer, EncoderDecoderModel
import torch
# Load the fine-tuned BERT2BERT summarization model and its tokenizer
# from the Hugging Face Hub (downloads on first run, then cached).
model = EncoderDecoderModel.from_pretrained("Imsachinsingh00/bert2bert-mts-summary")
tokenizer = BertTokenizer.from_pretrained("Imsachinsingh00/bert2bert-mts-summary")

# Prefer GPU when available; fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Inference only: switch off dropout so outputs are deterministic.
model.eval()
def summarize_text(dialogue):
    """Summarize a medical dialogue with the BERT2BERT model.

    Args:
        dialogue: Raw dialogue text; input is truncated to 512 tokens.

    Returns:
        The decoded summary string (at most 64 generated tokens).
    """
    inputs = tokenizer(dialogue, return_tensors="pt", padding=True, truncation=True, max_length=512)
    # Move every input tensor to the same device as the model.
    inputs = {k: v.to(device) for k, v in inputs.items()}
    # no_grad: skip building the autograd graph during generation,
    # reducing memory use and speeding up inference.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=64)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Gradio app: record or paste a dialogue, then summarize it.
with gr.Blocks() as demo:
    gr.Markdown("## 🎀 Medical Dialogue Summarization App")
    with gr.Row():
        mic_input = gr.Microphone(label="πŸŽ™οΈ Record Dialogue")
        text_input = gr.Textbox(label="πŸ“ Or Paste Dialogue", lines=10, placeholder="Paste or speak a conversation here...")
    summarize_btn = gr.Button("Summarize")
    summary_output = gr.Textbox(label="πŸ“„ Summary", interactive=False)

    def transcribe_and_summarize(audio, text):
        """Transcribe the recording (if any) and summarize the dialogue.

        Args:
            audio: Microphone recording, or None when nothing was recorded.
            text: Text typed/pasted by the user; takes precedence over
                the transcription when both are present.

        Returns:
            (dialogue_text, summary) — the text that was actually
            summarized, and the model's summary of it.
        """
        transcribed_text = ""
        # Guard: the original called whisper.transcribe(None) and crashed
        # whenever the user typed text without recording audio.
        if audio is not None:
            import whisper  # deferred: only needed when audio is present
            # Load Whisper once and cache it on the function object instead
            # of re-reading the weights on every button click.
            if not hasattr(transcribe_and_summarize, "_whisper"):
                transcribe_and_summarize._whisper = whisper.load_model("base")
            result = transcribe_and_summarize._whisper.transcribe(audio)
            transcribed_text = result["text"]
        # Typed text wins; fall back to the transcription.
        text = text or transcribed_text
        summary = summarize_text(text)
        # Return the text actually summarized so a typed dialogue is not
        # overwritten by an empty or stale transcription in the textbox.
        return text, summary

    summarize_btn.click(fn=transcribe_and_summarize, inputs=[mic_input, text_input], outputs=[text_input, summary_output])

demo.launch()