# Gradio multi-task NLP demo (transcription, translation, summarization,
# sentiment analysis, question answering, chat) — HuggingFace Spaces app.
"""Multi-task NLP demo served with Gradio.

Pipelines: speech transcription, PT↔EN translation, summarization,
sentiment analysis, extractive question answering and a small chat model.
"""
import warnings

import torch
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModel

# Silence noisy library warnings in the hosted demo.
warnings.filterwarnings('ignore')

# Run on GPU when one is available; `pipeline` accepts a device string.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load every pipeline once at startup. Downloads or GPU placement can fail
# (missing weights, out of memory); the app is unusable in that case, so log
# the error and re-raise instead of continuing with `models` undefined —
# the original swallowed the exception and every handler below would have
# crashed later with a NameError.
try:
    models = {
        'transcription': pipeline("automatic-speech-recognition",
                                  model="openai/whisper-small",
                                  device=device),
        'translation': pipeline("translation",
                                model="facebook/mbart-large-50-many-to-many-mmt",
                                device=device),
        'summarization': pipeline("summarization",
                                  model="facebook/bart-large-cnn",
                                  device=device),
        'sentiment': pipeline("sentiment-analysis",
                              model="nlptown/bert-base-multilingual-uncased-sentiment",
                              device=device),
        'question_answering': pipeline("question-answering",
                                       model="deepset/roberta-base-squad2",
                                       device=device),
        'chat': pipeline("text-generation",
                         model="facebook/opt-125m",
                         device=device)
    }
except Exception as e:
    print(f"Erro ao carregar modelos: {str(e)}")
    raise
def safe_process(func):
    """Decorator: run *func* and return a user-facing error string instead
    of letting an exception propagate to the Gradio frontend.

    Uses functools.wraps so the wrapped function keeps its name/docstring
    (the original wrapper hid them).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            return f"Erro ao processar: {str(e)}"
    return wrapper
def transcribe(audio):
    """Transcribe an uploaded audio file with the Whisper pipeline.

    *audio* is a filepath (or falsy when nothing was uploaded); returns the
    transcription text, or a Portuguese prompt asking for a file.
    """
    if not audio:
        return "Por favor, forneça um arquivo de áudio."
    result = models['transcription'](audio)
    return result["text"]
def translate(text, direction):
    """Translate *text* between Portuguese and English with mBART-50.

    *direction* is "pt_en" for Portuguese→English; any other value (the UI
    sends "en_pt") translates English→Portuguese.
    """
    if not text:
        return "Por favor, insira um texto para tradução."
    # mBART-50 expects its own locale-style language codes ("pt_XX",
    # "en_XX"); the bare ISO codes "pt"/"en" used originally are not in
    # the model's language-code vocabulary and make the pipeline fail.
    if direction == "pt_en":
        src, tgt = "pt_XX", "en_XX"
    else:
        src, tgt = "en_XX", "pt_XX"
    result = models['translation'](text, src_lang=src, tgt_lang=tgt)[0]
    return result['translation_text']
def summarize(text):
    """Summarize *text* with BART-large-CNN (30–130 generated tokens)."""
    if not text:
        return "Por favor, insira um texto para resumir."
    summary = models['summarization'](text, max_length=130, min_length=30)
    return summary[0]['summary_text']
def analyze_sentiment(text):
    """Return the sentiment label predicted for *text*.

    The backing model is nlptown's 1–5 star multilingual classifier, so the
    label is a star rating string.
    """
    if not text:
        return "Por favor, insira um texto para análise."
    prediction = models['sentiment'](text)
    return prediction[0]['label']
def answer_question(question, context):
    """Extract the answer to *question* from *context* (extractive QA).

    Both arguments are required; a Portuguese prompt is returned when either
    is missing.
    """
    if not question or not context:
        return "Por favor, forneça tanto a pergunta quanto o contexto."
    qa = models['question_answering'](question=question, context=context)
    return qa['answer']
def chat_response(message, history):
    """Chat handler: append (message, reply) to *history* and clear the box.

    Returns a pair matching the Gradio outputs [msg, chatbot]: the new
    textbox value (always a string) and the updated history list.
    """
    if not message:
        # The first output feeds a Textbox, so it must be a string — the
        # original returned [] here, which is not a valid Textbox value.
        return "", history
    # NOTE(review): OPT text-generation echoes the prompt at the start of
    # 'generated_text', so the displayed reply includes the user message.
    response = models['chat'](message, max_length=100, do_sample=True, temperature=0.7)
    history.append((message, response[0]['generated_text']))
    return "", history
from pathlib import Path

# --- User interface ---------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Tab("Início"):
        # Static landing page shipped next to this script. Path.read_text
        # closes the file (the original `open(...).read()` leaked the handle)
        # and fixes the encoding instead of relying on the platform default.
        gr.HTML(Path("index.html").read_text(encoding="utf-8"))
    with gr.Tab("Transcrição de Áudio"):
        audio_input = gr.Audio(type="filepath", label="Upload de Áudio")
        transcribe_button = gr.Button("Transcrever")
        transcription_output = gr.Textbox(label="Transcrição", lines=3)
        transcribe_button.click(transcribe, inputs=audio_input, outputs=transcription_output)
    with gr.Tab("Tradução"):
        with gr.Row():
            # NOTE(review): source indentation was mangled; the Radio is
            # clearly inside the Row, the Textbox is assumed outside it.
            translation_direction = gr.Radio(
                ["en_pt", "pt_en"],
                label="Direção da Tradução",
                value="en_pt"
            )
        text_to_translate = gr.Textbox(label="Texto para Traduzir", lines=3)
        translate_button = gr.Button("Traduzir")
        translation_output = gr.Textbox(label="Tradução", lines=3)
        translate_button.click(
            translate,
            inputs=[text_to_translate, translation_direction],
            outputs=translation_output
        )
    with gr.Tab("Resumo"):
        text_to_summarize = gr.Textbox(label="Texto para Resumir", lines=5)
        summarize_button = gr.Button("Resumir")
        summary_output = gr.Textbox(label="Resumo", lines=3)
        summarize_button.click(summarize, inputs=text_to_summarize, outputs=summary_output)
    with gr.Tab("Análise de Sentimento"):
        sentiment_text = gr.Textbox(label="Texto para Análise", lines=3)
        sentiment_button = gr.Button("Analisar")
        sentiment_output = gr.Textbox(label="Sentimento")
        sentiment_button.click(analyze_sentiment, inputs=sentiment_text, outputs=sentiment_output)
    with gr.Tab("Perguntas e Respostas"):
        question_input = gr.Textbox(label="Pergunta")
        context_input = gr.Textbox(label="Contexto", lines=5)
        qa_button = gr.Button("Responder")
        qa_output = gr.Textbox(label="Resposta", lines=2)
        qa_button.click(
            answer_question,
            inputs=[question_input, context_input],
            outputs=qa_output
        )
    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Mensagem")
        clear = gr.Button("Limpar")
        msg.submit(chat_response, inputs=[msg, chatbot], outputs=[msg, chatbot])
        # Reset the Chatbot component; no queue needed for a no-op lambda.
        clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    # share=True exposes a public tunnel URL when run outside Spaces.
    demo.launch(share=True)