# Gradio multi-task NLP demo: transcription (Whisper), translation (mBART-50),
# summarization (BART), sentiment (BERT), QA (RoBERTa) and chat (OPT).
import functools
import warnings

import torch
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModel

warnings.filterwarnings('ignore')
# Prefer GPU when available; transformers pipelines accept "cuda"/"cpu" strings.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Registry of task pipelines keyed by the task names used by the UI handlers.
# Initialized to an empty dict so a failed load leaves `models` defined and
# each handler reports a per-call error (via safe_process) instead of a
# NameError at call time.
models = {}
try:
    models = {
        'transcription': pipeline("automatic-speech-recognition",
                                  model="openai/whisper-small",
                                  device=device),
        'translation': pipeline("translation",
                                model="facebook/mbart-large-50-many-to-many-mmt",
                                device=device),
        'summarization': pipeline("summarization",
                                  model="facebook/bart-large-cnn",
                                  device=device),
        'sentiment': pipeline("sentiment-analysis",
                              model="nlptown/bert-base-multilingual-uncased-sentiment",
                              device=device),
        'question_answering': pipeline("question-answering",
                                       model="deepset/roberta-base-squad2",
                                       device=device),
        'chat': pipeline("text-generation",
                         model="facebook/opt-125m",
                         device=device)
    }
except Exception as e:
    # Loading can fail offline or with missing weights; keep the app importable.
    print(f"Erro ao carregar modelos: {str(e)}")
def safe_process(func):
    """Decorator: run *func*, converting any exception into an error string.

    Keeps the Gradio UI responsive by returning a Portuguese error message
    instead of letting the exception propagate to the frontend.
    """
    @functools.wraps(func)  # preserve the wrapped handler's name/docstring
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            return f"Erro ao processar: {str(e)}"
    return wrapper
@safe_process
def transcribe(audio):
    """Transcribe an uploaded audio file to text via the Whisper pipeline."""
    if audio:
        return models['transcription'](audio)["text"]
    return "Por favor, forneça um arquivo de áudio."
@safe_process
def translate(text, direction):
    """Translate *text* between Portuguese and English with mBART-50.

    direction: "pt_en" translates pt→en; any other value (the UI default is
    "en_pt") translates en→pt.
    """
    if not text:
        return "Por favor, insira um texto para tradução."
    # mBART-50 requires region-tagged language codes ("pt_XX"/"en_XX");
    # bare ISO codes like "pt" raise a KeyError inside the tokenizer.
    if direction == "pt_en":
        src, tgt = "pt_XX", "en_XX"
    else:
        src, tgt = "en_XX", "pt_XX"
    result = models['translation'](text, src_lang=src, tgt_lang=tgt)[0]
    return result['translation_text']
@safe_process
def summarize(text):
    """Summarize *text* with BART-CNN (30–130 token summary)."""
    if text:
        summaries = models['summarization'](text, max_length=130, min_length=30)
        return summaries[0]['summary_text']
    return "Por favor, insira um texto para resumir."
@safe_process
def analyze_sentiment(text):
    """Return the sentiment label (1–5 star rating) predicted for *text*."""
    if text:
        prediction = models['sentiment'](text)
        return prediction[0]['label']
    return "Por favor, insira um texto para análise."
@safe_process
def answer_question(question, context):
    """Answer *question* from *context* using the extractive QA pipeline."""
    if question and context:
        prediction = models['question_answering'](question=question, context=context)
        return prediction['answer']
    return "Por favor, forneça tanto a pergunta quanto o contexto."
@safe_process
def chat_response(message, history):
    """Generate a chat reply and append the (message, reply) pair to history.

    Returns ("", updated_history) so the message Textbox is cleared. On empty
    input the history is returned unchanged — fixed to return "" for the
    Textbox output (the previous [] put a list into a Textbox component).
    """
    history = history or []  # Gradio may pass None before any interaction
    if not message:
        return "", history
    response = models['chat'](message, max_length=100, do_sample=True, temperature=0.7)
    history.append((message, response[0]['generated_text']))
    return "", history
# ---- UI definition: one tab per NLP task, wired to the handlers above. ----
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Tab("Início"):
        # Context manager + explicit encoding: the previous bare
        # open(...).read() leaked the file handle.
        with open("index.html", encoding="utf-8") as landing_page:
            gr.HTML(landing_page.read())
    with gr.Tab("Transcrição de Áudio"):
        audio_input = gr.Audio(type="filepath", label="Upload de Áudio")
        transcribe_button = gr.Button("Transcrever")
        transcription_output = gr.Textbox(label="Transcrição", lines=3)
        transcribe_button.click(transcribe, inputs=audio_input, outputs=transcription_output)
    with gr.Tab("Tradução"):
        with gr.Row():
            # Values must match the `direction` argument handled by translate().
            translation_direction = gr.Radio(
                ["en_pt", "pt_en"],
                label="Direção da Tradução",
                value="en_pt"
            )
        text_to_translate = gr.Textbox(label="Texto para Traduzir", lines=3)
        translate_button = gr.Button("Traduzir")
        translation_output = gr.Textbox(label="Tradução", lines=3)
        translate_button.click(
            translate,
            inputs=[text_to_translate, translation_direction],
            outputs=translation_output
        )
    with gr.Tab("Resumo"):
        text_to_summarize = gr.Textbox(label="Texto para Resumir", lines=5)
        summarize_button = gr.Button("Resumir")
        summary_output = gr.Textbox(label="Resumo", lines=3)
        summarize_button.click(summarize, inputs=text_to_summarize, outputs=summary_output)
    with gr.Tab("Análise de Sentimento"):
        sentiment_text = gr.Textbox(label="Texto para Análise", lines=3)
        sentiment_button = gr.Button("Analisar")
        sentiment_output = gr.Textbox(label="Sentimento")
        sentiment_button.click(analyze_sentiment, inputs=sentiment_text, outputs=sentiment_output)
    with gr.Tab("Perguntas e Respostas"):
        question_input = gr.Textbox(label="Pergunta")
        context_input = gr.Textbox(label="Contexto", lines=5)
        qa_button = gr.Button("Responder")
        qa_output = gr.Textbox(label="Resposta", lines=2)
        qa_button.click(
            answer_question,
            inputs=[question_input, context_input],
            outputs=qa_output
        )
    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Mensagem")
        clear = gr.Button("Limpar")
        # chat_response returns ("", history): clears the textbox, updates chat.
        msg.submit(chat_response, inputs=[msg, chatbot], outputs=[msg, chatbot])
        clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch(share=True)