Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,144 +1,98 @@
|
|
1 |
import gradio as gr
|
2 |
-
from transformers import pipeline
|
3 |
import torch
|
4 |
import warnings
|
5 |
warnings.filterwarnings('ignore')
|
6 |
|
7 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
8 |
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
device=device),
|
23 |
-
'question_answering': pipeline("question-answering",
|
24 |
-
model="deepset/roberta-base-squad2",
|
25 |
-
device=device),
|
26 |
-
'chat': pipeline("text-generation",
|
27 |
-
model="facebook/opt-125m",
|
28 |
-
device=device)
|
29 |
-
}
|
30 |
-
|
31 |
-
except Exception as e:
|
32 |
-
print(f"Erro ao carregar modelos: {str(e)}")
|
33 |
-
|
34 |
-
def safe_process(func):
    """Decorator: run *func* and convert any raised exception into a
    user-facing Portuguese error string instead of propagating it.

    Returns the wrapped callable; on failure the wrapper returns
    ``"Erro ao processar: <message>"``.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's __name__/__doc__ for debugging
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Boundary handler: every UI callback below relies on this to
            # surface errors as text rather than crashing the Gradio event.
            return f"Erro ao processar: {str(e)}"
    return wrapper
|
41 |
-
|
42 |
-
@safe_process
def transcribe(audio):
    """Transcribe an uploaded audio file to text with the ASR pipeline."""
    if audio:
        return models['transcription'](audio)["text"]
    return "Por favor, forneça um arquivo de áudio."
|
47 |
-
|
48 |
-
@safe_process
def translate(text, direction):
    """Translate *text* between Portuguese and English.

    direction: "pt_en" translates pt -> en; any other value en -> pt.
    """
    if not text:
        return "Por favor, insira um texto para tradução."

    src, tgt = ("pt", "en") if direction == "pt_en" else ("en", "pt")
    translated = models['translation'](text, src_lang=src, tgt_lang=tgt)[0]
    return translated['translation_text']
|
59 |
-
|
60 |
-
@safe_process
def summarize(text):
    """Summarize *text* (summary bounded to 30–130 tokens)."""
    if not text:
        return "Por favor, insira um texto para resumir."
    result = models['summarization'](text, max_length=130, min_length=30)
    return result[0]['summary_text']
|
65 |
-
|
66 |
-
@safe_process
def analyze_sentiment(text):
    """Return the sentiment label predicted for *text*."""
    if not text:
        return "Por favor, insira um texto para análise."
    prediction = models['sentiment'](text)
    return prediction[0]['label']
|
71 |
-
|
72 |
-
@safe_process
def answer_question(question, context):
    """Answer *question* from *context* via the extractive QA pipeline."""
    if question and context:
        return models['question_answering'](question=question, context=context)['answer']
    return "Por favor, forneça tanto a pergunta quanto o contexto."
|
77 |
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
85 |
|
86 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
87 |
-
|
88 |
-
gr.HTML(open("index.html").read())
|
89 |
-
|
90 |
-
with gr.Tab("Transcrição de Áudio"):
|
91 |
-
audio_input = gr.Audio(type="filepath", label="Upload de Áudio")
|
92 |
-
transcribe_button = gr.Button("Transcrever")
|
93 |
-
transcription_output = gr.Textbox(label="Transcrição", lines=3)
|
94 |
-
transcribe_button.click(transcribe, inputs=audio_input, outputs=transcription_output)
|
95 |
-
|
96 |
-
with gr.Tab("Tradução"):
|
97 |
-
with gr.Row():
|
98 |
-
translation_direction = gr.Radio(
|
99 |
-
["en_pt", "pt_en"],
|
100 |
-
label="Direção da Tradução",
|
101 |
-
value="en_pt"
|
102 |
-
)
|
103 |
-
text_to_translate = gr.Textbox(label="Texto para Traduzir", lines=3)
|
104 |
-
translate_button = gr.Button("Traduzir")
|
105 |
-
translation_output = gr.Textbox(label="Tradução", lines=3)
|
106 |
-
translate_button.click(
|
107 |
-
translate,
|
108 |
-
inputs=[text_to_translate, translation_direction],
|
109 |
-
outputs=translation_output
|
110 |
-
)
|
111 |
-
|
112 |
-
with gr.Tab("Resumo"):
|
113 |
-
text_to_summarize = gr.Textbox(label="Texto para Resumir", lines=5)
|
114 |
-
summarize_button = gr.Button("Resumir")
|
115 |
-
summary_output = gr.Textbox(label="Resumo", lines=3)
|
116 |
-
summarize_button.click(summarize, inputs=text_to_summarize, outputs=summary_output)
|
117 |
|
118 |
-
with gr.
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
139 |
|
140 |
-
|
141 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
142 |
|
143 |
-
|
144 |
-
demo.launch(share=True)
|
|
|
1 |
import gradio as gr
from transformers import pipeline
import torch
import warnings

# Keep the hosted-demo logs quiet; library deprecation noise is not actionable here.
warnings.filterwarnings('ignore')

# Prefer the GPU when one is available; the string is passed straight to pipeline().
device = "cuda" if torch.cuda.is_available() else "cpu"
|
8 |
|
9 |
+
# Expanded models dictionary with more services.
# (service key, pipeline task, checkpoint) — one row per service offered by the app.
_PIPELINE_SPECS = [
    ('transcription', "automatic-speech-recognition", "openai/whisper-small"),
    ('translation', "translation", "facebook/mbart-large-50-many-to-many-mmt"),
    ('summarization', "summarization", "facebook/bart-large-cnn"),
    ('sentiment', "sentiment-analysis", "nlptown/bert-base-multilingual-uncased-sentiment"),
    ('question_answering', "question-answering", "deepset/roberta-base-squad2"),
    ('chat', "text-generation", "facebook/opt-125m"),
    ('image_caption', "image-to-text", "Salesforce/blip-image-captioning-base"),
    ('text_to_speech', "text-to-audio", "facebook/mms-tts-eng"),
    ('zero_shot', "zero-shot-classification", "facebook/bart-large-mnli"),
    ('ner', "token-classification", "dslim/bert-base-NER"),
]

# Instantiate every pipeline eagerly, all on the same device chosen above.
models = {
    key: pipeline(task, model=checkpoint, device=device)
    for key, task, checkpoint in _PIPELINE_SPECS
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
+
def process_request(service_type, *args, **kwargs):
    """Dispatch *args* to the model pipeline registered for *service_type*.

    service_type: one of the keys in ``processors`` below.
    Returns the pipeline's result, or a user-facing Portuguese error string
    when the input is missing or processing fails.
    """
    try:
        # Guard against a call with no positional args at all as well as an
        # empty first argument (the original `args[0]` raised IndexError on
        # a zero-argument call, turning into a confusing "Erro: tuple index
        # out of range" message).
        if not args or not args[0]:
            return "Por favor, forneça os dados necessários."

        processors = {
            'transcription': lambda audio: models['transcription'](audio)["text"],
            'translation': lambda text, direction: models['translation'](
                text,
                src_lang="pt" if direction == "pt_en" else "en",
                tgt_lang="en" if direction == "pt_en" else "pt")[0]['translation_text'],
            'summarization': lambda text: models['summarization'](
                text, max_length=130, min_length=30)[0]['summary_text'],
            'sentiment': lambda text: models['sentiment'](text)[0]['label'],
            'qa': lambda q, ctx: models['question_answering'](question=q, context=ctx)['answer'],
            # Fixed: the UI wires this to outputs=[msg, chatbot], so return
            # ("" to clear the textbox, history with the new (user, bot) turn).
            # The original put the bot reply in the textbox and appended an
            # empty bot turn to the history.
            'chat': lambda msg, history: (
                "",
                history + [(msg, models['chat'](msg, max_length=100,
                                                do_sample=True)[0]['generated_text'])]),
            'image_caption': lambda img: models['image_caption'](img)[0]['generated_text'],
            'tts': lambda text: models['text_to_speech'](text),
            'zero_shot': lambda text, labels: models['zero_shot'](text, labels)['labels'][0],
            'ner': lambda text: models['ner'](text)
        }

        # Unknown service_type raises KeyError, reported via the handler below.
        return processors[service_type](*args)
    except Exception as e:
        # Boundary handler: surface any failure to the UI as plain text.
        return f"Erro: {str(e)}"
|
45 |
|
46 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Read the landing-page HTML with a context manager so the file handle
    # is closed (the original open("index.html").read() leaked it).
    # assumes index.html is UTF-8 — TODO confirm.
    with open("index.html", encoding="utf-8") as page:
        gr.HTML(page.read())

    with gr.Tabs():
        with gr.TabItem("📝 Transcrição"):
            audio_input = gr.Audio(type="filepath")
            transcribe_button = gr.Button("Transcrever")
            transcription_output = gr.Textbox(label="Resultado")
            transcribe_button.click(lambda x: process_request('transcription', x),
                                    inputs=audio_input, outputs=transcription_output)

        with gr.TabItem("🔄 Tradução"):
            with gr.Row():
                text_to_translate = gr.Textbox(label="Texto")
                translation_direction = gr.Radio(["en_pt", "pt_en"], value="en_pt")
            translate_button = gr.Button("Traduzir")
            translation_output = gr.Textbox(label="Resultado")
            translate_button.click(lambda x, y: process_request('translation', x, y),
                                   inputs=[text_to_translate, translation_direction],
                                   outputs=translation_output)

        with gr.TabItem("📊 Análise"):
            with gr.Tabs():
                with gr.TabItem("Resumo"):
                    text_sum = gr.Textbox(label="Texto", lines=5)
                    sum_button = gr.Button("Resumir")
                    sum_output = gr.Textbox(label="Resultado")
                    sum_button.click(lambda x: process_request('summarization', x),
                                     inputs=text_sum, outputs=sum_output)

                with gr.TabItem("Sentimento"):
                    text_sent = gr.Textbox(label="Texto")
                    sent_button = gr.Button("Analisar")
                    sent_output = gr.Textbox(label="Resultado")
                    sent_button.click(lambda x: process_request('sentiment', x),
                                      inputs=text_sent, outputs=sent_output)

        with gr.TabItem("🤖 IA Avançada"):
            with gr.Tabs():
                with gr.TabItem("Chat"):
                    chatbot = gr.Chatbot()
                    msg = gr.Textbox(label="Mensagem")
                    clear = gr.Button("Limpar")
                    # process_request('chat', ...) returns a
                    # (textbox_value, history) pair matching these outputs.
                    msg.submit(lambda x, y: process_request('chat', x, y),
                               inputs=[msg, chatbot], outputs=[msg, chatbot])
                    clear.click(lambda: None, None, chatbot, queue=False)

                with gr.TabItem("Classificação"):
                    zero_text = gr.Textbox(label="Texto")
                    zero_labels = gr.Textbox(label="Categorias (separadas por vírgula)")
                    zero_button = gr.Button("Classificar")
                    zero_output = gr.Textbox(label="Resultado")
                    # Strip whitespace around each comma-separated label so
                    # "a, b" and "a,b" classify identically.
                    zero_button.click(
                        lambda x, y: process_request(
                            'zero_shot', x, [lbl.strip() for lbl in y.split(',')]),
                        inputs=[zero_text, zero_labels],
                        outputs=zero_output)

demo.launch(share=True)
|
|