DHEIVER committed on
Commit
b2b70a5
·
verified ·
1 Parent(s): 0cee204

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -131
app.py CHANGED
@@ -1,144 +1,98 @@
1
  import gradio as gr
2
- from transformers import pipeline, AutoTokenizer, AutoModel
3
  import torch
4
  import warnings
5
  warnings.filterwarnings('ignore')
6
 
7
  device = "cuda" if torch.cuda.is_available() else "cpu"
8
 
9
# Load every Hugging Face pipeline once at startup.
# `models` is initialised to an empty dict BEFORE the try block: if loading
# fails, the name stays defined and each callback fails with a KeyError that
# safe_process converts into a user-facing error message, instead of the whole
# app dying later on a NameError (the original except printed and fell through
# with `models` undefined).
models = {}
try:
    models = {
        'transcription': pipeline("automatic-speech-recognition",
                                  model="openai/whisper-small",
                                  device=device),
        'translation': pipeline("translation",
                                model="facebook/mbart-large-50-many-to-many-mmt",
                                device=device),
        'summarization': pipeline("summarization",
                                  model="facebook/bart-large-cnn",
                                  device=device),
        'sentiment': pipeline("sentiment-analysis",
                              model="nlptown/bert-base-multilingual-uncased-sentiment",
                              device=device),
        'question_answering': pipeline("question-answering",
                                       model="deepset/roberta-base-squad2",
                                       device=device),
        'chat': pipeline("text-generation",
                         model="facebook/opt-125m",
                         device=device)
    }

except Exception as e:
    print(f"Erro ao carregar modelos: {str(e)}")
34
def safe_process(func):
    """Decorator: run *func* and turn any exception into a user-facing
    Portuguese error string instead of crashing the UI callback."""
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            return f"Erro ao processar: {str(e)}"
    return wrapper

@safe_process
def transcribe(audio):
    """Transcribe an audio file (given as a filesystem path) with Whisper."""
    if not audio:
        return "Por favor, forneça um arquivo de áudio."
    return models['transcription'](audio)["text"]

@safe_process
def translate(text, direction):
    """Translate between Portuguese and English.

    direction: "pt_en" for pt -> en; anything else (the UI sends "en_pt")
    for en -> pt.
    """
    if not text:
        return "Por favor, insira um texto para tradução."

    # mBART-50-many-to-many expects its own language codes ("pt_XX"/"en_XX");
    # bare "pt"/"en" are not valid codes for this checkpoint.
    if direction == "pt_en":
        result = models['translation'](text, src_lang="pt_XX", tgt_lang="en_XX")[0]
    else:
        result = models['translation'](text, src_lang="en_XX", tgt_lang="pt_XX")[0]

    return result['translation_text']

@safe_process
def summarize(text):
    """Summarize *text* with BART-large-CNN (30-130 token summary)."""
    if not text:
        return "Por favor, insira um texto para resumir."
    return models['summarization'](text, max_length=130, min_length=30)[0]['summary_text']

@safe_process
def analyze_sentiment(text):
    """Return the sentiment label for *text* (1-5 star multilingual model)."""
    if not text:
        return "Por favor, insira um texto para análise."
    return models['sentiment'](text)[0]['label']

@safe_process
def answer_question(question, context):
    """Extractive QA: answer *question* from the supplied *context*."""
    if not question or not context:
        return "Por favor, forneça tanto a pergunta quanto o contexto."
    return models['question_answering'](question=question, context=context)['answer']

@safe_process
def chat_response(message, history):
    """Generate a chat reply.

    Returns (new_textbox_value, updated_history). On empty input return
    "" for the textbox — the original returned [], which is the wrong
    type for a Textbox output component.
    """
    if not message:
        return "", history
    response = models['chat'](message, max_length=100, do_sample=True, temperature=0.7)
    history.append((message, response[0]['generated_text']))
    return "", history
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
# Build the tabbed Gradio UI and wire each tab's button to its callback.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Tab("Início"):
        gr.HTML(open("index.html").read())

    with gr.Tab("Transcrição de Áudio"):
        audio_in = gr.Audio(type="filepath", label="Upload de Áudio")
        btn_transcribe = gr.Button("Transcrever")
        out_transcription = gr.Textbox(label="Transcrição", lines=3)
        btn_transcribe.click(
            fn=transcribe,
            inputs=audio_in,
            outputs=out_transcription,
        )

    with gr.Tab("Tradução"):
        with gr.Row():
            direction_in = gr.Radio(
                ["en_pt", "pt_en"],
                label="Direção da Tradução",
                value="en_pt",
            )
            source_text = gr.Textbox(label="Texto para Traduzir", lines=3)
        btn_translate = gr.Button("Traduzir")
        out_translation = gr.Textbox(label="Tradução", lines=3)
        btn_translate.click(
            fn=translate,
            inputs=[source_text, direction_in],
            outputs=out_translation,
        )

    with gr.Tab("Resumo"):
        summary_in = gr.Textbox(label="Texto para Resumir", lines=5)
        btn_summarize = gr.Button("Resumir")
        out_summary = gr.Textbox(label="Resumo", lines=3)
        btn_summarize.click(fn=summarize, inputs=summary_in, outputs=out_summary)

    with gr.Tab("Análise de Sentimento"):
        sentiment_in = gr.Textbox(label="Texto para Análise", lines=3)
        btn_sentiment = gr.Button("Analisar")
        out_sentiment = gr.Textbox(label="Sentimento")
        btn_sentiment.click(fn=analyze_sentiment, inputs=sentiment_in, outputs=out_sentiment)

    with gr.Tab("Perguntas e Respostas"):
        question_in = gr.Textbox(label="Pergunta")
        context_in = gr.Textbox(label="Contexto", lines=5)
        btn_qa = gr.Button("Responder")
        out_answer = gr.Textbox(label="Resposta", lines=2)
        btn_qa.click(
            fn=answer_question,
            inputs=[question_in, context_in],
            outputs=out_answer,
        )

    with gr.Tab("Chat"):
        chat_window = gr.Chatbot()
        chat_msg = gr.Textbox(label="Mensagem")
        btn_clear = gr.Button("Limpar")

        chat_msg.submit(fn=chat_response, inputs=[chat_msg, chat_window], outputs=[chat_msg, chat_window])
        btn_clear.click(lambda: None, None, chat_window, queue=False)

if __name__ == "__main__":
    demo.launch(share=True)
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
  import torch
4
  import warnings
5
  warnings.filterwarnings('ignore')
6
 
7
  device = "cuda" if torch.cuda.is_available() else "cpu"
8
 
9
# Expanded models dictionary with more services.
# Each service maps to (pipeline task, model checkpoint); every pipeline is
# built once at import time on the selected device.
_PIPELINE_SPECS = {
    'transcription': ("automatic-speech-recognition", "openai/whisper-small"),
    'translation': ("translation", "facebook/mbart-large-50-many-to-many-mmt"),
    'summarization': ("summarization", "facebook/bart-large-cnn"),
    'sentiment': ("sentiment-analysis", "nlptown/bert-base-multilingual-uncased-sentiment"),
    'question_answering': ("question-answering", "deepset/roberta-base-squad2"),
    'chat': ("text-generation", "facebook/opt-125m"),
    'image_caption': ("image-to-text", "Salesforce/blip-image-captioning-base"),
    'text_to_speech': ("text-to-audio", "facebook/mms-tts-eng"),
    'zero_shot': ("zero-shot-classification", "facebook/bart-large-mnli"),
    'ner': ("token-classification", "dslim/bert-base-NER"),
}
models = {
    name: pipeline(task, model=checkpoint, device=device)
    for name, (task, checkpoint) in _PIPELINE_SPECS.items()
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
def process_request(service_type, *args, **kwargs):
    """Single dispatch point for every UI callback.

    *service_type* selects one of the processors below; *args* carries the
    raw component values. Returns the processed result, or a Portuguese
    error-message string when the input is missing or processing fails.
    """
    try:
        # Guard against being called with no positional args at all:
        # the original bare `args[0]` raised IndexError in that case.
        if not args or not args[0]:
            return "Por favor, forneça os dados necessários."

        processors = {
            'transcription': lambda audio: models['transcription'](audio)["text"],
            # mBART-50-many-to-many requires its own language codes
            # ("pt_XX"/"en_XX"); bare "pt"/"en" are invalid for this checkpoint.
            'translation': lambda text, direction: models['translation'](
                text,
                src_lang="pt_XX" if direction == "pt_en" else "en_XX",
                tgt_lang="en_XX" if direction == "pt_en" else "pt_XX")[0]['translation_text'],
            'summarization': lambda text: models['summarization'](text, max_length=130, min_length=30)[0]['summary_text'],
            'sentiment': lambda text: models['sentiment'](text)[0]['label'],
            'qa': lambda q, ctx: models['question_answering'](question=q, context=ctx)['answer'],
            # Chat returns (textbox_value, updated_history): clear the textbox
            # and append (user message, generated reply) to the history.
            # Previously the reply was sent to the textbox and the history
            # recorded an empty assistant turn.
            'chat': lambda msg, history: (
                "",
                (history or []) + [(msg, models['chat'](msg, max_length=100, do_sample=True)[0]['generated_text'])]),
            'image_caption': lambda img: models['image_caption'](img)[0]['generated_text'],
            'tts': lambda text: models['text_to_speech'](text),
            'zero_shot': lambda text, labels: models['zero_shot'](text, labels)['labels'][0],
            'ner': lambda text: models['ner'](text)
        }

        return processors[service_type](*args)
    except Exception as e:
        return f"Erro: {str(e)}"
45
 
46
# Tabbed Gradio UI; every event handler routes through process_request.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Landing-page content. Use a context manager so the file handle is
    # closed (the original `open(...).read()` leaked it).
    with open("index.html") as _index_file:
        gr.HTML(_index_file.read())

    with gr.Tabs():
        with gr.TabItem("📝 Transcrição"):
            audio_input = gr.Audio(type="filepath")
            transcribe_button = gr.Button("Transcrever")
            transcription_output = gr.Textbox(label="Resultado")
            transcribe_button.click(lambda x: process_request('transcription', x),
                                    inputs=audio_input,
                                    outputs=transcription_output)

        with gr.TabItem("🔄 Tradução"):
            with gr.Row():
                text_to_translate = gr.Textbox(label="Texto")
                translation_direction = gr.Radio(["en_pt", "pt_en"], value="en_pt")
            translate_button = gr.Button("Traduzir")
            translation_output = gr.Textbox(label="Resultado")
            translate_button.click(lambda x, y: process_request('translation', x, y),
                                   inputs=[text_to_translate, translation_direction],
                                   outputs=translation_output)

        with gr.TabItem("📊 Análise"):
            with gr.Tabs():
                with gr.TabItem("Resumo"):
                    text_sum = gr.Textbox(label="Texto", lines=5)
                    sum_button = gr.Button("Resumir")
                    sum_output = gr.Textbox(label="Resultado")
                    sum_button.click(lambda x: process_request('summarization', x),
                                     inputs=text_sum,
                                     outputs=sum_output)

                with gr.TabItem("Sentimento"):
                    text_sent = gr.Textbox(label="Texto")
                    sent_button = gr.Button("Analisar")
                    sent_output = gr.Textbox(label="Resultado")
                    sent_button.click(lambda x: process_request('sentiment', x),
                                      inputs=text_sent,
                                      outputs=sent_output)

        with gr.TabItem("🤖 IA Avançada"):
            with gr.Tabs():
                with gr.TabItem("Chat"):
                    chatbot = gr.Chatbot()
                    msg = gr.Textbox(label="Mensagem")
                    clear = gr.Button("Limpar")
                    msg.submit(lambda x, y: process_request('chat', x, y),
                               inputs=[msg, chatbot],
                               outputs=[msg, chatbot])
                    clear.click(lambda: None, None, chatbot, queue=False)

                with gr.TabItem("Classificação"):
                    zero_text = gr.Textbox(label="Texto")
                    zero_labels = gr.Textbox(label="Categorias (separadas por vírgula)")
                    zero_button = gr.Button("Classificar")
                    zero_output = gr.Textbox(label="Resultado")
                    # Strip whitespace and drop empties so "a, b," parses
                    # to ["a", "b"] instead of ["a", " b", ""].
                    zero_button.click(
                        lambda x, y: process_request(
                            'zero_shot', x,
                            [label.strip() for label in y.split(',') if label.strip()]),
                        inputs=[zero_text, zero_labels],
                        outputs=zero_output)

# Guard the launch so importing this module (e.g. from tests or tools)
# does not start a server — the previous version of the file had this
# guard and it was dropped in the rewrite.
if __name__ == "__main__":
    demo.launch(share=True)