rmayormartins committed
Commit 7731d94
1 Parent(s): bdd4371
Files changed (2)
  1. app.py +304 -144
  2. requirements.txt +2 -1
app.py CHANGED
(previous version of app.py; lines removed by this commit are prefixed with -)
@@ -7,145 +7,275 @@ import torch.optim as optim
  from torchvision import datasets, transforms, models
  from torch.utils.data import DataLoader, random_split
  from PIL import Image
  import tempfile
  import warnings
  warnings.filterwarnings("ignore")

- # Simple global state
- model = None
- train_loader = None
- test_loader = None
- dataset_path = None
- class_names = ["classe_0", "classe_1"]
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- def setup_dataset():
-     """Creates the folder structure"""
-     global dataset_path
-     dataset_path = tempfile.mkdtemp()
-
-     # Create folders for the 2 classes
-     for i in range(2):
-         os.makedirs(os.path.join(dataset_path, f"classe_{i}"), exist_ok=True)
-
-     return f"✅ Dataset criado em: {dataset_path}"

- def save_image(image, class_id):
-     """Saves an image to the specified class"""
-     if dataset_path is None:
-         return "❌ Execute 'Criar Dataset' primeiro"
-
      if image is None:
-         return "❌ Selecione uma imagem"

      try:
-         class_dir = os.path.join(dataset_path, f"classe_{int(class_id)}")

-         # Save the image
-         import time
-         filename = f"img_{int(time.time())}.jpg"
-         filepath = os.path.join(class_dir, filename)
-         image.save(filepath)

-         return f"✅ Imagem salva na classe {int(class_id)}"
      except Exception as e:
          return f"❌ Erro: {str(e)}"

- def prepare_and_train():
-     """Prepares the data and trains the model"""
-     global model, train_loader, test_loader
-
      try:
-         if dataset_path is None:
-             return "❌ Crie o dataset primeiro"

-         # Transformations
          transform = transforms.Compose([
              transforms.Resize((224, 224)),
              transforms.ToTensor(),
              transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
          ])

-         # Load the dataset
-         dataset = datasets.ImageFolder(dataset_path, transform=transform)

-         if len(dataset) < 4:
-             return f"❌ Poucas imagens ({len(dataset)}). Adicione pelo menos 2 por classe."

-         # Split the data: 70% train, 30% test
          train_size = int(0.7 * len(dataset))
-         test_size = len(dataset) - train_size

-         train_dataset, test_dataset = random_split(dataset, [train_size, test_size])

-         train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
-         test_loader = DataLoader(test_dataset, batch_size=4, shuffle=False)

          # Load the model
-         model = models.mobilenet_v2(pretrained=True)
-         model.classifier = nn.Sequential(
-             nn.Dropout(0.2),
-             nn.Linear(model.classifier[1].in_features, 2)
-         )
-         model = model.to(device)

-         # Train
          criterion = nn.CrossEntropyLoss()
-         optimizer = optim.Adam(model.parameters(), lr=0.001)

-         model.train()
-         for epoch in range(3):  # Only 3 epochs
-             for inputs, labels in train_loader:
                  inputs, labels = inputs.to(device), labels.to(device)

                  optimizer.zero_grad()
-                 outputs = model(inputs)
                  loss = criterion(outputs, labels)
                  loss.backward()
                  optimizer.step()

-         return f"✅ Modelo treinado! Dataset: {train_size} treino, {test_size} teste"
-
      except Exception as e:
          return f"❌ Erro: {str(e)}"

  def evaluate_model():
-     """Evaluates the model"""
-     global model, test_loader
-
-     if model is None or test_loader is None:
-         return "❌ Treine o modelo primeiro"
-
      try:
-         model.eval()
-         correct = 0
-         total = 0

          with torch.no_grad():
-             for inputs, labels in test_loader:
                  inputs, labels = inputs.to(device), labels.to(device)
-                 outputs = model(inputs)
-                 _, predicted = torch.max(outputs, 1)
-                 total += labels.size(0)
-                 correct += (predicted == labels).sum().item()
-
-         accuracy = 100 * correct / total if total > 0 else 0
-         return f"📊 Acurácia: {accuracy:.2f}% ({correct}/{total})"

      except Exception as e:
          return f"❌ Erro: {str(e)}"

- def predict_single_image(image):
      """Predicts a single image"""
-     global model
-
-     if model is None:
-         return "❌ Treine o modelo primeiro"
-
-     if image is None:
-         return "❌ Selecione uma imagem"
-
      try:
          transform = transforms.Compose([
              transforms.Resize((224, 224)),
              transforms.ToTensor(),
@@ -154,76 +284,106 @@ def predict_single_image(image):

          img_tensor = transform(image).unsqueeze(0).to(device)

-         model.eval()
          with torch.no_grad():
-             outputs = model(img_tensor)
              probs = torch.nn.functional.softmax(outputs[0], dim=0)
              _, predicted = torch.max(outputs, 1)

          class_id = predicted.item()
          confidence = probs[class_id].item() * 100
-         class_name = class_names[class_id]

          return f"🎯 Predição: {class_name}\n📊 Confiança: {confidence:.2f}%"
-
      except Exception as e:
          return f"❌ Erro: {str(e)}"

- def set_class_names(name0, name1):
-     """Sets the class names"""
-     global class_names
-
-     if not name0.strip() or not name1.strip():
-         return "❌ Preencha ambos os nomes"
-
-     class_names = [name0.strip(), name1.strip()]
-     return f" Classes: {class_names[0]} e {class_names[1]}"
-
- # Ultra-simple interface
- with gr.Blocks(title="🖼️ Classificador Simples") as demo:
-
-     gr.Markdown("# 🖼️ Classificador de Imagens Simples")
-
-     with gr.Row():
-         with gr.Column():
-             gr.Markdown("### 1️⃣ Configurar Classes")
-             class_0_name = gr.Textbox(label="Nome Classe 0", value="gato")
-             class_1_name = gr.Textbox(label="Nome Classe 1", value="cachorro")
-             set_names_btn = gr.Button("🏷️ Definir Nomes")
-             names_status = gr.Textbox(label="Status")

-             gr.Markdown("### 2️⃣ Criar Dataset")
-             create_btn = gr.Button("🔧 Criar Dataset", variant="primary")
-             create_status = gr.Textbox(label="Status")
-
-         with gr.Column():
-             gr.Markdown("### 3️⃣ Adicionar Imagens")
-             upload_image = gr.Image(type="pil", label="Imagem")
-             class_selector = gr.Number(label="Classe (0 ou 1)", value=0, precision=0)
-             save_btn = gr.Button("💾 Salvar Imagem")
-             save_status = gr.Textbox(label="Status")
-
-     with gr.Row():
-         with gr.Column():
-             gr.Markdown("### 4️⃣ Treinar")
-             train_btn = gr.Button("🚀 Preparar + Treinar", variant="primary")
-             train_status = gr.Textbox(label="Status", lines=3)

-             eval_btn = gr.Button("📊 Avaliar")
-             eval_status = gr.Textbox(label="Resultado")
-
-         with gr.Column():
-             gr.Markdown("### 5️⃣ Predizer")
-             predict_image = gr.Image(type="pil", label="Imagem para Predição")
-             predict_btn = gr.Button("🔮 Predizer")
-             predict_result = gr.Textbox(label="Resultado")

-     # Wire up the events
-     set_names_btn.click(set_class_names, [class_0_name, class_1_name], names_status)
-     create_btn.click(setup_dataset, outputs=create_status)
-     save_btn.click(save_image, [upload_image, class_selector], save_status)
-     train_btn.click(prepare_and_train, outputs=train_status)
-     eval_btn.click(evaluate_model, outputs=eval_status)
-     predict_btn.click(predict_single_image, predict_image, predict_result)
-
- demo.launch()
 
(updated version of app.py; lines added by this commit are prefixed with +)
  from torchvision import datasets, transforms, models
  from torch.utils.data import DataLoader, random_split
  from PIL import Image
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ import numpy as np
+ from sklearn.metrics import classification_report, confusion_matrix
  import tempfile
  import warnings
  warnings.filterwarnings("ignore")

+ print("🖥️ Iniciando sistema...")
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"Device: {device}")

+ # Available models
+ MODELS = {
+     'ResNet18': models.resnet18,
+     'ResNet34': models.resnet34,
+     'MobileNetV2': models.mobilenet_v2
+ }

+ # Global state
+ class AppState:
+     def __init__(self):
+         self.model = None
+         self.train_loader = None
+         self.val_loader = None
+         self.test_loader = None
+         self.dataset_path = None
+         self.class_dirs = []
+         self.class_labels = ['classe_0', 'classe_1']
+         self.num_classes = 2
+         self.image_queue = []  # Stores uploaded images
+
+ state = AppState()
+
+ def setup_classes(num_classes_value):
+     """Configures the number of classes"""
+     try:
+         state.num_classes = int(num_classes_value)
+         state.dataset_path = tempfile.mkdtemp()
+         state.class_labels = [f'classe_{i}' for i in range(state.num_classes)]
+
+         # Create the class directories
+         state.class_dirs = []
+         for i in range(state.num_classes):
+             class_dir = os.path.join(state.dataset_path, f'classe_{i}')
+             os.makedirs(class_dir, exist_ok=True)
+             state.class_dirs.append(class_dir)
+
+         return f"✅ Sistema configurado para {state.num_classes} classes"
+     except Exception as e:
+         return f"❌ Erro: {str(e)}"
+
+ def set_class_labels(labels_text):
+     """Sets the class labels"""
+     try:
+         labels = [label.strip() for label in labels_text.split(',')]
+         if len(labels) != state.num_classes:
+             return f"❌ Forneça {state.num_classes} rótulos separados por vírgula"
+
+         state.class_labels = labels
+         return f"✅ Rótulos definidos: {', '.join(state.class_labels)}"
+     except Exception as e:
+         return f"❌ Erro: {str(e)}"
+
+ def add_image_to_queue(image):
+     """Adds an image to the queue"""
      if image is None:
+         return "❌ Selecione uma imagem", 0

+     state.image_queue.append(image)
+     return f"✅ Imagem adicionada à fila. Total: {len(state.image_queue)}", len(state.image_queue)
+
+ def save_images_to_class(class_id, clear_queue=True):
+     """Saves every queued image into the given class"""
      try:
+         if not state.image_queue:
+             return "❌ Nenhuma imagem na fila"

+         if not state.class_dirs:
+             return "❌ Configure as classes primeiro"

+         class_idx = int(class_id)
+         if class_idx >= len(state.class_dirs):
+             return "❌ Classe inválida"
+
+         class_dir = state.class_dirs[class_idx]
+         count = 0
+
+         for i, image in enumerate(state.image_queue):
+             try:
+                 import time
+                 filename = f"img_{int(time.time())}_{i}.jpg"
+                 filepath = os.path.join(class_dir, filename)
+                 image.save(filepath)
+                 count += 1
+             except Exception as e:
+                 print(f"Erro salvando imagem {i}: {e}")
+
+         if clear_queue:
+             state.image_queue = []
+
+         class_name = state.class_labels[class_idx]
+         return f"✅ {count} imagens salvas em '{class_name}'"
      except Exception as e:
          return f"❌ Erro: {str(e)}"

+ def clear_image_queue():
+     """Clears the image queue"""
+     state.image_queue = []
+     return "✅ Fila limpa", 0
+
+ def prepare_data(batch_size):
+     """Prepares the data for training"""
      try:
+         if not state.dataset_path:
+             return "❌ Configure as classes primeiro"

          transform = transforms.Compose([
              transforms.Resize((224, 224)),
              transforms.ToTensor(),
              transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
          ])

+         dataset = datasets.ImageFolder(state.dataset_path, transform=transform)

+         if len(dataset) < 6:
+             return f"❌ Poucas imagens ({len(dataset)}). Mínimo: 6"

+         # Split: 70% train, 20% validation, 10% test
          train_size = int(0.7 * len(dataset))
+         val_size = int(0.2 * len(dataset))
+         test_size = len(dataset) - train_size - val_size

+         train_dataset, val_dataset, test_dataset = random_split(
+             dataset, [train_size, val_size, test_size],
+             generator=torch.Generator().manual_seed(42)
+         )

+         batch_size = max(1, min(int(batch_size), 32))
+         state.train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+         state.val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
+         state.test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
+
+         return f"✅ Dados preparados:\n• Treino: {train_size}\n• Validação: {val_size}\n• Teste: {test_size}\n• Batch size: {batch_size}"
+     except Exception as e:
+         return f"❌ Erro: {str(e)}"
+
+ def train_model(model_name, epochs, lr):
+     """Trains the model"""
+     try:
+         if state.train_loader is None:
+             return "❌ Prepare os dados primeiro"

          # Load the model
+         state.model = MODELS[model_name](pretrained=True)
+
+         # Adapt the final layer to the number of classes
+         if hasattr(state.model, 'fc'):
+             state.model.fc = nn.Linear(state.model.fc.in_features, state.num_classes)
+         elif hasattr(state.model, 'classifier'):
+             if isinstance(state.model.classifier, nn.Sequential):
+                 state.model.classifier[-1] = nn.Linear(state.model.classifier[-1].in_features, state.num_classes)

+         state.model = state.model.to(device)
          criterion = nn.CrossEntropyLoss()
+         optimizer = optim.Adam(state.model.parameters(), lr=float(lr))

+         results = [f"🚀 Treinando {model_name}"]
+         state.model.train()
+
+         for epoch in range(int(epochs)):
+             running_loss = 0.0
+             correct = 0
+             total = 0
+
+             for inputs, labels in state.train_loader:
                  inputs, labels = inputs.to(device), labels.to(device)

                  optimizer.zero_grad()
+                 outputs = state.model(inputs)
                  loss = criterion(outputs, labels)
                  loss.backward()
                  optimizer.step()
+
+                 running_loss += loss.item()
+                 _, predicted = torch.max(outputs, 1)
+                 total += labels.size(0)
+                 correct += (predicted == labels).sum().item()
+
+             epoch_loss = running_loss / len(state.train_loader)
+             epoch_acc = 100. * correct / total
+             results.append(f"Época {epoch+1}: Loss={epoch_loss:.4f}, Acc={epoch_acc:.2f}%")

+         results.append("✅ Treinamento concluído!")
+         return "\n".join(results)
      except Exception as e:
          return f"❌ Erro: {str(e)}"

  def evaluate_model():
+     """Evaluates the model on the test split"""
      try:
+         if state.model is None or state.test_loader is None:
+             return "❌ Modelo/dados não disponíveis"
+
+         state.model.eval()
+         all_preds = []
+         all_labels = []

          with torch.no_grad():
+             for inputs, labels in state.test_loader:
                  inputs, labels = inputs.to(device), labels.to(device)
+                 outputs = state.model(inputs)
+                 _, preds = torch.max(outputs, 1)
+                 all_preds.extend(preds.cpu().numpy())
+                 all_labels.extend(labels.cpu().numpy())

+         report = classification_report(all_labels, all_preds, target_names=state.class_labels, zero_division=0)
+         return f"📊 RELATÓRIO DE AVALIAÇÃO:\n\n{report}"
      except Exception as e:
          return f"❌ Erro: {str(e)}"

+ def generate_confusion_matrix():
+     """Generates the confusion matrix plot"""
+     try:
+         if state.model is None or state.test_loader is None:
+             return None
+
+         state.model.eval()
+         all_preds = []
+         all_labels = []
+
+         with torch.no_grad():
+             for inputs, labels in state.test_loader:
+                 inputs, labels = inputs.to(device), labels.to(device)
+                 outputs = state.model(inputs)
+                 _, preds = torch.max(outputs, 1)
+                 all_preds.extend(preds.cpu().numpy())
+                 all_labels.extend(labels.cpu().numpy())
+
+         cm = confusion_matrix(all_labels, all_preds)
+
+         plt.figure(figsize=(8, 6))
+         sns.heatmap(cm, annot=True, fmt="d", cmap="Blues",
+                     xticklabels=state.class_labels,
+                     yticklabels=state.class_labels)
+         plt.xlabel('Predições')
+         plt.ylabel('Valores Reais')
+         plt.title('Matriz de Confusão')
+         plt.tight_layout()
+
+         temp_path = tempfile.NamedTemporaryFile(suffix='.png', delete=False).name
+         plt.savefig(temp_path, dpi=150, bbox_inches='tight')
+         plt.close()
+
+         return temp_path
+     except Exception as e:
+         return None
+
+ def predict_image(image):
      """Predicts a single image"""
      try:
+         if state.model is None:
+             return "❌ Treine o modelo primeiro"
+
+         if image is None:
+             return "❌ Selecione uma imagem"
+
          transform = transforms.Compose([
              transforms.Resize((224, 224)),
              transforms.ToTensor(),

          img_tensor = transform(image).unsqueeze(0).to(device)

+         state.model.eval()
          with torch.no_grad():
+             outputs = state.model(img_tensor)
              probs = torch.nn.functional.softmax(outputs[0], dim=0)
              _, predicted = torch.max(outputs, 1)

          class_id = predicted.item()
          confidence = probs[class_id].item() * 100
+         class_name = state.class_labels[class_id]

          return f"🎯 Predição: {class_name}\n📊 Confiança: {confidence:.2f}%"
      except Exception as e:
          return f"❌ Erro: {str(e)}"

+ # Interface built with older/more stable components
+ def create_interface():
+     with gr.Blocks(title="🖼️ Classificador Completo") as demo:
+
+         gr.Markdown("# 🖼️ Sistema de Classificação de Imagens Completo")
+
+         # Configuration
+         with gr.Group():
+             gr.Markdown("## 1️⃣ Configuração")
+             with gr.Row():
+                 num_classes = gr.Number(label="Número de Classes (2-5)", value=2, precision=0)
+                 setup_btn = gr.Button("🔧 Configurar")
+             setup_status = gr.Textbox(label="Status")

+             labels_input = gr.Textbox(label="Rótulos (separados por vírgula)", value="gato,cachorro")
+             labels_btn = gr.Button("🏷️ Definir Rótulos")
+             labels_status = gr.Textbox(label="Status dos Rótulos")
+
+         # Image upload
+         with gr.Group():
+             gr.Markdown("## 2️⃣ Upload de Imagens")
+             with gr.Row():
+                 upload_image = gr.Image(type="pil", label="Upload de Imagem")
+                 with gr.Column():
+                     add_btn = gr.Button("➕ Adicionar à Fila")
+                     queue_status = gr.Textbox(label="Fila de Imagens")
+                     queue_count = gr.Number(label="Total na Fila", value=0)
+
+             with gr.Row():
+                 class_id = gr.Number(label="Classe (0, 1, 2...)", value=0, precision=0)
+                 save_btn = gr.Button("💾 Salvar Fila na Classe", variant="primary")
+                 clear_btn = gr.Button("🗑️ Limpar Fila")
+             save_status = gr.Textbox(label="Status do Upload")
+
+         # Preparation and training
+         with gr.Group():
+             gr.Markdown("## 3️⃣ Preparação e Treinamento")
+             batch_size = gr.Number(label="Batch Size", value=8, precision=0)
+             prepare_btn = gr.Button("⚙️ Preparar Dados", variant="primary")
+             prepare_status = gr.Textbox(label="Status da Preparação", lines=4)

+             with gr.Row():
+                 model_choice = gr.Dropdown(choices=list(MODELS.keys()), value="MobileNetV2", label="Modelo")
+                 epochs = gr.Number(label="Épocas", value=5, precision=0)
+                 learning_rate = gr.Number(label="Learning Rate", value=0.001)
+
+             train_btn = gr.Button("🚀 Treinar Modelo", variant="primary")
+             train_status = gr.Textbox(label="Status do Treinamento", lines=8)
+
+         # Evaluation
+         with gr.Group():
+             gr.Markdown("## 4️⃣ Avaliação")
+             with gr.Row():
+                 eval_btn = gr.Button("📊 Avaliar Modelo", variant="primary")
+                 matrix_btn = gr.Button("📈 Matriz de Confusão")
+
+             eval_results = gr.Textbox(label="Relatório de Avaliação", lines=12)
+             confusion_plot = gr.Image(label="Matriz de Confusão")
+
+         # Prediction
+         with gr.Group():
+             gr.Markdown("## 5️⃣ Predição")
+             predict_img = gr.Image(type="pil", label="Imagem para Predição")
+             predict_btn = gr.Button("🔮 Predizer", variant="primary")
+             predict_result = gr.Textbox(label="Resultado da Predição", lines=3)
+
+         # Wire up the events
+         setup_btn.click(setup_classes, [num_classes], [setup_status])
+         labels_btn.click(set_class_labels, [labels_input], [labels_status])
+
+         add_btn.click(add_image_to_queue, [upload_image], [queue_status, queue_count])
+         save_btn.click(save_images_to_class, [class_id], [save_status])
+         clear_btn.click(clear_image_queue, outputs=[queue_status, queue_count])
+
+         prepare_btn.click(prepare_data, [batch_size], [prepare_status])
+         train_btn.click(train_model, [model_choice, epochs, learning_rate], [train_status])
+
+         eval_btn.click(evaluate_model, outputs=[eval_results])
+         matrix_btn.click(generate_confusion_matrix, outputs=[confusion_plot])
+
+         predict_btn.click(predict_image, [predict_img], [predict_result])

+     return demo
+
+ if __name__ == "__main__":
+     print("🎯 Criando interface...")
+     demo = create_interface()
+     print("🚀 Iniciando aplicação...")
+     demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
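The core change in train_model is the classifier-head swap: whichever backbone is chosen from MODELS, its final layer is replaced by an nn.Linear sized to state.num_classes. A minimal standalone sketch of that pattern follows, assuming the torchvision version pinned in requirements.txt; the helper name adapt_head is illustrative and not part of this commit.

import torch.nn as nn
from torchvision import models

def adapt_head(model, num_classes):
    # ResNet18/ResNet34 expose their final layer as `fc`
    if hasattr(model, 'fc') and isinstance(model.fc, nn.Linear):
        model.fc = nn.Linear(model.fc.in_features, num_classes)
    # MobileNetV2 exposes a Sequential `classifier` whose last module is Linear
    elif hasattr(model, 'classifier') and isinstance(model.classifier, nn.Sequential):
        model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, num_classes)
    return model

# Example: a 3-class MobileNetV2, matching an entry in the app's MODELS dict
net = adapt_head(models.mobilenet_v2(pretrained=True), num_classes=3)

With torchvision 0.15.2 as pinned here, pretrained=True still works but emits a deprecation warning; the weights= argument is the newer spelling.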
 
 
requirements.txt CHANGED
@@ -1,7 +1,8 @@
- gradio==4.8.0
+ gradio==4.15.0
  torch==2.0.1
  torchvision==0.15.2
  scikit-learn==1.3.0
  matplotlib==3.7.1
+ seaborn==0.12.2
  numpy==1.24.3
  Pillow==9.5.0