ruminasval committed on
Commit fc0fa35 · verified · 1 Parent(s): 2f457aa

Update app.py

Files changed (1)
  1. app.py +4 -152
app.py CHANGED
@@ -1,153 +1,3 @@
-import gradio as gr
-import torch
-from transformers import SwinForImageClassification, AutoFeatureExtractor
-import mediapipe as mp
-import cv2
-from PIL import Image
-import numpy as np
-import os
-
-# Face shape descriptions
-face_shape_descriptions = {
-    "Heart": "dengan dahi lebar dan dagu yang runcing.",
-    "Oblong": "yang lebih panjang dari lebar dengan garis pipi lurus.",
-    "Oval": "dengan proporsi seimbang dan dagu sedikit melengkung.",
-    "Round": "dengan garis rahang melengkung dan pipi penuh.",
-    "Square": "dengan rahang tegas dan dahi lebar."
-}
-
-# Frame images path
-glasses_images = {
-    "Oval": "glasses/oval.jpg",
-    "Round": "glasses/round.jpg",
-    "Square": "glasses/square.jpg",
-    "Octagon": "glasses/octagon.jpg",
-    "Cat Eye": "glasses/cat eye.jpg",
-    "Pilot (Aviator)": "glasses/aviator.jpg"
-}
-
-# Ensure the 'glasses' directory exists and contains the images
-if not os.path.exists("glasses"):
-    os.makedirs("glasses")
-# Create dummy image files if they don't exist
-for _, path in glasses_images.items():
-    if not os.path.exists(path):
-        dummy_image = Image.new('RGB', (200, 100), color='gray')
-        dummy_image.save(path)
-
-id2label = {0: 'Heart', 1: 'Oblong', 2: 'Oval', 3: 'Round', 4: 'Square'}
-label2id = {v: k for k, v in id2label.items()}
-
-# Load model
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model_checkpoint = "microsoft/swin-tiny-patch4-window7-224"
-feature_extractor = AutoFeatureExtractor.from_pretrained(model_checkpoint)
-
-model = SwinForImageClassification.from_pretrained(
-    model_checkpoint,
-    label2id=label2id,
-    id2label=id2label,
-    ignore_mismatched_sizes=True
-)
-
-# Load your trained weights
-# Ensure 'LR-0001-adamW-32-64swin.pth' is in the same directory or provide the correct path
-if os.path.exists('LR-0001-adamW-32-64swin.pth'):
-    state_dict = torch.load('LR-0001-adamW-32-64swin.pth', map_location=device)
-    model.load_state_dict(state_dict, strict=False)
-    model.to(device)
-    model.eval()
-else:
-    print("Warning: Trained weights file 'LR-0001-adamW-32-64swin.pth' not found. Using pre-trained weights only.")
-
-# Initialize Mediapipe
-mp_face_detection = mp.solutions.face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.5)
-
-# --- New: Decision tree function
-def recommend_glasses_tree(face_shape):
-    face_shape = face_shape.lower()
-    if face_shape == "square":
-        return ["Oval", "Round"]
-    elif face_shape == "round":
-        return ["Square", "Octagon", "Cat Eye"]
-    elif face_shape == "oval":
-        return ["Oval", "Pilot (Aviator)", "Cat Eye", "Round"]
-    elif face_shape == "heart":
-        return ["Pilot (Aviator)", "Cat Eye", "Round"]
-    elif face_shape == "oblong":
-        return ["Square", "Oval", "Pilot (Aviator)", "Cat Eye"]
-    else:
-        return []
-
-# Preprocess function
-def preprocess_image(image):
-    img = np.array(image, dtype=np.uint8)
-    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
-
-    results = mp_face_detection.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
-
-    if results.detections:
-        detection = results.detections[0]
-        bbox = detection.location_data.relative_bounding_box
-        h, w, _ = img.shape
-        x1 = int(bbox.xmin * w)
-        y1 = int(bbox.ymin * h)
-        x2 = int((bbox.xmin + bbox.width) * w)
-        y2 = int((bbox.ymin + bbox.height) * h)
-
-        img = img[y1:y2, x1:x2]
-    else:
-        raise ValueError("Wajah tidak terdeteksi.")
-
-    img = cv2.resize(img, (224, 224))
-    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-    inputs = feature_extractor(images=img, return_tensors="pt")
-    return inputs['pixel_values'].squeeze(0)
-
-# Prediction function
-def predict(image):
-    try:
-        inputs = preprocess_image(image).unsqueeze(0).to(device)
-        with torch.no_grad():
-            outputs = model(inputs)
-        probs = torch.nn.functional.softmax(outputs.logits, dim=1)
-        pred_idx = torch.argmax(probs, dim=1).item()
-        pred_label = id2label[pred_idx]
-        pred_prob = probs[0][pred_idx].item() * 100
-
-        # --- Use decision tree for recommendations
-        frame_recommendations = recommend_glasses_tree(pred_label)
-
-        description = face_shape_descriptions.get(pred_label, "tidak dikenali")
-        gallery_items = []
-
-        # Load images for all recommended frames
-        for frame in frame_recommendations:
-            frame_image_path = glasses_images.get(frame)
-            if frame_image_path and os.path.exists(frame_image_path):
-                try:
-                    frame_image = Image.open(frame_image_path)
-                    gallery_items.append((frame_image, frame))  # include the frame name
-                except Exception as e:
-                    print(f"Error loading image for {frame}: {e}")
-
-        # Build explanation text
-        if frame_recommendations:
-            recommended_frames_text = ', '.join(frame_recommendations)
-            explanation = (f"Bentuk wajah kamu adalah {pred_label} ({pred_prob:.2f}%). "
-                           f"Kamu memiliki bentuk wajah {description} "
-                           f"Rekomendasi bentuk kacamata yang sesuai dengan wajah kamu adalah: {recommended_frames_text}.")
-        else:
-            explanation = (f"Bentuk wajah kamu adalah {pred_label} ({pred_prob:.2f}%). "
-                           f"Tidak ada rekomendasi frame untuk bentuk wajah ini.")
-
-        return pred_label, explanation, gallery_items
-
-    except ValueError as ve:
-        return "Error", str(ve), []
-    except Exception as e:
-        return "Error", f"Terjadi kesalahan: {str(e)}", []
-
 # Gradio Interface
 with gr.Blocks(theme=gr.themes.Soft()) as iface:
     gr.Markdown("# Program Rekomendasi Kacamata Berdasarkan Bentuk Wajah")
@@ -155,8 +5,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
 
     with gr.Row():
         with gr.Column():
-            image_input = gr.Image(type="pil", file_count="single", interactive=False)
-            upload_button = gr.UploadButton("Unggah Gambar")
+            image_input = gr.Image(type="pil", image_mode="single", interactive=False, label="Unggah Gambar")  # Changed to "single" and added label
+            upload_button = gr.Button("Unggah Gambar")  # Add a button to upload the image
             confirm_button = gr.Button("Konfirmasi")
             restart_button = gr.Button("Restart")
         with gr.Column():
@@ -164,6 +14,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
            explanation_output = gr.Textbox(label="Penjelasan")
            recommendation_gallery = gr.Gallery(label="Rekomendasi Kacamata", columns=3, show_label=False)
 
+    # Adjust the actions
+    upload_button.click(lambda: None, inputs=None, outputs=[image_input])  # Handle image upload
     confirm_button.click(predict, inputs=image_input, outputs=[detected_shape, explanation_output, recommendation_gallery])
     restart_button.click(lambda: (None, "", [], []), inputs=None, outputs=[image_input, detected_shape, explanation_output, recommendation_gallery])
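Two notes on the `+` lines above. `gr.Image` has no `file_count` parameter (that belongs to `gr.File` and `gr.UploadButton`), and its `image_mode` argument expects a PIL mode string such as "RGB", not "single". Likewise, `upload_button.click(lambda: None, inputs=None, outputs=[image_input])` only resets the preview to None; it never receives a file. A minimal sketch of the wiring this commit appears to be aiming for, assuming Gradio 4.x (the show_upload helper is hypothetical, not part of the committed code):

    import gradio as gr
    from PIL import Image

    with gr.Blocks() as demo:
        # Read-only preview; users load images through the button below
        image_input = gr.Image(type="pil", interactive=False, label="Unggah Gambar")
        upload_button = gr.UploadButton("Unggah Gambar", file_types=["image"])

        def show_upload(path):
            # With the default type="filepath", UploadButton passes the
            # uploaded file's path as a string
            return Image.open(path)

        # .upload fires when a file is chosen; .click(lambda: None, ...) as in
        # the commit would only clear the image
        upload_button.upload(show_upload, inputs=upload_button, outputs=image_input)

    demo.launch()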
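Note also that after this commit app.py keeps only the interface: the deleted half defined predict, preprocess_image, the Swin model, and even `import gradio as gr`, so the surviving file raises a NameError as soon as it runs. If the intent was to move the model code into its own module, the UI would at minimum need imports along these lines (module name assumed, purely illustrative):

    import gradio as gr
    from model import predict  # assumed: a module holding the removed Swin/Mediapipe code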
21