# import gradio as gr
# import cv2
# import numpy as np
# import onnxruntime as ort

# # Load the ONNX model using onnxruntime
# onnx_model_path = "Model_IV.onnx"  # Update with your ONNX model path
# session = ort.InferenceSession(onnx_model_path)

# # Function to perform object detection with the ONNX model
# def detect_objects(frame, confidence_threshold=0.5):
#     # Convert the frame from BGR (OpenCV) to RGB
#     image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    
#     # Preprocessing: Resize and normalize the image
#     # Assuming YOLO model input is 640x640, update according to your model's input size
#     input_size = (640, 640)
#     image_resized = cv2.resize(image, input_size)
#     image_normalized = image_resized / 255.0  # Normalize to [0, 1]
#     image_input = np.transpose(image_normalized, (2, 0, 1))  # Change to CHW format
#     image_input = np.expand_dims(image_input, axis=0).astype(np.float32)  # Add batch dimension

#     # Perform inference
#     inputs = {session.get_inputs()[0].name: image_input}
#     outputs = session.run(None, inputs)
    
#     # # Assuming YOLO model outputs are in the form of [boxes, confidences, class_probs]
#     # boxes, confidences, class_probs = outputs

#     # # Post-processing: Filter boxes by confidence threshold
#     # detections = []
#     # for i, confidence in enumerate(confidences[0]):
#     #     if confidence >= confidence_threshold:
#     #         x1, y1, x2, y2 = boxes[0][i]
#     #         class_id = np.argmax(class_probs[0][i])  # Get class with highest probability
#     #         detections.append((x1, y1, x2, y2, confidence, class_id))
    
#     # # Draw bounding boxes and labels on the image
#     # for (x1, y1, x2, y2, confidence, class_id) in detections:
#     #     color = (0, 255, 0)  # Green color for bounding boxes
#     #     cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
#     #     label = f"Class {class_id}: {confidence:.2f}"
#     #     cv2.putText(image, label, (int(x1), int(y1)-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    
#     # # Convert the image back to BGR for displaying in Gradio
#     # image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    
#     return outputs

# # Gradio interface to use the webcam for real-time object detection
# # A confidence-threshold slider was tried as a second input but is left out here
# iface = gr.Interface(
#     fn=detect_objects,
#     inputs=gr.Image(sources=["webcam"], type="numpy"),  # Webcam input
#     # Possible extra input: gr.Slider(0.0, 1.0, value=0.5, label="Confidence Threshold")
#     outputs="image",  # Shows raw model outputs; box drawing is commented out above
# )

# iface.launch()
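
# Note on the preprocessing above: a plain cv2.resize distorts the aspect
# ratio. YOLO pipelines usually letterbox instead (resize preserving aspect,
# then pad). A minimal sketch, assuming a square target size; the returned
# scale and offsets are needed to map predicted boxes back onto the frame:
#
# def letterbox(image, size=640, pad_value=114):
#     h, w = image.shape[:2]
#     scale = size / max(h, w)
#     resized = cv2.resize(image, (int(w * scale), int(h * scale)))
#     canvas = np.full((size, size, 3), pad_value, dtype=resized.dtype)
#     top = (size - resized.shape[0]) // 2
#     left = (size - resized.shape[1]) // 2
#     canvas[top:top + resized.shape[0], left:left + resized.shape[1]] = resized
#     return canvas, scale, (left, top)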
###
# import gradio as gr
# import cv2
# from huggingface_hub import hf_hub_download
# from gradio_webrtc import WebRTC
# from twilio.rest import Client
# import os
# from inference import YOLOv8

# model_file = hf_hub_download(
#     repo_id="aje6/ASL-Fingerspelling-Detection", filename="onnx/Model_IV.onnx"
# )

# model = YOLOv8(model_file)

# account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
# auth_token = os.environ.get("TWILIO_AUTH_TOKEN")

# if account_sid and auth_token:
#     client = Client(account_sid, auth_token)

#     token = client.tokens.create()

#     rtc_configuration = {
#         "iceServers": token.ice_servers,
#         "iceTransportPolicy": "relay",
#     }
# else:
#     rtc_configuration = None


# def detection(image, conf_threshold=0.3):
#     image = cv2.resize(image, (model.input_width, model.input_height))
#     new_image = model.detect_objects(image, conf_threshold)
#     return cv2.resize(new_image, (500, 500))


# css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
#                       .my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""


# with gr.Blocks(css=css) as demo:
#     gr.HTML(
#         """
#     <h1 style='text-align: center'>
#     YOLOv10 Webcam Stream (Powered by WebRTC ⚡️)
#     </h1>
#     """
#     )
#     gr.HTML(
#         """
#         <h3 style='text-align: center'>
#         <a href='https://arxiv.org/abs/2405.14458' target='_blank'>arXiv</a> | <a href='https://github.com/THU-MIG/yolov10' target='_blank'>github</a>
#         </h3>
#         """
#     )
#     with gr.Column(elem_classes=["my-column"]):
#         with gr.Group(elem_classes=["my-group"]):
#             image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
#             conf_threshold = gr.Slider(
#                 label="Confidence Threshold",
#                 minimum=0.0,
#                 maximum=1.0,
#                 step=0.05,
#                 value=0.30,
#             )

#         image.stream(
#             fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10
#         )

# if __name__ == "__main__":
#     demo.launch()

# import gradio as gr
# import numpy as np
# import cv2
# from ultralytics import YOLO

# model = YOLO('Model_IV.pt')

# def transform_cv2(frame, transform):
#     if transform == "cartoon":
#         # prepare color
#         img_color = cv2.pyrDown(cv2.pyrDown(frame))
#         for _ in range(6):
#             img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
#         img_color = cv2.pyrUp(cv2.pyrUp(img_color))

#         # prepare edges
#         img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
#         img_edges = cv2.adaptiveThreshold(
#             cv2.medianBlur(img_edges, 7),
#             255,
#             cv2.ADAPTIVE_THRESH_MEAN_C,
#             cv2.THRESH_BINARY,
#             9,
#             2,
#         )
#         img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
#         # combine color and edges
#         img = cv2.bitwise_and(img_color, img_edges)
#         return img
#     elif transform == "edges":
#         # perform edge detection
#         img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
#         return img
#     else:
#         return np.flipud(frame)

# with gr.Blocks() as demo:
#     with gr.Row():
#         with gr.Column():
#             transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
#                                     value="flip", label="Transformation")
#             input_img = gr.Image(sources=["webcam"], type="numpy")
#         with gr.Column():
#             output_img = gr.Image(streaming=True)
#         dep = input_img.stream(transform_cv2, [input_img, transform], [output_img],
#                                 time_limit=30, stream_every=0.1, concurrency_limit=30)

# if __name__ == "__main__":
#     demo.launch()

###

# import gradio as gr
# import torch
# import cv2

# # Load the YOLOv8 model (abandoned: torch.hub hosts ultralytics/yolov5, not yolov8)
# model = torch.hub.load('ultralytics/yolov8', 'yolov8s', trust_repo=True)
# model.load_state_dict(torch.load('Model_IV'))

# def inference(img):
#     results = model(img)
#     annotated_img = results.render()[0]
#     return annotated_img

# iface = gr.Interface(fn=inference, inputs="webcam", outputs="image")
# iface.launch()

import gradio as gr
from PIL import Image
import onnxruntime as ort
import cv2
import numpy as np

# Only needed for the commented-out PyTorch/Ultralytics variants above:
# import torch
# import torchvision.transforms as T
# from ultralytics import YOLO

# Load your model

# model = YOLO("Model_IV.pt")
# model = torch.load("Model_IV.pt")
# model.eval()
# checkpoint = torch.load("Model_IV.pt")
# model.load_state_dict(checkpoint)  # Load the saved weights
# model.eval()  # Set the model to evaluation mode

# Load the onnx model
model = ort.InferenceSession("Model_IV.onnx")
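
# Print the model's I/O signature as a sanity check; for a typical YOLOv8
# export the single output is shaped (1, 4 + num_classes, num_anchors), which
# dictates the postprocessing needed.
for node in model.get_inputs():
    print("model input:", node.name, node.shape, node.type)
for node in model.get_outputs():
    print("model output:", node.name, node.shape, node.type)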

# Define preprocessing
# transform = T.Compose([
#     T.Resize((224, 224)),  # Adjust to your model's input size
#     T.ToTensor(),
# ])

def predict(image):
    # # Preprocess the image
    # img_tensor = transform(image).unsqueeze(0)  # Add batch dimension
    
    # # # Make prediction
    # # with torch.no_grad():
    # #     output = model(img_tensor)
    
    # # Process output (adjust based on your model's format)
    # results = model(image)
    # annotated_img = results[0].plot()
    # return annotated_img

    # Preprocess the image

    # Get name and shape of the model's inputs
    input_name = model.get_inputs()[0].name
    input_shape = model.get_inputs()[0].shape

    # Resize to the model's input size; cv2.resize takes (width, height), which
    # for an NCHW input shape [N, C, H, W] is (input_shape[3], input_shape[2])
    image = cv2.resize(image, (input_shape[3], input_shape[2]))

    original_image_shape = image.shape
    print("Original image shape:", original_image_shape)

    # Reorder HWC -> CHW; np.transpose keeps pixels intact, whereas
    # image.reshape(3, 640, 640) would scramble them
    image = np.transpose(image, (2, 0, 1))

    # Normalize the input with ImageNet mean/std. Note: most YOLO exports
    # expect only the /255.0 scaling; keep the mean/std step only if the model
    # was trained with it.
    mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
    image = (image / 255.0 - mean) / std
    
    # Add a batch dimension if the model expects one, then cast to float32
    if len(input_shape) == 4 and input_shape[0] == 1:
        image = np.expand_dims(image, axis=0)
    image = image.astype(np.float32)

    print("Input image shape:", image.shape)
    
    # Make prediction
    output = model.run(None, {input_name: image})

    # print("Output shape:", output.shape)
    
    # print("type output:", type(output))
    # print(output)

    # Postprocess output image
    
    annotated_img = output[0]

    
    
    # annotated_img = (output[0] / 255.0 - mean)/std
    # annotated_img = classes[output[0][0].argmax(0)]
    
    print("Annotated image type before normalization:", type(annotated_img))
    # print("Annotated image before normalization:", annotated_img)
    print("Min value of image before normalization:", np.min(annotated_img))
    print("Max value of image before normalization:", np.max(annotated_img))

    # # Normalize output image using ImageNet-style normalization (again)
    # annotated_img = (annotated_img / 255.0 - mean)/std

    # Normalize output image using Min-Max normalization
    min_val = np.min(annotated_img)
    max_val = np.max(annotated_img)
    annotated_img = (annotated_img - min_val) / (max_val - min_val)

    print("Min value of image after normalization:", np.min(annotated_img))
    print("Max value of image after normalization:", np.max(annotated_img))
    print("annotated_img type after normalization:", type(annotated_img))
    # print("annotated_img shape after normalization:", annotated_img.shape)

    # Reshape to the (resized) input image's shape for display; this only works
    # when the output has exactly as many elements as that image, which holds
    # for image-to-image models but not for a typical detection head
    print("annotated_img shape before reshape:", annotated_img.shape)
    annotated_img = annotated_img.reshape(original_image_shape)
    print("annotated_img shape after reshape:", annotated_img.shape)

    # Convert to a PIL Image; fromarray needs uint8, so scale back from [0, 1]
    annotated_img = Image.fromarray((annotated_img * 255).astype(np.uint8))

    print("PIL Image type:", type(annotated_img))
    # print("PIL Image shape:", annotated_img.shape)

    return annotated_img
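
# predict() above only renders the raw output tensor. A hedged decoding sketch
# for a standard YOLOv8 ONNX export follows: it assumes an output of shape
# (1, 4 + num_classes, num_anchors) with (cx, cy, w, h) boxes at input scale,
# which should be verified against this model's export before use.
def decode_detections(raw_output, conf_threshold=0.5, iou_threshold=0.45):
    preds = np.squeeze(raw_output[0]).T          # -> (num_anchors, 4 + num_classes)
    scores = preds[:, 4:].max(axis=1)            # best class score per anchor
    keep = scores > conf_threshold
    preds, scores = preds[keep], scores[keep]
    if len(scores) == 0:
        return []
    class_ids = preds[:, 4:].argmax(axis=1)
    boxes = preds[:, :4].copy()                  # (cx, cy, w, h) -> (x, y, w, h)
    boxes[:, 0] -= boxes[:, 2] / 2
    boxes[:, 1] -= boxes[:, 3] / 2
    idxs = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(),
                            conf_threshold, iou_threshold)
    return [(boxes[i], float(scores[i]), int(class_ids[i]))
            for i in np.array(idxs).flatten()]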
    
# Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(sources=["webcam"], type="numpy"),  # Webcam frame as a numpy array
    outputs=gr.Image(type="pil"),  # Rendered output as a PIL Image
)

if __name__ == "__main__":
    demo.launch()