random2222 committed on
Commit f3929e1 · verified · 1 Parent(s): fa0caf2

Update app.py

Files changed (1)
  1. app.py +220 -63
app.py CHANGED
@@ -1,64 +1,221 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import cv2
+ import numpy as np
+ import os
+ import tempfile
+ from PIL import Image
+
+ # ------------------- Common Functions ------------------- #
+ def enable_opencl():
+     cv2.ocl.setUseOpenCL(True)
+
+ # ------------------- Black & White Converter Functions ------------------- #
+ def convert_to_black_white(image, threshold_value=127, method="otsu"):
+     if isinstance(image, str):
+         image = cv2.imread(image)
+     elif isinstance(image, np.ndarray) and len(image.shape) == 3 and image.shape[2] == 4:
+         # Convert RGBA to BGR
+         image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
+
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+     if method == "adaptive":
+         binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                        cv2.THRESH_BINARY, 11, 2)
+     elif method == "otsu":
+         _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+     else:
+         _, binary = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY)
+     return binary
+
+ def process_image_bw(image, threshold_value, method):
+     return convert_to_black_white(image, threshold_value, method)
+
+ def process_video_bw(video_path, threshold_value, method):
+     temp_output = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
+
+     cap = cv2.VideoCapture(video_path)
+     if not cap.isOpened():
+         raise ValueError("Could not open video file.")
+
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+     out = cv2.VideoWriter(temp_output, fourcc, fps, (frame_width, frame_height), isColor=False)
+
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         bw_frame = convert_to_black_white(frame, threshold_value, method)
+         out.write(bw_frame)
+
+     cap.release()
+     out.release()
+
+     return temp_output
+
+ # ------------------- Pencil Sketch Converter Functions ------------------- #
+ def convert_to_sketch(image, intensity=255, blur_ksize=21, sigma=0):
+     if isinstance(image, str):
+         image = cv2.imread(image)
+     elif isinstance(image, np.ndarray) and len(image.shape) == 3 and image.shape[2] == 4:
+         # Convert RGBA to BGR
+         image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
+
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     inverted = cv2.bitwise_not(gray)
+     blur_ksize = blur_ksize if blur_ksize % 2 == 1 else blur_ksize + 1
+     blurred = cv2.GaussianBlur(inverted, (blur_ksize, blur_ksize), sigma)
+     sketch = cv2.divide(gray, cv2.bitwise_not(blurred), scale=intensity)
+     return sketch
+
+ def process_image_sketch(image, intensity, blur_ksize, sigma):
+     return convert_to_sketch(image, intensity, blur_ksize, sigma)
+
+ def process_video_sketch(video_path, intensity, blur_ksize, sigma):
+     temp_output = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
+
+     cap = cv2.VideoCapture(video_path)
+     if not cap.isOpened():
+         raise ValueError("Failed to open video file.")
+
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+     out = cv2.VideoWriter(temp_output, fourcc, fps, (frame_width, frame_height), isColor=True)
+
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         sketch = convert_to_sketch(frame, intensity, blur_ksize, sigma)
+         sketch_bgr = cv2.cvtColor(sketch, cv2.COLOR_GRAY2BGR)
+         out.write(sketch_bgr)
+
+     cap.release()
+     out.release()
+
+     return temp_output
+
+ # ------------------- Gradio UI ------------------- #
+ def bw_image_processor(image, threshold_value, method):
+     result = process_image_bw(image, threshold_value, method)
+     return result
+
+ def bw_video_processor(video, threshold_value, method):
+     result = process_video_bw(video, threshold_value, method)
+     return result
+
+ def sketch_image_processor(image, intensity, blur_ksize, sigma):
+     result = process_image_sketch(image, intensity, blur_ksize, sigma)
+     return result
+
+ def sketch_video_processor(video, intensity, blur_ksize, sigma):
+     result = process_video_sketch(video, intensity, blur_ksize, sigma)
+     return result
+
+ # Enable OpenCL for performance
+ enable_opencl()
+
+ # Create Gradio interface
+ with gr.Blocks(title="Image Processing Tool") as app:
+     gr.Markdown("# Image Processing Tool")
+
+     with gr.Tab("Black & White Converter"):
+         with gr.Row():
+             with gr.Column():
+                 gr.Markdown("## Convert Images to Black & White")
+                 bw_image_input = gr.Image(label="Input Image", type="numpy")
+                 bw_threshold = gr.Slider(minimum=0, maximum=255, value=127, step=1,
+                                          label="Threshold Value", visible=True)
+                 bw_method = gr.Radio(["otsu", "adaptive", "manual"], label="Thresholding Method", value="otsu")
+                 bw_image_button = gr.Button("Process Image")
+
+             with gr.Column():
+                 bw_image_output = gr.Image(label="Output Image")
+
+         gr.Markdown("---")
+
+         with gr.Row():
+             with gr.Column():
+                 gr.Markdown("## Convert Videos to Black & White")
+                 bw_video_input = gr.Video(label="Input Video")
+                 bw_video_threshold = gr.Slider(minimum=0, maximum=255, value=127, step=1,
+                                                label="Threshold Value", visible=True)
+                 bw_video_method = gr.Radio(["otsu", "adaptive", "manual"], label="Thresholding Method", value="otsu")
+                 bw_video_button = gr.Button("Process Video")
+
+             with gr.Column():
+                 bw_video_output = gr.Video(label="Output Video")
+
+         def update_threshold_visibility(method):
+             return gr.update(visible=(method == "manual"))
+
+         bw_method.change(fn=update_threshold_visibility, inputs=bw_method, outputs=bw_threshold)
+         bw_video_method.change(fn=update_threshold_visibility, inputs=bw_video_method, outputs=bw_video_threshold)
+
+         bw_image_button.click(
+             fn=bw_image_processor,
+             inputs=[bw_image_input, bw_threshold, bw_method],
+             outputs=bw_image_output
+         )
+
+         bw_video_button.click(
+             fn=bw_video_processor,
+             inputs=[bw_video_input, bw_video_threshold, bw_video_method],
+             outputs=bw_video_output
+         )
+
+     with gr.Tab("Pencil Sketch Converter"):
+         with gr.Row():
+             with gr.Column():
+                 gr.Markdown("## Convert Images to Pencil Sketch")
+                 sketch_image_input = gr.Image(label="Input Image", type="numpy")
+                 sketch_intensity = gr.Slider(minimum=1, maximum=255, value=255, step=1,
+                                              label="Intensity (1-255)")
+                 sketch_blur = gr.Slider(minimum=1, maximum=99, value=21, step=2,
+                                         label="Blur Kernel Size (odd, 1-99)")
+                 sketch_sigma = gr.Slider(minimum=0, maximum=50, value=0, step=0.1,
+                                          label="Standard Deviation (0-50)")
+                 sketch_image_button = gr.Button("Process Image")
+
+             with gr.Column():
+                 sketch_image_output = gr.Image(label="Output Image")
+
+         gr.Markdown("---")
+
+         with gr.Row():
+             with gr.Column():
+                 gr.Markdown("## Convert Videos to Pencil Sketch")
+                 sketch_video_input = gr.Video(label="Input Video")
+                 sketch_video_intensity = gr.Slider(minimum=1, maximum=255, value=255, step=1,
+                                                    label="Intensity (1-255)")
+                 sketch_video_blur = gr.Slider(minimum=1, maximum=99, value=21, step=2,
+                                               label="Blur Kernel Size (odd, 1-99)")
+                 sketch_video_sigma = gr.Slider(minimum=0, maximum=50, value=0, step=0.1,
+                                                label="Standard Deviation (0-50)")
+                 sketch_video_button = gr.Button("Process Video")
+
+             with gr.Column():
+                 sketch_video_output = gr.Video(label="Output Video")
+
+         sketch_image_button.click(
+             fn=sketch_image_processor,
+             inputs=[sketch_image_input, sketch_intensity, sketch_blur, sketch_sigma],
+             outputs=sketch_image_output
+         )
+
+         sketch_video_button.click(
+             fn=sketch_video_processor,
+             inputs=[sketch_video_input, sketch_video_intensity, sketch_video_blur, sketch_video_sigma],
+             outputs=sketch_video_output
+         )
+
+ # Launch the app
+ app.launch()
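
Note 1: cv2.imread returns BGR, but gr.Image(type="numpy") hands the callbacks an RGB array, while frames pulled from cv2.VideoCapture are BGR again. convert_to_black_white and convert_to_sketch apply cv2.COLOR_BGR2GRAY in every case, so UI image uploads get the red and blue grayscale weights swapped (the conversion weighs red at about 0.299 and blue at about 0.114, so the shift is subtle but real). A minimal sketch of a normalizing helper for the image entry points, assuming arrays reaching them always come from Gradio in RGB/RGBA order; to_bgr is hypothetical and not part of this commit:

import cv2

def to_bgr(image):
    # Hypothetical helper for the image entry points only; frames read
    # from cv2.VideoCapture are already BGR and should bypass it.
    if isinstance(image, str):
        return cv2.imread(image)                        # imread yields BGR
    if image.ndim == 3 and image.shape[2] == 4:
        return cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)  # Gradio RGBA upload
    if image.ndim == 3:
        return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)   # Gradio RGB upload
    return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)      # already grayscale

process_image_bw and process_image_sketch could then call to_bgr(image) before the existing grayscale conversion.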
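
Note 2: cv2.ocl.setUseOpenCL(True) by itself does not speed up calls that operate on plain numpy arrays; OpenCV's transparent API dispatches to OpenCL only when the data is wrapped in cv2.UMat. A sketch of what the per-frame hot path could look like if enable_opencl() is meant to pay off (a hypothetical refactor, not what this commit does):

import cv2

def otsu_frame_opencl(frame):
    # Route the frame through the transparent API: upload to the OpenCL
    # device, run grayscale + Otsu there, then download the result.
    u = cv2.UMat(frame)
    gray = cv2.cvtColor(u, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary.get()  # back to a numpy array for VideoWriter.write

Whether this wins anything depends on frame size and the device OpenCV finds, so it is worth benchmarking before adopting.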
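
Note 3: the 'mp4v' fourcc makes cv2.VideoWriter emit MPEG-4 Part 2 video, which most browsers will not decode, so a processed clip can download fine yet fail to play in the gr.Video preview. A common workaround is a one-shot re-encode to H.264; a sketch, assuming the ffmpeg binary is on PATH (not guaranteed in every environment):

import shutil
import subprocess
import tempfile

def reencode_h264(src_path):
    # Re-encode the mp4v output to browser-friendly H.264, falling back
    # to the original file when ffmpeg is unavailable.
    if shutil.which("ffmpeg") is None:
        return src_path
    dst_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    subprocess.run(
        ["ffmpeg", "-y", "-i", src_path,
         "-c:v", "libx264", "-pix_fmt", "yuv420p", dst_path],
        check=True,
    )
    return dst_path

process_video_bw and process_video_sketch could return reencode_h264(temp_output) instead of temp_output.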