wolfofbackstreet committed on
Commit dc9bb94 · verified · 1 Parent(s): b9bd75d

Upload 3 files

Files changed (3)
  1. cmd_helper.py +56 -0
  2. notebook_utils.py +756 -0
  3. pip_helper.py +10 -0
cmd_helper.py ADDED
@@ -0,0 +1,56 @@
import logging
import subprocess  # nosec - disable B404:import-subprocess check
import sys
import os
from pathlib import Path
import platform


def clone_repo(repo_url: str, revision: str = None, add_to_sys_path: bool = True) -> Path:
    """Clone a git repository, optionally check out a revision, and add the repo to sys.path."""
    repo_path = Path(repo_url.split("/")[-1].replace(".git", ""))

    if not repo_path.exists():
        try:
            subprocess.run(["git", "clone", repo_url], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        except Exception as exc:
            print(f"Failed to clone the repository: {exc.stderr}")
            raise

        if revision:
            # use run (rather than Popen) so the checkout finishes and failures are raised before continuing
            subprocess.run(["git", "checkout", revision], cwd=str(repo_path), check=True)
    if add_to_sys_path and str(repo_path.resolve()) not in sys.path:
        sys.path.insert(0, str(repo_path.resolve()))

    return repo_path

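
# Example usage (illustrative sketch, not part of the uploaded file; the URL is a placeholder):
example_repo = clone_repo("https://github.com/example/some_repo.git", revision=None, add_to_sys_path=True)
print(f"Cloned to {example_repo.resolve()}, now importable via sys.path")
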
def optimum_cli(model_id, output_dir, show_command=True, additional_args: dict[str, str] = None, debug_logs=False):
    """Export a model to OpenVINO IR with the optimum-cli tool, optionally showing the command used."""
    export_command = f"optimum-cli export openvino --model {model_id} {output_dir}"
    if additional_args is not None:
        for arg, value in additional_args.items():
            export_command += f" --{arg}"
            if value:
                export_command += f" {value}"

    if show_command:
        from IPython.display import Markdown, display

        display(Markdown("**Export command:**"))
        display(Markdown(f"`{export_command}`"))

    transformers_loglevel = None
    if debug_logs:
        transformers_loglevel = os.environ.pop("TRANSFORMERS_VERBOSITY", None)
        os.environ["TRANSFORMERS_VERBOSITY"] = "debug"

    try:
        subprocess.run(export_command.split(" "), shell=(platform.system() == "Windows"), check=True, capture_output=True)
    except subprocess.CalledProcessError as exc:
        logger = logging.getLogger()
        logger.exception(exc.stderr)
        if transformers_loglevel is not None:
            os.environ["TRANSFORMERS_VERBOSITY"] = transformers_loglevel
        raise exc
    finally:
        if transformers_loglevel is not None:
            os.environ["TRANSFORMERS_VERBOSITY"] = transformers_loglevel
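
# Example usage (illustrative sketch, not part of the uploaded file; the model id and the extra
# flag are placeholders, assuming standard optimum-cli options such as --weight-format):
optimum_cli(
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "tinyllama_ov",
    additional_args={"weight-format": "int4"},
)
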
notebook_utils.py ADDED
@@ -0,0 +1,756 @@
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import platform
import sys
import threading
import time
from os import PathLike
from pathlib import Path
from typing import NamedTuple, Optional


# ## Files
#
# Load an image, download a file, download an IR model, and create a progress bar to show download progress.

# In[ ]:


def device_widget(default="AUTO", exclude=None, added=None, description="Device:"):
    """Create an ipywidgets dropdown listing the available OpenVINO devices (plus AUTO)."""
    import openvino as ov
    import ipywidgets as widgets

    core = ov.Core()

    supported_devices = core.available_devices + ["AUTO"]
    exclude = exclude or []
    if exclude:
        for ex_device in exclude:
            if ex_device in supported_devices:
                supported_devices.remove(ex_device)

    added = added or []
    if added:
        for add_device in added:
            if add_device not in supported_devices:
                supported_devices.append(add_device)

    device = widgets.Dropdown(
        options=supported_devices,
        value=default,
        description=description,
        disabled=False,
    )
    return device


def quantization_widget(default=True):
    """Create an ipywidgets checkbox that toggles whether quantization should be applied."""
    import ipywidgets as widgets

    to_quantize = widgets.Checkbox(
        value=default,
        description="Quantization",
        disabled=False,
    )

    return to_quantize

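
# Example usage (illustrative sketch for a Jupyter cell, not part of the uploaded file):
from IPython.display import display

device = device_widget(default="AUTO", exclude=["NPU"])
to_quantize = quantization_widget(default=True)
display(device, to_quantize)  # render the widgets; later cells read device.value and to_quantize.value
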
def pip_install(*args):
    """Install Python packages into the current environment with `python -m pip install`."""
    import subprocess  # nosec - disable B404:import-subprocess check

    cli_args = []
    for arg in args:
        cli_args.extend(str(arg).split(" "))
    subprocess.run([sys.executable, "-m", "pip", "install", *cli_args], shell=(platform.system() == "Windows"), check=True)


def load_image(name: str, url: str = None):
    """
    Loads an image by `url` and returns it as a BGR numpy array. The image is
    stored to the filesystem under `name`. If the image file already exists,
    the local image is loaded instead.

    :param name: Local path name of the image.
    :param url: URL of the image.
    :return: image as a BGR numpy array
    """
    import cv2
    import numpy as np
    import requests

    if not Path(name).exists():
        # Set User-Agent to Mozilla because some websites block
        # requests with User-Agent Python
        response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"})
        array = np.asarray(bytearray(response.content), dtype="uint8")
        image = cv2.imdecode(array, -1)  # Loads the image as BGR
        cv2.imwrite(name, image)
    else:
        image = cv2.imread(name)

    return image

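
# Example usage (illustrative sketch, not part of the uploaded file; the URL and file name are placeholders):
sample_bgr = load_image("sample.jpg", url="https://example.com/sample.jpg")
sample_rgb = to_rgb(sample_bgr)  # matplotlib expects RGB, OpenCV returns BGR
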
def download_file(
    url: PathLike,
    filename: PathLike = None,
    directory: PathLike = None,
    show_progress: bool = True,
) -> PathLike:
    """
    Download a file from a url and save it to the local filesystem. The file is saved to the
    current directory by default, or to `directory` if specified. If a filename is not given,
    the filename of the URL will be used.

    :param url: URL that points to the file to download
    :param filename: Name of the local file to save. Should point to the name of the file only,
                     not the full path. If None the filename from the url will be used
    :param directory: Directory to save the file to. Will be created if it doesn't exist.
                      If None the file will be saved to the current working directory
    :param show_progress: If True, show a tqdm progress bar
    :return: path to downloaded file
    """
    from tqdm.notebook import tqdm_notebook
    import requests
    import urllib.parse

    filename = filename or Path(urllib.parse.urlparse(url).path).name
    chunk_size = 16384  # make chunks bigger so that not too many updates are triggered for Jupyter front-end

    filename = Path(filename)
    if len(filename.parts) > 1:
        raise ValueError(
            "`filename` should refer to the name of the file, excluding the directory. "
            "Use the `directory` parameter to specify a target directory for the downloaded file."
        )

    filepath = Path(directory) / filename if directory is not None else filename
    if filepath.exists():
        return filepath.resolve()

    # create the directory if it does not exist, and add the directory to the filename
    if directory is not None:
        Path(directory).mkdir(parents=True, exist_ok=True)

    try:
        response = requests.get(url=url, headers={"User-agent": "Mozilla/5.0"}, stream=True)
        response.raise_for_status()
    except requests.exceptions.HTTPError as error:
        # For errors associated with non-200 codes, e.g. "404 Client Error: Not Found for url: {url}"
        raise Exception(error) from None
    except requests.exceptions.Timeout:
        raise Exception(
            "Connection timed out. If you access the internet through a proxy server, please "
            "make sure the proxy is set in the shell from where you launched Jupyter."
        ) from None
    except requests.exceptions.RequestException as error:
        raise Exception(f"File downloading failed with error: {error}") from None

    # download the file if it does not exist
    filesize = int(response.headers.get("Content-length", 0))
    if not filepath.exists():
        with tqdm_notebook(
            total=filesize,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
            desc=str(filename),
            disable=not show_progress,
        ) as progress_bar:
            with open(filepath, "wb") as file_object:
                for chunk in response.iter_content(chunk_size):
                    file_object.write(chunk)
                    progress_bar.update(len(chunk))
                    progress_bar.refresh()
    else:
        print(f"'{filepath}' already exists.")

    response.close()

    return filepath.resolve()

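
# Example usage (illustrative sketch, not part of the uploaded file; the URL is a placeholder):
weights_path = download_file(
    "https://example.com/models/model.bin",
    directory="model",
    show_progress=True,
)
print(weights_path)  # absolute path of the downloaded (or already cached) file
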
def download_ir_model(model_xml_url: str, destination_folder: PathLike = None) -> PathLike:
    """
    Download IR model from `model_xml_url`. Downloads model xml and bin file; the weights file is
    assumed to exist at the same location and name as model_xml_url with a ".bin" extension.

    :param model_xml_url: URL to model xml file to download
    :param destination_folder: Directory where downloaded model xml and bin are saved. If None, model
                               files are saved to the current directory
    :return: path to downloaded xml model file
    """
    model_bin_url = model_xml_url[:-4] + ".bin"
    model_xml_path = download_file(model_xml_url, directory=destination_folder, show_progress=False)
    download_file(model_bin_url, directory=destination_folder)
    return model_xml_path


# ## Images

# ### Convert Pixel Data
#
# Normalize image pixel values between 0 and 1, and convert images to RGB and BGR.

# In[ ]:


def normalize_minmax(data):
    """
    Normalizes the values in `data` between 0 and 1
    """
    if data.max() == data.min():
        raise ValueError(f"Normalization is not possible because all elements of `data` have the same value: {data.max()}.")
    return (data - data.min()) / (data.max() - data.min())


def to_rgb(image_data):
    """
    Convert image_data from BGR to RGB
    """
    import cv2

    return cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)


def to_bgr(image_data):
    """
    Convert image_data from RGB to BGR
    """
    import cv2

    return cv2.cvtColor(image_data, cv2.COLOR_RGB2BGR)


# ## Videos

# ### Video Player
#
# Custom video player to fulfill FPS requirements. You can set target FPS and output size, flip the video horizontally or skip first N frames.

# In[ ]:


class VideoPlayer:
    """
    Custom video player to fulfill FPS requirements. You can set target FPS and output size,
    flip the video horizontally or skip first N frames.

    :param source: Video source. It could be either camera device or video file.
    :param size: Output frame size.
    :param flip: Flip source horizontally.
    :param fps: Target FPS.
    :param skip_first_frames: Skip first N frames.
    """

    def __init__(self, source, size=None, flip=False, fps=None, skip_first_frames=0, width=1280, height=720):
        import cv2

        self.cv2 = cv2  # This is done to access the package in class methods
        self.__cap = cv2.VideoCapture(source)
        # try HD by default to get better video quality
        self.__cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.__cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        if not self.__cap.isOpened():
            raise RuntimeError(f"Cannot open {'camera' if isinstance(source, int) else ''} {source}")
        # skip first N frames
        self.__cap.set(cv2.CAP_PROP_POS_FRAMES, skip_first_frames)
        # fps of input file
        self.__input_fps = self.__cap.get(cv2.CAP_PROP_FPS)
        if self.__input_fps <= 0:
            self.__input_fps = 60
        # target fps given by user
        self.__output_fps = fps if fps is not None else self.__input_fps
        self.__flip = flip
        self.__size = None
        self.__interpolation = None
        if size is not None:
            self.__size = size
            # AREA better for shrinking, LINEAR better for enlarging
            self.__interpolation = cv2.INTER_AREA if size[0] < self.__cap.get(cv2.CAP_PROP_FRAME_WIDTH) else cv2.INTER_LINEAR
        # first frame
        _, self.__frame = self.__cap.read()
        self.__lock = threading.Lock()
        self.__thread = None
        self.__stop = False

    def start(self):
        """Start playing."""
        self.__stop = False
        self.__thread = threading.Thread(target=self.__run, daemon=True)
        self.__thread.start()

    def stop(self):
        """Stop playing and release resources."""
        self.__stop = True
        if self.__thread is not None:
            self.__thread.join()
        self.__cap.release()

    def __run(self):
        prev_time = 0
        while not self.__stop:
            t1 = time.time()
            ret, frame = self.__cap.read()
            if not ret:
                break

            # fulfill target fps
            if 1 / self.__output_fps < time.time() - prev_time:
                prev_time = time.time()
                # replace by current frame
                with self.__lock:
                    self.__frame = frame

            t2 = time.time()
            # time to wait [s] to fulfill input fps
            wait_time = 1 / self.__input_fps - (t2 - t1)
            # wait until
            time.sleep(max(0, wait_time))

        self.__frame = None

    def next(self):
        """Get the current frame."""
        import cv2

        with self.__lock:
            if self.__frame is None:
                return None
            # need to copy frame, because can be cached and reused if fps is low
            frame = self.__frame.copy()
        if self.__size is not None:
            frame = self.cv2.resize(frame, self.__size, interpolation=self.__interpolation)
        if self.__flip:
            frame = self.cv2.flip(frame, 1)
        return frame

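
# Example usage (illustrative sketch, not part of the uploaded file; source=0 assumes a local webcam):
player = VideoPlayer(source=0, size=(640, 360), flip=True, fps=30)
player.start()
time.sleep(0.5)        # give the background reader thread a moment to grab a frame
frame = player.next()  # BGR numpy array, or None if no frame is available yet
player.stop()
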
# ## Visualization

# ### Segmentation
#
# Define a SegmentationMap NamedTuple that keeps the labels and colormap for a segmentation project/dataset. Create CityScapesSegmentation and BinarySegmentation SegmentationMaps. Create a function to convert a segmentation map to an RGB image with a colormap, and to show the segmentation result as an overlay over the original image.

# In[ ]:


class Label(NamedTuple):
    index: int
    color: tuple
    name: Optional[str] = None


# In[ ]:


class SegmentationMap(NamedTuple):
    labels: list

    def get_colormap(self):
        import numpy as np

        return np.array([label.color for label in self.labels])

    def get_labels(self):
        labelnames = [label.name for label in self.labels]
        if any(labelnames):
            return labelnames
        else:
            return None


# In[ ]:


cityscape_labels = [
    Label(index=0, color=(128, 64, 128), name="road"),
    Label(index=1, color=(244, 35, 232), name="sidewalk"),
    Label(index=2, color=(70, 70, 70), name="building"),
    Label(index=3, color=(102, 102, 156), name="wall"),
    Label(index=4, color=(190, 153, 153), name="fence"),
    Label(index=5, color=(153, 153, 153), name="pole"),
    Label(index=6, color=(250, 170, 30), name="traffic light"),
    Label(index=7, color=(220, 220, 0), name="traffic sign"),
    Label(index=8, color=(107, 142, 35), name="vegetation"),
    Label(index=9, color=(152, 251, 152), name="terrain"),
    Label(index=10, color=(70, 130, 180), name="sky"),
    Label(index=11, color=(220, 20, 60), name="person"),
    Label(index=12, color=(255, 0, 0), name="rider"),
    Label(index=13, color=(0, 0, 142), name="car"),
    Label(index=14, color=(0, 0, 70), name="truck"),
    Label(index=15, color=(0, 60, 100), name="bus"),
    Label(index=16, color=(0, 80, 100), name="train"),
    Label(index=17, color=(0, 0, 230), name="motorcycle"),
    Label(index=18, color=(119, 11, 32), name="bicycle"),
    Label(index=19, color=(255, 255, 255), name="background"),
]

CityScapesSegmentation = SegmentationMap(cityscape_labels)

binary_labels = [
    Label(index=0, color=(255, 255, 255), name="background"),
    Label(index=1, color=(0, 0, 0), name="foreground"),
]

BinarySegmentation = SegmentationMap(binary_labels)


# In[ ]:


def segmentation_map_to_image(result, colormap, remove_holes: bool = False):
    """
    Convert network result of floating point numbers to an RGB image with
    integer values from 0-255 by applying a colormap.

    :param result: A single network result after converting to pixel values in H,W or 1,H,W shape.
    :param colormap: A numpy array of shape (num_classes, 3) with an RGB value per class.
    :param remove_holes: If True, remove holes in the segmentation result.
    :return: An RGB image where each pixel is a uint8 value according to colormap.
    """
    import cv2
    import numpy as np

    if len(result.shape) != 2 and result.shape[0] != 1:
        raise ValueError(f"Expected result with shape (H,W) or (1,H,W), got result with shape {result.shape}")

    if len(np.unique(result)) > colormap.shape[0]:
        raise ValueError(
            f"Expected max {colormap.shape[0]} classes in result, got {len(np.unique(result))} "
            "different output values. Please make sure to convert the network output to "
            "pixel values before calling this function."
        )
    elif result.shape[0] == 1:
        result = result.squeeze(0)

    result = result.astype(np.uint8)

    contour_mode = cv2.RETR_EXTERNAL if remove_holes else cv2.RETR_TREE
    mask = np.zeros((result.shape[0], result.shape[1], 3), dtype=np.uint8)
    for label_index, color in enumerate(colormap):
        label_index_map = result == label_index
        label_index_map = label_index_map.astype(np.uint8) * 255
        contours, hierarchies = cv2.findContours(label_index_map, contour_mode, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(
            mask,
            contours,
            contourIdx=-1,
            color=color.tolist(),
            thickness=cv2.FILLED,
        )

    return mask


def segmentation_map_to_overlay(image, result, alpha, colormap, remove_holes=False):
    """
    Returns a new image where a segmentation mask (created with colormap) is overlaid on
    the source image.

    :param image: Source image.
    :param result: A single network result after converting to pixel values in H,W or 1,H,W shape.
    :param alpha: Alpha transparency value for the overlay image.
    :param colormap: A numpy array of shape (num_classes, 3) with an RGB value per class.
    :param remove_holes: If True, remove holes in the segmentation result.
    :return: An RGB image with the segmentation mask overlaid on the source image.
    """
    import cv2
    import numpy as np

    if len(image.shape) == 2:
        image = np.repeat(np.expand_dims(image, -1), 3, 2)
    mask = segmentation_map_to_image(result, colormap, remove_holes)
    image_height, image_width = image.shape[:2]
    mask = cv2.resize(src=mask, dsize=(image_width, image_height))
    return cv2.addWeighted(mask, alpha, image, 1 - alpha, 0)

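
# Example usage (illustrative sketch, not part of the uploaded file; synthetic arrays stand in for a real model output):
import numpy as np

dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)                # placeholder source image
dummy_result = np.random.randint(0, 20, (480, 640), dtype=np.uint8)  # placeholder class-index map (20 CityScapes classes)
overlay = segmentation_map_to_overlay(dummy_image, dummy_result, alpha=0.4, colormap=CityScapesSegmentation.get_colormap())
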
# ### Network Results
#
# Show network result image, optionally together with the source image and a legend with labels.

# In[ ]:


def viz_result_image(
    result_image,
    source_image=None,
    source_title: str = None,
    result_title: str = None,
    labels: SegmentationMap = None,
    resize: bool = False,
    bgr_to_rgb: bool = False,
    hide_axes: bool = False,
):
    """
    Show result image, optionally together with source images, and a legend with labels.

    :param result_image: Numpy array of RGB result image.
    :param source_image: Numpy array of source image. If provided this image will be shown
                         next to the result image. source_image is expected to be in RGB format.
                         Set bgr_to_rgb to True if source_image is in BGR format.
    :param source_title: Title to display for the source image.
    :param result_title: Title to display for the result image.
    :param labels: SegmentationMap with labels. If provided, a legend will be shown with the given labels.
    :param resize: If true, resize the result image to the same shape as the source image.
    :param bgr_to_rgb: If true, convert the source image from BGR to RGB. Use this option if
                       source_image is a BGR image.
    :param hide_axes: If true, do not show matplotlib axes.
    :return: Matplotlib figure with result image
    """
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D

    if bgr_to_rgb:
        source_image = to_rgb(source_image)
    if resize:
        result_image = cv2.resize(result_image, (source_image.shape[1], source_image.shape[0]))

    num_images = 1 if source_image is None else 2

    fig, ax = plt.subplots(1, num_images, figsize=(16, 8), squeeze=False)
    if source_image is not None:
        ax[0, 0].imshow(source_image)
        ax[0, 0].set_title(source_title)

    ax[0, num_images - 1].imshow(result_image)
    ax[0, num_images - 1].set_title(result_title)

    if hide_axes:
        for a in ax.ravel():
            a.axis("off")
    if labels:
        colors = labels.get_colormap()
        lines = [
            Line2D(
                [0],
                [0],
                color=[item / 255 for item in c.tolist()],
                linewidth=3,
                linestyle="-",
            )
            for c in colors
        ]
        plt.legend(
            lines,
            labels.get_labels(),
            bbox_to_anchor=(1, 1),
            loc="upper left",
            prop={"size": 12},
        )
    plt.close(fig)
    return fig


# ### Live Inference

# In[ ]:


def show_array(frame, display_handle=None):
    """
    Display array `frame`. Replace information at `display_handle` with `frame`
    encoded as a jpeg image. `frame` is expected to have data in BGR order.

    Create a display_handle with: `display_handle = display(display_id=True)`
    """
    import cv2
    from IPython.display import Image, display

    _, frame = cv2.imencode(ext=".jpeg", img=frame)
    if display_handle is None:
        display_handle = display(Image(data=frame.tobytes()), display_id=True)
    else:
        display_handle.update(Image(data=frame.tobytes()))
    return display_handle

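
# Example usage (illustrative sketch of a live-display loop, not part of the uploaded file;
# "video.mp4" is a placeholder path):
player = VideoPlayer("video.mp4", fps=30)
player.start()
handle = None
try:
    while True:
        frame = player.next()
        if frame is None:
            break
        # ... run a model on `frame` and draw results here ...
        handle = show_array(frame, handle)  # update the same notebook output in place
finally:
    player.stop()
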
# ## Checks and Alerts
#
# Create an alert class to show stylized info/error/warning messages and a `check_device` function that checks whether a given device is available.

# In[ ]:


class NotebookAlert(Exception):
    def __init__(self, message: str, alert_class: str):
        """
        Show an alert box with the given message.

        :param message: The message to display.
        :param alert_class: The class for styling the message. Options: info, warning, success, danger.
        """
        self.message = message
        self.alert_class = alert_class
        self.show_message()

    def show_message(self):
        from IPython.display import HTML, display

        display(HTML(f"""<div class="alert alert-{self.alert_class}">{self.message}</div>"""))


class DeviceNotFoundAlert(NotebookAlert):
    def __init__(self, device: str):
        """
        Show a warning message about an unavailable device. This class does not check whether or
        not the device is available, use the `check_device` function to check this. `check_device`
        also shows the warning if the device is not found.

        :param device: The unavailable device.
        :return: A formatted alert box with the message that `device` is not available, and a list
                 of devices that are available.
        """
        import openvino as ov

        core = ov.Core()
        supported_devices = core.available_devices
        self.message = f"Running this cell requires a {device} device, which is not available on this system. "
        self.alert_class = "warning"
        if len(supported_devices) == 1:
            self.message += f"The following device is available: {core.available_devices[0]}"
        else:
            self.message += f"The following devices are available: {', '.join(core.available_devices)}"
        super().__init__(self.message, self.alert_class)


def check_device(device: str) -> bool:
    """
    Check if the specified device is available on the system.

    :param device: Device to check. e.g. CPU, GPU
    :return: True if the device is available, False if not. If the device is not available,
             a DeviceNotFoundAlert will be shown.
    """
    import openvino as ov

    core = ov.Core()
    if device not in core.available_devices:
        DeviceNotFoundAlert(device)
        return False
    else:
        return True


def check_openvino_version(version: str) -> bool:
    """
    Check if the specified OpenVINO version is installed.

    :param version: the OpenVINO version to check. Example: 2021.4
    :return: True if the version is installed, False if not. If the version is not installed,
             an alert message will be shown.
    """
    import openvino as ov

    installed_version = ov.get_version()
    if version not in installed_version:
        NotebookAlert(
            f"This notebook requires OpenVINO {version}. "
            f"The version on your system is: <i>{installed_version}</i>.<br>"
            "Please run <span style='font-family:monospace'>pip install --upgrade -r requirements.txt</span> "
            "in the openvino_env environment to install this version. "
            "See the <a href='https://github.com/openvinotoolkit/openvino_notebooks'>"
            "OpenVINO Notebooks README</a> for detailed instructions",
            alert_class="danger",
        )
        return False
    else:
        return True

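
# Example usage (illustrative sketch, not part of the uploaded file; the version string is only an example format):
target_device = "GPU" if check_device("GPU") else "CPU"  # a warning alert is shown if GPU is missing
check_openvino_version("2024.4")
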
def optimize_bge_embedding(model_path, output_model_path):
    """
    optimize_bge_embedding is used to optimize the BGE model for the NPU device

    Arguments:
        model_path {str} -- original BGE IR model path
        output_model_path {str} -- converted BGE IR model path
    """
    import openvino as ov

    try:
        from openvino.passes import Manager, MatcherPass, WrapType, Matcher
        from openvino import opset10 as ops
    except ImportError:
        from openvino.runtime.passes import Manager, MatcherPass, WrapType, Matcher
        from openvino.runtime import opset10 as ops

    core = ov.Core()
    ov_model = core.read_model(model_path)
    manager = Manager()
    packed_layername_tensor_dict_list = [{"name": "aten::mul/Multiply"}]

    class ReplaceTensor(MatcherPass):
        def __init__(self, packed_layername_tensor_dict_list):
            MatcherPass.__init__(self)
            self.model_changed = False

            param = WrapType("opset10.Multiply")

            def callback(matcher: Matcher) -> bool:
                import numpy as np

                root = matcher.get_match_root()
                if root is None:
                    return False
                for y in packed_layername_tensor_dict_list:
                    root_name = root.get_friendly_name()
                    if root_name.find(y["name"]) != -1:
                        max_fp16 = np.array([[[[-np.finfo(np.float16).max]]]]).astype(np.float32)
                        new_tensor = ops.constant(max_fp16, ov.Type.f32, name="Constant_4431")
                        root.set_arguments([root.input_value(0).node, new_tensor])
                        packed_layername_tensor_dict_list.remove(y)

                return True

            self.register_matcher(Matcher(param, "ReplaceTensor"), callback)

    manager.register_pass(ReplaceTensor(packed_layername_tensor_dict_list))
    manager.run_passes(ov_model)
    ov.save_model(ov_model, output_model_path, compress_to_fp16=False)


def collect_telemetry(file: str = ""):
    """
    The function only tracks that the notebook cell was executed and does not collect any personally identifiable information (PII).
    """
    try:
        import os
        import requests
        import platform
        from pathlib import Path

        if os.getenv("SCARF_NO_ANALYTICS") == "1" or os.getenv("DO_NOT_TRACK") == "1":
            return
        url = "https://openvino.gateway.scarf.sh/telemetry"
        params = {
            "notebook_dir": Path(__file__).parent.name,
            "platform": platform.system(),
            "arch": platform.machine(),
            "python_version": platform.python_version(),
        }
        if file:
            params["file"] = file
        requests.get(url, params=params)
    except Exception:
        pass
pip_helper.py ADDED
@@ -0,0 +1,10 @@
import sys


def pip_install(*args):
    """Install Python packages into the current environment with `python -m pip install`."""
    import subprocess  # nosec - disable B404:import-subprocess check

    cli_args = []
    for arg in args:
        cli_args.extend(str(arg).split(" "))
    subprocess.run([sys.executable, "-m", "pip", "install", *cli_args], check=True)
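
# Example usage (illustrative sketch, not part of the uploaded file; package specifiers are placeholders):
pip_install("--upgrade pip")
pip_install("requests", "tqdm>=4.66")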