try fix
app.py CHANGED
@@ -101,27 +101,27 @@ def main(
     n_samples=4,
 ):
     debug_print("main")
-    images = []
-    for url in test_images_urls:
-        import requests
-        from io import BytesIO
-        from PIL import Image
-        try:
-            response = requests.get(url)
-            if not response.ok:
-                continue
-            bytes = BytesIO(response.content)
-            image = Image.open(bytes)
-            if image.mode != 'RGB':
-                image = image.convert('RGB')
-            # width = 336
-            # aspect_ratio = float(image.height) / float(image.width)
-            # height = int(width * aspect_ratio)
-            # image = image.resize((width, height), Image.Resampling.LANCZOS)
-            images.append((image, "title"))
-        except Exception as e:
-            print(e)
-    return images
+    # images = []
+    # for url in test_images_urls:
+    #     import requests
+    #     from io import BytesIO
+    #     from PIL import Image
+    #     try:
+    #         response = requests.get(url)
+    #         if not response.ok:
+    #             continue
+    #         bytes = BytesIO(response.content)
+    #         image = Image.open(bytes)
+    #         if image.mode != 'RGB':
+    #             image = image.convert('RGB')
+    #         # width = 336
+    #         # aspect_ratio = float(image.height) / float(image.width)
+    #         # height = int(width * aspect_ratio)
+    #         # image = image.resize((width, height), Image.Resampling.LANCZOS)
+    #         images.append((image, "title"))
+    #     except Exception as e:
+    #         print(e)
+    # return images
 
     embeddings = base64_to_embedding(embeddings)
     # convert to python array
@@ -268,18 +268,18 @@ def on_example_image_click_set_image(input_image, image_url):
 # device = torch.device("mps" if torch.backends.mps.is_available() else "cuda:0" if torch.cuda.is_available() else "cpu")
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
-# from clip_retrieval.load_clip import load_clip, get_tokenizer
-# from clip_retrieval.clip_client import ClipClient, Modality
-# model, preprocess = load_clip(clip_model, use_jit=True, device=device)
-# tokenizer = get_tokenizer(clip_model)
-# clip_retrieval_client = ClipClient(
-#     url=clip_retrieval_service_url,
-#     indice_name=clip_model_id,
-#     use_safety_model = False,
-#     use_violence_detector = False,
-#     # modality = Modality.TEXT,
-# )
-model, preprocess, tokenizer, clip_retrieval_client = None, None, None, None
+from clip_retrieval.load_clip import load_clip, get_tokenizer
+from clip_retrieval.clip_client import ClipClient, Modality
+model, preprocess = load_clip(clip_model, use_jit=True, device=device)
+tokenizer = get_tokenizer(clip_model)
+clip_retrieval_client = ClipClient(
+    url=clip_retrieval_service_url,
+    indice_name=clip_model_id,
+    use_safety_model = False,
+    use_violence_detector = False,
+    # modality = Modality.TEXT,
+)
+# model, preprocess, tokenizer, clip_retrieval_client = None, None, None, None
 
 
 examples = [
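The second hunk is the substance of the fix: the CLIP model, tokenizer, and clip-retrieval client are now actually constructed instead of being stubbed out as None. As a reference for how the loaded pieces are typically exercised, here is a minimal sketch of embedding a text prompt; the concrete clip_model value is an assumption, since the diff does not show what app.py passes in.

import torch
from clip_retrieval.load_clip import load_clip, get_tokenizer

device = "cuda:0" if torch.cuda.is_available() else "cpu"
clip_model = "ViT-L/14"  # assumed value; app.py passes its own clip_model variable

# Same loading pattern the hunk re-enables.
model, preprocess = load_clip(clip_model, use_jit=True, device=device)
tokenizer = get_tokenizer(clip_model)

# Tokenize, encode, then L2-normalize so dot products act as cosine similarities.
tokens = tokenizer(["an orange cat"]).to(device)
with torch.no_grad():
    text_embedding = model.encode_text(tokens)
text_embedding = text_embedding / text_embedding.norm(dim=-1, keepdim=True)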
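The ClipClient constructed above queries a clip-retrieval backend over HTTP. A minimal usage sketch, assuming the public LAION knn service; the URL and index name below are placeholders, since app.py reads them from clip_retrieval_service_url and clip_model_id:

from clip_retrieval.clip_client import ClipClient

client = ClipClient(
    url="https://knn.laion.ai/knn-service",  # placeholder for clip_retrieval_service_url
    indice_name="laion5B-L-14",              # placeholder for clip_model_id
    use_safety_model=False,
    use_violence_detector=False,
    num_images=8,
)

# Query by text; a precomputed embedding can be passed via embedding_input instead,
# which matches how app.py works with base64-decoded embeddings.
results = client.query(text="an orange cat")
for result in results[:3]:
    print(result["similarity"], result["url"])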