Shivdutta committed on
Commit aa762fc · verified · 1 Parent(s): 6d38902

Upload 12 files

Files changed (13)
  1. .gitattributes +4 -0
  2. app.py +12 -4
  3. dogs.jpg +0 -0
  4. flowers.jpg +0 -0
  5. fruits.jpg +0 -0
  6. sa_10039.jpg +0 -0
  7. sa_11025.jpg +0 -0
  8. sa_1309.jpg +3 -0
  9. sa_192.jpg +3 -0
  10. sa_414.jpg +3 -0
  11. sa_561.jpg +0 -0
  12. sa_862.jpg +3 -0
  13. sa_8776.jpg +0 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+sa_1309.jpg filter=lfs diff=lfs merge=lfs -text
+sa_192.jpg filter=lfs diff=lfs merge=lfs -text
+sa_414.jpg filter=lfs diff=lfs merge=lfs -text
+sa_862.jpg filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -17,11 +17,19 @@ from PIL import Image as PILIMAGE
 
 from transformers import CLIPProcessor, CLIPModel, CLIPTokenizer
 from sentence_transformers import SentenceTransformer, util
-
-
-
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
+examples = [["fruits.jpg"],
+["flowers.jpg"],
+["sa_10039.jpg"],
+["dogs.jpg"],
+["sa_8776.jpg"],
+["sa_561.jpg"],
+["sa_11025.jpg"],
+["sa_1309.jpg"],
+["sa_192.jpg"],
+["sa_862.jpg"],["sa_414.jpg"]]
+
 # Define model
 model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
 processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
@@ -104,7 +112,7 @@ gr.Interface(fn=find_best_matches,
 gr.Radio([T2I, I2I]),
 gr.Textbox(lines=1, label="Text query", placeholder="Introduce the search text...",
 )],
-theme="grass",
+theme="grass",examples=examples,
 outputs=[gr.Gallery(label="Generated images", show_label=False, elem_id="gallery", scale=2)],title="CLIP Image Search",
 description="This application displays TOP THREE images from Unsplash dataset that best match the search query provided by the user. Moreover, the input can be provided via two modes ie text or image form.").launch()
 
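Note: the body of find_best_matches is not touched by this commit, so the following is only a minimal sketch, not the repository's implementation, of how the pieces the diff does show (CLIPModel/CLIPProcessor loaded from openai/clip-vit-base-patch32 and sentence_transformers.util) are typically combined to return the top three matches for a text or image query. The candidate list, the mode string, and the helper names embed_images and top_three are illustrative assumptions; the real app searches an Unsplash image set and is driven by the Gradio inputs wired above.

# Illustrative sketch only -- not the code added in this commit.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor
from sentence_transformers import util

device = "cuda" if torch.cuda.is_available() else "cpu"
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Assumption: use the bundled example JPEGs as the searchable gallery; the real
# app searches Unsplash images whose embeddings would be precomputed the same way.
candidate_paths = ["fruits.jpg", "flowers.jpg", "dogs.jpg", "sa_10039.jpg"]

def embed_images(paths):
    # Encode the gallery once so each query needs only a single forward pass.
    images = [Image.open(p).convert("RGB") for p in paths]
    inputs = processor(images=images, return_tensors="pt").to(device)
    with torch.no_grad():
        feats = model.get_image_features(**inputs)
    return feats / feats.norm(dim=-1, keepdim=True)  # L2-normalize for cosine similarity

image_embeddings = embed_images(candidate_paths)

def top_three(query, mode="Text-To-Image"):
    # "Text-To-Image" is a placeholder for the app's T2I radio value (not shown in this diff).
    with torch.no_grad():
        if mode == "Text-To-Image":
            inputs = processor(text=[query], return_tensors="pt", padding=True).to(device)
            query_emb = model.get_text_features(**inputs)
        else:  # image query: `query` is a PIL image
            inputs = processor(images=query, return_tensors="pt").to(device)
            query_emb = model.get_image_features(**inputs)
    query_emb = query_emb / query_emb.norm(dim=-1, keepdim=True)
    scores = util.cos_sim(query_emb, image_embeddings)[0]  # cosine similarity per candidate
    best = torch.topk(scores, k=min(3, len(candidate_paths))).indices.tolist()
    return [candidate_paths[i] for i in best]

print(top_three("a bowl of fresh fruit"))

Precomputing the normalized image embeddings once keeps the per-query cost to a single CLIP forward pass plus one cosine-similarity product, which is why the example images added in this commit can double as a small demo gallery.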
dogs.jpg ADDED
flowers.jpg ADDED
fruits.jpg ADDED
sa_10039.jpg ADDED
sa_11025.jpg ADDED
sa_1309.jpg ADDED

Git LFS Details

  • SHA256: b1012cbfd3ffe4ee0da940dc45961fbd1ce7546bea566f650514ec56d72b0460
  • Pointer size: 132 Bytes
  • Size of remote file: 1.11 MB
sa_192.jpg ADDED

Git LFS Details

  • SHA256: dcec4fce91382cbfeb2711fff3caeae183c23cb6d8a6c9e2ca0cd2e8eac39512
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
sa_414.jpg ADDED

Git LFS Details

  • SHA256: 69dbead40b43e54d3bb80fb372c2e241b0f3ff2159d32525433a75153e067c65
  • Pointer size: 132 Bytes
  • Size of remote file: 2.23 MB
sa_561.jpg ADDED
sa_862.jpg ADDED

Git LFS Details

  • SHA256: 06efc970f0d95faa6e8c69ee73f2032627569dde1c28bc783faebdaefa5eb2a8
  • Pointer size: 132 Bytes
  • Size of remote file: 1.56 MB
sa_8776.jpg ADDED