Duplicate from nielsr/CLIPSeg
Co-authored-by: Niels Rogge <[email protected]>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +48 -0
- example_image.png +0 -0
- packages.txt +1 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: CLIPSeg
+emoji: 🦀
+colorFrom: indigo
+colorTo: purple
+sdk: gradio
+sdk_version: 3.9.1
+app_file: app.py
+pinned: false
+duplicated_from: nielsr/CLIPSeg
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,48 @@
+from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
+import gradio as gr
+from PIL import Image
+import torch
+import matplotlib.pyplot as plt
+import cv2  # only used by the disabled post-processing below
+
+processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
+model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
+
+# segment the image according to the text prompt and return the mask
+def process_image(image, prompt):
+    inputs = processor(text=prompt, images=image, padding="max_length", return_tensors="pt")
+
+    # predict; for a single prompt the logits form a low-resolution (352, 352) mask
+    with torch.no_grad():
+        outputs = model(**inputs)
+    preds = outputs.logits
+
+    filename = "mask.png"
+    plt.imsave(filename, torch.sigmoid(preds))
+
+    # Disabled alternative: binarize the saved heatmap with OpenCV
+    # img2 = cv2.imread(filename)
+    # gray_image = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
+
+    # (thresh, bw_image) = cv2.threshold(gray_image, 100, 255, cv2.THRESH_BINARY)
+
+    # bw_image is single-channel, so let PIL handle the color conversion:
+    # return Image.fromarray(bw_image).convert("RGB")
+
+    return Image.open(filename).convert("RGB")
+
+title = "Interactive demo: zero-shot image segmentation with CLIPSeg"
+description = "Demo for using CLIPSeg, a CLIP-based model for zero- and one-shot image segmentation. To use it, simply upload an image and describe what you want to identify in the image, or use one of the examples below and click 'submit'. Results will show up in a few seconds."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.10003'>CLIPSeg: Image Segmentation Using Text and Image Prompts</a> | <a href='https://huggingface.co/docs/transformers/main/en/model_doc/clipseg'>HuggingFace docs</a></p>"
+
+examples = [["example_image.png", "wood"]]
+
+interface = gr.Interface(fn=process_image,
+                         inputs=[gr.Image(type="pil"), gr.Textbox(label="Please describe what you want to identify")],
+                         outputs=gr.Image(type="pil"),
+                         title=title,
+                         description=description,
+                         article=article,
+                         examples=examples)
+
+interface.launch(debug=True)
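The commented-out block in process_image sketches a hard-mask variant that the app never enables. A minimal standalone sketch of that idea, using the same 100/255 threshold as the disabled code; the helper name binarize_mask is hypothetical, not part of the Space:

import cv2
from PIL import Image

def binarize_mask(path, thresh=100):
    # Load the saved heatmap and reduce it to a single channel
    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Pixels above `thresh` become 255 (foreground), the rest 0
    _, bw = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    # PIL converts the single-channel mask back to RGB for Gradio
    return Image.fromarray(bw).convert("RGB")

Returning the raw sigmoid heatmap, as the app does, preserves per-pixel confidence; thresholding trades that nuance for a crisp binary mask.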
example_image.png
ADDED
packages.txt
ADDED
@@ -0,0 +1 @@
+python3-opencv
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+git+https://github.com/huggingface/transformers.git
+torch
+opencv-python