Update app.py
Browse files
app.py
CHANGED
@@ -1,36 +1,62 @@
|
|
1 |
import gradio as gr
|
2 |
-
|
|
|
|
|
3 |
from PIL import Image
|
4 |
import numpy as np
|
|
|
|
|
5 |
|
6 |
-
#
|
7 |
-
|
8 |
|
9 |
-
#
|
10 |
-
|
11 |
-
# Run U-2-Net pipeline
|
12 |
-
segments = pipe(image)
|
13 |
|
14 |
-
|
15 |
-
|
16 |
-
return image
|
17 |
|
18 |
-
|
19 |
-
|
20 |
-
mask_np = np.array(mask) / 255.0
|
21 |
-
image_np = np.array(image).astype(np.uint8)
|
22 |
|
23 |
-
|
24 |
-
|
25 |
-
|
|
|
26 |
|
27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
|
29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
gr.Interface(
|
31 |
fn=segment_dress,
|
32 |
inputs=gr.Image(type="pil", label="Upload Image"),
|
33 |
-
outputs=gr.Image(type="pil", label="Segmented Dress
|
34 |
-
title="Dress Segmentation
|
35 |
-
description="
|
36 |
).launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
import torch
|
3 |
+
import torch.nn as nn
|
4 |
+
from torchvision import transforms
|
5 |
from PIL import Image
|
6 |
import numpy as np
|
7 |
+
import requests
|
8 |
+
from io import BytesIO
|
9 |
|
10 |
+
# U-2-Net architecture (simplified, or import from a .py file if you've saved it)
# You can get the U-2-Net code from https://github.com/xuebinqin/U-2-Net

# For demo, let's download the pre-trained model and use a wrapper instead
from huggingface_hub import hf_hub_download

# Fetch the pre-trained u2net.pth weights from the Hugging Face Hub.
model_path = hf_hub_download(repo_id="BritishWerewolf/U-2-Net", filename="u2net.pth")

# Known U2NET implementation copied locally as u2net.py
# (e.g. from https://github.com/xuebinqin/U-2-Net/blob/master/u2net_test.py)
from u2net import U2NET

# Instantiate the network (3 input channels, 1 output channel), load the
# downloaded weights on CPU, and switch to inference mode.
model = U2NET(3, 1)
state_dict = torch.load(model_path, map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
model.eval()

# Standard ImageNet preprocessing at U-2-Net's native 320x320 input size.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.Resize((320, 320)),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])
|
34 |
|
35 |
+
def segment_dress(image):
    """Segment the foreground (dress) from an image using U-2-Net.

    Parameters
    ----------
    image : PIL.Image.Image
        Input image in any mode; converted to RGB internally.

    Returns
    -------
    PIL.Image.Image
        The input image with the background suppressed (masked toward black)
        by the U-2-Net saliency map.
    """
    original = image.convert("RGB")
    input_tensor = transform(original).unsqueeze(0)

    with torch.no_grad():
        # U-2-Net returns 7 saliency maps; d1 is the fused, highest-quality one.
        d1, _, _, _, _, _, _ = model(input_tensor)
    pred = d1[0][0]

    # Min-max normalize to [0, 1]. Guard the denominator: for a constant
    # saliency map, max == min and the original expression divided by zero,
    # producing a NaN mask.
    lo, hi = pred.min(), pred.max()
    denom = hi - lo
    if denom > 0:
        pred = (pred - lo) / denom
    else:
        pred = torch.zeros_like(pred)
    pred_np = pred.cpu().numpy()

    # Resize the 320x320 mask back to the original image size (PIL size is (w, h)).
    pred_resized = Image.fromarray((pred_np * 255).astype(np.uint8)).resize(original.size)

    # Apply the soft mask channel-wise: broadcast (H, W, 1) over the RGB channels.
    mask = np.array(pred_resized) / 255.0
    image_np = np.array(original).astype(np.uint8)
    segmented = (image_np * mask[..., None]).astype(np.uint8)

    return Image.fromarray(segmented)
|
54 |
+
|
55 |
+
# Build the Gradio UI and start serving it.
demo = gr.Interface(
    fn=segment_dress,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.Image(type="pil", label="Segmented Dress"),
    title="Dress Segmentation with U-2-Net",
    description="Segments the dress (or full foreground) using U-2-Net from Hugging Face",
)
demo.launch()
|