minor fixes
- .gitattributes +1 -0
- app.py +18 -137
- dp2/detection/structures.py +2 -1
- g7-summit-leaders-distraction.jpg +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 erling.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
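The new rule stores every JPEG in the repository via Git LFS; it is exactly the line that running git lfs track "*.jpg" appends. It covers the example image added in this commit and subsumes the per-file erling.jpg rule above.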
app.py
CHANGED
@@ -4,124 +4,14 @@ os.system("pip install ftfy regex tqdm")
 os.system("pip install git+https://github.com/openai/CLIP.git")
 os.system("pip install git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose")
 os.system("pip install git+https://github.com/hukkelas/DSFD-Pytorch-Inference")
-from collections import defaultdict
 import gradio
 import numpy as np
 import torch
-import cv2
 from PIL import Image
 from dp2 import utils
 from tops.config import instantiate
 import tops
 import gradio.inputs
-from stylemc import get_and_cache_direction, get_styles
-
-
-class GuidedDemo:
-    def __init__(self, face_anonymizer, cfg_face) -> None:
-        self.anonymizer = face_anonymizer
-        assert sum([x is not None for x in list(face_anonymizer.generators.values())]) == 1
-        self.generator = [x for x in list(face_anonymizer.generators.values()) if x is not None][0]
-        face_G_cfg = utils.load_config(cfg_face.anonymizer.face_G_cfg)
-        face_G_cfg.train.batch_size = 1
-        self.dl = instantiate(face_G_cfg.data.val.loader)
-        self.cache_dir = face_G_cfg.output_dir
-        self.precompute_edits()
-
-    def precompute_edits(self):
-        self.precomputed_edits = set()
-        for edit in self.precomputed_edits:
-            get_and_cache_direction(self.cache_dir, self.dl, self.generator, edit)
-        if self.cache_dir.joinpath("stylemc_cache").is_dir():
-            for path in self.cache_dir.joinpath("stylemc_cache").iterdir():
-                text_prompt = path.stem.replace("_", " ")
-                self.precomputed_edits.add(text_prompt)
-                print(text_prompt)
-        self.edits = defaultdict(defaultdict)
-
-    def anonymize(self, img, show_boxes: bool, current_box_idx: int, current_styles, current_boxes, update_identity, edits, cache_id=None):
-        if not isinstance(img, torch.Tensor):
-            img, cache_id = pil2torch(img)
-            img = tops.to_cuda(img)
-
-        current_box_idx = current_box_idx % len(current_boxes)
-        edited_styles = [s.clone() for s in current_styles]
-        for face_idx, face_edits in edits.items():
-            for prompt, strength in face_edits.items():
-                direction = get_and_cache_direction(self.cache_dir, self.dl, self.generator, prompt)
-                edited_styles[int(face_idx)] += direction * strength
-            update_identity[int(face_idx)] = True
-        assert img.dtype == torch.uint8
-        img = self.anonymizer(
-            img, truncation_value=0,
-            multi_modal_truncation=True, amp=True,
-            cache_id=cache_id,
-            all_styles=edited_styles,
-            update_identity=update_identity)
-        update_identity = [True for i in range(len(update_identity))]
-        img = utils.im2numpy(img)
-        if show_boxes:
-            x0, y0, x1, y1 = [int(_) for _ in current_boxes[int(current_box_idx)]]
-            img = cv2.rectangle(img, (x0, y0), (x1, y1), (255, 0, 0), 1)
-        return img, update_identity
-
-    def update_image(self, img, show_boxes):
-        img, cache_id = pil2torch(img)
-        img = tops.to_cuda(img)
-        det = self.anonymizer.detector.forward_and_cache(img, cache_id, load_cache=True)[0]
-        current_styles = []
-        for i in range(len(det)):
-            s = get_styles(
-                np.random.randint(0, 999999),self.generator,
-                None, truncation_value=0)
-            current_styles.append(s)
-        update_identity = [True for i in range(len(det))]
-        current_boxes = np.array(det.boxes)
-        edits = defaultdict(defaultdict)
-        cur_face_idx = -1 % len(current_boxes)
-        img, update_identity = self.anonymize(img, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits, cache_id=cache_id)
-        return img, current_styles, current_boxes, update_identity, edits, cur_face_idx
-
-    def change_face(self, change, cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits):
-        cur_face_idx = (cur_face_idx+change) % len(current_boxes)
-        img, update_identity = self.anonymize(input_image, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits)
-        return img, update_identity, cur_face_idx
-
-    def add_style(self, face_idx: int, prompt: str, strength: float, input_image, show_boxes, current_styles, current_boxes, update_identity, edits):
-        face_idx = face_idx % len(current_boxes)
-        edits[face_idx][prompt] = strength
-        img, update_identity = self.anonymize(input_image, show_boxes, face_idx, current_styles, current_boxes, update_identity, edits)
-        return img, update_identity, edits
-
-    def setup_interface(self):
-        current_styles = gradio.State()
-        current_boxes = gradio.State(None)
-        update_identity = gradio.State([])
-        edits = gradio.State([])
-        with gradio.Row():
-            input_image = gradio.Image(
-                type="pil", label="Upload your image or try the example below!",source="webcam")
-            output_image = gradio.Image(type="numpy", label="Output")
-        with gradio.Row():
-            update_btn = gradio.Button("Update Anonymization").style(full_width=True)
-        with gradio.Row():
-            show_boxes = gradio.Checkbox(value=True, label="Show Selected")
-            cur_face_idx = gradio.Number(value=-1,label="Current", interactive=False)
-            previous = gradio.Button("Previous Person")
-            next_ = gradio.Button("Next Person")
-        with gradio.Row():
-            text_prompt = gradio.Textbox(
-                placeholder=" | ".join(list(self.precomputed_edits)),
-                label="Text Prompt for Edit")
-            edit_strength = gradio.Slider(0, 5, step=.01)
-            add_btn = gradio.Button("Add Edit")
-        add_btn.click(self.add_style, inputs=[cur_face_idx, text_prompt, edit_strength, input_image, show_boxes, current_styles, current_boxes, update_identity, edits], outputs=[output_image, update_identity, edits])
-        update_btn.click(self.update_image, inputs=[input_image, show_boxes], outputs=[output_image, current_styles, current_boxes, update_identity, edits, cur_face_idx])
-        input_image.change(self.update_image, inputs=[input_image, show_boxes], outputs=[output_image, current_styles, current_boxes, update_identity, edits, cur_face_idx])
-        previous.click(self.change_face, inputs=[gradio.State(-1), cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits], outputs=[output_image, update_identity, cur_face_idx])
-        next_.click(self.change_face, inputs=[gradio.State(1), cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits], outputs=[output_image, update_identity, cur_face_idx])
-
-        show_boxes.change(self.anonymize, inputs=[input_image, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits], outputs=[output_image, update_identity])
 
 
 cfg_body = utils.load_config("configs/anonymizers/FB_cse.py")
@@ -131,32 +21,11 @@ cfg_face = utils.load_config("configs/anonymizers/face.py")
 anonymizer_face = instantiate(cfg_face.anonymizer, load_cache=False)
 anonymizer_face.initialize_tracker(fps=1)
 
-class WebcamDemo:
 
-    def __init__(self, anonymizer) -> None:
-        self.anonymizer = anonymizer
-        with gradio.Row():
-            input_image = gradio.Image(type="pil", source="webcam", streaming=True)
-            output_image = gradio.Image(type="numpy", label="Output")
-        visualize_det = gradio.Checkbox(value=False, label="Show Detections")
-        input_image.stream(self.anonymize, [input_image, visualize_det], [output_image])
-        self.track = True
-
-    def anonymize(self, img: Image, visualize_detection: bool):
-        img, cache_id = pil2torch(img)
-        img = tops.to_cuda(img)
-        if visualize_detection:
-            img = self.anonymizer.visualize_detection(img, cache_id=cache_id)
-        else:
-            img = self.anonymizer(
-                img, truncation_value=0, multi_modal_truncation=True, amp=True,
-                cache_id=cache_id, track=self.track)
-        img = utils.im2numpy(img)
-        return img
+class ExampleDemo:
 
-
-class ExampleDemo(WebcamDemo):
-    def __init__(self, anonymizer) -> None:
+    def __init__(self, anonymizer, multi_modal_truncation=False) -> None:
+        self.multi_modal_truncation = multi_modal_truncation
         self.anonymizer = anonymizer
         with gradio.Row():
             input_image = gradio.Image(type="pil", label="Upload your image or try the example below!")
@@ -166,12 +35,24 @@ class ExampleDemo(WebcamDemo):
         visualize_det = gradio.Checkbox(value=False, label="Show Detections")
         visualize_det.change(self.anonymize, inputs=[input_image, visualize_det], outputs=[output_image])
         gradio.Examples(
-            ["erling.jpg", "
+            ["erling.jpg", "g7-summit-leaders-distraction.jpg"], inputs=[input_image]
         )
         update_btn.click(self.anonymize, inputs=[input_image, visualize_det], outputs=[output_image])
         input_image.change(self.anonymize, inputs=[input_image, visualize_det], outputs=[output_image])
         self.track = False
 
+    def anonymize(self, img: Image, visualize_detection: bool):
+        img, cache_id = pil2torch(img)
+        img = tops.to_cuda(img)
+        if visualize_detection:
+            img = self.anonymizer.visualize_detection(img, cache_id=cache_id)
+        else:
+            img = self.anonymizer(
+                img, truncation_value=0 if self.multi_modal_truncation else 1, multi_modal_truncation=self.multi_modal_truncation, amp=True,
+                cache_id=cache_id, track=self.track)
+        img = utils.im2numpy(img)
+        return img
+
 
 def pil2torch(img: Image.Image):
     img = img.convert("RGB")
@@ -184,9 +65,9 @@ with gradio.Blocks() as demo:
     gradio.Markdown("# <center> DeepPrivacy2 - Realistic Image Anonymization </center>")
     gradio.Markdown("### <center> Håkon Hukkelås, Rudolf Mester, Frank Lindseth </center>")
     with gradio.Tab("Full-Body Anonymization"):
-        ExampleDemo(anonymizer_body)
+        ExampleDemo(anonymizer_body, multi_modal_truncation=True)
     with gradio.Tab("Face Anonymization"):
-        ExampleDemo(anonymizer_face)
+        ExampleDemo(anonymizer_face, multi_modal_truncation=False)
 
 
 demo.launch()
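Note: the context lines above cut off pil2torch after the RGB conversion. As a rough sketch of what such a helper plausibly does (the real body is not visible in this diff, so the tensor layout and the None cache id below are assumptions):

import numpy as np
import torch
from PIL import Image

def pil2torch_sketch(img: Image.Image):
    # Convert to RGB, then to a CHW uint8 tensor; the generator path
    # expects uint8 input (the deleted GuidedDemo asserted img.dtype == torch.uint8).
    img = img.convert("RGB")
    arr = np.array(img)  # HWC, uint8
    tensor = torch.from_numpy(arr).permute(2, 0, 1).contiguous()
    return tensor, None  # (image, cache_id); None meaning "no cached detections" is an assumption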
dp2/detection/structures.py
CHANGED
@@ -84,7 +84,8 @@ class FaceDetection:
     def get_crop(self, idx: int, im):
         assert idx < len(self)
         box = self.boxes[idx].numpy()
-
+        simple_expand = False if self.fdf128_expand else True
+        expanded_boxes = expand_bbox_fdf(box, im.shape[-2:], simple_expand=simple_expand)
         im = cut_pad_resize(im, expanded_boxes, self.target_imsize, fdf_resize=True)
         area = (self.boxes[:, 2] - self.boxes[:, 0]) * (self.boxes[:, 3] - self.boxes[:, 1]).view(-1)
 
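The added flag computation reads as a double negative; an equivalent one-liner (a sketch, assuming fdf128_expand is a plain boolean):

simple_expand = not self.fdf128_expand

Either form selects the simple bounding-box expansion exactly when FDF128-style expansion is disabled.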
g7-summit-leaders-distraction.jpg
ADDED
Git LFS Details
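With the *.jpg rule from .gitattributes, the repository itself only stores a small LFS pointer for this image; the actual bytes live in LFS storage. A pointer file has this shape (placeholder values, not the real hash or size of this file):

version https://git-lfs.github.com/spec/v1
oid sha256:<hash>
size <bytes>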