DHEIVER committed on
Commit
77d0df4
·
verified ·
1 Parent(s): 191525b

Upload 5 files

Browse files
Files changed (5) hide show
  1. README (4).md +13 -0
  2. app (6).py +183 -0
  3. gitattributes (3) +34 -0
  4. image.jpg +0 -0
  5. requirements (5).txt +7 -0
README (4).md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Transform Image
3
+ emoji: 🏃
4
+ colorFrom: purple
5
+ colorTo: yellow
6
+ sdk: streamlit
7
+ sdk_version: 1.10.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app (6).py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# --- Imports & one-time app setup -------------------------------------------
# Standard library
import time
from io import BytesIO
from json import load

# Third-party
import cv2
import numpy as np
import requests
import streamlit as st
import torch
from PIL import Image, ImageEnhance
from transformers import DetrFeatureExtractor, DetrForObjectDetection, pipeline

# DETR object-detection model, loaded once at import time and reused by
# detect_objects() below.
feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

# set_page_config must be the first Streamlit command executed in the script.
st.set_page_config(page_title="Do Transform Images", initial_sidebar_state="auto")
st.title("Image Transformation & Detection App")
st.text("Build with Streamlit and OpenCV")

# Haar-cascade classifiers for face / eye / smile detection; the XML files
# are expected in a local "frecog/" directory.
face_cascade = cv2.CascadeClassifier('frecog/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('frecog/haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('frecog/haarcascade_smile.xml')
def detect_faces(our_image):
    """Detect faces in *our_image* (a PIL image) with the Haar cascade.

    Returns a tuple ``(annotated, faces)``: the image as a numpy array with
    a blue rectangle drawn around every detection, plus the raw
    ``detectMultiScale`` result.
    """
    pixels = np.array(our_image.convert('RGB'))
    # NOTE(review): cvtColor code 1 appears to be used just to obtain a
    # working copy of the array — confirm the intended conversion.
    annotated = cv2.cvtColor(pixels, 1)
    gray = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (left, top, width, height) in faces:
        cv2.rectangle(annotated, (left, top), (left + width, top + height), (255, 0, 0), 2)
    return annotated, faces
def detect_eyes(our_image):
    """Detect eyes in *our_image* (a PIL image) and outline each one.

    Returns the image as a numpy array with a green rectangle around every
    detection.
    """
    pixels = np.array(our_image.convert('RGB'))
    marked = cv2.cvtColor(pixels, 1)
    gray = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    eyes = eye_cascade.detectMultiScale(gray, 1.3, 5)
    for (left, top, width, height) in eyes:
        cv2.rectangle(marked, (left, top), (left + width, top + height), (0, 255, 0), 2)
    return marked
47
+
def detect_smiles(our_image):
    """Detect smiles in *our_image* (a PIL image) with the Haar cascade.

    Returns the image as a numpy array with a blue rectangle drawn around
    every detected smile.
    """
    pixels = np.array(our_image.convert('RGB'))
    marked = cv2.cvtColor(pixels, 1)
    gray = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    smiles = smile_cascade.detectMultiScale(gray, 1.1, 4)
    for (left, top, width, height) in smiles:
        cv2.rectangle(marked, (left, top), (left + width, top + height), (255, 0, 0), 2)
    return marked
def cartonize_image(our_image):
    """Apply a cartoon effect to *our_image* (a PIL image).

    Combines an adaptive-threshold edge mask (from a median-blurred
    grayscale) with a bilateral-filtered colour image, so flat colour
    regions are smoothed while edges stay crisp.
    """
    pixels = np.array(our_image.convert('RGB'))
    base = cv2.cvtColor(pixels, 1)
    # Edge mask from the blurred grayscale.
    gray = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
    # Smooth colours without losing edges.
    color = cv2.bilateralFilter(base, 9, 300, 300)
    # Keep colour only where the edge mask allows it.
    return cv2.bitwise_and(color, color, mask=edges)
def cannize_image(our_image):
    """Return the Canny edge map of *our_image* (a PIL image).

    The image is Gaussian-blurred first to suppress noise before edge
    detection with thresholds 100/150.
    """
    pixels = np.array(our_image.convert('RGB'))
    smoothed = cv2.GaussianBlur(cv2.cvtColor(pixels, 1), (11, 11), 0)
    return cv2.Canny(smoothed, 100, 150)
def detect_objects(im):
    """Run DETR object detection on *im* (a PIL image).

    Detections with confidence > 0.9 are reported via st.success and drawn
    as red rectangles on the image, which is then rendered with st.image
    (whose return value is passed through).
    """
    inputs = feature_extractor(images=im, return_tensors="pt")
    outputs = model(**inputs)
    # Convert outputs (bounding boxes and class logits) to COCO API format,
    # rescaled to the original image size (PIL .size is (w, h), hence [::-1]).
    target_sizes = torch.tensor([im.size[::-1]])
    results = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0]
    boxes = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        # Only keep detections with score > 0.9.
        if score > 0.9:
            st.success(
                f"Detected {model.config.id2label[label.item()]} with confidence "
                f"{round(score.item(), 3)} at location {box}"
            )
            boxes.append(box)
    new_img = np.array(im.convert('RGB'))
    img = cv2.cvtColor(new_img, 1)
    # Each box is [xmin, ymin, xmax, ymax] — two opposite corners, not
    # (x, y, width, height), so pass them to cv2.rectangle directly.
    for (xmin, ymin, xmax, ymax) in boxes:
        cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 0, 255))
    return st.image(img)
@st.cache
def load_image(img):
    """Open *img* (a path or an uploaded file-like object) as a PIL image."""
    return Image.open(img)
# ---------------------------------------------------------------------------
# Main UI flow: pick an activity, obtain an image (upload / camera / bundled
# sample), optionally enhance it, then run the selected detection feature.
# ---------------------------------------------------------------------------
activities = ["Detection", "About"]
choice = st.sidebar.selectbox("Select Activity", activities)

def change_photo_state():
    # Flag in session state that the user supplied a photo (upload or camera).
    st.session_state["photo"] = "done"

uploaded_photo = st.file_uploader("Upload Image", type=['jpg', 'png', 'jpeg'], on_change=change_photo_state)
camera_photo = st.camera_input("Take a photo", on_change=change_photo_state)
if "photo" not in st.session_state:
    st.session_state["photo"] = "not done"

if choice == 'Detection':
    st.subheader("Process your images ...")
    if st.session_state["photo"] == "done":
        # Camera input wins if both sources were provided.
        if uploaded_photo:
            our_image = load_image(uploaded_photo)
        if camera_photo:
            our_image = load_image(camera_photo)
        # Fall back to the bundled sample when neither source provided a file
        # (identity check with `is None`, not `== None`).
        if uploaded_photo is None and camera_photo is None:
            our_image = load_image("image.jpg")

        enhance_type = st.sidebar.radio("Enhance Type", ["Original", "Gray-Scale", "Contrast", "Brightness", "Blurring"])
        if enhance_type == 'Gray-Scale':
            new_img = np.array(our_image.convert('RGB'))
            img = cv2.cvtColor(new_img, 1)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            st.image(gray)
        elif enhance_type == 'Contrast':
            c_rate = st.sidebar.slider("Contrast", 0.5, 3.5)
            enhancer = ImageEnhance.Contrast(our_image)
            img_output = enhancer.enhance(c_rate)
            st.image(img_output)
        elif enhance_type == 'Brightness':
            c_rate = st.sidebar.slider("Brightness", 0.5, 3.5)
            enhancer = ImageEnhance.Brightness(our_image)
            img_output = enhancer.enhance(c_rate)
            st.image(img_output)
        elif enhance_type == 'Blurring':
            new_img = np.array(our_image.convert('RGB'))
            # Label fixed: this slider controls the blur sigma, not brightness.
            blur_rate = st.sidebar.slider("Blurring", 0.5, 3.5)
            img = cv2.cvtColor(new_img, 1)
            blur_img = cv2.GaussianBlur(img, (11, 11), blur_rate)
            st.image(blur_img)
        elif enhance_type == 'Original':
            st.image(our_image, width=300)
        else:
            st.image(our_image, width=300)

        # Feature detection
        task = ["Detect_any_objects", "Faces", "Smiles", "Eyes", "Cannize", "Cartonize"]
        feature_choice = st.sidebar.selectbox("Find Features", task)
        if st.button("Process"):
            if feature_choice == 'Faces':
                result_img, result_faces = detect_faces(our_image)
                st.image(result_img)
                st.success("Found {} faces".format(len(result_faces)))
            elif feature_choice == 'Smiles':
                result_img = detect_smiles(our_image)
                st.image(result_img)
            elif feature_choice == 'Eyes':
                with st.spinner('Wait for it...'):
                    # Deliberate delay so the spinner is visible in the demo.
                    time.sleep(5)
                    result_img = detect_eyes(our_image)
                    st.image(result_img)
            elif feature_choice == 'Cartonize':
                result_img = cartonize_image(our_image)
                st.image(result_img)
            elif feature_choice == 'Cannize':
                result_canny = cannize_image(our_image)
                st.image(result_canny)
            elif feature_choice == 'Detect_any_objects':
                detect_objects(our_image)

elif choice == 'About':
    st.subheader("About Face Detection App")
    st.markdown("Built with Streamlit by [Soumen Sarker](https://soumen-sarker-personal-website.streamlitapp.com/)")
    st.markdown("Credit [here](https://huggingface.co/models?pipeline_tag=object-detection)")
gitattributes (3) ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
image.jpg ADDED
requirements (5).txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ Pillow
2
+ altair<5
3
+ streamlit==1.22.0
4
+ opencv-python
5
+ transformers
6
+ torch
7
+ timm