justin2341 committed on
Commit 497f61b · verified · 1 Parent(s): 88dd5d7

Upload 11 files

Files changed (12)
  1. .gitattributes +1 -0
  2. Dockerfile +45 -0
  3. app.py +214 -0
  4. libopencv.zip +3 -0
  5. libvein.so +3 -0
  6. license.txt +5 -0
  7. model/vein1v0.bin +3 -0
  8. ncnn.zip +3 -0
  9. requirements.txt +6 -0
  10. roi.py +128 -0
  11. run.sh +3 -0
  12. veinsdk.py +26 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ libvein.so filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,45 @@
+ FROM openvino/ubuntu20_runtime:2024.5.0
+
+ USER root
+ RUN rm -rf /var/lib/apt/lists/* && apt update && apt install -y unzip \
+     libjpeg8 \
+     libwebp6 \
+     libpng16-16 \
+     libtbb2 \
+     libtiff5 \
+     libtbb-dev \
+     libopenexr-dev \
+     libgl1-mesa-glx \
+     libglib2.0-0 \
+     libgomp1
+
+ # Set up working directory
+ RUN mkdir -p /home/openvino/kby-ai-palmvein
+ WORKDIR /home/openvino/kby-ai-palmvein
+
+ # Copy shared libraries and application files
+ COPY ./libopencv.zip .
+ RUN unzip libopencv.zip
+ RUN cp -f libopencv/* /usr/local/lib/
+ RUN ldconfig
+
+ # Copy Python and application files
+ COPY ./model ./model
+ COPY ./libvein.so .
+ COPY ./app.py .
+ COPY ./roi.py .
+ COPY ./veinsdk.py .
+ COPY ./requirements.txt .
+ COPY ./run.sh .
+ COPY ./license.txt .
+ COPY ./ncnn.zip .
+ RUN unzip ncnn.zip
+
+ # Install Python dependencies
+ RUN pip3 install --no-cache-dir -r requirements.txt
+
+ # Set up entrypoint
+ CMD ["bash", "./run.sh"]
+
+ # Expose ports
+ EXPOSE 8080 9000
app.py ADDED
@@ -0,0 +1,214 @@
+ import sys
+ sys.path.append('.')
+
+ import os
+ import base64
+ import json
+ from ctypes import *
+ import cv2
+ import numpy as np
+ from flask import Flask, request, jsonify
+ from veinsdk import *
+ from roi import *
+
+ licensePath = "license.txt"
+ license = ""
+
+ machineCode = getMachineCode()
+ print("\nmachineCode: ", machineCode.decode('utf-8'))
+
+ try:
+     with open(licensePath, 'r') as file:
+         license = file.read().strip()
+ except IOError as exc:
+     print("failed to open license.txt: ", exc.errno)
+
+ print("\nlicense: ", license)
+
+ ret = setActivation(license.encode('utf-8'))
+ print("\nactivation: ", ret)
+
+ ret = initSDK()
+ print("init: ", ret)
+
+ app = Flask(__name__)
+
+ def mat_to_bytes(mat):
+     """
+     Convert cv::Mat image data (a NumPy array in Python) to raw bytes.
+     """
+     # Encode cv::Mat as PNG bytes
+     is_success, buffer = cv2.imencode(".png", mat)
+     if not is_success:
+         raise ValueError("Failed to encode cv::Mat image")
+     return buffer.tobytes()
+
+ @app.route('/palmvein', methods=['POST'])
+ def palmvein():
+     result = None
+     score = None
+
+     file1 = request.files['file1']
+     file2 = request.files['file2']
+
+     try:
+         image1 = cv2.imdecode(np.frombuffer(file1.read(), np.uint8), cv2.IMREAD_COLOR)
+     except Exception:
+         result = "Failed to open file1"
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     try:
+         image2 = cv2.imdecode(np.frombuffer(file2.read(), np.uint8), cv2.IMREAD_COLOR)
+     except Exception:
+         result = "Failed to open file2"
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     roi1, label1 = get_roi_image(cv2.flip(image1, 1))
+     roi2, label2 = get_roi_image(cv2.flip(image2, 1))
+
+     if label1 != label2:
+         result = "The two images are from different hands"
+         score = 0.0
+         response = jsonify({"result": result, "score": float(score)})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     if roi1 is None or roi2 is None:
+         result = "\n hand detection failed!\n please make sure that the input hand image is valid."
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     roi_byte1 = mat_to_bytes(roi1)
+     roi_byte2 = mat_to_bytes(roi2)
+     feature_array1, feature_array2 = (c_float * 1024)(), (c_float * 1024)()  # buffers for up to 1024 feature values each
+     cnt1 = getFeature(roi_byte1, len(roi_byte1), feature_array1)
+     cnt2 = getFeature(roi_byte2, len(roi_byte2), feature_array2)
+
+     if cnt1 == 0 or cnt2 == 0:
+         result = "feature extraction failed!"
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     score = getScore(feature_array1, cnt1, feature_array2, cnt2)
+     if score >= 0.65:
+         result = "Same Hand!"
+         # print(f"\n 2 images are from the same hand\n similarity: {score}")
+         response = jsonify({"result": result, "score": float(score)})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+     else:
+         result = "Different Hand!"
+         # print(f"\n 2 images are from different hands\n similarity: {score}")
+         response = jsonify({"result": result, "score": float(score)})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+ @app.route('/palmvein_base64', methods=['POST'])
+ def palmvein_base64():
+
+     result = None
+     score = None
+
+     content = request.get_json()
+
+     try:
+         imageBase64 = content['base64_1']
+         image_data = base64.b64decode(imageBase64)
+         np_array = np.frombuffer(image_data, np.uint8)
+         image1 = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
+     except Exception:
+         result = "Failed to open file1"
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     try:
+         imageBase64 = content['base64_2']
+         image_data = base64.b64decode(imageBase64)
+         np_array = np.frombuffer(image_data, np.uint8)
+         image2 = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
+     except Exception:
+         result = "Failed to open file2"
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     roi1, label1 = get_roi_image(cv2.flip(image1, 1))
+     roi2, label2 = get_roi_image(cv2.flip(image2, 1))
+
+     if label1 != label2:
+         result = "The two images are from different hands"
+         score = 0.0
+         response = jsonify({"result": result, "score": float(score)})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     if roi1 is None or roi2 is None:
+         result = "\n hand detection failed!\n please make sure that the input hand image is valid."
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     roi_byte1 = mat_to_bytes(roi1)
+     roi_byte2 = mat_to_bytes(roi2)
+     feature_array1, feature_array2 = (c_float * 1024)(), (c_float * 1024)()  # buffers for up to 1024 feature values each
+     cnt1 = getFeature(roi_byte1, len(roi_byte1), feature_array1)
+     cnt2 = getFeature(roi_byte2, len(roi_byte2), feature_array2)
+
+     if cnt1 == 0 or cnt2 == 0:
+         result = "feature extraction failed!"
+         response = jsonify({"result": result, "score": score})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+     score = getScore(feature_array1, cnt1, feature_array2, cnt2)
+     if score >= 0.65:
+         result = "Same Hand!"
+         # print(f"\n 2 images are from the same hand\n similarity: {score}")
+         response = jsonify({"result": result, "score": float(score)})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+     else:
+         result = "Different Hand!"
+         # print(f"\n 2 images are from different hands\n similarity: {score}")
+         response = jsonify({"result": result, "score": float(score)})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+ if __name__ == '__main__':
+     port = int(os.environ.get("PORT", 8080))
+     app.run(host='0.0.0.0', port=port)
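For reference, here is a minimal client sketch for the two endpoints defined in app.py above. It assumes the service is reachable at http://127.0.0.1:8080 (the port exposed by the Dockerfile) and uses placeholder image paths palm1.jpg and palm2.jpg; none of these names are part of the commit.

import base64
import requests

BASE_URL = "http://127.0.0.1:8080"  # assumed host/port, not part of this commit

# Multipart endpoint: the form field names must be file1 and file2.
with open("palm1.jpg", "rb") as f1, open("palm2.jpg", "rb") as f2:
    r = requests.post(f"{BASE_URL}/palmvein", files={"file1": f1, "file2": f2})
print(r.json())  # {"result": ..., "score": ...}

# JSON endpoint: base64-encoded images under base64_1 and base64_2.
with open("palm1.jpg", "rb") as f1, open("palm2.jpg", "rb") as f2:
    payload = {
        "base64_1": base64.b64encode(f1.read()).decode("utf-8"),
        "base64_2": base64.b64encode(f2.read()).decode("utf-8"),
    }
r = requests.post(f"{BASE_URL}/palmvein_base64", json=payload)
print(r.json())

Note that both endpoints always return HTTP 200 and report failures through the result field, so a client should inspect the JSON body rather than the status code.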
libopencv.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8845c1412c45c484e054235269944e2ac43c90a148ce3444215fe52049cf7479
+ size 61014815
libvein.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85bf6dbeae145a4de388a31fbd0a066ab151ef3202acaef8a918aa732ccd9980
+ size 3030231
license.txt ADDED
@@ -0,0 +1,5 @@
+ gE2IfAwDsg/dQZwcfNkjWKKn2Q85X46TXZ1RimFkGkpQZIj58/YnxQRVcEQDTpHfLzl7RuemGG0E
+ e4j1UL2ed2g3dbMJaJb6EWCyihr3NqtwQcXYTLLDUjrs0Yca9BeeToggaReRzFzwXGZgSPLnPdQ3
+ RwOF1nPLEjcD/u/Z1ZdM487PaMlUivP3t8ZHlzUZvSOowfzHOlt8SIPP8C2BL3ZpqjA+dp2ycLC+
+ Ak7lYVHnaGwiI3mjq4Gv/rUp9HK6/OkwAPxdZ7PbHpMahVyqoyy861LB9WNPi7ngbPxHWmaJQR26
+ mi/+m+dew1YJ4pRQtqNsxLRNQBtgULkaZHx/qw==
model/vein1v0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98b3becafba3078506f57a7a63270e7b8e5adbd9f33e36facefe9c13fcc1dcf7
+ size 96046676
ncnn.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a702183c67f189f3a29c32186ab99e35b2df47a7f61c74aef2c600e7386059b
+ size 32239397
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ flask
+ flask-cors
+ gradio==3.50.2
+ datadog_api_client
+ opencv-python
+ mediapipe
roi.py ADDED
@@ -0,0 +1,128 @@
+ import cv2
+ import numpy as np
+ import mediapipe as mp
+ mp_drawing = mp.solutions.drawing_utils
+ mp_drawing_styles = mp.solutions.drawing_styles
+ mp_hands = mp.solutions.hands
+
+ roi_bad_pixel_number = 500  # maximum number of ROI pixels whose three RGB values are all zero
+ roi_aspect_ratio_threshold = 100  # maximum allowed difference (in pixels) between ROI height and width
+ roi_size_threshold = 0.23
+ padding_size = 300  # extra margin (in pixels) added around the frame fed to the Google MediaPipe graph (must be a multiple of two)
+
+ def img_padding(img):
+     h, w, _ = img.shape
+     image = np.zeros((h + padding_size, w + padding_size, 3), np.uint8)
+     image[int(padding_size / 2):-int(padding_size / 2), int(padding_size / 2):-int(padding_size / 2), :] = img
+     return image
+
+ def img_crop(img_original, x2, x1, y2, y1, label):
+
+     h, w, _ = img_original.shape
+     img = np.zeros((h + 20, w + 20, 3), np.uint8)
+     img[10:-10, 10:-10, :] = img_original
+     if label == "Right":
+         v1 = np.array([x2 * w, y2 * h])
+         v2 = np.array([x1 * w, y1 * h])
+     else:
+         v2 = np.array([x2 * w, y2 * h])
+         v1 = np.array([x1 * w, y1 * h])
+
+     theta = np.arctan2((v2 - v1)[1], (v2 - v1)[0]) * 180 / np.pi
+     R = cv2.getRotationMatrix2D(tuple([int(v2[0]), int(v2[1])]), theta, 1)
+
+     v1 = (R[:, :2] @ v1 + R[:, -1]).astype(int)
+     v2 = (R[:, :2] @ v2 + R[:, -1]).astype(int)
+     img_r = cv2.warpAffine(img, R, (w, h))
+
+     if 1:
+         ux = int(v1[0] - (v2 - v1)[0] * 0.05)
+         uy = int(v1[1] + (v2 - v1)[0] * 0.05)
+         lx = int(v2[0] + (v2 - v1)[0] * 0.05)
+         ly = int(v2[1] + (v2 - v1)[0] * 1)
+     else:
+         ux = int(v1[0] - (v2 - v1)[0] * 0.1)
+         uy = int(v1[1] + (v2 - v1)[0] * 0.1)
+         lx = int(v2[0] + (v2 - v1)[0] * 0.1)
+         ly = int(v2[1] + (v2 - v1)[0] * 1.2)
+
+     # delta_y shifts the crop window along the y-axis
+     delta_y = (ly - uy) * 0.15
+
+     ly = int(ly - delta_y)
+     uy = int(uy - delta_y)
+
+     delta_x = (lx - ux) * 0.01
+     lx = int(lx + delta_x)
+     ux = int(ux + delta_x)
+
+     if label == "Right":
+         delta_x = (lx - ux) * 0.05
+         lx = int(lx + delta_x)
+         ux = int(ux + delta_x)
+     # roi = img_r
+     roi = img_r[uy:ly, ux:lx]
+     if roi.shape[0] == 0 or roi.shape[1] == 0:
+         print("error 1")
+         return None, 3
+
+     if abs(roi.shape[0] - roi.shape[1]) > roi_aspect_ratio_threshold:
+         print("error 2", abs(roi.shape[0] - roi.shape[1]))
+         return None, 4
+     if roi.shape[1] / w < roi_size_threshold:
+         print("error 3", roi.shape[1] / w)
+         return None, 7
+
+     n_zeros = np.count_nonzero(roi == 0)
+     if n_zeros > roi_bad_pixel_number:
+         print("error 4", n_zeros)
+         return None, 5
+     return roi, 0
+
+ def cupped_hand_filter(hand_landmarks):
+     return hand_landmarks.landmark[12].y - hand_landmarks.landmark[11].y
+
+ def get_roi(path, hand_type, x1, y1, x2, y2):
+     img = cv2.imread(path)
+
+     if hand_type != 0:
+         label = "Left"
+     else:
+         label = "Right"
+     roi, _ = img_crop(img, x1, x2, y1, y2, label)
+     return roi
+
+ def get_roi_image(img):
+
+     label = ""
+     with mp_hands.Hands(
+             static_image_mode=True,
+             max_num_hands=2,
+             min_detection_confidence=0.5) as hands:
+         # The input frame has already been flipped around the y-axis
+         # (see app.py) so that the handedness output is correct.
+         if 1:
+             image = img_padding(img)
+         else:
+             image = cv2.flip(cv2.imread(file), 1)
+
+         # Convert the BGR image to RGB before processing.
+         results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+
+         # Read handedness and the hand landmarks from the result.
+         if results.multi_handedness is not None:
+             label = results.multi_handedness[0].classification[0].label
+
+         if results.multi_hand_landmarks is None:
+             return None, None
+
+         image_height, image_width, _ = image.shape
+         hand_landmarks = results.multi_hand_landmarks[0]
+
+         if cupped_hand_filter(hand_landmarks) > 0:
+             return None, None
+         else:
+             roi, roi_msg_index = img_crop(image, hand_landmarks.landmark[5].x, hand_landmarks.landmark[17].x,
+                                           hand_landmarks.landmark[5].y, hand_landmarks.landmark[17].y, label)
+
+         return roi, label
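A rough standalone sketch of how get_roi_image from roi.py can be driven outside the Flask app; the file names hand.jpg and roi.png are placeholders, not part of this commit, and the frame is mirrored first because app.py does the same before calling it.

import cv2
from roi import get_roi_image

img = cv2.imread("hand.jpg")                  # placeholder input image
if img is None:
    raise SystemExit("could not read hand.jpg")

roi, label = get_roi_image(cv2.flip(img, 1))  # flip as app.py does
if roi is None:
    print("hand detection failed or the ROI was rejected")
else:
    print("handedness label:", label)
    cv2.imwrite("roi.png", roi)               # placeholder output path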
run.sh ADDED
@@ -0,0 +1,3 @@
+ # cd /home/openvino/kby-ai-fire
+ # exec python3 demo.py &
+ exec python3 app.py
veinsdk.py ADDED
@@ -0,0 +1,26 @@
+ import os
+
+ from ctypes import *
+
+ libPath = os.path.abspath(os.path.dirname(__file__)) + '/libvein.so'
+ veinsdk = cdll.LoadLibrary(libPath)
+
+ getMachineCode = veinsdk.getMachineCode
+ getMachineCode.argtypes = []
+ getMachineCode.restype = c_char_p
+
+ setActivation = veinsdk.setActivation
+ setActivation.argtypes = [c_char_p]
+ setActivation.restype = c_int32
+
+ initSDK = veinsdk.initSDK
+ initSDK.argtypes = []
+ initSDK.restype = c_int32
+
+ getFeature = veinsdk.get_feature
+ getFeature.argtypes = [c_char_p, c_ulong, POINTER(c_float)]
+ getFeature.restype = c_int32
+
+ getScore = veinsdk.get_score
+ getScore.argtypes = [POINTER(c_float), c_ulong, POINTER(c_float), c_ulong]
+ getScore.restype = c_float
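The bindings above are consumed by app.py roughly as follows. This is a condensed sketch of that flow, assuming license.txt and libvein.so are present next to the script; the ROI files roi1.png and roi2.png are placeholders for PNG-encoded palm ROIs and are not part of this commit.

from ctypes import c_float
from veinsdk import getMachineCode, setActivation, initSDK, getFeature, getScore

print("machine code:", getMachineCode().decode("utf-8"))
with open("license.txt", "r") as f:
    setActivation(f.read().strip().encode("utf-8"))
initSDK()

features = []
for path in ("roi1.png", "roi2.png"):  # placeholder ROI images
    with open(path, "rb") as f:
        data = f.read()
    buf = (c_float * 1024)()           # same buffer size app.py allocates
    count = getFeature(data, len(data), buf)
    features.append((buf, count))

(buf1, cnt1), (buf2, cnt2) = features
if cnt1 and cnt2:
    print("similarity:", getScore(buf1, cnt1, buf2, cnt2))  # app.py treats >= 0.65 as a match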