Mzphyr committed
Commit 5969345 · verified · 1 Parent(s): a889b2c

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -1,35 +1,10 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Auto detect text files and perform LF normalization
+ * text=auto
+ media/avgpcperformancedemo.gif filter=lfs diff=lfs merge=lfs -text
+ media/deepwarebench.gif filter=lfs diff=lfs merge=lfs -text
+ media/demo.gif filter=lfs diff=lfs merge=lfs -text
+ media/live_show.gif filter=lfs diff=lfs merge=lfs -text
+ media/ludwig.gif filter=lfs diff=lfs merge=lfs -text
+ media/meme.gif filter=lfs diff=lfs merge=lfs -text
+ media/movie.gif filter=lfs diff=lfs merge=lfs -text
+ media/streamers.gif filter=lfs diff=lfs merge=lfs -text
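The rewritten `.gitattributes` replaces the stock Hugging Face LFS rules with LF normalization (`* text=auto`) plus per-file LFS rules for the media GIFs. As a hedged sketch of where such per-file rules typically come from (assuming the `git-lfs` extension is installed; the file name is one of those listed above):

```bash
# Sketch: producing a per-file LFS rule like the ones above
# (assumes the git-lfs extension is installed)
git lfs install                 # set up the LFS filters in the repo once
git lfs track "media/demo.gif"  # appends the matching rule to .gitattributes
git add .gitattributes media/demo.gif
```
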
.github/ISSUE_TEMPLATE/bug_report.md ADDED
@@ -0,0 +1,26 @@
+ ***[Remove this] The issue will be closed without notice and considered spam if the template is not followed.***
+
+ **Describe the bug**
+ A clear and concise description of what the bug is.
+
+ **Screenshots**
+ If applicable, add screenshots to help explain your problem.
+
+ **Error Message**
+
+ `<The error message in terminal>`
+
+ **Desktop (please complete the following information):**
+ - OS: [e.g. Windows]
+ - Version: [e.g. 22]
+ - GPU
+ - CPU
+
+ **Additional context**
+ Add any other context about the problem here.
+
+ **Confirmation (Mandatory)**
+ - [ ] I have followed the template
+ - [ ] This is not a query about how to increase performance
+ - [ ] I have checked the issues page, and this is not a duplicate
+
.gitignore ADDED
@@ -0,0 +1,27 @@
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.pyc
+ .idea
+ .todo
+ *.log
+ *.backup
+ tf_env/
+ *.png
+ *.mp4
+ *.mkv
+
+ .tmp/
+ temp/
+ .venv/
+ venv/
+ env/
+ workflow/
+ gfpgan/
+ models/inswapper_128.onnx
+ models/GFPGANv1.4.pth
+ *.onnx
+ models/DMDNet.pth
+ faceswap/
+ .vscode/
+ switch_states.json
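Note that the model weights are excluded by overlapping patterns (`*.onnx` as well as the explicit `models/` paths). A quick, hedged way to see which rule actually matches a given file is `git check-ignore` (the paths are the ones listed above):

```bash
# Sketch: report the .gitignore rule that matches each path
git check-ignore -v models/inswapper_128.onnx
git check-ignore -v models/GFPGANv1.4.pth
```
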
CONTRIBUTING.md ADDED
@@ -0,0 +1,38 @@
+ # Collaboration Guidelines and Codebase Quality Standards
+
+ To ensure smooth collaboration and maintain the high quality of our codebase, please adhere to the following guidelines:
+
+ ## Branching Strategy
+
+ * **`premain`**:
+   * Always push your changes to the `premain` branch initially.
+   * This safeguards the `main` branch from unintentional disruptions.
+   * All tests will be performed on the `premain` branch.
+   * Changes will only be merged into `main` after several hours or days of rigorous testing.
+ * **`experimental`**:
+   * For large or potentially disruptive changes, use the `experimental` branch.
+   * This allows for thorough discussion and review before considering a merge into `main`.
+
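A minimal command-line sketch of the branching strategy above, assuming your remote is named `origin` and using a hypothetical branch name `my-fix`:

```bash
# Sketch: route a change through premain per the strategy above
# (assumes remote "origin"; the branch name "my-fix" is hypothetical)
git fetch origin
git checkout -b my-fix origin/premain   # start from premain, never from main
# ...make and commit your changes...
git push -u origin my-fix               # then open the PR against premain, not main
```
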
+ ## Pre-Pull Request Checklist
+
+ Before creating a Pull Request (PR), ensure you have completed the following tests:
+
+ ### Functionality
+
+ * **Realtime Faceswap**:
+   * Test with face enhancer **enabled** and **disabled**.
+ * **Map Faces**:
+   * Test with both options (**enabled** and **disabled**).
+ * **Camera Listing**:
+   * Verify that all cameras are listed accurately.
+
+ ### Stability
+
+ * **Realtime FPS**:
+   * Confirm that there is no drop in real-time frames per second (FPS).
+ * **Boot Time**:
+   * Changes should not negatively impact the boot time of either the application or the real-time faceswap feature.
+ * **GPU Overloading**:
+   * Test for a minimum of 15 minutes to guarantee no GPU overloading, which could lead to crashes.
+ * **App Performance**:
+   * The application should remain responsive and not exhibit any lag.
LICENSE ADDED
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+ software and other kinds of works, specifically designed to ensure
+ cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ our General Public Licenses are intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+ with two steps: (1) assert copyright on the software, and (2) offer
+ you this License which gives you legal permission to copy, distribute
+ and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+ improvements made in alternate versions of the program, if they
+ receive widespread use, become available for other developers to
+ incorporate. Many developers of free software are heartened and
+ encouraged by the resulting cooperation. However, in the case of
+ software used on network servers, this result may fail to come about.
+ The GNU General Public License permits making a modified version and
+ letting the public access it on a server without ever releasing its
+ source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ ensure that, in such cases, the modified source code becomes available
+ to the community. It requires the operator of a network server to
+ provide the source code of the modified version running there to the
+ users of that server. Therefore, public use of a modified version, on
+ a publicly accessible server, gives the public access to the source
+ code of the modified version.
+
+ An older license, called the Affero General Public License and
+ published by Affero, was designed to accomplish similar goals. This is
+ a different license, not a version of the Affero GPL, but Affero has
+ released a new version of the Affero GPL which permits relicensing under
+ this license.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+ Program, your modified version must prominently offer all users
+ interacting with it remotely through a computer network (if your version
+ supports such interaction) an opportunity to receive the Corresponding
+ Source of your version by providing access to the Corresponding Source
+ from a network server at no charge, through some standard or customary
+ means of facilitating copying of software. This Corresponding Source
+ shall include the Corresponding Source for any work covered by version 3
+ of the GNU General Public License that is incorporated pursuant to the
+ following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the work with which it is combined will remain governed by version
+ 3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU Affero General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU Affero General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU Affero General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU Affero General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+ network, you should also make sure that it provides a way for users to
+ get its source. For example, if your program is a web application, its
+ interface could display a "Source" link that leads users to an archive
+ of the code. There are many ways you could offer source, and different
+ solutions will be better for different programs; see section 13 for the
+ specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU AGPL, see
+ <https://www.gnu.org/licenses/>.
README.md CHANGED
@@ -1,12 +1,388 @@
- ---
- title: Dlcmuz
- emoji: 📉
- colorFrom: green
- colorTo: pink
- sdk: gradio
- sdk_version: 5.29.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ <h1 align="center">Deep-Live-Cam</h1>
+
+ <p align="center">
+ Real-time face swap and video deepfake with a single click and only a single image.
+ </p>
+
+ <p align="center">
+ <a href="https://trendshift.io/repositories/11395" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11395" alt="hacksider%2FDeep-Live-Cam | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
+ </p>
+
+ <p align="center">
+ <img src="media/demo.gif" alt="Demo GIF" width="800">
+ </p>
+
+ ## Disclaimer
+
+ This deepfake software is designed to be a productive tool for the AI-generated media industry. It can assist artists in animating custom characters, creating engaging content, and even using models for clothing design.
+
+ We are aware of the potential for unethical applications and are committed to preventative measures. A built-in check prevents the program from processing inappropriate media (nudity, graphic content, sensitive material like war footage, etc.). We will continue to develop this project responsibly, adhering to the law and ethics. We may shut down the project or add watermarks if legally required.
+
+ - Ethical Use: Users are expected to use this software responsibly and legally. If using a real person's face, obtain their consent and clearly label any output as a deepfake when sharing online.
+
+ - Content Restrictions: The software includes built-in checks to prevent processing inappropriate media, such as nudity, graphic content, or sensitive material.
+
+ - Legal Compliance: We adhere to all relevant laws and ethical guidelines. If legally required, we may shut down the project or add watermarks to the output.
+
+ - User Responsibility: We are not responsible for end-user actions. Users must ensure their use of the software aligns with ethical standards and legal requirements.
+
+ By using this software, you agree to these terms and commit to using it in a manner that respects the rights and dignity of others.
+
+ ## Exclusive v2.0 Quick Start - Pre-built (Windows)
+
+ <a href="https://deeplivecam.net/index.php/quickstart"><img src="media/Download.png" width="285" height="77" /></a>
+
+ ##### This is the fastest build you can get if you have a discrete NVIDIA or AMD GPU.
+
+ ###### These pre-builts are perfect for non-technical users and for those who don't have time to, or can't, manually install all the requirements. Just a heads-up: this is an open-source project, so you can also install it manually. The pre-built version stays 60 days ahead of the open-source version.
+
+ ## TL;DR: Live Deepfake in Just 3 Clicks
+ ![easysteps](https://github.com/user-attachments/assets/af825228-852c-411b-b787-ffd9aac72fc6)
+ 1. Select a face
+ 2. Select which camera to use
+ 3. Press live!
+
+ ## Features & Uses - Everything is in real-time
+
+ ### Mouth Mask
+
+ **Retain your original mouth for accurate movement using Mouth Mask**
+
+ <p align="center">
+ <img src="media/ludwig.gif" alt="resizable-gif">
+ </p>
+
+ ### Face Mapping
+
+ **Use different faces on multiple subjects simultaneously**
+
+ <p align="center">
+ <img src="media/streamers.gif" alt="face_mapping_source">
+ </p>
+
+ ### Your Movie, Your Face
+
+ **Watch movies with any face in real-time**
+
+ <p align="center">
+ <img src="media/movie.gif" alt="movie">
+ </p>
+
+ ### Live Show
+
+ **Run Live shows and performances**
+
+ <p align="center">
+ <img src="media/live_show.gif" alt="show">
+ </p>
+
+ ### Memes
+
+ **Create Your Most Viral Meme Yet**
+
+ <p align="center">
+ <img src="media/meme.gif" alt="show" width="450">
+ <br>
+ <sub>Created using the Many Faces feature in Deep-Live-Cam</sub>
+ </p>
+
+ ### Omegle
+
+ **Surprise people on Omegle**
+
+ <p align="center">
+ <video src="https://github.com/user-attachments/assets/2e9b9b82-fa04-4b70-9f56-b1f68e7672d0" width="450" controls></video>
+ </p>
+
+ ## Installation (Manual)
+
+ **Please be aware that the installation requires technical skills and is not for beginners. Consider downloading the prebuilt version.**
+
+ <details>
+ <summary>Click to see the process</summary>
+
+ ### Installation
+
+ This is more likely to work on your computer but will be slower, as it utilizes the CPU.
+
+ **1. Set up Your Platform**
+
+ - Python (3.10 recommended)
+ - pip
+ - git
+ - [ffmpeg](https://www.youtube.com/watch?v=OlNWCpFdVMA) - ```iex (irm ffmpeg.tc.ht)```
+ - [Visual Studio 2022 Runtimes (Windows)](https://visualstudio.microsoft.com/visual-cpp-build-tools/)
+
+ **2. Clone the Repository**
+
+ ```bash
+ git clone https://github.com/hacksider/Deep-Live-Cam.git
+ cd Deep-Live-Cam
+ ```
+
+ **3. Download the Models**
+
+ 1. [GFPGANv1.4](https://huggingface.co/hacksider/deep-live-cam/resolve/main/GFPGANv1.4.pth)
+ 2. [inswapper\_128\_fp16.onnx](https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128_fp16.onnx)
+
+ Place these files in the "**models**" folder.
+
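If you prefer the terminal, here is a hedged sketch for fetching both files into `models/` (assumes `curl` is available; the URLs are the ones linked above):

```bash
# Sketch: download both model files into the models/ folder
mkdir -p models
curl -L -o models/GFPGANv1.4.pth \
  https://huggingface.co/hacksider/deep-live-cam/resolve/main/GFPGANv1.4.pth
curl -L -o models/inswapper_128_fp16.onnx \
  https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128_fp16.onnx
```
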
132
+ **4. Install Dependencies**
133
+
134
+ We highly recommend using a `venv` to avoid issues.
135
+
136
+
137
+ For Windows:
138
+ ```bash
139
+ python -m venv venv
140
+ venv\Scripts\activate
141
+ pip install -r requirements.txt
142
+ ```
143
+ For Linux:
144
+ ```bash
145
+ # Ensure you use the installed Python 3.10
146
+ python3 -m venv venv
147
+ source venv/bin/activate
148
+ pip install -r requirements.txt
149
+ ```
150
+
151
+ **For macOS:**
152
+
153
+ Apple Silicon (M1/M2/M3) requires specific setup:
154
+
155
+ ```bash
156
+ # Install Python 3.10 (specific version is important)
157
+ brew install [email protected]
158
+
159
+ # Install tkinter package (required for the GUI)
160
+ brew install [email protected]
161
+
162
+ # Create and activate virtual environment with Python 3.10
163
+ python3.10 -m venv venv
164
+ source venv/bin/activate
165
+
166
+ # Install dependencies
167
+ pip install -r requirements.txt
168
+ ```
169
+
170
+ ** In case something goes wrong and you need to reinstall the virtual environment **
171
+
172
+ ```bash
173
+ # Deactivate the virtual environment
174
+ rm -rf venv
175
+
176
+ # Reinstall the virtual environment
177
+ python -m venv venv
178
+ source venv/bin/activate
179
+
180
+ # install the dependencies again
181
+ pip install -r requirements.txt
182
+ ```
183
+
184
+ **Run:** If you don't have a GPU, you can run Deep-Live-Cam using `python run.py`. Note that initial execution will download models (~300MB).
185
+
186
+ ### GPU Acceleration
187
+
188
+ **CUDA Execution Provider (Nvidia)**
189
+
190
+ 1. Install [CUDA Toolkit 11.8.0](https://developer.nvidia.com/cuda-11-8-0-download-archive)
191
+ 2. Install dependencies:
192
+
193
+ ```bash
194
+ pip uninstall onnxruntime onnxruntime-gpu
195
+ pip install onnxruntime-gpu==1.16.3
196
+ ```
197
+
198
+ 3. Usage:
199
+
200
+ ```bash
201
+ python run.py --execution-provider cuda
202
+ ```
203
+
204
+ **CoreML Execution Provider (Apple Silicon)**
205
+
206
+ Apple Silicon (M1/M2/M3) specific installation:
207
+
208
+ 1. Make sure you've completed the macOS setup above using Python 3.10.
209
+ 2. Install dependencies:
210
+
211
+ ```bash
212
+ pip uninstall onnxruntime onnxruntime-silicon
213
+ pip install onnxruntime-silicon==1.13.1
214
+ ```
215
+
216
+ 3. Usage (important: specify Python 3.10):
217
+
218
+ ```bash
219
+ python3.10 run.py --execution-provider coreml
220
+ ```
221
+
222
+ **Important Notes for macOS:**
223
+ - You **must** use Python 3.10, not newer versions like 3.11 or 3.13
224
+ - Always run with `python3.10` command not just `python` if you have multiple Python versions installed
225
+ - If you get error about `_tkinter` missing, reinstall the tkinter package: `brew reinstall [email protected]`
226
+ - If you get model loading errors, check that your models are in the correct folder
227
+ - If you encounter conflicts with other Python versions, consider uninstalling them:
228
+ ```bash
229
+ # List all installed Python versions
230
+ brew list | grep python
231
+
232
+ # Uninstall conflicting versions if needed
233
+ brew uninstall --ignore-dependencies [email protected] [email protected]
234
+
235
+ # Keep only Python 3.10
236
+ brew cleanup
237
+ ```
238
+
239
+ **CoreML Execution Provider (Apple Legacy)**
240
+
241
+ 1. Install dependencies:
242
+
243
+ ```bash
244
+ pip uninstall onnxruntime onnxruntime-coreml
245
+ pip install onnxruntime-coreml==1.13.1
246
+ ```
247
+
248
+ 2. Usage:
249
+
250
+ ```bash
251
+ python run.py --execution-provider coreml
252
+ ```
253
+
254
+ **DirectML Execution Provider (Windows)**
255
+
256
+ 1. Install dependencies:
257
+
258
+ ```bash
259
+ pip uninstall onnxruntime onnxruntime-directml
260
+ pip install onnxruntime-directml==1.15.1
261
+ ```
262
+
263
+ 2. Usage:
264
+
265
+ ```bash
266
+ python run.py --execution-provider directml
267
+ ```
268
+
269
+ **OpenVINO™ Execution Provider (Intel)**
270
+
271
+ 1. Install dependencies:
272
+
273
+ ```bash
274
+ pip uninstall onnxruntime onnxruntime-openvino
275
+ pip install onnxruntime-openvino==1.15.0
276
+ ```
277
+
278
+ 2. Usage:
279
+
280
+ ```bash
281
+ python run.py --execution-provider openvino
282
+ ```
283
+ </details>
284
+
285
+ ## Usage
286
+
287
+ **1. Image/Video Mode**
288
+
289
+ - Execute `python run.py`.
290
+ - Choose a source face image and a target image/video.
291
+ - Click "Start".
292
+ - The output will be saved in a directory named after the target video.
293
+
294
+ **2. Webcam Mode**
295
+
296
+ - Execute `python run.py`.
297
+ - Select a source face image.
298
+ - Click "Live".
299
+ - Wait for the preview to appear (10-30 seconds).
300
+ - Use a screen capture tool like OBS to stream.
301
+ - To change the face, select a new source image.
302
+
303
+ ## Tips and Tricks
304
+
305
+ Check out these helpful guides to get the most out of Deep-Live-Cam:
306
+
307
+ - [Unlocking the Secrets to the Perfect Deepfake Image](https://deeplivecam.net/index.php/blog/tips-and-tricks/unlocking-the-secrets-to-the-perfect-deepfake-image) - Learn how to create the best deepfake with full head coverage
308
+ - [Video Call with DeepLiveCam](https://deeplivecam.net/index.php/blog/tips-and-tricks/video-call-with-deeplivecam) - Make your meetings livelier by using DeepLiveCam with OBS and meeting software
309
+ - [Have a Special Guest!](https://deeplivecam.net/index.php/blog/tips-and-tricks/have-a-special-guest) - Tutorial on how to use face mapping to add special guests to your stream
310
+ - [Watch Deepfake Movies in Realtime](https://deeplivecam.net/index.php/blog/tips-and-tricks/watch-deepfake-movies-in-realtime) - See yourself star in any video without processing the video
311
+ - [Better Quality without Sacrificing Speed](https://deeplivecam.net/index.php/blog/tips-and-tricks/better-quality-without-sacrificing-speed) - Tips for achieving better results without impacting performance
312
+ - [Instant Vtuber!](https://deeplivecam.net/index.php/blog/tips-and-tricks/instant-vtuber) - Create a new persona/vtuber easily using Metahuman Creator
313
+
314
+ Visit our [official blog](https://deeplivecam.net/index.php/blog/tips-and-tricks) for more tips and tutorials.
315
+
316
+ ## Command Line Arguments (Unmaintained)
317
+
318
+ ```
319
+ options:
320
+ -h, --help show this help message and exit
321
+ -s SOURCE_PATH, --source SOURCE_PATH select a source image
322
+ -t TARGET_PATH, --target TARGET_PATH select a target image or video
323
+ -o OUTPUT_PATH, --output OUTPUT_PATH select output file or directory
324
+ --frame-processor FRAME_PROCESSOR [FRAME_PROCESSOR ...] frame processors (choices: face_swapper, face_enhancer, ...)
325
+ --keep-fps keep original fps
326
+ --keep-audio keep original audio
327
+ --keep-frames keep temporary frames
328
+ --many-faces process every face
329
+ --map-faces map source target faces
330
+ --mouth-mask mask the mouth region
331
+ --video-encoder {libx264,libx265,libvpx-vp9} adjust output video encoder
332
+ --video-quality [0-51] adjust output video quality
333
+ --live-mirror the live camera display as you see it in the front-facing camera frame
334
+ --live-resizable the live camera frame is resizable
335
+ --max-memory MAX_MEMORY maximum amount of RAM in GB
336
+ --execution-provider {cpu} [{cpu} ...] available execution provider (choices: cpu, ...)
337
+ --execution-threads EXECUTION_THREADS number of execution threads
338
+ -v, --version show program's version number and exit
339
+ ```
340
+
341
+ Looking for a CLI mode? Passing the -s/--source argument runs the program in CLI (headless) mode, as shown below.
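+
+ A minimal headless run might look like this (the file names are placeholders):
+
+ ```bash
+ # swap the face from face.jpg onto every frame of input.mp4, keeping the original frame rate
+ python run.py -s face.jpg -t input.mp4 -o output.mp4 --keep-fps
+ ```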
342
+
343
+ ## Press
344
+
345
+ **We are always open to criticism and ready to improve; that's why we didn't cherry-pick anything.**
346
+
347
+ - [*"Deep-Live-Cam goes viral, allowing anyone to become a digital doppelganger"*](https://arstechnica.com/information-technology/2024/08/new-ai-tool-enables-real-time-face-swapping-on-webcams-raising-fraud-concerns/) - Ars Technica
348
+ - [*"Thanks Deep Live Cam, shapeshifters are among us now"*](https://dataconomy.com/2024/08/15/what-is-deep-live-cam-github-deepfake/) - Dataconomy
349
+ - [*"This free AI tool lets you become anyone during video-calls"*](https://www.newsbytesapp.com/news/science/deep-live-cam-ai-impersonation-tool-goes-viral/story) - NewsBytes
350
+ - [*"OK, this viral AI live stream software is truly terrifying"*](https://www.creativebloq.com/ai/ok-this-viral-ai-live-stream-software-is-truly-terrifying) - Creative Bloq
351
+ - [*"Deepfake AI Tool Lets You Become Anyone in a Video Call With Single Photo"*](https://petapixel.com/2024/08/14/deep-live-cam-deepfake-ai-tool-lets-you-become-anyone-in-a-video-call-with-single-photo-mark-zuckerberg-jd-vance-elon-musk/) - PetaPixel
352
+ - [*"Deep-Live-Cam Uses AI to Transform Your Face in Real-Time, Celebrities Included"*](https://www.techeblog.com/deep-live-cam-ai-transform-face/) - TechEBlog
353
+ - [*"An AI tool that "makes you look like anyone" during a video call is going viral online"*](https://telegrafi.com/en/a-tool-that-makes-you-look-like-anyone-during-a-video-call-is-going-viral-on-the-Internet/) - Telegrafi
354
+ - [*"This Deepfake Tool Turning Images Into Livestreams is Topping the GitHub Charts"*](https://decrypt.co/244565/this-deepfake-tool-turning-images-into-livestreams-is-topping-the-github-charts) - Emerge
355
+ - [*"New Real-Time Face-Swapping AI Allows Anyone to Mimic Famous Faces"*](https://www.digitalmusicnews.com/2024/08/15/face-swapping-ai-real-time-mimic/) - Digital Music News
356
+ - [*"This real-time webcam deepfake tool raises alarms about the future of identity theft"*](https://www.diyphotography.net/this-real-time-webcam-deepfake-tool-raises-alarms-about-the-future-of-identity-theft/) - DIYPhotography
357
+ - [*"That's Crazy, Oh God. That's Fucking Freaky Dude... That's So Wild Dude"*](https://www.youtube.com/watch?time_continue=1074&v=py4Tc-Y8BcY) - SomeOrdinaryGamers
358
+ - [*"Alright look look look, now look chat, we can do any face we want to look like chat"*](https://www.youtube.com/live/mFsCe7AIxq8?feature=shared&t=2686) - IShowSpeed
359
+
360
+ ## Credits
361
+
362
+ - [ffmpeg](https://ffmpeg.org/): for making video-related operations easy
363
+ - [deepinsight](https://github.com/deepinsight): for their [insightface](https://github.com/deepinsight/insightface) project which provided a well-made library and models. Please be reminded that the [use of the model is for non-commercial research purposes only](https://github.com/deepinsight/insightface?tab=readme-ov-file#license).
364
+ - [havok2-htwo](https://github.com/havok2-htwo): for sharing the webcam code
365
+ - [GosuDRM](https://github.com/GosuDRM): for the open version of roop
366
+ - [pereiraroland26](https://github.com/pereiraroland26): Multiple faces support
367
+ - [vic4key](https://github.com/vic4key): For supporting/contributing to this project
368
+ - [kier007](https://github.com/kier007): for improving the user experience
369
+ - [qitianai](https://github.com/qitianai): for multi-lingual support
370
+ - and [all developers](https://github.com/hacksider/Deep-Live-Cam/graphs/contributors) behind libraries used in this project.
371
+ - Footnote: Please be informed that the base author of the code is [s0md3v](https://github.com/s0md3v/roop)
372
+ - All the wonderful users who helped make this project go viral by starring the repo ❤️
373
+
374
+ [![Stargazers](https://reporoster.com/stars/hacksider/Deep-Live-Cam)](https://github.com/hacksider/Deep-Live-Cam/stargazers)
375
+
376
+ ## Contributions
377
+
378
+ ![Alt](https://repobeats.axiom.co/api/embed/fec8e29c45dfdb9c5916f3a7830e1249308d20e1.svg "Repobeats analytics image")
379
+
380
+ ## Stars to the Moon 🚀
381
+
382
+ <a href="https://star-history.com/#hacksider/deep-live-cam&Date">
383
+ <picture>
384
+ <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=hacksider/deep-live-cam&type=Date&theme=dark" />
385
+ <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=hacksider/deep-live-cam&type=Date" />
386
+ <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=hacksider/deep-live-cam&type=Date" />
387
+ </picture>
388
+ </a>
locales/zh.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "Source x Target Mapper": "Source x Target Mapper",
3
+ "select an source image": "选择一个源图像",
4
+ "Preview": "预览",
5
+ "select an target image or video": "选择一个目标图像或视频",
6
+ "save image output file": "保存图像输出文件",
7
+ "save video output file": "保存视频输出文件",
8
+ "select an target image": "选择一个目标图像",
9
+ "source": "源",
10
+ "Select a target": "选择一个目标",
11
+ "Select a face": "选择一张脸",
12
+ "Keep audio": "保留音频",
13
+ "Face Enhancer": "面纹增强器",
14
+ "Many faces": "多脸",
15
+ "Show FPS": "显示帧率",
16
+ "Keep fps": "保持帧率",
17
+ "Keep frames": "保持帧数",
18
+ "Fix Blueish Cam": "修复偏蓝的摄像头",
19
+ "Mouth Mask": "口罩",
20
+ "Show Mouth Mask Box": "显示口罩盒",
21
+ "Start": "开始",
22
+ "Live": "直播",
23
+ "Destroy": "结束",
24
+ "Map faces": "识别人脸",
25
+ "Processing...": "处理中...",
26
+ "Processing succeed!": "处理成功!",
27
+ "Processing ignored!": "处理被忽略!",
28
+ "Failed to start camera": "启动相机失败",
29
+ "Please complete pop-up or close it.": "请先完成弹出窗口或者关闭它",
30
+ "Getting unique faces": "获取独特面部",
31
+ "Please select a source image first": "请先选择一个源图像",
32
+ "No faces found in target": "目标图像中没有人脸",
33
+ "Add": "添加",
34
+ "Clear": "清除",
35
+ "Submit": "确认",
36
+ "Select source image": "请选取源图像",
37
+ "Select target image": "请选取目标图像",
38
+ "Please provide mapping!": "请提供映射",
39
+ "Atleast 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
40
+ "At least 1 source with target is required!": "至少需要一个来源图像与目标图像相关!",
41
+ "Face could not be detected in last upload!": "最近上传的图像中没有检测到人脸!",
42
+ "Select Camera:": "选择摄像头",
43
+ "All mappings cleared!": "所有映射均已清除!",
44
+ "Mappings successfully submitted!": "成功提交映射!",
45
+ "Source x Target Mapper is already open.": "源 x 目标映射器已打开。"
46
+ }
media/avgpcperformancedemo.gif ADDED

Git LFS Details

  • SHA256: a216845c38eadf591953feac2e6b0463a6ac4ecdb08371dcbfe91621b9860bbe
  • Pointer size: 132 Bytes
  • Size of remote file: 5.47 MB
media/deepwarebench.gif ADDED

Git LFS Details

  • SHA256: 83d132b220f93846bc3777b36ffdcfcc16d512f3cb2e0ed93434f2c8ce19540b
  • Pointer size: 132 Bytes
  • Size of remote file: 2.92 MB
media/demo.gif ADDED

Git LFS Details

  • SHA256: 45facedf7451273dd9582c38dde5b1e9204b1e21e6db61d9311b50d2912498c7
  • Pointer size: 133 Bytes
  • Size of remote file: 11.4 MB
media/live_show.gif ADDED

Git LFS Details

  • SHA256: 67e6154c2836c1f9575e16f1516c46952ef0ed90ef8b332a72b8a7d1566b6a3e
  • Pointer size: 132 Bytes
  • Size of remote file: 8.61 MB
media/ludwig.gif ADDED

Git LFS Details

  • SHA256: ac57219d7b64849dcee23eff5782d7420db8eb791e953992d7ffbb1a184d563a
  • Pointer size: 132 Bytes
  • Size of remote file: 5.58 MB
media/meme.gif ADDED

Git LFS Details

  • SHA256: f2e9a17dbc835a39a9318839866e688fd957757223e95c4af517a223990f538c
  • Pointer size: 132 Bytes
  • Size of remote file: 5.2 MB
media/movie.gif ADDED

Git LFS Details

  • SHA256: e5ab125768e909fb02a4f245ec9ebce5423a030c7c489053ccf678d14c72fe29
  • Pointer size: 133 Bytes
  • Size of remote file: 14.5 MB
media/streamers.gif ADDED

Git LFS Details

  • SHA256: ec63f37c7ca61eb2de7e4714473a9aa16a32a45235af3f39fa5a077b404a09c8
  • Pointer size: 133 Bytes
  • Size of remote file: 13.6 MB
models/instructions.txt ADDED
@@ -0,0 +1,4 @@
1
+ Just put the models in this folder:
2
+
3
+ https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128_fp16.onnx?download=true
4
+ https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth
modules/__init__.py ADDED
File without changes
modules/capturer.py ADDED
@@ -0,0 +1,32 @@
1
+ from typing import Any
2
+ import cv2
3
+ import modules.globals # Import the globals to check the color correction toggle
4
+
5
+
6
+ def get_video_frame(video_path: str, frame_number: int = 0) -> Any:
7
+ capture = cv2.VideoCapture(video_path)
8
+
9
+ # Set MJPEG format to ensure correct color space handling
10
+ capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
11
+
12
+ # Only force RGB conversion if color correction is enabled
13
+ if modules.globals.color_correction:
14
+ capture.set(cv2.CAP_PROP_CONVERT_RGB, 1)
15
+
16
+ frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
17
+ # clamp to a valid index: frame_number is treated as 1-based, and values <= 1 fall back to the first frame
+ capture.set(cv2.CAP_PROP_POS_FRAMES, max(0, min(frame_total - 1, frame_number - 1)))
18
+ has_frame, frame = capture.read()
19
+
20
+ if has_frame and modules.globals.color_correction:
21
+ # Convert the frame color if necessary
22
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
23
+
24
+ capture.release()
25
+ return frame if has_frame else None
26
+
27
+
28
+ def get_video_frame_total(video_path: str) -> int:
29
+ capture = cv2.VideoCapture(video_path)
30
+ video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
31
+ capture.release()
32
+ return video_frame_total
modules/cluster_analysis.py ADDED
@@ -0,0 +1,32 @@
1
+ import numpy as np
2
+ from sklearn.cluster import KMeans
3
+ from sklearn.metrics import silhouette_score
4
+ from typing import Any
5
+
6
+
7
+ def find_cluster_centroids(embeddings, max_k=10) -> Any:
8
+ inertia = []
9
+ cluster_centroids = []
10
+ K = range(1, max_k+1)
11
+
12
+ for k in K:
13
+ kmeans = KMeans(n_clusters=k, random_state=0)
14
+ kmeans.fit(embeddings)
15
+ inertia.append(kmeans.inertia_)
16
+ cluster_centroids.append({"k": k, "centroids": kmeans.cluster_centers_})
17
+
18
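+ # elbow heuristic: the largest drop in inertia between consecutive k values marks the best cluster count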
+ diffs = [inertia[i] - inertia[i+1] for i in range(len(inertia)-1)]
19
+ optimal_centroids = cluster_centroids[diffs.index(max(diffs)) + 1]['centroids']
20
+
21
+ return optimal_centroids
22
+
23
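+ # similarity is a plain dot product; the face embeddings are L2-normalized, so this approximates cosine similarity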
+ def find_closest_centroid(centroids: list, normed_face_embedding) -> Any:
24
+ try:
25
+ centroids = np.array(centroids)
26
+ normed_face_embedding = np.array(normed_face_embedding)
27
+ similarities = np.dot(centroids, normed_face_embedding)
28
+ closest_centroid_index = np.argmax(similarities)
29
+
30
+ return closest_centroid_index, centroids[closest_centroid_index]
31
+ except ValueError:
32
+ return None
modules/core.py ADDED
@@ -0,0 +1,259 @@
1
+ import os
2
+ import sys
3
+ # single thread doubles cuda performance - needs to be set before torch import
4
+ if any(arg.startswith('--execution-provider') for arg in sys.argv):
5
+ os.environ['OMP_NUM_THREADS'] = '1'
6
+ # reduce tensorflow log level
7
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
8
+ import warnings
9
+ from typing import List
10
+ import platform
11
+ import signal
12
+ import shutil
13
+ import argparse
14
+ import torch
15
+ import onnxruntime
16
+ import tensorflow
17
+
18
+ import modules.globals
19
+ import modules.metadata
20
+ import modules.ui as ui
21
+ from modules.processors.frame.core import get_frame_processors_modules
22
+ from modules.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path
23
+
24
+ if 'ROCMExecutionProvider' in modules.globals.execution_providers:
25
+ del torch
26
+
27
+ warnings.filterwarnings('ignore', category=FutureWarning, module='insightface')
28
+ warnings.filterwarnings('ignore', category=UserWarning, module='torchvision')
29
+
30
+
31
+ def parse_args() -> None:
32
+ signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
33
+ program = argparse.ArgumentParser()
34
+ program.add_argument('-s', '--source', help='select an source image', dest='source_path')
35
+ program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
36
+ program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
37
+ program.add_argument('--frame-processor', help='pipeline of frame processors', dest='frame_processor', default=['face_swapper'], choices=['face_swapper', 'face_enhancer'], nargs='+')
38
+ program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
39
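+ # note: action='store_true' with default=True means keep_audio is always on; the flag cannot turn it off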
+ program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
40
+ program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
41
+ program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
42
+ program.add_argument('--nsfw-filter', help='filter the NSFW image or video', dest='nsfw_filter', action='store_true', default=False)
43
+ program.add_argument('--map-faces', help='map source target faces', dest='map_faces', action='store_true', default=False)
44
+ program.add_argument('--mouth-mask', help='mask the mouth region', dest='mouth_mask', action='store_true', default=False)
45
+ program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
46
+ program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=18, choices=range(52), metavar='[0-51]')
47
+ program.add_argument('-l', '--lang', help='Ui language', default="en")
48
+ program.add_argument('--live-mirror', help='The live camera display as you see it in the front-facing camera frame', dest='live_mirror', action='store_true', default=False)
49
+ program.add_argument('--live-resizable', help='The live camera frame is resizable', dest='live_resizable', action='store_true', default=False)
50
+ program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
51
+ program.add_argument('--execution-provider', help='execution provider', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
52
+ program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
53
+ program.add_argument('-v', '--version', action='version', version=f'{modules.metadata.name} {modules.metadata.version}')
54
+
55
+ # register deprecated args
56
+ program.add_argument('-f', '--face', help=argparse.SUPPRESS, dest='source_path_deprecated')
57
+ program.add_argument('--cpu-cores', help=argparse.SUPPRESS, dest='cpu_cores_deprecated', type=int)
58
+ program.add_argument('--gpu-vendor', help=argparse.SUPPRESS, dest='gpu_vendor_deprecated')
59
+ program.add_argument('--gpu-threads', help=argparse.SUPPRESS, dest='gpu_threads_deprecated', type=int)
60
+
61
+ args = program.parse_args()
62
+
63
+ modules.globals.source_path = args.source_path
64
+ modules.globals.target_path = args.target_path
65
+ modules.globals.output_path = normalize_output_path(modules.globals.source_path, modules.globals.target_path, args.output_path)
66
+ modules.globals.frame_processors = args.frame_processor
67
+ modules.globals.headless = bool(args.source_path or args.target_path or args.output_path)
68
+ modules.globals.keep_fps = args.keep_fps
69
+ modules.globals.keep_audio = args.keep_audio
70
+ modules.globals.keep_frames = args.keep_frames
71
+ modules.globals.many_faces = args.many_faces
72
+ modules.globals.mouth_mask = args.mouth_mask
73
+ modules.globals.nsfw_filter = args.nsfw_filter
74
+ modules.globals.map_faces = args.map_faces
75
+ modules.globals.video_encoder = args.video_encoder
76
+ modules.globals.video_quality = args.video_quality
77
+ modules.globals.live_mirror = args.live_mirror
78
+ modules.globals.live_resizable = args.live_resizable
79
+ modules.globals.max_memory = args.max_memory
80
+ modules.globals.execution_providers = decode_execution_providers(args.execution_provider)
81
+ modules.globals.execution_threads = args.execution_threads
82
+ modules.globals.lang = args.lang
83
+
84
+ #for ENHANCER tumbler:
85
+ if 'face_enhancer' in args.frame_processor:
86
+ modules.globals.fp_ui['face_enhancer'] = True
87
+ else:
88
+ modules.globals.fp_ui['face_enhancer'] = False
89
+
90
+ # translate deprecated args
91
+ if args.source_path_deprecated:
92
+ print('\033[33mArguments -f and --face are deprecated. Use -s and --source instead.\033[0m')
93
+ modules.globals.source_path = args.source_path_deprecated
94
+ modules.globals.output_path = normalize_output_path(args.source_path_deprecated, modules.globals.target_path, args.output_path)
95
+ if args.cpu_cores_deprecated:
96
+ print('\033[33mArgument --cpu-cores is deprecated. Use --execution-threads instead.\033[0m')
97
+ modules.globals.execution_threads = args.cpu_cores_deprecated
98
+ if args.gpu_vendor_deprecated == 'apple':
99
+ print('\033[33mArgument --gpu-vendor apple is deprecated. Use --execution-provider coreml instead.\033[0m')
100
+ modules.globals.execution_providers = decode_execution_providers(['coreml'])
101
+ if args.gpu_vendor_deprecated == 'nvidia':
102
+ print('\033[33mArgument --gpu-vendor nvidia is deprecated. Use --execution-provider cuda instead.\033[0m')
103
+ modules.globals.execution_providers = decode_execution_providers(['cuda'])
104
+ if args.gpu_vendor_deprecated == 'amd':
105
+ print('\033[33mArgument --gpu-vendor amd is deprecated. Use --execution-provider cuda instead.\033[0m')
106
+ modules.globals.execution_providers = decode_execution_providers(['rocm'])
107
+ if args.gpu_threads_deprecated:
108
+ print('\033[33mArgument --gpu-threads is deprecated. Use --execution-threads instead.\033[0m')
109
+ modules.globals.execution_threads = args.gpu_threads_deprecated
110
+
111
+
112
+ def encode_execution_providers(execution_providers: List[str]) -> List[str]:
113
+ return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
114
+
115
+
116
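+ # map short names like 'cuda' back to the full onnxruntime provider names (e.g. CUDAExecutionProvider)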
+ def decode_execution_providers(execution_providers: List[str]) -> List[str]:
117
+ return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
118
+ if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
119
+
120
+
121
+ def suggest_max_memory() -> int:
122
+ if platform.system().lower() == 'darwin':
123
+ return 4
124
+ return 16
125
+
126
+
127
+ def suggest_execution_providers() -> List[str]:
128
+ return encode_execution_providers(onnxruntime.get_available_providers())
129
+
130
+
131
+ def suggest_execution_threads() -> int:
132
+ if 'DmlExecutionProvider' in modules.globals.execution_providers:
133
+ return 1
134
+ if 'ROCMExecutionProvider' in modules.globals.execution_providers:
135
+ return 1
136
+ return 8
137
+
138
+
139
+ def limit_resources() -> None:
140
+ # prevent tensorflow memory leak
141
+ gpus = tensorflow.config.experimental.list_physical_devices('GPU')
142
+ for gpu in gpus:
143
+ tensorflow.config.experimental.set_memory_growth(gpu, True)
144
+ # limit memory usage
145
+ if modules.globals.max_memory:
146
+ memory = modules.globals.max_memory * 1024 ** 3
147
+ if platform.system().lower() == 'darwin':
148
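+ # note: the 1024 ** 6 multiplier makes the macOS limit astronomically large, effectively leaving memory uncapped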
+ memory = modules.globals.max_memory * 1024 ** 6
149
+ if platform.system().lower() == 'windows':
150
+ import ctypes
151
+ kernel32 = ctypes.windll.kernel32
152
+ kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
153
+ else:
154
+ import resource
155
+ resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
156
+
157
+
158
+ def release_resources() -> None:
159
+ if 'CUDAExecutionProvider' in modules.globals.execution_providers:
160
+ torch.cuda.empty_cache()
161
+
162
+
163
+ def pre_check() -> bool:
164
+ if sys.version_info < (3, 9):
165
+ update_status('Python version is not supported - please upgrade to 3.9 or higher.')
166
+ return False
167
+ if not shutil.which('ffmpeg'):
168
+ update_status('ffmpeg is not installed.')
169
+ return False
170
+ return True
171
+
172
+
173
+ def update_status(message: str, scope: str = 'DLC.CORE') -> None:
174
+ print(f'[{scope}] {message}')
175
+ if not modules.globals.headless:
176
+ ui.update_status(message)
177
+
178
+ def start() -> None:
179
+ for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
180
+ if not frame_processor.pre_start():
181
+ return
182
+ update_status('Processing...')
183
+ # process image to image
184
+ if has_image_extension(modules.globals.target_path):
185
+ if modules.globals.nsfw_filter and ui.check_and_ignore_nsfw(modules.globals.target_path, destroy):
186
+ return
187
+ try:
188
+ shutil.copy2(modules.globals.target_path, modules.globals.output_path)
189
+ except Exception as e:
190
+ print("Error copying file:", str(e))
191
+ for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
192
+ update_status('Progressing...', frame_processor.NAME)
193
+ frame_processor.process_image(modules.globals.source_path, modules.globals.output_path, modules.globals.output_path)
194
+ release_resources()
195
+ if is_image(modules.globals.target_path):
196
+ update_status('Processing to image succeeded!')
197
+ else:
198
+ update_status('Processing to image failed!')
199
+ return
200
+ # process image to videos
201
+ if modules.globals.nsfw_filter and ui.check_and_ignore_nsfw(modules.globals.target_path, destroy):
202
+ return
203
+
204
+ if not modules.globals.map_faces:
205
+ update_status('Creating temp resources...')
206
+ create_temp(modules.globals.target_path)
207
+ update_status('Extracting frames...')
208
+ extract_frames(modules.globals.target_path)
209
+
210
+ temp_frame_paths = get_temp_frame_paths(modules.globals.target_path)
211
+ for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
212
+ update_status('Progressing...', frame_processor.NAME)
213
+ frame_processor.process_video(modules.globals.source_path, temp_frame_paths)
214
+ release_resources()
215
+ # handles fps
216
+ if modules.globals.keep_fps:
217
+ update_status('Detecting fps...')
218
+ fps = detect_fps(modules.globals.target_path)
219
+ update_status(f'Creating video with {fps} fps...')
220
+ create_video(modules.globals.target_path, fps)
221
+ else:
222
+ update_status('Creating video with 30.0 fps...')
223
+ create_video(modules.globals.target_path)
224
+ # handle audio
225
+ if modules.globals.keep_audio:
226
+ if modules.globals.keep_fps:
227
+ update_status('Restoring audio...')
228
+ else:
229
+ update_status('Restoring audio might cause issues as fps are not kept...')
230
+ restore_audio(modules.globals.target_path, modules.globals.output_path)
231
+ else:
232
+ move_temp(modules.globals.target_path, modules.globals.output_path)
233
+ # clean and validate
234
+ clean_temp(modules.globals.target_path)
235
+ if is_video(modules.globals.target_path):
236
+ update_status('Processing to video succeeded!')
237
+ else:
238
+ update_status('Processing to video failed!')
239
+
240
+
241
+ def destroy(to_quit=True) -> None:
242
+ if modules.globals.target_path:
243
+ clean_temp(modules.globals.target_path)
244
+ if to_quit: quit()
245
+
246
+
247
+ def run() -> None:
248
+ parse_args()
249
+ if not pre_check():
250
+ return
251
+ for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
252
+ if not frame_processor.pre_check():
253
+ return
254
+ limit_resources()
255
+ if modules.globals.headless:
256
+ start()
257
+ else:
258
+ window = ui.init(start, destroy, modules.globals.lang)
259
+ window.mainloop()
modules/face_analyser.py ADDED
@@ -0,0 +1,189 @@
1
+ import os
2
+ import shutil
3
+ from typing import Any
4
+ import insightface
5
+
6
+ import cv2
7
+ import numpy as np
8
+ import modules.globals
9
+ from tqdm import tqdm
10
+ from modules.typing import Frame
11
+ from modules.cluster_analysis import find_cluster_centroids, find_closest_centroid
12
+ from modules.utilities import get_temp_directory_path, create_temp, extract_frames, clean_temp, get_temp_frame_paths
13
+ from pathlib import Path
14
+
15
+ FACE_ANALYSER = None
16
+
17
+
18
+ def get_face_analyser() -> Any:
19
+ global FACE_ANALYSER
20
+
21
+ if FACE_ANALYSER is None:
22
+ FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
23
+ FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
24
+ return FACE_ANALYSER
25
+
26
+
27
+ def get_one_face(frame: Frame) -> Any:
28
+ faces = get_face_analyser().get(frame)
29
+ try:
30
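+ # pick the left-most detected face (smallest x coordinate of the bounding box)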
+ return min(faces, key=lambda x: x.bbox[0])
31
+ except ValueError:
32
+ return None
33
+
34
+
35
+ def get_many_faces(frame: Frame) -> Any:
36
+ try:
37
+ return get_face_analyser().get(frame)
38
+ except IndexError:
39
+ return None
40
+
41
+ def has_valid_map() -> bool:
42
+ for map in modules.globals.source_target_map:
43
+ if "source" in map and "target" in map:
44
+ return True
45
+ return False
46
+
47
+ def default_source_face() -> Any:
48
+ for map in modules.globals.source_target_map:
49
+ if "source" in map:
50
+ return map['source']['face']
51
+ return None
52
+
53
+ def simplify_maps() -> Any:
54
+ centroids = []
55
+ faces = []
56
+ for map in modules.globals.source_target_map:
57
+ if "source" in map and "target" in map:
58
+ centroids.append(map['target']['face'].normed_embedding)
59
+ faces.append(map['source']['face'])
60
+
61
+ modules.globals.simple_map = {'source_faces': faces, 'target_embeddings': centroids}
62
+ return None
63
+
64
+ def add_blank_map() -> Any:
65
+ try:
66
+ max_id = -1
67
+ if len(modules.globals.source_target_map) > 0:
68
+ max_id = max(modules.globals.source_target_map, key=lambda x: x['id'])['id']
69
+
70
+ modules.globals.source_target_map.append({
71
+ 'id' : max_id + 1
72
+ })
73
+ except ValueError:
74
+ return None
75
+
76
+ def get_unique_faces_from_target_image() -> Any:
77
+ try:
78
+ modules.globals.source_target_map = []
79
+ target_frame = cv2.imread(modules.globals.target_path)
80
+ many_faces = get_many_faces(target_frame)
81
+ i = 0
82
+
83
+ for face in many_faces:
84
+ x_min, y_min, x_max, y_max = face['bbox']
85
+ modules.globals.source_target_map.append({
86
+ 'id' : i,
87
+ 'target' : {
88
+ 'cv2' : target_frame[int(y_min):int(y_max), int(x_min):int(x_max)],
89
+ 'face' : face
90
+ }
91
+ })
92
+ i = i + 1
93
+ except ValueError:
94
+ return None
95
+
96
+
97
+ def get_unique_faces_from_target_video() -> Any:
98
+ try:
99
+ modules.globals.source_target_map = []
100
+ frame_face_embeddings = []
101
+ face_embeddings = []
102
+
103
+ print('Creating temp resources...')
104
+ clean_temp(modules.globals.target_path)
105
+ create_temp(modules.globals.target_path)
106
+ print('Extracting frames...')
107
+ extract_frames(modules.globals.target_path)
108
+
109
+ temp_frame_paths = get_temp_frame_paths(modules.globals.target_path)
110
+
111
+ i = 0
112
+ for temp_frame_path in tqdm(temp_frame_paths, desc="Extracting face embeddings from frames"):
113
+ temp_frame = cv2.imread(temp_frame_path)
114
+ many_faces = get_many_faces(temp_frame)
115
+
116
+ for face in many_faces:
117
+ face_embeddings.append(face.normed_embedding)
118
+
119
+ frame_face_embeddings.append({'frame': i, 'faces': many_faces, 'location': temp_frame_path})
120
+ i += 1
121
+
122
+ centroids = find_cluster_centroids(face_embeddings)
123
+
124
+ for frame in frame_face_embeddings:
125
+ for face in frame['faces']:
126
+ closest_centroid_index, _ = find_closest_centroid(centroids, face.normed_embedding)
127
+ face['target_centroid'] = closest_centroid_index
128
+
129
+ for i in range(len(centroids)):
130
+ modules.globals.source_target_map.append({
131
+ 'id' : i
132
+ })
133
+
134
+ temp = []
135
+ for frame in tqdm(frame_face_embeddings, desc=f"Mapping frame embeddings to centroids-{i}"):
136
+ temp.append({'frame': frame['frame'], 'faces': [face for face in frame['faces'] if face['target_centroid'] == i], 'location': frame['location']})
137
+
138
+ modules.globals.source_target_map[i]['target_faces_in_frame'] = temp
139
+
140
+ # dump_faces(centroids, frame_face_embeddings)
141
+ default_target_face()
142
+ except ValueError:
143
+ return None
144
+
145
+
146
+ def default_target_face():
147
+ for map in modules.globals.source_target_map:
148
+ best_face = None
149
+ best_frame = None
150
+ for frame in map['target_faces_in_frame']:
151
+ if len(frame['faces']) > 0:
152
+ best_face = frame['faces'][0]
153
+ best_frame = frame
154
+ break
155
+
156
+ for frame in map['target_faces_in_frame']:
157
+ for face in frame['faces']:
158
+ if face['det_score'] > best_face['det_score']:
159
+ best_face = face
160
+ best_frame = frame
161
+
162
+ x_min, y_min, x_max, y_max = best_face['bbox']
163
+
164
+ target_frame = cv2.imread(best_frame['location'])
165
+ map['target'] = {
166
+ 'cv2' : target_frame[int(y_min):int(y_max), int(x_min):int(x_max)],
167
+ 'face' : best_face
168
+ }
169
+
170
+
171
+ def dump_faces(centroids: Any, frame_face_embeddings: list):
172
+ temp_directory_path = get_temp_directory_path(modules.globals.target_path)
173
+
174
+ for i in range(len(centroids)):
175
+ if os.path.exists(temp_directory_path + f"/{i}") and os.path.isdir(temp_directory_path + f"/{i}"):
176
+ shutil.rmtree(temp_directory_path + f"/{i}")
177
+ Path(temp_directory_path + f"/{i}").mkdir(parents=True, exist_ok=True)
178
+
179
+ for frame in tqdm(frame_face_embeddings, desc=f"Copying faces to temp/./{i}"):
180
+ temp_frame = cv2.imread(frame['location'])
181
+
182
+ j = 0
183
+ for face in frame['faces']:
184
+ if face['target_centroid'] == i:
185
+ x_min, y_min, x_max, y_max = face['bbox']
186
+
187
+ if temp_frame[int(y_min):int(y_max), int(x_min):int(x_max)].size > 0:
188
+ cv2.imwrite(temp_directory_path + f"/{i}/{frame['frame']}_{j}.png", temp_frame[int(y_min):int(y_max), int(x_min):int(x_max)])
189
+ j += 1
modules/gettext.py ADDED
@@ -0,0 +1,26 @@
1
+ import json
2
+ from pathlib import Path
3
+
4
+ class LanguageManager:
5
+ def __init__(self, default_language="en"):
6
+ self.current_language = default_language
7
+ self.translations = {}
8
+ self.load_language(default_language)
9
+
10
+ def load_language(self, language_code) -> bool:
11
+ """load language file"""
12
+ if language_code == "en":
13
+ return True
14
+ try:
15
+ file_path = Path(__file__).parent.parent / f"locales/{language_code}.json"
16
+ with open(file_path, "r", encoding="utf-8") as file:
17
+ self.translations = json.load(file)
18
+ self.current_language = language_code
19
+ return True
20
+ except FileNotFoundError:
21
+ print(f"Language file not found: {language_code}")
22
+ return False
23
+
24
+ def _(self, key, default=None) -> str:
25
+ """get translate text"""
26
+ return self.translations.get(key, default if default else key)
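+
+ # usage sketch (assumes locales/zh.json is present):
+ # lang = LanguageManager("zh")
+ # lang._("Start") # -> "开始"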
modules/globals.py ADDED
@@ -0,0 +1,43 @@
1
+ import os
2
+ from typing import List, Dict, Any
3
+
4
+ ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
5
+ WORKFLOW_DIR = os.path.join(ROOT_DIR, "workflow")
6
+
7
+ file_types = [
8
+ ("Image", ("*.png", "*.jpg", "*.jpeg", "*.gif", "*.bmp")),
9
+ ("Video", ("*.mp4", "*.mkv")),
10
+ ]
11
+
12
+ source_target_map = []
13
+ simple_map = {}
14
+
15
+ source_path = None
16
+ target_path = None
17
+ output_path = None
18
+ frame_processors: List[str] = []
19
+ keep_fps = True
20
+ keep_audio = True
21
+ keep_frames = False
22
+ many_faces = False
23
+ map_faces = False
24
+ color_correction = False # New global variable for color correction toggle
25
+ nsfw_filter = False
26
+ video_encoder = None
27
+ video_quality = None
28
+ live_mirror = False
29
+ live_resizable = True
30
+ max_memory = None
31
+ execution_providers: List[str] = []
32
+ execution_threads = None
33
+ headless = None
34
+ log_level = "error"
35
+ fp_ui: Dict[str, bool] = {"face_enhancer": False}
36
+ camera_input_combobox = None
37
+ webcam_preview_running = False
38
+ show_fps = False
39
+ mouth_mask = False
40
+ show_mouth_mask_box = False
41
+ mask_feather_ratio = 8
42
+ mask_down_size = 0.50
43
+ mask_size = 1
modules/metadata.py ADDED
@@ -0,0 +1,3 @@
1
+ name = 'Deep-Live-Cam'
2
+ version = '1.8'
3
+ edition = 'GitHub Edition'
modules/predicter.py ADDED
@@ -0,0 +1,36 @@
1
+ import numpy
2
+ import opennsfw2
3
+ from PIL import Image
4
+ import cv2 # Add OpenCV import
5
+ import modules.globals # Import globals to access the color correction toggle
6
+
7
+ from modules.typing import Frame
8
+
9
+ MAX_PROBABILITY = 0.85
10
+
11
+ # Preload the model once for efficiency
12
+ model = None
13
+
14
+ def predict_frame(target_frame: Frame) -> bool:
15
+ # Convert the frame to RGB before processing if color correction is enabled
16
+ if modules.globals.color_correction:
17
+ target_frame = cv2.cvtColor(target_frame, cv2.COLOR_BGR2RGB)
18
+
19
+ image = Image.fromarray(target_frame)
20
+ image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO)
21
+ global model
22
+ if model is None:
23
+ model = opennsfw2.make_open_nsfw_model()
24
+
25
+ views = numpy.expand_dims(image, axis=0)
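+ # the model outputs a (sfw, nsfw) score pair per image; keep only the NSFW probability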
26
+ _, probability = model.predict(views)[0]
27
+ return probability > MAX_PROBABILITY
28
+
29
+
30
+ def predict_image(target_path: str) -> bool:
31
+ return opennsfw2.predict_image(target_path) > MAX_PROBABILITY
32
+
33
+
34
+ def predict_video(target_path: str) -> bool:
35
+ _, probabilities = opennsfw2.predict_video_frames(video_path=target_path, frame_interval=100)
36
+ return any(probability > MAX_PROBABILITY for probability in probabilities)
modules/processors/__init__.py ADDED
File without changes
modules/processors/frame/__init__.py ADDED
File without changes
modules/processors/frame/core.py ADDED
@@ -0,0 +1,84 @@
1
+ import sys
2
+ import importlib
3
+ from concurrent.futures import ThreadPoolExecutor
4
+ from types import ModuleType
5
+ from typing import Any, List, Callable
6
+ from tqdm import tqdm
7
+
8
+ import modules
9
+ import modules.globals
10
+
11
+ FRAME_PROCESSORS_MODULES: List[ModuleType] = []
12
+ FRAME_PROCESSORS_INTERFACE = [
13
+ 'pre_check',
14
+ 'pre_start',
15
+ 'process_frame',
16
+ 'process_image',
17
+ 'process_video'
18
+ ]
19
+
20
+
21
+ def load_frame_processor_module(frame_processor: str) -> Any:
22
+ try:
23
+ frame_processor_module = importlib.import_module(f'modules.processors.frame.{frame_processor}')
24
+ for method_name in FRAME_PROCESSORS_INTERFACE:
25
+ if not hasattr(frame_processor_module, method_name):
26
+ sys.exit(f'Frame processor {frame_processor} is missing the required method {method_name}')
27
+ except ImportError:
28
+ print(f"Frame processor {frame_processor} not found")
29
+ sys.exit()
30
+ return frame_processor_module
31
+
32
+
33
+ def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
34
+ global FRAME_PROCESSORS_MODULES
35
+
36
+ if not FRAME_PROCESSORS_MODULES:
37
+ for frame_processor in frame_processors:
38
+ frame_processor_module = load_frame_processor_module(frame_processor)
39
+ FRAME_PROCESSORS_MODULES.append(frame_processor_module)
40
+ set_frame_processors_modules_from_ui(frame_processors)
41
+ return FRAME_PROCESSORS_MODULES
42
+
43
+ def set_frame_processors_modules_from_ui(frame_processors: List[str]) -> None:
44
+ global FRAME_PROCESSORS_MODULES
45
+ current_processor_names = [proc.__name__.split('.')[-1] for proc in FRAME_PROCESSORS_MODULES]
46
+
47
+ for frame_processor, state in modules.globals.fp_ui.items():
48
+ if state and frame_processor not in current_processor_names:
49
+ try:
50
+ frame_processor_module = load_frame_processor_module(frame_processor)
51
+ FRAME_PROCESSORS_MODULES.append(frame_processor_module)
52
+ if frame_processor not in modules.globals.frame_processors:
53
+ modules.globals.frame_processors.append(frame_processor)
54
+ except SystemExit:
55
+ print(f"Warning: Failed to load frame processor {frame_processor} requested by UI state.")
56
+ except Exception as e:
57
+ print(f"Warning: Error loading frame processor {frame_processor} requested by UI state: {e}")
58
+
59
+ elif not state and frame_processor in current_processor_names:
60
+ try:
61
+ module_to_remove = next((mod for mod in FRAME_PROCESSORS_MODULES if mod.__name__.endswith(f'.{frame_processor}')), None)
62
+ if module_to_remove:
63
+ FRAME_PROCESSORS_MODULES.remove(module_to_remove)
64
+ if frame_processor in modules.globals.frame_processors:
65
+ modules.globals.frame_processors.remove(frame_processor)
66
+ except Exception as e:
67
+ print(f"Warning: Error removing frame processor {frame_processor}: {e}")
68
+
69
+ def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], progress: Any = None) -> None:
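+ # fan each frame out as its own task; the heavy ONNX/OpenCV calls release the GIL, so worker threads can overlap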
70
+ with ThreadPoolExecutor(max_workers=modules.globals.execution_threads) as executor:
71
+ futures = []
72
+ for path in temp_frame_paths:
73
+ future = executor.submit(process_frames, source_path, [path], progress)
74
+ futures.append(future)
75
+ for future in futures:
76
+ future.result()
77
+
78
+
79
+ def process_video(source_path: str, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
80
+ progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
81
+ total = len(frame_paths)
82
+ with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
83
+ progress.set_postfix({'execution_providers': modules.globals.execution_providers, 'execution_threads': modules.globals.execution_threads, 'max_memory': modules.globals.max_memory})
84
+ multi_process_frame(source_path, frame_paths, process_frames, progress)
modules/processors/frame/face_enhancer.py ADDED
@@ -0,0 +1,130 @@
1
+ from typing import Any, List
2
+ import cv2
3
+ import threading
4
+ import gfpgan
5
+ import os
6
+
7
+ import modules.globals
8
+ import modules.processors.frame.core
9
+ from modules.core import update_status
10
+ from modules.face_analyser import get_one_face
11
+ from modules.typing import Frame, Face
12
+ import platform
13
+ import torch
14
+ from modules.utilities import (
15
+ conditional_download,
16
+ is_image,
17
+ is_video,
18
+ )
19
+
20
+ FACE_ENHANCER = None
21
+ THREAD_SEMAPHORE = threading.Semaphore()
22
+ THREAD_LOCK = threading.Lock()
23
+ NAME = "DLC.FACE-ENHANCER"
24
+
25
+ abs_dir = os.path.dirname(os.path.abspath(__file__))
26
+ models_dir = os.path.join(
27
+ os.path.dirname(os.path.dirname(os.path.dirname(abs_dir))), "models"
28
+ )
29
+
30
+
31
+ def pre_check() -> bool:
32
+ download_directory_path = models_dir
33
+ conditional_download(
34
+ download_directory_path,
35
+ [
36
+ "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth"
37
+ ],
38
+ )
39
+ return True
40
+
41
+
42
+ def pre_start() -> bool:
43
+ if not is_image(modules.globals.target_path) and not is_video(
44
+ modules.globals.target_path
45
+ ):
46
+ update_status("Select an image or video for target path.", NAME)
47
+ return False
48
+ return True
49
+
50
+
51
+ TENSORRT_AVAILABLE = False
52
+ try:
53
+ import torch_tensorrt
54
+ TENSORRT_AVAILABLE = True
55
+ except Exception as e:
+ print(f"TensorRT is not available: {e}")
61
+
62
+ def get_face_enhancer() -> Any:
63
+ global FACE_ENHANCER
64
+
65
+ with THREAD_LOCK:
66
+ if FACE_ENHANCER is None:
67
+ model_path = os.path.join(models_dir, "GFPGANv1.4.pth")
68
+
69
+ selected_device = None
70
+ device_priority = []
71
+
72
+ if TENSORRT_AVAILABLE and torch.cuda.is_available():
73
+ selected_device = torch.device("cuda")
74
+ device_priority.append("TensorRT+CUDA")
75
+ elif torch.cuda.is_available():
76
+ selected_device = torch.device("cuda")
77
+ device_priority.append("CUDA")
78
+ elif torch.backends.mps.is_available() and platform.system() == "Darwin":
79
+ selected_device = torch.device("mps")
80
+ device_priority.append("MPS")
81
+ else:
82
+ selected_device = torch.device("cpu")
83
+ device_priority.append("CPU")
84
+
85
+ FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1, device=selected_device)
86
+
87
+ # for debug:
88
+ print(f"Selected device: {selected_device} and device priority: {device_priority}")
89
+ return FACE_ENHANCER
90
+
91
+
92
+ def enhance_face(temp_frame: Frame) -> Frame:
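+ # THREAD_SEMAPHORE (initial value 1) serializes calls: the shared GFPGAN instance is not safe for concurrent use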
93
+ with THREAD_SEMAPHORE:
94
+ _, _, temp_frame = get_face_enhancer().enhance(temp_frame, paste_back=True)
95
+ return temp_frame
96
+
97
+
98
+ def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
99
+ target_face = get_one_face(temp_frame)
100
+ if target_face:
101
+ temp_frame = enhance_face(temp_frame)
102
+ return temp_frame
103
+
104
+
105
+ def process_frames(
106
+ source_path: str, temp_frame_paths: List[str], progress: Any = None
107
+ ) -> None:
108
+ for temp_frame_path in temp_frame_paths:
109
+ temp_frame = cv2.imread(temp_frame_path)
110
+ result = process_frame(None, temp_frame)
111
+ cv2.imwrite(temp_frame_path, result)
112
+ if progress:
113
+ progress.update(1)
114
+
115
+
116
+ def process_image(source_path: str, target_path: str, output_path: str) -> None:
117
+ target_frame = cv2.imread(target_path)
118
+ result = process_frame(None, target_frame)
119
+ cv2.imwrite(output_path, result)
120
+
121
+
122
+ def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
123
+ modules.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
124
+
125
+
126
+ def process_frame_v2(temp_frame: Frame) -> Frame:
127
+ target_face = get_one_face(temp_frame)
128
+ if target_face:
129
+ temp_frame = enhance_face(temp_frame)
130
+ return temp_frame
modules/processors/frame/face_swapper.py ADDED
@@ -0,0 +1,622 @@
1
+ from typing import Any, List
2
+ import cv2
3
+ import insightface
4
+ import threading
5
+ import numpy as np
6
+ import modules.globals
7
+ import logging
8
+ import modules.processors.frame.core
9
+ from modules.core import update_status
10
+ from modules.face_analyser import get_one_face, get_many_faces, default_source_face
11
+ from modules.typing import Face, Frame
12
+ from modules.utilities import (
13
+ conditional_download,
14
+ is_image,
15
+ is_video,
16
+ )
17
+ from modules.cluster_analysis import find_closest_centroid
18
+ import os
19
+
20
+ FACE_SWAPPER = None
21
+ THREAD_LOCK = threading.Lock()
22
+ NAME = "DLC.FACE-SWAPPER"
23
+
24
+ abs_dir = os.path.dirname(os.path.abspath(__file__))
25
+ models_dir = os.path.join(
26
+ os.path.dirname(os.path.dirname(os.path.dirname(abs_dir))), "models"
27
+ )
28
+
29
+
30
+ def pre_check() -> bool:
31
+ # download into the shared models directory, where get_face_swapper expects to find the file
+ download_directory_path = models_dir
32
+ conditional_download(
33
+ download_directory_path,
34
+ [
35
+ "https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128_fp16.onnx"
36
+ ],
37
+ )
38
+ return True
39
+
40
+
41
+ def pre_start() -> bool:
42
+ if not modules.globals.map_faces and not is_image(modules.globals.source_path):
43
+ update_status("Select an image for source path.", NAME)
44
+ return False
45
+ elif not modules.globals.map_faces and not get_one_face(
46
+ cv2.imread(modules.globals.source_path)
47
+ ):
48
+ update_status("No face in source path detected.", NAME)
49
+ return False
50
+ if not is_image(modules.globals.target_path) and not is_video(
51
+ modules.globals.target_path
52
+ ):
53
+ update_status("Select an image or video for target path.", NAME)
54
+ return False
55
+ return True
56
+
57
+
58
+ def get_face_swapper() -> Any:
59
+ global FACE_SWAPPER
60
+
61
+ with THREAD_LOCK:
62
+ if FACE_SWAPPER is None:
63
+ model_path = os.path.join(models_dir, "inswapper_128_fp16.onnx")
64
+ FACE_SWAPPER = insightface.model_zoo.get_model(
65
+ model_path, providers=modules.globals.execution_providers
66
+ )
67
+ return FACE_SWAPPER
68
+
69
+
70
+ def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
71
+ face_swapper = get_face_swapper()
72
+
73
+ # Apply the face swap
74
+ swapped_frame = face_swapper.get(
75
+ temp_frame, target_face, source_face, paste_back=True
76
+ )
77
+
78
+ if modules.globals.mouth_mask:
79
+ # Create a mask for the target face
80
+ face_mask = create_face_mask(target_face, temp_frame)
81
+
82
+ # Create the mouth mask
83
+ mouth_mask, mouth_cutout, mouth_box, lower_lip_polygon = (
84
+ create_lower_mouth_mask(target_face, temp_frame)
85
+ )
86
+
87
+ # Apply the mouth area
88
+ swapped_frame = apply_mouth_area(
89
+ swapped_frame, mouth_cutout, mouth_box, face_mask, lower_lip_polygon
90
+ )
91
+
92
+ if modules.globals.show_mouth_mask_box:
93
+ mouth_mask_data = (mouth_mask, mouth_cutout, mouth_box, lower_lip_polygon)
94
+ swapped_frame = draw_mouth_mask_visualization(
95
+ swapped_frame, target_face, mouth_mask_data
96
+ )
97
+
98
+ return swapped_frame
99
+
100
+
101
+ def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
102
+ if modules.globals.color_correction:
103
+ temp_frame = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)
104
+
105
+ if modules.globals.many_faces:
106
+ many_faces = get_many_faces(temp_frame)
107
+ if many_faces:
108
+ for target_face in many_faces:
109
+ if source_face and target_face:
110
+ temp_frame = swap_face(source_face, target_face, temp_frame)
111
+ else:
112
+ print("Face detection failed for target/source.")
113
+ else:
114
+ target_face = get_one_face(temp_frame)
115
+ if target_face and source_face:
116
+ temp_frame = swap_face(source_face, target_face, temp_frame)
117
+ else:
118
+ logging.error("Face detection failed for target or source.")
119
+ return temp_frame
120
+
121
+
122
+
123
+ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
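+ # map-faces mode: route each detected face to its mapped source face using the prebuilt source/target maps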
124
+ if is_image(modules.globals.target_path):
125
+ if modules.globals.many_faces:
126
+ source_face = default_source_face()
127
+ for map in modules.globals.source_target_map:
128
+ target_face = map["target"]["face"]
129
+ temp_frame = swap_face(source_face, target_face, temp_frame)
130
+
131
+ elif not modules.globals.many_faces:
132
+ for map in modules.globals.source_target_map:
133
+ if "source" in map:
134
+ source_face = map["source"]["face"]
135
+ target_face = map["target"]["face"]
136
+ temp_frame = swap_face(source_face, target_face, temp_frame)
137
+
138
+ elif is_video(modules.globals.target_path):
139
+ if modules.globals.many_faces:
140
+ source_face = default_source_face()
141
+ for map in modules.globals.source_target_map:
142
+ target_frame = [
143
+ f
144
+ for f in map["target_faces_in_frame"]
145
+ if f["location"] == temp_frame_path
146
+ ]
147
+
148
+ for frame in target_frame:
149
+ for target_face in frame["faces"]:
150
+ temp_frame = swap_face(source_face, target_face, temp_frame)
151
+
152
+ elif not modules.globals.many_faces:
153
+ for map in modules.globals.source_target_map:
154
+ if "source" in map:
155
+ target_frame = [
156
+ f
157
+ for f in map["target_faces_in_frame"]
158
+ if f["location"] == temp_frame_path
159
+ ]
160
+ source_face = map["source"]["face"]
161
+
162
+ for frame in target_frame:
163
+ for target_face in frame["faces"]:
164
+ temp_frame = swap_face(source_face, target_face, temp_frame)
165
+
166
+ else:
167
+ detected_faces = get_many_faces(temp_frame)
168
+ if modules.globals.many_faces:
169
+ if detected_faces:
170
+ source_face = default_source_face()
171
+ for target_face in detected_faces:
172
+ temp_frame = swap_face(source_face, target_face, temp_frame)
173
+
174
+ elif not modules.globals.many_faces:
175
+ if detected_faces:
176
+ if len(detected_faces) <= len(
177
+ modules.globals.simple_map["target_embeddings"]
178
+ ):
179
+ for detected_face in detected_faces:
180
+ closest_centroid_index, _ = find_closest_centroid(
181
+ modules.globals.simple_map["target_embeddings"],
182
+ detected_face.normed_embedding,
183
+ )
184
+
185
+ temp_frame = swap_face(
186
+ modules.globals.simple_map["source_faces"][
187
+ closest_centroid_index
188
+ ],
189
+ detected_face,
190
+ temp_frame,
191
+ )
192
+ else:
193
+ detected_faces_centroids = []
194
+ for face in detected_faces:
195
+ detected_faces_centroids.append(face.normed_embedding)
196
+ i = 0
197
+ for target_embedding in modules.globals.simple_map[
198
+ "target_embeddings"
199
+ ]:
200
+ closest_centroid_index, _ = find_closest_centroid(
201
+ detected_faces_centroids, target_embedding
202
+ )
203
+
204
+ temp_frame = swap_face(
205
+ modules.globals.simple_map["source_faces"][i],
206
+ detected_faces[closest_centroid_index],
207
+ temp_frame,
208
+ )
209
+ i += 1
210
+ return temp_frame
211
+
212
+
213
+ def process_frames(
214
+ source_path: str, temp_frame_paths: List[str], progress: Any = None
215
+ ) -> None:
216
+ if not modules.globals.map_faces:
217
+ source_face = get_one_face(cv2.imread(source_path))
218
+ for temp_frame_path in temp_frame_paths:
219
+ temp_frame = cv2.imread(temp_frame_path)
220
+ try:
221
+ result = process_frame(source_face, temp_frame)
222
+ cv2.imwrite(temp_frame_path, result)
223
+ except Exception as exception:
224
+ print(exception)
225
+ pass
226
+ if progress:
227
+ progress.update(1)
228
+ else:
229
+ for temp_frame_path in temp_frame_paths:
230
+ temp_frame = cv2.imread(temp_frame_path)
231
+ try:
232
+ result = process_frame_v2(temp_frame, temp_frame_path)
233
+ cv2.imwrite(temp_frame_path, result)
234
+ except Exception as exception:
235
+ print(exception)
236
+ pass
237
+ if progress:
238
+ progress.update(1)
239
+
240
+
241
+ def process_image(source_path: str, target_path: str, output_path: str) -> None:
242
+ if not modules.globals.map_faces:
243
+ source_face = get_one_face(cv2.imread(source_path))
244
+ target_frame = cv2.imread(target_path)
245
+ result = process_frame(source_face, target_frame)
246
+ cv2.imwrite(output_path, result)
247
+ else:
248
+ if modules.globals.many_faces:
249
+ update_status(
250
+ "Many faces enabled. Using first source image. Progressing...", NAME
251
+ )
252
+ target_frame = cv2.imread(output_path)
253
+ result = process_frame_v2(target_frame)
254
+ cv2.imwrite(output_path, result)
255
+
256
+
257
+ def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
258
+ if modules.globals.map_faces and modules.globals.many_faces:
259
+ update_status(
260
+ "Many faces enabled. Using first source image. Progressing...", NAME
261
+ )
262
+ modules.processors.frame.core.process_video(
263
+ source_path, temp_frame_paths, process_frames
264
+ )
265
+
266
+
267
+ def create_lower_mouth_mask(
268
+ face: Face, frame: Frame
269
+ ) -> tuple[np.ndarray, np.ndarray, tuple, np.ndarray]:
270
+ mask = np.zeros(frame.shape[:2], dtype=np.uint8)
271
+ mouth_cutout = None
272
+ landmarks = face.landmark_2d_106
273
+ if landmarks is not None:
274
+ # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
275
+ lower_lip_order = [
276
+ 65,
277
+ 66,
278
+ 62,
279
+ 70,
280
+ 69,
281
+ 18,
282
+ 19,
283
+ 20,
284
+ 21,
285
+ 22,
286
+ 23,
287
+ 24,
288
+ 0,
289
+ 8,
290
+ 7,
291
+ 6,
292
+ 5,
293
+ 4,
294
+ 3,
295
+ 2,
296
+ 65,
297
+ ]
298
+ lower_lip_landmarks = landmarks[lower_lip_order].astype(
299
+ np.float32
300
+ ) # Use float for precise calculations
301
+
302
+ # Calculate the center of the landmarks
303
+ center = np.mean(lower_lip_landmarks, axis=0)
304
+
305
+ # Expand the landmarks outward
306
+ expansion_factor = (
307
+ 1 + modules.globals.mask_down_size
308
+ ) # Adjust this for more or less expansion
309
+ expanded_landmarks = (lower_lip_landmarks - center) * expansion_factor + center
310
+
311
+ # Extend the top lip part
312
+ toplip_indices = [
313
+ 20,
314
+ 0,
315
+ 1,
316
+ 2,
317
+ 3,
318
+ 4,
319
+ 5,
320
+ ] # Indices for landmarks 2, 65, 66, 62, 70, 69, 18
321
+ toplip_extension = (
322
+ modules.globals.mask_size * 0.5
323
+ ) # Adjust this factor to control the extension
324
+ for idx in toplip_indices:
325
+ direction = expanded_landmarks[idx] - center
326
+ direction = direction / np.linalg.norm(direction)
327
+ expanded_landmarks[idx] += direction * toplip_extension
328
+
329
+ # Extend the bottom part (chin area)
330
+ chin_indices = [
331
+ 11,
332
+ 12,
333
+ 13,
334
+ 14,
335
+ 15,
336
+ 16,
337
+ ] # Indices for landmarks 21, 22, 23, 24, 0, 8
338
+ chin_extension = 2 * 0.2 # Adjust this factor to control the extension
339
+ for idx in chin_indices:
340
+ expanded_landmarks[idx][1] += (
341
+ expanded_landmarks[idx][1] - center[1]
342
+ ) * chin_extension
343
+
344
+ # Convert back to integer coordinates
345
+ expanded_landmarks = expanded_landmarks.astype(np.int32)
346
+
347
+ # Calculate bounding box for the expanded lower mouth
348
+ min_x, min_y = np.min(expanded_landmarks, axis=0)
349
+ max_x, max_y = np.max(expanded_landmarks, axis=0)
350
+
351
+ # Add some padding to the bounding box
352
+ padding = int((max_x - min_x) * 0.1) # 10% padding
353
+ min_x = max(0, min_x - padding)
354
+ min_y = max(0, min_y - padding)
355
+ max_x = min(frame.shape[1], max_x + padding)
356
+ max_y = min(frame.shape[0], max_y + padding)
357
+
358
+ # Ensure the bounding box dimensions are valid
359
+ if max_x <= min_x or max_y <= min_y:
360
+ if (max_x - min_x) <= 1:
361
+ max_x = min_x + 1
362
+ if (max_y - min_y) <= 1:
363
+ max_y = min_y + 1
364
+
365
+ # Create the mask
366
+ mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
367
+ cv2.fillPoly(mask_roi, [expanded_landmarks - [min_x, min_y]], 255)
368
+
369
+ # Apply Gaussian blur to soften the mask edges
370
+ mask_roi = cv2.GaussianBlur(mask_roi, (15, 15), 5)
371
+
372
+ # Place the mask ROI in the full-sized mask
373
+ mask[min_y:max_y, min_x:max_x] = mask_roi
374
+
375
+ # Extract the masked area from the frame
376
+ mouth_cutout = frame[min_y:max_y, min_x:max_x].copy()
377
+
378
+ # Return the expanded lower lip polygon in original frame coordinates
379
+ lower_lip_polygon = expanded_landmarks
380
+
381
+ return mask, mouth_cutout, (min_x, min_y, max_x, max_y), lower_lip_polygon
382
+
383
+
384
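The mask above is built by pushing each lower-lip landmark away from the polygon's centroid before filling and blurring it. A minimal sketch of that expansion step, using made-up points rather than real landmarks:

    import numpy as np

    points = np.array([[10.0, 10.0], [20.0, 10.0], [15.0, 20.0]])  # toy polygon
    center = points.mean(axis=0)
    expansion_factor = 1.4  # stands in for 1 + modules.globals.mask_down_size
    expanded = (points - center) * expansion_factor + center
    # Each vertex is now 40% further from the centroid, so the filled
    # polygon covers a margin around the original region.
    print(expanded)
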
+ def draw_mouth_mask_visualization(
+     frame: Frame, face: Face, mouth_mask_data: tuple
+ ) -> Frame:
+     landmarks = face.landmark_2d_106
+     if landmarks is not None and mouth_mask_data is not None:
+         mask, mouth_cutout, (min_x, min_y, max_x, max_y), lower_lip_polygon = (
+             mouth_mask_data
+         )
+
+         vis_frame = frame.copy()
+
+         # Ensure coordinates are within frame bounds
+         height, width = vis_frame.shape[:2]
+         min_x, min_y = max(0, min_x), max(0, min_y)
+         max_x, max_y = min(width, max_x), min(height, max_y)
+
+         # Adjust mask to match the region size
+         mask_region = mask[0 : max_y - min_y, 0 : max_x - min_x]
+
+         # (The colour-map overlays and the red bounding box that used to be
+         # blended in here were intentionally removed; only the polygon
+         # outline and the labels remain.)
+
+         # Draw the lower lip polygon
+         cv2.polylines(vis_frame, [lower_lip_polygon], True, (0, 255, 0), 2)
+
+         # Visualize the feathered mask
+         feather_amount = max(
+             1,
+             min(
+                 30,
+                 (max_x - min_x) // modules.globals.mask_feather_ratio,
+                 (max_y - min_y) // modules.globals.mask_feather_ratio,
+             ),
+         )
+         # Ensure kernel size is odd
+         kernel_size = 2 * feather_amount + 1
+         feathered_mask = cv2.GaussianBlur(
+             mask_region.astype(float), (kernel_size, kernel_size), 0
+         )
+         # Guard against division by zero on an all-black mask region
+         max_val = feathered_mask.max()
+         if max_val > 0:
+             feathered_mask = (feathered_mask / max_val * 255).astype(np.uint8)
+
+         # Add labels
+         cv2.putText(
+             vis_frame,
+             "Lower Mouth Mask",
+             (min_x, min_y - 10),
+             cv2.FONT_HERSHEY_SIMPLEX,
+             0.5,
+             (255, 255, 255),
+             1,
+         )
+         cv2.putText(
+             vis_frame,
+             "Feathered Mask",
+             (min_x, max_y + 20),
+             cv2.FONT_HERSHEY_SIMPLEX,
+             0.5,
+             (255, 255, 255),
+             1,
+         )
+
+         return vis_frame
+     return frame
+
+
+ def apply_mouth_area(
+     frame: np.ndarray,
+     mouth_cutout: np.ndarray,
+     mouth_box: tuple,
+     face_mask: np.ndarray,
+     mouth_polygon: np.ndarray,
+ ) -> np.ndarray:
+     min_x, min_y, max_x, max_y = mouth_box
+     box_width = max_x - min_x
+     box_height = max_y - min_y
+
+     if (
+         mouth_cutout is None
+         or face_mask is None
+         or mouth_polygon is None
+         or box_width <= 0
+         or box_height <= 0
+     ):
+         return frame
+
+     try:
+         resized_mouth_cutout = cv2.resize(mouth_cutout, (box_width, box_height))
+         roi = frame[min_y:max_y, min_x:max_x]
+
+         if roi.shape != resized_mouth_cutout.shape:
+             resized_mouth_cutout = cv2.resize(
+                 resized_mouth_cutout, (roi.shape[1], roi.shape[0])
+             )
+
+         color_corrected_mouth = apply_color_transfer(resized_mouth_cutout, roi)
+
+         # Use the provided mouth polygon to create the mask
+         polygon_mask = np.zeros(roi.shape[:2], dtype=np.uint8)
+         adjusted_polygon = mouth_polygon - [min_x, min_y]
+         cv2.fillPoly(polygon_mask, [adjusted_polygon], 255)
+
+         # Apply feathering to the polygon mask (sigma must stay positive)
+         feather_amount = max(
+             1,
+             min(
+                 30,
+                 box_width // modules.globals.mask_feather_ratio,
+                 box_height // modules.globals.mask_feather_ratio,
+             ),
+         )
+         feathered_mask = cv2.GaussianBlur(
+             polygon_mask.astype(float), (0, 0), feather_amount
+         )
+         feathered_mask = feathered_mask / max(feathered_mask.max(), 1e-6)
+
+         face_mask_roi = face_mask[min_y:max_y, min_x:max_x]
+         combined_mask = feathered_mask * (face_mask_roi / 255.0)
+
+         combined_mask = combined_mask[:, :, np.newaxis]
+         blended = (
+             color_corrected_mouth * combined_mask + roi * (1 - combined_mask)
+         ).astype(np.uint8)
+
+         # Apply face mask to blended result
+         face_mask_3channel = (
+             np.repeat(face_mask_roi[:, :, np.newaxis], 3, axis=2) / 255.0
+         )
+         final_blend = blended * face_mask_3channel + roi * (1 - face_mask_3channel)
+
+         frame[min_y:max_y, min_x:max_x] = final_blend.astype(np.uint8)
+     except Exception:
+         # Leave the frame untouched if anything goes wrong while blending
+         pass
+
+     return frame
+
+
+ def create_face_mask(face: Face, frame: Frame) -> np.ndarray:
+     mask = np.zeros(frame.shape[:2], dtype=np.uint8)
+     landmarks = face.landmark_2d_106
+     if landmarks is not None:
+         # Convert landmarks to int32
+         landmarks = landmarks.astype(np.int32)
+
+         # Extract facial features
+         right_side_face = landmarks[0:16]
+         left_side_face = landmarks[17:32]
+         right_eye = landmarks[33:42]
+         right_eye_brow = landmarks[43:51]
+         left_eye = landmarks[87:96]
+         left_eye_brow = landmarks[97:105]
+
+         # Calculate forehead extension
+         right_eyebrow_top = np.min(right_eye_brow[:, 1])
+         left_eyebrow_top = np.min(left_eye_brow[:, 1])
+         eyebrow_top = min(right_eyebrow_top, left_eyebrow_top)
+
+         face_top = np.min([right_side_face[0, 1], left_side_face[-1, 1]])
+         forehead_height = face_top - eyebrow_top
+         # Extend to 5x the eyebrow-to-face-top distance so the mask covers the forehead
+         extended_forehead_height = int(forehead_height * 5.0)
+
+         # Create forehead points
+         forehead_left = right_side_face[0].copy()
+         forehead_right = left_side_face[-1].copy()
+         forehead_left[1] -= extended_forehead_height
+         forehead_right[1] -= extended_forehead_height
+
+         # Combine all points to create the face outline
+         face_outline = np.vstack(
+             [
+                 [forehead_left],
+                 right_side_face,
+                 left_side_face[::-1],  # Reverse left side to create a continuous outline
+                 [forehead_right],
+             ]
+         )
+
+         # Calculate padding
+         padding = int(
+             np.linalg.norm(right_side_face[0] - left_side_face[-1]) * 0.05
+         )  # 5% of face width
+
+         # Create a slightly larger convex hull for padding
+         hull = cv2.convexHull(face_outline)
+         hull_padded = []
+         center = np.mean(face_outline, axis=0)  # loop-invariant, computed once
+         for point in hull:
+             x, y = point[0]
+             direction = np.array([x, y]) - center
+             direction = direction / np.linalg.norm(direction)
+             padded_point = np.array([x, y]) + direction * padding
+             hull_padded.append(padded_point)
+
+         hull_padded = np.array(hull_padded, dtype=np.int32)
+
+         # Fill the padded convex hull
+         cv2.fillConvexPoly(mask, hull_padded, 255)
+
+         # Smooth the mask edges
+         mask = cv2.GaussianBlur(mask, (5, 5), 3)
+
+     return mask
+
+
+ def apply_color_transfer(source, target):
+     """
+     Apply color transfer from target to source image
+     """
+     source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
+     target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
+
+     source_mean, source_std = cv2.meanStdDev(source)
+     target_mean, target_std = cv2.meanStdDev(target)
+
+     # Reshape mean and std to be broadcastable
+     source_mean = source_mean.reshape(1, 1, 3)
+     source_std = source_std.reshape(1, 1, 3)
+     target_mean = target_mean.reshape(1, 1, 3)
+     target_std = target_std.reshape(1, 1, 3)
+
+     # Perform the color transfer
+     source = (source - source_mean) * (target_std / source_std) + target_mean
+
+     return cv2.cvtColor(np.clip(source, 0, 255).astype("uint8"), cv2.COLOR_LAB2BGR)
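apply_color_transfer is a Reinhard-style transfer: it matches the per-channel LAB mean and standard deviation of the source patch to those of the target region. A self-contained usage sketch with synthetic BGR arrays, assuming the function is imported from this module:

    import numpy as np

    rng = np.random.default_rng(0)
    source = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)  # e.g. resized mouth cutout
    target = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)  # e.g. destination ROI
    corrected = apply_color_transfer(source, target)
    assert corrected.shape == source.shape and corrected.dtype == np.uint8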
modules/typing.py ADDED
@@ -0,0 +1,7 @@
+ from typing import Any
+
+ from insightface.app.common import Face
+ import numpy
+
+ Face = Face
+ Frame = numpy.ndarray[Any, Any]
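These aliases let the rest of the codebase annotate functions without importing insightface everywhere. For example (a hypothetical helper, not part of this diff):

    from modules.typing import Face, Frame

    def describe(face: Face, frame: Frame) -> str:
        return f"face bbox {face['bbox']} in a {frame.shape[1]}x{frame.shape[0]} frame"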
modules/ui.json ADDED
@@ -0,0 +1,158 @@
+ {
+   "CTk": {
+     "fg_color": ["gray95", "gray10"]
+   },
+   "CTkToplevel": {
+     "fg_color": ["gray95", "gray10"]
+   },
+   "CTkFrame": {
+     "corner_radius": 0,
+     "border_width": 0,
+     "fg_color": ["gray90", "gray13"],
+     "top_fg_color": ["gray85", "gray16"],
+     "border_color": ["gray65", "gray28"]
+   },
+   "CTkButton": {
+     "corner_radius": 0,
+     "border_width": 0,
+     "fg_color": ["#2aa666", "#1f538d"],
+     "hover_color": ["#3cb666", "#14375e"],
+     "border_color": ["#3e4a40", "#949A9F"],
+     "text_color": ["#f3faf6", "#f3faf6"],
+     "text_color_disabled": ["gray74", "gray60"]
+   },
+   "CTkLabel": {
+     "corner_radius": 0,
+     "fg_color": "transparent",
+     "text_color": ["gray14", "gray84"]
+   },
+   "CTkEntry": {
+     "corner_radius": 0,
+     "border_width": 2,
+     "fg_color": ["#F9F9FA", "#343638"],
+     "border_color": ["#979DA2", "#565B5E"],
+     "text_color": ["gray14", "gray84"],
+     "placeholder_text_color": ["gray52", "gray62"]
+   },
+   "CTkCheckbox": {
+     "corner_radius": 0,
+     "border_width": 3,
+     "fg_color": ["#2aa666", "#1f538d"],
+     "border_color": ["#3e4a40", "#949A9F"],
+     "hover_color": ["#3cb666", "#14375e"],
+     "checkmark_color": ["#f3faf6", "gray90"],
+     "text_color": ["gray14", "gray84"],
+     "text_color_disabled": ["gray60", "gray45"]
+   },
+   "CTkSwitch": {
+     "corner_radius": 1000,
+     "border_width": 3,
+     "button_length": 0,
+     "fg_color": ["#939BA2", "#4A4D50"],
+     "progress_color": ["#2aa666", "#1f538d"],
+     "button_color": ["gray36", "#D5D9DE"],
+     "button_hover_color": ["gray20", "gray100"],
+     "text_color": ["gray14", "gray84"],
+     "text_color_disabled": ["gray60", "gray45"]
+   },
+   "CTkRadiobutton": {
+     "corner_radius": 1000,
+     "border_width_checked": 6,
+     "border_width_unchecked": 3,
+     "fg_color": ["#2aa666", "#1f538d"],
+     "border_color": ["#3e4a40", "#949A9F"],
+     "hover_color": ["#3cb666", "#14375e"],
+     "text_color": ["gray14", "gray84"],
+     "text_color_disabled": ["gray60", "gray45"]
+   },
+   "CTkProgressBar": {
+     "corner_radius": 1000,
+     "border_width": 0,
+     "fg_color": ["#939BA2", "#4A4D50"],
+     "progress_color": ["#2aa666", "#1f538d"],
+     "border_color": ["gray", "gray"]
+   },
+   "CTkSlider": {
+     "corner_radius": 1000,
+     "button_corner_radius": 1000,
+     "border_width": 6,
+     "button_length": 0,
+     "fg_color": ["#939BA2", "#4A4D50"],
+     "progress_color": ["gray40", "#AAB0B5"],
+     "button_color": ["#2aa666", "#1f538d"],
+     "button_hover_color": ["#3cb666", "#14375e"]
+   },
+   "CTkOptionMenu": {
+     "corner_radius": 0,
+     "fg_color": ["#2aa666", "#1f538d"],
+     "button_color": ["#3cb666", "#14375e"],
+     "button_hover_color": ["#234567", "#1e2c40"],
+     "text_color": ["#f3faf6", "#f3faf6"],
+     "text_color_disabled": ["gray74", "gray60"]
+   },
+   "CTkComboBox": {
+     "corner_radius": 0,
+     "border_width": 2,
+     "fg_color": ["#F9F9FA", "#343638"],
+     "border_color": ["#979DA2", "#565B5E"],
+     "button_color": ["#979DA2", "#565B5E"],
+     "button_hover_color": ["#6E7174", "#7A848D"],
+     "text_color": ["gray14", "gray84"],
+     "text_color_disabled": ["gray50", "gray45"]
+   },
+   "CTkScrollbar": {
+     "corner_radius": 1000,
+     "border_spacing": 4,
+     "fg_color": "transparent",
+     "button_color": ["gray55", "gray41"],
+     "button_hover_color": ["gray40", "gray53"]
+   },
+   "CTkSegmentedButton": {
+     "corner_radius": 0,
+     "border_width": 2,
+     "fg_color": ["#979DA2", "gray29"],
+     "selected_color": ["#2aa666", "#1f538d"],
+     "selected_hover_color": ["#3cb666", "#14375e"],
+     "unselected_color": ["#979DA2", "gray29"],
+     "unselected_hover_color": ["gray70", "gray41"],
+     "text_color": ["#f3faf6", "#f3faf6"],
+     "text_color_disabled": ["gray74", "gray60"]
+   },
+   "CTkTextbox": {
+     "corner_radius": 0,
+     "border_width": 0,
+     "fg_color": ["gray100", "gray20"],
+     "border_color": ["#979DA2", "#565B5E"],
+     "text_color": ["gray14", "gray84"],
+     "scrollbar_button_color": ["gray55", "gray41"],
+     "scrollbar_button_hover_color": ["gray40", "gray53"]
+   },
+   "CTkScrollableFrame": {
+     "label_fg_color": ["gray80", "gray21"]
+   },
+   "DropdownMenu": {
+     "fg_color": ["gray90", "gray20"],
+     "hover_color": ["gray75", "gray28"],
+     "text_color": ["gray14", "gray84"]
+   },
+   "CTkFont": {
+     "macOS": {
+       "family": "Avenir",
+       "size": 18,
+       "weight": "normal"
+     },
+     "Windows": {
+       "family": "Corbel",
+       "size": 18,
+       "weight": "normal"
+     },
+     "Linux": {
+       "family": "Montserrat",
+       "size": 18,
+       "weight": "normal"
+     }
+   },
+   "URL": {
+     "text_color": ["gray74", "gray60"]
+   }
+ }
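This file is a customtkinter color theme: each widget key carries [light, dark] value pairs. ui.py loads it via resolve_relative_path; a minimal standalone sketch of applying such a theme file:

    import customtkinter as ctk

    ctk.set_default_color_theme("ui.json")  # path assumed to sit next to the script
    ctk.set_appearance_mode("system")  # selects the light or dark half of each pair
    app = ctk.CTk()
    ctk.CTkButton(app, text="Themed button").pack(padx=20, pady=20)
    app.mainloop()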
modules/ui.py ADDED
@@ -0,0 +1,1206 @@
+ import os
+ import webbrowser
+ import customtkinter as ctk
+ from typing import Callable, Tuple
+ import cv2
+ from cv2_enumerate_cameras import enumerate_cameras  # Add this import
+ from PIL import Image, ImageOps
+ import time
+ import json
+ import modules.globals
+ import modules.metadata
+ from modules.face_analyser import (
+     get_one_face,
+     get_unique_faces_from_target_image,
+     get_unique_faces_from_target_video,
+     add_blank_map,
+     has_valid_map,
+     simplify_maps,
+ )
+ from modules.capturer import get_video_frame, get_video_frame_total
+ from modules.processors.frame.core import get_frame_processors_modules
+ from modules.utilities import (
+     is_image,
+     is_video,
+     resolve_relative_path,
+     has_image_extension,
+ )
+ from modules.video_capture import VideoCapturer
+ from modules.gettext import LanguageManager
+ import platform
+
+ if platform.system() == "Windows":
+     from pygrabber.dshow_graph import FilterGraph
+
+ ROOT = None
+ POPUP = None
+ POPUP_LIVE = None
+ ROOT_HEIGHT = 700
+ ROOT_WIDTH = 600
+
+ PREVIEW = None
+ PREVIEW_MAX_HEIGHT = 700
+ PREVIEW_MAX_WIDTH = 1200
+ PREVIEW_DEFAULT_WIDTH = 960
+ PREVIEW_DEFAULT_HEIGHT = 540
+
+ POPUP_WIDTH = 750
+ POPUP_HEIGHT = 810
+ POPUP_SCROLL_WIDTH = 740  # was accidentally a 1-tuple; widget widths must be ints
+ POPUP_SCROLL_HEIGHT = 700
+
+ POPUP_LIVE_WIDTH = 900
+ POPUP_LIVE_HEIGHT = 820
+ POPUP_LIVE_SCROLL_WIDTH = 890  # was accidentally a 1-tuple; widget widths must be ints
+ POPUP_LIVE_SCROLL_HEIGHT = 700
+
+ MAPPER_PREVIEW_MAX_HEIGHT = 100
+ MAPPER_PREVIEW_MAX_WIDTH = 100
+
+ DEFAULT_BUTTON_WIDTH = 200
+ DEFAULT_BUTTON_HEIGHT = 40
+
+ RECENT_DIRECTORY_SOURCE = None
+ RECENT_DIRECTORY_TARGET = None
+ RECENT_DIRECTORY_OUTPUT = None
+
+ _ = None
+ preview_label = None
+ preview_slider = None
+ source_label = None
+ target_label = None
+ status_label = None
+ popup_status_label = None
+ popup_status_label_live = None
+ source_label_dict = {}
+ source_label_dict_live = {}
+ target_label_dict_live = {}
+
+ img_ft, vid_ft = modules.globals.file_types
+
+
+ def init(start: Callable[[], None], destroy: Callable[[], None], lang: str) -> ctk.CTk:
+     global ROOT, PREVIEW, _
+
+     lang_manager = LanguageManager(lang)
+     _ = lang_manager._
+     ROOT = create_root(start, destroy)
+     PREVIEW = create_preview(ROOT)
+
+     return ROOT
+
+
+ def save_switch_states():
+     switch_states = {
+         "keep_fps": modules.globals.keep_fps,
+         "keep_audio": modules.globals.keep_audio,
+         "keep_frames": modules.globals.keep_frames,
+         "many_faces": modules.globals.many_faces,
+         "map_faces": modules.globals.map_faces,
+         "color_correction": modules.globals.color_correction,
+         "nsfw_filter": modules.globals.nsfw_filter,
+         "live_mirror": modules.globals.live_mirror,
+         "live_resizable": modules.globals.live_resizable,
+         "fp_ui": modules.globals.fp_ui,
+         "show_fps": modules.globals.show_fps,
+         "mouth_mask": modules.globals.mouth_mask,
+         "show_mouth_mask_box": modules.globals.show_mouth_mask_box,
+     }
+     with open("switch_states.json", "w") as f:
+         json.dump(switch_states, f)
+
+
+ def load_switch_states():
+     try:
+         with open("switch_states.json", "r") as f:
+             switch_states = json.load(f)
+         modules.globals.keep_fps = switch_states.get("keep_fps", True)
+         modules.globals.keep_audio = switch_states.get("keep_audio", True)
+         modules.globals.keep_frames = switch_states.get("keep_frames", False)
+         modules.globals.many_faces = switch_states.get("many_faces", False)
+         modules.globals.map_faces = switch_states.get("map_faces", False)
+         modules.globals.color_correction = switch_states.get("color_correction", False)
+         modules.globals.nsfw_filter = switch_states.get("nsfw_filter", False)
+         modules.globals.live_mirror = switch_states.get("live_mirror", False)
+         modules.globals.live_resizable = switch_states.get("live_resizable", False)
+         modules.globals.fp_ui = switch_states.get("fp_ui", {"face_enhancer": False})
+         modules.globals.show_fps = switch_states.get("show_fps", False)
+         modules.globals.mouth_mask = switch_states.get("mouth_mask", False)
+         modules.globals.show_mouth_mask_box = switch_states.get(
+             "show_mouth_mask_box", False
+         )
+     except FileNotFoundError:
+         # If the file doesn't exist, use default values
+         pass
+
+
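The two functions above round-trip the UI toggles through a flat switch_states.json in the working directory. A sketch of inspecting it (keys as written by save_switch_states):

    import json

    with open("switch_states.json") as f:
        states = json.load(f)
    # e.g. {"keep_fps": true, "keep_audio": true, "mouth_mask": false, ...}
    print(states.get("mouth_mask", False))
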
+ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
+     global source_label, target_label, status_label, show_fps_switch
+
+     load_switch_states()
+
+     ctk.deactivate_automatic_dpi_awareness()
+     ctk.set_appearance_mode("system")
+     ctk.set_default_color_theme(resolve_relative_path("ui.json"))
+
+     root = ctk.CTk()
+     root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
+     root.title(
+         f"{modules.metadata.name} {modules.metadata.version} {modules.metadata.edition}"
+     )
+     root.configure()
+     root.protocol("WM_DELETE_WINDOW", lambda: destroy())
+
+     source_label = ctk.CTkLabel(root, text=None)
+     source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
+
+     target_label = ctk.CTkLabel(root, text=None)
+     target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
+
+     select_face_button = ctk.CTkButton(
+         root, text=_("Select a face"), cursor="hand2", command=lambda: select_source_path()
+     )
+     select_face_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
+
+     swap_faces_button = ctk.CTkButton(
+         root, text="↔", cursor="hand2", command=lambda: swap_faces_paths()
+     )
+     swap_faces_button.place(relx=0.45, rely=0.4, relwidth=0.1, relheight=0.1)
+
+     select_target_button = ctk.CTkButton(
+         root,
+         text=_("Select a target"),
+         cursor="hand2",
+         command=lambda: select_target_path(),
+     )
+     select_target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
+
+     keep_fps_value = ctk.BooleanVar(value=modules.globals.keep_fps)
+     keep_fps_checkbox = ctk.CTkSwitch(
+         root,
+         text=_("Keep fps"),
+         variable=keep_fps_value,
+         cursor="hand2",
+         command=lambda: (
+             setattr(modules.globals, "keep_fps", keep_fps_value.get()),
+             save_switch_states(),
+         ),
+     )
+     keep_fps_checkbox.place(relx=0.1, rely=0.6)
+
+     keep_frames_value = ctk.BooleanVar(value=modules.globals.keep_frames)
+     keep_frames_switch = ctk.CTkSwitch(
+         root,
+         text=_("Keep frames"),
+         variable=keep_frames_value,
+         cursor="hand2",
+         command=lambda: (
+             setattr(modules.globals, "keep_frames", keep_frames_value.get()),
+             save_switch_states(),
+         ),
+     )
+     keep_frames_switch.place(relx=0.1, rely=0.65)
+
+     enhancer_value = ctk.BooleanVar(value=modules.globals.fp_ui["face_enhancer"])
+     enhancer_switch = ctk.CTkSwitch(
+         root,
+         text=_("Face Enhancer"),
+         variable=enhancer_value,
+         cursor="hand2",
+         command=lambda: (
+             update_tumbler("face_enhancer", enhancer_value.get()),
+             save_switch_states(),
+         ),
+     )
+     enhancer_switch.place(relx=0.1, rely=0.7)
+
+     keep_audio_value = ctk.BooleanVar(value=modules.globals.keep_audio)
+     keep_audio_switch = ctk.CTkSwitch(
+         root,
+         text=_("Keep audio"),
+         variable=keep_audio_value,
+         cursor="hand2",
+         command=lambda: (
+             setattr(modules.globals, "keep_audio", keep_audio_value.get()),
+             save_switch_states(),
+         ),
+     )
+     keep_audio_switch.place(relx=0.6, rely=0.6)
+
+     many_faces_value = ctk.BooleanVar(value=modules.globals.many_faces)
+     many_faces_switch = ctk.CTkSwitch(
+         root,
+         text=_("Many faces"),
+         variable=many_faces_value,
+         cursor="hand2",
+         command=lambda: (
+             setattr(modules.globals, "many_faces", many_faces_value.get()),
+             save_switch_states(),
+         ),
+     )
+     many_faces_switch.place(relx=0.6, rely=0.65)
+
+     color_correction_value = ctk.BooleanVar(value=modules.globals.color_correction)
+     color_correction_switch = ctk.CTkSwitch(
+         root,
+         text=_("Fix Blueish Cam"),
+         variable=color_correction_value,
+         cursor="hand2",
+         command=lambda: (
+             setattr(modules.globals, "color_correction", color_correction_value.get()),
+             save_switch_states(),
+         ),
+     )
+     color_correction_switch.place(relx=0.6, rely=0.70)
+
+     # nsfw_value = ctk.BooleanVar(value=modules.globals.nsfw_filter)
+     # nsfw_switch = ctk.CTkSwitch(root, text='NSFW filter', variable=nsfw_value, cursor='hand2', command=lambda: setattr(modules.globals, 'nsfw_filter', nsfw_value.get()))
+     # nsfw_switch.place(relx=0.6, rely=0.7)
+
+     map_faces = ctk.BooleanVar(value=modules.globals.map_faces)
+     map_faces_switch = ctk.CTkSwitch(
+         root,
+         text=_("Map faces"),
+         variable=map_faces,
+         cursor="hand2",
+         command=lambda: (
+             setattr(modules.globals, "map_faces", map_faces.get()),
+             save_switch_states(),
+             close_mapper_window() if not map_faces.get() else None,
+         ),
+     )
+     map_faces_switch.place(relx=0.1, rely=0.75)
+
+     show_fps_value = ctk.BooleanVar(value=modules.globals.show_fps)
+     show_fps_switch = ctk.CTkSwitch(
+         root,
+         text=_("Show FPS"),
+         variable=show_fps_value,
+         cursor="hand2",
+         command=lambda: (
+             setattr(modules.globals, "show_fps", show_fps_value.get()),
+             save_switch_states(),
+         ),
+     )
+     show_fps_switch.place(relx=0.6, rely=0.75)
+
+     mouth_mask_var = ctk.BooleanVar(value=modules.globals.mouth_mask)
+     mouth_mask_switch = ctk.CTkSwitch(
+         root,
+         text=_("Mouth Mask"),
+         variable=mouth_mask_var,
+         cursor="hand2",
+         command=lambda: setattr(modules.globals, "mouth_mask", mouth_mask_var.get()),
+     )
+     mouth_mask_switch.place(relx=0.1, rely=0.55)
+
+     show_mouth_mask_box_var = ctk.BooleanVar(value=modules.globals.show_mouth_mask_box)
+     show_mouth_mask_box_switch = ctk.CTkSwitch(
+         root,
+         text=_("Show Mouth Mask Box"),
+         variable=show_mouth_mask_box_var,
+         cursor="hand2",
+         command=lambda: setattr(
+             modules.globals, "show_mouth_mask_box", show_mouth_mask_box_var.get()
+         ),
+     )
+     show_mouth_mask_box_switch.place(relx=0.6, rely=0.55)
+
+     start_button = ctk.CTkButton(
+         root, text=_("Start"), cursor="hand2", command=lambda: analyze_target(start, root)
+     )
+     start_button.place(relx=0.15, rely=0.80, relwidth=0.2, relheight=0.05)
+
+     stop_button = ctk.CTkButton(
+         root, text=_("Destroy"), cursor="hand2", command=lambda: destroy()
+     )
+     stop_button.place(relx=0.4, rely=0.80, relwidth=0.2, relheight=0.05)
+
+     preview_button = ctk.CTkButton(
+         root, text=_("Preview"), cursor="hand2", command=lambda: toggle_preview()
+     )
+     preview_button.place(relx=0.65, rely=0.80, relwidth=0.2, relheight=0.05)
+
+     # --- Camera Selection ---
+     camera_label = ctk.CTkLabel(root, text=_("Select Camera:"))
+     camera_label.place(relx=0.1, rely=0.86, relwidth=0.2, relheight=0.05)
+
+     available_cameras = get_available_cameras()
+     camera_indices, camera_names = available_cameras
+
+     if not camera_names or camera_names[0] == "No cameras found":
+         camera_variable = ctk.StringVar(value="No cameras found")
+         camera_optionmenu = ctk.CTkOptionMenu(
+             root,
+             variable=camera_variable,
+             values=["No cameras found"],
+             state="disabled",
+         )
+     else:
+         camera_variable = ctk.StringVar(value=camera_names[0])
+         camera_optionmenu = ctk.CTkOptionMenu(
+             root, variable=camera_variable, values=camera_names
+         )
+
+     camera_optionmenu.place(relx=0.35, rely=0.86, relwidth=0.25, relheight=0.05)
+
+     live_button = ctk.CTkButton(
+         root,
+         text=_("Live"),
+         cursor="hand2",
+         command=lambda: webcam_preview(
+             root,
+             (
+                 camera_indices[camera_names.index(camera_variable.get())]
+                 if camera_names and camera_names[0] != "No cameras found"
+                 else None
+             ),
+         ),
+         state=(
+             "normal"
+             if camera_names and camera_names[0] != "No cameras found"
+             else "disabled"
+         ),
+     )
+     live_button.place(relx=0.65, rely=0.86, relwidth=0.2, relheight=0.05)
+     # --- End Camera Selection ---
+
+     status_label = ctk.CTkLabel(root, text=None, justify="center")
+     status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
+
+     donate_label = ctk.CTkLabel(
+         root, text="Deep Live Cam", justify="center", cursor="hand2"
+     )
+     donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
+     donate_label.configure(
+         text_color=ctk.ThemeManager.theme.get("URL").get("text_color")
+     )
+     donate_label.bind(
+         "<Button>", lambda event: webbrowser.open("https://deeplivecam.net")
+     )
+
+     return root
+
+
+ def close_mapper_window():
+     global POPUP, POPUP_LIVE
+     if POPUP and POPUP.winfo_exists():
+         POPUP.destroy()
+         POPUP = None
+     if POPUP_LIVE and POPUP_LIVE.winfo_exists():
+         POPUP_LIVE.destroy()
+         POPUP_LIVE = None
+
+
+ def analyze_target(start: Callable[[], None], root: ctk.CTk):
+     if POPUP is not None and POPUP.winfo_exists():
+         update_status("Please complete pop-up or close it.")
+         return
+
+     if modules.globals.map_faces:
+         modules.globals.source_target_map = []
+
+         if is_image(modules.globals.target_path):
+             update_status("Getting unique faces")
+             get_unique_faces_from_target_image()
+         elif is_video(modules.globals.target_path):
+             update_status("Getting unique faces")
+             get_unique_faces_from_target_video()
+
+         if len(modules.globals.source_target_map) > 0:
+             create_source_target_popup(start, root, modules.globals.source_target_map)
+         else:
+             update_status("No faces found in target")
+     else:
+         select_output_path(start)
+
+
+ def create_source_target_popup(
+     start: Callable[[], None], root: ctk.CTk, map: list
+ ) -> None:
+     global POPUP, popup_status_label
+
+     POPUP = ctk.CTkToplevel(root)
+     POPUP.title(_("Source x Target Mapper"))
+     POPUP.geometry(f"{POPUP_WIDTH}x{POPUP_HEIGHT}")
+     POPUP.focus()
+
+     def on_submit_click(start):
+         if has_valid_map():
+             POPUP.destroy()
+             select_output_path(start)
+         else:
+             update_pop_status("At least 1 source with target is required!")
+
+     scrollable_frame = ctk.CTkScrollableFrame(
+         POPUP, width=POPUP_SCROLL_WIDTH, height=POPUP_SCROLL_HEIGHT
+     )
+     scrollable_frame.grid(row=0, column=0, padx=0, pady=0, sticky="nsew")
+
+     def on_button_click(map, button_num):
+         map = update_popup_source(scrollable_frame, map, button_num)
+
+     for item in map:
+         id = item["id"]
+
+         button = ctk.CTkButton(
+             scrollable_frame,
+             text=_("Select source image"),
+             command=lambda id=id: on_button_click(map, id),
+             width=DEFAULT_BUTTON_WIDTH,
+             height=DEFAULT_BUTTON_HEIGHT,
+         )
+         button.grid(row=id, column=0, padx=50, pady=10)
+
+         x_label = ctk.CTkLabel(
+             scrollable_frame,
+             text="X",
+             width=MAPPER_PREVIEW_MAX_WIDTH,
+             height=MAPPER_PREVIEW_MAX_HEIGHT,
+         )
+         x_label.grid(row=id, column=2, padx=10, pady=10)
+
+         image = Image.fromarray(cv2.cvtColor(item["target"]["cv2"], cv2.COLOR_BGR2RGB))
+         image = image.resize(
+             (MAPPER_PREVIEW_MAX_WIDTH, MAPPER_PREVIEW_MAX_HEIGHT), Image.LANCZOS
+         )
+         tk_image = ctk.CTkImage(image, size=image.size)
+
+         target_image = ctk.CTkLabel(
+             scrollable_frame,
+             text=f"T-{id}",
+             width=MAPPER_PREVIEW_MAX_WIDTH,
+             height=MAPPER_PREVIEW_MAX_HEIGHT,
+         )
+         target_image.grid(row=id, column=3, padx=10, pady=10)
+         target_image.configure(image=tk_image)
+
+     popup_status_label = ctk.CTkLabel(POPUP, text=None, justify="center")
+     popup_status_label.grid(row=1, column=0, pady=15)
+
+     close_button = ctk.CTkButton(
+         POPUP, text=_("Submit"), command=lambda: on_submit_click(start)
+     )
+     close_button.grid(row=2, column=0, pady=10)
+
+
+ def update_popup_source(
+     scrollable_frame: ctk.CTkScrollableFrame, map: list, button_num: int
+ ) -> list:
+     global source_label_dict
+
+     source_path = ctk.filedialog.askopenfilename(
+         title=_("Select a source image"),
+         initialdir=RECENT_DIRECTORY_SOURCE,
+         filetypes=[img_ft],
+     )
+
+     if "source" in map[button_num]:
+         map[button_num].pop("source")
+         source_label_dict[button_num].destroy()
+         del source_label_dict[button_num]
+
+     if source_path == "":
+         return map
+     else:
+         cv2_img = cv2.imread(source_path)
+         face = get_one_face(cv2_img)
+
+         if face:
+             x_min, y_min, x_max, y_max = face["bbox"]
+
+             map[button_num]["source"] = {
+                 "cv2": cv2_img[int(y_min): int(y_max), int(x_min): int(x_max)],
+                 "face": face,
+             }
+
+             image = Image.fromarray(
+                 cv2.cvtColor(map[button_num]["source"]["cv2"], cv2.COLOR_BGR2RGB)
+             )
+             image = image.resize(
+                 (MAPPER_PREVIEW_MAX_WIDTH, MAPPER_PREVIEW_MAX_HEIGHT), Image.LANCZOS
+             )
+             tk_image = ctk.CTkImage(image, size=image.size)
+
+             source_image = ctk.CTkLabel(
+                 scrollable_frame,
+                 text=f"S-{button_num}",
+                 width=MAPPER_PREVIEW_MAX_WIDTH,
+                 height=MAPPER_PREVIEW_MAX_HEIGHT,
+             )
+             source_image.grid(row=button_num, column=1, padx=10, pady=10)
+             source_image.configure(image=tk_image)
+             source_label_dict[button_num] = source_image
+         else:
+             update_pop_status("Face could not be detected in last upload!")
+         return map
+
+
+ def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
+     global preview_label, preview_slider
+
+     preview = ctk.CTkToplevel(parent)
+     preview.withdraw()
+     preview.title(_("Preview"))
+     preview.configure()
+     preview.protocol("WM_DELETE_WINDOW", lambda: toggle_preview())
+     preview.resizable(width=True, height=True)
+
+     preview_label = ctk.CTkLabel(preview, text=None)
+     preview_label.pack(fill="both", expand=True)
+
+     preview_slider = ctk.CTkSlider(
+         preview, from_=0, to=0, command=lambda frame_value: update_preview(frame_value)
+     )
+
+     return preview
+
+
+ def update_status(text: str) -> None:
+     status_label.configure(text=_(text))
+     ROOT.update()
+
+
+ def update_pop_status(text: str) -> None:
+     popup_status_label.configure(text=_(text))
+
+
+ def update_pop_live_status(text: str) -> None:
+     popup_status_label_live.configure(text=_(text))
+
+
+ def update_tumbler(var: str, value: bool) -> None:
+     modules.globals.fp_ui[var] = value
+     save_switch_states()
+     # If we're currently in a live preview, update the frame processors
+     if PREVIEW.state() == "normal":
+         global frame_processors
+         frame_processors = get_frame_processors_modules(
+             modules.globals.frame_processors
+         )
+
+
+ def select_source_path() -> None:
+     global RECENT_DIRECTORY_SOURCE, img_ft, vid_ft
+
+     PREVIEW.withdraw()
+     source_path = ctk.filedialog.askopenfilename(
+         title=_("Select a source image"),
+         initialdir=RECENT_DIRECTORY_SOURCE,
+         filetypes=[img_ft],
+     )
+     if is_image(source_path):
+         modules.globals.source_path = source_path
+         RECENT_DIRECTORY_SOURCE = os.path.dirname(modules.globals.source_path)
+         image = render_image_preview(modules.globals.source_path, (200, 200))
+         source_label.configure(image=image)
+     else:
+         modules.globals.source_path = None
+         source_label.configure(image=None)
+
+
+ def swap_faces_paths() -> None:
+     global RECENT_DIRECTORY_SOURCE, RECENT_DIRECTORY_TARGET
+
+     source_path = modules.globals.source_path
+     target_path = modules.globals.target_path
+
+     if not is_image(source_path) or not is_image(target_path):
+         return
+
+     modules.globals.source_path = target_path
+     modules.globals.target_path = source_path
+
+     RECENT_DIRECTORY_SOURCE = os.path.dirname(modules.globals.source_path)
+     RECENT_DIRECTORY_TARGET = os.path.dirname(modules.globals.target_path)
+
+     PREVIEW.withdraw()
+
+     source_image = render_image_preview(modules.globals.source_path, (200, 200))
+     source_label.configure(image=source_image)
+
+     target_image = render_image_preview(modules.globals.target_path, (200, 200))
+     target_label.configure(image=target_image)
+
+
+ def select_target_path() -> None:
+     global RECENT_DIRECTORY_TARGET, img_ft, vid_ft
+
+     PREVIEW.withdraw()
+     target_path = ctk.filedialog.askopenfilename(
+         title=_("Select a target image or video"),
+         initialdir=RECENT_DIRECTORY_TARGET,
+         filetypes=[img_ft, vid_ft],
+     )
+     if is_image(target_path):
+         modules.globals.target_path = target_path
+         RECENT_DIRECTORY_TARGET = os.path.dirname(modules.globals.target_path)
+         image = render_image_preview(modules.globals.target_path, (200, 200))
+         target_label.configure(image=image)
+     elif is_video(target_path):
+         modules.globals.target_path = target_path
+         RECENT_DIRECTORY_TARGET = os.path.dirname(modules.globals.target_path)
+         video_frame = render_video_preview(target_path, (200, 200))
+         target_label.configure(image=video_frame)
+     else:
+         modules.globals.target_path = None
+         target_label.configure(image=None)
+
+
+ def select_output_path(start: Callable[[], None]) -> None:
+     global RECENT_DIRECTORY_OUTPUT, img_ft, vid_ft
+
+     if is_image(modules.globals.target_path):
+         output_path = ctk.filedialog.asksaveasfilename(
+             title=_("Save image output file"),
+             filetypes=[img_ft],
+             defaultextension=".png",
+             initialfile="output.png",
+             initialdir=RECENT_DIRECTORY_OUTPUT,
+         )
+     elif is_video(modules.globals.target_path):
+         output_path = ctk.filedialog.asksaveasfilename(
+             title=_("Save video output file"),
+             filetypes=[vid_ft],
+             defaultextension=".mp4",
+             initialfile="output.mp4",
+             initialdir=RECENT_DIRECTORY_OUTPUT,
+         )
+     else:
+         output_path = None
+     if output_path:
+         modules.globals.output_path = output_path
+         RECENT_DIRECTORY_OUTPUT = os.path.dirname(modules.globals.output_path)
+         start()
+
+
+ def check_and_ignore_nsfw(target, destroy: Callable = None) -> bool:
+     """Check if the target is NSFW.
+     TODO: Consider blurring the target instead of skipping it.
+     """
+     from numpy import ndarray
+     from modules.predicter import predict_image, predict_video, predict_frame
+
+     check_nsfw = None  # stays None for unsupported target types
+     if type(target) is str:  # image/video file path
+         check_nsfw = predict_image if has_image_extension(target) else predict_video
+     elif type(target) is ndarray:  # frame object
+         check_nsfw = predict_frame
+     if check_nsfw and check_nsfw(target):
+         if destroy:
+             destroy(
+                 to_quit=False
+             )  # Do not need to destroy the window frame if the target is NSFW
+         update_status("Processing ignored!")
+         return True
+     else:
+         return False
+
+
+ def fit_image_to_size(image, width: int, height: int):
+     if width is None or height is None or width <= 0 or height <= 0:
+         return image
+     h, w, _ = image.shape
+     ratio_w = width / w
+     ratio_h = height / h
+     # Use the smaller ratio to ensure the image fits within the given dimensions
+     ratio = min(ratio_w, ratio_h)
+
+     # Compute new dimensions, ensuring they're at least 1 pixel
+     new_width = max(1, int(ratio * w))
+     new_height = max(1, int(ratio * h))
+     new_size = (new_width, new_height)
+
+     return cv2.resize(image, dsize=new_size)
+
+
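fit_image_to_size is a letterbox-style fit: it scales by the smaller of the width and height ratios so the frame never overflows the preview window. A quick check with a synthetic frame, assuming the function is imported from this module:

    import numpy as np

    image = np.zeros((540, 960, 3), dtype=np.uint8)  # 16:9 frame
    fitted = fit_image_to_size(image, 400, 400)
    # min(400/960, 400/540) = 400/960, so the result is 400x225
    assert (fitted.shape[1], fitted.shape[0]) == (400, 225)
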
+ def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
+     image = Image.open(image_path)
+     if size:
+         image = ImageOps.fit(image, size, Image.LANCZOS)
+     return ctk.CTkImage(image, size=image.size)
+
+
+ def render_video_preview(
+     video_path: str, size: Tuple[int, int], frame_number: int = 0
+ ) -> ctk.CTkImage:
+     capture = cv2.VideoCapture(video_path)
+     if frame_number:
+         capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
+     has_frame, frame = capture.read()
+     # Release before returning so the file handle is not leaked on success
+     capture.release()
+     if has_frame:
+         image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+         if size:
+             image = ImageOps.fit(image, size, Image.LANCZOS)
+         return ctk.CTkImage(image, size=image.size)
+
+
+ def toggle_preview() -> None:
+     if PREVIEW.state() == "normal":
+         PREVIEW.withdraw()
+     elif modules.globals.source_path and modules.globals.target_path:
+         init_preview()
+         update_preview()
+
+
+ def init_preview() -> None:
+     if is_image(modules.globals.target_path):
+         preview_slider.pack_forget()
+     if is_video(modules.globals.target_path):
+         video_frame_total = get_video_frame_total(modules.globals.target_path)
+         preview_slider.configure(to=video_frame_total)
+         preview_slider.pack(fill="x")
+         preview_slider.set(0)
+
+
+ def update_preview(frame_number: int = 0) -> None:
+     if modules.globals.source_path and modules.globals.target_path:
+         update_status("Processing...")
+         temp_frame = get_video_frame(modules.globals.target_path, frame_number)
+         if modules.globals.nsfw_filter and check_and_ignore_nsfw(temp_frame):
+             return
+         for frame_processor in get_frame_processors_modules(
+             modules.globals.frame_processors
+         ):
+             temp_frame = frame_processor.process_frame(
+                 get_one_face(cv2.imread(modules.globals.source_path)), temp_frame
+             )
+         image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
+         image = ImageOps.contain(
+             image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS
+         )
+         image = ctk.CTkImage(image, size=image.size)
+         preview_label.configure(image=image)
+         update_status("Processing succeeded!")
+         PREVIEW.deiconify()
+
+
+
780
+ def webcam_preview(root: ctk.CTk, camera_index: int):
781
+ global POPUP_LIVE
782
+
783
+ if POPUP_LIVE and POPUP_LIVE.winfo_exists():
784
+ update_status("Source x Target Mapper is already open.")
785
+ POPUP_LIVE.focus()
786
+ return
787
+
788
+ if not modules.globals.map_faces:
789
+ if modules.globals.source_path is None:
790
+ update_status("Please select a source image first")
791
+ return
792
+ create_webcam_preview(camera_index)
793
+ else:
794
+ modules.globals.source_target_map = []
795
+ create_source_target_popup_for_webcam(
796
+ root, modules.globals.source_target_map, camera_index
797
+ )
798
+
799
+
800
+
801
+ def get_available_cameras():
+     """Returns a list of available camera names and indices."""
+     if platform.system() == "Windows":
+         try:
+             graph = FilterGraph()
+             devices = graph.get_input_devices()
+
+             # Create list of indices and names
+             camera_indices = list(range(len(devices)))
+             camera_names = devices
+
+             # If no cameras found through DirectShow, try OpenCV fallback
+             if not camera_names:
+                 # Try to open camera with index -1 and 0
+                 test_indices = [-1, 0]
+                 working_indices = []
+                 working_cameras = []
+
+                 for idx in test_indices:
+                     cap = cv2.VideoCapture(idx)
+                     if cap.isOpened():
+                         # Track index and name together so they stay aligned
+                         working_indices.append(idx)
+                         working_cameras.append(f"Camera {idx}")
+                         cap.release()
+
+                 if working_cameras:
+                     return working_indices, working_cameras
+
+                 # If still no cameras found, return empty lists
+                 return [], ["No cameras found"]
+
+             return camera_indices, camera_names
+
+         except Exception as e:
+             print(f"Error detecting cameras: {str(e)}")
+             return [], ["No cameras found"]
+     else:
+         # Unix-like systems (Linux/Mac) camera detection
+         camera_indices = []
+         camera_names = []
+
+         if platform.system() == "Darwin":  # macOS specific handling
+             # Try to open the default FaceTime camera first
+             cap = cv2.VideoCapture(0)
+             if cap.isOpened():
+                 camera_indices.append(0)
+                 camera_names.append("FaceTime Camera")
+                 cap.release()
+
+             # On macOS, additional cameras typically use indices 1 and 2
+             for i in [1, 2]:
+                 cap = cv2.VideoCapture(i)
+                 if cap.isOpened():
+                     camera_indices.append(i)
+                     camera_names.append(f"Camera {i}")
+                     cap.release()
+         else:
+             # Linux camera detection - test first 10 indices
+             for i in range(10):
+                 cap = cv2.VideoCapture(i)
+                 if cap.isOpened():
+                     camera_indices.append(i)
+                     camera_names.append(f"Camera {i}")
+                     cap.release()
+
+         if not camera_names:
+             return [], ["No cameras found"]
+
+         return camera_indices, camera_names
+
+
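The camera dropdown in create_root is populated from exactly this pair of lists: names are shown to the user, indices are handed to the capture backend. A usage sketch:

    indices, names = get_available_cameras()
    for idx, name in zip(indices, names):
        print(f"[{idx}] {name}")
    # With no camera attached, indices == [] and names == ["No cameras found"],
    # so the loop prints nothing and the UI disables the Live button.
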
+ def create_webcam_preview(camera_index: int):
+     global preview_label, PREVIEW
+
+     cap = VideoCapturer(camera_index)
+     if not cap.start(PREVIEW_DEFAULT_WIDTH, PREVIEW_DEFAULT_HEIGHT, 60):
+         update_status("Failed to start camera")
+         return
+
+     preview_label.configure(width=PREVIEW_DEFAULT_WIDTH, height=PREVIEW_DEFAULT_HEIGHT)
+     PREVIEW.deiconify()
+
+     frame_processors = get_frame_processors_modules(modules.globals.frame_processors)
+     source_image = None
+     prev_time = time.time()
+     fps_update_interval = 0.5
+     frame_count = 0
+     fps = 0
+
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         temp_frame = frame.copy()
+
+         if modules.globals.live_mirror:
+             temp_frame = cv2.flip(temp_frame, 1)
+
+         # Both branches of the old live_resizable check fitted the frame to
+         # the window identically, so the check has been collapsed.
+         temp_frame = fit_image_to_size(
+             temp_frame, PREVIEW.winfo_width(), PREVIEW.winfo_height()
+         )
+
+         if not modules.globals.map_faces:
+             if source_image is None and modules.globals.source_path:
+                 source_image = get_one_face(cv2.imread(modules.globals.source_path))
+
+             for frame_processor in frame_processors:
+                 if frame_processor.NAME == "DLC.FACE-ENHANCER":
+                     if modules.globals.fp_ui["face_enhancer"]:
+                         temp_frame = frame_processor.process_frame(None, temp_frame)
+                 else:
+                     temp_frame = frame_processor.process_frame(source_image, temp_frame)
+         else:
+             modules.globals.target_path = None
+             for frame_processor in frame_processors:
+                 if frame_processor.NAME == "DLC.FACE-ENHANCER":
+                     if modules.globals.fp_ui["face_enhancer"]:
+                         temp_frame = frame_processor.process_frame_v2(temp_frame)
+                 else:
+                     temp_frame = frame_processor.process_frame_v2(temp_frame)
+
+         # Calculate and display FPS
+         current_time = time.time()
+         frame_count += 1
+         if current_time - prev_time >= fps_update_interval:
+             fps = frame_count / (current_time - prev_time)
+             frame_count = 0
+             prev_time = current_time
+
+         if modules.globals.show_fps:
+             cv2.putText(
+                 temp_frame,
+                 f"FPS: {fps:.1f}",
+                 (10, 30),
+                 cv2.FONT_HERSHEY_SIMPLEX,
+                 1,
+                 (0, 255, 0),
+                 2,
+             )
+
+         image = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)
+         image = Image.fromarray(image)
+         image = ImageOps.contain(
+             image, (temp_frame.shape[1], temp_frame.shape[0]), Image.LANCZOS
+         )
+         image = ctk.CTkImage(image, size=image.size)
+         preview_label.configure(image=image)
+         ROOT.update()
+
+         if PREVIEW.state() == "withdrawn":
+             break
+
+     cap.release()
+     PREVIEW.withdraw()
+
+
+ def create_source_target_popup_for_webcam(
+     root: ctk.CTk, map: list, camera_index: int
+ ) -> None:
+     global POPUP_LIVE, popup_status_label_live
+
+     POPUP_LIVE = ctk.CTkToplevel(root)
+     POPUP_LIVE.title(_("Source x Target Mapper"))
+     POPUP_LIVE.geometry(f"{POPUP_LIVE_WIDTH}x{POPUP_LIVE_HEIGHT}")
+     POPUP_LIVE.focus()
+
+     def on_submit_click():
+         if has_valid_map():
+             simplify_maps()
+             update_pop_live_status("Mappings successfully submitted!")
+             create_webcam_preview(camera_index)  # Open the preview window
+         else:
+             update_pop_live_status("At least 1 source with target is required!")
+
+     def on_add_click():
+         add_blank_map()
+         refresh_data(map)
+         update_pop_live_status("Please provide mapping!")
+
+     def on_clear_click():
+         clear_source_target_images(map)
+         refresh_data(map)
+         update_pop_live_status("All mappings cleared!")
+
+     popup_status_label_live = ctk.CTkLabel(POPUP_LIVE, text=None, justify="center")
+     popup_status_label_live.grid(row=1, column=0, pady=15)
+
+     add_button = ctk.CTkButton(POPUP_LIVE, text=_("Add"), command=lambda: on_add_click())
+     add_button.place(relx=0.1, rely=0.92, relwidth=0.2, relheight=0.05)
+
+     clear_button = ctk.CTkButton(POPUP_LIVE, text=_("Clear"), command=lambda: on_clear_click())
+     clear_button.place(relx=0.4, rely=0.92, relwidth=0.2, relheight=0.05)
+
+     close_button = ctk.CTkButton(
+         POPUP_LIVE, text=_("Submit"), command=lambda: on_submit_click()
+     )
+     close_button.place(relx=0.7, rely=0.92, relwidth=0.2, relheight=0.05)
+
+
+ def clear_source_target_images(map: list):
+     global source_label_dict_live, target_label_dict_live
+
+     for item in map:
+         if "source" in item:
+             del item["source"]
+         if "target" in item:
+             del item["target"]
+
+     for button_num in list(source_label_dict_live.keys()):
+         source_label_dict_live[button_num].destroy()
+         del source_label_dict_live[button_num]
+
+     for button_num in list(target_label_dict_live.keys()):
+         target_label_dict_live[button_num].destroy()
+         del target_label_dict_live[button_num]
+
+
+ def refresh_data(map: list):
+     global POPUP_LIVE
+
+     scrollable_frame = ctk.CTkScrollableFrame(
+         POPUP_LIVE, width=POPUP_LIVE_SCROLL_WIDTH, height=POPUP_LIVE_SCROLL_HEIGHT
+     )
+     scrollable_frame.grid(row=0, column=0, padx=0, pady=0, sticky="nsew")
+
+     def on_sbutton_click(map, button_num):
+         map = update_webcam_source(scrollable_frame, map, button_num)
+
+     def on_tbutton_click(map, button_num):
+         map = update_webcam_target(scrollable_frame, map, button_num)
+
+     for item in map:
+         id = item["id"]
+
+         button = ctk.CTkButton(
+             scrollable_frame,
+             text=_("Select source image"),
+             command=lambda id=id: on_sbutton_click(map, id),
+             width=DEFAULT_BUTTON_WIDTH,
+             height=DEFAULT_BUTTON_HEIGHT,
+         )
+         button.grid(row=id, column=0, padx=30, pady=10)
+
+         x_label = ctk.CTkLabel(
+             scrollable_frame,
+             text="X",
+             width=MAPPER_PREVIEW_MAX_WIDTH,
+             height=MAPPER_PREVIEW_MAX_HEIGHT,
+         )
+         x_label.grid(row=id, column=2, padx=10, pady=10)
+
+         button = ctk.CTkButton(
+             scrollable_frame,
+             text=_("Select target image"),
+             command=lambda id=id: on_tbutton_click(map, id),
+             width=DEFAULT_BUTTON_WIDTH,
+             height=DEFAULT_BUTTON_HEIGHT,
+         )
+         button.grid(row=id, column=3, padx=20, pady=10)
+
+         if "source" in item:
+             image = Image.fromarray(
+                 cv2.cvtColor(item["source"]["cv2"], cv2.COLOR_BGR2RGB)
+             )
+             image = image.resize(
+                 (MAPPER_PREVIEW_MAX_WIDTH, MAPPER_PREVIEW_MAX_HEIGHT), Image.LANCZOS
+             )
+             tk_image = ctk.CTkImage(image, size=image.size)
+
+             source_image = ctk.CTkLabel(
+                 scrollable_frame,
+                 text=f"S-{id}",
+                 width=MAPPER_PREVIEW_MAX_WIDTH,
+                 height=MAPPER_PREVIEW_MAX_HEIGHT,
+             )
+             source_image.grid(row=id, column=1, padx=10, pady=10)
+             source_image.configure(image=tk_image)
+
+         if "target" in item:
+             image = Image.fromarray(
+                 cv2.cvtColor(item["target"]["cv2"], cv2.COLOR_BGR2RGB)
+             )
+             image = image.resize(
+                 (MAPPER_PREVIEW_MAX_WIDTH, MAPPER_PREVIEW_MAX_HEIGHT), Image.LANCZOS
+             )
+             tk_image = ctk.CTkImage(image, size=image.size)
+
+             target_image = ctk.CTkLabel(
+                 scrollable_frame,
+                 text=f"T-{id}",
+                 width=MAPPER_PREVIEW_MAX_WIDTH,
+                 height=MAPPER_PREVIEW_MAX_HEIGHT,
+             )
+             target_image.grid(row=id, column=4, padx=20, pady=10)
+             target_image.configure(image=tk_image)
+
+
+ def update_webcam_source(
+     scrollable_frame: ctk.CTkScrollableFrame, map: list, button_num: int
+ ) -> list:
+     global source_label_dict_live
+
+     source_path = ctk.filedialog.askopenfilename(
+         title=_("Select a source image"),
+         initialdir=RECENT_DIRECTORY_SOURCE,
+         filetypes=[img_ft],
+     )
+
+     if "source" in map[button_num]:
+         map[button_num].pop("source")
+         source_label_dict_live[button_num].destroy()
+         del source_label_dict_live[button_num]
+
+     if source_path == "":
+         return map
+     else:
+         cv2_img = cv2.imread(source_path)
+         face = get_one_face(cv2_img)
+
+         if face:
+             x_min, y_min, x_max, y_max = face["bbox"]
+
+             map[button_num]["source"] = {
+                 "cv2": cv2_img[int(y_min): int(y_max), int(x_min): int(x_max)],
+                 "face": face,
+             }
+
+             image = Image.fromarray(
+                 cv2.cvtColor(map[button_num]["source"]["cv2"], cv2.COLOR_BGR2RGB)
+             )
+             image = image.resize(
+                 (MAPPER_PREVIEW_MAX_WIDTH, MAPPER_PREVIEW_MAX_HEIGHT), Image.LANCZOS
+             )
+             tk_image = ctk.CTkImage(image, size=image.size)
+
+             source_image = ctk.CTkLabel(
+                 scrollable_frame,
+                 text=f"S-{button_num}",
+                 width=MAPPER_PREVIEW_MAX_WIDTH,
+                 height=MAPPER_PREVIEW_MAX_HEIGHT,
+             )
+             source_image.grid(row=button_num, column=1, padx=10, pady=10)
+             source_image.configure(image=tk_image)
+             source_label_dict_live[button_num] = source_image
+         else:
+             update_pop_live_status("Face could not be detected in last upload!")
+         return map
+
+
+ def update_webcam_target(
1158
+ scrollable_frame: ctk.CTkScrollableFrame, map: list, button_num: int
1159
+ ) -> list:
1160
+ global target_label_dict_live
1161
+
1162
+ target_path = ctk.filedialog.askopenfilename(
1163
+ title=_("select an target image"),
1164
+ initialdir=RECENT_DIRECTORY_SOURCE,
1165
+ filetypes=[img_ft],
1166
+ )
1167
+
1168
+ if "target" in map[button_num]:
1169
+ map[button_num].pop("target")
1170
+ target_label_dict_live[button_num].destroy()
1171
+ del target_label_dict_live[button_num]
1172
+
1173
+ if target_path == "":
1174
+ return map
1175
+ else:
1176
+ cv2_img = cv2.imread(target_path)
1177
+ face = get_one_face(cv2_img)
1178
+
1179
+ if face:
1180
+ x_min, y_min, x_max, y_max = face["bbox"]
1181
+
1182
+ map[button_num]["target"] = {
1183
+ "cv2": cv2_img[int(y_min): int(y_max), int(x_min): int(x_max)],
1184
+ "face": face,
1185
+ }
1186
+
1187
+ image = Image.fromarray(
1188
+ cv2.cvtColor(map[button_num]["target"]["cv2"], cv2.COLOR_BGR2RGB)
1189
+ )
1190
+ image = image.resize(
1191
+ (MAPPER_PREVIEW_MAX_WIDTH, MAPPER_PREVIEW_MAX_HEIGHT), Image.LANCZOS
1192
+ )
1193
+ tk_image = ctk.CTkImage(image, size=image.size)
1194
+
1195
+ target_image = ctk.CTkLabel(
1196
+ scrollable_frame,
1197
+ text=f"T-{button_num}",
1198
+ width=MAPPER_PREVIEW_MAX_WIDTH,
1199
+ height=MAPPER_PREVIEW_MAX_HEIGHT,
1200
+ )
1201
+ target_image.grid(row=button_num, column=4, padx=20, pady=10)
1202
+ target_image.configure(image=tk_image)
1203
+ target_label_dict_live[button_num] = target_image
1204
+ else:
1205
+ update_pop_live_status("Face could not be detected in last upload!")
1206
+ return map
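The two callbacks above mutate the shared `map` list and cache their preview labels in `source_label_dict_live` / `target_label_dict_live` so that a re-upload can destroy the stale widget first. A minimal sketch of how a popup might wire them to per-row buttons; everything here except `update_webcam_source` / `update_webcam_target` (the popup layout, row count, and variable names) is illustrative, not taken from this diff:

# Hypothetical wiring for the webcam mapper popup.
map = [{} for _row in range(5)]  # one empty source/target mapping per row

for i in range(len(map)):
    source_btn = ctk.CTkButton(
        scrollable_frame,
        text=_("Select source image"),
        # the default-arg trick pins the current i, as the diff does with id=id
        command=lambda i=i: update_webcam_source(scrollable_frame, map, i),
    )
    source_btn.grid(row=i, column=0, padx=20, pady=10)

    target_btn = ctk.CTkButton(
        scrollable_frame,
        text=_("Select target image"),
        command=lambda i=i: update_webcam_target(scrollable_frame, map, i),
    )
    target_btn.grid(row=i, column=3, padx=20, pady=10)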
modules/utilities.py ADDED
@@ -0,0 +1,209 @@
+ import glob
+ import mimetypes
+ import os
+ import platform
+ import shutil
+ import ssl
+ import subprocess
+ import urllib.request
+ from pathlib import Path
+ from typing import List, Any
+ from tqdm import tqdm
+
+ import modules.globals
+
+ TEMP_FILE = "temp.mp4"
+ TEMP_DIRECTORY = "temp"
+
+ # monkey patch ssl for mac
+ if platform.system().lower() == "darwin":
+     ssl._create_default_https_context = ssl._create_unverified_context
+
+
+ def run_ffmpeg(args: List[str]) -> bool:
+     commands = [
+         "ffmpeg",
+         "-hide_banner",
+         "-hwaccel",
+         "auto",
+         "-loglevel",
+         modules.globals.log_level,
+     ]
+     commands.extend(args)
+     try:
+         subprocess.check_output(commands, stderr=subprocess.STDOUT)
+         return True
+     except Exception:
+         pass
+     return False
+
+
+ def detect_fps(target_path: str) -> float:
+     command = [
+         "ffprobe",
+         "-v",
+         "error",
+         "-select_streams",
+         "v:0",
+         "-show_entries",
+         "stream=r_frame_rate",
+         "-of",
+         "default=noprint_wrappers=1:nokey=1",
+         target_path,
+     ]
+     output = subprocess.check_output(command).decode().strip().split("/")
+     try:
+         numerator, denominator = map(int, output)
+         return numerator / denominator
+     except Exception:
+         pass
+     return 30.0
+
+
+ def extract_frames(target_path: str) -> None:
+     temp_directory_path = get_temp_directory_path(target_path)
+     run_ffmpeg(
+         [
+             "-i",
+             target_path,
+             "-pix_fmt",
+             "rgb24",
+             os.path.join(temp_directory_path, "%04d.png"),
+         ]
+     )
+
+
+ def create_video(target_path: str, fps: float = 30.0) -> None:
+     temp_output_path = get_temp_output_path(target_path)
+     temp_directory_path = get_temp_directory_path(target_path)
+     run_ffmpeg(
+         [
+             "-r",
+             str(fps),
+             "-i",
+             os.path.join(temp_directory_path, "%04d.png"),
+             "-c:v",
+             modules.globals.video_encoder,
+             "-crf",
+             str(modules.globals.video_quality),
+             "-pix_fmt",
+             "yuv420p",
+             "-vf",
+             "colorspace=bt709:iall=bt601-6-625:fast=1",
+             "-y",
+             temp_output_path,
+         ]
+     )
+
+
+ def restore_audio(target_path: str, output_path: str) -> None:
+     temp_output_path = get_temp_output_path(target_path)
+     done = run_ffmpeg(
+         [
+             "-i",
+             temp_output_path,
+             "-i",
+             target_path,
+             "-c:v",
+             "copy",
+             "-map",
+             "0:v:0",
+             "-map",
+             "1:a:0",
+             "-y",
+             output_path,
+         ]
+     )
+     if not done:
+         move_temp(target_path, output_path)
+
+
+ def get_temp_frame_paths(target_path: str) -> List[str]:
+     temp_directory_path = get_temp_directory_path(target_path)
+     return glob.glob(os.path.join(glob.escape(temp_directory_path), "*.png"))
+
+
+ def get_temp_directory_path(target_path: str) -> str:
+     target_name, _ = os.path.splitext(os.path.basename(target_path))
+     target_directory_path = os.path.dirname(target_path)
+     return os.path.join(target_directory_path, TEMP_DIRECTORY, target_name)
+
+
+ def get_temp_output_path(target_path: str) -> str:
+     temp_directory_path = get_temp_directory_path(target_path)
+     return os.path.join(temp_directory_path, TEMP_FILE)
+
+
+ def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Any:
+     if source_path and target_path:
+         source_name, _ = os.path.splitext(os.path.basename(source_path))
+         target_name, target_extension = os.path.splitext(os.path.basename(target_path))
+         if os.path.isdir(output_path):
+             return os.path.join(
+                 output_path, source_name + "-" + target_name + target_extension
+             )
+     return output_path
+
+
+ def create_temp(target_path: str) -> None:
+     temp_directory_path = get_temp_directory_path(target_path)
+     Path(temp_directory_path).mkdir(parents=True, exist_ok=True)
+
+
+ def move_temp(target_path: str, output_path: str) -> None:
+     temp_output_path = get_temp_output_path(target_path)
+     if os.path.isfile(temp_output_path):
+         if os.path.isfile(output_path):
+             os.remove(output_path)
+         shutil.move(temp_output_path, output_path)
+
+
+ def clean_temp(target_path: str) -> None:
+     temp_directory_path = get_temp_directory_path(target_path)
+     parent_directory_path = os.path.dirname(temp_directory_path)
+     if not modules.globals.keep_frames and os.path.isdir(temp_directory_path):
+         shutil.rmtree(temp_directory_path)
+     if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
+         os.rmdir(parent_directory_path)
+
+
+ def has_image_extension(image_path: str) -> bool:
+     return image_path.lower().endswith(("png", "jpg", "jpeg"))
+
+
+ def is_image(image_path: str) -> bool:
+     if image_path and os.path.isfile(image_path):
+         mimetype, _ = mimetypes.guess_type(image_path)
+         return bool(mimetype and mimetype.startswith("image/"))
+     return False
+
+
+ def is_video(video_path: str) -> bool:
+     if video_path and os.path.isfile(video_path):
+         mimetype, _ = mimetypes.guess_type(video_path)
+         return bool(mimetype and mimetype.startswith("video/"))
+     return False
+
+
+ def conditional_download(download_directory_path: str, urls: List[str]) -> None:
+     if not os.path.exists(download_directory_path):
+         os.makedirs(download_directory_path)
+     for url in urls:
+         download_file_path = os.path.join(
+             download_directory_path, os.path.basename(url)
+         )
+         if not os.path.exists(download_file_path):
+             request = urllib.request.urlopen(url)
+             total = int(request.headers.get("Content-Length", 0))
+             with tqdm(
+                 total=total,
+                 desc="Downloading",
+                 unit="B",
+                 unit_scale=True,
+                 unit_divisor=1024,
+             ) as progress:
+                 urllib.request.urlretrieve(
+                     url,
+                     download_file_path,
+                     reporthook=lambda count, block_size, total_size: progress.update(
+                         block_size
+                     ),
+                 )
+
+
+ def resolve_relative_path(path: str) -> str:
+     return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
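Taken together, these helpers form the frame-level video pipeline: extract frames into a per-video temp directory, process them, re-encode, then mux the original audio back. A minimal sketch of that flow, assuming a valid video at the hypothetical path target.mp4 and that per-frame processing happens between extraction and encoding:

from modules.utilities import (
    create_temp, extract_frames, detect_fps, create_video,
    restore_audio, clean_temp, get_temp_frame_paths,
)

target = "target.mp4"   # hypothetical input path
output = "output.mp4"   # hypothetical output path

create_temp(target)     # make temp/<name>/ next to the video
extract_frames(target)  # writes temp/<name>/0001.png, 0002.png, ...
for frame_path in get_temp_frame_paths(target):
    pass                # per-frame face processing would go here
create_video(target, fps=detect_fps(target))  # encode temp/<name>/temp.mp4
restore_audio(target, output)  # mux original audio; falls back to move_temp
clean_temp(target)             # honors modules.globals.keep_frames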
modules/video_capture.py ADDED
@@ -0,0 +1,94 @@
+ import cv2
+ import numpy as np
+ from typing import Optional, Tuple, Callable
+ import platform
+ import threading
+
+ # Only import Windows-specific library if on Windows
+ if platform.system() == "Windows":
+     from pygrabber.dshow_graph import FilterGraph
+
+
+ class VideoCapturer:
+     def __init__(self, device_index: int):
+         self.device_index = device_index
+         self.frame_callback = None
+         self._current_frame = None
+         self._frame_ready = threading.Event()
+         self.is_running = False
+         self.cap = None
+
+         # Initialize Windows-specific components if on Windows
+         if platform.system() == "Windows":
+             self.graph = FilterGraph()
+             # Verify device exists
+             devices = self.graph.get_input_devices()
+             if self.device_index >= len(devices):
+                 raise ValueError(
+                     f"Invalid device index {device_index}. Available devices: {len(devices)}"
+                 )
+
+     def start(self, width: int = 960, height: int = 540, fps: int = 60) -> bool:
+         """Initialize and start video capture"""
+         try:
+             if platform.system() == "Windows":
+                 # Windows-specific capture methods
+                 capture_methods = [
+                     (self.device_index, cv2.CAP_DSHOW),  # Try DirectShow first
+                     (self.device_index, cv2.CAP_ANY),  # Then try default backend
+                     (-1, cv2.CAP_ANY),  # Try -1 as fallback
+                     (0, cv2.CAP_ANY),  # Finally try 0 without specific backend
+                 ]
+
+                 for dev_id, backend in capture_methods:
+                     try:
+                         self.cap = cv2.VideoCapture(dev_id, backend)
+                         if self.cap.isOpened():
+                             break
+                         self.cap.release()
+                     except Exception:
+                         continue
+             else:
+                 # Unix-like systems (Linux/Mac) capture method
+                 self.cap = cv2.VideoCapture(self.device_index)
+
+             if not self.cap or not self.cap.isOpened():
+                 raise RuntimeError("Failed to open camera")
+
+             # Configure format
+             self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
+             self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
+             self.cap.set(cv2.CAP_PROP_FPS, fps)
+
+             self.is_running = True
+             return True
+
+         except Exception as e:
+             print(f"Failed to start capture: {str(e)}")
+             if self.cap:
+                 self.cap.release()
+             return False
+
+     def read(self) -> Tuple[bool, Optional[np.ndarray]]:
+         """Read a frame from the camera"""
+         if not self.is_running or self.cap is None:
+             return False, None
+
+         ret, frame = self.cap.read()
+         if ret:
+             self._current_frame = frame
+             if self.frame_callback:
+                 self.frame_callback(frame)
+             return True, frame
+         return False, None
+
+     def release(self) -> None:
+         """Stop capture and release resources"""
+         if self.is_running and self.cap is not None:
+             self.cap.release()
+             self.is_running = False
+             self.cap = None
+
+     def set_frame_callback(self, callback: Callable[[np.ndarray], None]) -> None:
+         """Set callback for frame processing"""
+         self.frame_callback = callback
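VideoCapturer wraps cv2.VideoCapture with Windows backend fallbacks and an optional per-frame callback. A minimal polling loop using only the methods defined above, assuming device index 0 exists; the preview window is illustrative:

import cv2
from modules.video_capture import VideoCapturer

capturer = VideoCapturer(0)  # raises ValueError on Windows if the device is absent
if capturer.start(width=960, height=540, fps=60):
    try:
        while True:
            ret, frame = capturer.read()
            if not ret:
                break
            cv2.imshow("preview", frame)  # throwaway display, not part of the repo
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        capturer.release()
        cv2.destroyAllWindows()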
mypi.ini ADDED
@@ -0,0 +1,7 @@
+ [mypy]
+ check_untyped_defs = True
+ disallow_any_generics = True
+ disallow_untyped_calls = True
+ disallow_untyped_defs = True
+ ignore_missing_imports = True
+ strict_optional = False
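Note that mypy only auto-discovers mypy.ini, .mypy.ini, pyproject.toml, or setup.cfg, so this file (named mypi.ini) has to be passed explicitly, e.g. `mypy --config-file mypi.ini modules/`. Under `disallow_untyped_defs = True`, every function needs full annotations; a minimal illustration of the before/after:

# Rejected: "Function is missing a type annotation" under disallow_untyped_defs.
def is_image(image_path):
    ...

# Accepted: fully annotated, matching the style used in modules/utilities.py.
def is_image(image_path: str) -> bool:
    ...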
requirements.txt ADDED
@@ -0,0 +1,21 @@
+ --extra-index-url https://download.pytorch.org/whl/cu118
+
+ numpy>=1.23.5,<2
+ typing-extensions>=4.8.0
+ opencv-python==4.10.0.84
+ cv2_enumerate_cameras==1.1.15
+ onnx==1.16.0
+ insightface==0.7.3
+ psutil==5.9.8
+ tk==0.1.0
+ customtkinter==5.2.2
+ pillow==11.1.0
+ torch==2.5.1+cu118; sys_platform != 'darwin'
+ torch==2.5.1; sys_platform == 'darwin'
+ torchvision==0.20.1; sys_platform != 'darwin'
+ torchvision==0.20.1; sys_platform == 'darwin'
+ onnxruntime-silicon==1.16.3; sys_platform == 'darwin' and platform_machine == 'arm64'
+ onnxruntime-gpu==1.17; sys_platform != 'darwin'
+ tensorflow; sys_platform != 'darwin'
+ opennsfw2==0.10.2
+ protobuf==4.23.2
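Since the non-darwin wheels pin the cu118 CUDA build of torch, a quick sanity check after installing (a throwaway snippet, not part of the repo) is to confirm the GPU build is actually in use:

import torch

# Expect "2.5.1+cu118" and True on a CUDA-capable Windows/Linux machine;
# on macOS the plain 2.5.1 wheel reports False here, since CUDA is unavailable.
print(torch.__version__)
print(torch.cuda.is_available())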
run-cuda.bat ADDED
@@ -0,0 +1 @@
+ python run.py --execution-provider cuda
run-directml.bat ADDED
@@ -0,0 +1 @@
+ python run.py --execution-provider dml
run.py ADDED
@@ -0,0 +1,6 @@
+ #!/usr/bin/env python3
+
+ from modules import core
+
+ if __name__ == '__main__':
+     core.run()