Upload fakevace_14B_2_2.json
finetunes/fakevace_14B_2_2.json
ADDED
@@ -0,0 +1,24 @@
+{
+    "model": {
+        "name": "Wan2.2 Fake Vace Boneless 14B",
+        "architecture": "vace_14B",
+        "description": "This finetune has been created by JohnDopamine. It is an attempt to create a working Vace for Wan 2.2. For fast generations it is combined with the Loras of FusioniX (the weight of the Detail Enhancer Lora has been reduced to improve identity preservation).",
+        "URLs": [
+            "https://huggingface.co/CCP6/FakeVace2.2/resolve/main/Fake-Vace2.2-High.fp16.safetensors"
+        ],
+        "URLs2": [
+            "https://huggingface.co/CCP6/FakeVace2.2/resolve/main/Fake-Vace2.2-Low.fp16.safetensors"
+        ],
+        "loras": [
+        ],
+        "loras_multipliers": [
+        ],
+        "group": "wan2_2",
+        "auto_quantize": true
+    },
+    "num_inference_steps": 10,
+    "guidance_scale": 1,
+    "guidance2_scale": 1,
+    "flow_shift": 2,
+    "switch_threshold": 875
+}
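For reference, a minimal sketch of how a finetune definition like the one added above could be read. This is an illustration based only on the JSON shown here, not the actual loader of any project: the `load_finetune` helper and the key list are assumptions. "URLs" appears to point at the high-noise checkpoint and "URLs2" at the low-noise one, with the top-level keys acting as sampling defaults.

import json
from pathlib import Path

def load_finetune(path):
    # Hypothetical helper: parses the finetune JSON and splits it into
    # model metadata plus top-level sampling overrides.
    spec = json.loads(Path(path).read_text())
    model = spec["model"]

    # "URLs" holds the high-noise checkpoint(s), "URLs2" the low-noise
    # one(s); "switch_threshold" presumably governs where generation
    # hands over from one to the other during denoising.
    high_noise_urls = model["URLs"]
    low_noise_urls = model.get("URLs2", [])

    # Collect the sampling defaults baked into this finetune.
    overrides = {
        k: spec[k]
        for k in ("num_inference_steps", "guidance_scale",
                  "guidance2_scale", "flow_shift", "switch_threshold")
        if k in spec
    }
    return model, high_noise_urls, low_noise_urls, overrides

model, high, low, overrides = load_finetune("finetunes/fakevace_14B_2_2.json")
print(model["name"], overrides)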