Spaces:
Running
Running
Yaron Koresh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -42,10 +42,10 @@ root.addHandler(handler)
|
|
42 |
|
43 |
if torch.cuda.is_available():
|
44 |
device = "cuda"
|
45 |
-
dtype = torch.
|
46 |
else:
|
47 |
device = "cpu"
|
48 |
-
dtype = torch.
|
49 |
|
50 |
base = "emilianJR/epiCRealism"
|
51 |
|
@@ -55,10 +55,10 @@ base = "emilianJR/epiCRealism"
|
|
55 |
|
56 |
# precision data
|
57 |
|
58 |
-
seq=
|
59 |
fps=20
|
60 |
-
width=
|
61 |
-
height=
|
62 |
image_steps=40
|
63 |
video_steps=20
|
64 |
accu=7
|
@@ -110,12 +110,12 @@ function custom(){
|
|
110 |
image_pipe = StableDiffusionPipeline.from_pretrained(base, torch_dtype=dtype, safety_checker=None).to(device)
|
111 |
video_pipe = CogVideoXImageToVideoPipeline.from_pretrained(
|
112 |
"THUDM/CogVideoX-5b-I2V",
|
113 |
-
torch_dtype=
|
114 |
).to(device)
|
115 |
|
116 |
-
video_pipe.vae.enable_tiling()
|
117 |
-
video_pipe.vae.enable_slicing()
|
118 |
-
video_pipe.enable_model_cpu_offload()
|
119 |
|
120 |
# functionality
|
121 |
|
@@ -199,7 +199,7 @@ def pipe_generate(img,p1,p2,time,title):
|
|
199 |
|
200 |
return video_pipe(
|
201 |
prompt=p1,
|
202 |
-
negative_prompt=p2,
|
203 |
image=img,
|
204 |
num_inference_steps=video_steps,
|
205 |
guidance_scale=accu,
|
@@ -218,12 +218,12 @@ def handle_generate(*_inp):
|
|
218 |
if inp[2] != "":
|
219 |
inp[2] = ", related to: " + inp[2]
|
220 |
|
221 |
-
inp[2] = f"
|
222 |
|
223 |
if inp[1] != "":
|
224 |
inp[1] = ", related to: " + inp[1]
|
225 |
|
226 |
-
inp[1] = f'photographed, realistic,
|
227 |
|
228 |
print(f"""
|
229 |
|
|
|
42 |
|
43 |
if torch.cuda.is_available():
|
44 |
device = "cuda"
|
45 |
+
dtype = torch.float32
|
46 |
else:
|
47 |
device = "cpu"
|
48 |
+
dtype = torch.float32
|
49 |
|
50 |
base = "emilianJR/epiCRealism"
|
51 |
|
|
|
55 |
|
56 |
# precision data
|
57 |
|
58 |
+
seq=256
|
59 |
fps=20
|
60 |
+
width=1280
|
61 |
+
height=720
|
62 |
image_steps=40
|
63 |
video_steps=20
|
64 |
accu=7
|
|
|
110 |
image_pipe = StableDiffusionPipeline.from_pretrained(base, torch_dtype=dtype, safety_checker=None).to(device)
|
111 |
video_pipe = CogVideoXImageToVideoPipeline.from_pretrained(
|
112 |
"THUDM/CogVideoX-5b-I2V",
|
113 |
+
torch_dtype=dtype
|
114 |
).to(device)
|
115 |
|
116 |
+
#video_pipe.vae.enable_tiling()
|
117 |
+
#video_pipe.vae.enable_slicing()
|
118 |
+
#video_pipe.enable_model_cpu_offload()
|
119 |
|
120 |
# functionality
|
121 |
|
|
|
199 |
|
200 |
return video_pipe(
|
201 |
prompt=p1,
|
202 |
+
negative_prompt=p2.replace("textual, ",""),
|
203 |
image=img,
|
204 |
num_inference_steps=video_steps,
|
205 |
guidance_scale=accu,
|
|
|
218 |
if inp[2] != "":
|
219 |
inp[2] = ", related to: " + inp[2]
|
220 |
|
221 |
+
inp[2] = f"textual, fake, unreal, pixelated, deformed, semi-realistic, cgi, 3d, sketch, cartoon, drawing, anime, cropped, out of frame, low quality, rendering artifacts, ugly, duplicated, weird, mutation, blurry, bad anatomy, unproportional, cloned face, disfigured, gross, malformed, missing parts, extra parts, fused parts, too many parts{inp[2]}"
|
222 |
|
223 |
if inp[1] != "":
|
224 |
inp[1] = ", related to: " + inp[1]
|
225 |
|
226 |
+
inp[1] = f'photographed, filmed, realistic, normal, logical, genuine, authentic, reasonable, deep field, natural, best quality, masterpiece, highly detailed, focused{inp[1]}'
|
227 |
|
228 |
print(f"""
|
229 |
|