Yaron Koresh committed on
Commit
b632387
verified
1 Parent(s): b80194f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -26
app.py CHANGED
@@ -8,22 +8,25 @@ import requests
8
  import gradio as gr
9
  import numpy as np
10
  from lxml.html import fromstring
11
- from diffusers import AutoPipelineForText2Image
12
- #from tempfile import NamedTemporaryFile
13
  from pathos.threading import ThreadPool as Pool
14
-
15
- #model_id = "runwayml/stable-diffusion-v1-5"
16
- model_id = "kandinsky-community/kandinsky-3"
17
- #model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
18
 
19
  device = "cuda" if torch.cuda.is_available() else "cpu"
20
if torch.cuda.is_available():
    # NOTE(review): the original called torch.cuda.max_memory_allocated(device=device)
    # here, but that function only *queries* the peak allocated memory — calling it
    # for its side effect is a no-op, so it has been removed.
    # On GPU, load in half precision (fp16 variant) to halve memory use.
    pipe = AutoPipelineForText2Image.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
        token=os.getenv('hf_token'),
    )
else:
    # CPU fallback: keep default (full) precision — fp16 is poorly supported on CPU.
    pipe = AutoPipelineForText2Image.from_pretrained(
        model_id,
        use_safetensors=True,
        token=os.getenv('hf_token'),
    )
# Move the pipeline once, after construction (previously duplicated in both branches).
pipe = pipe.to(device)
 
 
 
 
 
27
 
28
  def translate(text,lang):
29
 
@@ -90,21 +93,21 @@ def Piper(_do,_dont):
90
  height=256,
91
  width=768,
92
  negative_prompt=_dont,
93
- num_inference_steps=50,
94
  guidance_scale=7
95
  )
96
 
97
def infer(prompt_en, prompt2_en):
    """Generate one image from English prompts and save it as a PNG.

    An empty positive prompt falls back to a generic 'photograph' prompt; the
    negative prompt is always the fixed quality block-list, optionally prefixed
    with the caller's own avoid-terms.

    Returns the saved file's name (random 12-character stem + ".png").
    """
    name = generate_random_string(12) + ".png"
    _do = 'photograph' if prompt_en == "" else f'photographed { prompt_en }'
    base_dont = 'complex scene, ugly human body, partial human body, smooth texture, fictional content, blurred content, amputated human body, distorted palm fingers, missing legs, unreal eyes, squinting eyes, text anywhere, prints anywhere'
    _dont = base_dont if prompt2_en == "" else f'{prompt2_en} anywhere, ' + base_dont
    # PIL's save() returns None, so there is nothing useful to bind here.
    Piper(_do, _dont).images[0].save(name)
    return name
109
 
110
  css="""
@@ -146,21 +149,16 @@ function custom(){
146
  }
147
  """
148
 
149
- if torch.cuda.is_available():
150
- power_device = "GPU"
151
- else:
152
- power_device = "CPU"
153
-
154
  with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
155
  result = []
156
  with gr.Column(elem_id="col-container"):
157
  gr.Markdown(f"""
158
- # 诪讞讜诇诇 转诪讜谞讜转 {power_device}
159
  """)
160
  with gr.Row():
161
  prompt = gr.Textbox(
162
  elem_id="prompt",
163
- placeholder="诪讛 *讻谉* 诇讛讜住讬祝",
164
  container=False,
165
  rtl=True,
166
  max_lines=1
@@ -168,13 +166,13 @@ with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
168
  with gr.Row():
169
  prompt2 = gr.Textbox(
170
  elem_id="prompt2",
171
- placeholder="诪讛 *诇讗* 诇讛讜住讬祝",
172
  container=False,
173
  rtl=True,
174
  max_lines=1
175
  )
176
  with gr.Row():
177
- run_button = gr.Button("讛转讞诇讛",elem_classes="btn",scale=0)
178
  with gr.Row():
179
  result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
180
  with gr.Row():
 
8
  import gradio as gr
9
  import numpy as np
10
  from lxml.html import fromstring
 
 
11
  from pathos.threading import ThreadPool as Pool
12
+ from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
13
+ from diffusers.utils import export_to_gif
14
+ from huggingface_hub import hf_hub_download
15
+ from safetensors.torch import load_file
16
 
17
  device = "cuda" if torch.cuda.is_available() else "cpu"
18
dtype = torch.float16

# AnimateDiff-Lightning distilled motion adapter: the checkpoint name encodes
# the number of inference steps it was distilled for.
step = 8
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
# NOTE(review): AnimateDiffPipeline requires a Stable Diffusion 1.5-family UNet
# base model; FLUX.1-dev is a flow-matching transformer and cannot be combined
# with a MotionAdapter, so the SD1.5 base recommended by the adapter's model
# card is used instead.
base = "emilianJR/epiCRealism"

adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
# Lightning checkpoints are trained with "trailing" timestep spacing and a
# linear beta schedule; the scheduler config must match or quality degrades.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)
30
 
31
  def translate(text,lang):
32
 
 
93
  height=256,
94
  width=768,
95
  negative_prompt=_dont,
96
+ num_inference_steps=step,
97
  guidance_scale=7
98
  )
99
 
100
def infer(prompt_en, prompt2_en):
    """Generate an animated GIF from an English prompt pair.

    Args:
        prompt_en: what to depict; an empty string falls back to a generic
            'film' prompt.
        prompt2_en: what to avoid; prepended to a fixed negative-prompt
            block-list.

    Returns:
        The name of the GIF file written to the working directory.
    """
    # Fix: the output is written by export_to_gif, so the file must carry a
    # ".gif" extension — the previous ".png" name mislabeled the animated output.
    name = generate_random_string(12) + ".gif"
    if prompt_en == "":
        _do = 'film'
    else:
        _do = f'filmed { prompt_en }'
    if prompt2_en == "":
        _dont = 'complex scene, ugly human body, partial human body, smooth texture, fictional content, blurred content, amputated human body, distorted palm fingers, missing legs, unreal eyes, squinting eyes, text anywhere, prints anywhere'
    else:
        _dont = f'{prompt2_en} anywhere, complex scene, ugly human body, partial human body, smooth texture, fictional content, blurred content, amputated human body, distorted palm fingers, missing legs, unreal eyes, squinting eyes, text anywhere, prints anywhere'
    # Piper runs the AnimateDiff pipeline; .frames[0] is the list of PIL frames
    # for the first (only) generated video.
    export_to_gif(Piper(_do, _dont).frames[0], name)
    return name
112
 
113
  css="""
 
149
  }
150
  """
151
 
 
 
 
 
 
152
  with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
153
  result = []
154
  with gr.Column(elem_id="col-container"):
155
  gr.Markdown(f"""
156
+ # GIF AI
157
  """)
158
  with gr.Row():
159
  prompt = gr.Textbox(
160
  elem_id="prompt",
161
+ placeholder="WHAT TO CREATE...",
162
  container=False,
163
  rtl=True,
164
  max_lines=1
 
166
  with gr.Row():
167
  prompt2 = gr.Textbox(
168
  elem_id="prompt2",
169
+ placeholder="WHAT TO AVOID...",
170
  container=False,
171
  rtl=True,
172
  max_lines=1
173
  )
174
  with gr.Row():
175
+ run_button = gr.Button("START",elem_classes="btn",scale=0)
176
  with gr.Row():
177
  result.append(gr.Image(interactive=False,elem_classes="image-container", label="Result", show_label=False, type='filepath', show_share_button=False))
178
  with gr.Row():