Laughify committed on
Commit 6d48f70 · 1 Parent(s): 15af579

Update app.py

Files changed (1)
  1. app.py +47 -12
app.py CHANGED
@@ -1,18 +1,53 @@
- git clone https://huggingface.co/spaces/Laughify/Among_Us_Logic_AI_Generator
-
  import gradio as gr

- def greet(name):
-     return "Hello " + name + "!!"

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()

- git add app.py
- $git commit -m "Add application file"
- $git push

- git lfs install
- git clone https://huggingface.co/Laughify/among-us-logic-ai-characters

- share=True

  import gradio as gr
+ import torch
+ from torch import autocast
+ from diffusers import StableDiffusionPipeline
+ import random
+
+ model = "Laughify/among-us-logic-ai-characters"
+ device = "cpu"
+
+ pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch.float32)
+ pipe = pipe.to(device)
+
+ block = gr.Blocks(css=".container { max-width: 800px; margin: auto; }")

+ def infer(prompt, width, height, nums, steps, guidance_scale, seed):
+     print(prompt)
+     print(width, height, nums, steps, guidance_scale, seed)

+     if prompt is not None and prompt != "":
+         if seed is None or seed == '' or seed == -1:
+             seed = int(random.randrange(4294967294))
+         generator = torch.Generator(device).manual_seed(seed)
+         images = pipe([prompt] * nums, height=height, width=width, num_inference_steps=steps, generator=generator, guidance_scale=guidance_scale)["sample"]
+         return images

+ # with block as demo:
+ def run():
+     _app = gr.Interface(
+         fn=infer,
+         title="Among Us Logic Character Generator",
+         inputs=[
+             gr.Textbox(label="prompt"),
+             gr.Slider(512, 1024, 512, step=64, label="width"),
+             gr.Slider(512, 1024, 512, step=64, label="height"),
+             gr.Slider(1, 4, 1, step=1, label="Number of Images"),
+             gr.Slider(10, 150, step=1, value=50,
+                       label="num_inference_steps:\n"
+                             "The number of denoising steps. More denoising steps usually result in a higher quality image, but will slow down inference."),
+             gr.Slider(0, 20, 7.5, step=0.5,
+                       label="guidance_scale:\n" +
+                             "A higher guidance scale encourages generation of images that are closely related to the text prompt, usually at the expense of lower image quality."),
+             gr.Textbox(label="Random seed",
+                        placeholder="Random Seed",
+                        lines=1),
+         ],
+         outputs=[
+             gr.Gallery(label="Generated images")
+         ])

+     return _app

+ app = run()
+ app.launch(debug=True)
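
Note on the diffusers call in infer(): the ["sample"] lookup matches the dict returned by the early diffusers releases this commit targets; current releases return a StableDiffusionPipelineOutput and the generated images live on .images. A minimal sketch of the equivalent pipeline call on a recent diffusers install (the prompt string is only an illustration, not taken from the Space):

import torch
from diffusers import StableDiffusionPipeline

# Same checkpoint and CPU setup as in the updated app.py.
pipe = StableDiffusionPipeline.from_pretrained(
    "Laughify/among-us-logic-ai-characters", torch_dtype=torch.float32
).to("cpu")

generator = torch.Generator("cpu").manual_seed(42)
images = pipe(
    ["crewmate in a red space suit"],  # illustrative prompt
    height=512,
    width=512,
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=generator,
).images  # was ["sample"] in the early diffusers versions used here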