fffiloni committed
Commit a91f570 · 1 Parent(s): 072c110

Update app.py

Files changed (1)
  1. app.py (+28, -9)
app.py CHANGED

@@ -5,6 +5,7 @@ from gradio_client import Client
 title="Prompt Converter"
 
 description="""
+<h1>Prompt Converter</h1>
 <p style="text-align:center;">
 Stable Diffusion 2 uses OpenCLIP ViT-H model trained on LAION dataset so it knows different things than the OpenAI ViT-L we're all used to prompting.
 <br />This demo converts a v1.x stable diffusion prompt to a stable diffusion 2.x prompt,
@@ -35,17 +36,35 @@ def infer(prompt, mode):
     img = get_images(prompt)
     result = get_new_prompt(img, mode)
     return result[0]
+
+with gr.Blocks() as demo:
 
-prompt_input = gr.Textbox(lines=4, label="Input v1.x Stable Diffusion prompt")
-mode_input = gr.Radio(['best', 'classic', 'fast'], label='mode', value='fast')
-prompt_output = gr.Textbox(lines=4, label="Converted v2.x Stable Diffusion prompt")
+    gr.HTML(description)
+    with gr.Row():
+        with gr.Column():
+            prompt_input = gr.Textbox(lines=4, label="Input v1.x Stable Diffusion prompt")
+            mode_input = gr.Radio(['best', 'classic', 'fast'], label='mode', value='fast')
+            submit_btn = gr.Button("Submit")
+
+        prompt_output = gr.Textbox(lines=4, label="Converted v2.x Stable Diffusion prompt")
+
+    submit_btn.click(
+        fn=infer, inputs=[prompt_input,mode_input], outputs=[prompt_output]
+    )
 
-examples=[
-["girl with steampunk weapons and uniform, serious, finely detailed, made by wlop, boichi, ilya kuvshinov, full body portrait, illustration, grass, sunny, sky, anime, side view, perfect anime face, detailed face, zoomed out, smooth","fast"],
-["a yellow cockatiel riding on the rings of saturn wearing a propeller hat, fantasy, intricate, elegant, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha ","classic"],
-["painting, view from inside edward hopper's painting nighthawks, of a group of werebears robbing a bank, foggy ","best"]
-]
+    examples=[
+        ["girl with steampunk weapons and uniform, serious, finely detailed, made by wlop, boichi, ilya kuvshinov, full body portrait, illustration, grass, sunny, sky, anime, side view, perfect anime face, detailed face, zoomed out, smooth","fast"],
+        ["a yellow cockatiel riding on the rings of saturn wearing a propeller hat, fantasy, intricate, elegant, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha ","classic"],
+        ["painting, view from inside edward hopper's painting nighthawks, of a group of werebears robbing a bank, foggy ","best"]
+    ]
+
+    gr.Examples(
+        examples = examples,
+        fn = infer,
+        inputs=[prompt_input,mode_input],
+        outputs=[prompt_output]
+    )
 
-demo=gr.Interface(fn=infer, inputs=[prompt_input,mode_input], outputs=[prompt_output],title=title,description=description,examples=examples)
+#demo=gr.Interface(fn=infer, inputs=[prompt_input,mode_input], outputs=[prompt_output],title=title,description=description,examples=examples)
 demo.queue(max_size=10,concurrency_count=20)
 demo.launch(enable_queue=True)
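
For context, here is a minimal, self-contained sketch of roughly what app.py looks like after this commit. The layout mirrors the diff, but the `get_images` / `get_new_prompt` helpers are not shown in the hunk (the real app appears to reach other Spaces through `gradio_client.Client`, per the import in the hunk header), so they are stubbed with placeholders here; the stubs and the shortened description are assumptions. Written against the Gradio 3.x API the file targets.

```python
import gradio as gr
# The real app.py also imports gradio_client.Client (see the hunk header) and uses it
# inside get_images / get_new_prompt; those helpers are stubbed so the layout runs standalone.

description = """
<h1>Prompt Converter</h1>
<p style="text-align:center;">
This demo converts a v1.x Stable Diffusion prompt to a v2.x prompt.
</p>
"""

def get_images(prompt):
    # Placeholder: the real helper generates an image for the prompt via a remote Space.
    return prompt

def get_new_prompt(img, mode):
    # Placeholder: the real helper turns the image back into a prompt and returns it in a list.
    return [f"converted ({mode}): {img}"]

def infer(prompt, mode):
    img = get_images(prompt)
    result = get_new_prompt(img, mode)
    return result[0]

with gr.Blocks() as demo:
    gr.HTML(description)
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(lines=4, label="Input v1.x Stable Diffusion prompt")
            mode_input = gr.Radio(['best', 'classic', 'fast'], label='mode', value='fast')
            submit_btn = gr.Button("Submit")

        prompt_output = gr.Textbox(lines=4, label="Converted v2.x Stable Diffusion prompt")

    # Explicit event wiring replaces the implicit submit of gr.Interface.
    submit_btn.click(fn=infer, inputs=[prompt_input, mode_input], outputs=[prompt_output])

    gr.Examples(
        examples=[["a yellow cockatiel riding on the rings of saturn wearing a propeller hat", "classic"]],
        fn=infer,
        inputs=[prompt_input, mode_input],
        outputs=[prompt_output],
    )

demo.queue(max_size=10, concurrency_count=20)  # Gradio 3.x queue signature, as in the diff
demo.launch()
```

The change in this commit is essentially a move from the all-in-one gr.Interface constructor to an explicit gr.Blocks layout, which is why the Submit button, the .click() wiring, and gr.Examples now appear as separate pieces while the old gr.Interface call is left commented out.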