Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,23 +1,22 @@
|
|
1 |
import gradio as gr
|
2 |
-
import os
|
3 |
import sys
|
4 |
from pathlib import Path
|
5 |
from PIL import Image
|
6 |
import re
|
7 |
-
from PIL import Image
|
8 |
import numpy as np
|
9 |
|
10 |
-
#
|
11 |
if not os.path.exists('saved_prompts'):
|
12 |
os.makedirs('saved_prompts')
|
13 |
-
|
14 |
if not os.path.exists('saved_images'):
|
15 |
os.makedirs('saved_images')
|
16 |
-
|
17 |
-
#
|
18 |
def generate_safe_filename(text):
    """Map *text* to a filesystem-safe stem: every character outside
    [a-zA-Z0-9] is replaced with an underscore."""
    sanitized = re.sub('[^a-zA-Z0-9]', '_', text)
    return sanitized
|
20 |
-
|
|
|
21 |
def load_models_from_file(filename):
    """Read one model name per line from *filename* and return them as a list.

    Blank or whitespace-only lines (e.g. a trailing newline) are skipped:
    the original kept them as "" entries, which would later make
    `gr.Interface.load(f"models/{model}")` fail on an empty model name.
    """
    with open(filename, 'r') as f:
        return [line.strip() for line in f if line.strip()]
|
@@ -25,165 +24,55 @@ def load_models_from_file(filename):
|
|
25 |
if __name__ == "__main__":
|
26 |
models = load_models_from_file('models.txt')
|
27 |
print(models)
|
28 |
-
#removed to removed.txt
|
29 |
-
|
30 |
-
current_model = models[0]
|
31 |
|
32 |
-
|
|
|
33 |
models2 = [gr.Interface.load(f"models/{model}", live=True, preprocess=False) for model in models]
|
34 |
|
|
|
|
|
|
|
|
|
35 |
|
36 |
-
|
37 |
-
def text_it1(inputs,text_gen1=text_gen1):
    """Run the prompt-extension generator on *inputs* and return its output.

    NOTE(review): `text_gen1` must exist at module level when this `def`
    executes, because it is captured as the default argument value.
    """
    return text_gen1(inputs)
|
40 |
-
|
41 |
def set_model(current_model):
    """Resolve the dropdown's index selection (the Dropdown uses
    type="index") into a model name and relabel the output image."""
    chosen_name = models[current_model]
    return gr.update(label=str(chosen_name))
|
44 |
|
45 |
-
#
|
46 |
def list_saved_prompts_and_images():
    """Build an HTML listing of saved prompts that have a matching image.

    A prompt file `saved_prompts/<stem>.txt` is listed only when
    `saved_images/<stem>.png` exists.  Returns the listing as an HTML string.
    """
    saved_prompts = os.listdir('saved_prompts')
    # Set membership is O(1) per lookup instead of scanning a list per prompt.
    saved_images = set(os.listdir('saved_images'))

    html_str = "<h2>Saved Prompts and Images:</h2><ul>"
    for prompt_file in saved_prompts:
        # splitext handles any extension; the original `[:-4]` chopped four
        # characters unconditionally and broke on non-".txt" names.
        stem = os.path.splitext(prompt_file)[0]
        image_file = f"{stem}.png"
        if image_file in saved_images:
            html_str += f'<li>Prompt: {stem} | <a href="saved_images/{image_file}" download>Download Image</a></li>'
    html_str += "</ul>"
    return html_str
|
58 |
|
59 |
-
#
|
60 |
def send_it1(inputs, model_choice):
    """Generate an image from *inputs* with the selected model and persist both.

    Parameters:
        inputs: the prompt text (a string — `run.click` passes the Textbox value).
        model_choice: integer index into `models2` (Dropdown uses type="index").

    Writes the prompt to saved_prompts/<safe>.txt and, when the model returns
    image data, the image to saved_images/<safe>.png.  Returns the raw model
    output so Gradio can display it.
    """
    proc1 = models2[model_choice]
    output1 = proc1(inputs)

    # BUG FIX: the original used `inputs[0]`, i.e. the FIRST CHARACTER of the
    # prompt string, so all prompts starting with the same letter overwrote
    # the same files and the saved prompt text was a single character.
    safe_filename = generate_safe_filename(inputs)
    image_path = f"saved_images/{safe_filename}.png"
    prompt_path = f"saved_prompts/{safe_filename}.txt"

    with open(prompt_path, 'w') as f:
        f.write(inputs)

    # Persist the image according to the type the model handed back.
    if isinstance(output1, np.ndarray):  # If it's a numpy array
        Image.fromarray(np.uint8(output1)).save(image_path)
    elif isinstance(output1, Image.Image):  # If it's already a PIL Image
        output1.save(image_path)
    elif isinstance(output1, str):
        # Some backends return a file path / error string; nothing to save.
        print(f"Warning: output1 is a string. Cannot save as image. Value: {output1}")
    else:
        print(f"Warning: Unexpected type {type(output1)} for output1.")

    # NOTE(review): the original called `saved_output.update(...)` here;
    # calling .update() on a component instance outside event wiring does not
    # refresh the UI in Gradio, so that dead call was removed.
    return output1
|
86 |
|
|
|
|
|
|
|
87 |
|
88 |
-
|
89 |
-
# Empty stylesheet kept as a named constant so styles can be added later.
css = """"""

with gr.Blocks(css=css) as myface:
    # Static head metadata so the embedded Space unfurls nicely when shared.
    gr.HTML("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="twitter:card" content="player"/>
<meta name="twitter:site" content=""/>
<meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
<meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
<meta name="twitter:player:width" content="100%"/>
<meta name="twitter:player:height" content="600"/>
<meta property="og:title" content="Embedded Live Viewer"/>
<meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
<meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
<!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
</head>
</html>
""")

    # BUG FIX: the original bound TWO components to `saved_output`; the second
    # passed the HTML listing as `label` and an invalid `live=` kwarg.  A single
    # panel, seeded with the current listing, replaces both.
    with gr.Row():
        with gr.Column(scale=100):
            saved_output = gr.HTML(value=list_saved_prompts_and_images(),
                                   label="Saved Prompts and Images")

    with gr.Row():
        with gr.Tab("Title"):
            gr.HTML("""<title>Prompt to Generate Image</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
<h1>Enter a Prompt in Textbox then click Generate Image</h1>""")

        with gr.Tab("Tools"):
            # NOTE(review): original indentation was lost in the diff; the
            # View/Draw/Text/Color Picker tabs are assumed nested under Tools.
            with gr.Tab("View"):
                with gr.Row():
                    with gr.Column(style="width=50%, height=70%"):
                        gr.Pil(label="Crop")
                    with gr.Column(style="width=50%, height=70%"):
                        gr.Pil(label="Crop")

            with gr.Tab("Draw"):
                with gr.Column(style="width=50%, height=70%"):
                    gr.Pil(label="Crop")
                with gr.Column(style="width=50%, height=70%"):
                    gr.Pil(label="Draw")
                gr.ImagePaint(label="Draw")

            with gr.Tab("Text"):
                with gr.Row():
                    with gr.Column(scale=50):
                        gr.Textbox(label="", lines=8, interactive=True)
                    with gr.Column(scale=50):
                        gr.Textbox(label="", lines=8, interactive=True)

            with gr.Tab("Color Picker"):
                with gr.Row():
                    with gr.Column(scale=50):
                        gr.ColorPicker(label="Color", interactive=True)
                    with gr.Column(scale=50):
                        gr.ImagePaint(label="Draw", interactive=True)

    with gr.Row():
        with gr.Column(scale=100):
            magic1 = gr.Textbox(lines=4)
            run = gr.Button("Generate Image")

    with gr.Row():
        with gr.Column(scale=100):
            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)

    with gr.Row():
        with gr.Column(style="width=800px"):
            output1 = gr.Image(label=(f"{current_model}"))
            # BUG FIX: the original ran isinstance(...)/.save(...) checks on
            # `output1` HERE, at UI-build time, where `output1` is a Gradio
            # component and `image_path` is undefined.  That dead code is
            # removed; saving happens inside send_it1().

    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Prompt Idea", lines=2)
            use_short = gr.Button("Use Short Prompt")
            see_prompts = gr.Button("Extend Idea")

    def short_prompt(inputs):
        """Pass the user's short prompt through unchanged."""
        return (inputs)

    # Wire UI events to their handlers.
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])

myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)
|
|
|
1 |
import gradio as gr
|
2 |
+
import os
|
3 |
import sys
|
4 |
from pathlib import Path
|
5 |
from PIL import Image
|
6 |
import re
|
|
|
7 |
import numpy as np
|
8 |
|
9 |
+
# Create the output directories on startup.  exist_ok=True makes this
# idempotent and race-free (the original exists()/makedirs() pair could
# race with another process and raise FileExistsError).
os.makedirs('saved_prompts', exist_ok=True)
os.makedirs('saved_images', exist_ok=True)
|
14 |
+
|
15 |
+
# Function to generate a safe filename
def generate_safe_filename(text, max_length=200):
    """Turn *text* into a filesystem-safe filename stem.

    Every character outside [a-zA-Z0-9] becomes '_'.  The result is
    truncated to *max_length* characters because prompts can be arbitrarily
    long while most filesystems cap a filename at 255 bytes — the unbounded
    original could raise OSError when the file was created.
    """
    return re.sub('[^a-zA-Z0-9]', '_', text)[:max_length]
|
18 |
+
|
19 |
+
# Function to load models from a text file
def load_models_from_file(filename):
    """Read one model name per line from *filename* and return them as a list.

    Blank or whitespace-only lines (e.g. a trailing newline) are skipped:
    the original kept them as "" entries, which would later make
    `gr.Interface.load(f"models/{model}")` fail on an empty model name.
    """
    with open(filename, 'r') as f:
        return [line.strip() for line in f if line.strip()]
|
|
|
24 |
if __name__ == "__main__":
|
25 |
models = load_models_from_file('models.txt')
|
26 |
print(models)
|
|
|
|
|
|
|
27 |
|
28 |
+
current_model = models[0]
|
29 |
+
text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
|
30 |
models2 = [gr.Interface.load(f"models/{model}", live=True, preprocess=False) for model in models]
|
31 |
|
32 |
+
# Function to trigger text generation
def text_it1(inputs, text_gen1=text_gen1):
    """Expand a short prompt idea via the MagicPrompt generator (captured as
    the default argument at definition time) and return its output."""
    return text_gen1(inputs)
|
36 |
|
37 |
+
# Function to set the current model
def set_model(current_model):
    """Resolve the dropdown's index selection (the Dropdown uses
    type="index") into a model name and relabel the output image."""
    chosen_name = models[current_model]
    return gr.update(label=str(chosen_name))
|
41 |
|
42 |
+
# Function to list saved prompts and images
def list_saved_prompts_and_images():
    """Build an HTML listing of saved prompts that have a matching image.

    A prompt file `saved_prompts/<stem>.txt` is listed only when
    `saved_images/<stem>.png` exists.  Returns the listing as an HTML string.
    """
    saved_prompts = os.listdir('saved_prompts')
    # Set membership is O(1) per lookup instead of scanning a list per prompt.
    saved_images = set(os.listdir('saved_images'))

    html_str = "<h2>Saved Prompts and Images:</h2><ul>"
    for prompt_file in saved_prompts:
        # splitext handles any extension; the original `[:-4]` chopped four
        # characters unconditionally and broke on non-".txt" names.
        stem = os.path.splitext(prompt_file)[0]
        image_file = f"{stem}.png"
        if image_file in saved_images:
            html_str += f'<li>Prompt: {stem} | <a href="saved_images/{image_file}" download>Download Image</a></li>'
    html_str += "</ul>"
    return html_str
|
53 |
|
54 |
+
# Function to handle image generation and saving
def send_it1(inputs, model_choice):
    """Generate an image from *inputs* with the selected model and persist both.

    Parameters:
        inputs: the prompt text (a string — `run.click` passes the Textbox value).
        model_choice: integer index into `models2` (Dropdown uses type="index").

    Writes the prompt to saved_prompts/<safe>.txt and, when the model returns
    image data, the image to saved_images/<safe>.png.  Returns the raw model
    output so Gradio can display it.
    """
    proc1 = models2[model_choice]
    output1 = proc1(inputs)

    # BUG FIX: the original used `inputs[0]`, i.e. the FIRST CHARACTER of the
    # prompt string, so all prompts starting with the same letter overwrote
    # the same files and the saved prompt text was a single character.
    safe_filename = generate_safe_filename(inputs)
    image_path = f"saved_images/{safe_filename}.png"
    prompt_path = f"saved_prompts/{safe_filename}.txt"

    with open(prompt_path, 'w') as f:
        f.write(inputs)

    # Saving the image based on its type
    if isinstance(output1, np.ndarray):  # If it's a numpy array
        Image.fromarray(np.uint8(output1)).save(image_path)
    elif isinstance(output1, Image.Image):  # If it's already a PIL Image
        output1.save(image_path)
    elif isinstance(output1, str):
        # Some backends return a file path / error string; nothing to save.
        print(f"Warning: output1 is a string. Cannot save as image. Value: {output1}")
    else:
        print(f"Warning: Unexpected type {type(output1)} for output1.")

    return output1
|
71 |
|
72 |
+
# Gradio interface layout and logic
with gr.Blocks() as myface:
    # BUG FIX: the previous revision left this `with` block holding only a
    # comment — an IndentationError that crashed the Space at startup.
    # Minimal working UI reconstructed from the prior revision's layout.
    gr.HTML("""<h1>Enter a Prompt in Textbox then click Generate Image</h1>""")

    with gr.Row():
        with gr.Column(scale=100):
            # Seed the panel with the current listing of saved work.
            saved_output = gr.HTML(value=list_saved_prompts_and_images(),
                                   label="Saved Prompts and Images")

    with gr.Row():
        with gr.Column(scale=100):
            magic1 = gr.Textbox(lines=4)
            run = gr.Button("Generate Image")

    with gr.Row():
        with gr.Column(scale=100):
            model_name1 = gr.Dropdown(label="Select Model",
                                      choices=[m for m in models],
                                      type="index",
                                      value=current_model,
                                      interactive=True)

    with gr.Row():
        output1 = gr.Image(label=(f"{current_model}"))

    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Prompt Idea", lines=2)
            use_short = gr.Button("Use Short Prompt")
            see_prompts = gr.Button("Extend Idea")

    def short_prompt(inputs):
        """Pass the user's short prompt through unchanged."""
        return (inputs)

    # Wire UI events to their handlers.
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])

# Launch the Gradio interface
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)
|