awacke1 committed
Commit db43f58 · 1 Parent(s): eabaf86

Update app.py

Files changed (1)
  1. app.py +38 -22
app.py CHANGED
@@ -1,22 +1,23 @@
import gradio as gr
-import os
import sys
from pathlib import Path
from PIL import Image
import re
import numpy as np

-# Create directories if they don't exist
if not os.path.exists('saved_prompts'):
    os.makedirs('saved_prompts')
if not os.path.exists('saved_images'):
    os.makedirs('saved_images')
-
-# Function to generate a safe filename
def generate_safe_filename(text):
    return re.sub('[^a-zA-Z0-9]', '_', text)
-
-# Function to load models from a text file
def load_models_from_file(filename):
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
@@ -24,53 +25,71 @@ def load_models_from_file(filename):
if __name__ == "__main__":
    models = load_models_from_file('models.txt')
    print(models)
-
current_model = models[0]
-text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
models2 = [gr.Interface.load(f"models/{model}", live=True, preprocess=False) for model in models]

-# Function to trigger text generation
-def text_it1(inputs, text_gen1=text_gen1):
-    go_t1 = text_gen1(inputs)
-    return (go_t1)

-# Function to set the current model
def set_model(current_model):
    current_model = models[current_model]
    return gr.update(label=(f"{current_model}"))

-# Function to list saved prompts and images
def list_saved_prompts_and_images():
    saved_prompts = os.listdir('saved_prompts')
    saved_images = os.listdir('saved_images')
    html_str = "<h2>Saved Prompts and Images:</h2><ul>"
    for prompt_file in saved_prompts:
        image_file = f"{prompt_file[:-4]}.png"
        if image_file in saved_images:
            html_str += f'<li>Prompt: {prompt_file[:-4]} | <a href="saved_images/{image_file}" download>Download Image</a></li>'
    html_str += "</ul>"
    return html_str

-# Function to handle image generation and saving
def send_it1(inputs, model_choice):
    proc1 = models2[model_choice]
    output1 = proc1(inputs)
    safe_filename = generate_safe_filename(inputs[0])
    image_path = f"saved_images/{safe_filename}.png"
    prompt_path = f"saved_prompts/{safe_filename}.txt"
    with open(prompt_path, 'w') as f:
        f.write(inputs[0])
-    # Saving the image based on its type
    if isinstance(output1, np.ndarray):  # If it's a numpy array
        Image.fromarray(np.uint8(output1)).save(image_path)
    elif isinstance(output1, Image.Image):  # If it's already a PIL Image
        output1.save(image_path)
    else:
        print(f"Warning: Unexpected type {type(output1)} for output1.")
    return output1

-# Gradio interface layout and logic
-with gr.Blocks() as myface:
    gr.HTML("""<!DOCTYPE html>
<html lang="en">
<head>
@@ -166,8 +185,5 @@ with gr.Blocks() as myface:
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])

-
-
-# Launch the Gradio interface
myface.queue(concurrency_count=200)
-myface.launch(inline=True, show_api=False, max_threads=400)
 
app.py after the change (added lines are marked with +):

import gradio as gr
+import os
import sys
from pathlib import Path
from PIL import Image
import re
+from PIL import Image
import numpy as np

+# Coder: Create directories if they don't exist
if not os.path.exists('saved_prompts'):
    os.makedirs('saved_prompts')
+
if not os.path.exists('saved_images'):
    os.makedirs('saved_images')
+
+# Humanities: Elegant function to generate a safe filename 📝
def generate_safe_filename(text):
    return re.sub('[^a-zA-Z0-9]', '_', text)
+
def load_models_from_file(filename):
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
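The directory bootstrap and generate_safe_filename above can be tried on their own. The sketch below is illustrative rather than part of the commit, and uses os.makedirs(exist_ok=True) as an equivalent to the os.path.exists guards:

import os
import re

def generate_safe_filename(text):
    # Same regex as app.py: every non-alphanumeric character becomes '_'
    return re.sub('[^a-zA-Z0-9]', '_', text)

# exist_ok=True makes the call a no-op when the folder already exists,
# matching the effect of the os.path.exists checks in app.py
for folder in ('saved_prompts', 'saved_images'):
    os.makedirs(folder, exist_ok=True)

print(generate_safe_filename("A watercolor fox, 4k!"))  # A_watercolor_fox__4k_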
 
if __name__ == "__main__":
    models = load_models_from_file('models.txt')
    print(models)
+#removed to removed.txt
+
current_model = models[0]
+
+text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
models2 = [gr.Interface.load(f"models/{model}", live=True, preprocess=False) for model in models]


+
+def text_it1(inputs,text_gen1=text_gen1):
+    go_t1=text_gen1(inputs)
+    return(go_t1)
+
def set_model(current_model):
    current_model = models[current_model]
    return gr.update(label=(f"{current_model}"))

+# Analysis: Function to list saved prompts and images 📊
def list_saved_prompts_and_images():
    saved_prompts = os.listdir('saved_prompts')
    saved_images = os.listdir('saved_images')
+
    html_str = "<h2>Saved Prompts and Images:</h2><ul>"
    for prompt_file in saved_prompts:
        image_file = f"{prompt_file[:-4]}.png"
        if image_file in saved_images:
            html_str += f'<li>Prompt: {prompt_file[:-4]} | <a href="saved_images/{image_file}" download>Download Image</a></li>'
    html_str += "</ul>"
+
    return html_str
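list_saved_prompts_and_images pairs each saved *.txt prompt with a *.png of the same stem and renders the matches as an HTML list. A self-contained sketch of just that pairing rule, with hypothetical fixture files and pathlib in place of os.listdir:

from pathlib import Path

Path('saved_prompts').mkdir(exist_ok=True)
Path('saved_images').mkdir(exist_ok=True)

# Hypothetical fixtures: one prompt with a matching image, one without
Path('saved_prompts/a_red_fox.txt').write_text('a red fox')
Path('saved_images/a_red_fox.png').touch()
Path('saved_prompts/orphan.txt').write_text('orphan prompt')

# A prompt is listed only when a .png with the same stem exists in saved_images
for prompt_file in Path('saved_prompts').iterdir():
    if (Path('saved_images') / f"{prompt_file.stem}.png").exists():
        print(prompt_file.stem)  # prints: a_red_fox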

+# Coder: Modified function to save the prompt and image 🖼️
def send_it1(inputs, model_choice):
    proc1 = models2[model_choice]
    output1 = proc1(inputs)
+
    safe_filename = generate_safe_filename(inputs[0])
    image_path = f"saved_images/{safe_filename}.png"
    prompt_path = f"saved_prompts/{safe_filename}.txt"
+
    with open(prompt_path, 'w') as f:
        f.write(inputs[0])
+
+    # Check the type of output1 before saving
    if isinstance(output1, np.ndarray):  # If it's a numpy array
        Image.fromarray(np.uint8(output1)).save(image_path)
    elif isinstance(output1, Image.Image):  # If it's already a PIL Image
        output1.save(image_path)
+    elif isinstance(output1, str):  # If it's a string (this should not happen in ideal conditions)
+        print(f"Warning: output1 is a string. Cannot save as image. Value: {output1}")
    else:
        print(f"Warning: Unexpected type {type(output1)} for output1.")
+
+    #Image.fromarray(output1).save(image_path)
+
+    saved_output.update(list_saved_prompts_and_images())
+
    return output1
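The new branch in send_it1 checks the type of the model output before writing the PNG: numpy arrays are converted with Image.fromarray, PIL images are saved directly, and strings (typically backend error messages) are only logged. The same dispatch as a standalone helper; the name save_output_image and the random demo array are illustrative, not from the commit:

import numpy as np
from PIL import Image

def save_output_image(output, image_path):
    # Accept a numpy array or a PIL image; warn on anything else,
    # e.g. an error string returned by the inference backend.
    if isinstance(output, np.ndarray):
        Image.fromarray(np.uint8(output)).save(image_path)
    elif isinstance(output, Image.Image):
        output.save(image_path)
    elif isinstance(output, str):
        print(f"Warning: got a string, cannot save as image: {output}")
    else:
        print(f"Warning: unexpected type {type(output)}")

# A 64x64 random RGB array stands in for a generated image
save_output_image(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8), "demo.png")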

+
+
+css=""""""
+
+
+with gr.Blocks(css=css) as myface:
    gr.HTML("""<!DOCTYPE html>
<html lang="en">
<head>

    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])

myface.queue(concurrency_count=200)
+myface.launch(inline=True, show_api=False, max_threads=400)
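The closing hunk keeps the event wiring: the model dropdown's change event re-labels the output via set_model, the Run button calls send_it1, and the app is queued and launched. A stripped-down sketch of that wiring pattern with hypothetical component names, assuming the Gradio 3.x-era API that app.py targets (gr.update, queue with concurrency_count):

import gradio as gr

def fake_send_it1(prompt, model_index):
    # Placeholder for send_it1: just echo which model would be used
    return f"model #{model_index}: {prompt}"

with gr.Blocks() as demo:
    model_name1 = gr.Dropdown(choices=["model-a", "model-b"], value="model-a", type="index")
    magic1 = gr.Textbox(label="Prompt")
    output1 = gr.Textbox(label="Output")
    run = gr.Button("Generate")

    # Same pattern as app.py: change() re-labels the output, click() generates
    model_name1.change(lambda i: gr.update(label=f"model #{i}"), inputs=model_name1, outputs=[output1])
    run.click(fake_send_it1, inputs=[magic1, model_name1], outputs=[output1])

demo.queue()  # app.py passes concurrency_count=200, a Gradio 3.x argument
demo.launch(show_api=False)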