File size: 2,702 Bytes
8ae0ff2
e09576c
57a9c16
8ae0ff2
f413eb4
f80fd5d
59d4efc
ab9256d
e09576c
cbc71ea
 
 
 
e09576c
 
cbc71ea
 
e09576c
 
ab9256d
 
 
 
628051d
 
 
57a9c16
e09576c
 
9f870a3
8ae0ff2
e09576c
 
 
 
57a9c16
e09576c
628051d
 
 
8ae0ff2
e09576c
f413eb4
 
 
b76872c
f413eb4
b76872c
 
 
 
 
f413eb4
e09576c
8ae0ff2
f413eb4
 
cbc71ea
 
 
 
 
e09576c
821ffa5
 
 
 
 
 
cbc71ea
 
e09576c
 
 
cbc71ea
e09576c
57a9c16
e09576c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import gradio as gr
import os
import sys
from pathlib import Path
from PIL import Image
import re
import numpy as np

# Ensure the on-disk stores for prompts and generated images exist.
for _dir_name in ('saved_prompts', 'saved_images'):
    if not os.path.exists(_dir_name):
        os.makedirs(_dir_name)

# Turn arbitrary prompt text into a filesystem-safe name.
def generate_safe_filename(text):
    """Return *text* with every character outside [a-zA-Z0-9] replaced by '_'."""
    sanitized = re.sub(r"[^a-zA-Z0-9]", "_", text)
    return sanitized

# Function to load models from a text file
def load_models_from_file(filename):
    """Read one model name per line from *filename* and return them as a list.

    Each line is stripped of surrounding whitespace and blank lines are
    skipped: a trailing newline in the file would otherwise produce an
    empty model name, which breaks the downstream
    ``gr.Interface.load(f"models/{model}")`` calls.
    """
    with open(filename, 'r') as f:
        return [line.strip() for line in f if line.strip()]

# Load the model list at import time: the module-level consumers below
# (current_model, models2) need it even when this file is imported rather
# than executed as a script. Previously `models` was assigned only inside
# the __main__ guard, so importing the module raised NameError at line
# `current_model = models[0]`. Only the debug print stays behind the guard.
models = load_models_from_file('models.txt')

if __name__ == "__main__":
    print(models)

current_model = models[0]

# Remote prompt-expansion space (wrapped as a callable Interface).
text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")

# One live image-generation interface per listed model.
models2 = [gr.Interface.load(f"models/{model}", live=True, preprocess=False) for model in models]

# Function to trigger text generation
def text_it1(inputs, text_gen1=text_gen1):
    """Forward *inputs* to the text-generation interface and return its output."""
    return text_gen1(inputs)

# Function to set the current model
def set_model(current_model):
    """Resolve the selected index into a model name and relabel the output widget.

    NOTE: reassigning the parameter only rebinds the local name; the
    module-level ``current_model`` is untouched (same as the original code).
    """
    chosen = models[current_model]
    current_model = chosen
    return gr.update(label=str(chosen))

# Function to list saved prompts and images
def list_saved_prompts_and_images():
    """Build an HTML list pairing each saved prompt with its saved image.

    For every ``saved_prompts/<name>.txt`` that has a matching
    ``saved_images/<name>.png``, emit a list item with a download link.

    Improvements over the original: entries are sorted so the output is
    deterministic (``os.listdir`` order is arbitrary), image membership is
    checked against a set (O(1) instead of an O(n) list scan per prompt),
    and stray non-.txt files are skipped so ``[:-4]`` never mangles a name.
    """
    saved_images = set(os.listdir('saved_images'))
    html_str = "<h2>Saved Prompts and Images:</h2><ul>"
    for prompt_file in sorted(os.listdir('saved_prompts')):
        if not prompt_file.endswith('.txt'):
            continue  # not a prompt record
        stem = prompt_file[:-4]
        image_file = f"{stem}.png"
        if image_file in saved_images:
            html_str += f'<li>Prompt: {stem} | <a href="saved_images/{image_file}" download>Download Image</a></li>'
    html_str += "</ul>"
    return html_str

# Function to handle image generation and saving
def send_it1(inputs, model_choice):
    """Generate an image with the model at index *model_choice*, persist the
    prompt text and the image under matching filenames, and return the raw
    model output unchanged.

    NOTE(review): ``inputs[0]`` is used as the prompt text for the filename
    and the saved .txt. This assumes *inputs* is a sequence whose first
    element is the prompt string; if the Gradio caller passes the prompt as
    a plain string, ``inputs[0]`` would be only its first character —
    confirm against the caller wiring.
    """
    proc1 = models2[model_choice]  # pre-loaded gr.Interface for the chosen model
    output1 = proc1(inputs)
    safe_filename = generate_safe_filename(inputs[0])
    image_path = f"saved_images/{safe_filename}.png"
    prompt_path = f"saved_prompts/{safe_filename}.txt"
    with open(prompt_path, 'w') as f:
        f.write(inputs[0])
    # Saving the image based on its type
    if isinstance(output1, np.ndarray):  # If it's a numpy array
        Image.fromarray(np.uint8(output1)).save(image_path)
    elif isinstance(output1, Image.Image):  # If it's already a PIL Image
        output1.save(image_path)
    else:
        # Unknown type (e.g. a file path or error string): not saved, only logged.
        print(f"Warning: Unexpected type {type(output1)} for output1.")
    return output1

# Gradio interface layout and logic
with gr.Blocks() as myface:
    # (Omitted for brevity, similar to your original code)
    pass  # a `with` body must contain a statement — a comment alone is a SyntaxError

# Launch the Gradio interface
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)