juanelot committed on
Commit
66e7106
verified
1 Parent(s): 4e2c5a6
Files changed (1)
  1. app.py +35 -142
app.py CHANGED
@@ -1,146 +1,39 @@
  import gradio as gr
- import numpy as np
- import random
- from diffusers import DiffusionPipeline
  import torch

- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- if torch.cuda.is_available():
-     torch.cuda.max_memory_allocated(device=device)
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-     pipe.enable_xformers_memory_efficient_attention()
-     pipe = pipe.to(device)
- else:
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-     pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
- def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt = prompt,
-         negative_prompt = negative_prompt,
-         guidance_scale = guidance_scale,
-         num_inference_steps = num_inference_steps,
-         width = width,
-         height = height,
-         generator = generator
-     ).images[0]
-
-     return image
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]

- css="""
- #col-container {
-     margin: 0 auto;
-     max-width: 520px;
- }
- """
-
- if torch.cuda.is_available():
-     power_device = "GPU"
- else:
-     power_device = "CPU"
-
- with gr.Blocks(css=css) as demo:
-
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(f"""
-         # Text-to-Image Gradio Template
-         Currently running on {power_device}.
-         """)
-
-         with gr.Row():
-
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0)
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=512,
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=512,
-                 )
-
-             with gr.Row():
-
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=12,
-                     step=1,
-                     value=2,
-                 )
-
-         gr.Examples(
-             examples = examples,
-             inputs = [prompt]
-         )
-
-     run_button.click(
-         fn = infer,
-         inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-         outputs = [result]
-     )
-
- demo.queue().launch()
 
  import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from torch import autocast
  import torch

+ # Load the model and the tokenizer
+ model_name = "ZB-Tech/Text-to-Image"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)

+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model = model.to(device)
+
+ # Define the image generation function
+ def generate_image(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+     with autocast(device):
+         output = model.generate(**inputs)
+     # Convert the output into an image suitable for display
+     # (the code depends on what the model returns)
+     return tokenizer.decode(output[0], skip_special_tokens=True)
+
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=generate_image,
+     inputs=gr.inputs.Textbox(lines=5, label="Image description", placeholder="Enter your text here..."),
+     outputs=gr.outputs.Image(type="pil", label="Generated image"),
+     title="Text-to-Image Generator",
+     description="Enter a text prompt and get an AI-generated image.",
+     theme="default",  # You can change the theme
+     layout="vertical",  # You can change the layout to 'horizontal' or 'vertical'
+     examples=[
+         ["a picturesque of local market. The first light of day illuminates the stone facades and worn tiles of the houses and buildings, some of which date back centuries. At the center of the scene, a cobblestone square leads to an open-air market that begins to come to life, with vendors setting up their stalls selling fruits, vegetables, flowers and local crafts. The narrow, winding streets are lined with old lanterns, now unlit, while lazy cats lounge on the stone steps. In one corner, an ancient fountain, adorned with weathered carvings, murmurs softly, adding to the tranquil atmosphere. In the background, the towers of an ancient cathedral rise, capturing the first rays of sunlight that paint the sky in soft pinks and oranges. This image should convey a sense of tranquility, beauty and a deep connection to the past, celebrating the rich history and timeless charm of the ancient village or town."]
+     ],
+     live=True  # If you want the results to update in real time
+ )
+
+ # Run the interface
+ iface.launch()
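
Note that the committed version loads a text-to-image checkpoint with AutoModelForCausalLM and returns decoded text, while the interface declares a PIL image output, and it uses the pre-4.x gr.inputs / gr.outputs namespaces. Below is a minimal sketch of how the same Space could be wired up with the diffusers pipeline already used in the removed version of this file. It assumes the "ZB-Tech/Text-to-Image" checkpoint is loadable as a diffusers pipeline; if it is not, substitute a known base model such as "stabilityai/sdxl-turbo" from the removed code. This is a sketch, not the author's implementation.

import gradio as gr
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumption: the checkpoint works with DiffusionPipeline.from_pretrained;
# otherwise swap in a base model such as "stabilityai/sdxl-turbo".
pipe = DiffusionPipeline.from_pretrained(
    "ZB-Tech/Text-to-Image",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)

def generate_image(prompt):
    # The pipeline returns PIL images, matching the gr.Image output below.
    return pipe(prompt).images[0]

iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=5, label="Image description", placeholder="Enter your text here..."),
    outputs=gr.Image(type="pil", label="Generated image"),
    title="Text-to-Image Generator",
    description="Enter a text prompt and get an AI-generated image.",
)

iface.launch()

The sketch drops live=True, since re-running a diffusion pipeline on every keystroke is rarely practical; the default button-triggered submission is used instead.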