juanelot committed on
Commit 1fe8eb4 (verified)
1 Parent(s): b46cce9
Files changed (1)
  1. app.py +10 -12
app.py CHANGED
@@ -1,19 +1,18 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForTextToImageGeneration
-import torch
+import requests
+import io
+from PIL import Image

-# Load the tokenizer and the model
-tokenizer = AutoTokenizer.from_pretrained("ZB-Tech/Text-to-Image")
-model = AutoModelForTextToImageGeneration.from_pretrained("ZB-Tech/Text-to-Image")
+API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
+headers = {"Authorization": "Bearer HF_API_KEY"}

-# Define the image generation function
 def generate_image(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
-    with torch.no_grad():
-        output = model.generate(**inputs)
-    return output
+    payload = {"inputs": prompt}
+    response = requests.post(API_URL, headers=headers, json=payload)
+    image_bytes = response.content
+    image = Image.open(io.BytesIO(image_bytes))
+    return image

-# Create the Gradio interface
 iface = gr.Interface(
     fn=generate_image,
     inputs=gr.Textbox(lines=5, label="Descripción de la imagen", placeholder="Introduce el texto aquí..."),
@@ -36,5 +35,4 @@ iface = gr.Interface(
     """
 )

-# Run the interface
 iface.launch()
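Note: the new version sends the literal string "HF_API_KEY" as the bearer token, so Inference API calls will be rejected until a real token is supplied. A minimal sketch of the same function, assuming the token is exported in an HF_API_KEY environment variable (a name chosen here for illustration) and adding a status check before decoding the response bytes:

import io
import os

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
# Read the token from the environment instead of hardcoding a placeholder string
# (assumes HF_API_KEY is set before the Space starts).
headers = {"Authorization": f"Bearer {os.environ['HF_API_KEY']}"}

def generate_image(prompt):
    # Ask the Hugging Face Inference API to generate an image for the prompt.
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    # Surface API errors (bad token, model still loading, rate limits) instead of
    # handing an error payload to PIL.
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))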