hoh2000 committed on
Commit 9cea247 · verified · 1 Parent(s): 168f7aa

Update app.py

Files changed (1)
  1. app.py +3 -26
app.py CHANGED
@@ -1,27 +1,4 @@
- import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import torch
- from PIL import Image
- import base64
- import io
-
- # Load model and tokenizer
- model = AutoModelForCausalLM.from_pretrained("cloudqi/cqi_text_to_image_pt_v0")
- tokenizer = AutoTokenizer.from_pretrained("cloudqi/cqi_text_to_image_pt_v0")
-
- def generate_image(prompt):
-     inputs = tokenizer(prompt, return_tensors="pt")
-     output_ids = model.generate(**inputs, max_length=256)
-     output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
-     # Decode base64 to image
-     if "data:image/png;base64," in output_str:
-         b64_img = output_str.split("data:image/png;base64,")[1]
-         image_data = base64.b64decode(b64_img)
-         image = Image.open(io.BytesIO(image_data))
-         return image
-     return "No image found in output."
-
- demo = gr.Interface(fn=generate_image, inputs="text", outputs="image", title="CQI Text-to-Image")
-
- demo.launch()
+ from huggingface_hub import snapshot_download
+
+ local_dir = snapshot_download(repo_id="cloudqi/cqi_text_to_image_pt_v0")
+ print(f"Model files downloaded to: {local_dir}")