Update app.py
app.py CHANGED
@@ -241,11 +241,24 @@ def auto_enhance_image_api():
 
         return jsonify({"error": "An unknown error occurred"}), 500
 
-# --- AI IMAGE GENERATOR (TEXT-TO-IMAGE) API ENDPOINT ---
-
-#
-
-
+# --- AI IMAGE GENERATOR (TEXT-TO-IMAGE) API ENDPOINT (FINAL, GUARANTEED LAZY LOADING) ---
+
+# Create a global "box" (variable) for the model, but keep it empty at startup.
+pipe = None
+
+def get_pipeline():
+    """
+    Helper function that loads the model only once.
+    """
+    global pipe
+    if pipe is None:
+        print("Model not loaded. Loading Tiny Stable Diffusion model for the first time...")
+        # Note: Secrets (like HUGGING_FACE_HUB_CACHE) will be available here.
+        pipe = DiffusionPipeline.from_pretrained(
+            "hf-internal-testing/tiny-stable-diffusion-torch"
+        )
+        print("Model loaded successfully into memory.")
+    return pipe
 
 @app.route('/generate-image', methods=['POST'])
 def generate_image_api():
@@ -264,7 +277,14 @@ def generate_image_api():
 
     # Generate the image with the AI model
     try:
-
+        # On each request, get the model by calling the helper function.
+        # If the model is already loaded, it is returned immediately.
+        # If not, it is loaded now.
+        pipeline = get_pipeline()
+
+        print(f"Generating image for prompt: {prompt}")
+        image = pipeline(prompt, num_inference_steps=10).images[0]
+        print("Image generated.")
 
         output_buffer = io.BytesIO()
         image.save(output_buffer, format='PNG')
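One caveat on the lazy loader above: if the server handles requests on multiple threads (the common setup for a deployed Flask app), two simultaneous first requests could both see pipe is None and load the model twice. Below is a minimal sketch of a lock-guarded variant; the threading import and the _pipe_lock name are illustrative additions, not part of app.py.

import threading

from diffusers import DiffusionPipeline

pipe = None
_pipe_lock = threading.Lock()

def get_pipeline():
    """Load the model once, even under concurrent first requests."""
    global pipe
    if pipe is None:
        with _pipe_lock:
            if pipe is None:  # re-check: another thread may have loaded it meanwhile
                pipe = DiffusionPipeline.from_pretrained(
                    "hf-internal-testing/tiny-stable-diffusion-torch"
                )
    return pipe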
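For a quick end-to-end check of the endpoint, a request sketch like the following should work once the Space is running. The JSON field name ("prompt") and the local URL are assumptions, since the diff does not show how the handler reads the request body; the PNG response matches the io.BytesIO / image.save(..., format='PNG') lines above.

import requests

resp = requests.post(
    "http://localhost:7860/generate-image",       # assumed local URL/port
    json={"prompt": "a red bicycle on a beach"},  # assumed request schema
    timeout=300,  # the first call also loads the model, so allow extra time
)
resp.raise_for_status()

with open("out.png", "wb") as f:
    f.write(resp.content)  # the endpoint returns raw PNG bytes

Since "hf-internal-testing/tiny-stable-diffusion-torch" appears to be a tiny test checkpoint, the returned image mainly verifies the plumbing rather than producing a usable picture.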