all static one folder
Files changed:
- app-controlnet.py  +22 -8
- app-img2img.py  +14 -5
- app-txt2img.py  +10 -4
- img2img/tailwind.config.js  +0 -0 (deleted)
- controlnet/index.html → static/controlnet.html  +0 -0 (renamed)
- img2img/index.html → static/img2img.html  +0 -0 (renamed)
- {controlnet → static}/tailwind.config.js  +0 -0 (renamed)
- txt2img/index.html → static/txt2img.html  +0 -0 (renamed)
- txt2img/tailwind.config.js  +0 -0 (deleted)
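All three apps now serve their UI page from the shared static/ folder through a root route instead of per-app index.html directories. A minimal standalone sketch of that pattern, with an illustrative filename (each app in the diffs below points at its own page):

from fastapi import FastAPI
from fastapi.responses import FileResponse, HTMLResponse

app = FastAPI()

# Root route returning a page from the shared static/ folder.
# "index.html" is a placeholder; the diffs below use controlnet.html,
# img2img.html and txt2img.html respectively.
@app.get("/", response_class=HTMLResponse)
async def root():
    return FileResponse("./static/index.html")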
app-controlnet.py
CHANGED
@@ -6,15 +6,20 @@ from pydantic import BaseModel
 
 from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import
-
+from fastapi.responses import (
+    StreamingResponse,
+    JSONResponse,
+    HTMLResponse,
+    FileResponse,
+)
 
 from diffusers import AutoencoderTiny, ControlNetModel
 from latent_consistency_controlnet import LatentConsistencyModelPipeline_controlnet
 from compel import Compel
 import torch
 
-from canny_gpu import SobelOperator
+from canny_gpu import SobelOperator
+
 # from controlnet_aux import OpenposeDetector
 # import cv2
 
@@ -35,7 +40,7 @@ import psutil
 MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
-TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
+TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
 WIDTH = 512
 HEIGHT = 512
 # disable tiny autoencoder for better quality speed tradeoff
@@ -110,7 +115,11 @@ if TORCH_COMPILE:
     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
     pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
 
-    pipe(
+    pipe(
+        prompt="warmup",
+        image=[Image.new("RGB", (768, 768))],
+        control_image=[Image.new("RGB", (768, 768))],
+    )
 
 
 user_queue_map = {}
@@ -132,12 +141,15 @@ class InputParams(BaseModel):
     canny_high_threshold: float = 0.78
     debug_canny: bool = False
 
+
 def predict(
     input_image: Image.Image, params: InputParams, prompt_embeds: torch.Tensor = None
 ):
     generator = torch.manual_seed(params.seed)
-
-    control_image = canny_torch(
+
+    control_image = canny_torch(
+        input_image, params.canny_low_threshold, params.canny_high_threshold
+    )
     results = pipe(
         control_image=control_image,
         prompt_embeds=prompt_embeds,
@@ -305,4 +317,6 @@ async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID):
         traceback.print_exc()
 
 
-app.
+@app.get("/", response_class=HTMLResponse)
+async def root():
+    return FileResponse("./static/controlnet.html")
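The startup pipe() call inside the TORCH_COMPILE branch is expanded into an explicit warmup pass, presumably because torch.compile is lazy: compilation only runs on the first forward pass with concrete inputs, so calling the pipeline once at startup keeps that cost off the first user request. A standalone sketch of the same idea with a toy module rather than the diffusers pipeline (model and shapes are illustrative):

import torch
import torch.nn as nn

# Toy stand-in for the real pipeline; torch.compile defers compilation
# until the first call with concrete input shapes.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
model = torch.compile(model, mode="reduce-overhead")

# One dummy forward at startup, mirroring pipe(prompt="warmup", ...):
# the compile cost is paid here rather than on the first user request.
with torch.no_grad():
    _ = model(torch.zeros(1, 3, 64, 64))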
app-img2img.py
CHANGED
@@ -6,8 +6,12 @@ from pydantic import BaseModel
 
 from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import
-
+from fastapi.responses import (
+    StreamingResponse,
+    JSONResponse,
+    HTMLResponse,
+    FileResponse,
+)
 
 from diffusers import AutoPipelineForImage2Image, AutoencoderTiny
 from compel import Compel
@@ -29,7 +33,7 @@ import psutil
 MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
-TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
+TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
 
 WIDTH = 512
 HEIGHT = 512
@@ -102,7 +106,10 @@ class InputParams(BaseModel):
     width: int = WIDTH
     height: int = HEIGHT
 
-
+
+def predict(
+    input_image: Image.Image, params: InputParams, prompt_embeds: torch.Tensor = None
+):
     generator = torch.manual_seed(params.seed)
     results = pipe(
         prompt_embeds=prompt_embeds,
@@ -259,4 +266,6 @@ async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID):
         traceback.print_exc()
 
 
-app.
+@app.get("/", response_class=HTMLResponse)
+async def root():
+    return FileResponse("./static/img2img.html")
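Both apps above register an explicit FileResponse route for their page. An alternative, not used in this commit, would be to mount the whole static/ folder with Starlette's StaticFiles; a minimal sketch under that assumption:

from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

app = FastAPI()

# Exposes every file in static/ under /static/..., e.g. /static/img2img.html,
# without a dedicated route per page (requires the static/ folder to exist).
app.mount("/static", StaticFiles(directory="static"), name="static")

The per-page route used in the diffs keeps the root path "/" pointing at the right HTML file for each app, which is useful here because three separate apps share one static/ folder.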
app-txt2img.py
CHANGED
@@ -6,8 +6,12 @@ from pydantic import BaseModel
 
 from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import
-
+from fastapi.responses import (
+    StreamingResponse,
+    JSONResponse,
+    HTMLResponse,
+    FileResponse,
+)
 
 from diffusers import DiffusionPipeline, AutoencoderTiny
 from compel import Compel
@@ -30,7 +34,7 @@ import psutil
 MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
-TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
+TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
 
 WIDTH = 768
 HEIGHT = 768
@@ -246,4 +250,6 @@ async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID):
         traceback.print_exc()
 
 
-app.
+@app.get("/", response_class=HTMLResponse)
+async def root():
+    return FileResponse("./static/txt2img.html")
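With the root routes in place, each app serves its own page directly. A hedged usage sketch for starting one of them with uvicorn's programmatic API (the import string comes from the file names above; port 7860, the usual Spaces port, is an assumption):

import uvicorn

if __name__ == "__main__":
    # "app-txt2img:app": module file app-txt2img.py, FastAPI instance "app".
    # uvicorn resolves this string with importlib, so the hyphenated filename
    # is accepted even though it is not a valid name in an import statement.
    uvicorn.run("app-txt2img:app", host="0.0.0.0", port=7860)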
img2img/tailwind.config.js
DELETED (file without changes)

controlnet/index.html → static/controlnet.html
RENAMED (file without changes)

img2img/index.html → static/img2img.html
RENAMED (file without changes)

{controlnet → static}/tailwind.config.js
RENAMED (file without changes)

txt2img/index.html → static/txt2img.html
RENAMED (file without changes)

txt2img/tailwind.config.js
DELETED (file without changes)
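After the renames and deletions above, the three pages and the single remaining tailwind.config.js all live in static/. A quick sanity check of that layout (paths taken from the rename list; run from the repo root):

from pathlib import Path

# Files expected in the consolidated static/ folder after this commit.
expected = [
    "static/controlnet.html",
    "static/img2img.html",
    "static/txt2img.html",
    "static/tailwind.config.js",
]
for path in expected:
    print(path, "ok" if Path(path).is_file() else "missing")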