first
- .gitignore +2 -0
- Dockerfile +55 -0
- README.md +2 -3
- invokeai.yaml +40 -0
- requirements.txt +2 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.venv
+venv
Dockerfile
ADDED
@@ -0,0 +1,55 @@
+FROM nvidia/cuda:12.0.0-cudnn8-devel-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive \
+    TZ=America/Los_Angeles
+
+ARG USE_PERSISTENT_DATA
+
+RUN apt-get update && apt-get install -y \
+    git \
+    make build-essential libssl-dev zlib1g-dev \
+    libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
+    libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev git-lfs \
+    ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx \
+    && rm -rf /var/lib/apt/lists/* \
+    && git lfs install
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+# User
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+# Pyenv
+RUN curl https://pyenv.run | bash
+ENV PATH=$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH
+
+ARG PYTHON_VERSION=3.9.17
+# Python
+RUN pyenv install $PYTHON_VERSION && \
+    pyenv global $PYTHON_VERSION && \
+    pyenv rehash && \
+    pip install --no-cache-dir --upgrade pip setuptools wheel && \
+    pip install --no-cache-dir \
+    datasets \
+    huggingface-hub "protobuf<4" "click<8.1"
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+# Set the working directory to /data if USE_PERSISTENT_DATA is set, otherwise set to $HOME/app
+WORKDIR $HOME/app
+EXPOSE 9090
+
+COPY --chown=user:user ./invokeai.yaml $HOME/app/invokeai.yaml
+
+RUN ["invokeai-configure", "--yes", "--default_only", "--root", "."]
+
+# Install models according to https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/050_INSTALLING_MODELS.md
+# RUN ["invokeai-model-install", "--add","https://civitai.com/api/download/models/128713"]
+
+# Run the InvokeAI web UI
+CMD ["invokeai-web", "--host", "0.0.0.0"]
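For reference, the image can also be tried outside the Space with a plain Docker build and run; the tag name below is illustrative, and --gpus all assumes the NVIDIA Container Toolkit is available on the host:

    docker build -t invokeai-space .
    docker run --gpus all -p 9090:9090 invokeai-space

The UI should then be reachable at http://localhost:9090, matching EXPOSE 9090 above and the port set in invokeai.yaml.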
README.md
CHANGED
@@ -4,7 +4,6 @@ emoji: ⚡
 colorFrom: red
 colorTo: yellow
 sdk: docker
+app_port: 9090
 pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
invokeai.yaml
ADDED
@@ -0,0 +1,40 @@
+InvokeAI:
+  Web Server:
+    host: 127.0.0.1
+    port: 9090
+    allow_origins: []
+    allow_credentials: true
+    allow_methods:
+    - '*'
+    allow_headers:
+    - '*'
+  Features:
+    esrgan: true
+    internet_available: true
+    log_tokenization: false
+    patchmatch: true
+  Memory/Performance:
+    always_use_cpu: false
+    free_gpu_mem: false
+    max_cache_size: 6.0
+    max_vram_cache_size: 2.75
+    precision: auto
+    sequential_guidance: false
+    xformers_enabled: true
+    tiled_decode: false
+  Paths:
+    autoimport_dir: autoimport
+    lora_dir: null
+    embedding_dir: null
+    controlnet_dir: null
+    conf_path: configs/models.yaml
+    models_dir: models
+    legacy_conf_dir: configs/stable-diffusion
+    db_dir: databases
+    outdir: outputs # set to /data/outputs if you have persistent storage
+    use_memory_db: true
+  Logging:
+    log_handlers:
+    - console
+    log_format: color
+    log_level: debug
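As the outdir comment hints, a Space with persistent storage mounted at /data could keep outputs and the model database across restarts. A sketch of how the Paths entries might be edited in that case (paths are illustrative; every other key stays as above):

    db_dir: /data/databases
    outdir: /data/outputs
    use_memory_db: false   # keep the database on disk instead of in memory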
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu117
+git+https://github.com/invoke-ai/InvokeAI@d09dfc3e9bdbd9a8522559fb8bd0bc70d205fd94#egg=InvokeAI[xformers]
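Outside of Docker, the same pin should install directly with pip, since pip reads the --extra-index-url option from inside the requirements file (a Python 3.9/3.10 environment with CUDA 11.7-compatible drivers is assumed):

    pip install -r requirements.txt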