Upload folder using huggingface_hub
- .gitattributes +3 -35
- .github/workflows/update_space.yml +28 -0
- .gitignore +175 -0
- .gradio/certificate.pem +31 -0
- .python-version +1 -0
- .vscode/settings.json +3 -0
- ObjToLego.ipynb +1006 -0
- README.md +5 -9
- app.py +366 -0
- constants.py +43 -0
- doll.obj +0 -0
- lego_quantize.py +103 -0
- mesh.obj +0 -0
- pyproject.toml +22 -0
- requirements.txt +8 -0
- scene.obj +0 -0
- uv.lock +0 -0
.gitattributes
CHANGED
@@ -1,35 +1,3 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+# GitHub syntax highlighting
+pixi.lock linguist-language=YAML
+
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2
+
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.9'
+
+    - name: Install Gradio
+      run: python -m pip install gradio
+
+    - name: Log in to Hugging Face
+      run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+    - name: Deploy to Spaces
+      run: gradio deploy
.gitignore
ADDED
@@ -0,0 +1,175 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# PyPI configuration file
+.pypirc
+# pixi environments
+.pixi
+*.egg-info
+
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
.python-version
ADDED
@@ -0,0 +1 @@
+3.11
.vscode/settings.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "python.defaultInterpreterPath": "/Users/londogard/git/img2lego/.pixi/envs/default/bin/python"
+}
ObjToLego.ipynb
ADDED
|
@@ -0,0 +1,1006 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {
|
| 6 |
+
"id": "_vSR56u0u74i"
|
| 7 |
+
},
|
| 8 |
+
"source": [
|
| 9 |
+
"# 3D File to Lego\n",
|
| 10 |
+
"\n",
|
| 11 |
+
"First create 3D file using something like InstantMesh. Then we turn the 3D file into Lego build."
|
| 12 |
+
]
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"cell_type": "markdown",
|
| 16 |
+
"metadata": {
|
| 17 |
+
"id": "M-2cYolwvEcp"
|
| 18 |
+
},
|
| 19 |
+
"source": [
|
| 20 |
+
"## Install Dependencies"
|
| 21 |
+
]
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"cell_type": "code",
|
| 25 |
+
"execution_count": null,
|
| 26 |
+
"metadata": {
|
| 27 |
+
"collapsed": true,
|
| 28 |
+
"id": "cuPmdwpq0txu"
|
| 29 |
+
},
|
| 30 |
+
"outputs": [],
|
| 31 |
+
"source": [
|
| 32 |
+
"%%capture\n",
|
| 33 |
+
"!pip install trimesh rtree PyQt5 colormath # xformers==0.0.22.post7 rembg"
|
| 34 |
+
]
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"cell_type": "markdown",
|
| 38 |
+
"metadata": {
|
| 39 |
+
"id": "IuNdIvDsvIgz"
|
| 40 |
+
},
|
| 41 |
+
"source": [
|
| 42 |
+
"## Load a 3D object\n",
|
| 43 |
+
"\n",
|
| 44 |
+
"And display our mesh"
|
| 45 |
+
]
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"cell_type": "code",
|
| 49 |
+
"execution_count": null,
|
| 50 |
+
"metadata": {
|
| 51 |
+
"colab": {
|
| 52 |
+
"base_uri": "https://localhost:8080/",
|
| 53 |
+
"height": 521
|
| 54 |
+
},
|
| 55 |
+
"id": "kyIn7pDq2h30",
|
| 56 |
+
"outputId": "98ab2ba7-eda8-471c-ee93-6eaba7b053a8"
|
| 57 |
+
},
|
| 58 |
+
"outputs": [],
|
| 59 |
+
"source": [
|
| 60 |
+
"import trimesh\n",
|
| 61 |
+
"import numpy as np\n",
|
| 62 |
+
"\n",
|
| 63 |
+
"mesh = trimesh.load('doll.obj')\n",
|
| 64 |
+
"mesh.show()"
|
| 65 |
+
]
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"cell_type": "markdown",
|
| 69 |
+
"metadata": {
|
| 70 |
+
"id": "HE58Fe0xvOkD"
|
| 71 |
+
},
|
| 72 |
+
"source": [
|
| 73 |
+
"## Voxelize\n",
|
| 74 |
+
"\n",
|
| 75 |
+
"We can then voxelize using a custom resolution."
|
| 76 |
+
]
|
| 77 |
+
},
|
| 78 |
+
{
|
| 79 |
+
"cell_type": "code",
|
| 80 |
+
"execution_count": 3,
|
| 81 |
+
"metadata": {
|
| 82 |
+
"id": "i5el0q3H2xLr"
|
| 83 |
+
},
|
| 84 |
+
"outputs": [],
|
| 85 |
+
"source": [
|
| 86 |
+
"def voxelize(mesh, resolution: int = 64):\n",
|
| 87 |
+
" bounds = mesh.bounds\n",
|
| 88 |
+
" voxel_size = (bounds[1] - bounds[0]).max() / 64 # pitch\n",
|
| 89 |
+
"\n",
|
| 90 |
+
" return mesh.voxelized(pitch=voxel_size)\n",
|
| 91 |
+
"\n",
|
| 92 |
+
"def display_scene(mesh, voxels):\n",
|
| 93 |
+
" voxels_mesh = voxels.as_boxes().apply_translation((1.5,0,0))\n",
|
| 94 |
+
" scene = trimesh.Scene([mesh, voxels_mesh])\n",
|
| 95 |
+
"\n",
|
| 96 |
+
" return scene.show()"
|
| 97 |
+
]
|
| 98 |
+
},
|
| 99 |
+
{
|
| 100 |
+
"cell_type": "code",
|
| 101 |
+
"execution_count": null,
|
| 102 |
+
"metadata": {
|
| 103 |
+
"colab": {
|
| 104 |
+
"base_uri": "https://localhost:8080/",
|
| 105 |
+
"height": 521
|
| 106 |
+
},
|
| 107 |
+
"id": "hYzOb6Oq4z48",
|
| 108 |
+
"outputId": "3a84d86a-7184-44a3-a993-be3851e02e5c"
|
| 109 |
+
},
|
| 110 |
+
"outputs": [],
|
| 111 |
+
"source": [
|
| 112 |
+
"voxels = voxelize(mesh)\n",
|
| 113 |
+
"\n",
|
| 114 |
+
"display_scene(mesh, voxels)"
|
| 115 |
+
]
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"cell_type": "markdown",
|
| 119 |
+
"metadata": {
|
| 120 |
+
"id": "5UOYrWRpvdaS"
|
| 121 |
+
},
|
| 122 |
+
"source": [
|
| 123 |
+
"## Voxelize with Color\n",
|
| 124 |
+
"\n",
|
| 125 |
+
"We need to colorize voxels by fetching the N nearest colors and taking the mean."
|
| 126 |
+
]
|
| 127 |
+
},
|
| 128 |
+
{
|
| 129 |
+
"cell_type": "code",
|
| 130 |
+
"execution_count": 127,
|
| 131 |
+
"metadata": {
|
| 132 |
+
"id": "eOW-K4PVDOlv"
|
| 133 |
+
},
|
| 134 |
+
"outputs": [],
|
| 135 |
+
"source": [
|
| 136 |
+
"import plotly.graph_objects as go\n",
|
| 137 |
+
"import plotly.express as px\n",
|
| 138 |
+
"import polars as pl\n",
|
| 139 |
+
"\n",
|
| 140 |
+
"def display_voxels_px(voxels, colors):\n",
|
| 141 |
+
" # Convert occupied_voxel_indices to a Polars DataFrame (if not already done)\n",
|
| 142 |
+
" df = pl.from_numpy(voxels.sparse_indices, schema=[\"x\", \"y\", \"z\"])\n",
|
| 143 |
+
" df = df.with_columns(color=pl.Series(colors))\n",
|
| 144 |
+
" px.scatter_3d(df, x=\"x\", y=\"y\", z=\"z\", color=\"color\",\n",
|
| 145 |
+
" color_discrete_map=\"identity\", symbol=[\"square\"]*len(df), symbol_map=\"identity\"\n",
|
| 146 |
+
").show()"
|
| 147 |
+
]
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"cell_type": "code",
|
| 151 |
+
"execution_count": 128,
|
| 152 |
+
"metadata": {
|
| 153 |
+
"id": "Iyz1dRYbHPg7"
|
| 154 |
+
},
|
| 155 |
+
"outputs": [],
|
| 156 |
+
"source": [
|
| 157 |
+
"from scipy.spatial import cKDTree\n",
|
| 158 |
+
"import numpy as np\n",
|
| 159 |
+
"\n",
|
| 160 |
+
"def tree_knearest_colors(k: int, mesh, voxels):\n",
|
| 161 |
+
" tree = cKDTree(mesh.vertices)\n",
|
| 162 |
+
" distances, vertex_indices = tree.query(voxels.points, k=k)\n",
|
| 163 |
+
" voxel_colors = []\n",
|
| 164 |
+
"\n",
|
| 165 |
+
" for nearest_indices in vertex_indices:\n",
|
| 166 |
+
" neighbor_colors = mesh.visual.vertex_colors[nearest_indices]\n",
|
| 167 |
+
" average_color = np.mean(neighbor_colors, axis=0).astype(np.uint8)\n",
|
| 168 |
+
" voxel_colors.append(average_color)\n",
|
| 169 |
+
"\n",
|
| 170 |
+
" return voxel_colors\n",
|
| 171 |
+
"\n",
|
| 172 |
+
"def tree_knearest_color_mesh(k: int, mesh, voxels):\n",
|
| 173 |
+
" tree = cKDTree(mesh.vertices)\n",
|
| 174 |
+
" distances, vertex_indices = tree.query(voxels.points, k=k)\n",
|
| 175 |
+
" voxel_colors = []\n",
|
| 176 |
+
"\n",
|
| 177 |
+
" for nearest_indices in vertex_indices:\n",
|
| 178 |
+
" neighbor_colors = mesh.visual.vertex_colors[nearest_indices]\n",
|
| 179 |
+
" average_color = np.mean(neighbor_colors, axis=0).astype(np.uint8)\n",
|
| 180 |
+
" voxel_colors.append(average_color)\n",
|
| 181 |
+
"\n",
|
| 182 |
+
" # 2. Create a (X, Y, Z, 4) color matrix\n",
|
| 183 |
+
" color_matrix = np.zeros(voxels.shape + (4,), dtype=np.uint8) # Initialize with default color (e.g., transparent black)\n",
|
| 184 |
+
" color_matrix[voxels.sparse_indices[:, 0], voxels.sparse_indices[:, 1], voxels.sparse_indices[:, 2]] = voxel_colors\n",
|
| 185 |
+
"\n",
|
| 186 |
+
" # 3. Create a VoxelMesh using as_boxes() with the color matrix\n",
|
| 187 |
+
" voxel_mesh = voxels.as_boxes(colors=color_matrix)\n",
|
| 188 |
+
" return voxel_mesh"
|
| 189 |
+
]
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"cell_type": "markdown",
|
| 193 |
+
"metadata": {
|
| 194 |
+
"id": "DTphj_klvtv7"
|
| 195 |
+
},
|
| 196 |
+
"source": [
|
| 197 |
+
"### Display using scatter3d"
|
| 198 |
+
]
|
| 199 |
+
},
|
| 200 |
+
{
|
| 201 |
+
"cell_type": "code",
|
| 202 |
+
"execution_count": null,
|
| 203 |
+
"metadata": {
|
| 204 |
+
"colab": {
|
| 205 |
+
"base_uri": "https://localhost:8080/",
|
| 206 |
+
"height": 542
|
| 207 |
+
},
|
| 208 |
+
"id": "a9dn8T6Xeq-i",
|
| 209 |
+
"outputId": "6264392d-fd82-454a-80a3-18810e746e57"
|
| 210 |
+
},
|
| 211 |
+
"outputs": [],
|
| 212 |
+
"source": [
|
| 213 |
+
"colors = tree_knearest_colors(5, mesh, voxels)\n",
|
| 214 |
+
"display_voxels_px(voxels, [f\"rgb{c[0],c[1],c[2]}\" for c in colors])"
|
| 215 |
+
]
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"cell_type": "markdown",
|
| 219 |
+
"metadata": {
|
| 220 |
+
"id": "s8Ta34iYvzTY"
|
| 221 |
+
},
|
| 222 |
+
"source": [
|
| 223 |
+
"### Display using Blocks in Plotly"
|
| 224 |
+
]
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"cell_type": "code",
|
| 228 |
+
"execution_count": null,
|
| 229 |
+
"metadata": {
|
| 230 |
+
"colab": {
|
| 231 |
+
"base_uri": "https://localhost:8080/",
|
| 232 |
+
"height": 542
|
| 233 |
+
},
|
| 234 |
+
"id": "B_1Z2bXIt5TP",
|
| 235 |
+
"outputId": "ca19a34d-98a6-4741-eebc-f490b16dd56d"
|
| 236 |
+
},
|
| 237 |
+
"outputs": [],
|
| 238 |
+
"source": [
|
| 239 |
+
"import plotly.graph_objects as go\n",
|
| 240 |
+
"import numpy as np\n",
|
| 241 |
+
"\n",
|
| 242 |
+
"# Assuming you have 'voxel_grid', 'colors_blocks', and 'voxels' from your previous code\n",
|
| 243 |
+
"\n",
|
| 244 |
+
"# Create Mesh3d traces for each occupied voxel\n",
|
| 245 |
+
"mesh_data = []\n",
|
| 246 |
+
"for i in range(voxels.sparse_indices.shape[0]):\n",
|
| 247 |
+
" x, y, z = voxels.sparse_indices[i]\n",
|
| 248 |
+
" color = colors[i] # Get color from colors_blocks\n",
|
| 249 |
+
" vertices = np.array([\n",
|
| 250 |
+
" [x, y, z], [x + 1, y, z], [x + 1, y + 1, z], [x, y + 1, z],\n",
|
| 251 |
+
" [x, y, z + 1], [x + 1, y, z + 1], [x + 1, y + 1, z + 1], [x, y + 1, z + 1]\n",
|
| 252 |
+
" ])\n",
|
| 253 |
+
" faces = np.array([\n",
|
| 254 |
+
" [0, 1, 2], [0, 2, 3], # Bottom face\n",
|
| 255 |
+
" [4, 5, 6], [4, 6, 7], # Top face\n",
|
| 256 |
+
" [0, 1, 5], [0, 5, 4], # Front face\n",
|
| 257 |
+
" [2, 3, 7], [2, 7, 6], # Back face\n",
|
| 258 |
+
" [0, 3, 7], [0, 7, 4], # Left face\n",
|
| 259 |
+
" [1, 2, 6], [1, 6, 5] # Right face\n",
|
| 260 |
+
" ])\n",
|
| 261 |
+
" mesh_data.append(go.Mesh3d(\n",
|
| 262 |
+
" x=vertices[:, 0],\n",
|
| 263 |
+
" y=vertices[:, 1],\n",
|
| 264 |
+
" z=vertices[:, 2],\n",
|
| 265 |
+
" i=faces[:, 0],\n",
|
| 266 |
+
" j=faces[:, 1],\n",
|
| 267 |
+
" k=faces[:, 2],\n",
|
| 268 |
+
" color=f'rgb({color[0]}, {color[1]}, {color[2]})', # Convert to rgb string\n",
|
| 269 |
+
" flatshading=True\n",
|
| 270 |
+
" ))\n",
|
| 271 |
+
"\n",
|
| 272 |
+
"# Create Plotly figure\n",
|
| 273 |
+
"fig = go.Figure(data=mesh_data)\n",
|
| 274 |
+
"fig.show(renderer=\"colab\")"
|
| 275 |
+
]
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"cell_type": "markdown",
|
| 279 |
+
"metadata": {
|
| 280 |
+
"id": "Jxvh2XwuwFgP"
|
| 281 |
+
},
|
| 282 |
+
"source": [
|
| 283 |
+
"## Routing Algorithm 'Merge Blocks'\n",
|
| 284 |
+
"\n",
|
| 285 |
+
"We need to merge blocks into LEGO pieces. Similar colors by threshold merges into uniblock. Prefer large?"
|
| 286 |
+
]
|
| 287 |
+
},
|
| 288 |
+
{
|
| 289 |
+
"cell_type": "code",
|
| 290 |
+
"execution_count": 131,
|
| 291 |
+
"metadata": {
|
| 292 |
+
"id": "PimIkgBqs1fJ"
|
| 293 |
+
},
|
| 294 |
+
"outputs": [],
|
| 295 |
+
"source": [
|
| 296 |
+
"LEGO_COLORS_RGB = np.asarray([\n",
|
| 297 |
+
" (239, 239, 239), # White\n",
|
| 298 |
+
" (165, 165, 165), # Light Bluish Gray\n",
|
| 299 |
+
" (155, 155, 155), # Light Gray\n",
|
| 300 |
+
" (109, 110, 109), # Dark Bluish Gray\n",
|
| 301 |
+
" (88, 88, 88), # Dark Gray\n",
|
| 302 |
+
" (48, 48, 48), # Black\n",
|
| 303 |
+
" (196, 40, 28), # Red\n",
|
| 304 |
+
" (214, 0, 0), # Bright Red\n",
|
| 305 |
+
" (128, 0, 0), # Dark Red\n",
|
| 306 |
+
" (0, 85, 191), # Blue\n",
|
| 307 |
+
" (0, 51, 204), # Bright Blue\n",
|
| 308 |
+
" (0, 32, 96), # Dark Blue\n",
|
| 309 |
+
" (35, 122, 33), # Green\n",
|
| 310 |
+
" (0, 153, 0), # Bright Green\n",
|
| 311 |
+
" (0, 77, 0), # Dark Green\n",
|
| 312 |
+
" (247, 205, 24), # Yellow\n",
|
| 313 |
+
" (255, 204, 0), # Bright Yellow\n",
|
| 314 |
+
" (255, 153, 0), # Dark Yellow\n",
|
| 315 |
+
" (255, 102, 0), # Orange\n",
|
| 316 |
+
" (255, 128, 0), # Bright Orange\n",
|
| 317 |
+
" (124, 72, 36), # Brown\n",
|
| 318 |
+
" (160, 96, 53), # Light Brown\n",
|
| 319 |
+
" (215, 194, 149), # Tan\n",
|
| 320 |
+
" (144, 118, 72), # Dark Tan\n",
|
| 321 |
+
" (167, 205, 36), # Lime\n",
|
| 322 |
+
" (242, 176, 61), # Bright Light Orange\n",
|
| 323 |
+
" (247, 234, 142), # Bright Light Yellow\n",
|
| 324 |
+
" (115, 150, 200), # Medium Blue\n",
|
| 325 |
+
" (65, 165, 222), # Medium Azure\n",
|
| 326 |
+
" (137, 200, 240), # Light Azure\n",
|
| 327 |
+
" (144, 31, 118), # Magenta\n",
|
| 328 |
+
" (255, 153, 204), # Pink\n",
|
| 329 |
+
" (255, 189, 216) # Light Pink\n",
|
| 330 |
+
"])"
|
| 331 |
+
]
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"cell_type": "code",
|
| 335 |
+
"execution_count": 194,
|
| 336 |
+
"metadata": {
|
| 337 |
+
"id": "DqA4M69ZwPAi"
|
| 338 |
+
},
|
| 339 |
+
"outputs": [],
|
| 340 |
+
"source": [
|
| 341 |
+
"from scipy.spatial.distance import cdist\n",
|
| 342 |
+
"from sklearn.cluster import KMeans\n",
|
| 343 |
+
"from scipy.spatial import cKDTree\n",
|
| 344 |
+
"from colormath.color_objects import sRGBColor, LabColor\n",
|
| 345 |
+
"from colormath.color_conversions import convert_color\n",
|
| 346 |
+
"from colormath.color_diff import delta_e_cie1976\n",
|
| 347 |
+
"\n",
|
| 348 |
+
"def map_colors_to_lego(model_colors, lego_palette):\n",
|
| 349 |
+
" \"\"\"\n",
|
| 350 |
+
" cdist is optimized broadcast OP.\n",
|
| 351 |
+
" \"\"\"\n",
|
| 352 |
+
" distances = cdist(model_colors, lego_palette, 'euclidean') # Calculate Euclidean distances\n",
|
| 353 |
+
" closest_indices = np.argmin(distances, axis=1) # Find indices of minimum distances\n",
|
| 354 |
+
" return lego_palette[closest_indices]\n",
|
| 355 |
+
"\n",
|
| 356 |
+
"\n",
|
| 357 |
+
"def convert_colors_max_diff(original_colors, predefined_colors):\n",
|
| 358 |
+
" \"\"\"\n",
|
| 359 |
+
" Converts colors by minimizing the maximum channel difference.\n",
|
| 360 |
+
"\n",
|
| 361 |
+
" Args:\n",
|
| 362 |
+
" original_colors: A NumPy array of shape (N, 3) representing original RGB colors.\n",
|
| 363 |
+
" predefined_colors: A NumPy array of shape (M, 3) representing predefined RGB colors.\n",
|
| 364 |
+
"\n",
|
| 365 |
+
" Returns:\n",
|
| 366 |
+
" A NumPy array of shape (N, 3) representing converted RGB colors.\n",
|
| 367 |
+
" \"\"\"\n",
|
| 368 |
+
" diffs = np.abs(original_colors[:, np.newaxis, :] - predefined_colors)\n",
|
| 369 |
+
" max_diffs = np.max(diffs, axis=2)\n",
|
| 370 |
+
" indices = np.argmin(max_diffs, axis=1)\n",
|
| 371 |
+
"\n",
|
| 372 |
+
" converted_colors = predefined_colors[indices]\n",
|
| 373 |
+
"\n",
|
| 374 |
+
" return converted_colors\n",
|
| 375 |
+
"\n",
|
| 376 |
+
"def quantize_colors(model_colors, k: int = 16):\n",
|
| 377 |
+
" \"\"\"\n",
|
| 378 |
+
" quantize colors by fitting into 16 unique colors.\n",
|
| 379 |
+
" \"\"\"\n",
|
| 380 |
+
" original_colors = np.array(colors)[:,:3]\n",
|
| 381 |
+
"\n",
|
| 382 |
+
" kmeans = KMeans(n_clusters=k, random_state=42)\n",
|
| 383 |
+
" kmeans.fit(original_colors)\n",
|
| 384 |
+
"\n",
|
| 385 |
+
" # Get the representative colors\n",
|
| 386 |
+
" representative_colors = kmeans.cluster_centers_.astype(int)\n",
|
| 387 |
+
"\n",
|
| 388 |
+
" # Transform the original colors to representative colors\n",
|
| 389 |
+
" transformed_colors = representative_colors[kmeans.labels_]\n",
|
| 390 |
+
" return transformed_colors\n",
|
| 391 |
+
"\n",
|
| 392 |
+
"\n",
|
| 393 |
+
"def map_color_cie(model_colors, lego_palette):\n",
|
| 394 |
+
" original_lab = np.array([convert_color(sRGBColor(*rgb, is_upscaled=True), LabColor).get_value_tuple()\n",
|
| 395 |
+
" for rgb in model_colors])\n",
|
| 396 |
+
" predefined_lab = np.array([convert_color(sRGBColor(*rgb, is_upscaled=True), LabColor).get_value_tuple()\n",
|
| 397 |
+
" for rgb in lego_palette])\n",
|
| 398 |
+
"\n",
|
| 399 |
+
" original_lab = original_lab[:, np.newaxis, :] # Reshape for broadcasting\n",
|
| 400 |
+
" predefined_lab = predefined_lab[np.newaxis, :, :] # Reshape for broadcasting\n",
|
| 401 |
+
" delta_e = np.sqrt(np.sum((original_lab - predefined_lab)**2, axis=2))\n",
|
| 402 |
+
"\n",
|
| 403 |
+
" # Find closest predefined color for each original color\n",
|
| 404 |
+
" indices = np.argmin(delta_e, axis=1)\n",
|
| 405 |
+
"\n",
|
| 406 |
+
" # Transform colors\n",
|
| 407 |
+
" transformed_colors = lego_palette[indices]\n",
|
| 408 |
+
"\n",
|
| 409 |
+
" return transformed_colors\n",
|
| 410 |
+
"\n",
|
| 411 |
+
"\n",
|
| 412 |
+
"def normalize_value_to_mid(rgb, target_v=0.7):\n",
|
| 413 |
+
" import colorsys\n",
|
| 414 |
+
" r, g, b, _ = rgb\n",
|
| 415 |
+
" # Scale to [0..1]\n",
|
| 416 |
+
" rr, gg, bb = (r/255, g/255, b/255)\n",
|
| 417 |
+
" h, s, v = colorsys.rgb_to_hsv(rr, gg, bb)\n",
|
| 418 |
+
" # Force to target_v\n",
|
| 419 |
+
" rr2, gg2, bb2 = colorsys.hsv_to_rgb(h, s, target_v)\n",
|
| 420 |
+
" # Scale back to [0..255]\n",
|
| 421 |
+
" return (int(rr2*255), int(gg2*255), int(bb2*255))\n",
|
| 422 |
+
"\n",
|
| 423 |
+
"\n",
|
| 424 |
+
"def lab_color_tfm(colors: np.ndarray, lego_palette: np.ndarray) -> np.ndarray:\n",
|
| 425 |
+
" from skimage import color\n",
|
| 426 |
+
"\n",
|
| 427 |
+
" scaled = rgb_array.astype(np.float32) / 255.0\n",
|
| 428 |
+
"\n",
|
| 429 |
+
" # 2) Reshape to (N,1,3) so that rgb2lab sees it as an image of height=N, width=1, channels=3\n",
|
| 430 |
+
" reshaped = scaled.reshape((-1, 1, 3))\n",
|
| 431 |
+
"\n",
|
| 432 |
+
" # 3) Convert to Lab\n",
|
| 433 |
+
" lab_reshaped = rgb2lab(reshaped) # shape: (N,1,3)\n",
|
| 434 |
+
"\n",
|
| 435 |
+
" # 4) Reshape back to (N,3)\n",
|
| 436 |
+
" lab_array = lab_reshaped.reshape((-1, 3))\n",
|
| 437 |
+
"\n",
|
| 438 |
+
"def find_nearest_lego_colors_lab_weighted(\n",
|
| 439 |
+
" lab_colors: np.ndarray,\n",
|
| 440 |
+
" lego_palette_lab: np.ndarray,\n",
|
| 441 |
+
" lego_palette_names: list,\n",
|
| 442 |
+
" lightness_weight: float = 0.2\n",
|
| 443 |
+
") -> np.ndarray:\n",
|
| 444 |
+
" \"\"\"\n",
|
| 445 |
+
" Find the nearest LEGO color in Lab space for each input Lab color,\n",
|
| 446 |
+
" reducing the influence of Lightness (L) in the distance calculation.\n",
|
| 447 |
+
"\n",
|
| 448 |
+
" Args:\n",
|
| 449 |
+
" lab_colors (np.ndarray): (N,3) array of Lab colors (input colors).\n",
|
| 450 |
+
" lego_palette_lab (np.ndarray): (M,3) array of Lab colors (LEGO palette colors).\n",
|
| 451 |
+
" lego_palette_names (list): List of M names corresponding to the LEGO colors.\n",
|
| 452 |
+
" lightness_weight (float): Weight for the L (lightness) component in the distance calculation.\n",
|
| 453 |
+
"\n",
|
| 454 |
+
" Returns:\n",
|
| 455 |
+
" np.ndarray: (N,) array of LEGO color names corresponding to the closest match for each input color.\n",
|
| 456 |
+
" \"\"\"\n",
|
| 457 |
+
" # Expand lab_colors to (N,1,3) and lego_palette_lab to (1,M,3)\n",
|
| 458 |
+
" lab_colors_exp = lab_colors[:, np.newaxis, :] # (N,1,3)\n",
|
| 459 |
+
" lego_palette_exp = lego_palette_lab[np.newaxis, :, :] # (1,M,3)\n",
|
| 460 |
+
"\n",
|
| 461 |
+
" # Apply weights: Scale L component\n",
|
| 462 |
+
" lab_colors_exp[:, :, 0] *= lightness_weight\n",
|
| 463 |
+
" lego_palette_exp[:, :, 0] *= lightness_weight\n",
|
| 464 |
+
"\n",
|
| 465 |
+
" # Compute weighted Euclidean distance in Lab space (L2 Norm) across the last axis\n",
|
| 466 |
+
" distances = np.linalg.norm(lab_colors_exp - lego_palette_exp, axis=2) # (N,M)\n",
|
| 467 |
+
"\n",
|
| 468 |
+
" # Find the index of the minimum distance for each row\n",
|
| 469 |
+
" closest_indices = np.argmin(distances, axis=1) # (N,)\n",
|
| 470 |
+
"\n",
|
| 471 |
+
" # Map indices to LEGO color names\n",
|
| 472 |
+
" closest_colors = np.array([lego_palette_names[i] for i in closest_indices])\n",
|
| 473 |
+
"\n",
|
| 474 |
+
" return closest_colors\n",
|
| 475 |
+
"\n",
|
| 476 |
+
"\n",
|
| 477 |
+
"# Should I merge colors first or colors and bits at the same time?\n",
|
| 478 |
+
"def to_df(voxels, colors) -> pl.DataFrame:\n",
|
| 479 |
+
" df = pl.from_numpy(voxels.sparse_indices, schema=[\"x\", \"z\", \"y\"])\n",
|
| 480 |
+
" df = df.with_columns(color=pl.Series(colors))\n",
|
| 481 |
+
" return df\n"
|
| 482 |
+
]
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"cell_type": "code",
|
| 486 |
+
"execution_count": null,
|
| 487 |
+
"metadata": {
|
| 488 |
+
"colab": {
|
| 489 |
+
"base_uri": "https://localhost:8080/",
|
| 490 |
+
"height": 443
|
| 491 |
+
},
|
| 492 |
+
"id": "whURTjlRbog1",
|
| 493 |
+
"outputId": "2e575ee3-08be-4443-f034-4a02ea9901e8"
|
| 494 |
+
},
|
| 495 |
+
"outputs": [],
|
| 496 |
+
"source": [
|
| 497 |
+
"df"
|
| 498 |
+
]
|
| 499 |
+
},
|
| 500 |
+
{
|
| 501 |
+
"cell_type": "code",
|
| 502 |
+
"execution_count": null,
|
| 503 |
+
"metadata": {
|
| 504 |
+
"colab": {
|
| 505 |
+
"base_uri": "https://localhost:8080/"
|
| 506 |
+
},
|
| 507 |
+
"id": "R37iNUS_FOgd",
|
| 508 |
+
"outputId": "e01e300c-8f66-4d7e-a626-82f5ba4c0481"
|
| 509 |
+
},
|
| 510 |
+
"outputs": [],
|
| 511 |
+
"source": [
|
| 512 |
+
"np.unique(np.asarray(mid_colors), axis=0).shape,np.unique(np.asarray(colors)[:,:3], axis=0).shape"
|
| 513 |
+
]
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"cell_type": "code",
|
| 517 |
+
"execution_count": 197,
|
| 518 |
+
"metadata": {
|
| 519 |
+
"id": "QtaCdCb20tUQ"
|
| 520 |
+
},
|
| 521 |
+
"outputs": [],
|
| 522 |
+
"source": [
|
| 523 |
+
"# This merges color while walking along neighbors. It's suboptimal in that it might override a previous group with a new neighbour. Should include visited.\n",
|
| 524 |
+
"if False:\n",
|
| 525 |
+
" import pandas as pd\n",
|
| 526 |
+
" from scipy.spatial.distance import cdist\n",
|
| 527 |
+
" def get_neighbors(x, y, z):\n",
|
| 528 |
+
" \"\"\"6-connected neighbors in 3D.\"\"\"\n",
|
| 529 |
+
" return [\n",
|
| 530 |
+
" (x+1, y, z), (x-1, y, z),\n",
|
| 531 |
+
" (x, y+1, z), (x, y-1, z),\n",
|
| 532 |
+
" (x, y, z+1), (x, y, z-1)\n",
|
| 533 |
+
" ]\n",
|
| 534 |
+
"\n",
|
| 535 |
+
" def coord_to_idx(df: pl.DataFrame) -> dict[tuple[int,int,int], int]:\n",
|
| 536 |
+
" return dict(zip(zip(df[\"x\"], df[\"y\"], df[\"z\"]), range(len(df))))\n",
|
| 537 |
+
"\n",
|
| 538 |
+
" df = df.drop(\"r\", \"b\", \"g\", strict=False).with_columns(colors=pl.Series(colors))\n",
|
| 539 |
+
" coord_to_idx = coord_to_idx(df)\n",
|
| 540 |
+
" groups = {}\n",
|
| 541 |
+
" df = df.with_columns(group=pl.arange(pl.len()))\n",
|
| 542 |
+
" color_np = df[\"color\"].to_numpy()\n",
|
| 543 |
+
" color_diff = cdist(color_np, color_np, 'euclidean') # indexed diff\n",
|
| 544 |
+
"\n",
|
| 545 |
+
" for idx in range(len(df)):\n",
|
| 546 |
+
" neighbors = get_neighbors(df[idx, \"x\"], df[idx, \"y\"], df[idx, \"z\"])\n",
|
| 547 |
+
" for neighbor in neighbors:\n",
|
| 548 |
+
" if neighbor in coord_to_idx:\n",
|
| 549 |
+
" neighbor_idx = coord_to_idx[neighbor]\n",
|
| 550 |
+
" if neighbor_idx < idx:\n",
|
| 551 |
+
" continue\n",
|
| 552 |
+
" if (color_diff[idx, neighbor_idx] < 50):\n",
|
| 553 |
+
" df[neighbor_idx, \"group\"] = df[idx, \"group\"] # bad mutation...\n",
|
| 554 |
+
"\n",
|
| 555 |
+
" color_list = pl.col(\"color\").arr\n",
|
| 556 |
+
" df_group_color = df.with_columns(r=color_list.get(0), b=color_list.get(1), g=color_list.get(2)).group_by(\"group\").agg(pl.mean(\"r\", \"g\", \"b\"))\n",
|
| 557 |
+
" df = df.join(df_group_color, on=\"group\")\n",
|
| 558 |
+
" df.head()\n",
|
| 559 |
+
" df = df.with_columns(color_rgb=pl.concat_str(\"r\", \"b\", \"g\", separator=\",\")).with_columns(\n",
|
| 560 |
+
" color_rgb = \"rgb(\"+pl.col(\"color_rgb\")+\")\"\n",
|
| 561 |
+
" )\n",
|
| 562 |
+
" display_voxels_px(voxels, df[\"color_rgb\"])"
|
| 563 |
+
]
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"cell_type": "code",
|
| 567 |
+
"execution_count": null,
|
| 568 |
+
"metadata": {
|
| 569 |
+
"colab": {
|
| 570 |
+
"base_uri": "https://localhost:8080/",
|
| 571 |
+
"height": 542
|
| 572 |
+
},
|
| 573 |
+
"id": "EzdZxLoREyMc",
|
| 574 |
+
"outputId": "46a654f0-436d-455c-ee81-dd0f8c9a20b0"
|
| 575 |
+
},
|
| 576 |
+
"outputs": [],
|
| 577 |
+
"source": [
|
| 578 |
+
"mid_colors = [normalize_value_to_mid(x, 0.8) for x in colors]\n",
|
| 579 |
+
"display_voxels_px(voxels, [f\"rgb{c[0],c[1],c[2]}\" for c in mid_colors])"
|
| 580 |
+
]
|
| 581 |
+
},
|
| 582 |
+
{
|
| 583 |
+
"cell_type": "code",
|
| 584 |
+
"execution_count": null,
|
| 585 |
+
"metadata": {
|
| 586 |
+
"colab": {
|
| 587 |
+
"base_uri": "https://localhost:8080/",
|
| 588 |
+
"height": 542
|
| 589 |
+
},
|
| 590 |
+
"id": "31jo_asa2uW8",
|
| 591 |
+
"outputId": "4173b76f-9a8e-4091-8769-6f9adcb36ae7"
|
| 592 |
+
},
|
| 593 |
+
"outputs": [],
|
| 594 |
+
"source": [
|
| 595 |
+
"# map_colors_to_lego, map_color_cie, quantize_colors, convert_colors_max_diff\n",
|
| 596 |
+
"color_lego = map_color_cie(np.asarray(mid_colors)[:,:3], np.asarray(LEGO_COLORS_RGB))\n",
|
| 597 |
+
"display_voxels_px(voxels, [f\"rgb{c[0],c[1],c[2]}\" for c in color_lego])"
|
| 598 |
+
]
|
| 599 |
+
},
|
| 600 |
+
{
|
| 601 |
+
"cell_type": "code",
|
| 602 |
+
"execution_count": null,
|
| 603 |
+
"metadata": {
|
| 604 |
+
"colab": {
|
| 605 |
+
"base_uri": "https://localhost:8080/",
|
| 606 |
+
"height": 542
|
| 607 |
+
},
|
| 608 |
+
"id": "VgXRAunt5dKW",
|
| 609 |
+
"outputId": "e6b06c30-8683-4173-a4e4-5f9ec3facdfe"
|
| 610 |
+
},
|
| 611 |
+
"outputs": [],
|
| 612 |
+
"source": [
|
| 613 |
+
"color_lego = quantize_colors(np.asarray(mid_colors)[:,:3], k=16)\n",
|
| 614 |
+
"display_voxels_px(voxels, [f\"rgb{c[0],c[1],c[2]}\" for c in color_lego])"
|
| 615 |
+
]
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"cell_type": "code",
|
| 619 |
+
"execution_count": null,
|
| 620 |
+
"metadata": {
|
| 621 |
+
"colab": {
|
| 622 |
+
"base_uri": "https://localhost:8080/",
|
| 623 |
+
"height": 542
|
| 624 |
+
},
|
| 625 |
+
"id": "t1QvgKbC3lsL",
|
| 626 |
+
"outputId": "7558cb5c-de8b-4bb5-f0b2-f8685c2ad3b4"
|
| 627 |
+
},
|
| 628 |
+
"outputs": [],
|
| 629 |
+
"source": [
|
| 630 |
+
"display_voxels_px(voxels, [f\"rgb{c[0],c[1],c[2]}\" for c in colors])"
|
| 631 |
+
]
|
| 632 |
+
},
|
| 633 |
+
{
|
| 634 |
+
"cell_type": "code",
|
| 635 |
+
"execution_count": 202,
|
| 636 |
+
"metadata": {
|
| 637 |
+
"id": "aEgErcK3lCxq"
|
| 638 |
+
},
|
| 639 |
+
"outputs": [],
|
| 640 |
+
"source": [
|
| 641 |
+
"df = to_df(voxels, color_lego)"
|
| 642 |
+
]
|
| 643 |
+
},
|
| 644 |
+
{
|
| 645 |
+
"cell_type": "code",
|
| 646 |
+
"execution_count": null,
|
| 647 |
+
"metadata": {
|
| 648 |
+
"colab": {
|
| 649 |
+
"base_uri": "https://localhost:8080/",
|
| 650 |
+
"height": 443
|
| 651 |
+
},
|
| 652 |
+
"id": "kAGJZc_9f-3e",
|
| 653 |
+
"outputId": "bd950ea5-6832-4813-97f1-2ccd1b741685"
|
| 654 |
+
},
|
| 655 |
+
"outputs": [],
|
| 656 |
+
"source": [
|
| 657 |
+
"df"
|
| 658 |
+
]
|
| 659 |
+
},
|
| 660 |
+
{
|
| 661 |
+
"cell_type": "code",
|
| 662 |
+
"execution_count": 204,
|
| 663 |
+
"metadata": {
|
| 664 |
+
"id": "Meq7cqqhiis1"
|
| 665 |
+
},
|
| 666 |
+
"outputs": [],
|
| 667 |
+
"source": [
|
| 668 |
+
"BLOCK_SIZES = np.asarray([\n",
|
| 669 |
+
" [1,1],[1,2],[1,3],[1,4],[1,6],[1,8],\n",
|
| 670 |
+
" [2,2],[2,3],[2,4],[2,6],[2,8]\n",
|
| 671 |
+
"])\n",
|
| 672 |
+
"coords = {(x,y,z) for x,y,z in df.select(\"x\", \"y\", \"z\").to_numpy()}"
|
| 673 |
+
]
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"cell_type": "code",
|
| 677 |
+
"execution_count": null,
|
| 678 |
+
"metadata": {
|
| 679 |
+
"colab": {
|
| 680 |
+
"base_uri": "https://localhost:8080/"
|
| 681 |
+
},
|
| 682 |
+
"id": "VMqkFCJdg-NZ",
|
| 683 |
+
"outputId": "f5725408-f8ad-49ed-e2dc-9f05d44d720a"
|
| 684 |
+
},
|
| 685 |
+
"outputs": [],
|
| 686 |
+
"source": [
|
| 687 |
+
"# TODO: make sure user flips figure to stand on z = 0, z being height axis.\n",
|
| 688 |
+
"\n",
|
| 689 |
+
"def get_xy_neighbors(x, y, z):\n",
|
| 690 |
+
" return [(x-1,y,z), (x+1,y,z), (x, y-1,z), (x,y+1,z)]\n",
|
| 691 |
+
"\n",
|
| 692 |
+
"y_group_1 = df.filter((pl.col(\"z\") == 16) & (pl.col(\"color\") == [44, 94, 130]))\n",
|
| 693 |
+
"group_coords = {(x,y,z) for x,y,z in y_group_1[[\"x\", \"y\", \"z\"]].to_numpy()}\n",
|
| 694 |
+
"\n",
|
| 695 |
+
"for row in range(len(y_group_1)):\n",
|
| 696 |
+
" for neighbor in get_xy_neighbors(y_group_1[row, \"x\"], y_group_1[row, \"y\"], y_group_1[row, \"z\"]):\n",
|
| 697 |
+
" if neighbor in group_coords:\n",
|
| 698 |
+
" \"found\""
|
| 699 |
+
]
|
| 700 |
+
},
|
| 701 |
+
{
|
| 702 |
+
"cell_type": "code",
|
| 703 |
+
"execution_count": null,
|
| 704 |
+
"metadata": {
|
| 705 |
+
"id": "_ZuNj25io0OK"
|
| 706 |
+
},
|
| 707 |
+
"outputs": [],
|
| 708 |
+
"source": [
|
| 709 |
+
"def merge_into_bricks(grouped_df: pl.DataFrame) -> pl.DataFrame:\n",
|
| 710 |
+
" color_str = grouped_df[0,\"color_str\"]\n",
|
| 711 |
+
" z_val = grouped_df[0, \"z\"]\n",
|
| 712 |
+
"\n",
|
| 713 |
+
" xy_grid = np.zeros((grouped_df[\"x\"].max() + 1, grouped_df[\"y\"].max() +1), dtype=bool)\n",
|
| 714 |
+
" xy_grid[grouped_df[\"x\"], grouped_df[\"y\"]] = 1\n",
|
| 715 |
+
" out_rows = []\n",
|
| 716 |
+
" grouped_df = grouped_df.sort(by=[\"x\", \"y\"])\n",
|
| 717 |
+
" coords = {(x,y) for x,y in grouped_df[[\"x\", \"y\"]].to_numpy()}\n",
|
| 718 |
+
"\n",
|
| 719 |
+
" while coords:\n",
|
| 720 |
+
" (x0, y0) = coords.pop()\n",
|
| 721 |
+
" coords.add((x0, y0)) # reinsert until placed\n",
|
| 722 |
+
"\n",
|
| 723 |
+
" placed = False\n",
|
| 724 |
+
" for (width, height) in BLOCK_SIZES:\n",
|
| 725 |
+
" if x0+width > xy_grid.shape[0] or y0+height > xy_grid.shape[1]:\n",
|
| 726 |
+
" continue\n",
|
| 727 |
+
" if np.all(xy_grid[x0:x0+width, y0:y0+height] == 1):\n",
|
| 728 |
+
" place_block(x0, y0, width, height, coords)\n",
|
| 729 |
+
" out_rows.append((color_str, z_val, x0, y0, width, height))\n",
|
| 730 |
+
" placed = True\n",
|
| 731 |
+
" break\n",
|
| 732 |
+
"\n",
|
| 733 |
+
" if not placed:\n",
|
| 734 |
+
" # fallback to 1x1\n",
|
| 735 |
+
" coords.remove((x0, y0))\n",
|
| 736 |
+
" out_rows.append((color_str, z_val, x0, y0, 1, 1))\n",
|
| 737 |
+
"\n",
|
| 738 |
+
" return pl.DataFrame(\n",
|
| 739 |
+
" {\n",
|
| 740 |
+
" \"color_str\": [row[0] for row in out_rows],\n",
|
| 741 |
+
" \"z\": [row[1] for row in out_rows],\n",
|
| 742 |
+
" \"x\": [row[2] for row in out_rows],\n",
|
| 743 |
+
" \"y\": [row[3] for row in out_rows],\n",
|
| 744 |
+
" \"width\": [row[4] for row in out_rows],\n",
|
| 745 |
+
" \"height\": [row[5] for row in out_rows],\n",
|
| 746 |
+
" }\n",
|
| 747 |
+
" )\n",
|
| 748 |
+
"\n",
|
| 749 |
+
"def can_place_block(x0, y0, w, h, coords):\n",
|
| 750 |
+
" for xx in range(x0, x0 + w):\n",
|
| 751 |
+
" for yy in range(y0, y0 + h):\n",
|
| 752 |
+
" if (xx, yy) not in coords:\n",
|
| 753 |
+
" return False\n",
|
| 754 |
+
" return True\n",
|
| 755 |
+
"\n",
|
| 756 |
+
"def place_block(x0, y0, w, h, coords):\n",
|
| 757 |
+
" for xx in range(x0, x0 + w):\n",
|
| 758 |
+
" for yy in range(y0, y0 + h):\n",
|
| 759 |
+
" coords.remove((xx, yy))"
|
| 760 |
+
]
|
| 761 |
+
},
|
| 762 |
+
{
|
| 763 |
+
"cell_type": "code",
|
| 764 |
+
"execution_count": null,
|
| 765 |
+
"metadata": {
|
| 766 |
+
"id": "edX0wBA7_O-g"
|
| 767 |
+
},
|
| 768 |
+
"outputs": [],
|
| 769 |
+
"source": [
|
| 770 |
+
"def merge_cells_recursive(df_group: pl.DataFrame, coords: set[tuple[int, int, int]]) -> pl.DataFrame:\n",
|
| 771 |
+
" # Merge by xy_grid as above.\n",
|
| 772 |
+
"\n",
|
| 773 |
+
" pass\n",
|
| 774 |
+
"\n",
|
| 775 |
+
"\n",
|
| 776 |
+
"def merge_cells(df_group: pl.DataFrame, coords: set[tuple[int, int, int]]) -> pl.DataFrame:\n",
|
| 777 |
+
" # df [x, y, z, x2, y2, z2, color] (merged)\n",
|
| 778 |
+
" # df [x, y, z, color] (unmerged)\n",
|
| 779 |
+
" pass\n",
|
| 780 |
+
"\n",
|
| 781 |
+
"BLOCK_SIZES = [\n",
|
| 782 |
+
" [1,1],[1,2],[1,3],[1,4],[1,6],[1,8],\n",
|
| 783 |
+
" [2,1],[3,1],[4,1],[6,1],[8,1],\n",
|
| 784 |
+
" [2,2],[2,3],[2,4],[2,6],[2,8],\n",
|
| 785 |
+
" [3,2],[4,2],[6,2],[8,2]\n",
|
| 786 |
+
"]\n",
|
| 787 |
+
"# Sort array by area, largest first.\n",
|
| 788 |
+
"BLOCK_SIZES.sort(key=lambda x: x[0]*x[1], reverse=True)\n",
|
| 789 |
+
"\n",
|
| 790 |
+
"coords = {(x,y,z) for x,y,z in df.select(\"x\", \"y\", \"z\").to_numpy()}\n",
|
| 791 |
+
"# Colors already merged.\n",
|
| 792 |
+
"df = df.with_columns(color_str = pl.col(\"color\").cast(pl.List(pl.String)).list.join(\"_\"))\n",
|
| 793 |
+
"merge_into_bricks(df.filter(pl.col(\"z\") == 17))\n",
|
| 794 |
+
"(df\n",
|
| 795 |
+
" .group_by(\"color_str\", \"z\")\n",
|
| 796 |
+
" .map_groups(merge_into_bricks)\n",
|
| 797 |
+
" .select(w_h=pl.struct(\"width\", \"height\").value_counts()).unnest(\"w_h\")\n",
|
| 798 |
+
")\n",
|
| 799 |
+
"\n"
|
| 800 |
+
]
|
| 801 |
+
},
|
| 802 |
+
{
|
| 803 |
+
"cell_type": "markdown",
|
| 804 |
+
"metadata": {
|
| 805 |
+
"id": "8BUgE9SYv3Y2"
|
| 806 |
+
},
|
| 807 |
+
"source": [
|
| 808 |
+
"## Utils"
|
| 809 |
+
]
|
| 810 |
+
},
|
| 811 |
+
{
|
| 812 |
+
"cell_type": "markdown",
|
| 813 |
+
"metadata": {
|
| 814 |
+
"id": "pSnD2PwLv4wV"
|
| 815 |
+
},
|
| 816 |
+
"source": [
|
| 817 |
+
"### Enhance Brightness, Gamma and Saturation (color)"
|
| 818 |
+
]
|
| 819 |
+
},
|
| 820 |
+
{
|
| 821 |
+
"cell_type": "code",
|
| 822 |
+
"execution_count": null,
|
| 823 |
+
"metadata": {
|
| 824 |
+
"colab": {
|
| 825 |
+
"base_uri": "https://localhost:8080/",
|
| 826 |
+
"height": 521
|
| 827 |
+
},
|
| 828 |
+
"collapsed": true,
|
| 829 |
+
"id": "DulCp6to6adh",
|
| 830 |
+
"outputId": "6b64e715-80b0-43a2-ed74-c171e5664431"
|
| 831 |
+
},
|
| 832 |
+
"outputs": [],
|
| 833 |
+
"source": [
|
| 834 |
+
"def enhance_mesh_colors_vectorized(mesh, saturation_boost=1.5, brightness_factor=1.2, gamma=1.8):\n",
|
| 835 |
+
" \"\"\"\n",
|
| 836 |
+
" Enhances the saturation, brightness, and optionally applies gamma correction to mesh colors (vectorized).\n",
|
| 837 |
+
"\n",
|
| 838 |
+
" Args:\n",
|
| 839 |
+
" mesh: The trimesh mesh object.\n",
|
| 840 |
+
" saturation_boost: Factor to boost saturation ( > 1 increases saturation).\n",
|
| 841 |
+
" brightness_factor: Factor to adjust brightness ( > 1 increases brightness).\n",
|
| 842 |
+
" gamma: Gamma value for gamma correction (typically between 1.8 and 2.2).\n",
|
| 843 |
+
" \"\"\"\n",
|
| 844 |
+
" # Convert RGB to HSV (vectorized)\n",
|
| 845 |
+
" colors = mesh.visual.vertex_colors.astype(np.float32) / 255.0 # Normalize to 0-1\n",
|
| 846 |
+
" hsv_colors = np.array([colorsys.rgb_to_hsv(r, g, b) for r, g, b, a in colors])\n",
|
| 847 |
+
"\n",
|
| 848 |
+
" # Boost saturation (vectorized)\n",
|
| 849 |
+
" hsv_colors[:, 1] = np.minimum(hsv_colors[:, 1] * saturation_boost, 1.0)\n",
|
| 850 |
+
"\n",
|
| 851 |
+
" # Adjust brightness (vectorized)\n",
|
| 852 |
+
" hsv_colors[:, 2] = np.minimum(hsv_colors[:, 2] * brightness_factor, 1.0)\n",
|
| 853 |
+
"\n",
|
| 854 |
+
" # Gamma correction (vectorized)\n",
|
| 855 |
+
" hsv_colors[:, 2] = hsv_colors[:, 2]**(1/gamma)\n",
|
| 856 |
+
"\n",
|
| 857 |
+
" # Convert back to RGB (vectorized)\n",
|
| 858 |
+
" rgb_colors = np.array([colorsys.hsv_to_rgb(h, s, v) for h, s, v in hsv_colors])\n",
|
| 859 |
+
"\n",
|
| 860 |
+
" # Add alpha channel back\n",
|
| 861 |
+
" rgb_colors = np.concatenate((rgb_colors, colors[:, 3:]), axis=1)\n",
|
| 862 |
+
"\n",
|
| 863 |
+
" # Denormalize and set back to mesh\n",
|
| 864 |
+
" mesh = mesh.copy()\n",
|
| 865 |
+
" mesh.visual.vertex_colors = (rgb_colors * 255).astype(np.uint8)\n",
|
| 866 |
+
" return mesh\n",
|
| 867 |
+
"\n",
|
| 868 |
+
"# ... (load mesh)\n",
|
| 869 |
+
"\n",
|
| 870 |
+
"# Enhance colors (vectorized)\n",
|
| 871 |
+
"enhance_mesh_colors_vectorized(mesh, saturation_boost=1.8, brightness_factor=1.2, gamma=2.0).show()"
|
| 872 |
+
]
|
| 873 |
+
},
|
| 874 |
+
{
|
| 875 |
+
"cell_type": "markdown",
|
| 876 |
+
"metadata": {
|
| 877 |
+
"id": "AHpn3zunv8IC"
|
| 878 |
+
},
|
| 879 |
+
"source": [
|
| 880 |
+
"### Show RGB"
|
| 881 |
+
]
|
| 882 |
+
},
|
| 883 |
+
{
|
| 884 |
+
"cell_type": "code",
|
| 885 |
+
"execution_count": null,
|
| 886 |
+
"metadata": {
|
| 887 |
+
"id": "LYD6Bxf_jvd2"
|
| 888 |
+
},
|
| 889 |
+
"outputs": [],
|
| 890 |
+
"source": [
|
| 891 |
+
"from IPython.display import display, HTML\n",
|
| 892 |
+
"def show_rgb(rgb_color):\n",
|
| 893 |
+
" html_code = f'<div style=\"background-color: rgb{rgb_color}; width: 100px; height: 100px;\"></div>'\n",
|
| 894 |
+
"\n",
|
| 895 |
+
" display(HTML(html_code))"
|
| 896 |
+
]
|
| 897 |
+
},
|
| 898 |
+
{
|
| 899 |
+
"cell_type": "markdown",
|
| 900 |
+
"metadata": {
|
| 901 |
+
"id": "DOFUBRglv9X6"
|
| 902 |
+
},
|
| 903 |
+
"source": [
|
| 904 |
+
"### Validate Points similar"
|
| 905 |
+
]
|
| 906 |
+
},
|
| 907 |
+
{
|
| 908 |
+
"cell_type": "code",
|
| 909 |
+
"execution_count": null,
|
| 910 |
+
"metadata": {
|
| 911 |
+
"id": "aQOHdarW-Dt-"
|
| 912 |
+
},
|
| 913 |
+
"outputs": [],
|
| 914 |
+
"source": [
|
| 915 |
+
"import plotly.graph_objects as go\n",
|
| 916 |
+
"import numpy as np\n",
|
| 917 |
+
"def validate_points_similar(index: int):\n",
|
| 918 |
+
" \"\"\"N.B. Uses attributes available in notebook!\"\"\"\n",
|
| 919 |
+
" # Create Plotly figure\n",
|
| 920 |
+
" fig = go.Figure(data=[\n",
|
| 921 |
+
" go.Mesh3d(\n",
|
| 922 |
+
" x=mesh.vertices[:, 0],\n",
|
| 923 |
+
" y=mesh.vertices[:, 1],\n",
|
| 924 |
+
" z=mesh.vertices[:, 2],\n",
|
| 925 |
+
" i=mesh.faces[:, 0],\n",
|
| 926 |
+
" j=mesh.faces[:, 1],\n",
|
| 927 |
+
" k=mesh.faces[:, 2],\n",
|
| 928 |
+
" color='lightgray',\n",
|
| 929 |
+
" opacity=0.8\n",
|
| 930 |
+
" ),\n",
|
| 931 |
+
" go.Scatter3d(\n",
|
| 932 |
+
" x=mesh.vertices[vertex_indices[index:index+1], 0],\n",
|
| 933 |
+
" y=mesh.vertices[vertex_indices[index:index+1], 1],\n",
|
| 934 |
+
" z=mesh.vertices[vertex_indices[index:index+1], 2],\n",
|
| 935 |
+
" mode='markers',\n",
|
| 936 |
+
" marker=dict(size=5, color='red'),\n",
|
| 937 |
+
" name='Mesh Vertex'\n",
|
| 938 |
+
" ),\n",
|
| 939 |
+
" go.Scatter3d(\n",
|
| 940 |
+
" x=[voxels.points[index, 0]],\n",
|
| 941 |
+
" y=[voxels.points[index, 1]],\n",
|
| 942 |
+
" z=[voxels.points[index, 2]],\n",
|
| 943 |
+
" mode='markers',\n",
|
| 944 |
+
" marker=dict(size=5, color='blue'),\n",
|
| 945 |
+
" name='Voxel Point'\n",
|
| 946 |
+
" )\n",
|
| 947 |
+
" ])\n",
|
| 948 |
+
"\n",
|
| 949 |
+
" # Set layout (axis labels, title)\n",
|
| 950 |
+
" fig.update_layout(\n",
|
| 951 |
+
" scene=dict(\n",
|
| 952 |
+
" xaxis_title='X',\n",
|
| 953 |
+
" yaxis_title='Y',\n",
|
| 954 |
+
" zaxis_title='Z'\n",
|
| 955 |
+
" ),\n",
|
| 956 |
+
" title='Mesh with Two Corresponding Points'\n",
|
| 957 |
+
" )\n",
|
| 958 |
+
"\n",
|
| 959 |
+
" # Enable interactive mode for Colab\n",
|
| 960 |
+
" fig.show(renderer=\"colab\")"
|
| 961 |
+
]
|
| 962 |
+
},
|
| 963 |
+
{
|
| 964 |
+
"cell_type": "code",
|
| 965 |
+
"execution_count": null,
|
| 966 |
+
"metadata": {
|
| 967 |
+
"colab": {
|
| 968 |
+
"base_uri": "https://localhost:8080/",
|
| 969 |
+
"height": 542
|
| 970 |
+
},
|
| 971 |
+
"id": "DBPOtFzAhtVJ",
|
| 972 |
+
"outputId": "c8b39ebf-c8c6-4966-95ab-54fe3484a4ae"
|
| 973 |
+
},
|
| 974 |
+
"outputs": [],
|
| 975 |
+
"source": [
|
| 976 |
+
"validate_points_similar(200)"
|
| 977 |
+
]
|
| 978 |
+
}
|
| 979 |
+
],
|
| 980 |
+
"metadata": {
|
| 981 |
+
"colab": {
|
| 982 |
+
"collapsed_sections": [
|
| 983 |
+
"pSnD2PwLv4wV"
|
| 984 |
+
],
|
| 985 |
+
"provenance": []
|
| 986 |
+
},
|
| 987 |
+
"kernelspec": {
|
| 988 |
+
"display_name": "Python 3",
|
| 989 |
+
"name": "python3"
|
| 990 |
+
},
|
| 991 |
+
"language_info": {
|
| 992 |
+
"codemirror_mode": {
|
| 993 |
+
"name": "ipython",
|
| 994 |
+
"version": 3
|
| 995 |
+
},
|
| 996 |
+
"file_extension": ".py",
|
| 997 |
+
"mimetype": "text/x-python",
|
| 998 |
+
"name": "python",
|
| 999 |
+
"nbconvert_exporter": "python",
|
| 1000 |
+
"pygments_lexer": "ipython3",
|
| 1001 |
+
"version": "3.13.1"
|
| 1002 |
+
}
|
| 1003 |
+
},
|
| 1004 |
+
"nbformat": 4,
|
| 1005 |
+
"nbformat_minor": 0
|
| 1006 |
+
}
|
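The closing notebook cells above define show_rgb (renders a color swatch via inline HTML) and validate_points_similar (overlays one voxel centre and its nearest mesh vertex to sanity-check the KDTree color mapping). A minimal standalone sketch of that same check, assuming mesh.obj from this commit loads as a single mesh; the divisor of 32 mirrors the "Medium (32)" setting used in app.py:

# Sketch only: the notebook's nearest-vertex sanity check, outside Colab.
import trimesh
from scipy.spatial import cKDTree

mesh = trimesh.load("mesh.obj", force="mesh")            # force a single Trimesh
voxels = mesh.voxelized(pitch=mesh.extents.max() / 32)   # same pitch rule as voxelize()
tree = cKDTree(mesh.vertices)
distances, vertex_indices = tree.query(voxels.points, k=1)

# Voxel centres should sit close to their nearest vertex; distances far larger
# than one pitch would indicate a broken voxel-to-vertex color mapping.
print(distances.max(), mesh.extents.max() / 32)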
README.md
CHANGED
@@ -1,12 +1,8 @@
 ---
-title:
-emoji: 🐠
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 5.12.0
+title: Image_2_Lego
 app_file: app.py
-
----
-
-
+sdk: gradio
+sdk_version: 5.9.1
+---
+# img2lego
+Apply algorithms and Deep Learning to transform a single image into a 3D Lego build.
app.py
ADDED
@@ -0,0 +1,366 @@
from gradio_client import Client, handle_file
from pathlib import Path
import gradio as gr
import numpy as np
from sklearn.cluster import KMeans
import trimesh
import plotly.graph_objects as go
import plotly.express as px
import polars as pl
from scipy.spatial import cKDTree

from constants import BLOCK_SIZES, LEGO_COLORS_RGB


def get_client() -> Client:
    return Client("TencentARC/InstantMesh")  # TODO: enable global client.


def generate_mesh(img: Path | str, seed: int = 42) -> str:
    """Takes an image (path or bytes) and returns the path (str) to the generated .obj file."""

    client = get_client()
    result = client.predict(
        input_image=handle_file(img), do_remove_background=True, api_name="/preprocess"
    )
    result = client.predict(
        input_image=handle_file(result),
        sample_steps=75,
        sample_seed=seed,
        api_name="/generate_mvs",
    )
    result = client.predict(api_name="/make3d")
    obj_file = result[0]

    return obj_file


# ---- STEP 4: SELECT VOXEL SIZE ----
def voxelize(mesh_path: str | Path, resolution: str):
    resolution = {"Small (16)": 16, "Medium (32)": 32, "Large (64)": 64}[resolution]

    mesh = trimesh.load(mesh_path)
    bounds = mesh.bounds
    voxel_size = (bounds[1] - bounds[0]).max() / resolution  # pitch
    voxels = mesh.voxelized(pitch=voxel_size)
    colors = tree_knearest_colors(1, mesh, voxels)  # one is faster and good enough.
    mesh_state = {"voxels": voxels, "mesh": mesh, "colors": colors}

    return mesh_state


def build_scene(mesh, voxels):
    """Writes a trimesh scene to an .obj file"""
    voxels_mesh = voxels.as_boxes().apply_translation((1.5, 0, 0))
    scene = trimesh.Scene([mesh, voxels_mesh])
    scene.export("scene.obj")

    return "scene.obj"


# ---- STEP 5: VISUALIZE VOXELS ----
def quantize_colors(colors, k: int = 16):
    """
    Quantize colors by clustering them into k (default 16) representative colors.
    """
    original_colors = np.array(colors)[:, :3]

    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(original_colors)

    # Get the representative colors
    representative_colors = kmeans.cluster_centers_.astype(int)

    # Transform the original colors to representative colors
    transformed_colors = representative_colors[kmeans.labels_]

    return transformed_colors


def lego_colors(colors):
    """
    Snap each color to its closest match in the LEGO color palette.
    """
    original_colors = np.array(colors)[:, :3]
    # Use scipy cdist to calculate the euclidean distance between the original colors and LEGO_COLORS_RGB.
    from scipy.spatial.distance import cdist

    distances = cdist(original_colors, LEGO_COLORS_RGB, metric="sqeuclidean")
    distances = np.sqrt(distances)
    closest = np.argmin(distances, axis=1)

    return LEGO_COLORS_RGB[closest]


def pl_color_to_str():
    color_arr = pl.col("color").arr
    return pl.format(
        "rgb({},{},{})", color_arr.get(0), color_arr.get(1), color_arr.get(2)
    )


def visualize_voxels(mesh_state):
    # Step 1: Extract Colors
    # colors = tree_knearest_colors(5, mesh_state["mesh"], mesh_state["voxels"])
    # Step 2: Lego'ify Colors
    colors = mesh_state["colors"]
    # colors = quantize_colors(colors)
    # Step 3: Visualize
    voxels = mesh_state["voxels"]
    # Convert the occupied voxel indices to a Polars DataFrame
    df = pl.from_numpy(voxels.sparse_indices, schema=["x", "z", "y"])
    df = df.with_columns(color=pl.Series(colors)).with_columns(
        color_str=pl_color_to_str()
    )

    return (
        px.scatter_3d(
            df,
            x="x",
            y="y",
            z="z",
            color="color_str",
            color_discrete_map="identity",
            symbol=["square"] * len(df),
            symbol_map="identity",
        ),
        df,
    )


def tree_knearest_colors(k: int, mesh, voxels):
    tree = cKDTree(mesh.vertices)
    distances, vertex_indices = tree.query(voxels.points, k=k)

    if k == 1:
        return mesh.visual.vertex_colors[vertex_indices]

    voxel_colors = []

    for nearest_indices in vertex_indices:
        neighbor_colors = mesh.visual.vertex_colors[nearest_indices]
        average_color = np.mean(neighbor_colors, axis=0).astype(np.uint8)
        voxel_colors.append(average_color)

    return voxel_colors


# ---- STEP 6: ADJUST BRIGHTNESS ----
# def adjust_brightness(image, brightness):
#     adjusted_image = cv2.convertScaleAbs(image, alpha=brightness)
#     return adjusted_image


# ---- STEP 8: LEGO BUILD ANIMATION ----
def merge_into_bricks(grouped_df: pl.DataFrame, BLOCK_SIZES) -> pl.DataFrame:
    color_str = grouped_df[0, "color_str"]
    z_val = grouped_df[0, "z"]

    xy_grid = np.zeros(
        (grouped_df["x"].max() + 1, grouped_df["y"].max() + 1), dtype=bool
    )
    xy_grid[grouped_df["x"], grouped_df["y"]] = 1
    out_rows = []
    grouped_df = grouped_df.sort(by=["x", "y"])
    coords = {(x, y) for x, y in grouped_df[["x", "y"]].to_numpy()}

    while coords:
        (x0, y0) = coords.pop()
        coords.add((x0, y0))  # reinsert until placed

        placed = False
        for width, height in BLOCK_SIZES:
            if x0 + width > xy_grid.shape[0] or y0 + height > xy_grid.shape[1]:
                continue
            if np.all(xy_grid[x0 : x0 + width, y0 : y0 + height] == 1):
                place_block(x0, y0, width, height, coords)
                xy_grid[x0 : x0 + width, y0 : y0 + height] = 0  # remove from xy_grid
                out_rows.append((color_str, z_val, x0, y0, width, height))
                placed = True
                break

        if not placed:
            # fallback to 1x1
            coords.remove((x0, y0))
            out_rows.append((color_str, z_val, x0, y0, 1, 1))

    return pl.DataFrame(
        {
            "color_str": [row[0] for row in out_rows],
            "z": [row[1] for row in out_rows],
            "x": [row[2] for row in out_rows],
            "y": [row[3] for row in out_rows],
            "width": [row[4] for row in out_rows],
            "height": [row[5] for row in out_rows],
        }
    )


def can_place_block(x0, y0, w, h, coords):
    for xx in range(x0, x0 + w):
        for yy in range(y0, y0 + h):
            if (xx, yy) not in coords:
                return False
    return True


def place_block(x0, y0, w, h, coords):
    for xx in range(x0, x0 + w):
        for yy in range(y0, y0 + h):
            coords.remove((xx, yy))


# Function to generate vertices for a rectangular prism (brick)
def create_brick(x, y, z, width, height, depth=1, color="gray"):
    return go.Mesh3d(
        x=[x, x + width, x + width, x, x, x + width, x + width, x],  # X-coordinates
        y=[y, y, y + height, y + height, y, y, y + height, y + height],  # Y-coordinates
        z=[z, z, z, z, z + depth, z + depth, z + depth, z + depth],  # Z-coordinates
        color=color,
        alphahull=-1,
        i=[7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
        j=[3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
        k=[0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],
        name=f"Z={z}",
    )


def get_range(series: pl.Series) -> tuple[int, int]:
    return series.min(), series.max()


def animate_lego_build(df_state):
    # Colors already merged.
    df: pl.DataFrame = df_state
    df = df.with_columns(color=quantize_colors(df["color"])).with_columns(
        color_str=pl_color_to_str()
    )
    # Quantize Colors... Need to split string and use..
    merged_df = df.group_by("color_str", "z").map_groups(
        lambda grp: merge_into_bricks(grp, BLOCK_SIZES)
    )

    fig = go.Figure()
    fig.update_layout(
        scene=dict(
            xaxis=dict(range=get_range(df["x"]), autorange=False),
            yaxis=dict(range=get_range(df["y"]), autorange=False),
            zaxis=dict(range=get_range(df["z"]), autorange=False),
        )
    )

    # Add each brick to the plot
    for z in merged_df["z"].unique().sort():
        for row in merged_df.filter(pl.col("z") == z).iter_rows(named=True):
            fig.add_trace(
                create_brick(
                    x=row["x"],
                    y=row["y"],
                    z=row["z"],
                    width=row["width"],
                    height=row["height"],
                    color=row["color_str"],
                )
            )
        # frame_jpgs.append(f"frame_z_{z}.jpg")
        # if not Path(frame_jpgs[-1]).exists():
        #     fig.write_image(frame_jpgs[-1])

    return fig  # , frame_jpgs


# ---- GRADIO UI ----
with gr.Blocks() as demo:
    gr.Markdown("# 🧱 **Image 2 Lego Builder** 🧱")

    # Step 1: Upload Image and Build Mesh
    with gr.Column(variant="compact"):
        with gr.Row():
            image_input = gr.Image(
                type="filepath", height="250px", label="Upload an Image"
            )
            with gr.Column(variant="compact"):
                seed = gr.Number(label="Seed", value=42)
                # Potentially add color options.
                voxel_size_selector = gr.Dropdown(
                    ["Small (16)", "Medium (32)", "Large (64)"],
                    value="Medium (32)",
                    label="Select Voxel Size",
                )
        with gr.Row():
            build_button = gr.Button("Generate Mesh")
            voxelize_button = gr.Button("Generate Voxels")

    # Visualizations...
    # Mesh | Voxel Color | Voxel Lego Bricks+Color
    with gr.Row():
        mesh_info_display = gr.Model3D(
            label="Mesh Visualization", height="250px", value="mesh.obj"
        )
        voxel_color_display = gr.Plot(label="Colorized Voxels")
        voxel_bricks = gr.Plot(label="Lego Bricks")
        brick_animation = gr.Gallery(label="Build Animation")

    mesh_state = gr.State(value={})

    build_button.click(
        generate_mesh, inputs=[image_input, seed], outputs=mesh_info_display
    )

    # Step 4: Select Voxel Size
    voxelize_button.click(
        voxelize,
        inputs=[mesh_info_display, voxel_size_selector],
        outputs=[mesh_state],
    )
    df_state = gr.State()
    mesh_state.change(
        visualize_voxels,
        inputs=[mesh_state],
        outputs=[voxel_color_display, df_state],
    )

    df_state.change(animate_lego_build, inputs=[df_state], outputs=[voxel_bricks])

    def anim_pltly(df):
        df = df.with_columns(color=quantize_colors(df["color"])).with_columns(
            color_str=pl_color_to_str()
        )
        # Quantize Colors... Need to split string and use..
        merged_df = df.group_by("color_str", "z").map_groups(
            lambda grp: merge_into_bricks(grp, BLOCK_SIZES)
        )

        fig = go.Figure()
        fig.update_layout(
            scene=dict(
                xaxis=dict(range=get_range(df["x"]), autorange=False),
                yaxis=dict(range=get_range(df["y"]), autorange=False),
                zaxis=dict(range=get_range(df["z"]), autorange=False),
            )
        )
        frame_jpgs = []
        # Add each brick to the plot
        for z in merged_df["z"].unique().sort():
            for row in merged_df.filter(pl.col("z") == z).iter_rows(named=True):
                fig.add_trace(
                    create_brick(
                        x=row["x"],
                        y=row["y"],
                        z=row["z"],
                        width=row["width"],
                        height=row["height"],
                        color=row["color_str"],
                    )
                )
            frame_jpgs.append(f"frame_z_{z}.jpg")
            if not Path(frame_jpgs[-1]).exists():
                fig.write_image(frame_jpgs[-1])

        return frame_jpgs

    # TODO: add to generate layer-by-layer
    # df_state.change(anim_pltly, inputs=[df_state], outputs=[brick_animation])

# Launch the app
demo.launch(share=True, debug=True)
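For orientation, the callbacks above chain into a single pipeline that can also be exercised without the UI. The sketch below assumes the functions from app.py are already defined in the current session (importing app.py directly would also run demo.launch(), since it executes at module level) and that "input.png" is a placeholder for a local image:

# Sketch only: image -> mesh -> voxels -> bricks, without the Gradio UI.
obj_path = generate_mesh("input.png", seed=42)    # remote call to the InstantMesh Space
state = voxelize(obj_path, "Medium (32)")         # trimesh voxel grid + per-voxel colors
scatter_fig, voxel_df = visualize_voxels(state)   # Plotly scatter of colored voxels
bricks_fig = animate_lego_build(voxel_df)         # greedy per-layer merge into bricks
bricks_fig.show()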
constants.py
ADDED
@@ -0,0 +1,43 @@
import numpy as np


LEGO_COLORS_RGB = np.asarray(
    [
        [244, 244, 244],  # White
        [255, 250, 200],  # Light Yellow
        [255, 236, 108],  # Yellow
        [255, 167, 11],  # Orange
        [217, 133, 108],  # Light Salmon
        [207, 96, 36],  # Dark Orange
        [238, 96, 85],  # Red
        [218, 41, 28],  # Dark Red
        [255, 148, 148],  # Pink
        [255, 67, 106],  # Dark Pink
        [205, 98, 152],  # Magenta
        [228, 173, 200],  # Light Purple
        [150, 112, 159],  # Purple
        [17, 90, 150],  # Dark Blue
        [0, 133, 184],  # Blue
        [90, 177, 229],  # Light Blue
        [52, 142, 64],  # Dark Green
        [88, 171, 65],  # Green
        [199, 210, 60],  # Lime
        [183, 215, 213],  # Light Turquoise
        [85, 165, 175],  # Turquoise
        [142, 66, 41],  # Brown
        [124, 92, 69],  # Light Brown
        [108, 110, 104],  # Dark Gray
        [155, 161, 157],  # Gray
        [220, 220, 220],  # Light Gray
        [0, 0, 0],  # Black
    ]
)
# fmt: off
BLOCK_SIZES = [
    [1, 1], [1, 2], [1, 3], [1, 4], [1, 6], [1, 8], [2, 1], [3, 1], [4, 1], [6, 1], [8, 1],
    [2, 2], [2, 3], [2, 4], [2, 6], [2, 8], [3, 2], [4, 2], [6, 2], [8, 2],
]
# fmt: on

# Sort array by area, largest first.
BLOCK_SIZES.sort(key=lambda x: x[0] * x[1], reverse=True)
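Two details worth noting: BLOCK_SIZES is sorted largest-area-first so that merge_into_bricks in app.py tries big footprints before falling back to 1x1 studs, and LEGO_COLORS_RGB is the palette that lego_colors snaps voxel colors onto. A quick illustrative check (run from the repo root; the sample RGB values are arbitrary):

# Sketch only: nearest-palette snapping and greedy brick ordering.
import numpy as np
from scipy.spatial.distance import cdist
from constants import BLOCK_SIZES, LEGO_COLORS_RGB

sample = np.array([[250, 20, 30], [10, 120, 180]])   # arbitrary colors
print(LEGO_COLORS_RGB[cdist(sample, LEGO_COLORS_RGB).argmin(axis=1)])

print(BLOCK_SIZES[0])   # the largest footprint, [2, 8], is attempted first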
doll.obj
ADDED
The diff for this file is too large to render. See raw diff.
lego_quantize.py
ADDED
@@ -0,0 +1,103 @@
import numpy as np
from scipy import ndimage
from sklearn.cluster import MiniBatchKMeans


def vectorized_edge_preserving_quantization(
    voxel_matrix, lego_colors, n_initial_clusters=50
):
    """
    Vectorized version of edge-preserving color quantization

    Parameters:
    voxel_matrix: numpy array of shape (x, y, z, 3) containing RGB values
    lego_colors: list of [R, G, B] values for target LEGO colors
    n_initial_clusters: number of initial color clusters before mapping to LEGO colors
    """
    lego_colors = np.array(lego_colors)
    shape = voxel_matrix.shape

    # Reshape to 2D array of pixels
    pixels = voxel_matrix.reshape(-1, 3)

    # Step 1: Initial color clustering using K-means
    kmeans = MiniBatchKMeans(
        n_clusters=n_initial_clusters, batch_size=1000, random_state=42
    )
    labels = kmeans.fit_predict(pixels)
    cluster_centers = kmeans.cluster_centers_

    # Step 2: Create 3D gradient magnitude
    gradients = np.zeros(shape[:3])

    # Compute gradients along each axis
    for axis in range(3):
        # Forward difference
        forward = np.roll(voxel_matrix, -1, axis=axis)
        # Compute color differences
        diff = np.sqrt(np.sum((forward - voxel_matrix) ** 2, axis=-1))
        # Set boundary differences to 0
        slice_idx = [slice(None)] * 3
        slice_idx[axis] = -1
        diff[tuple(slice_idx)] = 0
        gradients += diff

    # Step 3: Segment using watershed algorithm
    # Reshape labels back to 3D (kept for inspection/debugging)
    labels_3d = labels.reshape(shape[:3])

    # Find local minima in gradient magnitude
    markers = ndimage.label(ndimage.minimum_filter(gradients, size=3) == gradients)[0]

    # Apply watershed segmentation (watershed_ift expects a uint8/uint16 input,
    # so scale the gradient magnitude into 0-255 before casting)
    gradients_u8 = (255 * gradients / max(gradients.max(), 1e-9)).astype(np.uint8)
    segments = ndimage.watershed_ift(gradients_u8, markers)

    # Step 4: Map segments to LEGO colors
    # Get the mean color for each segment, one channel at a time, since
    # ndimage.mean expects labels with the same shape as its input
    flat_segments = segments.ravel()
    index = np.arange(segments.max() + 1)
    segment_colors = np.stack(
        [
            ndimage.mean(pixels[:, channel], labels=flat_segments, index=index)
            for channel in range(3)
        ],
        axis=1,
    )

    # Find nearest LEGO color for each segment color
    def find_nearest_lego_colors(colors):
        # Reshape inputs for broadcasting
        colors = colors[:, np.newaxis, :]
        lego_colors_r = lego_colors[np.newaxis, :, :]

        # Compute distances to all LEGO colors at once
        distances = np.sqrt(np.sum((colors - lego_colors_r) ** 2, axis=2))

        # Find index of minimum distance for each color
        nearest_indices = np.argmin(distances, axis=1)

        return lego_colors[nearest_indices]

    segment_lego_colors = find_nearest_lego_colors(segment_colors)

    # Create output array
    result = np.zeros_like(voxel_matrix)

    # Map segments to final colors
    for i, color in enumerate(segment_lego_colors):
        mask = segments == i
        result[mask] = color

    return result


def analyze_quantization(original, quantized):
    """
    Analyze the results of quantization
    """
    original_colors = np.unique(original.reshape(-1, 3), axis=0)
    quantized_colors = np.unique(quantized.reshape(-1, 3), axis=0)

    stats = {
        "original_colors": len(original_colors),
        "quantized_colors": len(quantized_colors),
        "reduction_ratio": len(quantized_colors) / len(original_colors),
    }

    return stats
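lego_quantize.py is an alternative, edge-preserving quantizer (MiniBatchKMeans clustering plus a gradient/watershed segmentation) that is not yet wired into app.py. A smoke-test sketch on random data, run from the repo root; a real input would be the (x, y, z, 3) color grid produced by the voxelization step:

# Sketch only: smoke test on a random RGB voxel grid.
import numpy as np
from constants import LEGO_COLORS_RGB
from lego_quantize import analyze_quantization, vectorized_edge_preserving_quantization

rng = np.random.default_rng(0)
voxel_rgb = rng.integers(0, 256, size=(16, 16, 16, 3)).astype(float)

quantized = vectorized_edge_preserving_quantization(
    voxel_rgb, LEGO_COLORS_RGB, n_initial_clusters=20
)
print(analyze_quantization(voxel_rgb, quantized))  # unique color counts and reduction ratio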
mesh.obj
ADDED
The diff for this file is too large to render. See raw diff.
pyproject.toml
ADDED
@@ -0,0 +1,22 @@
[project]
name = "img2lego"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "colormath>=3.0.0",
    "gradio>=5.9.1",
    "gradio-client>=1.5.2",
    "ipykernel>=6.29.5",
    "kaleido==0.2.1",
    "marimo>=0.10.9",
    "matplotlib>=3.10.0",
    "numpy>=2.2.1",
    "plotly>=5.24.1",
    "polars>=1.18.0",
    "rtree>=1.3.0",
    "scikit-learn>=1.6.0",
    "scipy>=1.14.1",
    "trimesh>=4.5.3",
]
requirements.txt
ADDED
@@ -0,0 +1,8 @@
gradio
gradio_client
numpy
scikit-learn
trimesh
plotly
polars
scipy
scene.obj
ADDED
The diff for this file is too large to render. See raw diff.
uv.lock
ADDED
The diff for this file is too large to render. See raw diff.