Commit 8fed32c
Parent(s): initial commit

Files changed:
- .dockerignore +4 -0
- .gitattributes +39 -0
- .gitignore +163 -0
- Dockerfile +19 -0
- LICENSE +21 -0
- Makefile +15 -0
- README.md +142 -0
- app.py +238 -0
- index.html +19 -0
- medgan/__init__.py +0 -0
- medgan/dcgan.py +109 -0
- medgan/progan.py +190 -0
- medgan/stylegan.py +419 -0
- medgan/vit.py +46 -0
- medgan/wgan.py +60 -0
- requirements.txt +0 -0
- static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png +3 -0
- static/css/IMG_0263.JPG +3 -0
- static/css/hamza.jpeg +3 -0
- static/css/me.jpeg +3 -0
- static/css/profile-pic (45).png +3 -0
- static/css/profile-pic - 2025-01-02T222023.790.png +3 -0
- static/css/style.css +804 -0
- static/css/zaloom.jpeg +3 -0
- static/icons/email-icon.png +3 -0
- static/icons/icons8-2-circled-64.png +3 -0
- static/icons/icons8-circled-3-c-64.png +3 -0
- static/icons/icons8-circled-4-c-64.png +3 -0
- static/icons/icons8-circled-5-64.png +3 -0
- static/icons/icons8-linkedin-50.png +3 -0
- static/icons/icons8-number-1-64.png +3 -0
- static/icons/image.png +3 -0
- static/icons/linkedin-icon.png +3 -0
- static/icons/linkedin-icon.svg +1 -0
- static/script.js +30 -0
- static/temp_image.jpg +3 -0
- style.css +28 -0
- templates/About_us.html +161 -0
- templates/contact.html +182 -0
- templates/detect.html +196 -0
- templates/error.html +39 -0
- templates/generate.html +502 -0
- templates/index.html +208 -0
- templates/results-detect.html +120 -0
- templates/results.html +65 -0
.dockerignore
ADDED
@@ -0,0 +1,4 @@
__pycache__/
.pytest_cache/
notebooks/
*.pyc
.gitattributes
ADDED
@@ -0,0 +1,39 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.JPG filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,163 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
models/
notebooks/
Dockerfile
ADDED
@@ -0,0 +1,19 @@
FROM python:3.11-slim

COPY ./requirements.txt /app/requirements.txt

WORKDIR /app

RUN pip install -r requirements.txt

COPY . /app

ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV FLASK_APP=app.py
ENV FLASK_ENV=production

EXPOSE 5000

CMD ["python", "app.py"]
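As a quick sanity check of the Dockerfile above, a minimal build-and-run sketch; the image tag `medgan` is an arbitrary choice, and note that `app.py` (shown later in this commit) calls `app.run(debug=True)`, which binds to 127.0.0.1 inside the container, so the app may need `host="0.0.0.0"` before the published port is reachable from outside:

```bash
# Build the image from the repository root; "medgan" is an arbitrary tag.
docker build -t medgan .

# Run it, publishing the port declared by EXPOSE 5000.
docker run --rm -p 5000:5000 medgan
```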
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Mohammed Zaloom

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Makefile
ADDED
@@ -0,0 +1,15 @@
install:
	pip install --upgrade pip &&\
		pip install -r requirements.txt

test:
	python -m pytest -vv test_*.py
	python -m pytest --nbval notebooks/*/*.ipynb

format:
	black *.py

lint:
	pylint --disable=R,C,E1120 *.py medgan/*.py

all: install format lint test
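Assuming GNU Make is available, the targets above are invoked as usual; note that `make test` expects `test_*.py` files and notebooks under `notebooks/`, neither of which is added by this commit:

```bash
make install   # upgrade pip, then install requirements.txt
make format    # run black over the top-level .py files
make lint      # pylint with the R, C, and E1120 checks disabled
make all       # install, format, lint, and test in sequence
```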
README.md
ADDED
@@ -0,0 +1,142 @@
---
title: Medgan
emoji: ⚡
colorFrom: blue
colorTo: gray
sdk: static
pinned: false
license: mit
short_description: The project focuses on brain tumor MRI scans and includes im
---

<<<<<<< HEAD
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
=======
[](https://github.com/mozaloom/medgan/actions/workflows/main.yml)
[](https://github.com/mozaloom/medgan/actions/workflows/push-docker.yml)

# MedGAN: Advanced Medical Image Generation

<img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="medgan Logo" width="120" style="margin-bottom: 20px;">

## Overview

MedGAN is a comprehensive framework for generating high-quality synthetic medical images using state-of-the-art Generative Adversarial Networks (GANs). The project focuses on brain tumor MRI scans and includes implementations of multiple cutting-edge GAN architectures optimized for medical imaging applications.

## Features

- **Multiple GAN Implementations:**
  - DCGAN (Deep Convolutional GAN)
  - ProGAN (Progressive Growing of GANs)
  - StyleGAN2 (Style-based Generator with improvements)
  - WGAN (Wasserstein GAN with gradient penalty)

- **Web Application Interface:**
  - Generate synthetic brain MRI scans
  - Detect tumor types from uploaded MRI images
  - Interactive and user-friendly interface

- **Pre-trained Models:**
  - Models for three tumor types: Glioma, Meningioma, and Pituitary
  - ViT-based tumor detection model (92% accuracy)

## Architecture Performance Comparison

| Architecture | Image Quality | Training Stability | Generation Diversity | Training Speed |
|--------------|---------------|--------------------|----------------------|----------------|
| ProGAN       | ⭐⭐⭐⭐       | ⭐⭐⭐⭐⭐        | ⭐⭐⭐⭐            | ⭐⭐⭐         |
| StyleGAN2    | ⭐⭐⭐⭐⭐     | ⭐⭐⭐⭐          | ⭐⭐⭐⭐⭐          | ⭐⭐           |
| WGAN-GP      | ⭐⭐⭐         | ⭐⭐⭐⭐          | ⭐⭐⭐              | ⭐⭐⭐⭐       |
| DCGAN        | ⭐⭐⭐         | ⭐⭐⭐            | ⭐⭐                | ⭐⭐⭐⭐⭐     |

## Getting Started

### Prerequisites
- Python 3.9+
- PyTorch 1.9+
- Flask (for web application)
- CUDA-capable GPU (recommended)

### Installation

1. Clone the repository:
```bash
git clone https://github.com/mozaloom/medgan.git
cd medgan
```

2. Install required packages:
```bash
pip install -r requirements.txt
```

3. Run the web application:
```bash
python app.py
```

4. Access the web interface at `http://localhost:5000`

## Usage

### Web Application

The MedGAN web application offers two primary functionalities:

1. **Generate synthetic brain MRI scans:**
   - Select tumor type (Glioma, Meningioma, Pituitary)
   - Choose GAN architecture
   - Generate high-quality synthetic MRI images

2. **Detect tumor types:**
   - Upload brain MRI scans
   - Receive AI-powered tumor classification
   - View detection confidence scores

Check the individual model implementation files for specific training parameters.

## Project Structure

```
medgan/
├── app.py              # Flask web application
├── medgan/             # Core GAN implementations
│   ├── dcgan.py
│   ├── progan.py
│   ├── stylegan.py
│   ├── wgan.py
│   └── vit.py
├── models/             # Pre-trained model weights
├── notebooks/          # Training notebooks
│   ├── dcgan/
│   ├── progan/
│   ├── stylegan/
│   └── wgan/
├── static/             # Web assets
└── templates/          # HTML templates
```

## Contributing

Contributions are welcome! Please feel free to submit a Pull Request.

1. Fork the repository
2. Create your feature branch (`git checkout -b feature/amazing-feature`)
3. Commit your changes (`git commit -m 'Add some amazing feature'`)
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request

## License

This project is licensed under the MIT License - see the LICENSE file for details.

## Acknowledgments

- [Brain Tumor MRI Dataset](https://www.kaggle.com/datasets/masoudnickparvar/brain-tumor-mri-dataset/data) from Kaggle
- Research papers implementing the original GAN architectures:
  - [DCGAN](https://arxiv.org/abs/1511.06434)
  - [ProGAN](https://arxiv.org/abs/1710.10196)
  - [StyleGAN2](https://arxiv.org/abs/1912.04958)
  - [WGAN](https://arxiv.org/abs/1701.07875)
>>>>>>> c38c95c (Initial commit)
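The generation workflow the README describes maps onto the `/generate` route in `app.py` (next in this commit). As a minimal sketch, assuming the server is running locally on port 5000, the endpoint can be exercised with curl; the accepted `model` values come from the route's branches:

```bash
# Request two synthetic glioma scans from the ProGAN models.
# "model" must be one of: "DCGANs", "Progressive GANs", "StyleGAN2", "WGANs".
# The route responds with rendered HTML (templates/results.html).
curl -X POST http://localhost:5000/generate \
  -F 'model=Progressive GANs' \
  -F 'class_name=Glioma' \
  -F 'num_images=2' \
  -o results.html
```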
app.py
ADDED
@@ -0,0 +1,238 @@
import os
import io
import torch
import base64
import zipfile
from flask import Flask, request, jsonify, render_template, send_file
from medgan.dcgan import Generator_DCGAN, generate_examples_DCGAN
from medgan.progan import Generator_ProGAN, generate_examples_ProGAN, seed_everything
from medgan.stylegan import Generator_SG2, MappingNetwork, generate_examples_SG2
from medgan.vit import TumorDetectionApp
from medgan.wgan import Generator_WGAN, generate_examples_WGAN

# Initialize Flask app
app = Flask(__name__)

# Set seeds for reproducibility
seed_everything()

# Constants
Z_DIM = 256
FEATURES_GEN = 64
CHANNELS_IMG = 3
progan_steps = 6  # Number of steps for ProGAN fade-in
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Model paths
model_paths = {
    "DCGAN": {
        "Glioma": "models/DCGAN-Glioma.pth",
        "Pituitary": "models/DCGAN-Meningioma.pth",
        "Meningioma": "models/DCGAN-Pituitary.pth",
    },
    "ProGAN": {
        "Glioma": "models/ProGAN-Glioma.pth",
        "Meningioma": "models/ProGAN-Meningioma.pth",
        "Pituitary": "models/ProGAN-Pituitary.pth",
    },
    "StyleGAN2": {
        "Glioma": {
            "generator": "models/StyleGAN2-Glioma.pth",
            "mapping": "models/StyleGAN2-Glioma-MappingNet.pth"
        },
        "Meningioma": {
            "generator": "models/StyleGAN2-Meningioma.pth",
            "mapping": "models/StyleGAN2-Meningioma-MappingNet.pth"
        },
        "Pituitary": {
            "generator": "models/StyleGAN2-Pituitary.pth",
            "mapping": "models/StyleGAN2-Pituitary-MappingNet.pth"
        },
    },
    "WGANs": {
        "Glioma": "models/WGAN-Glioma.pth",
        "Meningioma": "models/WGAN-Pituitary.pth",
        "Pituitary": "models/WGAN-Pituitary.pth",
    }
}


# Load DCGAN models
dcgan_generators = {}
for label, path in model_paths["DCGAN"].items():
    model = Generator_DCGAN(1, 256, 64, 3).to(torch.device('cpu'))  # Corrected Z_DIM to 256
    model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
    model.eval()
    dcgan_generators[label] = model

# Load ProGAN models
progan_generators = {}
for label, path in model_paths["ProGAN"].items():
    model = Generator_ProGAN(256, 256, 3).to(torch.device('cpu'))
    model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
    model.eval()
    progan_generators[label] = model

# Load StyleGAN2 models
stylegan2_generators = {}
stylegan2_mapping_networks = {}
for label, paths in model_paths["StyleGAN2"].items():
    gen_model = Generator_SG2(log_resolution=8, W_DIM=256)
    map_net = MappingNetwork(256, 256).to(DEVICE)
    gen_model.load_state_dict(torch.load(paths["generator"], map_location=torch.device('cpu')))
    map_net.load_state_dict(torch.load(paths["mapping"], map_location=torch.device('cpu')))
    gen_model.eval()
    map_net.eval()
    stylegan2_generators[label] = gen_model
    stylegan2_mapping_networks[label] = map_net

# Load WGAN models with weights_only and strict=False
wgan_generators = {}
for label, path in model_paths["WGANs"].items():
    model = Generator_WGAN().to(torch.device('cpu'))
    try:
        # Load the state dict with weights_only=True
        state_dict = torch.load(path, map_location=torch.device('cpu'))
        model.load_state_dict(state_dict, strict=False)  # Allows partial compatibility
        model.eval()
        wgan_generators[label] = model
    except FileNotFoundError:
        print(f"Checkpoint file not found for {label}: {path}")
    except RuntimeError as e:
        print(f"Error loading WGAN model for {label}: {e}")


# Routes
@app.route("/")
def home():
    return render_template("index.html")

@app.route("/about_us")
def about_us():
    return render_template("About_us.html")

@app.route("/generate_info")
def generate_info():
    return render_template("generate.html")

@app.route("/contact")
def contact():
    return render_template("contact.html")

@app.route("/detect_info")
def detect_info():
    return render_template("detect.html")

@app.route("/generate", methods=["POST"])
def generate():
    data = request.form
    model_type = data.get("model")  # "DCGANs", "Progressive GANs", "StyleGAN2", or "WGANs"
    class_name = data.get("class_name")
    num_images = int(data.get("num_images", 1))

    # Select the appropriate model
    if model_type == "DCGANs":
        generators = dcgan_generators
        generation_function = generate_examples_DCGAN
        noise = torch.randn(num_images, Z_DIM, 1, 1).to(torch.device('cpu'))
    elif model_type == "Progressive GANs":
        generators = progan_generators
        generation_function = generate_examples_ProGAN
        noise = torch.randn(num_images, Z_DIM, 1, 1).to(torch.device('cpu'))
    elif model_type == "StyleGAN2":
        generators = stylegan2_generators
        mapping_networks = stylegan2_mapping_networks
        generation_function = generate_examples_SG2
    elif model_type == "WGANs":
        generators = wgan_generators
        generation_function = generate_examples_WGAN
        noise = torch.randn(num_images, 256, 1, 1).to(torch.device('cpu'))
    else:
        return jsonify({"error": "Invalid model type"}), 400

    if class_name not in generators:
        return jsonify({"error": f"Invalid class name for {model_type}"}), 400

    if model_type == "StyleGAN2":
        generator = generators[class_name]
        mapping_net = mapping_networks[class_name]
        images_base64, image_buffers = generation_function(generator, mapping_net, num_images)
    else:
        generator = generators[class_name]
        images_base64, image_buffers = generation_function(generator, noise, num_images)

    # Create ZIP file for download
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
        for i, buf in enumerate(image_buffers):
            if buf:  # Ensure buffer is not empty
                zip_file.writestr(f"generated_image_{i + 1}.png", buf.getvalue())
    zip_buffer.seek(0)

    # Render template with images and ZIP file link
    return render_template("results.html", images=images_base64, zip_file=True)

@app.route("/download_zip", methods=["GET"])
def download_zip():
    """Route to download the ZIP file containing all generated images."""
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
        for i, image_base64 in enumerate(app.config.get("images_base64", [])):
            img_data = base64.b64decode(image_base64)
            zip_file.writestr(f"generated_image_{i + 1}.png", img_data)
    zip_buffer.seek(0)
    return send_file(
        zip_buffer,
        mimetype="application/zip",
        as_attachment=True,
        download_name="generated_images.zip"
    )

@app.route("/detect", methods=["POST"])
def detect():
    try:
        # Define paths and device
        model_path = "models/vit-35-Epochs-92-NTP-model.pth"
        DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Get the uploaded file
        file = request.files.get("file")
        if not file:
            print("No file uploaded.")
            return jsonify({"error": "No file uploaded"}), 400

        # Save the uploaded file temporarily in the static folder
        file_path = os.path.join("static", "temp_image.jpg")
        os.makedirs("static", exist_ok=True)  # Ensure the directory exists
        file.save(file_path)
        print(f"File saved to: {file_path}")

        # Initialize the detection app
        detection_app = TumorDetectionApp(model_path=model_path, device=DEVICE)
        print("Detection app initialized.")

        # Predict the class
        predicted_class = detection_app.predict_image(file_path)
        if predicted_class is None:
            print("Prediction failed.")
            return jsonify({"error": "Prediction failed"}), 500

        # Map the prediction to a class name
        class_mapping = {
            0: "Glioma",
            1: "Meningioma",
            2: "No Tumor",
            3: "Pituitary"
        }
        result = class_mapping.get(predicted_class, "Unknown")
        print(f"Prediction successful. Result: {result}")

        # Serve results with the relative path
        return render_template("results-detect.html", images=["temp_image.jpg"], result=result)

    except Exception as e:
        print(f"Error in /detect route: {e}")
        return jsonify({"error": str(e)}), 500

if __name__ == "__main__":
    app.run(debug=True)
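The `/detect` route above reads a multipart upload from the `file` field and saves it to `static/temp_image.jpg` before classifying it. A minimal curl sketch, assuming a local server and a hypothetical input image `scan.jpg`:

```bash
# Upload an MRI scan for ViT-based classification; the route renders
# templates/results-detect.html with the predicted tumor type.
curl -X POST http://localhost:5000/detect \
  -F 'file=@scan.jpg' \
  -o detection.html
```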
index.html
ADDED
@@ -0,0 +1,19 @@
<!doctype html>
<html>
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width" />
    <title>My static Space</title>
    <link rel="stylesheet" href="style.css" />
  </head>
  <body>
    <div class="card">
      <h1>Welcome to your static Space!</h1>
      <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
      <p>
        Also don't forget to check the
        <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
      </p>
    </div>
  </body>
</html>
medgan/__init__.py
ADDED
File without changes
medgan/dcgan.py
ADDED
@@ -0,0 +1,109 @@
import io
import torch
import base64
import torch.nn as nn
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

# Define the Generator class for DCGAN
class Generator_DCGAN(nn.Module):
    def __init__(self, ngpu, nz, ngf, nc):
        super(Generator_DCGAN, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 16, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 16),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, ngf // 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf // 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf // 2, nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, x):
        return self.main(x)


def generate_examples_DCGAN_1(generator, z_dim, class_name="Example"):
    """
    Generate an example using a DCGAN generator and visualize it.

    Args:
        generator (torch.nn.Module): The trained DCGAN generator model.
        z_dim (int): Dimension of the latent noise vector.
        class_name (str): Class label for the generated images.

    Returns:
        matplotlib.figure.Figure: A matplotlib figure showing the generated image.
    """
    generator.eval()
    noise = torch.randn(1, z_dim, 1, 1).to(torch.device('cpu'))

    with torch.no_grad():
        generated_image = generator(noise)

    # Display the generated image
    fig, ax = plt.subplots(figsize=(2, 2))
    fig.patch.set_facecolor('white')  # Set the figure's background color
    img_to_display = (generated_image[0].permute(1, 2, 0).cpu().numpy() * 0.5 + 0.5).clip(0, 1)
    ax.imshow(img_to_display)  # Convert tensor to HxWxC for plt.imshow
    ax.axis('off')  # Turn off axis
    ax.set_facecolor('white')  # Set the axis background color
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # Remove space around image

    return fig


def generate_examples_DCGAN(generator, noise, num_images):
    """
    Generate images using a DCGAN generator and return them as Base64-encoded strings.

    Args:
        generator (torch.nn.Module): Pre-trained DCGAN generator model.
        noise (torch.Tensor): Random noise tensor for image generation.
        num_images (int): Number of images to generate.

    Returns:
        list: Base64-encoded images.
        list: BytesIO image buffers for optional ZIP creation.
    """
    images_base64 = []
    image_buffers = []

    with torch.no_grad():
        generated_images = generator(noise)
        for i in range(num_images):
            img_tensor = (generated_images[i] + 1) / 2  # Normalize to [0, 1]
            img_np = img_tensor.permute(1, 2, 0).cpu().numpy()

            # Convert to Base64-encoded image
            fig, ax = plt.subplots(figsize=(2, 2))
            ax.imshow(img_np)
            ax.axis('off')
            buf = io.BytesIO()
            FigureCanvas(fig).print_png(buf)
            buf.seek(0)
            image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
            images_base64.append(image_base64)

            # Store the buffer for optional ZIP file creation
            image_buffers.append(buf)
            plt.close(fig)

    return images_base64, image_buffers
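For reference, a standalone sketch of driving this module the same way `app.py` does; the checkpoint path is illustrative (any of the `models/DCGAN-*.pth` weights from this repo would do):

```python
import torch
from medgan.dcgan import Generator_DCGAN, generate_examples_DCGAN

# Same hyperparameters app.py uses: ngpu=1, nz=256 (latent dim),
# ngf=64 (base feature maps), nc=3 (output channels).
gen = Generator_DCGAN(1, 256, 64, 3)
gen.load_state_dict(torch.load("models/DCGAN-Glioma.pth", map_location="cpu"))
gen.eval()

# One latent vector per requested image, shape (N, nz, 1, 1).
noise = torch.randn(4, 256, 1, 1)
images_base64, image_buffers = generate_examples_DCGAN(gen, noise, 4)
print(f"Generated {len(images_base64)} base64-encoded PNGs")
```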
medgan/progan.py
ADDED
@@ -0,0 +1,190 @@
# Standard library imports
import base64
import io
import os
import random

# Third-party imports
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

# Define the seed function for reproducibility
def seed_everything():
    seed = random.randint(0, 2**32 - 1)  # Generate a random seed between 0 and 2^32-1
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    print(f"Random seed set to: {seed}")


START_TRAIN_AT_IMG_SIZE = 4
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
LEARNING_RATE = 1e-3
BATCH_SIZES = [32, 32, 32, 16, 16, 16]
IMAGE_SIZE = 128
CHANNELS_IMG = 3
Z_DIM = 256
IN_CHANNELS = 256
LAMBDA_GP = 10
PROGRESSIVE_EPOCHS = [30] * len(BATCH_SIZES)
factors = [1, 1, 1, 1, 1 / 2, 1 / 4, 1 / 8, 1 / 16, 1 / 32]

# Define the model components
class WSConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super(WSConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        self.scale = (2 / (in_channels * (kernel_size ** 2))) ** 0.5
        self.bias = self.conv.bias
        self.conv.bias = None
        nn.init.normal_(self.conv.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        return self.conv(x * self.scale) + self.bias.view(1, self.bias.shape[0], 1, 1)


class PixelNorm(nn.Module):
    def __init__(self):
        super(PixelNorm, self).__init__()
        self.epsilon = 1e-8

    def forward(self, x):
        return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + self.epsilon)


class ConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels, use_pixelnorm=True):
        super(ConvBlock, self).__init__()
        self.use_pn = use_pixelnorm
        self.conv1 = WSConv2d(in_channels, out_channels)
        self.conv2 = WSConv2d(out_channels, out_channels)
        self.leaky = nn.LeakyReLU(0.2)
        self.pn = PixelNorm()

    def forward(self, x):
        x = self.leaky(self.conv1(x))
        x = self.pn(x) if self.use_pn else x
        x = self.leaky(self.conv2(x))
        x = self.pn(x) if self.use_pn else x
        return x


class Generator_ProGAN(nn.Module):
    def __init__(self, z_dim, in_channels, img_channels=3):
        super(Generator_ProGAN, self).__init__()

        # initial takes 1x1 -> 4x4
        self.initial = nn.Sequential(
            PixelNorm(),
            nn.ConvTranspose2d(z_dim, in_channels, 4, 1, 0),
            nn.LeakyReLU(0.2),
            WSConv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2),
            PixelNorm(),
        )

        self.initial_rgb = WSConv2d(
            in_channels, img_channels, kernel_size=1, stride=1, padding=0
        )
        self.prog_blocks, self.rgb_layers = (
            nn.ModuleList([]),
            nn.ModuleList([self.initial_rgb]),
        )

        for i in range(len(factors) - 1):  # -1 to prevent index error because of factors[i+1]
            conv_in_c = int(in_channels * factors[i])
            conv_out_c = int(in_channels * factors[i + 1])
            self.prog_blocks.append(ConvBlock(conv_in_c, conv_out_c))
            self.rgb_layers.append(
                WSConv2d(conv_out_c, img_channels, kernel_size=1, stride=1, padding=0)
            )

    def fade_in(self, alpha, upscaled, generated):
        return torch.tanh(alpha * generated + (1 - alpha) * upscaled)

    def forward(self, x, alpha, steps):
        out = self.initial(x)

        if steps == 0:
            return self.initial_rgb(out)

        for step in range(steps):
            upscaled = F.interpolate(out, scale_factor=2, mode="nearest")
            out = self.prog_blocks[step](upscaled)

        final_upscaled = self.rgb_layers[steps - 1](upscaled)
        final_out = self.rgb_layers[steps](out)
        return self.fade_in(alpha, final_upscaled, final_out)


def generate_example_and_show_ProGAN_1(gen, steps, n=1):
    gen.eval()  # Set the model to evaluation mode
    alpha = 1.0
    with torch.no_grad():  # Disable gradient computation
        noise = torch.randn(1, Z_DIM, 1, 1).to(DEVICE)  # Generate random noise
        img = gen(noise, alpha, steps)  # Generate an image
        img = (img * 0.5 + 0.5).clamp(0, 1)  # Normalize the image to [0, 1] range

    # Display the image with enhancements
    fig, ax = plt.subplots(figsize=(2, 2))
    fig.patch.set_facecolor('white')  # Set the figure's background color
    ax.imshow(img.squeeze(0).permute(1, 2, 0).cpu().numpy())  # Convert to HxWxC for plt.imshow
    ax.axis('off')  # Turn off axis
    ax.set_facecolor('white')  # Set the axis background color
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # Remove space around image
    return fig


def generate_examples_ProGAN(generator, noise, num_images):
    """
    Generate images using a ProGAN generator and return them as Base64-encoded strings.

    Args:
        generator (torch.nn.Module): Pre-trained ProGAN generator model.
        noise (torch.Tensor): Random noise tensor for image generation.
        num_images (int): Number of images to generate.

    Returns:
        list: Base64-encoded images.
        list: BytesIO image buffers for optional ZIP creation.
    """
    steps = 6  # Fixed number of steps for the ProGAN fade-in mechanism
    images_base64 = []
    image_buffers = []

    with torch.no_grad():
        noise = noise.to(next(generator.parameters()).device)
        for i in range(num_images):
            alpha = 1.0  # Set alpha to 1.0 for full fade-in
            img_tensor = generator(noise[i:i+1], alpha, steps).squeeze(0)  # Generate an image
            img_tensor = (img_tensor * 0.5 + 0.5).clamp(0, 1)  # Normalize to [0, 1]
            img_np = img_tensor.permute(1, 2, 0).cpu().numpy()

            # Convert to Base64-encoded image
            fig, ax = plt.subplots(figsize=(2, 2))
            ax.imshow(img_np)
            ax.axis('off')
            buf = io.BytesIO()
            FigureCanvas(fig).print_png(buf)
            buf.seek(0)
            image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
            images_base64.append(image_base64)

            # Store the buffer for optional ZIP file creation
            image_buffers.append(buf)
            plt.close(fig)

    return images_base64, image_buffers
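A matching standalone sketch for this module, mirroring the `Generator_ProGAN(256, 256, 3)` construction in `app.py` (the checkpoint path is illustrative):

```python
import torch
from medgan.progan import Generator_ProGAN, generate_examples_ProGAN

# z_dim=256, in_channels=256, img_channels=3, as in app.py.
gen = Generator_ProGAN(256, 256, 3)
gen.load_state_dict(torch.load("models/ProGAN-Glioma.pth", map_location="cpu"))
gen.eval()

# generate_examples_ProGAN fixes alpha=1.0 and steps=6 internally,
# so only the latent batch of shape (N, z_dim, 1, 1) is supplied.
noise = torch.randn(2, 256, 1, 1)
images_base64, image_buffers = generate_examples_ProGAN(gen, noise, 2)
```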
medgan/stylegan.py
ADDED
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Standard library imports
|
2 |
+
import base64
|
3 |
+
import io
|
4 |
+
import os
|
5 |
+
import random
|
6 |
+
from math import sqrt
|
7 |
+
|
8 |
+
# Third-party imports
|
9 |
+
import numpy as np
|
10 |
+
import matplotlib.pyplot as plt
|
11 |
+
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
|
12 |
+
|
13 |
+
# PyTorch imports
|
14 |
+
import torch
|
15 |
+
import torch.nn.functional as F
|
16 |
+
from torch import nn
|
17 |
+
from torchvision.utils import save_image
|
18 |
+
|
19 |
+
|
20 |
+
|
21 |
+
#mapping_network_path = r"C:\Users\mzlwm\OneDrive\Desktop\MEDGAN\StyleGan2\StyleGAN2-256\StyleGAN2-256\StyleGAN2-256-Meningioma\mapping_net.pth" # Replace with actual path
|
22 |
+
# Define the seed function for reproducibility
|
23 |
+
def seed_everything():
|
24 |
+
seed = random.randint(0, 2**32 - 1) # Generate a random seed between 0 and 2^32-1
|
25 |
+
os.environ['PYTHONHASHSEED'] = str(seed)
|
26 |
+
np.random.seed(seed)
|
27 |
+
torch.manual_seed(seed)
|
28 |
+
torch.cuda.manual_seed(seed)
|
29 |
+
torch.cuda.manual_seed_all(seed)
|
30 |
+
torch.backends.cudnn.deterministic = True
|
31 |
+
torch.backends.cudnn.benchmark = False
|
32 |
+
print(f"Random seed set to: {seed}")
|
33 |
+
|
34 |
+
|
35 |
+
|
36 |
+
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
37 |
+
EPOCHS = 300
|
38 |
+
LEARNING_RATE = 1e-3
|
39 |
+
BATCH_SIZE = 64
|
40 |
+
LOG_RESOLUTION = 8
|
41 |
+
Z_DIM = 256
|
42 |
+
W_DIM = 256
|
43 |
+
LAMBDA_GP = 10
|
44 |
+
factors = [1, 1, 1, 1, 1 / 2, 1 / 4, 1 / 8, 1 / 16, 1 / 32]
|
45 |
+
|
46 |
+
|
47 |
+
class MappingNetwork(nn.Module):
|
48 |
+
def __init__(self, z_dim, w_dim):
|
49 |
+
super().__init__()
|
50 |
+
self.mapping = nn.Sequential(
|
51 |
+
EqualizedLinear(z_dim, w_dim),
|
52 |
+
nn.ReLU(),
|
53 |
+
EqualizedLinear(z_dim, w_dim),
|
54 |
+
nn.ReLU(),
|
55 |
+
EqualizedLinear(z_dim, w_dim),
|
56 |
+
nn.ReLU(),
|
57 |
+
EqualizedLinear(z_dim, w_dim),
|
58 |
+
nn.ReLU(),
|
59 |
+
EqualizedLinear(z_dim, w_dim),
|
60 |
+
nn.ReLU(),
|
61 |
+
EqualizedLinear(z_dim, w_dim),
|
62 |
+
nn.ReLU(),
|
63 |
+
EqualizedLinear(z_dim, w_dim)
|
64 |
+
)
|
65 |
+
|
66 |
+
def forward(self, x):
|
67 |
+
x = x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 1e-8) # for PixelNorm
|
68 |
+
return self.mapping(x)
|
69 |
+
|
70 |
+
|
71 |
+
class Generator_SG2(nn.Module):
|
72 |
+
|
73 |
+
def __init__(self, log_resolution, W_DIM, n_features = 32, max_features = 256, device="cpu"):
|
74 |
+
|
75 |
+
super().__init__()
|
76 |
+
|
77 |
+
features = [min(max_features, n_features * (2 ** i)) for i in range(log_resolution - 2, -1, -1)]
|
78 |
+
self.n_blocks = len(features)
|
79 |
+
|
80 |
+
self.initial_constant = nn.Parameter(torch.randn((1, features[0], 4, 4)))
|
81 |
+
|
82 |
+
self.style_block = StyleBlock(W_DIM, features[0], features[0])
|
83 |
+
self.to_rgb = ToRGB(W_DIM, features[0])
|
84 |
+
self.device = torch.device(device)
|
85 |
+
|
86 |
+
blocks = [GeneratorBlock(W_DIM, features[i - 1], features[i]) for i in range(1, self.n_blocks)]
|
87 |
+
self.blocks = nn.ModuleList(blocks)
|
88 |
+
|
89 |
+
def forward(self, w, input_noise):
|
90 |
+
|
91 |
+
batch_size = w.shape[1]
|
92 |
+
|
93 |
+
x = self.initial_constant.expand(batch_size, -1, -1, -1)
|
94 |
+
x = self.style_block(x, w[0], input_noise[0][1])
|
95 |
+
rgb = self.to_rgb(x, w[0])
|
96 |
+
|
97 |
+
for i in range(1, self.n_blocks):
|
98 |
+
x = F.interpolate(x, scale_factor=2, mode="bilinear")
|
99 |
+
x, rgb_new = self.blocks[i - 1](x, w[i], input_noise[i])
|
100 |
+
rgb = F.interpolate(rgb, scale_factor=2, mode="bilinear") + rgb_new
|
101 |
+
|
102 |
+
return torch.tanh(rgb)
|
103 |
+
|
104 |
+
|
105 |
+
class GeneratorBlock(nn.Module):
|
106 |
+
|
107 |
+
def __init__(self, W_DIM, in_features, out_features):
|
108 |
+
|
109 |
+
super().__init__()
|
110 |
+
|
111 |
+
self.style_block1 = StyleBlock(W_DIM, in_features, out_features)
|
112 |
+
self.style_block2 = StyleBlock(W_DIM, out_features, out_features)
|
113 |
+
|
114 |
+
self.to_rgb = ToRGB(W_DIM, out_features)
|
115 |
+
|
116 |
+
def forward(self, x, w, noise):
|
117 |
+
|
118 |
+
x = self.style_block1(x, w, noise[0])
|
119 |
+
x = self.style_block2(x, w, noise[1])
|
120 |
+
|
121 |
+
rgb = self.to_rgb(x, w)
|
122 |
+
|
123 |
+
return x, rgb
|
124 |
+
|
125 |
+
|
126 |
+
class StyleBlock(nn.Module):
|
127 |
+
|
128 |
+
def __init__(self, W_DIM, in_features, out_features):
|
129 |
+
|
130 |
+
super().__init__()
|
131 |
+
|
132 |
+
self.to_style = EqualizedLinear(W_DIM, in_features, bias=1.0)
|
133 |
+
self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3)
|
134 |
+
self.scale_noise = nn.Parameter(torch.zeros(1))
|
135 |
+
self.bias = nn.Parameter(torch.zeros(out_features))
|
136 |
+
|
137 |
+
self.activation = nn.LeakyReLU(0.2, True)
|
138 |
+
|
139 |
+
def forward(self, x, w, noise):
|
140 |
+
|
141 |
+
s = self.to_style(w)
|
142 |
+
x = self.conv(x, s)
|
143 |
+
if noise is not None:
|
144 |
+
x = x + self.scale_noise[None, :, None, None] * noise
|
145 |
+
return self.activation(x + self.bias[None, :, None, None])
|
146 |
+
|
147 |
+
class ToRGB(nn.Module):
|
148 |
+
|
149 |
+
def __init__(self, W_DIM, features):
|
150 |
+
|
151 |
+
super().__init__()
|
152 |
+
self.to_style = EqualizedLinear(W_DIM, features, bias=1.0)
|
153 |
+
|
154 |
+
self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False)
|
155 |
+
self.bias = nn.Parameter(torch.zeros(3))
|
156 |
+
self.activation = nn.LeakyReLU(0.2, True)
|
157 |
+
|
158 |
+
def forward(self, x, w):
|
159 |
+
|
160 |
+
style = self.to_style(w)
|
161 |
+
x = self.conv(x, style)
|
162 |
+
return self.activation(x + self.bias[None, :, None, None])
|
163 |
+
|
164 |
+
class Conv2dWeightModulate(nn.Module):
|
165 |
+
|
166 |
+
def __init__(self, in_features, out_features, kernel_size,
|
167 |
+
demodulate = True, eps = 1e-8):
|
168 |
+
|
169 |
+
super().__init__()
|
170 |
+
self.out_features = out_features
|
171 |
+
self.demodulate = demodulate
|
172 |
+
self.padding = (kernel_size - 1) // 2
|
173 |
+
|
174 |
+
self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size])
|
175 |
+
self.eps = eps
|
176 |
+
|
177 |
+
def forward(self, x, s):
|
178 |
+
|
179 |
+
b, _, h, w = x.shape
|
180 |
+
|
181 |
+
s = s[:, None, :, None, None]
|
182 |
+
weights = self.weight()[None, :, :, :, :]
|
183 |
+
weights = weights * s
|
184 |
+
|
185 |
+
if self.demodulate:
|
186 |
+
sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
|
187 |
+
weights = weights * sigma_inv
|
188 |
+
|
189 |
+
x = x.reshape(1, -1, h, w)
|
190 |
+
|
191 |
+
_, _, *ws = weights.shape
|
192 |
+
weights = weights.reshape(b * self.out_features, *ws)
|
193 |
+
|
194 |
+
x = F.conv2d(x, weights, padding=self.padding, groups=b)
|
195 |
+
|
196 |
+
return x.reshape(-1, self.out_features, h, w)
|
197 |
+
|
198 |
+
class EqualizedLinear(nn.Module):
|
199 |
+
|
200 |
+
def __init__(self, in_features, out_features, bias = 0.):
|
201 |
+
|
202 |
+
super().__init__()
|
203 |
+
self.weight = EqualizedWeight([out_features, in_features])
|
204 |
+
self.bias = nn.Parameter(torch.ones(out_features) * bias)
|
205 |
+
|
206 |
+
def forward(self, x: torch.Tensor):
|
207 |
+
return F.linear(x, self.weight(), bias=self.bias)
|
208 |
+
|
209 |
+
class EqualizedConv2d(nn.Module):
|
210 |
+
|
211 |
+
def __init__(self, in_features, out_features,
|
212 |
+
kernel_size, padding = 0):
|
213 |
+
|
214 |
+
super().__init__()
|
215 |
+
self.padding = padding
|
216 |
+
self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size])
|
217 |
+
self.bias = nn.Parameter(torch.ones(out_features))
|
218 |
+
|
219 |
+
def forward(self, x: torch.Tensor):
|
220 |
+
return F.conv2d(x, self.weight(), bias=self.bias, padding=self.padding)
|
221 |
+
|
222 |
+
class EqualizedWeight(nn.Module):
|
223 |
+
|
224 |
+
def __init__(self, shape):
|
225 |
+
|
226 |
+
super().__init__()
|
227 |
+
|
228 |
+
self.c = 1 / sqrt(np.prod(shape[1:]))
|
229 |
+
self.weight = nn.Parameter(torch.randn(shape))
|
230 |
+
|
231 |
+
def forward(self):
|
232 |
+
return self.weight * self.c
|
233 |
+
|
234 |
+
class PathLengthPenalty(nn.Module):
|
235 |
+
|
236 |
+
def __init__(self, beta):
|
237 |
+
|
238 |
+
super().__init__()
|
239 |
+
|
240 |
+
self.beta = beta
|
241 |
+
self.steps = nn.Parameter(torch.tensor(0.), requires_grad=False)
|
242 |
+
|
243 |
+
self.exp_sum_a = nn.Parameter(torch.tensor(0.), requires_grad=False)
|
244 |
+
|
245 |
+
def forward(self, w, x):
|
246 |
+
|
247 |
+
device = x.device
|
248 |
+
image_size = x.shape[2] * x.shape[3]
|
249 |
+
y = torch.randn(x.shape, device=device)
|
250 |
+
|
251 |
+
output = (x * y).sum() / sqrt(image_size)
|
252 |
+
sqrt(image_size)
|
253 |
+
|
254 |
+
gradients, *_ = torch.autograd.grad(outputs=output,
|
255 |
+
inputs=w,
|
256 |
+
grad_outputs=torch.ones(output.shape, device=device),
|
257 |
+
create_graph=True)
|
258 |
+
|
259 |
+
norm = (gradients ** 2).sum(dim=2).mean(dim=1).sqrt()
|
260 |
+
|
261 |
+
if self.steps > 0:
|
262 |
+
|
263 |
+
a = self.exp_sum_a / (1 - self.beta ** self.steps)
|
264 |
+
|
265 |
+
loss = torch.mean((norm - a) ** 2)
|
266 |
+
else:
|
267 |
+
loss = norm.new_tensor(0)
|
268 |
+
|
269 |
+
mean = norm.mean().detach()
|
270 |
+
self.exp_sum_a.mul_(self.beta).add_(mean, alpha=1 - self.beta)
|
271 |
+
self.steps.add_(1.)
|
272 |
+
|
273 |
+
return loss
|
274 |
+
|
275 |
+
def gradient_penalty(critic, real, fake,device="cpu"):
|
276 |
+
BATCH_SIZE, C, H, W = real.shape
|
277 |
+
beta = torch.rand((BATCH_SIZE, 1, 1, 1)).repeat(1, C, H, W).to(device)
|
278 |
+
interpolated_images = real * beta + fake.detach() * (1 - beta)
|
279 |
+
interpolated_images.requires_grad_(True)
|
280 |
+
|
281 |
+
    # Calculate critic scores
    mixed_scores = critic(interpolated_images)

    # Take the gradient of the scores with respect to the images
    gradient = torch.autograd.grad(
        inputs=interpolated_images,
        outputs=mixed_scores,
        grad_outputs=torch.ones_like(mixed_scores),
        create_graph=True,
        retain_graph=True,
    )[0]
    gradient = gradient.view(gradient.shape[0], -1)
    gradient_norm = gradient.norm(2, dim=1)
    gradient_penalty = torch.mean((gradient_norm - 1) ** 2)
    return gradient_penalty


def get_w(batch_size, mapping_net):
    z = torch.randn(batch_size, W_DIM).to(DEVICE)
    w = mapping_net(z)
    return w[None, :, :].expand(LOG_RESOLUTION, -1, -1)


def get_noise(batch_size):
    noise = []
    resolution = 4

    for i in range(LOG_RESOLUTION):
        if i == 0:
            n1 = None
        else:
            n1 = torch.randn(batch_size, 1, resolution, resolution, device=DEVICE)
        n2 = torch.randn(batch_size, 1, resolution, resolution, device=DEVICE)

        noise.append((n1, n2))

        resolution *= 2

    return noise


def generate_examples(gen, mapping_net, epoch, n=100):
    # mapping_net parameter added: get_w() below requires it as its second argument
    gen.eval()
    alpha = 1.0
    base_dir = '/content/drive/MyDrive/StyleGAN2-256/StyleGAN2-256-Pituitary/saved_examples'
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    epoch_dir = os.path.join(base_dir, f'epoch{epoch}')
    if not os.path.exists(epoch_dir):
        os.makedirs(epoch_dir)
    for i in range(n):
        with torch.no_grad():
            w = get_w(1, mapping_net)
            noise = get_noise(1)
            img = gen(w, noise)
            save_image(img * 0.5 + 0.5, os.path.join(epoch_dir, f"img_{i}.png"))


def generate_example_and_show_SG2(gen, mapping_net, steps=1, n=1):
    """
    Generate and display an example image using StyleGAN2.

    Args:
        gen (torch.nn.Module): Generator model.
        mapping_net (torch.nn.Module): Mapping network model.
        steps (int): Number of steps (unused in StyleGAN2 but kept for consistency).
        n (int): Number of examples to generate (unused here but retained for consistency).

    Returns:
        matplotlib.figure.Figure: A matplotlib figure containing the generated image.
    """
    gen.eval()          # Set the generator to evaluation mode
    mapping_net.eval()  # Set the mapping network to evaluation mode

    with torch.no_grad():
        # Generate latent vector and noise
        w = get_w(1, mapping_net).to(DEVICE)  # Generate a single latent vector
        noise = get_noise(1)                  # Generate noise for all resolution levels

        # Generate the image using the generator
        img = gen(w, noise).to("cpu") * 0.5 + 0.5  # Scale to [0, 1]

        # Prepare the matplotlib figure for display
        fig, ax = plt.subplots(figsize=(2, 2))
        fig.patch.set_facecolor('white')            # Set the figure's background color
        ax.imshow(img[0].permute(1, 2, 0).numpy())  # Convert tensor to HxWxC for plt.imshow
        ax.axis('off')                              # Turn off axis
        ax.set_facecolor('white')                   # Set the axis background color
        plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # Remove space around image

    return fig


def generate_examples_SG2(generator, mapping_net, num_images):
    """
    Generate images using a StyleGAN2 generator and return them as Base64-encoded strings.

    Args:
        generator (torch.nn.Module): Pre-trained StyleGAN2 generator model.
        mapping_net (torch.nn.Module): Pre-trained mapping network model.
        num_images (int): Number of images to generate.

    Returns:
        list: Base64-encoded images.
        list: BytesIO image buffers for optional ZIP creation.
    """
    images_base64 = []
    image_buffers = []

    generator.eval()    # Set generator to evaluation mode
    mapping_net.eval()  # Set mapping network to evaluation mode

    # nn.Module has no .device attribute; derive the device from the parameters
    device = next(generator.parameters()).device

    with torch.no_grad():
        for i in range(num_images):
            # Generate latent vector and noise
            w = get_w(1, mapping_net).to(device)  # Generate a single latent vector
            noise = get_noise(1)                  # Generate noise for all resolution levels

            # Generate the image using the generator
            img = generator(w, noise).to("cpu") * 0.5 + 0.5  # Normalize to [0, 1]
            img_np = img[0].permute(1, 2, 0).numpy()  # Convert tensor to HxWxC format

            # Convert to Base64-encoded image
            fig, ax = plt.subplots(figsize=(2, 2))
            ax.imshow(img_np)
            ax.axis('off')
            buf = io.BytesIO()
            FigureCanvas(fig).print_png(buf)
            buf.seek(0)
            image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
            images_base64.append(image_base64)

            # Store the buffer for optional ZIP file creation
            image_buffers.append(buf)
            plt.close(fig)

    return images_base64, image_buffers
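For reference, a minimal usage sketch of the StyleGAN2 helpers above. Everything beyond this diff is an assumption: the Generator and MappingNetwork classes and the DEVICE, W_DIM, and LOG_RESOLUTION constants come from earlier in stylegan.py, and the checkpoint filenames are hypothetical.

    import torch

    # Hypothetical checkpoints; constructor signatures assumed from the
    # definitions earlier in this file.
    gen = Generator(LOG_RESOLUTION, W_DIM).to(DEVICE)
    mapping_net = MappingNetwork(W_DIM, W_DIM).to(DEVICE)
    gen.load_state_dict(torch.load("generator.pth", map_location=DEVICE))
    mapping_net.load_state_dict(torch.load("mapping_net.pth", map_location=DEVICE))

    # Returns Base64 PNG strings plus the raw buffers for ZIP packaging.
    images_b64, buffers = generate_examples_SG2(gen, mapping_net, num_images=4)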
medgan/vit.py
ADDED
@@ -0,0 +1,46 @@
import torch
import torchvision
import torchvision.transforms as transforms

from PIL import Image

class TumorDetectionApp:
    def __init__(self, model_path, device):
        self.device = device
        self.model_path = model_path

    def predict_image(self, image_path):
        # Load the model
        model = torchvision.models.vit_b_16()
        num_classes = 4  # Replace with the actual number of classes
        num_features = model.heads[0].in_features
        model.heads = torch.nn.Linear(num_features, num_classes)
        model.load_state_dict(torch.load(self.model_path, map_location=self.device))
        model.to(self.device)
        model.eval()

        # Define image transformations
        IMG_SIZE = 224  # Ensure this matches the size used during training
        transform = transforms.Compose([
            transforms.Resize((IMG_SIZE, IMG_SIZE)),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])

        # Load and preprocess the image
        try:
            # convert("RGB") added so grayscale scans match the ViT's
            # 3-channel input and the 3-channel Normalize above
            img = Image.open(image_path).convert("RGB")
            img = transform(img).unsqueeze(0).to(self.device)  # Add batch dimension
        except FileNotFoundError:
            print(f"Error: Image file not found at {image_path}")
            return None
        except Exception as e:
            print(f"Error processing image: {e}")
            return None

        # Perform inference
        with torch.no_grad():
            outputs = model(img)
            _, predicted = torch.max(outputs, 1)

        return predicted.item()
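A short usage sketch for TumorDetectionApp. The checkpoint path here is hypothetical, and the mapping from the returned class index to a tumor label is defined in app.py rather than in this module.

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    app = TumorDetectionApp(model_path="vit_brain_tumor.pth", device=device)  # hypothetical path

    class_id = app.predict_image("scan.jpg")  # int in [0, 3], or None on error
    if class_id is not None:
        print(f"Predicted class index: {class_id}")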
medgan/wgan.py
ADDED
@@ -0,0 +1,60 @@
import io
import torch
import base64
import torch.nn as nn
import matplotlib.pyplot as plt
# Alias fixed from "FigureCanvasgit" so the FigureCanvas(...) calls below resolve
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

class Generator_WGAN(nn.Module):
    def __init__(self, z_dim=256, img_channels=1, features_g=32):
        super(Generator_WGAN, self).__init__()
        # Upsampling path: 1x1 latent -> 4x4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256x256
        self.gen = nn.Sequential(
            self._block(z_dim, features_g * 32, 4, 2, 0),
            self._block(features_g * 32, features_g * 16, 4, 2, 1),
            self._block(features_g * 16, features_g * 8, 4, 2, 1),
            self._block(features_g * 8, features_g * 4, 4, 2, 1),
            self._block(features_g * 4, features_g * 2, 4, 2, 1),
            self._block(features_g * 2, features_g, 4, 2, 1),
            nn.ConvTranspose2d(features_g, img_channels, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )

    def _block(self, in_channels, out_channels, kernel_size, stride, padding):
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

    def forward(self, x):
        return self.gen(x)

# Function to generate WGAN images
def generate_examples_WGAN(generator, noise, num_images):
    images_base64 = []
    image_buffers = []

    with torch.no_grad():
        generated_images = generator(noise)
        generated_images = (generated_images + 1) / 2  # Normalize to [0, 1]

        for i in range(num_images):
            img_tensor = generated_images[i].cpu().squeeze(0)
            img_np = (img_tensor.numpy() * 255).astype('uint8')

            # Convert to Base64-encoded image
            fig, ax = plt.subplots(figsize=(2, 2))
            ax.imshow(img_np, cmap='gray')
            ax.axis('off')
            buf = io.BytesIO()
            FigureCanvas(fig).print_png(buf)
            buf.seek(0)
            image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
            images_base64.append(image_base64)

            # Store the buffer for optional ZIP file creation
            image_buffers.append(buf)
            plt.close(fig)

    return images_base64, image_buffers
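A usage sketch for the WGAN pieces above. The latent input must be shaped (num_images, z_dim, 1, 1); with the default blocks, the first transposed convolution maps 1x1 to 4x4 and each later stage doubles the resolution, so the output is a 256x256 single-channel image. The weights file named here is hypothetical.

    import torch

    generator = Generator_WGAN(z_dim=256, img_channels=1)
    # generator.load_state_dict(torch.load("wgan_generator.pth", map_location="cpu"))  # hypothetical checkpoint
    generator.eval()

    noise = torch.randn(8, 256, 1, 1)  # (N, z_dim, 1, 1)
    images_b64, buffers = generate_examples_WGAN(generator, noise, num_images=8)
    print(len(images_b64))  # 8 Base64-encoded PNGs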
requirements.txt
ADDED
Binary file (298 Bytes)
static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png
ADDED (Git LFS)
static/css/IMG_0263.JPG
ADDED (Git LFS)
static/css/hamza.jpeg
ADDED (Git LFS)
static/css/me.jpeg
ADDED (Git LFS)
static/css/profile-pic (45).png
ADDED (Git LFS)
static/css/profile-pic - 2025-01-02T222023.790.png
ADDED (Git LFS)
static/css/style.css
ADDED
@@ -0,0 +1,804 @@
/* General Reset */
* {
    margin: 0;
    padding: 0;
    box-sizing: border-box;
}

body {
    font-family: 'Inter', sans-serif;
    color: #333;
    background: linear-gradient(120deg, #f5f7fa, #eaeff1);
    line-height: 1.6;
    overflow-x: hidden;
    transition: all 0.3s ease-in-out;
}

/* Smooth Scroll */
html {
    scroll-behavior: smooth;
}

/* Ultra-Enhanced Navbar */
.navbar {
    background: linear-gradient(135deg, #1e3a8a, #1d4ed8, #60a5fa);
    background-size: 400% 400%;
    color: white;
    padding: 10px 30px;
    display: flex;
    justify-content: space-between;
    align-items: center;
    position: sticky;
    top: 0;
    z-index: 1000;
    box-shadow: 0 8px 20px rgba(0, 0, 0, 0.4);
    animation: gradientShift 8s ease infinite;
    transition: all 0.5s ease;
}

.navbar:hover {
    box-shadow: 0 12px 30px rgba(0, 0, 0, 0.5);
    transform: translateY(-2px);
}

@keyframes gradientShift {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}

.navbar .logo {
    display: flex;
    align-items: center;
    gap: 10px;
    font-size: 1.8rem;
    font-weight: bold;
    color: white;
    text-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
    cursor: pointer;
    transition: transform 0.5s ease, text-shadow 0.5s ease;
}

.navbar .logo img {
    width: 50px;
    height: auto;
    border-radius: 50%;
    transition: transform 0.6s ease, box-shadow 0.5s ease;
}

.navbar .logo img:hover {
    transform: scale(1.3) rotate(360deg);
    box-shadow: 0 8px 20px rgba(255, 255, 255, 0.4);
}

.navbar nav {
    display: flex;
    align-items: center;
    gap: 20px;
}

.navbar nav ul {
    list-style: none;
    display: flex;
    gap: 20px;
    margin: 0;
    padding: 0;
}

.navbar nav ul li {
    position: relative;
}

.navbar nav ul li a {
    text-decoration: none;
    color: white;
    font-weight: bold;
    font-size: 1.1rem;
    padding: 8px 16px;
    border-radius: 8px;
    background: linear-gradient(135deg, #0044ff, #0044ff, #60a5fa);
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.2);
    transition: background 0.4s ease, transform 0.4s ease, box-shadow 0.4s ease;
    position: relative;
}

.navbar nav ul li a:hover {
    background: #b37ed4;
    color: #ffffff;
    transform: translateY(-5px);
    box-shadow: 0 8px 25px rgba(255, 223, 87, 0.5);
}

.navbar nav ul li ul {
    display: none;
    position: absolute;
    top: calc(100% + 5px);
    left: 0;
    background: rgba(30, 41, 59, 0.95);
    border-radius: 10px;
    overflow: hidden;
    box-shadow: 0 8px 20px rgba(0, 0, 0, 0.5);
    opacity: 0;
    transform: translateY(-10px);
    transition: opacity 0.4s ease, transform 0.4s ease;
    z-index: 100;
}

.navbar nav ul li:hover ul {
    display: block;
    opacity: 1;
    transform: translateY(0);
}

.navbar nav ul li ul li {
    padding: 10px;
}

.navbar nav ul li ul li a {
    color: white;
    padding: 8px 12px;
    display: block;
    background: none;
    transition: color 0.3s ease, background 0.3s ease;
}

.navbar nav ul li ul li a:hover {
    background: rgba(255, 255, 255, 0.2);
    color: #38bdf8;
}

@keyframes fadeIn {
    from { opacity: 0; transform: translateY(-15px); }
    to { opacity: 1; transform: translateY(0); }
}

/* Hero Section */
.hero {
    background: linear-gradient(135deg, #1e3a8a, #60a5fa);
    color: white;
    text-align: center;
    padding: 150px 20px;
    border-bottom-left-radius: 50px;
    border-bottom-right-radius: 50px;
    box-shadow: 0 10px 20px rgba(0, 0, 0, 0.2);
    animation: fadeInHero 1.5s ease;
}

@keyframes fadeInHero {
    from {
        opacity: 0;
        transform: translateY(-30px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}

.hero h1 {
    font-size: 3.5rem;
    margin-bottom: 20px;
    font-weight: bold;
    text-shadow: 0 0 10px #ffffff, 0 0 15px #007BFF;
    animation: textGlow 2s infinite alternate;
}

@keyframes textGlow {
    0% {
        text-shadow: 0 0 10px #ffffff, 0 0 15px #007BFF;
    }
    100% {
        text-shadow: 0 0 20px #ffffff, 0 0 30px #86E3CE;
    }
}

.hero p {
    font-size: 1.2rem;
    max-width: 800px;
    margin: 0 auto 30px;
    color: #e0f7ff;
}

.btn-primary {
    background: #007BFF;
    color: white;
    padding: 12px 30px;
    border-radius: 30px;
    font-size: 1rem;
    font-weight: bold;
    text-decoration: none;
    transition: background 0.3s ease, transform 0.3s ease;
    box-shadow: 0 6px 15px rgba(0, 0, 0, 0.2);
}

.btn-primary:hover {
    background: #0056b3;
    transform: translateY(-5px) scale(1.1);
}

/* About Section */
.about {
    padding: 80px 20px;
    text-align: center;
    background: #f9f9fc;
    border-radius: 20px;
    animation: fadeInSection 1.5s ease;
    box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1);
}

.about h2 {
    font-size: 2.8rem;
    color: #007BFF;
    margin-bottom: 20px;
    position: relative;
}

.about h2::after {
    content: '';
    position: absolute;
    width: 80px;
    height: 4px;
    background: linear-gradient(135deg, #007BFF, #86E3CE);
    bottom: -10px;
    left: 50%;
    transform: translateX(-50%);
    animation: slideIn 1s ease;
}

.about p {
    font-size: 21px;
    color: #1d1d1d;
    max-width: 800px;
    margin: 20px auto 40px;
    line-height: 1.8;
}

/* Features Grid */
.features-grid {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
    gap: 30px;
    margin-top: 40px;
    align-items: stretch;
}

.feature-card {
    background: linear-gradient(135deg, #007BFF, #86E3CE);
    color: white;
    padding: 30px;
    border-radius: 15px;
    box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
    text-align: center;
    transform: scale(0.95);
    transition: transform 0.3s ease, box-shadow 0.3s ease;
    position: relative;
    overflow: hidden;
}

.feature-card::before {
    content: '';
    position: absolute;
    top: -50%;
    left: -50%;
    width: 200%;
    height: 200%;
    background: radial-gradient(circle, rgba(255, 255, 255, 0.2), transparent 70%);
    opacity: 0;
    transform: scale(0);
    transition: transform 0.5s ease, opacity 0.5s ease;
    z-index: 0;
}

.feature-card:hover::before {
    transform: scale(1);
    opacity: 1;
}

.feature-card:hover {
    transform: scale(1.05);
    box-shadow: 0 15px 40px rgba(0, 0, 0, 0.3);
}

.feature-card h3 {
    font-size: 1.8rem; /* Increased font size for the heading */
    margin-bottom: 15px;
    font-weight: bold;
    position: relative;
    z-index: 1;
}

.feature-card p {
    font-size: 1.8rem; /* Increased font size for the paragraph */
    line-height: 2; /* Adjusted line height for readability */
    position: relative;
    z-index: 1;
    color: #ffffff; /* Optional: Ensure contrast with the background */
}

.feature-card:hover h3 {
    text-shadow: 0 0 10px rgba(255, 255, 255, 0.8);
}

/* Features Section */
.features-grid {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
    gap: 20px;
    margin-top: 40px;
}

.feature-card {
    background: linear-gradient(135deg, #1e3a8a, #60a5fa);
    color: white;
    padding: 30px;
    border-radius: 15px;
    box-shadow: 0 8px 15px rgba(0, 0, 0, 0.2);
    text-align: center;
    transform: scale(0.95);
    transition: transform 0.3s ease, box-shadow 0.3s ease;
}

.feature-card:hover {
    transform: scale(1.05);
    box-shadow: 0 10px 25px rgba(0, 0, 0, 0.3);
}

.feature-card h3 {
    font-size: 1.5rem;
    margin-bottom: 15px;
}

.feature-card p {
    font-size: 1rem;
    line-height: 1.5;
}

/* Generate Section */
.generate {
    padding: 70px 20px;
    background: linear-gradient(135deg, #ffffff, #f5f7fa);
    text-align: center;
    border-radius: 20px;
    margin: 50px auto;
    max-width: 700px;
    box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
    position: relative;
    overflow: hidden;
    z-index: 1;
}

.generate::before,
.generate::after {
    content: '';
    position: absolute;
    width: 200%;
    height: 200%;
    top: -50%;
    left: -50%;
    background: radial-gradient(circle, rgba(0, 123, 255, 0.1), transparent 80%);
    animation: rotateGlow 10s linear infinite;
    z-index: -1;
}

@keyframes rotateGlow {
    0% {
        transform: rotate(0deg);
    }
    100% {
        transform: rotate(360deg);
    }
}

/* Generate Section */
.generate {
    padding: 80px 20px;
    background: linear-gradient(135deg, #e3f2fd, #ffffff);
    text-align: center;
    border-radius: 30px;
    margin: 50px auto;
    max-width: 700px;
    box-shadow: 0 15px 30px rgba(0, 0, 0, 0.2);
    position: relative;
    overflow: hidden;
    z-index: 1;
}

.generate::before {
    content: '';
    position: absolute;
    top: -50%;
    left: -50%;
    width: 200%;
    height: 200%;
    background: radial-gradient(circle, rgba(0, 123, 255, 0.1), transparent 70%);
    animation: rotateGlow 8s linear infinite;
    z-index: -1;
}

@keyframes rotateGlow {
    0% {
        transform: rotate(0deg);
    }
    100% {
        transform: rotate(360deg);
    }
}

.generate h2 {
    font-size: 2.5rem;
    color: #007BFF;
    font-weight: bold;
    margin-bottom: 30px;
    animation: fadeInTitle 1s ease-in-out;
}

@keyframes fadeInTitle {
    0% {
        opacity: 0;
        transform: translateY(-20px);
    }
    100% {
        opacity: 1;
        transform: translateY(0);
    }
}

/* Form Styling */
.form {
    background: #ffffff;
    padding: 40px;
    border-radius: 20px;
    box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
    animation: slideInForm 1.5s ease;
    max-width: 500px;
    margin: 0 auto;
}

@keyframes slideInForm {
    0% {
        opacity: 0;
        transform: translateY(30px);
    }
    100% {
        opacity: 1;
        transform: translateY(0);
    }
}

.form-group {
    margin-bottom: 20px;
    text-align: left;
}

label {
    font-size: 1rem;
    font-weight: bold;
    color: #333;
    display: block;
    margin-bottom: 8px;
}

select, input {
    width: 100%;
    padding: 12px;
    font-size: 1rem;
    border: 1px solid #ddd;
    border-radius: 10px;
    transition: border 0.3s ease, box-shadow 0.3s ease;
    box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.1);
}

select:focus, input:focus {
    border-color: #007BFF;
    box-shadow: 0 0 10px rgba(0, 123, 255, 0.5);
    outline: none;
    transform: scale(1.02);
}

button {
    display: inline-block;
    background: linear-gradient(135deg, #007BFF, #0056b3);
    color: white;
    padding: 15px 40px;
    border: none;
    border-radius: 30px;
    font-size: 1.2rem;
    font-weight: bold;
    text-transform: uppercase;
    cursor: pointer;
    position: relative;
    transition: all 0.3s ease-in-out;
    box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
}

button:hover {
    background: linear-gradient(135deg, #0056b3, #003f7f);
    transform: translateY(-3px) scale(1.05);
    box-shadow: 0 15px 30px rgba(0, 0, 0, 0.3);
}

button:active {
    transform: translateY(2px);
    box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
}

/* Results Section */
.results {
    padding: 60px 20px;
    background: #ffffff;
    margin: 40px auto;
    max-width: 1200px;
    border-radius: 15px;
    box-shadow: 0 8px 20px rgba(0, 0, 0, 0.1);
    text-align: center;
}

.results h1 {
    font-size: 2.5rem;
    color: #007BFF;
    margin-bottom: 20px;
    font-weight: bold;
}

.results p {
    font-size: 1.2rem;
    margin-bottom: 30px;
    color: #555;
}

.gallery {
    display: grid;
    grid-template-columns: repeat(4, 1fr);
    gap: 20px;
    justify-items: center;
}

.image-card {
    background: white;
    border-radius: 15px;
    overflow: hidden;
    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
    padding: 15px;
    transition: transform 0.3s ease, box-shadow 0.3s ease;
    text-align: center;
}

.image-card img {
    width: 100%;
    height: auto;
    border-radius: 10px;
}

.image-card:hover {
    transform: scale(1.1);
    box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
}

/* Button Styles */
.results .btn-primary, .results .btn-secondary {
    display: inline-block;
    background: linear-gradient(135deg, #007BFF, #0056b3);
    color: white;
    padding: 12px 30px;
    border: none;
    border-radius: 25px;
    font-size: 1rem;
    font-weight: bold;
    text-align: center;
    text-transform: uppercase;
    text-decoration: none;
    cursor: pointer;
    position: relative;
    transition: all 0.3s ease-in-out;
    box-shadow: 0 8px 15px rgba(0, 0, 0, 0.2);
}

.results .btn-primary:hover, .results .btn-secondary:hover {
    background: linear-gradient(135deg, #0056b3, #003f7f);
    box-shadow: 0 12px 20px rgba(0, 0, 0, 0.3), 0 0 10px #007BFF, 0 0 20px #0056b3;
    transform: translateY(-3px);
}

.results .btn-primary:active, .results .btn-secondary:active {
    transform: translateY(2px);
    box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
}

/* Glow Effect on Hover */
.results .btn-primary::after, .results .btn-secondary::after {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    border-radius: 25px;
    background: radial-gradient(circle, rgba(255, 255, 255, 0.3), transparent 70%);
    opacity: 0;
    transition: opacity 0.3s ease-in-out;
    z-index: -1;
}

.results .btn-primary:hover::after, .results .btn-secondary:hover::after {
    opacity: 1;
}

.results .btn-primary {
    margin-top: 40px; /* Add space above the button */
    display: inline-block;
    background: linear-gradient(135deg, #007BFF, #0056b3);
    color: white;
    padding: 12px 30px;
    border: none;
    border-radius: 25px;
    font-size: 1rem;
    font-weight: bold;
    text-align: center;
    text-transform: uppercase;
    text-decoration: none;
    cursor: pointer;
    transition: all 0.3s ease-in-out;
    box-shadow: 0 8px 15px rgba(0, 0, 0, 0.2);
}

.results .btn-primary:hover {
    background: linear-gradient(135deg, #0056b3, #003f7f);
    transform: translateY(-3px);
    box-shadow: 0 12px 20px rgba(0, 0, 0, 0.3), 0 0 10px #007BFF, 0 0 20px #0056b3;
}

/* Footer */
.footer {
    background: linear-gradient(135deg, #007BFF, #1100fd);
    padding: 40px 20px;
    text-align: center;
    position: relative;
    overflow: hidden;
    border-top: 5px solid #0056b3;
    box-shadow: 0 -4px 20px rgba(0, 0, 0, 0.3);
    z-index: 1;
}

.footer::before,
.footer::after {
    content: '';
    position: absolute;
    top: 0;
    left: -50%;
    width: 200%;
    height: 200%;
    background: radial-gradient(circle at center, rgba(255, 255, 255, 0.2) 0%, transparent 70%);
    animation: shineEffect 8s infinite linear;
    transform: rotate(45deg);
    z-index: -1;
}

@keyframes shineEffect {
    0% {
        transform: translateX(-100%) rotate(45deg);
    }
    100% {
        transform: translateX(100%) rotate(45deg);
    }
}

.footer p {
    font-size: 1.1rem;
    font-weight: 600;
    color: #fff;
    margin-bottom: 15px;
    text-shadow: 0 2px 4px rgba(0, 0, 0, 0.4);
}

.footer {
    text-align: center;
    padding: 20px;
    background: linear-gradient(135deg, #1e3a8a, #60a5fa);
    color: white;
    font-size: 1rem;
    box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
}

.footer .footer-links {
    margin-top: 10px;
    display: flex;
    justify-content: center;
    gap: 15px;
}

.footer .footer-links a {
    color: white;
    font-size: 1.2rem;
    text-decoration: none;
    transition: color 0.3s;
}

@keyframes fadeInLinks {
    0% {
        opacity: 0;
        transform: translateY(20px);
    }
    100% {
        opacity: 1;
        transform: translateY(0);
    }
}

.footer-links a {
    color: #fff;
    text-decoration: none;
    font-size: 1rem;
    font-weight: bold;
    background: rgba(255, 255, 255, 0.1);
    padding: 10px 20px;
    border-radius: 25px;
    transition: background 0.3s ease, transform 0.3s ease, box-shadow 0.3s ease;
    box-shadow: 0 3px 6px rgba(0, 0, 0, 0.2);
}

.footer-links a:hover {
    background: #b37ed4;
    color: #ffffff;
    transform: translateY(-5px);
    box-shadow: 0 6px 15px rgba(0, 0, 0, 0.3);
}

.footer-social {
    margin-top: 20px;
    display: flex;
    justify-content: center;
    gap: 20px;
    animation: socialIconsPulse 2s infinite;
}

@keyframes socialIconsPulse {
    0%, 100% {
        transform: scale(1);
    }
    50% {
        transform: scale(1.2);
    }
}

.footer-social a {
    font-size: 1.5rem;
    color: #fff;
    transition: color 0.3s ease, transform 0.3s ease;
}

.footer-social a:hover {
    color: #e5e5e5;
    transform: scale(1.5) rotate(15deg);
}

.action-buttons {
    display: flex;
    justify-content: center;
    gap: 15px; /* Space between buttons */
    margin-top: 20px;
}

.action-buttons .btn-primary,
.action-buttons .btn-secondary {
    display: inline-block;
    padding: 10px 20px;
    font-size: 16px;
    color: #fff;
    background: #007BFF;
    border: none;
    border-radius: 25px;
    text-align: center;
    text-decoration: none;
    box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
    transition: background 0.3s ease;
}

.action-buttons .btn-primary:hover,
.action-buttons .btn-secondary:hover {
    background: #0056b3;
}
static/css/zaloom.jpeg
ADDED (Git LFS)
static/icons/email-icon.png
ADDED (Git LFS)
static/icons/icons8-2-circled-64.png
ADDED (Git LFS)
static/icons/icons8-circled-3-c-64.png
ADDED (Git LFS)
static/icons/icons8-circled-4-c-64.png
ADDED (Git LFS)
static/icons/icons8-circled-5-64.png
ADDED (Git LFS)
static/icons/icons8-linkedin-50.png
ADDED (Git LFS)
static/icons/icons8-number-1-64.png
ADDED (Git LFS)
static/icons/image.png
ADDED (Git LFS)
static/icons/linkedin-icon.png
ADDED (Git LFS)
static/icons/linkedin-icon.svg
ADDED
static/script.js
ADDED
@@ -0,0 +1,30 @@
document.getElementById("generate-form").addEventListener("submit", function (event) {
    event.preventDefault();

    const formData = new FormData(event.target);
    const resultDiv = document.getElementById("result");
    resultDiv.innerHTML = "Generating images...";

    fetch("/generate", {
        method: "POST",
        body: formData,
    })
        .then((response) => response.json())
        .then((data) => {
            resultDiv.innerHTML = "";
            if (data.error) {
                resultDiv.innerHTML = `<p>Error: ${data.error}</p>`;
                return;
            }

            data.images.forEach((image) => {
                const img = document.createElement("img");
                img.src = image;
                resultDiv.appendChild(img);
            });
        })
        .catch((error) => {
            console.error("Error:", error);
            resultDiv.innerHTML = "<p>Something went wrong. Please try again.</p>";
        });
});
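The script posts the form to /generate and expects JSON shaped either {"error": ...} or {"images": [...]}, where each entry is assigned directly to an img src. The real route lives in app.py (not shown in this part of the diff); below is a minimal Flask sketch of a compatible handler, with the model wiring assumed rather than taken from app.py, and the num_images form field name hypothetical.

    import torch
    from flask import Flask, request, jsonify

    app = Flask(__name__)

    @app.route("/generate", methods=["POST"])
    def generate():
        # Sketch only: assumes a Generator_WGAN already loaded as `generator`.
        try:
            num_images = int(request.form.get("num_images", 1))  # hypothetical field name
            noise = torch.randn(num_images, 256, 1, 1)
            images_b64, _ = generate_examples_WGAN(generator, noise, num_images)
            # The front end uses each entry as img.src, so prefix a data URI.
            return jsonify(images=[f"data:image/png;base64,{b64}" for b64 in images_b64])
        except Exception as e:
            return jsonify(error=str(e))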
static/temp_image.jpg
ADDED (Git LFS)
style.css
ADDED
@@ -0,0 +1,28 @@
body {
    padding: 2rem;
    font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
}

h1 {
    font-size: 16px;
    margin-top: 0;
}

p {
    color: rgb(107, 114, 128);
    font-size: 15px;
    margin-bottom: 10px;
    margin-top: 5px;
}

.card {
    max-width: 620px;
    margin: 0 auto;
    padding: 16px;
    border: 1px solid lightgray;
    border-radius: 16px;
}

.card p:last-child {
    margin-bottom: 0;
}
templates/About_us.html
ADDED
@@ -0,0 +1,161 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>About Us</title>
    <link rel="icon" type="image/png" href="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png">
    <link rel="stylesheet" href="/static/css/style.css">
    <style>
        /* Additional styling for About Us page */
        .about {
            text-align: center;
            padding: 50px 20px;
            background: linear-gradient(135deg, #f0f7ff, #e0f4ff);
            border-radius: 20px;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
            margin: 50px auto;
            max-width: 1200px;
            animation: fadeInAbout 1.5s ease-in-out;
        }

        .about h2 {
            font-size: 2.5rem;
            color: #007BFF;
            margin-bottom: 30px;
            position: relative;
        }

        .features-grid {
            display: flex;
            justify-content: center;
            flex-wrap: wrap;
            gap: 30px;
        }

        .feature-card {
            text-align: center;
            padding: 20px;
            background: linear-gradient(135deg, #a47abe, #006eff);
            border-radius: 15px;
            box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1);
            width: 280px;
            color: white;
            position: relative;
        }

        .feature-card img {
            width: 150px;
            height: 150px;
            object-fit: cover;
            border-radius: 50%;
            margin-bottom: 15px;
            border: 4px solid #ffffff;
        }

        .feature-card h3 {
            font-size: 1.5rem;
            color: white;
            margin-bottom: 10px;
        }

        .feature-card p {
            font-size: 1rem;
            color: #f0f0f0;
            line-height: 1.6;
        }

        .contact-links {
            display: flex;
            justify-content: center;
            gap: 15px;
            margin-top: 10px;
        }

        .contact-link {
            display: flex;
            justify-content: center;
            align-items: center;
            width: 40px;
            height: 40px;
            border-radius: 50%;
            box-shadow: 0 4px 10px rgba(0, 0, 0, 0.2);
            transition: transform 0.3s ease, box-shadow 0.3s ease;
        }

        .contact-link img {
            width: 50px;
            height: 50px;
        }

        .contact-link:hover {
            transform: scale(1.1);
            box-shadow: 0 6px 15px rgba(0, 0, 0, 0.3);
        }
    </style>
</head>
<body>
    <header class="navbar">
        <div class="logo">
            <img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="MedGAN Logo">
            <span>MedGAN Studio</span>
        </div>
        <nav>
            <ul>
                <li><a href="/">Home</a></li>
                <li><a href="{{ url_for('generate_info') }}">Generate</a></li>
                <li><a href="{{ url_for('detect_info') }}">Detect</a></li>
                <li><a href="{{ url_for('about_us') }}">About Us</a></li>
                <li><a href="{{ url_for('contact') }}">Contact</a></li>
            </ul>
        </nav>
    </header>

    <section class="about">
        <div class="container">
            <h2>About Our Team</h2>
            <div class="features-grid">
                <div class="feature-card">
                    <img src="static/css/hamza.jpeg" alt="Hamza Nasser">
                    <h3>Hamza Nasser</h3>
                    <p>Experienced in implementing effective data preprocessing strategies and specializing in optimizing GAN architectures for enhanced application outcomes.</p>
                    <div class="contact-links">
                        <a href="mailto:[email protected]" class="contact-link"><img src="static/icons/email-icon.png" alt="Email"></a>
                        <a href="https://www.linkedin.com/in/hamza-waseem-nasser" target="_blank" class="contact-link"><img src="static/icons/linkedin-icon.png" alt="LinkedIn"></a>
                    </div>
                </div>
                <div class="feature-card">
                    <!-- src path fixed to use forward slashes; backslashes are invalid in URLs -->
                    <img src="static/css/profile-pic - 2025-01-02T222023.790.png" alt="Mohammed Zaloom">
                    <h3>Mohammed Zaloom</h3>
                    <p>Bachelor’s in AI and Robotics with a strong focus on AI and Deep Learning technologies. Experienced in designing, training, and optimizing deep learning models.</p>
                    <div class="contact-links">
                        <a href="mailto:[email protected]" class="contact-link"><img src="static/icons/email-icon.png" alt="Email"></a>
                        <a href="https://linkedin.com/in/mozaloom" target="_blank" class="contact-link"><img src="static/icons/linkedin-icon.png" alt="LinkedIn"></a>
                    </div>
                </div>
                <div class="feature-card">
                    <img src="static/css/me.jpeg" alt="Mahmoud AbuAwd">
                    <h3>Mahmoud AbuAwd</h3>
                    <p>Highly skilled in data preprocessing techniques and optimization, with strong expertise in tuning and implementing advanced GAN architectures for applications.</p>
                    <div class="contact-links">
                        <a href="mailto:@gmail.com" class="contact-link"><img src="static/icons/email-icon.png" alt="Email"></a>
                        <a href="https://linkedin.com/in/mahmoudabuawd" target="_blank" class="contact-link"><img src="static/icons/linkedin-icon.png" alt="LinkedIn"></a>
                    </div>
                </div>
            </div>
        </div>
    </section>

    <footer class="footer">
        <div class="container">
            <p>© 2024 MedGAN Studio. All Rights Reserved.</p>
            <div class="footer-links">
                <a href="mailto:[email protected]">Email Us</a>
                <a href="https://instagram.com/medgan" target="_blank">Instagram</a>
                <a href="https://www.linkedin.com/company/medgan/" target="_blank">LinkedIn</a>
            </div>
        </div>
    </footer>
</body>
</html>
templates/contact.html
ADDED
@@ -0,0 +1,182 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Contact Us</title>
    <link rel="icon" type="image/png" href="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png">
    <link rel="stylesheet" href="/static/css/style.css">
    <style>
        /* Contact Section */
        .contact-section {
            text-align: center;
            padding: 60px 20px;
            background: linear-gradient(135deg, #e0f7fa, #f3e5f5);
            border-radius: 20px;
            box-shadow: 0 15px 30px rgba(0, 0, 0, 0.2);
            margin: 50px auto;
            max-width: 900px;
            animation: fadeIn 1.5s ease-in-out;
            position: relative;
            overflow: hidden;
        }

        .contact-section::before {
            content: '';
            position: absolute;
            top: -50%;
            left: -50%;
            width: 200%;
            height: 200%;
            background: radial-gradient(circle, rgba(0, 198, 255, 0.2), transparent 70%);
            z-index: -1;
            transform: scale(0);
            transition: transform 0.4s ease-in-out, opacity 0.4s ease-in-out;
        }

        .contact-section:hover::before {
            transform: scale(1);
            opacity: 1;
        }

        @keyframes fadeIn {
            from {
                opacity: 0;
                transform: translateY(20px);
            }
            to {
                opacity: 1;
                transform: translateY(0);
            }
        }

        .contact-section h2 {
            font-size: 3rem;
            color: #007BFF;
            margin-bottom: 20px;
            font-weight: bold;
            text-transform: uppercase;
            position: relative;
            animation: slideIn 1s ease-in-out;
        }

        .contact-section h2::after {
            content: '';
            position: absolute;
            width: 80px;
            height: 4px;
            background: linear-gradient(90deg, #007BFF, #86E3CE);
            bottom: -10px;
            left: 50%;
            transform: translateX(-50%);
            animation: expand 1s ease-in-out;
        }

        @keyframes slideIn {
            from {
                opacity: 0;
                transform: translateY(-20px);
            }
            to {
                opacity: 1;
                transform: translateY(0);
            }
        }

        @keyframes expand {
            from {
                width: 0;
            }
            to {
                width: 80px;
            }
        }

        .contact-section p {
            font-size: 1.3rem;
            color: #555;
            margin-bottom: 30px;
        }

        /* Contact Links */
        .contact-links {
            display: flex;
            justify-content: center;
            flex-wrap: wrap;
            gap: 30px;
        }

        .contact-links a {
            display: inline-block;
            background: linear-gradient(135deg, #007BFF, #0056b3);
            color: white;
            padding: 15px 40px;
            border-radius: 30px;
            font-size: 1.2rem;
            font-weight: bold;
            text-decoration: none;
            position: relative;
            overflow: hidden;
            transition: all 0.4s ease-in-out;
            box-shadow: 0 8px 15px rgba(0, 0, 0, 0.2);
        }

        .contact-links a:hover {
            background: linear-gradient(135deg, #0056b3, #003f7f);
            transform: translateY(-5px);
            box-shadow: 0 12px 25px rgba(0, 0, 0, 0.3);
        }

        .contact-links a::before {
            content: '';
            position: absolute;
            top: 0;
            left: -100%;
            width: 200%;
            height: 100%;
            background: rgba(255, 255, 255, 0.1);
            z-index: 1;
            transform: skewX(-45deg);
            transition: left 0.5s ease;
        }

        .contact-links a:hover::before {
            left: 100%;
        }
    </style>
</head>
<body>
    <header class="navbar">
        <div class="logo">
            <img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="MedGAN Logo">
            <span>MedGAN Studio</span>
        </div>
        <nav>
            <ul>
                <li><a href="/">Home</a></li>
                <li><a href="{{ url_for('generate_info') }}">Generate</a></li>
                <li><a href="{{ url_for('detect_info') }}">Detect</a></li>
                <li><a href="{{ url_for('about_us') }}">About Us</a></li>
                <li><a href="{{ url_for('contact') }}">Contact</a></li>
            </ul>
        </nav>
    </header>

    <section class="contact-section">
        <h2>Contact Us</h2>
        <p>Reach out to us through the following channels:</p>
        <div class="contact-links">
            <a href="mailto:[email protected]">Email Us</a>
            <a href="https://instagram.com/medgan" target="_blank">Instagram</a>
            <a href="https://www.linkedin.com/company/medgan/" target="_blank">LinkedIn</a>
        </div>
    </section>

    <footer class="footer">
        <div class="container">
            <p>© 2024 MedGAN Studio. All Rights Reserved.</p>
            <p>Created by AI Students at Al-Balqa Applied University (BAU)</p>
        </div>
    </footer>
</body>
</html>
templates/detect.html
ADDED
@@ -0,0 +1,196 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Brain Tumor Detection</title>
    <!-- Icon filename case corrected to match the committed asset (Blue_ABstract...) -->
    <link rel="icon" type="image/png" href="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png">
    <link rel="stylesheet" href="/static/css/style.css">
    <style>
        body {
            background: linear-gradient(120deg, #f0f7ff, #e0f4ff);
            font-family: 'Inter', sans-serif;
        }

        .container {
            max-width: 900px;
            margin: 50px auto;
            padding: 20px;
        }

        .detect-section {
            text-align: center;
            padding: 60px 20px;
            background: linear-gradient(135deg, #ffffff, #f8f9fa);
            border-radius: 25px;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
            animation: fadeInDetect 1.5s ease-in-out;
        }

        @keyframes fadeInDetect {
            from {
                opacity: 0;
                transform: translateY(20px);
            }
            to {
                opacity: 1;
                transform: translateY(0);
            }
        }

        .detect-section h2 {
            font-size: 2.5rem;
            color: #007BFF;
            margin-bottom: 20px;
            font-weight: bold;
            position: relative;
            text-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
        }

        .detect-section h2::after {
            content: '';
            position: absolute;
            width: 100px;
            height: 4px;
            background: linear-gradient(90deg, #007BFF, #86E3CE);
            bottom: -10px;
            left: 50%;
            transform: translateX(-50%);
        }

        .detect-section p {
            font-size: 1.2rem;
            color: #555;
            margin-bottom: 40px;
        }

        .detect-section form {
            text-align: left;
            background: linear-gradient(135deg, #f9f9fc, #e3e9ff);
            padding: 30px;
            border-radius: 20px;
            box-shadow: 0 8px 20px rgba(0, 0, 0, 0.1);
            display: inline-block;
        }

        .detect-section input[type="file"] {
            width: 100%;
            padding: 12px;
            font-size: 1rem;
            border: 1px solid #ddd;
            border-radius: 10px;
            background: #f7f9fc;
            transition: all 0.3s ease;
            margin-bottom: 20px;
        }

        .detect-section input[type="file"]:focus {
            border-color: #007BFF;
            box-shadow: 0 0 10px rgba(0, 123, 255, 0.5);
            outline: none;
            transform: scale(1.02);
        }

        .btn-primary {
            background: linear-gradient(135deg, #007BFF, #0056b3);
            color: white;
            padding: 12px 30px;
            border-radius: 25px;
            font-size: 1.2rem;
            font-weight: bold;
            text-transform: uppercase;
            text-decoration: none;
            cursor: pointer;
            transition: all 0.3s ease-in-out;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
            display: inline-block;
        }

        .btn-primary:hover {
            background: linear-gradient(135deg, #0056b3, #003f7f);
            transform: translateY(-3px);
        }

        .btn-primary::after {
            content: '';
            position: absolute;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            border-radius: 25px;
            background: radial-gradient(circle, rgba(255, 255, 255, 0.4), transparent 70%);
            opacity: 0;
            transition: opacity 0.3s ease-in-out;
            z-index: -1;
        }

        .btn-primary:hover::after {
            opacity: 1;
        }

        .footer {
            text-align: center;
            padding: 20px;
            background: linear-gradient(135deg, #1e3a8a, #60a5fa);
            color: white;
            font-size: 1rem;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
        }

        .footer .footer-links {
            margin-top: 10px;
            display: flex;
            justify-content: center;
            gap: 15px;
        }

        .footer .footer-links a {
            color: white;
            font-size: 1.2rem;
            text-decoration: none;
            transition: color 0.3s;
        }
    </style>
</head>
<body>
    <header class="navbar">
        <div class="logo">
            <img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="MedGAN Logo">
            <span>MedGAN Studio</span>
        </div>
        <nav>
            <ul>
                <li><a href="/">Home</a></li>
                <li><a href="{{ url_for('generate_info') }}">Generate</a></li>
                <li><a href="{{ url_for('detect_info') }}">Detect</a></li>
                <li><a href="{{ url_for('about_us') }}">About Us</a></li>
                <li><a href="{{ url_for('contact') }}">Contact</a></li>
            </ul>
        </nav>
    </header>

    <section class="detect-section">
        <h2>Brain Tumor Detection</h2>
        <p>Upload a brain scan image to predict its tumor type.</p>
        <form action="/detect" method="post" enctype="multipart/form-data">
            <input type="file" name="file" accept="image/*" required>
            <button type="submit" class="btn-primary">Predict</button>
        </form>
    </section>

    <footer class="footer">
        <div class="container">
            <p>© 2024 MedGAN Studio. All Rights Reserved.</p>
            <div class="footer-links">
                <a href="mailto:[email protected]">Email Us</a>
                <a href="https://instagram.com/medgan" target="_blank">Instagram</a>
                <a href="https://www.linkedin.com/company/medgan/" target="_blank">LinkedIn</a>
            </div>
        </div>
    </footer>
</body>
</html>
templates/error.html
ADDED
@@ -0,0 +1,39 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Error</title>
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap/5.3.0/css/bootstrap.min.css">
    <style>
        body {
            padding: 20px;
        }
        .error-container {
            margin-top: 50px;
            padding: 20px;
            border-radius: 5px;
            background-color: #f8d7da;
            border: 1px solid #f5c6cb;
        }
        .btn-home {
            margin-top: 20px;
        }
    </style>
</head>
<body>
    <div class="container">
        <div class="row">
            <div class="col-md-8 offset-md-2">
                <div class="error-container">
                    <h2 class="text-danger">Error Occurred</h2>
                    <p>{{ error }}</p>
                    <p>Please try again or contact support if the issue persists.</p>
                    <a href="/" class="btn btn-primary btn-home">Return to Home</a>
                </div>
            </div>
        </div>
    </div>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap/5.3.0/js/bootstrap.bundle.min.js"></script>
</body>
</html>
templates/generate.html
ADDED
@@ -0,0 +1,502 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Generate Images</title>
    <link rel="icon" type="image/png" href="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png">
    <link rel="stylesheet" href="/static/css/style.css">

    <style>
        body {
            background: linear-gradient(120deg, #f0f7ff, #e0f4ff);
            font-family: 'Inter', sans-serif;
        }

        .container {
            max-width: 900px;
            margin: 50px auto;
            padding: 20px;
        }

        .manual {
            background: linear-gradient(135deg, #1e3a8a, #60a5fa);
            padding: 30px;
            border-radius: 25px;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
            margin-bottom: 30px;
            text-align: left;
            animation: fadeInManual 1.5s ease-in-out;
        }

        @keyframes fadeInManual {
            from {
                opacity: 0;
                transform: translateX(20px);
            }
            to {
                opacity: 1;
                transform: translateX(0);
            }
        }

        .manual h3 {
            font-size: 2rem;
            color: #ffffff;
            margin-bottom: 15px;
            text-align: center;
            text-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
        }

        .manual ol {
            font-size: 1.2rem;
            color: #ffffff;
            line-height: 1.8;
            margin-left: 20px;
        }

        .manual ol li {
            margin-bottom: 15px;
        }

        .manual ol li span {
            font-weight: bold;
            color: #ffdd57;
            text-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
        }

        .generate {
            text-align: center;
            padding: 40px 20px;
            background: #ffffff;
            border-radius: 25px;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
            margin-top: 30px;
            animation: fadeInGenerate 1.5s ease-in-out;
        }

        @keyframes fadeInGenerate {
            from {
                opacity: 0;
                transform: translateY(20px);
            }
            to {
                opacity: 1;
                transform: translateY(0);
            }
        }

        .generate h2 {
            font-size: 2.5rem;
            color: #007BFF;
            margin-bottom: 20px;
            font-weight: bold;
            position: relative;
            text-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
        }

        .generate h2::after {
            content: '';
            position: absolute;
            width: 100px;
            height: 4px;
            background: linear-gradient(90deg, #007BFF, #86E3CE);
            bottom: -10px;
            left: 50%;
            transform: translateX(-50%);
        }

        .generate p {
            font-size: 1.2rem;
            color: #555;
            margin-bottom: 40px;
        }

        .form {
            text-align: left;
            background: linear-gradient(135deg, #f9f9fc, #e3e9ff);
            padding: 30px;
            border-radius: 20px;
            box-shadow: 0 8px 20px rgba(0, 0, 0, 0.1);
            animation: slideInForm 1.5s ease-in-out;
        }

        @keyframes slideInForm {
            from {
                opacity: 0;
                transform: translateX(-20px);
            }
            to {
                opacity: 1;
                transform: translateX(0);
            }
        }

        .form-group {
            margin-bottom: 20px;
        }

        .form-group label {
            font-size: 1rem;
            font-weight: bold;
            color: #333;
            display: block;
            margin-bottom: 8px;
        }

        .form-group select, .form-group input {
            width: 100%;
            padding: 12px;
            font-size: 1rem;
            border: 1px solid #ddd;
            border-radius: 10px;
            transition: all 0.3s ease;
            background: #f7f9fc;
        }

        .form-group select:focus, .form-group input:focus {
            border-color: #007BFF;
            box-shadow: 0 0 10px rgba(0, 123, 255, 0.5);
            outline: none;
            transform: scale(1.02);
        }

        .btn-primary {
            display: inline-block;
            background: linear-gradient(135deg, #007BFF, #0056b3);
            color: white;
            padding: 12px 30px;
            border-radius: 25px;
            font-size: 1.2rem;
            font-weight: bold;
            text-align: center;
            cursor: pointer;
            transition: all 0.3s ease;
            box-shadow: 0 8px 15px rgba(0, 0, 0, 0.3);
            position: relative;
            overflow: hidden;
        }

        .btn-primary:hover {
            background: linear-gradient(135deg, #0056b3, #003f7f);
            transform: translateY(-3px);
            box-shadow: 0 12px 20px rgba(0, 0, 0, 0.3);
        }

        .btn-primary::after {
            content: '';
            position: absolute;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            border-radius: 25px;
            background: radial-gradient(circle, rgba(255, 255, 255, 0.4), transparent 70%);
            opacity: 0;
            transition: opacity 0.3s ease-in-out;
            z-index: -1;
        }

        .btn-primary:hover::after {
            opacity: 1;
        }

        body {
            background: linear-gradient(120deg, #f0f7ff, #e0f4ff);
            font-family: 'Inter', sans-serif;
        }

        .container {
            max-width: 1200px;
            margin: 50px auto;
            padding: 20px;
        }

        .steps {
            display: flex;
            justify-content: space-between;
            align-items: stretch;
            gap: 20px;
            flex-wrap: wrap;
        }

        .step {
            flex: 1 1 calc(25% - 20px); /* Four items in one row */
            background: #ffffff;
            border-radius: 15px;
            box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
            padding: 20px;
            text-align: center;
            transition: transform 0.3s ease, box-shadow 0.3s ease;
            max-width: 220px; /* Ensures cards remain uniform */
        }

        .step:hover {
            transform: scale(1.05);
            box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
        }

        .step-icon {
            width: 60px;
            height: 60px;
            margin: 0 auto 15px;
            background: linear-gradient(135deg, #007BFF, #0056b3);
            border-radius: 50%;
            display: flex;
            justify-content: center;
            align-items: center;
            box-shadow: 0 4px 10px rgba(0, 0, 0, 0.2);
        }

        .step-icon img {
            width: 50px;
            height: 50px;
        }

        .step-text h4 {
            font-size: 1.5rem;
            color: #333;
            margin-bottom: 10px;
        }

        .step-text p {
            font-size: 1rem;
            color: #666;
            line-height: 1.6;
        }

        .manual {
            background: linear-gradient(135deg, #1e3a8a, #60a5fa);
            padding: 30px;
            border-radius: 25px;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
            margin-bottom: 30px;
            text-align: left;
            animation: fadeInManual 1.5s ease-in-out;
        }

        @keyframes fadeInManual {
            from {
                opacity: 0;
                transform: translateX(20px);
            }
            to {
                opacity: 1;
                transform: translateX(0);
            }
        }

        .manual h3 {
            font-size: 2rem;
            color: #ffffff;
            margin-bottom: 15px;
            text-align: center;
            text-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
        }

        .manual ol {
            font-size: 1.2rem;
            color: #ffffff;
            line-height: 1.8;
            margin-left: 20px;
        }

        .manual ol li {
            margin-bottom: 15px;
        }

        .manual ol li span {
            font-weight: bold;
            color: #ffdd57;
            text-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
        }

        .generate {
            text-align: center;
            padding: 40px 20px;
            background: #ffffff;
            border-radius: 25px;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
            margin-top: 30px;
            animation: fadeInGenerate 1.5s ease-in-out;
        }

        .generate h2 {
            font-size: 2.5rem;
            color: #007BFF;
            margin-bottom: 20px;
            font-weight: bold;
            position: relative;
            text-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
        }

        .generate h2::after {
            content: '';
            position: absolute;
            width: 100px;
            height: 4px;
            background: linear-gradient(90deg, #007BFF, #86E3CE);
            bottom: -10px;
            left: 50%;
            transform: translateX(-50%);
        }

        .generate p {
            font-size: 1.2rem;
            color: #555;
            margin-bottom: 40px;
        }

        .form {
            text-align: left;
            background: linear-gradient(135deg, #f9f9fc, #e3e9ff);
            padding: 30px;
            border-radius: 20px;
            box-shadow: 0 8px 20px rgba(0, 0, 0, 0.1);
        }

        .form-group {
            margin-bottom: 20px;
        }

        .form-group label {
            font-size: 1rem;
            font-weight: bold;
            color: #333;
            display: block;
            margin-bottom: 8px;
        }

        .form-group select, .form-group input {
            width: 100%;
            padding: 12px;
            font-size: 1rem;
            border: 1px solid #ddd;
            border-radius: 10px;
            transition: all 0.3s ease;
            background: #f7f9fc;
        }

        .btn-primary {
            display: inline-block;
            background: linear-gradient(135deg, #007BFF, #0056b3);
            color: white;
            padding: 12px 30px;
            border-radius: 25px;
            font-size: 1.2rem;
            font-weight: bold;
            text-align: center;
            cursor: pointer;
            transition: all 0.3s ease;
        }

    </style>
</head>
<body>
    <header class="navbar">
        <div class="logo">
            <img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="MedGAN Logo">
            <span>MedGAN Studio</span>
        </div>
        <nav>
            <ul>
                <li><a href="/">Home</a></li>
                <li><a href="{{ url_for('generate_info') }}">Generate</a></li>
                <li><a href="{{ url_for('detect_info') }}">Detect</a></li>
                <li><a href="{{ url_for('about_us') }}">About Us</a></li>
                <li><a href="{{ url_for('contact') }}">Contact</a></li>
            </ul>
        </nav>
    </header>

    <div class="container">
        <!-- User Manual Section -->
        <section class="manual">
            <h3>How to Use?</h3>
            <div class="steps">
                <div class="step">
                    <div class="step-icon">
                        <img src="static/icons/icons8-number-1-64.png" alt="Select Model">
                    </div>
                    <div class="step-text">
                        <h4>Select the Model</h4>
                        <p>Choose the GAN model you want to use from the "Model" dropdown menu.</p>
                    </div>
                </div>
                <div class="step">
                    <div class="step-icon">
                        <img src="static/icons/icons8-2-circled-64.png" alt="Select Tumor Type">
                    </div>
                    <div class="step-text">
                        <h4>Select the Tumor Type</h4>
                        <p>Pick the tumor type (Glioma, Pituitary, or Meningioma) from the "Tumor Type" dropdown.</p>
                    </div>
                </div>
                <div class="step">
                    <div class="step-icon">
                        <img src="static/icons/icons8-circled-3-c-64.png" alt="Set Number of Images">
                    </div>
                    <div class="step-text">
                        <h4>Set the Number of Images</h4>
                        <p>Enter the number of images to generate in the "Number of Images" field (1–100).</p>
                    </div>
                </div>
                <div class="step">
                    <div class="step-icon">
                        <img src="static/icons/icons8-circled-4-c-64.png" alt="Generate Images">
                    </div>
                    <div class="step-text">
                        <h4>Generate Images</h4>
                        <p>Click the "Generate Images" button to create your selected images.</p>
                    </div>
                </div>
            </div>
        </section>
    </div>

    <!-- Generate Form Section -->
    <section class="generate">
        <h2>Generate Brain Tumor Images</h2>
        <p>Select a model, tumor type, and the number of images to generate.</p>

        <form action="/generate" method="post" class="form">
            <div class="form-group">
                <label for="model">Model:</label>
                <select name="model" id="model">
                    <option value="Progressive GANs">Progressive GANs</option>
                    <option value="DCGANs">DCGANs</option>
                    <option value="StyleGAN2">StyleGAN2</option>
                    <option value="WGANs">WGANs</option>
                </select>
            </div>
            <div class="form-group">
                <label for="class_name">Tumor Type:</label>
                <select name="class_name" id="class_name">
                    <option value="Glioma">Glioma</option>
                    <option value="Pituitary">Pituitary</option>
                    <option value="Meningioma">Meningioma</option>
                </select>
            </div>
            <div class="form-group">
                <label for="num_images">Number of Images:</label>
                <input type="number" name="num_images" id="num_images" min="1" max="100" value="1">
            </div>
            <button type="submit" class="btn-primary">Generate Images</button>
        </form>
    </section>

    <footer class="footer">
        <div class="container">
            <p>© 2024 MedGAN Studio. All Rights Reserved.</p>
            <div class="footer-links">
                <a href="mailto:[email protected]">Email Us</a>
                <a href="https://instagram.com/medgan" target="_blank">Instagram</a>
                <a href="https://www.linkedin.com/company/medgan/" target="_blank">LinkedIn</a>
            </div>
        </div>
    </footer>
</body>
</html>
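The form above submits `model`, `class_name`, and `num_images` to `/generate`, and results.html (below) expects the rendered context to carry base64-encoded PNG strings. The sketch that follows shows a handler shape consistent with those field names; `generate_images` is a hypothetical dispatcher into the `medgan` package (dcgan, progan, stylegan, wgan), not the actual function in app.py.

```python
# Sketch of a /generate handler matching the form fields above.
# `generate_images` is hypothetical; results.html builds data: URIs,
# so `images` should be a list of base64-encoded PNG strings.
from flask import request, render_template

@app.route("/generate", methods=["POST"])
def generate():
    model = request.form.get("model", "DCGANs")
    class_name = request.form.get("class_name", "Glioma")
    # Re-clamp to the 1-100 range the input advertises; the browser
    # enforces min/max, but the server should not rely on that.
    num_images = max(1, min(100, int(request.form.get("num_images", 1))))
    images = generate_images(model, class_name, num_images)
    return render_template("results.html", images=images)
```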
templates/index.html
ADDED
@@ -0,0 +1,208 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>MedGAN Studio</title>
    <link rel="icon" type="image/png" href="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png">
    <link rel="stylesheet" href="/static/css/style.css">
    <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600;700&display=swap" rel="stylesheet">
    <style>
        .models {
            padding: 80px 20px;
            text-align: center;
            background: linear-gradient(120deg, #f8fbff, #eef5fc);
            border-radius: 20px;
            box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1);
            animation: fadeInModels 1.5s ease-in-out;
        }

        @keyframes fadeInModels {
            from {
                opacity: 0;
                transform: translateY(20px);
            }
            to {
                opacity: 1;
                transform: translateY(0);
            }
        }

        .models h2 {
            font-size: 2.8rem;
            color: #007BFF;
            margin-bottom: 20px;
            font-weight: bold;
            position: relative;
        }

        .models h2::after {
            content: '';
            position: absolute;
            width: 100px;
            height: 4px;
            background: linear-gradient(90deg, #007BFF, #ffffff);
            bottom: -10px;
            left: 50%;
            transform: translateX(-50%);
        }

        .models p {
            font-size: 1.2rem;
            color: #555;
            margin-bottom: 50px;
            max-width: 700px;
            margin-left: auto;
            margin-right: auto;
        }

        .models-grid {
            display: flex;
            justify-content: center;
            flex-wrap: wrap;
            gap: 30px;
            margin-top: 30px;
        }

        .model-card {
            background: linear-gradient(135deg, #60a5fa, #1e3a8a);
            border-radius: 20px;
            box-shadow: 0 10px 30px rgba(0, 0, 0, 0.61);
            text-align: center;
            padding: 30px;
            width: 280px;
            transition: transform 0.4s ease, box-shadow 0.4s ease;
            position: relative;
            overflow: hidden;
        }

        .model-card:hover {
            transform: translateY(-15px);
            box-shadow: 0 15px 40px rgba(0, 0, 0, 0.2);
        }

        .model-card h3 {
            font-size: 1.5rem;
            color: #ffffff;
            margin-bottom: 15px;
            font-weight: bold;
        }

        .model-card p {
            font-size: 1rem;
            color: #ffffff;
            line-height: 1.6;
            padding: 0 10px;
        }

        .model-card::before {
            content: '';
            position: absolute;
            top: -50%;
            left: -50%;
            width: 200%;
            height: 200%;
            background: radial-gradient(circle, rgba(0, 123, 255, 0.1), transparent 70%);
            z-index: 0;
            transform: scale(0);
            transition: transform 0.4s ease;
        }

        .model-card:hover::before {
            transform: scale(1);
        }

        @media screen and (max-width: 768px) {
            .model-card {
                width: 90%;
            }
        }
    </style>
</head>
<body>
    <header class="navbar">
        <div class="logo">
            <img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="MedGAN Logo">
            <span>MedGAN Studio</span>
        </div>
        <nav>
            <ul>
                <li><a href="/">Home</a></li>
                <li><a href="{{ url_for('generate_info') }}">Generate</a></li>
                <li><a href="{{ url_for('detect_info') }}">Detect</a></li>
                <li><a href="{{ url_for('about_us') }}">About Us</a></li>
                <li><a href="{{ url_for('contact') }}">Contact</a></li>
            </ul>
        </nav>
    </header>

    <!-- Hero Section -->
    <section class="hero" id="home">
        <div class="container">
            <h1>AI-Powered Medical Imaging</h1>
            <p>Transform your research with advanced AI-generated medical images that are fast, reliable, and built on cutting-edge technology.</p>
            <a href="{{ url_for('generate_info') }}" class="btn-primary">Generate Images</a>
        </div>
    </section>

    <!-- About Section -->
    <section class="about" id="about">
        <div class="container">
            <h2>Why Choose MedGAN Studio?</h2>
            <p>MedGAN Studio is a platform built for researchers and medical professionals to leverage AI technology for generating brain tumor images.</p>
            <div class="features-grid">
                <div class="feature-card">
                    <h3>High Accuracy</h3>
                    <p>Our models deliver ultra-realistic images with unparalleled accuracy for your research needs.</p>
                </div>
                <div class="feature-card">
                    <h3>Fast Processing</h3>
                    <p>Generate images in seconds with our highly optimized platform.</p>
                </div>
                <div class="feature-card">
                    <h3>Custom Solutions</h3>
                    <p>Scale your image generation from small projects to large datasets effortlessly.</p>
                </div>
            </div>
        </div>
    </section>

    <!-- Models Section -->
    <section class="models" id="models">
        <div class="container">
            <h2>Our Models</h2>
            <p>Explore the advanced GAN architectures we utilize to generate medical images with exceptional quality and precision.</p>
            <div class="models-grid">
                <div class="model-card">
                    <h3>Progressive GANs</h3>
                    <p>Enables high-quality image synthesis by progressively growing the generator and discriminator networks.</p>
                </div>
                <div class="model-card">
                    <h3>DCGANs</h3>
                    <p>Deep Convolutional GANs leverage convolutional layers for generating realistic medical images efficiently.</p>
                </div>
                <div class="model-card">
                    <h3>StyleGAN2</h3>
                    <p>State-of-the-art GAN architecture offering unprecedented control over image style and quality.</p>
                </div>
                <div class="model-card">
                    <h3>WGANs</h3>
                    <p>Wasserstein GANs optimize stability and improve training dynamics for generating sharp and stable images.</p>
                </div>
            </div>
        </div>
    </section>

    <footer class="footer" id="contact">
        <div class="container">
            <p>© 2024 MedGAN Studio. All Rights Reserved.</p>
            <div class="footer-links">
                <a href="mailto:[email protected]">Email Us</a>
                <a href="https://instagram.com/medgan" target="_blank">Instagram</a>
                <a href="https://www.linkedin.com/company/medgan/" target="_blank">LinkedIn</a>
            </div>
        </div>
    </footer>
</body>
</html>
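The navbar in this and the other templates resolves its links through `url_for('generate_info')`, `url_for('detect_info')`, `url_for('about_us')`, and `url_for('contact')`, so app.py must register endpoints with exactly those names. A sketch of the wiring follows; the URL paths are assumptions, since only the endpoint names are constrained by the templates (the template filenames match the repo listing).

```python
# Sketch: endpoint names referenced by url_for(...) across the templates.
# View bodies are placeholders; URL paths are illustrative assumptions.
from flask import render_template

@app.route("/generate-info")
def generate_info():
    return render_template("generate.html")

@app.route("/detect-info")
def detect_info():
    return render_template("detect.html")

@app.route("/about-us")
def about_us():
    return render_template("About_us.html")

@app.route("/contact")
def contact():
    return render_template("contact.html")
```

Separately, the templates load assets with relative paths such as `static/css/...`; those only resolve for pages served from the site root, so `url_for('static', filename=...)` would be the more robust choice.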
templates/results-detect.html
ADDED
@@ -0,0 +1,120 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Detection Results</title>
    <link rel="icon" type="image/png" href="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png">
    <link rel="stylesheet" href="/static/css/style.css">
    <style>
        .results-section {
            text-align: center;
            padding: 60px 20px;
            background: linear-gradient(135deg, #ffffff, #f8f9fa);
            border-radius: 20px;
            box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1);
            margin: 50px auto;
            max-width: 900px;
        }

        .results-section h2 {
            font-size: 2.5rem;
            color: #007BFF;
            margin-bottom: 20px;
        }

        .results-section p {
            font-size: 1.2rem;
            color: #555;
            margin-bottom: 30px;
        }

        .image-container {
            display: flex;
            justify-content: center;
            flex-wrap: wrap;
            gap: 20px;
        }

        .image-card {
            background: #ffffff;
            border-radius: 15px;
            box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
            overflow: hidden;
            transition: transform 0.3s ease, box-shadow 0.3s ease;
        }

        .image-card img {
            width: 100%;
            height: auto;
        }

        .image-card:hover {
            transform: scale(1.05);
            box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
        }

        .btn-primary {
            background: linear-gradient(135deg, #007BFF, #0056b3);
            color: white;
            padding: 12px 30px;
            border-radius: 25px;
            font-size: 1.2rem;
            font-weight: bold;
            text-transform: uppercase;
            text-decoration: none;
            cursor: pointer;
            transition: all 0.3s ease-in-out;
            box-shadow: 0 10px 25px rgba(0, 0, 0, 0.2);
            margin-top: 30px;
        }

        .btn-primary:hover {
            background: linear-gradient(135deg, #0056b3, #003f7f);
            transform: translateY(-3px);
        }
    </style>
</head>
<body>
    <header class="navbar">
        <div class="logo">
            <img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="MedGAN Logo">
            <span>MedGAN Studio</span>
        </div>
        <nav>
            <ul>
                <li><a href="/">Home</a></li>
                <li><a href="{{ url_for('generate_info') }}">Generate</a></li>
                <li><a href="{{ url_for('detect_info') }}">Detect</a></li>
                <li><a href="{{ url_for('about_us') }}">About Us</a></li>
                <li><a href="{{ url_for('contact') }}">Contact</a></li>
            </ul>
        </nav>
    </header>

    <!-- Results Section -->
    <section class="results-section">
        <h2>Detection Results</h2>
        <p>Predicted Tumor Type: <strong>{{ result }}</strong></p>
        <div class="image-container">
            {% for image in images %}
            <div class="image-card">
                <img src="{{ url_for('static', filename=image) }}" alt="Uploaded Image">
            </div>
            {% endfor %}
        </div>
        <a href="{{ url_for('detect_info') }}" class="btn-primary">Analyze Another Image</a>
    </section>

    <footer class="footer">
        <div class="container">
            <p>© 2024 MedGAN Studio. All Rights Reserved.</p>
            <div class="footer-links">
                <a href="mailto:[email protected]">Email Us</a>
                <a href="https://instagram.com/medgan" target="_blank">Instagram</a>
                <a href="https://www.linkedin.com/company/medgan/" target="_blank">LinkedIn</a>
            </div>
        </div>
    </footer>
</body>
</html>
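The loop above passes each entry of `images` through `url_for('static', filename=image)`, so whichever route renders this page must supply filenames relative to the static folder. A short sketch of the expected context; the `temp_image.jpg` name mirrors `static/temp_image.jpg` from the repo listing and is an assumption about what app.py saves.

```python
# Sketch: the context results-detect.html consumes. Each entry in
# `images` is resolved against the static folder by url_for('static', ...).
from flask import render_template

def show_detection(result):
    # 'temp_image.jpg' is an assumed filename; compare static/temp_image.jpg
    # in the repository listing.
    return render_template("results-detect.html",
                           result=result,
                           images=["temp_image.jpg"])
```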
templates/results.html
ADDED
@@ -0,0 +1,65 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Generated Images</title>
    <link rel="icon" type="image/png" href="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png">
    <link rel="stylesheet" href="/static/css/style.css">
    <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;600&display=swap" rel="stylesheet">
</head>
<body>
    <header class="navbar">
        <div class="logo">
            <img src="static/css/Blue_ABstract_Brain_Technology_Logo__1_-removebg-preview.png" alt="MedGAN Logo">
            <span>MedGAN Studio</span>
        </div>
        <nav>
            <ul>
                <li><a href="/">Home</a></li>
                <li><a href="{{ url_for('generate_info') }}">Generate</a></li>
                <li><a href="{{ url_for('detect_info') }}">Detect</a></li>
                <li><a href="{{ url_for('about_us') }}">About Us</a></li>
                <li><a href="{{ url_for('contact') }}">Contact</a></li>
            </ul>
        </nav>
    </header>

    <!-- Results Section -->
    <section class="results">
        <div class="container">
            <h1>Your AI-Generated Images</h1>
            <p>View, download individual images, or download all images as a ZIP file below.</p>

            <!-- Gallery -->
            <div class="gallery">
                {% for image in images %}
                <div class="image-card">
                    <img src="data:image/png;base64,{{ image }}" alt="Generated Image">
                    <a href="data:image/png;base64,{{ image }}" download="Generated_Image.png" class="btn-secondary">Download</a>
                </div>
                {% endfor %}
            </div>

            <!-- ZIP Download -->
            <div class="zip-download">
                <a href="{{ url_for('download_zip') }}" class="btn-primary">Download All as ZIP</a>
            </div>

            <a href="{{ url_for('generate_info') }}" class="btn-primary">Generate More Images</a>
        </div>
    </section>

    <!-- Footer -->
    <footer class="footer" id="contact">
        <div class="container">
            <p>© 2024 MedGAN Studio. All Rights Reserved.</p>
            <div class="footer-links">
                <a href="mailto:[email protected]">Email Us</a> |
                <a href="https://instagram.com/medgan" target="_blank">Instagram</a> |
                <a href="https://www.linkedin.com/company/medgan/" target="_blank">LinkedIn</a>
            </div>
        </div>
    </footer>
</body>
</html>
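results.html inlines each generated image as a base64 data URI and offers a batch download via `url_for('download_zip')`, which implies the server keeps the last generated batch available between requests. A sketch of a compatible endpoint follows; the module-level `LAST_BATCH` cache and the URL path are illustrative assumptions, and app.py may hold this state differently (per session or on disk).

```python
# Sketch: zipping the last generated batch for "Download All as ZIP".
# `LAST_BATCH` is an illustrative in-memory cache of base64 PNG strings;
# the /generate handler would populate it after each batch.
import base64
import io
import zipfile
from flask import send_file

LAST_BATCH = []  # base64-encoded PNGs from the most recent /generate call

@app.route("/download-zip")
def download_zip():
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        for i, b64 in enumerate(LAST_BATCH):
            zf.writestr(f"generated_{i + 1}.png", base64.b64decode(b64))
    buf.seek(0)
    return send_file(buf, mimetype="application/zip",
                     as_attachment=True, download_name="generated_images.zip")
```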