# rightnight / app.py
import os
import torch
import time
import threading
import json
import gc
from flask import Flask, request, jsonify, send_file, Response, stream_with_context
from werkzeug.utils import secure_filename
from PIL import Image
import io
import zipfile
import uuid
import traceback
from huggingface_hub import snapshot_download
from flask_cors import CORS
import numpy as np
import trimesh
from transformers import pipeline
from scipy.ndimage import gaussian_filter, uniform_filter
app = Flask(__name__)
CORS(app) # Enable CORS for all routes
# Configure directories
UPLOAD_FOLDER = '/tmp/uploads'
RESULTS_FOLDER = '/tmp/results'
CACHE_DIR = '/tmp/huggingface'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
# Create necessary directories
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(RESULTS_FOLDER, exist_ok=True)
os.makedirs(CACHE_DIR, exist_ok=True)
# Set Hugging Face cache environment variables
os.environ['HF_HOME'] = CACHE_DIR
os.environ['TRANSFORMERS_CACHE'] = os.path.join(CACHE_DIR, 'transformers')
os.environ['HF_DATASETS_CACHE'] = os.path.join(CACHE_DIR, 'datasets')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max
# Job tracking dictionary
processing_jobs = {}
# Global model variables
depth_estimator = None
model_loaded = False
model_loading = False
# Configuration for processing
TIMEOUT_SECONDS = 240 # 4 minutes max for processing (increased for larger model)
MAX_DIMENSION = 512 # Max image dimension to process
# TimeoutError for handling timeouts
class TimeoutError(Exception):
pass
# Thread-safe timeout implementation
def process_with_timeout(function, args, timeout):
result = [None]
error = [None]
completed = [False]
def target():
try:
result[0] = function(*args)
completed[0] = True
except Exception as e:
error[0] = e
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
thread.join(timeout)
    if not completed[0] and thread.is_alive():
        return None, TimeoutError(f"Processing timed out after {timeout} seconds")
    if error[0]:
        return None, error[0]
    return result[0], None
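# Illustrative use of process_with_timeout (a sketch only; `slow_step` and its
# arguments are hypothetical, not part of this app):
#   result, err = process_with_timeout(slow_step, [input_path, options], timeout=30)
#   if err is not None:
#       ...  # err is either a TimeoutError or the exception raised inside slow_step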
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# Function to preprocess image
def preprocess_image(image_path):
with Image.open(image_path) as img:
img = img.convert("RGB")
# Resize if the image is too large
if img.width > MAX_DIMENSION or img.height > MAX_DIMENSION:
# Calculate new dimensions while preserving aspect ratio
if img.width > img.height:
new_width = MAX_DIMENSION
new_height = int(img.height * (MAX_DIMENSION / img.width))
else:
new_height = MAX_DIMENSION
new_width = int(img.width * (MAX_DIMENSION / img.height))
img = img.resize((new_width, new_height), Image.LANCZOS)
return img
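# Worked example of the resize rule above (illustrative numbers): a 2048x1536 upload
# becomes 512x384, capping the longest side at MAX_DIMENSION while keeping the 4:3
# aspect ratio; images already within 512x512 are left untouched.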
def load_model():
global depth_estimator, model_loaded, model_loading
if model_loaded:
return depth_estimator
if model_loading:
# Wait for model to load if it's already in progress
while model_loading and not model_loaded:
time.sleep(0.5)
return depth_estimator
try:
model_loading = True
print("Starting model loading...")
# Using DPT-Large which provides better detail than DPT-Hybrid
model_name = "Intel/dpt-large"
# Download model with retry mechanism
max_retries = 3
retry_delay = 5
for attempt in range(max_retries):
try:
snapshot_download(
repo_id=model_name,
cache_dir=CACHE_DIR,
resume_download=True,
)
break
except Exception as e:
if attempt < max_retries - 1:
print(f"Download attempt {attempt+1} failed: {str(e)}. Retrying in {retry_delay} seconds...")
time.sleep(retry_delay)
retry_delay *= 2
else:
raise
# Initialize model with lower precision to save memory
device = "cuda" if torch.cuda.is_available() else "cpu"
        # Load the depth estimation pipeline; route cache_dir through model_kwargs so
        # it reaches from_pretrained rather than the pipeline constructor
        depth_estimator = pipeline(
            "depth-estimation",
            model=model_name,
            device=device if device == "cuda" else -1,
            model_kwargs={"cache_dir": CACHE_DIR}
        )
# Optimize memory usage
if device == "cuda":
torch.cuda.empty_cache()
model_loaded = True
print(f"Model loaded successfully on {device}")
return depth_estimator
except Exception as e:
print(f"Error loading model: {str(e)}")
print(traceback.format_exc())
raise
finally:
model_loading = False
# Convert depth map to 3D mesh with enhanced detail
def depth_to_mesh(depth_map, image, resolution=100):
"""Convert depth map to 3D mesh with improved detail preservation"""
# Convert depth_map to numpy array if it's a PIL Image
if isinstance(depth_map, Image.Image):
depth_map = np.array(depth_map)
# Make sure the depth map is 2D
if len(depth_map.shape) > 2:
depth_map = np.mean(depth_map, axis=2) if depth_map.shape[2] > 1 else depth_map[:,:,0]
    # Smooth the depth map with a light Gaussian filter to suppress noise
    # (an edge-preserving bilateral filter could be substituted here)
    depth_map_smooth = gaussian_filter(depth_map, sigma=1.0)
# Get dimensions
h, w = depth_map_smooth.shape
# Create a grid of points
x = np.linspace(0, w-1, resolution)
y = np.linspace(0, h-1, resolution)
x_grid, y_grid = np.meshgrid(x, y)
# Sample depth at grid points
x_indices = x_grid.astype(int)
y_indices = y_grid.astype(int)
z_values = depth_map_smooth[y_indices, x_indices]
# Normalize depth values with better scaling
z_min, z_max = np.percentile(z_values, [2, 98]) # Removes outliers
z_values = (z_values - z_min) / (z_max - z_min) if z_max > z_min else z_values
z_values = z_values * 2.0 # Scale depth
# Apply a local contrast enhancement to bring out details
# Simple adaptive normalization
window_size = resolution // 10
if window_size > 0:
local_mean = uniform_filter(z_values, size=window_size)
local_var = uniform_filter(z_values**2, size=window_size) - local_mean**2
local_std = np.sqrt(np.maximum(local_var, 0))
# Enhance local contrast
enhanced_z = (z_values - local_mean) / (local_std + 0.01) * 0.5 + z_values
z_values = np.clip(enhanced_z, 0, None) # Keep values positive
# Normalize x and y coordinates
x_grid = (x_grid / w - 0.5) * 2.0 # Map to -1 to 1
y_grid = (y_grid / h - 0.5) * 2.0 # Map to -1 to 1
# Create vertices
vertices = np.vstack([x_grid.flatten(), -y_grid.flatten(), -z_values.flatten()]).T
# Create faces (triangles)
faces = []
for i in range(resolution-1):
for j in range(resolution-1):
p1 = i * resolution + j
p2 = i * resolution + (j + 1)
p3 = (i + 1) * resolution + j
p4 = (i + 1) * resolution + (j + 1)
# Create two triangles for each grid cell
faces.append([p1, p2, p4])
faces.append([p1, p4, p3])
faces = np.array(faces)
# Create mesh
mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
# Apply texturing if image is provided
    if image is not None:
# Convert to numpy array if needed
if isinstance(image, Image.Image):
img_array = np.array(image)
else:
img_array = image
# Create simple texture by sampling the original image
if resolution <= img_array.shape[0] and resolution <= img_array.shape[1]:
# Create vertex colors by sampling the image
vertex_colors = np.zeros((vertices.shape[0], 4), dtype=np.uint8)
for i in range(resolution):
for j in range(resolution):
img_x = min(int(j * img_array.shape[1] / resolution), img_array.shape[1]-1)
img_y = min(int(i * img_array.shape[0] / resolution), img_array.shape[0]-1)
vertex_idx = i * resolution + j
if len(img_array.shape) == 3 and img_array.shape[2] == 3: # RGB
vertex_colors[vertex_idx, :3] = img_array[img_y, img_x, :]
vertex_colors[vertex_idx, 3] = 255 # Alpha
elif len(img_array.shape) == 3 and img_array.shape[2] == 4: # RGBA
vertex_colors[vertex_idx, :] = img_array[img_y, img_x, :]
else:
# Handle grayscale or other formats
gray_value = img_array[img_y, img_x]
vertex_colors[vertex_idx, :3] = [gray_value, gray_value, gray_value]
vertex_colors[vertex_idx, 3] = 255
mesh.visual.vertex_colors = vertex_colors
return mesh
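# Standalone sketch of depth_to_mesh, kept as a comment so it is never executed by the
# service; the synthetic depth map and output path are illustrative assumptions:
#   synthetic_depth = np.hypot(*np.mgrid[-1:1:128j, -1:1:128j])  # 128x128 radial ramp
#   demo_mesh = depth_to_mesh(synthetic_depth, None, resolution=64)
#   demo_mesh.export('/tmp/demo.obj', file_type='obj')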
@app.route('/health', methods=['GET'])
def health_check():
return jsonify({
"status": "healthy",
"model": "Depth-Based 3D Model Generator (DPT-Large)",
"device": "cuda" if torch.cuda.is_available() else "cpu"
}), 200
@app.route('/progress/<job_id>', methods=['GET'])
def progress(job_id):
def generate():
if job_id not in processing_jobs:
yield f"data: {json.dumps({'error': 'Job not found'})}\n\n"
return
job = processing_jobs[job_id]
# Send initial progress
yield f"data: {json.dumps({'status': 'processing', 'progress': job['progress']})}\n\n"
# Wait for job to complete or update
last_progress = job['progress']
check_count = 0
while job['status'] == 'processing':
if job['progress'] != last_progress:
yield f"data: {json.dumps({'status': 'processing', 'progress': job['progress']})}\n\n"
last_progress = job['progress']
time.sleep(0.5)
check_count += 1
            # Periodically verify that the worker thread is still alive
            if check_count > 60:  # roughly every 30 seconds
if 'thread_alive' in job and not job['thread_alive']():
job['status'] = 'error'
job['error'] = 'Processing thread died unexpectedly'
break
check_count = 0
# Send final status
if job['status'] == 'completed':
yield f"data: {json.dumps({'status': 'completed', 'progress': 100, 'result_url': job['result_url'], 'preview_url': job['preview_url']})}\n\n"
else:
yield f"data: {json.dumps({'status': 'error', 'error': job['error']})}\n\n"
return Response(stream_with_context(generate()), mimetype='text/event-stream')
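# Example of consuming the progress stream (hypothetical host; port 7860 is the default
# used at the bottom of this file):
#   curl -N http://localhost:7860/progress/<job_id>
# Each "data:" line is a JSON payload with status/progress and, on completion, the
# result_url and preview_url fields.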
@app.route('/convert', methods=['POST'])
def convert_image_to_3d():
# Check if image is in the request
if 'image' not in request.files:
return jsonify({"error": "No image provided"}), 400
file = request.files['image']
if file.filename == '':
return jsonify({"error": "No image selected"}), 400
if not allowed_file(file.filename):
return jsonify({"error": f"File type not allowed. Supported types: {', '.join(ALLOWED_EXTENSIONS)}"}), 400
# Get optional parameters with defaults
try:
mesh_resolution = min(int(request.form.get('mesh_resolution', 100)), 200) # Limit max resolution
output_format = request.form.get('output_format', 'obj').lower()
detail_level = request.form.get('detail_level', 'medium').lower() # New parameter for detail level
except ValueError:
return jsonify({"error": "Invalid parameter values"}), 400
# Validate output format
if output_format not in ['obj', 'glb']:
return jsonify({"error": "Unsupported output format. Use 'obj' or 'glb'"}), 400
# Adjust mesh resolution based on detail level
if detail_level == 'high':
mesh_resolution = min(mesh_resolution * 1.5, 200)
elif detail_level == 'low':
mesh_resolution = max(int(mesh_resolution * 0.7), 50)
# Create a job ID
job_id = str(uuid.uuid4())
output_dir = os.path.join(RESULTS_FOLDER, job_id)
os.makedirs(output_dir, exist_ok=True)
# Save the uploaded file
filename = secure_filename(file.filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], f"{job_id}_{filename}")
file.save(filepath)
# Initialize job tracking
processing_jobs[job_id] = {
'status': 'processing',
'progress': 0,
'result_url': None,
'preview_url': None,
'error': None,
'output_format': output_format,
'created_at': time.time()
}
# Start processing in a separate thread
def process_image():
thread = threading.current_thread()
processing_jobs[job_id]['thread_alive'] = lambda: thread.is_alive()
try:
# Preprocess image
processing_jobs[job_id]['progress'] = 5
image = preprocess_image(filepath)
processing_jobs[job_id]['progress'] = 10
# Load model
try:
model = load_model()
processing_jobs[job_id]['progress'] = 30
except Exception as e:
processing_jobs[job_id]['status'] = 'error'
processing_jobs[job_id]['error'] = f"Error loading model: {str(e)}"
return
# Process image with thread-safe timeout
try:
def estimate_depth():
# Get depth map
result = model(image)
depth_map = result["depth"]
# Convert to numpy array if needed
if isinstance(depth_map, torch.Tensor):
depth_map = depth_map.cpu().numpy()
elif hasattr(depth_map, 'numpy'):
depth_map = depth_map.numpy()
elif isinstance(depth_map, Image.Image):
depth_map = np.array(depth_map)
return depth_map
depth_map, error = process_with_timeout(estimate_depth, [], TIMEOUT_SECONDS)
if error:
if isinstance(error, TimeoutError):
processing_jobs[job_id]['status'] = 'error'
processing_jobs[job_id]['error'] = f"Processing timed out after {TIMEOUT_SECONDS} seconds"
return
else:
raise error
processing_jobs[job_id]['progress'] = 60
# Create mesh from depth map
mesh_resolution_int = int(mesh_resolution)
mesh = depth_to_mesh(depth_map, image, resolution=mesh_resolution_int)
processing_jobs[job_id]['progress'] = 80
except Exception as e:
error_details = traceback.format_exc()
processing_jobs[job_id]['status'] = 'error'
processing_jobs[job_id]['error'] = f"Error during processing: {str(e)}"
print(f"Error processing job {job_id}: {str(e)}")
print(error_details)
return
# Export based on requested format
try:
if output_format == 'obj':
obj_path = os.path.join(output_dir, "model.obj")
mesh.export(obj_path, file_type='obj')
# Create a zip file with OBJ and MTL
zip_path = os.path.join(output_dir, "model.zip")
with zipfile.ZipFile(zip_path, 'w') as zipf:
zipf.write(obj_path, arcname="model.obj")
mtl_path = os.path.join(output_dir, "model.mtl")
if os.path.exists(mtl_path):
zipf.write(mtl_path, arcname="model.mtl")
processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
elif output_format == 'glb':
# Export as GLB
glb_path = os.path.join(output_dir, "model.glb")
mesh.export(glb_path, file_type='glb')
processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
# Update job status
processing_jobs[job_id]['status'] = 'completed'
processing_jobs[job_id]['progress'] = 100
print(f"Job {job_id} completed successfully")
except Exception as e:
error_details = traceback.format_exc()
processing_jobs[job_id]['status'] = 'error'
processing_jobs[job_id]['error'] = f"Error exporting model: {str(e)}"
print(f"Error exporting model for job {job_id}: {str(e)}")
print(error_details)
# Clean up temporary file
if os.path.exists(filepath):
os.remove(filepath)
# Force garbage collection to free memory
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
except Exception as e:
# Handle errors
error_details = traceback.format_exc()
processing_jobs[job_id]['status'] = 'error'
processing_jobs[job_id]['error'] = f"{str(e)}\n{error_details}"
print(f"Error processing job {job_id}: {str(e)}")
print(error_details)
# Clean up on error
if os.path.exists(filepath):
os.remove(filepath)
# Start processing thread
processing_thread = threading.Thread(target=process_image)
processing_thread.daemon = True
processing_thread.start()
# Return job ID immediately
return jsonify({"job_id": job_id}), 202 # 202 Accepted
@app.route('/download/<job_id>', methods=['GET'])
def download_model(job_id):
if job_id not in processing_jobs or processing_jobs[job_id]['status'] != 'completed':
return jsonify({"error": "Model not found or processing not complete"}), 404
# Get the output directory for this job
output_dir = os.path.join(RESULTS_FOLDER, job_id)
# Determine file format from the job data
output_format = processing_jobs[job_id].get('output_format', 'obj')
if output_format == 'obj':
zip_path = os.path.join(output_dir, "model.zip")
if os.path.exists(zip_path):
return send_file(zip_path, as_attachment=True, download_name="model.zip")
else: # glb
glb_path = os.path.join(output_dir, "model.glb")
if os.path.exists(glb_path):
return send_file(glb_path, as_attachment=True, download_name="model.glb")
return jsonify({"error": "File not found"}), 404
@app.route('/preview/<job_id>', methods=['GET'])
def preview_model(job_id):
if job_id not in processing_jobs or processing_jobs[job_id]['status'] != 'completed':
return jsonify({"error": "Model not found or processing not complete"}), 404
# Get the output directory for this job
output_dir = os.path.join(RESULTS_FOLDER, job_id)
output_format = processing_jobs[job_id].get('output_format', 'obj')
if output_format == 'obj':
obj_path = os.path.join(output_dir, "model.obj")
if os.path.exists(obj_path):
return send_file(obj_path, mimetype='model/obj')
else: # glb
glb_path = os.path.join(output_dir, "model.glb")
if os.path.exists(glb_path):
return send_file(glb_path, mimetype='model/gltf-binary')
return jsonify({"error": "Model file not found"}), 404
# Cleanup old jobs periodically
def cleanup_old_jobs():
current_time = time.time()
job_ids_to_remove = []
for job_id, job_data in processing_jobs.items():
# Remove completed jobs after 1 hour
if job_data['status'] == 'completed' and (current_time - job_data.get('created_at', 0)) > 3600:
job_ids_to_remove.append(job_id)
# Remove error jobs after 30 minutes
elif job_data['status'] == 'error' and (current_time - job_data.get('created_at', 0)) > 1800:
job_ids_to_remove.append(job_id)
# Remove the jobs
for job_id in job_ids_to_remove:
output_dir = os.path.join(RESULTS_FOLDER, job_id)
try:
import shutil
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
except Exception as e:
print(f"Error cleaning up job {job_id}: {str(e)}")
# Remove from tracking dictionary
if job_id in processing_jobs:
del processing_jobs[job_id]
# Schedule the next cleanup
threading.Timer(300, cleanup_old_jobs).start() # Run every 5 minutes
@app.route('/', methods=['GET'])
def index():
return jsonify({
"message": "Image to 3D API is running (DPT-Large Model)",
"endpoints": ["/convert", "/progress/<job_id>", "/download/<job_id>", "/preview/<job_id>"],
"parameters": {
"mesh_resolution": "Integer (50-200), controls mesh density",
"output_format": "obj or glb",
"detail_level": "low, medium, or high"
}
}), 200
if __name__ == '__main__':
# Start the cleanup thread
cleanup_old_jobs()
# Use port 7860 which is standard for Hugging Face Spaces
port = int(os.environ.get('PORT', 7860))
app.run(host='0.0.0.0', port=port)
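# Minimal end-to-end client sketch (assumes the `requests` package, a locally running
# server, and an OBJ job; file names are illustrative):
#   import requests, time
#   base = 'http://localhost:7860'
#   job = requests.post(f'{base}/convert',
#                       files={'image': open('photo.jpg', 'rb')},
#                       data={'output_format': 'obj'}).json()['job_id']
#   while requests.get(f'{base}/download/{job}').status_code == 404:
#       time.sleep(2)  # /download returns 404 until the job reports 'completed'
#   open('model.zip', 'wb').write(requests.get(f'{base}/download/{job}').content)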