import cv2
import numpy as np
import os
import gradio as gr
from PIL import Image
import tempfile
from typing import Union, Tuple
import json
import datetime
import pathlib

# Custom CSS for styling the interface
custom_css = """
.container {
    max-width: 1200px;
    margin: 0 auto;
}
/* Main styling */
.gradio-container {
    font-family: 'Roboto', 'Segoe UI', sans-serif;
    color: white;
}
/* Card styling */
.app-card {
    border-radius: 12px;
    box-shadow: 0 8px 16px rgba(0, 0, 0, 0.15);
    padding: 20px;
    background: linear-gradient(135deg, #2a3a4a 0%, #1e2a3a 100%);
    margin-bottom: 20px;
}
/* Header styling */
h1, h2, h3 {
    font-weight: 700 !important;
    color: white !important;
}
/* Labels styling */
label, .label {
    font-size: 1rem !important;
    font-weight: 600 !important;
    color: white !important;
    margin-bottom: 6px !important;
}
/* Input and slider styling */
.slider-label {
    font-weight: 600 !important;
    color: white !important;
    font-size: 0.95rem !important;
}
/* Button styling */
button.primary {
    background: linear-gradient(135deg, #3498db, #2980b9) !important;
    color: white !important;
    font-weight: 600 !important;
    border-radius: 8px !important;
    padding: 12px 24px !important;
    font-size: 1.1rem !important;
    transition: all 0.3s ease !important;
    border: none !important;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1), 0 0 10px rgba(52, 152, 219, 0.4) !important;
}
button.primary:hover {
    background: linear-gradient(135deg, #2980b9, #2573a7) !important;
    box-shadow: 0 6px 12px rgba(0, 0, 0, 0.2), 0 0 15px rgba(52, 152, 219, 0.6) !important;
    transform: translateY(-2px) !important;
}
/* Radio buttons */
.radio-group label {
    font-weight: 600 !important;
    color: white !important;
}
/* Tab styling */
.tab-nav {
    font-weight: 600 !important;
    font-size: 1.05rem !important;
}
/* Responsive adjustments */
@media (max-width: 768px) {
    .gradio-container {
        padding: 10px !important;
    }
    label, .label {
        font-size: 0.95rem !important;
    }
    button.primary {
        padding: 10px 18px !important;
        font-size: 1rem !important;
    }
}
"""

# Enable OpenCL for better performance
cv2.ocl.setUseOpenCL(True)


# ------------------- Logger Class ------------------- #
class UsageLogger:
    """Simple logger to record app usage timestamps using temp directory in Hugging Face Spaces.

    Log data is a JSON object with two lists:
      - "visits":   [{"timestamp": iso_string}, ...]
      - "features": [{"timestamp", "feature", "service", "media_type"}, ...]

    All failures are best-effort: errors are printed, a fallback temp file is
    tried, and the methods return True/False rather than raising, so logging
    problems never break the app itself.
    """

    def __init__(self, log_filename="usage_logs.json"):
        """Place the log file in the OS temp directory (writable on HF Spaces).

        Args:
            log_filename: Base name of the JSON log file.
        """
        self.logs_dir = tempfile.gettempdir()
        self.log_file = os.path.join(self.logs_dir, log_filename)
        print(f"Log file location: {self.log_file}")
        self.ensure_log_file_exists()

    def ensure_log_file_exists(self):
        """Create the log file with an empty structure if it doesn't exist.

        Falls back to a freshly created NamedTemporaryFile if the preferred
        path is not readable/writable.
        """
        try:
            if not os.path.exists(self.log_file):
                with open(self.log_file, 'w') as f:
                    json.dump({"visits": [], "features": []}, f)
            # Test if file is readable/writable
            with open(self.log_file, 'r+') as f:
                pass
        except Exception as e:
            print(f"Error accessing log file: {str(e)}")
            # Create another temporary file as fallback
            temp_log = tempfile.NamedTemporaryFile(mode='w+', suffix='.json', delete=False)
            self.log_file = temp_log.name
            print(f"Using fallback temp log file: {self.log_file}")
            with open(self.log_file, 'w') as f:
                json.dump({"visits": [], "features": []}, f)

    def _read_logs(self):
        """Return the current log dict, or a fresh empty one if the file is
        missing, empty, or corrupt. Shared by log_visit/log_usage."""
        try:
            if os.path.exists(self.log_file) and os.path.getsize(self.log_file) > 0:
                with open(self.log_file, 'r') as f:
                    return json.load(f)
        except (json.JSONDecodeError, FileNotFoundError):
            # If file is corrupt or doesn't exist, start fresh
            pass
        return {"visits": [], "features": []}

    def log_visit(self):
        """Log a timestamp when the app is visited/used.

        Returns:
            True if the visit was recorded (possibly in a fallback temp
            file), False if even the fallback write failed.
        """
        current_time = datetime.datetime.now().isoformat()
        try:
            logs = self._read_logs()
            logs["visits"].append({"timestamp": current_time})
            with open(self.log_file, 'w') as f:
                json.dump(logs, f, indent=2)
            print(f"Visit logged at {current_time}")
            return True
        except Exception as e:
            print(f"Error logging visit: {str(e)}")
            # Try creating a new temporary file if there was an error
            try:
                temp_log = tempfile.NamedTemporaryFile(mode='w+', suffix='.json', delete=False)
                self.log_file = temp_log.name
                print(f"Created new temp log file: {self.log_file}")
                logs = {"visits": [{"timestamp": current_time}], "features": []}
                json.dump(logs, temp_log, indent=2)
                temp_log.close()
                return True
            except Exception as backup_error:
                print(f"Error creating backup log file: {str(backup_error)}")
                return False

    def log_usage(self, feature_type, media_type=None):
        """Log when a specific feature is used.

        Args:
            feature_type: The feature used (e.g., 'black_white_image', 'sketch_video').
            media_type: The type of media processed ('image' or 'video');
                inferred from feature_type when omitted.

        Returns:
            True on success (including fallback file), False otherwise.
        """
        current_time = datetime.datetime.now().isoformat()

        # Extract media type from feature name if not provided
        if media_type is None:
            if "image" in feature_type:
                media_type = "image"
            elif "video" in feature_type:
                media_type = "video"
            else:
                media_type = "unknown"

        # Extract service type
        if "black_white" in feature_type:
            service_type = "black_and_white"
        elif "sketch" in feature_type:
            service_type = "pencil_sketch"
        else:
            service_type = "unknown"

        try:
            logs = self._read_logs()
            # Make sure features key exists (older log files may lack it)
            if "features" not in logs:
                logs["features"] = []
            logs["features"].append({
                "timestamp": current_time,
                "feature": feature_type,
                "service": service_type,
                "media_type": media_type
            })
            with open(self.log_file, 'w') as f:
                json.dump(logs, f, indent=2)
            print(f"Feature usage logged: {feature_type} ({media_type}) at {current_time}")
            return True
        except Exception as e:
            print(f"Error logging usage: {str(e)}")
            # Try creating a new temporary file if there was an error
            try:
                temp_log = tempfile.NamedTemporaryFile(mode='w+', suffix='.json', delete=False)
                self.log_file = temp_log.name
                print(f"Created new temp log file for usage: {self.log_file}")
                logs = {"visits": [], "features": [{
                    "timestamp": current_time,
                    "feature": feature_type,
                    "service": service_type,
                    "media_type": media_type
                }]}
                json.dump(logs, temp_log, indent=2)
                temp_log.close()
                return True
            except Exception as backup_error:
                print(f"Error creating backup log file: {str(backup_error)}")
                return False


# Create a global logger instance
logger = UsageLogger()


# ------------------- Theme Setup ------------------- #
def create_custom_theme():
    """Create a custom dark theme for the interface."""
    return gr.themes.Base().set(
        body_background_fill="linear-gradient(to bottom right, #1a1f2c, #121620)",
        body_background_fill_dark="linear-gradient(to bottom right, #1a1f2c, #121620)",
        body_text_color="white",
        body_text_color_dark="white",
        button_primary_background_fill="linear-gradient(135deg, #3498db, #2980b9)",
        button_primary_background_fill_hover="linear-gradient(135deg, #2980b9, #2573a7)",
        button_primary_text_color="white",
        button_primary_text_color_dark="white",
        button_primary_border_color="transparent",
        button_primary_border_color_dark="transparent",
        button_secondary_background_fill="#34495e",
        button_secondary_background_fill_hover="#2c3e50",
        button_secondary_text_color="white",
        button_secondary_text_color_dark="white",
        block_title_text_color="white",
        block_title_text_color_dark="white",
        block_label_text_color="white",
        block_label_text_color_dark="white",
        slider_color="#3498db",
        slider_color_dark="#3498db",
        border_color_primary="#3498db",
        border_color_primary_dark="#3498db",
        background_fill_primary="#2a3a4a",
        background_fill_primary_dark="#2a3a4a",
        background_fill_secondary="#1e2a3a",
        background_fill_secondary_dark="#1e2a3a",
        border_radius_size="12px",
        spacing_md="12px",
        spacing_lg="16px",
        text_size="16px",
        text_md="18px",
        text_lg="20px",
        text_xl="24px",
        font=["Roboto", "ui-sans-serif", "system-ui", "sans-serif"],
    )


# ------------------- Black & White Converter Functions ------------------- #
def convert_to_black_white(image, threshold_value=127, method="otsu"):
    """Convert image to black and white using specified thresholding method.

    Args:
        image: BGR/grayscale numpy array, or a file path to read with cv2.
        threshold_value: Cutoff for the "manual" method (ignored otherwise).
        method: "adaptive", "otsu", or anything else for manual thresholding.

    Returns:
        Single-channel uint8 binary image.

    Raises:
        ValueError: If a path is given and the image cannot be read.
    """
    if isinstance(image, str):
        image = cv2.imread(image)
        # Fix: cv2.imread returns None (no exception) on unreadable paths;
        # fail with a clear message instead of a cryptic AttributeError below.
        if image is None:
            raise ValueError("Could not read image file")

    # Convert to grayscale if not already
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image

    if method == "adaptive":
        binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)
    elif method == "otsu":
        # Otsu picks the threshold automatically; the 0 here is ignored.
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    else:
        _, binary = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY)

    return binary


def process_image_bw(image, threshold_value, method):
    """Process image with black and white thresholding.

    Accepts a PIL Image or numpy array; returns a binary numpy array,
    or None when no image is provided.
    """
    if image is None:
        return None

    # Convert to numpy array if PIL Image
    if isinstance(image, Image.Image):
        image_np = np.array(image)
        # Convert RGB to BGR for OpenCV
        if len(image_np.shape) == 3:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
    else:
        image_np = image

    return convert_to_black_white(image_np, threshold_value, method)


def process_video_bw(video_path, threshold_value, method):
    """Process video with black and white thresholding.

    Args:
        video_path: Path to the input video file (may be None from Gradio).
        threshold_value: Manual threshold value.
        method: Thresholding method name (see convert_to_black_white).

    Returns:
        Tuple of (status message, output path or None).
    """
    # Fix: os.path.exists(None) raises TypeError; guard the empty/None path
    # from Gradio so callers get the friendly "not found" message instead.
    if not video_path or not os.path.exists(video_path):
        return "Video file not found", None

    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            return "Could not open video file", None

        # Get video properties
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))

        # Create temporary output file
        temp_output = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
        output_path = temp_output.name
        temp_output.close()

        # Create video writer (isColor=False: frames are single-channel)
        out = cv2.VideoWriter(output_path, fourcc, fps,
                              (frame_width, frame_height), isColor=False)

        # Process each frame
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            bw_frame = convert_to_black_white(frame, threshold_value, method)
            out.write(bw_frame)

        cap.release()
        out.release()
        return "Video processed successfully", output_path
    except Exception as e:
        return f"Error processing video: {str(e)}", None


# ------------------- Pencil Sketch Converter Functions ------------------- #
def process_image_sketch(image, intensity=255, blur_ksize=21, sigma=0):
    """Convert image to pencil sketch effect (invert -> blur -> color dodge).

    Args:
        image: PIL Image or numpy array (BGR or grayscale); None passes through.
        intensity: Scale factor for cv2.divide; higher = more contrast.
        blur_ksize: Gaussian kernel size; coerced to an odd int.
        sigma: Gaussian standard deviation (0 lets OpenCV derive it).

    Returns:
        Single-channel uint8 sketch image, or None if image is None.
    """
    if image is None:
        return None

    # Convert to numpy array if PIL Image
    if isinstance(image, Image.Image):
        image_np = np.array(image)
        # Convert RGB to BGR for OpenCV
        if len(image_np.shape) == 3:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
    else:
        image_np = image

    # Convert to grayscale
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY) if len(image_np.shape) == 3 else image_np

    # Create sketch effect
    inverted = cv2.bitwise_not(gray)
    # Fix: Gradio sliders can deliver floats; GaussianBlur needs an odd int kernel.
    blur_ksize = int(blur_ksize)
    blur_ksize = blur_ksize if blur_ksize % 2 == 1 else blur_ksize + 1
    blurred = cv2.GaussianBlur(inverted, (blur_ksize, blur_ksize), sigma)
    sketch = cv2.divide(gray, cv2.bitwise_not(blurred), scale=intensity)

    return sketch


def process_video_sketch(video_path, intensity=255, blur_ksize=21, sigma=0):
    """Process video with pencil sketch effect.

    Returns:
        Tuple of (status message, output path or None).
    """
    # Fix: guard None/empty path before os.path.exists (TypeError on None).
    if not video_path or not os.path.exists(video_path):
        return "Video file not found", None

    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            return "Could not open video file", None

        # Get video properties
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))

        # Create temporary output file
        temp_output = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
        output_path = temp_output.name
        temp_output.close()

        # Create video writer (isColor=True: we write 3-channel frames below)
        out = cv2.VideoWriter(output_path, fourcc, fps,
                              (frame_width, frame_height), isColor=True)

        # Process each frame
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            sketch_frame = process_image_sketch(frame, intensity, blur_ksize, sigma)
            # Convert grayscale to BGR for video output
            sketch_bgr = cv2.cvtColor(sketch_frame, cv2.COLOR_GRAY2BGR)
            out.write(sketch_bgr)

        cap.release()
        out.release()
        return "Video processed successfully", output_path
    except Exception as e:
        return f"Error processing video: {str(e)}", None


# ------------------- Gradio Interface Functions ------------------- #
def black_white_image(image, threshold_method, threshold_value):
    """Process image with black and white filter for Gradio."""
    # Log the usage of this feature
    logger.log_usage("black_white_image", "image")

    if threshold_method != "manual":
        threshold_value = 0  # Not used for adaptive or Otsu
    result = process_image_bw(image, threshold_value, threshold_method)
    # Fix: avoid Image.fromarray(None) crash when no image was uploaded.
    if result is None:
        return None
    return Image.fromarray(result)


def black_white_video(video, threshold_method, threshold_value):
    """Process video with black and white filter for Gradio.

    Raises:
        gr.Error: With the failure message when processing fails.
    """
    # Log the usage of this feature
    logger.log_usage("black_white_video", "video")

    if threshold_method != "manual":
        threshold_value = 0  # Not used for adaptive or Otsu
    message, output_path = process_video_bw(video, threshold_value, threshold_method)
    if output_path:
        return output_path
    raise gr.Error(message)


def sketch_image(image, intensity, blur_ksize, sigma):
    """Process image with pencil sketch filter for Gradio."""
    # Log the usage of this feature
    logger.log_usage("sketch_image", "image")

    result = process_image_sketch(image, intensity, blur_ksize, sigma)
    # Fix: avoid Image.fromarray(None) crash when no image was uploaded.
    if result is None:
        return None
    return Image.fromarray(result)


def sketch_video(video, intensity, blur_ksize, sigma):
    """Process video with pencil sketch filter for Gradio.

    Raises:
        gr.Error: With the failure message when processing fails.
    """
    # Log the usage of this feature
    logger.log_usage("sketch_video", "video")

    message, output_path = process_video_sketch(video, intensity, blur_ksize, sigma)
    if output_path:
        return output_path
    raise gr.Error(message)


# ------------------- Create Gradio Interface ------------------- #
def create_interface():
    """Build the Gradio Blocks app: two tool tabs (pencil sketch, black &
    white), each with an image and a video sub-tab, plus usage logging.

    Returns:
        The constructed gr.Blocks application (not yet launched).
    """
    # Tooltip content
    otsu_tooltip = "Otsu automatically determines the optimal threshold value by analyzing the image histogram."
    adaptive_tooltip = "Adaptive thresholding calculates different thresholds for different areas of the image, useful for images with varying lighting conditions."
    manual_tooltip = "Manual threshold lets you set a specific brightness cutoff point between black and white pixels."
    intensity_tooltip = "Controls the strength of the pencil sketch effect. Higher values create more contrast."
    blur_tooltip = "Controls how much the image is blurred. Higher values create a softer sketch effect."
    sigma_tooltip = "Controls the standard deviation of the Gaussian blur. Higher values increase the blurring effect."

    # Fix: create_custom_theme() was defined but never used — the app was
    # passing a plain gr.themes.Base(), leaving the custom dark theme dead code.
    with gr.Blocks(title="Image Processor", css=custom_css, theme=create_custom_theme()) as app:

        # Log app visit at launch
        def log_application_visit():
            print("Application loaded - logging visit")
            success = logger.log_visit()
            if success:
                print("Visit logged successfully")
            else:
                print("Failed to log visit")
            return None

        app.load(fn=log_application_visit, inputs=None, outputs=None)

        with gr.Row(elem_classes="container"):
            gr.Markdown("""
# Image and Video Processor
Transform your media with professional black & white conversion and pencil sketch effects
""")

        with gr.Tabs() as tabs:
            with gr.TabItem("Pencil Sketch Converter", elem_classes="app-card"):
                with gr.Tabs() as sketch_tabs:
                    with gr.TabItem("Image Processing"):
                        with gr.Row(equal_height=True):
                            with gr.Column(scale=1):
                                sketch_image_input = gr.Image(label="Input Image")
                                with gr.Group():
                                    sketch_intensity = gr.Slider(
                                        minimum=1, maximum=255, value=255, step=1,
                                        label="Intensity", info=intensity_tooltip,
                                        elem_classes="slider-label"
                                    )
                                    sketch_blur = gr.Slider(
                                        minimum=1, maximum=99, value=21, step=2,
                                        label="Blur Kernel Size", info=blur_tooltip,
                                        elem_classes="slider-label"
                                    )
                                    sketch_sigma = gr.Slider(
                                        minimum=0, maximum=50, value=0, step=0.1,
                                        label="Standard Deviation", info=sigma_tooltip,
                                        elem_classes="slider-label"
                                    )
                                sketch_image_btn = gr.Button("Convert", elem_classes="primary")
                            with gr.Column(scale=1):
                                sketch_image_output = gr.Image(label="Processed Image")
                    with gr.TabItem("Video Processing"):
                        with gr.Row(equal_height=True):
                            with gr.Column(scale=1):
                                sketch_video_input = gr.Video(label="Input Video")
                                with gr.Group():
                                    sketch_video_intensity = gr.Slider(
                                        minimum=1, maximum=255, value=255, step=1,
                                        label="Intensity", info=intensity_tooltip,
                                        elem_classes="slider-label"
                                    )
                                    sketch_video_blur = gr.Slider(
                                        minimum=1, maximum=99, value=21, step=2,
                                        label="Blur Kernel Size", info=blur_tooltip,
                                        elem_classes="slider-label"
                                    )
                                    sketch_video_sigma = gr.Slider(
                                        minimum=0, maximum=50, value=0, step=0.1,
                                        label="Standard Deviation", info=sigma_tooltip,
                                        elem_classes="slider-label"
                                    )
                                sketch_video_btn = gr.Button("Convert", elem_classes="primary")
                            with gr.Column(scale=1):
                                sketch_video_output = gr.Video(label="Processed Video")

            with gr.TabItem("Black & White Converter", elem_classes="app-card"):
                with gr.Tabs() as bw_tabs:
                    with gr.TabItem("Image Processing"):
                        with gr.Row(equal_height=True):
                            with gr.Column(scale=1):
                                bw_image_input = gr.Image(label="Input Image", elem_classes="input-image")
                                with gr.Group():
                                    bw_method = gr.Radio(
                                        choices=["otsu", "adaptive", "manual"], value="otsu",
                                        label="Thresholding Method", info=otsu_tooltip,
                                        elem_classes="radio-group"
                                    )
                                    # Fix: start hidden — the default method is "otsu",
                                    # and the visibility handler below only shows this
                                    # slider when "manual" is selected.
                                    bw_threshold = gr.Slider(
                                        minimum=0, maximum=255, value=127, step=1,
                                        label="Manual Threshold Value", info=manual_tooltip,
                                        interactive=True, visible=False,
                                        elem_classes="slider-label"
                                    )
                                bw_image_btn = gr.Button("Convert", elem_classes="primary")
                            with gr.Column(scale=1):
                                bw_image_output = gr.Image(label="Processed Image")
                    with gr.TabItem("Video Processing"):
                        with gr.Row(equal_height=True):
                            with gr.Column(scale=1):
                                bw_video_input = gr.Video(label="Input Video")
                                with gr.Group():
                                    bw_video_method = gr.Radio(
                                        choices=["otsu", "adaptive", "manual"], value="otsu",
                                        label="Thresholding Method", info=otsu_tooltip,
                                        elem_classes="radio-group"
                                    )
                                    # Fix: start hidden for the same reason as bw_threshold.
                                    bw_video_threshold = gr.Slider(
                                        minimum=0, maximum=255, value=127, step=1,
                                        label="Manual Threshold Value", info=manual_tooltip,
                                        interactive=True, visible=False,
                                        elem_classes="slider-label"
                                    )
                                bw_video_btn = gr.Button("Convert", elem_classes="primary")
                            with gr.Column(scale=1):
                                bw_video_output = gr.Video(label="Processed Video")

        with gr.Row(elem_classes="container"):
            gr.Markdown("""
### How to use:
1. Upload an image or video
2. Adjust the settings as needed
3. Click the Convert button to process your media
""")

        # Set up event listeners
        bw_image_btn.click(
            fn=black_white_image,
            inputs=[bw_image_input, bw_method, bw_threshold],
            outputs=bw_image_output
        )
        bw_video_btn.click(
            fn=black_white_video,
            inputs=[bw_video_input, bw_video_method, bw_video_threshold],
            outputs=bw_video_output
        )
        sketch_image_btn.click(
            fn=sketch_image,
            inputs=[sketch_image_input, sketch_intensity, sketch_blur, sketch_sigma],
            outputs=sketch_image_output
        )
        sketch_video_btn.click(
            fn=sketch_video,
            inputs=[sketch_video_input, sketch_video_intensity, sketch_video_blur, sketch_video_sigma],
            outputs=sketch_video_output
        )

        # Make blur slider always odd (Gaussian kernels must be odd-sized)
        def update_blur(value):
            return value if value % 2 == 1 else value + 1

        sketch_blur.change(update_blur, sketch_blur, sketch_blur)
        sketch_video_blur.change(update_blur, sketch_video_blur, sketch_video_blur)

        # Show the manual-threshold slider only when "manual" is selected
        def update_threshold_visibility(method):
            return gr.update(visible=(method == "manual"))

        bw_method.change(update_threshold_visibility, bw_method, bw_threshold)
        bw_video_method.change(update_threshold_visibility, bw_video_method, bw_video_threshold)

    return app


# ------------------- Launch App ------------------- #
if __name__ == "__main__":
    app = create_interface()
    app.launch()