diff --git a/.env b/.env deleted file mode 100644 index c5248bd10368a413f83738e3d565f0eee347e73d..0000000000000000000000000000000000000000 --- a/.env +++ /dev/null @@ -1,26 +0,0 @@ -# OpenAI -OPENAI_API_KEY="" - -# Azure OpenAI -AZURE_API_KEY="" -AZURE_API_BASE="" -AZURE_API_VERSION="" -OPENROUTER_API_KEY = "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" -# Google Vertex AI -VERTEXAI_PROJECT="" -VERTEXAI_LOCATION="" -GOOGLE_APPLICATION_CREDENTIALS="" -GITHUB_API_KEY = "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" -GITHUB_TOKEN = "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" -OPENAI_API_KEY = "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" -# Google Gemini -GEMINI_API_KEY="AIzaSyBUCGQ_hDLAHQN-T1ycWBJV8SGfwusfEjg" - -... - -# Kokoro TTS Settings -KOKORO_MODEL_PATH="models/kokoro-v0_19.onnx" -KOKORO_VOICES_PATH="models/voices.bin" -KOKORO_DEFAULT_VOICE="af" -KOKORO_DEFAULT_SPEED="1.0" -KOKORO_DEFAULT_LANG="en-us" \ No newline at end of file diff --git a/.env.example b/.env.example index 2c2c6c13c8cc57cb289f4b0df59787208672be32..56b414b78645d310c895ba59984e6f2743e8e2d2 100644 --- a/.env.example +++ b/.env.example @@ -1,26 +1,77 @@ -# OpenAI -OPENAI_API_KEY="" - -# Azure OpenAI -AZURE_API_KEY="" -AZURE_API_BASE="" -AZURE_API_VERSION="" -OPENROUTER_API_KEY = "" -# Google Vertex AI -VERTEXAI_PROJECT="" -VERTEXAI_LOCATION="" -GOOGLE_APPLICATION_CREDENTIALS="" -GITHUB_API_KEY = "" -GITHUB_TOKEN = "" -OPENAI_API_KEY = "" -# Google Gemini -GEMINI_API_KEY="" - -... - -# Kokoro TTS Settings -KOKORO_MODEL_PATH="models/kokoro-v0_19.onnx" -KOKORO_VOICES_PATH="models/voices.bin" -KOKORO_DEFAULT_VOICE="af" -KOKORO_DEFAULT_SPEED="1.0" -KOKORO_DEFAULT_LANG="en-us" \ No newline at end of file +# FastAPI Video Backend Environment Configuration +# Copy this file to .env and update the values + +# Application Settings +APP_NAME="FastAPI Video Backend" +APP_VERSION="0.1.0" +DEBUG=true +ENVIRONMENT=development + +# Server Settings +HOST=0.0.0.0 +PORT=8000 +RELOAD=true + +# API Settings +API_V1_PREFIX="/api/v1" +DOCS_URL="/docs" +REDOC_URL="/redoc" +OPENAPI_URL="/openapi.json" + +# CORS Settings +ALLOWED_ORIGINS="http://localhost:3000,http://localhost:8080,http://127.0.0.1:3000" +ALLOWED_METHODS="GET,POST,PUT,DELETE,OPTIONS" +ALLOWED_HEADERS="*" + +# Redis Settings +REDIS_URL="redis://localhost:6379/0" +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_DB=0 +REDIS_PASSWORD= +REDIS_MAX_CONNECTIONS=20 +REDIS_SOCKET_TIMEOUT=5 +REDIS_SOCKET_CONNECT_TIMEOUT=5 + +# Clerk Authentication Settings (REQUIRED) +CLERK_SECRET_KEY=your_clerk_secret_key_here +CLERK_PUBLISHABLE_KEY=your_clerk_publishable_key_here +CLERK_WEBHOOK_SECRET=your_clerk_webhook_secret_here +CLERK_JWT_VERIFICATION=true + +# Job Queue Settings +JOB_QUEUE_NAME=video_generation_queue +JOB_QUEUE_MAX_SIZE=1000 +JOB_DEFAULT_TIMEOUT=3600 +JOB_RETRY_ATTEMPTS=3 + +# File Storage Settings +UPLOAD_DIR=./uploads +MAX_FILE_SIZE=104857600 +ALLOWED_FILE_TYPES="image/jpeg,image/png,image/gif,video/mp4,text/plain" + +# Rate Limiting Settings +RATE_LIMIT_REQUESTS=100 +RATE_LIMIT_WINDOW=60 +RATE_LIMIT_PER_USER=50 + +# Logging Settings +LOG_LEVEL=INFO +LOG_FORMAT=json +LOG_FILE= +LOG_ROTATION="1 day" +LOG_RETENTION="30 days" + +# Security Settings (REQUIRED) +SECRET_KEY=your_super_secret_key_here_change_in_production +ACCESS_TOKEN_EXPIRE_MINUTES=30 +REFRESH_TOKEN_EXPIRE_DAYS=7 + +# Video Generation Settings +VIDEO_OUTPUT_DIR=./videos +VIDEO_QUALITY_DEFAULT=medium +VIDEO_MAX_DURATION=600 + +# Health Check Settings +HEALTH_CHECK_INTERVAL=30 +HEALTH_CHECK_TIMEOUT=5 \ 
No newline at end of file diff --git a/.gitignore b/.gitignore index f63cc8288fbb057856345c17427dccaa99ff5cc1..6621fc112aa4ac120968d05ab89f444fe6c8e2a2 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,8 @@ __pycache__/ # Distribution / packaging .Python env/ +.env +.*env build/ develop-eggs/ dist/ diff --git a/.kiro/specs/fastapi-backend/design.md b/.kiro/specs/fastapi-backend/design.md new file mode 100644 index 0000000000000000000000000000000000000000..6b35c9bf1994fdd485cf4d0544a203111f729f66 --- /dev/null +++ b/.kiro/specs/fastapi-backend/design.md @@ -0,0 +1,1055 @@ +# Design Document + +## Overview + +This document outlines the design for a simplified FastAPI backend that serves as the primary interface for the multi-agent video generation system. The backend uses Pydantic for all data modeling and validation, Clerk for authentication, and Redis for both caching and job queuing. The design emphasizes simplicity and rapid development while maintaining clean architecture principles. + +The system provides REST API endpoints for video generation requests and job management, with Redis handling both the job queue and caching layer. Authentication is managed entirely through Clerk, eliminating the need for custom user management. + +## Architecture + +### High-Level Architecture + +```mermaid +graph TB + subgraph "Client Layer" + WEB[Web Frontend] + MOBILE[Mobile App] + API_CLIENT[API Clients] + end + + subgraph "API Gateway Layer" + NGINX[Nginx Reverse Proxy] + RATE_LIMIT[Rate Limiting] + AUTH_MW[Authentication Middleware] + end + + subgraph "FastAPI Application" + API_ROUTER[API Routers] + WEBSOCKET[WebSocket Handlers] + MIDDLEWARE[Custom Middleware] + DEPS[Dependencies] + end + + subgraph "Business Logic Layer" + VIDEO_SERVICE[Video Generation Service] + JOB_SERVICE[Job Management Service] + FILE_SERVICE[File Management Service] + NOTIFICATION_SERVICE[Notification Service] + end + + subgraph "Data Layer" + REDIS[(Redis)] + FILE_STORAGE[Local File Storage] + end + + subgraph "External Services" + CLERK[Clerk Authentication] + end + + subgraph "External Systems" + VIDEO_PIPELINE[Multi-Agent Video Pipeline] + MONITORING[Monitoring & Logging] + end + + WEB --> NGINX + MOBILE --> NGINX + API_CLIENT --> NGINX + + NGINX --> RATE_LIMIT + RATE_LIMIT --> AUTH_MW + AUTH_MW --> API_ROUTER + AUTH_MW --> WEBSOCKET + + API_ROUTER --> MIDDLEWARE + WEBSOCKET --> MIDDLEWARE + MIDDLEWARE --> DEPS + + DEPS --> VIDEO_SERVICE + DEPS --> JOB_SERVICE + DEPS --> FILE_SERVICE + DEPS --> NOTIFICATION_SERVICE + + VIDEO_SERVICE --> REDIS + JOB_SERVICE --> REDIS + FILE_SERVICE --> FILE_STORAGE + NOTIFICATION_SERVICE --> REDIS + + AUTH_MW --> CLERK + + QUEUE --> VIDEO_PIPELINE + VIDEO_PIPELINE --> MONITORING +``` + +### Project Structure + +Simplified structure focusing on Pydantic models and Redis: + +``` +src/ +├── app/ +│ ├── main.py # FastAPI application entry point +│ ├── api/ # API layer +│ │ ├── dependencies.py # Shared dependencies (Clerk auth, Redis) +│ │ └── v1/ # API version 1 +│ │ ├── __init__.py +│ │ ├── videos.py # Video generation endpoints +│ │ ├── jobs.py # Job management endpoints +│ │ └── system.py # System health endpoints +│ ├── core/ # Core utilities and configurations +│ │ ├── config.py # Application settings +│ │ ├── redis.py # Redis connection and utilities +│ │ ├── auth.py # Clerk authentication utilities +│ │ ├── logger.py # Logging configuration +│ │ └── exceptions.py # Custom exceptions +│ ├── services/ # Business logic layer +│ │ ├── video_service.py # Video generation business logic 
+│ │ ├── job_service.py # Job management logic +│ │ └── queue_service.py # Redis queue management +│ ├── models/ # Pydantic models only +│ │ ├── __init__.py +│ │ ├── job.py # Job data models +│ │ ├── video.py # Video metadata models +│ │ ├── user.py # User data models (from Clerk) +│ │ └── system.py # System status models +│ ├── middleware/ # Custom middleware +│ │ ├── __init__.py +│ │ ├── cors.py # CORS middleware +│ │ ├── clerk_auth.py # Clerk authentication middleware +│ │ └── error_handling.py # Global error handling +│ └── utils/ # Utility functions +│ ├── __init__.py +│ ├── file_utils.py # File handling utilities +│ └── helpers.py # General helper functions +├── tests/ # Test suite +│ ├── conftest.py # Test configuration +│ ├── test_api/ # API endpoint tests +│ └── test_services/ # Service layer tests +└── scripts/ # Utility scripts + └── setup_redis.py # Redis setup script +``` + +## Components and Interfaces + +### API Layer Components + +#### 1. Video Generation Router (`api/v1/videos.py`) + +**Endpoints:** +- `POST /api/v1/videos/generate` - Submit video generation request +- `POST /api/v1/videos/batch` - Submit batch video generation requests +- `GET /api/v1/videos/jobs/{job_id}/status` - Get job status +- `GET /api/v1/videos/jobs/{job_id}/download` - Download completed video +- `GET /api/v1/videos/jobs/{job_id}/metadata` - Get job metadata + +**Key Features:** +- Request validation using Pydantic schemas +- Async request handling +- Integration with job service +- File streaming for downloads +- Comprehensive error handling + +#### 2. Job Management Router (`api/v1/jobs.py`) + +**Endpoints:** +- `GET /api/v1/jobs` - List jobs with pagination and filtering +- `POST /api/v1/jobs/{job_id}/cancel` - Cancel job +- `DELETE /api/v1/jobs/{job_id}` - Delete job (soft delete) +- `GET /api/v1/jobs/{job_id}/logs` - Get job processing logs + +**Key Features:** +- Pagination support using `fastcrud` patterns +- Advanced filtering and sorting +- Job lifecycle management +- Audit trail maintenance + +#### 3. User Management Router (`api/v1/users.py`) + +**Endpoints:** +- `POST /api/v1/users/register` - User registration +- `POST /api/v1/users/login` - User authentication +- `GET /api/v1/users/profile` - Get user profile +- `PUT /api/v1/users/profile` - Update user profile +- `POST /api/v1/users/verify-email` - Email verification +- `POST /api/v1/users/reset-password` - Password reset + +**Key Features:** +- JWT-based authentication +- Email verification workflow +- Password reset functionality +- Profile management + +#### 4. Subscription Management Router (`api/v1/subscriptions.py`) + +**Endpoints:** +- `GET /api/v1/subscriptions/plans` - List available subscription plans +- `POST /api/v1/subscriptions/subscribe` - Create new subscription +- `GET /api/v1/subscriptions/current` - Get current user subscription +- `PUT /api/v1/subscriptions/upgrade` - Upgrade subscription plan +- `POST /api/v1/subscriptions/cancel` - Cancel subscription +- `GET /api/v1/subscriptions/usage` - Get usage statistics + +**Key Features:** +- Subscription plan management +- Credit tracking and usage monitoring +- Billing integration +- Usage analytics + +#### 5. 
WebSocket Handler (`api/v1/websockets.py`) + +**Endpoints:** +- `WS /ws/jobs/{job_id}` - Real-time job status updates +- `WS /ws/system/health` - System health monitoring + +**Key Features:** +- Connection management +- Real-time status broadcasting +- Graceful disconnection handling +- Authentication for WebSocket connections + +### Service Layer Components + +#### 1. Video Generation Service (`services/video_service.py`) + +**Responsibilities:** +- Interface with multi-agent video generation pipeline +- Job queue management +- Configuration validation +- Progress tracking + +**Key Methods:** +```python +async def create_video_job(request: VideoGenerationRequest) -> JobResponse +async def create_batch_jobs(requests: List[VideoGenerationRequest]) -> BatchJobResponse +async def get_job_status(job_id: str) -> JobStatus +async def cancel_job(job_id: str) -> bool +``` + +#### 2. Job Management Service (`services/job_service.py`) + +**Responsibilities:** +- Job lifecycle management +- Status updates and notifications +- Resource allocation +- Performance monitoring + +**Key Methods:** +```python +async def update_job_status(job_id: str, status: JobStatus, metadata: dict) +async def get_jobs_paginated(filters: JobFilters, pagination: PaginationParams) -> PaginatedResponse +async def cleanup_completed_jobs(retention_days: int) +``` + +#### 3. File Management Service (`services/file_service.py`) + +**Responsibilities:** +- AWS S3 file upload/download handling +- Storage management with versioning +- Security scanning and validation +- Metadata extraction and storage + +**Key Methods:** +```python +async def upload_to_s3(file: UploadFile, user_id: str, file_type: str) -> S3FileMetadata +async def download_from_s3(s3_key: str, bucket: str) -> StreamingResponse +async def generate_presigned_url(s3_key: str, expiration: int = 3600) -> str +async def validate_file(file: UploadFile) -> ValidationResult +async def cleanup_expired_files() +async def create_video_thumbnail(video_s3_key: str) -> str +``` + +#### 4. Subscription Service (`services/subscription_service.py`) + +**Responsibilities:** +- Subscription lifecycle management +- Credit tracking and usage monitoring +- Billing integration +- Plan upgrade/downgrade logic + +**Key Methods:** +```python +async def create_subscription(user_id: str, plan_id: int, payment_method: str) -> Subscription +async def check_user_credits(user_id: str) -> int +async def consume_credits(user_id: str, credits: int) -> bool +async def upgrade_subscription(user_id: str, new_plan_id: int) -> Subscription +async def cancel_subscription(user_id: str) -> bool +async def process_billing_cycle() +``` + +#### 5. 
AWS Integration Service (`services/aws_service.py`) + +**Responsibilities:** +- AWS service integration and management +- DynamoDB operations for high-frequency data +- SQS queue management +- CloudWatch metrics and logging + +**Key Methods:** +```python +async def put_job_status_dynamodb(job_id: str, status: dict) +async def get_job_status_history(job_id: str) -> List[dict] +async def send_sqs_message(queue_url: str, message: dict) +async def put_cloudwatch_metric(metric_name: str, value: float, dimensions: dict) +async def log_user_activity(user_id: str, activity: dict) +``` + +### Data Layer Components + +#### Pydantic Data Models + +**Job Model (`models/job.py`):** +```python +from pydantic import BaseModel, Field +from datetime import datetime +from enum import Enum +from typing import Optional, Dict, Any +import uuid + +class JobStatus(str, Enum): + QUEUED = "queued" + PROCESSING = "processing" + COMPLETED = "completed" + FAILED = "failed" + CANCELLED = "cancelled" + +class JobCreate(BaseModel): + topic: str = Field(..., min_length=1, max_length=500) + context: str = Field(..., min_length=1, max_length=2000) + model: Optional[str] = None + quality: str = Field(default="medium") + use_rag: bool = Field(default=False) + configuration: Optional[Dict[str, Any]] = None + +class Job(BaseModel): + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + user_id: str # Clerk user ID + status: JobStatus = JobStatus.QUEUED + job_type: str = "video_generation" + configuration: Dict[str, Any] + progress: float = Field(default=0.0, ge=0.0, le=100.0) + error_message: Optional[str] = None + created_at: datetime = Field(default_factory=datetime.utcnow) + updated_at: datetime = Field(default_factory=datetime.utcnow) + completed_at: Optional[datetime] = None + +class JobResponse(BaseModel): + job_id: str + status: JobStatus + progress: float + created_at: datetime + estimated_completion: Optional[datetime] = None +``` + +**Video Model (`models/video.py`):** +```python +from pydantic import BaseModel, Field +from datetime import datetime +from typing import Optional +import uuid + +class VideoMetadata(BaseModel): + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + job_id: str + filename: str + file_path: str + file_size: int = Field(gt=0) + duration: Optional[float] = Field(None, gt=0) + resolution: Optional[str] = None + format: str + created_at: datetime = Field(default_factory=datetime.utcnow) + +class VideoResponse(BaseModel): + video_id: str + job_id: str + filename: str + file_size: int + duration: Optional[float] + download_url: str + created_at: datetime +``` + +#### Pydantic Schemas + +**Request Schemas (`schemas/job.py`):** +```python +class VideoGenerationRequest(BaseModel): + topic: str = Field(..., min_length=1, max_length=500) + context: str = Field(..., min_length=1, max_length=2000) + model: Optional[str] = Field(None, description="AI model to use") + quality: VideoQuality = Field(VideoQuality.MEDIUM) + use_rag: bool = Field(False) + custom_config: Optional[Dict[str, Any]] = Field(None) + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "topic": "Pythagorean Theorem", + "context": "Explain the mathematical proof with visual demonstration", + "model": "gemini/gemini-2.5-flash-preview-04-17", + "quality": "medium", + "use_rag": True + } + } + ) +``` + +**Response Schemas:** +```python +class JobResponse(BaseModel): + job_id: str + status: JobStatus + created_at: datetime + estimated_completion: Optional[datetime] = None + +class 
JobStatusResponse(BaseModel): + job_id: str + status: JobStatus + progress: float + current_stage: Optional[str] = None + error_message: Optional[str] = None + created_at: datetime + updated_at: datetime + completed_at: Optional[datetime] = None +``` + +## Data Models + +### Core Entities + +#### Job Entity +- **Primary Key:** UUID string +- **Status:** Enum (queued, processing, completed, failed, cancelled) +- **Configuration:** JSON field for flexible job parameters +- **Progress Tracking:** Float percentage and current stage +- **Audit Fields:** Created, updated, completed timestamps +- **Soft Delete:** Support for data retention policies + +#### User Entity +- **Authentication:** JWT-based authentication +- **Authorization:** Role-based access control +- **Rate Limiting:** Per-user request limits +- **Audit Trail:** Request logging and monitoring + +#### Video Entity +- **Metadata:** File size, duration, resolution, format +- **Storage:** File path and storage location +- **Relationships:** Linked to originating job +- **Lifecycle:** Automatic cleanup policies + +### Redis Data Structure Design + +#### Redis Keys and Data Types + +**Job Storage (Hash):** +``` +jobs:{job_id} -> Hash +{ + "id": "uuid", + "user_id": "clerk_user_id", + "status": "queued|processing|completed|failed|cancelled", + "job_type": "video_generation", + "configuration": "json_string", + "progress": "0.0-100.0", + "error_message": "optional_error", + "created_at": "iso_datetime", + "updated_at": "iso_datetime", + "completed_at": "optional_iso_datetime" +} +``` + +**Job Queue (List):** +``` +job_queue -> List +["job_id_1", "job_id_2", "job_id_3", ...] +``` + +**User Jobs Index (Set):** +``` +user_jobs:{user_id} -> Set +{"job_id_1", "job_id_2", "job_id_3", ...} +``` + +**Video Metadata (Hash):** +``` +videos:{video_id} -> Hash +{ + "id": "uuid", + "job_id": "job_uuid", + "filename": "video.mp4", + "file_path": "/path/to/video.mp4", + "file_size": "bytes", + "duration": "seconds", + "resolution": "1920x1080", + "format": "mp4", + "created_at": "iso_datetime" +} +``` + +**Job Status Cache (String with TTL):** +``` +job_status:{job_id} -> String (TTL: 300 seconds) +"processing" | "completed" | "failed" +``` + +**System Health (Hash):** +``` +system:health -> Hash +{ + "redis": "healthy", + "queue_length": "5", + "active_jobs": "3", + "last_check": "iso_datetime" +} +``` + +## Error Handling + +### Exception Hierarchy + +```python +class APIException(Exception): + """Base API exception""" + def __init__(self, message: str, status_code: int = 500, error_code: str = None): + self.message = message + self.status_code = status_code + self.error_code = error_code + +class ValidationException(APIException): + """Request validation errors""" + def __init__(self, message: str, field_errors: List[dict] = None): + super().__init__(message, 422, "VALIDATION_ERROR") + self.field_errors = field_errors or [] + +class NotFoundException(APIException): + """Resource not found""" + def __init__(self, resource: str): + super().__init__(f"{resource} not found", 404, "NOT_FOUND") + +class ConflictException(APIException): + """Resource conflict""" + def __init__(self, message: str): + super().__init__(message, 409, "CONFLICT") + +class RateLimitException(APIException): + """Rate limit exceeded""" + def __init__(self, retry_after: int = None): + super().__init__("Rate limit exceeded", 429, "RATE_LIMIT_EXCEEDED") + self.retry_after = retry_after +``` + +### Global Error Handler + +```python +@app.exception_handler(APIException) +async def 
api_exception_handler(request: Request, exc: APIException): + return JSONResponse( + status_code=exc.status_code, + content={ + "error": { + "message": exc.message, + "error_code": exc.error_code, + "timestamp": datetime.utcnow().isoformat(), + "path": str(request.url.path) + } + } + ) + +@app.exception_handler(ValidationError) +async def validation_exception_handler(request: Request, exc: ValidationError): + return JSONResponse( + status_code=422, + content={ + "error": { + "message": "Validation failed", + "error_code": "VALIDATION_ERROR", + "details": exc.errors(), + "timestamp": datetime.utcnow().isoformat(), + "path": str(request.url.path) + } + } + ) +``` + +### Error Response Format + +All error responses follow a consistent structure: + +```json +{ + "error": { + "message": "Human-readable error message", + "error_code": "MACHINE_READABLE_CODE", + "details": {}, + "timestamp": "2024-01-15T10:30:00Z", + "path": "/api/v1/videos/generate" + } +} +``` + +## Testing Strategy + +### Testing Pyramid + +#### Unit Tests (70%) +- **Service Layer:** Business logic validation +- **CRUD Operations:** Database interaction testing +- **Utility Functions:** Helper function validation +- **Schema Validation:** Pydantic model testing + +#### Integration Tests (20%) +- **API Endpoints:** Full request/response cycle +- **Database Integration:** Real database operations +- **External Service Integration:** Mock external dependencies +- **WebSocket Connections:** Real-time communication testing + +#### End-to-End Tests (10%) +- **Complete Workflows:** Full video generation pipeline +- **User Journeys:** Multi-step user interactions +- **Performance Testing:** Load and stress testing +- **Security Testing:** Authentication and authorization + +### Test Configuration + +```python +# conftest.py +@pytest.fixture +async def test_db(): + """Create test database session""" + engine = create_async_engine(TEST_DATABASE_URL) + async with engine.begin() as conn: + await conn.run_sync(Base.metadata.create_all) + + async_session = async_sessionmaker(engine, expire_on_commit=False) + async with async_session() as session: + yield session + + async with engine.begin() as conn: + await conn.run_sync(Base.metadata.drop_all) + +@pytest.fixture +def test_client(): + """Create test client""" + return TestClient(app) + +@pytest.fixture +async def authenticated_user(test_db): + """Create authenticated test user""" + user = await crud_users.create( + db=test_db, + object=UserCreate( + username="testuser", + email="test@example.com", + password="testpass123" + ) + ) + return user +``` + +### Test Examples + +```python +# Test API endpoint +async def test_create_video_job(test_client, authenticated_user): + request_data = { + "topic": "Test Topic", + "context": "Test context for video generation", + "quality": "medium" + } + + response = test_client.post( + "/api/v1/videos/generate", + json=request_data, + headers={"Authorization": f"Bearer {authenticated_user.token}"} + ) + + assert response.status_code == 201 + data = response.json() + assert "job_id" in data + assert data["status"] == "queued" + +# Test service layer +async def test_video_service_create_job(test_db): + service = VideoService(test_db) + request = VideoGenerationRequest( + topic="Test Topic", + context="Test context" + ) + + job = await service.create_video_job(request, user_id=1) + + assert job.status == JobStatus.QUEUED + assert job.configuration["topic"] == "Test Topic" +``` + +## Security Considerations + +### Authentication & Authorization + +#### JWT 
Token-Based Authentication +- **Access Tokens:** Short-lived (15 minutes) for API access +- **Refresh Tokens:** Long-lived (7 days) for token renewal +- **Token Blacklisting:** Support for immediate token revocation +- **Secure Storage:** HttpOnly cookies for web clients + +#### Role-Based Access Control (RBAC) +- **User Roles:** admin, user, readonly +- **Permission System:** Granular permissions for different operations +- **Resource Ownership:** Users can only access their own resources +- **Admin Override:** Administrators can access all resources + +### Input Validation & Sanitization + +#### Request Validation +- **Pydantic Models:** Automatic type validation and conversion +- **Field Constraints:** Length limits, format validation, range checks +- **Custom Validators:** Business rule validation +- **Sanitization:** XSS prevention and input cleaning + +#### File Upload Security +- **File Type Validation:** Whitelist of allowed file types +- **Size Limits:** Maximum file size enforcement +- **Virus Scanning:** Integration with antivirus services +- **Secure Storage:** Isolated file storage with access controls + +### Rate Limiting & DDoS Protection + +#### Multi-Level Rate Limiting +- **Global Limits:** Overall API request limits +- **Per-User Limits:** Individual user quotas +- **Per-Endpoint Limits:** Specific endpoint restrictions +- **Sliding Window:** Advanced rate limiting algorithms + +#### Implementation Strategy +```python +from slowapi import Limiter, _rate_limit_exceeded_handler +from slowapi.util import get_remote_address + +limiter = Limiter(key_func=get_remote_address) + +@app.route("/api/v1/videos/generate") +@limiter.limit("10/minute") +async def generate_video(request: Request): + # Endpoint implementation + pass +``` + +### Data Protection + +#### Encryption +- **Data at Rest:** Database encryption for sensitive fields +- **Data in Transit:** TLS 1.3 for all communications +- **File Encryption:** Encrypted file storage +- **Key Management:** Secure key rotation policies + +#### Privacy Compliance +- **Data Minimization:** Collect only necessary data +- **Retention Policies:** Automatic data cleanup +- **User Rights:** Data export and deletion capabilities +- **Audit Logging:** Comprehensive access logging + +## Performance Optimization + +### Caching Strategy + +#### Multi-Level Caching +- **Application Cache:** In-memory caching with Redis +- **Database Query Cache:** SQLAlchemy query result caching +- **HTTP Response Cache:** CDN and browser caching +- **File System Cache:** Temporary file caching + +#### Cache Implementation +```python +from fastapi_cache.decorator import cache + +@router.get("/api/v1/jobs/{job_id}/status") +@cache(expire=30) # Cache for 30 seconds +async def get_job_status(job_id: str): + return await job_service.get_status(job_id) +``` + +### Database Optimization + +#### Query Optimization +- **Eager Loading:** Reduce N+1 query problems +- **Indexing Strategy:** Optimized database indexes +- **Connection Pooling:** Efficient database connections +- **Query Monitoring:** Performance tracking and optimization + +#### Pagination & Filtering +```python +async def get_jobs_paginated( + page: int = 1, + items_per_page: int = 10, + filters: JobFilters = None +) -> PaginatedResponse: + offset = (page - 1) * items_per_page + + query = select(Job).where(Job.is_deleted == False) + if filters: + query = apply_filters(query, filters) + + total = await db.scalar(select(func.count()).select_from(query.subquery())) + jobs = await 
db.execute(query.offset(offset).limit(items_per_page)) + + return PaginatedResponse( + data=jobs.scalars().all(), + total_count=total, + page=page, + items_per_page=items_per_page + ) +``` + +### Asynchronous Processing + +#### Background Tasks +- **Celery Integration:** Distributed task processing +- **Job Queues:** Redis-based task queuing +- **Progress Tracking:** Real-time progress updates +- **Error Recovery:** Automatic retry mechanisms + +#### WebSocket Optimization +- **Connection Pooling:** Efficient WebSocket management +- **Message Broadcasting:** Efficient multi-client updates +- **Heartbeat Monitoring:** Connection health checks +- **Graceful Degradation:** Fallback to polling if needed + +## Monitoring & Observability + +### Logging Strategy + +#### Structured Logging +```python +import structlog + +logger = structlog.get_logger() + +async def create_video_job(request: VideoGenerationRequest, user_id: int): + logger.info( + "Creating video job", + user_id=user_id, + topic=request.topic, + quality=request.quality + ) + + try: + job = await video_service.create_job(request, user_id) + logger.info("Video job created successfully", job_id=job.id) + return job + except Exception as e: + logger.error( + "Failed to create video job", + user_id=user_id, + error=str(e), + exc_info=True + ) + raise +``` + +### Metrics Collection + +#### Application Metrics +- **Request Metrics:** Response times, status codes, throughput +- **Business Metrics:** Job completion rates, user activity +- **System Metrics:** CPU, memory, disk usage +- **Custom Metrics:** Domain-specific measurements + +#### Health Checks +```python +@router.get("/health") +async def health_check(): + checks = { + "database": await check_database_health(), + "redis": await check_redis_health(), + "queue": await check_queue_health(), + "storage": await check_storage_health() + } + + overall_health = all(checks.values()) + status_code = 200 if overall_health else 503 + + return JSONResponse( + status_code=status_code, + content={ + "status": "healthy" if overall_health else "unhealthy", + "checks": checks, + "timestamp": datetime.utcnow().isoformat() + } + ) +``` + +### Distributed Tracing + +#### OpenTelemetry Integration +- **Request Tracing:** End-to-end request tracking +- **Service Dependencies:** Inter-service communication mapping +- **Performance Analysis:** Bottleneck identification +- **Error Correlation:** Error tracking across services + +## Deployment Architecture + +### Container Strategy + +#### Docker Configuration +```dockerfile +FROM python:3.11-slim + +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + gcc \ + && rm -rf /var/lib/apt/lists/* + +# Install Python dependencies +COPY requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY src/ ./src/ +COPY migrations/ ./migrations/ + +# Set environment variables +ENV PYTHONPATH=/app/src +ENV PYTHONUNBUFFERED=1 + +# Expose port +EXPOSE 8000 + +# Run application +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] +``` + +#### AWS Deployment Configuration + +**AWS Services Integration:** +- **RDS PostgreSQL:** Primary relational database for user data, jobs, subscriptions +- **DynamoDB:** High-frequency data like job status updates, user activity tracking +- **S3:** Video files, thumbnails, job outputs, user uploads +- **ElastiCache Redis:** Session storage, caching, real-time data +- **SQS:** Job queue management and inter-service communication +- **CloudFront:** CDN for video delivery and static assets +- **Lambda:** Serverless functions for background processing +- **ECS/Fargate:** Container orchestration for API services + +**Docker Compose for Local Development:** +```yaml +version: '3.8' + +services: + api: + build: . + ports: + - "8000:8000" + environment: + - AWS_REGION=us-east-1 + - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} + - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + - DATABASE_URL=${RDS_DATABASE_URL} + - REDIS_URL=${ELASTICACHE_URL} + - S3_BUCKET=${S3_BUCKET_NAME} + - SQS_QUEUE_URL=${SQS_QUEUE_URL} + - DYNAMODB_TABLE_PREFIX=${DYNAMODB_PREFIX} + volumes: + - ./logs:/app/logs + - ~/.aws:/root/.aws:ro + + localstack: + image: localstack/localstack:latest + ports: + - "4566:4566" + environment: + - SERVICES=s3,sqs,dynamodb,elasticache + - DEBUG=1 + - DATA_DIR=/tmp/localstack/data + volumes: + - localstack_data:/tmp/localstack + + postgres: + image: postgres:15 + environment: + - POSTGRES_DB=videoapi_local + - POSTGRES_USER=user + - POSTGRES_PASSWORD=pass + volumes: + - postgres_data:/var/lib/postgresql/data + ports: + - "5432:5432" + +volumes: + postgres_data: + localstack_data: +``` + +**AWS CDK/Terraform Infrastructure:** +```typescript +// AWS CDK example structure +export class VideoAPIStack extends Stack { + constructor(scope: Construct, id: string, props?: StackProps) { + super(scope, id, props); + + // RDS PostgreSQL + const database = new rds.DatabaseInstance(this, 'VideoAPIDB', { + engine: rds.DatabaseInstanceEngine.postgres({ + version: rds.PostgresEngineVersion.VER_15 + }), + instanceType: ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.MICRO), + multiAz: true, + backupRetention: Duration.days(7) + }); + + // S3 Buckets + const videoBucket = new s3.Bucket(this, 'VideoBucket', { + versioned: true, + lifecycleRules: [{ + id: 'DeleteOldVersions', + expiration: Duration.days(90) + }] + }); + + // DynamoDB Tables + const jobStatusTable = new dynamodb.Table(this, 'JobStatusTable', { + partitionKey: { name: 'job_id', type: dynamodb.AttributeType.STRING }, + sortKey: { name: 'timestamp', type: dynamodb.AttributeType.NUMBER }, + timeToLiveAttribute: 'ttl' + }); + + // ECS Fargate Service + const cluster = new ecs.Cluster(this, 'VideoAPICluster'); + const taskDefinition = new ecs.FargateTaskDefinition(this, 'VideoAPITask'); + + const container = taskDefinition.addContainer('api', { + image: ecs.ContainerImage.fromRegistry('your-api-image'), + environment: { + DATABASE_URL: database.instanceEndpoint.socketAddress, + S3_BUCKET: videoBucket.bucketName + } + }); + + new ecs.FargateService(this, 'VideoAPIService', { + cluster, + taskDefinition, + desiredCount: 2 + }); + } +} +``` + +### Production Considerations + +#### Scalability +- **Horizontal 
Scaling:** Multiple API instances behind load balancer +- **Database Scaling:** Read replicas and connection pooling +- **Cache Scaling:** Redis clustering for high availability +- **File Storage:** Distributed storage solutions + +#### Security Hardening +- **SSL/TLS:** End-to-end encryption +- **Firewall Rules:** Network access restrictions +- **Secret Management:** Secure credential storage +- **Regular Updates:** Security patch management + +#### Monitoring & Alerting +- **Application Monitoring:** APM tools integration +- **Infrastructure Monitoring:** System metrics collection +- **Log Aggregation:** Centralized logging solution +- **Alert Management:** Proactive issue notification \ No newline at end of file diff --git a/.kiro/specs/fastapi-backend/requirements.md b/.kiro/specs/fastapi-backend/requirements.md new file mode 100644 index 0000000000000000000000000000000000000000..52e3add4cffbaef9f92b9f9d634f2880cb36e58f --- /dev/null +++ b/.kiro/specs/fastapi-backend/requirements.md @@ -0,0 +1,126 @@ +# Requirements Document + +## Introduction + +This document outlines the requirements for implementing a simple FastAPI backend for the multi-agent video generation system. The backend will use Pydantic for all data modeling and validation, Clerk for authentication, and Redis for caching and job queuing. The system will provide RESTful API endpoints to manage video generation requests, monitor processing status, and integrate with the existing multi-agent pipeline through Redis queues. + +## Requirements + +### Requirement 1 + +**User Story:** As a client application, I want to submit video generation requests through a REST API, so that I can programmatically create educational videos from textual descriptions. + +#### Acceptance Criteria + +1. WHEN a POST request is made to `/api/v1/videos/generate` with topic and context data THEN the system SHALL accept the request and return a unique job ID +2. WHEN the request includes optional parameters like model selection, quality settings, or RAG configuration THEN the system SHALL validate and apply these parameters +3. WHEN the request payload is invalid or missing required fields THEN the system SHALL return a 422 validation error with detailed field-level error messages +4. WHEN the system receives a valid request THEN it SHALL queue the job for processing and return a 201 status code with job metadata + +### Requirement 2 + +**User Story:** As a client application, I want to monitor the status of video generation jobs, so that I can track progress and know when videos are ready for download. + +#### Acceptance Criteria + +1. WHEN a GET request is made to `/api/v1/videos/jobs/{job_id}/status` THEN the system SHALL return the current job status (queued, processing, completed, failed) +2. WHEN a job is in progress THEN the system SHALL return progress information including current stage and percentage completion +3. WHEN a job has failed THEN the system SHALL return error details and failure reason +4. WHEN a job is completed THEN the system SHALL return metadata about the generated video including file size and duration +5. WHEN an invalid job ID is provided THEN the system SHALL return a 404 error + +### Requirement 3 + +**User Story:** As a client application, I want to retrieve completed videos and their metadata, so that I can download and use the generated content. + +#### Acceptance Criteria + +1. 
WHEN a GET request is made to `/api/v1/videos/jobs/{job_id}/download` for a completed job THEN the system SHALL return the video file as a streaming response +2. WHEN a GET request is made to `/api/v1/videos/jobs/{job_id}/metadata` THEN the system SHALL return comprehensive job metadata including processing logs and performance metrics +3. WHEN a job is not yet completed THEN the download endpoint SHALL return a 409 conflict error +4. WHEN the video file is not found THEN the system SHALL return a 404 error +5. WHEN downloading large files THEN the system SHALL support HTTP range requests for partial content delivery + +### Requirement 4 + +**User Story:** As a system administrator, I want to manage and monitor the video generation pipeline, so that I can ensure optimal system performance and troubleshoot issues. + +#### Acceptance Criteria + +1. WHEN a GET request is made to `/api/v1/system/health` THEN the system SHALL return health status of all components including database, queue, and agent services +2. WHEN a GET request is made to `/api/v1/system/metrics` THEN the system SHALL return performance metrics including queue length, processing times, and resource utilization +3. WHEN a POST request is made to `/api/v1/system/jobs/{job_id}/cancel` THEN the system SHALL attempt to cancel the job and return cancellation status +4. WHEN a GET request is made to `/api/v1/system/jobs` with pagination parameters THEN the system SHALL return a paginated list of all jobs with filtering options +5. WHEN system resources are critically low THEN the health endpoint SHALL return a 503 service unavailable status + +### Requirement 5 + +**User Story:** As a client application, I want to upload custom content and configurations, so that I can customize the video generation process with specific materials or settings. + +#### Acceptance Criteria + +1. WHEN a POST request is made to `/api/v1/uploads/content` with multipart form data THEN the system SHALL accept and store uploaded files securely +2. WHEN uploading files THEN the system SHALL validate file types, sizes, and scan for malicious content +3. WHEN a POST request is made to `/api/v1/configurations` with custom settings THEN the system SHALL validate and store the configuration for later use +4. WHEN uploaded content exceeds size limits THEN the system SHALL return a 413 payload too large error +5. WHEN invalid file types are uploaded THEN the system SHALL return a 415 unsupported media type error + +### Requirement 6 + +**User Story:** As a client application, I want to authenticate requests using Clerk authentication, so that the system remains secure and user access is properly managed. + +#### Acceptance Criteria + +1. WHEN a request is made without a valid Clerk session token THEN the system SHALL return a 401 unauthorized error +2. WHEN a request is made with an invalid or expired Clerk token THEN the system SHALL return a 401 unauthorized error +3. WHEN a valid Clerk token is provided THEN the system SHALL extract user information and process the request +4. WHEN user information is needed THEN the system SHALL retrieve it from Clerk's user management system +5. WHEN rate limits are exceeded THEN the system SHALL return a 429 too many requests error + +### Requirement 7 + +**User Story:** As a developer integrating with the API, I want comprehensive API documentation and client generation capabilities, so that I can efficiently build applications that consume the video generation service. + +#### Acceptance Criteria + +1. 
WHEN accessing `/docs` THEN the system SHALL provide interactive Swagger UI documentation with all endpoints and schemas +2. WHEN accessing `/redoc` THEN the system SHALL provide ReDoc documentation interface +3. WHEN accessing `/openapi.json` THEN the system SHALL return the complete OpenAPI specification +4. WHEN generating client code THEN the OpenAPI specification SHALL include proper operation IDs and detailed schemas +5. WHEN API changes are made THEN the documentation SHALL automatically update to reflect the current API state + +### Requirement 8 + +**User Story:** As a system operator, I want the API to handle errors gracefully and provide detailed logging, so that I can maintain system reliability and troubleshoot issues effectively. + +#### Acceptance Criteria + +1. WHEN any error occurs THEN the system SHALL return appropriate HTTP status codes with consistent error response format +2. WHEN internal errors occur THEN the system SHALL log detailed error information without exposing sensitive data to clients +3. WHEN validation errors occur THEN the system SHALL return specific field-level error messages +4. WHEN the system is under high load THEN it SHALL implement proper backpressure and queue management +5. WHEN critical errors occur THEN the system SHALL trigger appropriate alerting mechanisms + +### Requirement 9 + +**User Story:** As a client application, I want to receive real-time updates about job progress, so that I can provide live feedback to users about video generation status. + +#### Acceptance Criteria + +1. WHEN a WebSocket connection is established to `/ws/jobs/{job_id}` THEN the system SHALL provide real-time status updates +2. WHEN job status changes THEN connected WebSocket clients SHALL receive immediate notifications +3. WHEN processing stages complete THEN clients SHALL receive detailed progress information +4. WHEN WebSocket connections are lost THEN the system SHALL handle reconnection gracefully +5. WHEN multiple clients connect to the same job THEN all SHALL receive synchronized updates + +### Requirement 10 + +**User Story:** As a system integrator, I want the API to support batch operations and bulk processing, so that I can efficiently handle multiple video generation requests simultaneously. + +#### Acceptance Criteria + +1. WHEN a POST request is made to `/api/v1/videos/batch` with multiple video requests THEN the system SHALL create multiple jobs and return batch job metadata +2. WHEN batch processing is requested THEN the system SHALL optimize resource allocation across multiple jobs +3. WHEN batch jobs are queried THEN the system SHALL return aggregated status information for all jobs in the batch +4. WHEN individual jobs in a batch fail THEN other jobs SHALL continue processing independently +5. WHEN batch size exceeds system limits THEN the system SHALL return appropriate error messages with suggested batch sizes \ No newline at end of file diff --git a/.kiro/specs/fastapi-backend/tasks.md b/.kiro/specs/fastapi-backend/tasks.md new file mode 100644 index 0000000000000000000000000000000000000000..af155e8bdf6397737ee2387e30030178abbec157 --- /dev/null +++ b/.kiro/specs/fastapi-backend/tasks.md @@ -0,0 +1,305 @@ +# Implementation Plan + +- [ ] 1. 
Set up project structure and basic configuration + + - Create FastAPI project directory structure following the simplified design specification + - Set up pyproject.toml with required dependencies (FastAPI, Pydantic, Redis, Clerk SDK) + - Create main.py with basic FastAPI application initialization + - Configure environment-based settings using Pydantic BaseSettings + - Set up basic logging configuration + - _Requirements: 1.1, 6.4, 7.5_ + +- [x] 2. Implement Redis infrastructure and connection + + - [x] 2.1 Set up Redis connection and utilities + + - Create redis.py with Redis connection management + - Implement Redis dependency injection for FastAPI endpoints + - Configure Redis connection pooling and error handling + - Add Redis health check utilities + - _Requirements: 1.1, 2.1, 4.1_ + + - [x] 2.2 Create Redis data access patterns + + - Implement Redis hash operations for job storage + - Create Redis list operations for job queue management + - Add Redis set operations for user job indexing + - Implement Redis key expiration and cleanup utilities + - _Requirements: 1.1, 2.1, 8.2_ + +- [x] 3. Implement Clerk authentication integration + + - [x] 3.1 Set up Clerk authentication middleware + + - Create Clerk SDK integration and configuration + - Implement Clerk token validation middleware + - Build authentication dependency for protected endpoints + - Add user information extraction from Clerk tokens + - _Requirements: 6.1, 6.2, 6.3_ + + - [x] 3.2 Create user management utilities + + - Implement user data extraction from Clerk + - Create user session management utilities + - Add user permission checking functions + - _Requirements: 6.1, 6.2, 8.1_ + +- [ ] 4. Create Pydantic data models + + - [x] 4.1 Define core Pydantic models + + - Create Job model with validation rules and status enum + - Implement VideoMetadata model for video information + - Define User model for Clerk user data + - Create SystemHealth model for monitoring + - Add common response models and error schemas + - _Requirements: 1.1, 1.3, 2.1, 7.3, 8.3_ + + - [x] 4.2 Implement request/response schemas + + - Create VideoGenerationRequest schema with field validation + - Define JobResponse and JobStatusResponse schemas + - Implement pagination and filtering schemas + - Add error response schemas with consistent structure + - Create API documentation examples for all schemas + - _Requirements: 1.1, 1.3, 2.1, 7.3, 8.3_ + +- [x] 5. Build core API endpoints + + - [x] 5.1 Implement video generation endpoints + + - Create POST /api/v1/videos/generate endpoint with Pydantic validation + - Implement GET /api/v1/videos/jobs/{job_id}/status endpoint with Redis data + - Build GET /api/v1/videos/jobs/{job_id}/download endpoint for file serving + - Add GET /api/v1/videos/jobs/{job_id}/metadata endpoint + - _Requirements: 1.1, 1.2, 1.3, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2_ + + - [x] 5.2 Implement job management endpoints + + - Create GET /api/v1/jobs endpoint with pagination and filtering + - Implement POST /api/v1/jobs/{job_id}/cancel endpoint + - Build DELETE /api/v1/jobs/{job_id} endpoint with Redis cleanup + - Add GET /api/v1/jobs/{job_id}/logs endpoint + - _Requirements: 2.1, 2.2, 4.3, 10.3_ + + - [x] 5.3 Create system monitoring endpoints + + - Implement GET /api/v1/system/health endpoint with Redis and queue checks + - Create GET /api/v1/system/metrics endpoint for basic system stats + - Add GET /api/v1/system/queue-status endpoint for queue monitoring + - _Requirements: 4.1, 4.2, 4.5_ + +- [x] 6. 
Implement business logic services + + - [x] 6.1 Create video generation service + + - Implement VideoService class with Redis job queue integration + - Add job creation and status management methods + - Create progress tracking and update mechanisms + - Implement job queue processing logic + - _Requirements: 1.1, 1.2, 2.1, 2.2, 9.2_ + + - [x] 6.2 Build job management service + + - Implement JobService with Redis-based storage + - Add job lifecycle management methods + - Create job cancellation and cleanup functionality + - Implement job metrics collection + - _Requirements: 2.1, 2.2, 4.3, 10.1, 10.2, 10.4_ + + - [x] 6.3 Create queue management service + + - Implement QueueService for Redis queue operations + + - Add job queuing and dequeuing methods + - Create queue monitoring and health check functions + - Implement queue cleanup and maintenance utilities + - _Requirements: 1.1, 2.1, 4.2_ + +- [x] 7. Add file handling and storage + + - [x] 7.1 Implement local file management + + - Create file upload handling with validation + - Implement secure file storage with proper permissions + - Add file metadata extraction and storage in Redis + - Create file cleanup and maintenance utilities + - _Requirements: 3.1, 3.2, 3.3, 5.1, 5.2, 5.4, 5.5_ + + - [x] 7.2 Build file serving capabilities + + - Implement file download endpoints with streaming + - Add file access control and security checks + - Create file URL generation for frontend access + - Implement file caching strategies + - _Requirements: 3.1, 3.2, 3.3_ + +- [ ] 8. Implement error handling and middleware + + - [x] 8.1 Create global exception handling + + - Implement custom exception classes with proper HTTP status codes + - Create global exception handler middleware + - Add structured error response formatting + - Implement error logging with request correlation + - _Requirements: 8.1, 8.2, 8.3_ + + - [x] 8.2 Build request/response middleware + + - Implement CORS middleware for cross-origin requests + - Create request logging middleware with performance metrics + - Add response compression middleware + - Implement security headers middleware + - _Requirements: 8.2, 8.4_ + +- [x] 9. Add caching and performance optimization + + - [x] 9.1 Implement Redis caching strategies + + - Create cache decorator for frequently accessed endpoints + - Implement cache invalidation patterns + + - Add cache warming strategies for common queries + - Create cache monitoring and metrics collection + - _Requirements: 2.1, 4.2_ + + - [x] 9.2 Optimize API performance + + - Implement response caching for static data + - Add request deduplication for expensive operations + - Create connection pooling optimization + - Implement async processing where beneficial + - _Requirements: 2.1, 4.2, 10.3_ + +- [ ] 10. Implement batch processing capabilities + + - [ ] 10.1 Create batch job endpoints + + - Implement POST /api/v1/videos/batch endpoint + - Add batch job validation and processing logic + - Create batch status tracking and reporting + - _Requirements: 10.1, 10.2_ + + - [ ] 10.2 Build batch job management + - Implement batch job cancellation and cleanup + - Add batch job progress aggregation + - Create batch job completion notifications + - _Requirements: 10.3, 10.4, 10.5_ + +- [ ] 11. 
Add comprehensive testing suite + + - [ ] 11.1 Create unit tests for core functionality + + - Write unit tests for all service layer methods + - Create tests for Redis operations and data models + - Add tests for Clerk authentication integration + - Test Pydantic schema validation and serialization + - _Requirements: 1.1, 1.3, 2.1, 6.1, 8.3_ + + - [ ] 11.2 Implement integration tests for API endpoints + + - Create integration tests for all video generation endpoints + - Test job management endpoints with Redis operations + - Add file upload and download functionality tests + - Test error handling and edge cases + - _Requirements: 1.1, 2.1, 3.1, 9.1_ + + - [ ] 11.3 Build end-to-end workflow tests + - Create complete video generation workflow tests + - Test batch processing end-to-end scenarios + - Add performance testing for critical endpoints + - Test system recovery and error scenarios + - _Requirements: 1.1, 8.1, 10.1_ + +- [ ] 12. Implement rate limiting and security features + + - [ ] 12.1 Add rate limiting middleware + + - Implement Redis-based rate limiting per user and endpoint + - Create rate limit configuration and storage + - Add rate limit headers and error responses + - Implement rate limit monitoring and alerting + - _Requirements: 6.5, 8.4_ + + - [ ] 12.2 Enhance security measures + - Implement input sanitization and validation + - Add request/response logging for audit trails + - Create security headers middleware + - Implement API key validation for internal services + - _Requirements: 6.1, 6.3, 8.2_ + +- [x] 13. Create API documentation and client generation + + + + + + - [x] 13.1 Configure OpenAPI documentation + + + + - Customize OpenAPI schema generation with proper operation IDs + - Add comprehensive endpoint descriptions and examples + - Configure Swagger UI and ReDoc interfaces + - Add authentication documentation for Clerk integration + - _Requirements: 7.1, 7.2, 7.3_ + + + + - [ ] 13.2 Set up client code generation + - Configure OpenAPI specification for client generation + - Create example client generation scripts + - Add client SDK documentation and examples + - Test generated clients with real API endpoints + - _Requirements: 7.4, 7.5_ + +- [ ] 14. Implement deployment configuration + + - [ ] 14.1 Create Docker containerization + + - Write Dockerfile with multi-stage build optimization + - Create docker-compose.yml for local development with Redis + - Add production docker-compose with proper networking + - Configure environment variable management + - _Requirements: 4.1, 8.4_ + + - [ ] 14.2 Add monitoring and logging + - Implement structured logging with JSON format + - Add application metrics collection and export + - Create health check endpoints for container orchestration + - Configure log aggregation and monitoring dashboards + - _Requirements: 4.1, 4.2, 8.2_ + +- [ ] 15. 
Final integration and optimization + + - [ ] 15.1 Integrate with existing video generation pipeline + + - Create Redis queue integration with multi-agent video generation system + - Implement job queue management with proper error handling + - Add configuration mapping between API requests and pipeline parameters + - Test end-to-end video generation workflow + - _Requirements: 1.1, 1.2, 2.1_ + + - [ ] 15.2 Performance optimization and cleanup + - Optimize Redis operations and connection management + - Implement proper resource cleanup and garbage collection + - Add graceful shutdown handling for long-running operations + - Create production-ready configuration templates + - _Requirements: 4.2, 8.4_ + +- [ ] 16. Frontend integration preparation + + - [ ] 16.1 Create frontend-specific endpoints + + - Implement GET /api/v1/dashboard/stats endpoint for user dashboard + - Create GET /api/v1/dashboard/recent-jobs endpoint + - Add WebSocket endpoints for real-time job updates + - Implement user preference and settings endpoints + - _Requirements: 2.1, 9.1, 9.2_ + + - [ ] 16.2 Set up CORS and frontend security + - Configure CORS middleware for frontend domain access + - Add rate limiting specific to frontend endpoints + - Create frontend-specific authentication flows with Clerk + - Implement proper session management for frontend clients + - _Requirements: 6.1, 6.5, 8.4_ diff --git a/API_DOCUMENTATION.md b/API_DOCUMENTATION.md new file mode 100644 index 0000000000000000000000000000000000000000..da102b413013a20335f542749c2a31ef0c1cfa17 --- /dev/null +++ b/API_DOCUMENTATION.md @@ -0,0 +1,1311 @@ +# T2M API Documentation + +## Overview + +This document provides comprehensive documentation for the T2M (Text-to-Media) API endpoints. The API is organized into several modules: Authentication, Files, Jobs, System, and Videos. + +## Base URL + +``` +https://your-api-domain.com/api/v1 +``` + +## Authentication + +Most endpoints require authentication. 
Include the authorization token in the request headers: + +``` +Authorization: Bearer +``` + +**Public endpoints** (no authentication required): + +- `GET /auth/health` +- `GET /system/health` + +**Optional authentication** (enhanced data for authenticated users): + +- All other `/system/*` endpoints + +## Common Response Formats + +### Success Response + +```json +{ + "success": true, + "data": { + "id": "12345", + "status": "completed" + }, + "message": "Operation completed successfully" +} +``` + +### Error Response + +```json +{ + "success": false, + "error": { + "code": "AUTH_INVALID", + "details": "Token has expired or is malformed" + } +} +``` + +### Pagination Format + +```json +{ + "success": true, + "data": { + "items": [...], + "pagination": { + "page": 1, + "items_per_page": 20, + "total_items": 150, + "total_pages": 8, + "has_next": true, + "has_previous": false, + "next_page": 2, + "previous_page": null + } + } +} +``` + +## Authentication Endpoints + +### Health Check + +- **Endpoint**: `GET /auth/health` +- **Description**: Check authentication service health +- **Authentication**: Not required +- **Response**: Service health status +- **Example Response**: + +```json +{ + "status": "healthy", + "clerk": { + "status": "healthy", + "response_time": "45ms", + "last_check": "2024-01-15T10:30:00Z" + }, + "message": "Authentication service health check completed" +} +``` + +### Get Authentication Status + +- **Endpoint**: `GET /auth/status` +- **Description**: Get current user authentication status +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Authentication status and user info +- **Example Response (Authenticated)**: + +```json +{ + "authenticated": true, + "user_id": "user_12345", + "email": "john@example.com", + "email_verified": true, + "request_context": { + "path": "/auth/status", + "method": "GET", + "client_ip": "192.168.1.100" + } +} +``` + +- **Example Response (Not Authenticated)**: + +```json +{ + "authenticated": false, + "message": "No authentication provided", + "request_context": { + "path": "/auth/status", + "method": "GET", + "client_ip": "192.168.1.100" + } +} +``` + +### Get Current User Profile + +- **Endpoint**: `GET /auth/me` +- **Description**: Get authenticated user's profile information +- **Headers**: + - `Authorization: Bearer ` (required) +- **Response**: User profile data +- **Example Response**: + +```json +{ + "id": "user_12345", + "username": "john_doe", + "full_name": "John Doe", + "email": "john@example.com", + "image_url": "https://example.com/avatar.jpg", + "email_verified": true, + "created_at": "2024-01-01T00:00:00Z", + "last_sign_in_at": "2024-01-15T10:30:00Z" +} +``` + +**Note**: `created_at` and `last_sign_in_at` fields may be `null` if not available from the authentication provider. 
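To illustrate the bearer-token flow described above, here is a minimal client sketch using Python's `requests` library. The base URL is the placeholder from this document and the Clerk session token is assumed to be a valid JWT obtained from Clerk; the field names follow the example responses shown above.

```python
import requests

BASE_URL = "https://your-api-domain.com/api/v1"  # placeholder base URL from this document
TOKEN = "<clerk_session_token>"                  # assumed: a valid Clerk session token

headers = {"Authorization": f"Bearer {TOKEN}"}

# Check authentication status (returns data with or without a token)
status = requests.get(f"{BASE_URL}/auth/status", headers=headers, timeout=10)
print(status.json().get("authenticated"))

# Fetch the authenticated user's profile (requires a valid token)
me = requests.get(f"{BASE_URL}/auth/me", headers=headers, timeout=10)
if me.status_code == 200:
    profile = me.json()
    print(profile["email"], profile["email_verified"])
else:
    # 401 is returned for missing or expired tokens, per the error format above
    print("Authentication failed:", me.status_code)
```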
+ +### Get User Permissions + +- **Endpoint**: `GET /auth/permissions` +- **Description**: Get user's permissions and access levels +- **Headers**: + - `Authorization: Bearer ` (required) +- **Response**: User permissions and role information +- **Example Response**: + +```json +{ + "user_id": "user_12345", + "role": "USER", + "permissions": [ + "read_files", + "upload_files", + "generate_videos" + ], + "access_level": "standard" +} +``` + +### Test Protected Endpoint + +- **Endpoint**: `GET /auth/test-protected` +- **Description**: Test endpoint for authenticated users +- **Headers**: + - `Authorization: Bearer ` (required) +- **Response**: Test response with user ID +- **Example Response**: + +```json +{ + "message": "Successfully accessed protected endpoint", + "user_id": "user_12345", + "timestamp": "2024-01-15T10:30:00Z" +} +``` + +### Test Verified Endpoint + +- **Endpoint**: `GET /auth/test-verified` +- **Description**: Test endpoint for verified users only +- **Headers**: + - `Authorization: Bearer ` (required) +- **Response**: Test response for verified users +- **Example Response**: + +```json +{ + "message": "Successfully accessed verified user endpoint", + "user_id": "user_12345", + "email": "john@example.com", + "email_verified": true, + "timestamp": "2024-01-15T10:30:00Z" +} +``` + +### Verify Token + +- **Endpoint**: `POST /auth/verify` +- **Description**: Verify authentication token validity +- **Headers**: + - `Authorization: Bearer ` (required) +- **Response**: Token verification status +- **Example Response**: + +```json +{ + "verified": true, + "user_id": "user_12345", + "email": "john@example.com", + "email_verified": true, + "message": "Token verified successfully" +} +``` + +## File Management Endpoints + +### Upload Single File + +- **Endpoint**: `POST /files/upload` +- **Description**: Upload a single file +- **Headers**: + - `Authorization: Bearer ` (required) + - `Content-Type: multipart/form-data` +- **Parameters**: + - `file` (file, required): File to upload + - `file_type` (string, optional): File type category + - `subdirectory` (string, optional): Target subdirectory + - `description` (string, optional): File description +- **Response**: File upload confirmation with file ID + +### Batch Upload Files + +- **Endpoint**: `POST /files/batch-upload` +- **Description**: Upload multiple files at once +- **Headers**: + - `Authorization: Bearer ` (required) + - `Content-Type: multipart/form-data` +- **Parameters**: + - `files` (file[], required): Files to upload + - `file_type` (string, optional): File type for all files + - `subdirectory` (string, optional): Subdirectory for all files + - `description` (string, optional): Description for all files +- **Response**: Batch upload results + +### List Files + +- **Endpoint**: `GET /files` +- **Description**: List user's files with pagination and filtering +- **Headers**: + - `Authorization: Bearer ` (required) +- **Query Parameters**: + +| Name | Type | Required | Default | Description | +| ---------------- | ------- | -------- | ------- | --------------------------------------------------- | +| `file_type` | string | no | - | Filter by file type (document, image, video, audio) | +| `page` | integer | no | 1 | Page number (≥1) | +| `items_per_page` | integer | no | 20 | Items per page (1-100) | + +- **Response**: Paginated list of files +- **Example Response**: + +```json +{ + "success": true, + "data": { + "items": [ + { + "id": "file_123456", + "filename": "document.pdf", + "size": 2048576, + "file_type": "document", 
+ "created_at": "2024-01-15T10:30:00Z" + } + ], + "pagination": { + "page": 1, + "items_per_page": 20, + "total_items": 150, + "total_pages": 8, + "has_next": true + } + } +} +``` + +### Get File Details + +- **Endpoint**: `GET /files/{file_id}` +- **Description**: Get comprehensive file information including content details and metadata +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `file_id` (string, required): Unique file identifier +- **Response**: Complete file details +- **Example Response**: + +```json +{ + "success": true, + "data": { + "id": "file_123456", + "filename": "document.pdf", + "size": 2048576, + "content_type": "application/pdf", + "file_type": "document", + "subdirectory": "uploads/2024", + "description": "Important document", + "created_at": "2024-01-15T10:30:00Z", + "updated_at": "2024-01-15T10:30:00Z", + "download_count": 5, + "metadata": { + "pages": 10, + "author": "John Doe", + "creation_date": "2024-01-15" + } + } +} +``` + +### Get File Metadata + +- **Endpoint**: `GET /files/{file_id}/metadata` +- **Description**: Get file metadata and technical information only +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `file_id` (string, required): Unique file identifier +- **Response**: File metadata only +- **Example Response**: + +```json +{ + "success": true, + "data": { + "content_type": "application/pdf", + "size": 2048576, + "checksum": "sha256:abc123...", + "metadata": { + "pages": 10, + "author": "John Doe", + "creation_date": "2024-01-15" + } + } +} +``` + +### Download File + +- **Endpoint**: `GET /files/{file_id}/download` +- **Description**: Download file content +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `file_id` (string, required): Unique file identifier +- **Query Parameters**: + - `inline` (boolean, default: false): Serve inline instead of attachment +- **Response**: File content + +### Stream File + +- **Endpoint**: `GET /files/{file_id}/stream` +- **Description**: Stream file content +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `file_id` (string, required): Unique file identifier +- **Query Parameters**: + - `quality` (string, default: "auto"): Stream quality +- **Response**: Streamed file content + +### Get File Thumbnail + +- **Endpoint**: `GET /files/{file_id}/thumbnail` +- **Description**: Get file thumbnail +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `file_id` (string, required): Unique file identifier +- **Query Parameters**: + - `size` (string, default: "medium"): Thumbnail size (small|medium|large) +- **Response**: Thumbnail image + +### Get File Analytics + +- **Endpoint**: `GET /files/{file_id}/analytics` +- **Description**: Get file usage analytics +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `file_id` (string, required): Unique file identifier +- **Response**: File analytics data + +### Delete File + +- **Endpoint**: `DELETE /files/{file_id}` +- **Description**: Delete a file +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `file_id` (string, required): Unique file identifier +- **Response**: Deletion confirmation + +### Get File Statistics + +- **Endpoint**: `GET /files/stats` +- **Description**: Get user's file statistics +- **Headers**: + - `Authorization: Bearer ` (required) +- **Response**: File usage statistics + +### Cleanup Files + +- **Endpoint**: `POST 
/files/cleanup` +- **Description**: Cleanup files based on criteria +- **Headers**: + - `Authorization: Bearer ` (required) + - `Content-Type: application/json` +- **Request Body**: File cleanup criteria +- **Response**: Cleanup results + +### Secure File Access + +- **Endpoint**: `GET /files/secure/{file_id}` +- **Description**: Access files via signed URLs +- **Query Parameters**: + - `user_id` (string, required): User ID from signed URL + - `expires` (string, required): Expiration timestamp + - `signature` (string, required): URL signature + - `file_type` (string, optional): File type + - `inline` (string, default: "false"): Serve inline + - `size` (string, optional): Thumbnail size + - `quality` (string, optional): Stream quality +- **Response**: Secure file access + +## Job Management Endpoints + +### List Jobs + +- **Endpoint**: `GET /jobs` +- **Description**: List user's jobs with pagination and filtering +- **Headers**: + - `Authorization: Bearer ` (required) +- **Query Parameters**: Pagination and filtering parameters +- **Response**: Paginated list of jobs + +### Get Job Details + +- **Endpoint**: `GET /jobs/{job_id}` +- **Description**: Get comprehensive job information including status, progress, and results +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Response**: Complete job details and status +- **Example Response**: + +```json +{ + "success": true, + "data": { + "id": "job_789012", + "type": "video_generation", + "status": "completed", + "progress": 100, + "created_at": "2024-01-15T10:30:00Z", + "started_at": "2024-01-15T10:30:05Z", + "completed_at": "2024-01-15T10:35:30Z", + "duration": 325, + "parameters": { + "prompt": "A beautiful sunset over mountains", + "duration": 10, + "quality": "1080p" + }, + "result": { + "file_id": "video_456789", + "file_size": 15728640, + "thumbnail_url": "/videos/job_789012/thumbnail" + }, + "error": null + } +} +``` + +### Get Job Logs + +- **Endpoint**: `GET /jobs/{job_id}/logs` +- **Description**: Get job execution logs with filtering and pagination +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Query Parameters**: + +| Name | Type | Required | Default | Description | +| -------- | ------- | -------- | ------- | ------------------------------------------------- | +| `limit` | integer | no | 100 | Maximum log entries (1-1000) | +| `offset` | integer | no | 0 | Log entries to skip (≥0) | +| `level` | string | no | - | Filter by log level (DEBUG, INFO, WARNING, ERROR) | + +- **Response**: Job logs with metadata +- **Example Response**: + +```json +{ + "success": true, + "data": { + "logs": [ + { + "timestamp": "2024-01-15T10:30:15Z", + "level": "INFO", + "message": "Video processing started", + "details": { + "step": "initialization", + "progress": 0 + } + }, + { + "timestamp": "2024-01-15T10:30:45Z", + "level": "INFO", + "message": "Processing frame 100/1000", + "details": { + "step": "rendering", + "progress": 10 + } + } + ], + "total_logs": 250, + "has_more": true + } +} +``` + +### Cancel Job + +- **Endpoint**: `POST /jobs/{job_id}/cancel` +- **Description**: Cancel a running job +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Response**: Cancellation confirmation + +### Delete Job + +- **Endpoint**: `DELETE /jobs/{job_id}` +- **Description**: Delete a job and 
its data +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Response**: Deletion confirmation + +## System Monitoring Endpoints + +### System Health Check + +- **Endpoint**: `GET /system/health` +- **Description**: Get overall system health status +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: System health metrics + +### System Metrics + +- **Endpoint**: `GET /system/metrics` +- **Description**: Get detailed system metrics +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: System performance metrics + +### Queue Status + +- **Endpoint**: `GET /system/queue` +- **Description**: Get job queue status and statistics +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Queue status and metrics + +### Cache Information + +- **Endpoint**: `GET /system/cache` +- **Description**: Get cache status and information +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Cache metrics and status + +### Cache Metrics + +- **Endpoint**: `GET /system/cache/metrics` +- **Description**: Get detailed cache metrics +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Cache performance metrics + +### Cache Report + +- **Endpoint**: `GET /system/cache/report` +- **Description**: Get comprehensive cache report +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Detailed cache report + +### Performance Summary + +- **Endpoint**: `GET /system/performance` +- **Description**: Get system performance summary +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Query Parameters**: + - `hours` (integer, default: 1): Time range in hours +- **Response**: Performance summary + +### Connection Statistics + +- **Endpoint**: `GET /system/connections` +- **Description**: Get connection statistics +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Connection metrics + +### Async Statistics + +- **Endpoint**: `GET /system/async` +- **Description**: Get asynchronous processing statistics +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Async processing metrics + +### Deduplication Statistics + +- **Endpoint**: `GET /system/deduplication` +- **Description**: Get deduplication statistics +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Deduplication metrics + +### Invalidate Cache + +- **Endpoint**: `POST /system/cache/invalidate` +- **Description**: Invalidate cache entries +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Query Parameters**: + - `pattern` (string, optional): Cache key pattern + - `user_id` (string, optional): User-specific cache +- **Response**: Cache invalidation results + +### Warm Cache + +- **Endpoint**: `POST /system/cache/warm` +- **Description**: Pre-warm cache with frequently accessed data +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Cache warming results + +### Optimize Performance + +- **Endpoint**: `POST /system/optimize` +- **Description**: Trigger system performance optimization +- **Headers**: + - `Authorization: Bearer ` (optional) +- **Response**: Optimization results + +## Video Processing Endpoints + +### Generate Video + +- **Endpoint**: `POST /videos/generate` +- **Description**: Create a new video generation job +- **Headers**: + - `Authorization: Bearer ` (required) + - `Content-Type: application/json` +- **Request Body**: Job creation parameters +- **Response**: 
Job creation confirmation with job ID + +### Get Job Status + +- **Endpoint**: `GET /videos/{job_id}/status` +- **Description**: Get video generation job status +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Response**: Job status and progress + +### Download Video + +- **Endpoint**: `GET /videos/{job_id}/download` +- **Description**: Download generated video +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Query Parameters**: + - `inline` (boolean, default: false): Serve inline instead of attachment +- **Response**: Video file content + +### Stream Video + +- **Endpoint**: `GET /videos/{job_id}/stream` +- **Description**: Stream generated video +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Query Parameters**: + - `quality` (string, default: "auto"): Stream quality (auto|720p|1080p) +- **Response**: Streamed video content + +### Get Video Metadata + +- **Endpoint**: `GET /videos/{job_id}/metadata` +- **Description**: Get comprehensive video metadata and technical information +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Response**: Detailed video metadata +- **Example Response**: + +```json +{ + "success": true, + "data": { + "job_id": "job_789012", + "video": { + "duration": 10.5, + "width": 1920, + "height": 1080, + "fps": 30, + "bitrate": 5000000, + "codec": "h264", + "format": "mp4", + "file_size": 15728640 + }, + "audio": { + "codec": "aac", + "bitrate": 128000, + "sample_rate": 44100, + "channels": 2 + }, + "generation": { + "prompt": "A beautiful sunset over mountains", + "model": "t2v-v2.1", + "seed": 12345, + "created_at": "2024-01-15T10:30:00Z" + } + } +} +``` + +### Get Video Thumbnail + +- **Endpoint**: `GET /videos/{job_id}/thumbnail` +- **Description**: Get video thumbnail +- **Headers**: + - `Authorization: Bearer ` (required) +- **Path Parameters**: + - `job_id` (string, required): Unique job identifier +- **Query Parameters**: + - `size` (string, default: "medium"): Thumbnail size (small|medium|large) +- **Response**: Video thumbnail image + +## Error Handling + +### Error Codes + +| Code | HTTP Status | Description | +| --------------------- | ----------- | ------------------------------- | +| `AUTH_REQUIRED` | 401 | Authentication required | +| `AUTH_INVALID` | 401 | Invalid authentication token | +| `AUTH_EXPIRED` | 401 | Authentication token expired | +| `PERMISSION_DENIED` | 403 | Insufficient permissions | +| `RESOURCE_NOT_FOUND` | 404 | Requested resource not found | +| `VALIDATION_ERROR` | 400 | Request validation failed | +| `RATE_LIMIT_EXCEEDED` | 429 | Rate limit exceeded | +| `SERVER_ERROR` | 500 | Internal server error | +| `SERVICE_UNAVAILABLE` | 503 | Service temporarily unavailable | + +### Error Response Examples + +**Invalid Authentication (401)** + +```json +{ + "success": false, + "error": { + "code": "AUTH_INVALID", + "details": "Token has expired or is malformed" + } +} +``` + +**Resource Not Found (404)** + +```json +{ + "success": false, + "error": { + "code": "RESOURCE_NOT_FOUND", + "message": "File not found", + "details": "File with ID 'file_123456' does not exist or you don't have access" + } +} +``` + +**Validation Error (400)** + +```json +{ + "success": false, + "error": { + 
"code": "VALIDATION_ERROR", + "message": "Request validation failed", + "details": { + "file_type": [ + "Invalid file type. Must be one of: document, image, video, audio" + ], + "page": ["Page must be greater than 0"] + } + } +} +``` + +**Rate Limit Exceeded (429)** + +```json +{ + "success": false, + "error": { + "code": "RATE_LIMIT_EXCEEDED", + "message": "Rate limit exceeded", + "details": "You have exceeded the limit of 100 uploads per hour. Try again in 45 minutes." + } +} +``` + +## Rate Limits + +- **General API**: 1000 requests per hour per user +- **File Upload**: 100 uploads per hour per user +- **Video Generation**: 10 jobs per hour per user +- **System Endpoints**: 500 requests per hour per user + +## Code Examples + +### cURL Examples + +**Upload a file** + +```bash +curl -X POST "https://api.example.com/api/v1/files/upload" \ + -H "Authorization: Bearer your-token-here" \ + -F "file=@example.txt" \ + -F "file_type=document" \ + -F "description=Sample document" +``` + +**Generate video** + +```bash +curl -X POST "https://api.example.com/api/v1/videos/generate" \ + -H "Authorization: Bearer your-token-here" \ + -H "Content-Type: application/json" \ + -d '{ + "prompt": "A beautiful sunset over mountains", + "duration": 10, + "quality": "1080p" + }' +``` + +**Get job status** + +```bash +curl -X GET "https://api.example.com/api/v1/jobs/job_789012" \ + -H "Authorization: Bearer your-token-here" +``` + +**Get system metrics** + +```bash +curl -X GET "https://api.example.com/api/v1/system/metrics" \ + -H "Authorization: Bearer your-token-here" +``` + +### Python Examples + +**File Management** + +```python +import requests + +headers = {"Authorization": "Bearer your-token-here"} +base_url = "https://api.example.com/api/v1" + +# Upload file +with open("example.txt", "rb") as f: + files = {"file": f} + data = {"file_type": "document", "description": "Sample document"} + response = requests.post(f"{base_url}/files/upload", headers=headers, files=files, data=data) + file_data = response.json() + print(f"File uploaded: {file_data['data']['id']}") + +# List files +response = requests.get(f"{base_url}/files", headers=headers, params={"page": 1, "items_per_page": 10}) +files = response.json() +print(f"Found {files['data']['pagination']['total_items']} files") +``` + +**Job Management** + +```python +# Get job logs +job_id = "job_789012" +response = requests.get(f"{base_url}/jobs/{job_id}/logs", headers=headers, params={"limit": 50, "level": "INFO"}) +logs = response.json() +print(f"Retrieved {len(logs['data']['logs'])} log entries") + +# Cancel job +response = requests.post(f"{base_url}/jobs/{job_id}/cancel", headers=headers) +if response.json()['success']: + print("Job cancelled successfully") +``` + +**Video Processing** + +```python +# Generate video +job_data = { + "prompt": "A beautiful sunset over mountains", + "duration": 10, + "quality": "1080p" +} +response = requests.post(f"{base_url}/videos/generate", headers=headers, json=job_data) +job = response.json() +job_id = job['data']['job_id'] +print(f"Video generation started: {job_id}") + +# Check status +response = requests.get(f"{base_url}/videos/{job_id}/status", headers=headers) +status = response.json() +print(f"Job status: {status['data']['status']} ({status['data']['progress']}%)") +``` + +**System Monitoring** + +```python +# Get system metrics +response = requests.get(f"{base_url}/system/metrics", headers=headers) +metrics = response.json() +print(f"CPU usage: {metrics['data']['cpu_usage']}%") +print(f"Memory usage: 
{metrics['data']['memory_usage']}%") + +# Get queue status +response = requests.get(f"{base_url}/system/queue", headers=headers) +queue = response.json() +print(f"Jobs in queue: {queue['data']['pending_jobs']}") +``` + +### JavaScript Examples + +**File Management** + +```javascript +const headers = { + Authorization: "Bearer your-token-here", +}; +const baseUrl = "https://api.example.com/api/v1"; + +// Upload file +const formData = new FormData(); +formData.append("file", fileInput.files[0]); +formData.append("file_type", "document"); +formData.append("description", "Sample document"); + +fetch(`${baseUrl}/files/upload`, { + method: "POST", + headers: headers, + body: formData, +}) + .then((response) => response.json()) + .then((data) => console.log("File uploaded:", data.data.id)); + +// List files with pagination +fetch(`${baseUrl}/files?page=1&items_per_page=10`, { + headers: headers, +}) + .then((response) => response.json()) + .then((data) => + console.log(`Found ${data.data.pagination.total_items} files`) + ); +``` + +**Job Management** + +```javascript +// Get job logs +const jobId = "job_789012"; +fetch(`${baseUrl}/jobs/${jobId}/logs?limit=50&level=INFO`, { + headers: headers, +}) + .then((response) => response.json()) + .then((data) => + console.log(`Retrieved ${data.data.logs.length} log entries`) + ); + +// Cancel job +fetch(`${baseUrl}/jobs/${jobId}/cancel`, { + method: "POST", + headers: headers, +}) + .then((response) => response.json()) + .then((data) => { + if (data.success) console.log("Job cancelled successfully"); + }); +``` + +**Video Processing** + +```javascript +// Generate video +const jobData = { + prompt: "A beautiful sunset over mountains", + duration: 10, + quality: "1080p", +}; + +fetch(`${baseUrl}/videos/generate`, { + method: "POST", + headers: { ...headers, "Content-Type": "application/json" }, + body: JSON.stringify(jobData), +}) + .then((response) => response.json()) + .then((data) => { + const jobId = data.data.job_id; + console.log("Video generation started:", jobId); + + // Poll for status + const checkStatus = () => { + fetch(`${baseUrl}/videos/${jobId}/status`, { headers }) + .then((response) => response.json()) + .then((status) => { + console.log( + `Status: ${status.data.status} (${status.data.progress}%)` + ); + if (status.data.status === "processing") { + setTimeout(checkStatus, 5000); // Check again in 5 seconds + } + }); + }; + checkStatus(); + }); +``` + +**System Monitoring** + +```javascript +// Get system metrics +fetch(`${baseUrl}/system/metrics`, { headers }) + .then((response) => response.json()) + .then((data) => { + console.log(`CPU usage: ${data.data.cpu_usage}%`); + console.log(`Memory usage: ${data.data.memory_usage}%`); + }); + +// Get queue status +fetch(`${baseUrl}/system/queue`, { headers }) + .then((response) => response.json()) + .then((data) => console.log(`Jobs in queue: ${data.data.pending_jobs}`)); +``` + +## Support + +For API support and questions, please contact: + +- Email: api-support@example.com +- Documentation: https://docs.example.com +- Status Page: https://status.example.com + +## Webhooks + +The T2M API supports webhook notifications for long-running operations like video generation. 
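+
+On the receiving side, a single HTTP handler is enough. The sketch below uses FastAPI purely as an illustration (any web framework works) and relies on the `X-T2M-Signature` scheme and event payloads described in the sections that follow; the endpoint path and the `T2M_WEBHOOK_SECRET` environment variable are placeholders:
+
+```python
+import hashlib
+import hmac
+import os
+
+from fastapi import FastAPI, Header, HTTPException, Request
+
+app = FastAPI()
+WEBHOOK_SECRET = os.environ["T2M_WEBHOOK_SECRET"]  # secret registered with the webhook
+
+@app.post("/webhooks/t2m")
+async def handle_t2m_webhook(request: Request, x_t2m_signature: str = Header(...)):
+    # Verify the HMAC signature before trusting the payload
+    payload = await request.body()
+    expected = hmac.new(WEBHOOK_SECRET.encode(), payload, hashlib.sha256).hexdigest()
+    if not hmac.compare_digest(f"sha256={expected}", x_t2m_signature):
+        raise HTTPException(status_code=403, detail="Invalid signature")
+
+    event = await request.json()
+    if event["event"] == "job.completed":
+        # e.g. persist the download URL or notify the user
+        print("Video ready:", event["data"]["result"]["download_url"])
+    return {"received": True}
+```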
+ +### Webhook Configuration + +Configure webhook URLs in your account settings or via the API: + +```bash +curl -X POST "https://api.example.com/api/v1/webhooks" \ + -H "Authorization: Bearer your-token-here" \ + -H "Content-Type: application/json" \ + -d '{ + "url": "https://your-app.com/webhooks/t2m", + "events": ["job.completed", "job.failed"], + "secret": "your-webhook-secret" + }' +``` + +### Webhook Events + +| Event | Description | +| --------------- | ------------------------------- | +| `job.started` | Job processing has begun | +| `job.progress` | Job progress update (every 10%) | +| `job.completed` | Job completed successfully | +| `job.failed` | Job failed with error | +| `job.cancelled` | Job was cancelled | + +### Webhook Payload Example + +**Job Completed** + +```json +{ + "event": "job.completed", + "timestamp": "2024-01-15T10:35:30Z", + "data": { + "job_id": "job_789012", + "type": "video_generation", + "status": "completed", + "result": { + "file_id": "video_456789", + "download_url": "https://api.example.com/api/v1/videos/job_789012/download", + "thumbnail_url": "https://api.example.com/api/v1/videos/job_789012/thumbnail" + } + } +} +``` + +### Webhook Security + +Verify webhook authenticity using the signature header: + +```python +import hmac +import hashlib + +def verify_webhook(payload, signature, secret): + expected = hmac.new( + secret.encode('utf-8'), + payload.encode('utf-8'), + hashlib.sha256 + ).hexdigest() + return hmac.compare_digest(f"sha256={expected}", signature) + +# In your webhook handler +signature = request.headers.get('X-T2M-Signature') +if verify_webhook(request.body, signature, webhook_secret): + # Process webhook + pass +``` + +## Advanced Features + +### Batch Operations + +**Batch File Upload** + +```bash +curl -X POST "https://api.example.com/api/v1/files/batch-upload" \ + -H "Authorization: Bearer your-token-here" \ + -F "files=@file1.txt" \ + -F "files=@file2.txt" \ + -F "files=@file3.txt" \ + -F "file_type=document" +``` + +### Signed URLs for Secure Access + +Generate temporary signed URLs for file access without authentication: + +```python +# Request signed URL +response = requests.post(f"{base_url}/files/{file_id}/signed-url", + headers=headers, + json={"expires_in": 3600}) # 1 hour +signed_url = response.json()['data']['url'] + +# Use signed URL (no auth required) +file_response = requests.get(signed_url) +``` + +### Streaming and Quality Options + +**Video Streaming with Quality Selection** + +```bash +# Stream in different qualities +curl "https://api.example.com/api/v1/videos/job_789012/stream?quality=720p" \ + -H "Authorization: Bearer your-token-here" +``` + +**Thumbnail Sizes** + +```bash +# Get different thumbnail sizes +curl "https://api.example.com/api/v1/videos/job_789012/thumbnail?size=large" \ + -H "Authorization: Bearer your-token-here" +``` + +## Performance Optimization + +### Caching + +The API implements intelligent caching. 
Use these endpoints to manage cache: + +```bash +# Warm cache for better performance +curl -X POST "https://api.example.com/api/v1/system/cache/warm" \ + -H "Authorization: Bearer your-token-here" + +# Invalidate specific cache patterns +curl -X POST "https://api.example.com/api/v1/system/cache/invalidate" \ + -H "Authorization: Bearer your-token-here" \ + -d "pattern=user:123:*" +``` + +### Request Optimization + +- Use pagination for large datasets +- Implement client-side caching for frequently accessed data +- Use appropriate quality settings for streaming +- Batch operations when possible + +## Monitoring and Analytics + +### System Health Monitoring + +```bash +# Check overall system health +curl "https://api.example.com/api/v1/system/health" + +# Get detailed performance metrics +curl "https://api.example.com/api/v1/system/performance?hours=24" \ + -H "Authorization: Bearer your-token-here" +``` + +### File Analytics + +Track file usage and performance: + +```bash +curl "https://api.example.com/api/v1/files/file_123456/analytics" \ + -H "Authorization: Bearer your-token-here" +``` + +## Migration Guide + +### From v1.0 to v1.1 + +**Breaking Changes:** + +- `GET /files/{id}/info` is now `GET /files/{id}` (consolidated endpoints) +- Error response format now includes `details` field +- Pagination format standardized across all endpoints + +**New Features:** + +- Webhook support for job notifications +- Batch file operations +- Enhanced error details +- Signed URL support + +**Migration Steps:** + +1. Update endpoint URLs for file info +2. Update error handling to use new format +3. Implement webhook handlers for better UX +4. Use batch operations for improved performance + +## Changelog + +### v1.1.0 (2024-01-15) + +- Added webhook support +- Introduced batch file operations +- Enhanced error responses with details +- Added signed URL generation +- Improved pagination format +- Added system performance endpoints + +### v1.0.0 (2023-12-01) + +- Initial API release +- Basic CRUD operations for files +- Video generation capabilities +- Job management system +- System monitoring endpoints diff --git a/AUTHENTICATION_GUIDE.md b/AUTHENTICATION_GUIDE.md new file mode 100644 index 0000000000000000000000000000000000000000..b1e29ece9cc792171ae739603ab39111a6b3edcc --- /dev/null +++ b/AUTHENTICATION_GUIDE.md @@ -0,0 +1,1120 @@ +# T2M Authentication & Session Flow Guide + +## Overview + +This guide covers the complete authentication and session management flow for the T2M (Text-to-Media) application using Clerk as the authentication provider. It includes frontend SDK setup, route protection, API token management, and secure file access patterns. + +## Table of Contents + +1. [Clerk Setup & Configuration](#clerk-setup--configuration) +2. [Frontend SDK Integration](#frontend-sdk-integration) +3. [Route Protection Strategy](#route-protection-strategy) +4. [API Token Management](#api-token-management) +5. [Secure File Access](#secure-file-access) +6. [Session Management](#session-management) +7. [Security Best Practices](#security-best-practices) +8. [Implementation Examples](#implementation-examples) + +## Clerk Setup & Configuration + +### 1. Clerk Dashboard Configuration + +**Environment Setup:** +- **Development**: `https://dev.t2m-app.com` +- **Production**: `https://t2m-app.com` + +**Required Clerk Settings:** +```javascript +// Environment Variables +NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=pk_test_... +CLERK_SECRET_KEY=sk_test_... 
+NEXT_PUBLIC_CLERK_SIGN_IN_URL=/sign-in +NEXT_PUBLIC_CLERK_SIGN_UP_URL=/sign-up +NEXT_PUBLIC_CLERK_AFTER_SIGN_IN_URL=/dashboard +NEXT_PUBLIC_CLERK_AFTER_SIGN_UP_URL=/onboarding +``` + +**Clerk Application Settings:** +- **Session token lifetime**: 7 days +- **JWT template**: Custom template for API integration +- **Allowed origins**: Your frontend domains +- **Webhook endpoints**: For user lifecycle events + +### 2. JWT Template Configuration + +Create a custom JWT template in Clerk dashboard: + +```json +{ + "aud": "t2m-api", + "exp": "{{session.expire_at}}", + "iat": "{{session.created_at}}", + "iss": "https://clerk.t2m-app.com", + "sub": "{{user.id}}", + "user_id": "{{user.id}}", + "email": "{{user.primary_email_address.email_address}}", + "role": "{{user.public_metadata.role}}", + "permissions": "{{user.public_metadata.permissions}}", + "subscription_tier": "{{user.public_metadata.subscription_tier}}" +} +``` + +## Frontend SDK Integration + +### 1. Next.js Setup (@clerk/nextjs) + +**Installation:** +```bash +npm install @clerk/nextjs +``` + +**App Router Configuration (app/layout.tsx):** +```typescript +import { ClerkProvider } from '@clerk/nextjs' +import { Inter } from 'next/font/google' +import './globals.css' + +const inter = Inter({ subsets: ['latin'] }) + +export default function RootLayout({ + children, +}: { + children: React.ReactNode +}) { + return ( + + + + {children} + + + + ) +} +``` + +**Middleware Setup (middleware.ts):** +```typescript +import { authMiddleware } from "@clerk/nextjs"; + +export default authMiddleware({ + // Public routes that don't require authentication + publicRoutes: [ + "/", + "/api/auth/health", + "/api/system/health", + "/pricing", + "/about", + "/contact" + ], + + // Routes that should be ignored by Clerk + ignoredRoutes: [ + "/api/webhooks/clerk", + "/api/files/secure/(.*)" // Signed URL access + ], + + // API routes that require authentication + apiRoutes: ["/api/(.*)"], + + // Redirect after sign in + afterAuth(auth, req, evt) { + // Handle users who aren't authenticated + if (!auth.userId && !auth.isPublicRoute) { + return redirectToSignIn({ returnBackUrl: req.url }); + } + + // Redirect authenticated users away from public-only pages + if (auth.userId && auth.isPublicRoute && req.nextUrl.pathname === "/") { + const dashboard = new URL("/dashboard", req.url); + return NextResponse.redirect(dashboard); + } + } +}); + +export const config = { + matcher: ["/((?!.+\\.[\\w]+$|_next).*)", "/", "/(api|trpc)(.*)"], +}; +``` + +### 2. 
React Components Setup + +**Authentication Components:** +```typescript +// components/auth/SignInButton.tsx +import { SignInButton as ClerkSignInButton } from "@clerk/nextjs"; + +export function SignInButton() { + return ( + + + + ); +} + +// components/auth/UserButton.tsx +import { UserButton as ClerkUserButton } from "@clerk/nextjs"; + +export function UserButton() { + return ( + + ); +} +``` + +**Protected Page Component:** +```typescript +// components/auth/ProtectedRoute.tsx +import { useAuth } from "@clerk/nextjs"; +import { useRouter } from "next/navigation"; +import { useEffect } from "react"; +import { LoadingSpinner } from "@/components/ui/LoadingSpinner"; + +interface ProtectedRouteProps { + children: React.ReactNode; + requiredRole?: string; + requiredPermissions?: string[]; +} + +export function ProtectedRoute({ + children, + requiredRole, + requiredPermissions +}: ProtectedRouteProps) { + const { isLoaded, isSignedIn, user } = useAuth(); + const router = useRouter(); + + useEffect(() => { + if (isLoaded && !isSignedIn) { + router.push("/sign-in"); + } + }, [isLoaded, isSignedIn, router]); + + if (!isLoaded) { + return ; + } + + if (!isSignedIn) { + return null; + } + + // Check role-based access + if (requiredRole) { + const userRole = user?.publicMetadata?.role as string; + if (userRole !== requiredRole) { + return
<div>Access denied. Required role: {requiredRole}</div>
; + } + } + + // Check permission-based access + if (requiredPermissions) { + const userPermissions = user?.publicMetadata?.permissions as string[] || []; + const hasPermission = requiredPermissions.every(permission => + userPermissions.includes(permission) + ); + + if (!hasPermission) { + return
<div>Access denied. Missing required permissions.</div>
; + } + } + + return <>{children}; +} +``` + +## Route Protection Strategy + +### 1. Public Routes (No Authentication Required) + +```typescript +// Public routes configuration +const PUBLIC_ROUTES = [ + "/", // Landing page + "/pricing", // Pricing information + "/about", // About page + "/contact", // Contact page + "/api/auth/health", // Auth service health + "/api/system/health", // System health check + "/legal/privacy", // Privacy policy + "/legal/terms" // Terms of service +]; +``` + +### 2. Protected Routes (Authentication Required) + +```typescript +// Protected routes with different access levels +const PROTECTED_ROUTES = { + // Basic authenticated routes + AUTHENTICATED: [ + "/dashboard", + "/profile", + "/files", + "/videos", + "/jobs" + ], + + // Admin-only routes + ADMIN: [ + "/admin", + "/admin/users", + "/admin/system", + "/admin/analytics" + ], + + // Premium subscription routes + PREMIUM: [ + "/premium/advanced-generation", + "/premium/batch-processing", + "/premium/priority-queue" + ] +}; +``` + +### 3. API Route Protection + +```typescript +// app/api/auth/route-protection.ts +import { auth } from "@clerk/nextjs"; +import { NextRequest, NextResponse } from "next/server"; + +export async function requireAuth(request: NextRequest) { + const { userId } = auth(); + + if (!userId) { + return NextResponse.json( + { error: "Unauthorized" }, + { status: 401 } + ); + } + + return userId; +} + +export async function requireRole(request: NextRequest, requiredRole: string) { + const { userId, sessionClaims } = auth(); + + if (!userId) { + return NextResponse.json( + { error: "Unauthorized" }, + { status: 401 } + ); + } + + const userRole = sessionClaims?.metadata?.role as string; + + if (userRole !== requiredRole) { + return NextResponse.json( + { error: "Forbidden" }, + { status: 403 } + ); + } + + return userId; +} + +// Usage in API routes +// app/api/files/route.ts +import { requireAuth } from "@/app/api/auth/route-protection"; + +export async function GET(request: NextRequest) { + const userId = await requireAuth(request); + if (userId instanceof NextResponse) return userId; // Error response + + // Continue with authenticated logic + // ... +} +``` + +## API Token Management + +### 1. Token Retrieval in Frontend + +```typescript +// hooks/useApiToken.ts +import { useAuth } from "@clerk/nextjs"; +import { useCallback } from "react"; + +export function useApiToken() { + const { getToken } = useAuth(); + + const getApiToken = useCallback(async () => { + try { + // Get token with custom JWT template + const token = await getToken({ template: "t2m-api" }); + return token; + } catch (error) { + console.error("Failed to get API token:", error); + throw new Error("Authentication failed"); + } + }, [getToken]); + + return { getApiToken }; +} + +// Usage in components +function VideoUpload() { + const { getApiToken } = useApiToken(); + + const uploadVideo = async (file: File) => { + const token = await getApiToken(); + + const formData = new FormData(); + formData.append('file', file); + + const response = await fetch('/api/files/upload', { + method: 'POST', + headers: { + 'Authorization': `Bearer ${token}` + }, + body: formData + }); + + return response.json(); + }; + + // ... +} +``` + +### 2. 
API Client with Automatic Token Management + +```typescript +// lib/api-client.ts +import { useAuth } from "@clerk/nextjs"; + +class ApiClient { + private baseUrl: string; + private getToken: () => Promise; + + constructor(baseUrl: string, getToken: () => Promise) { + this.baseUrl = baseUrl; + this.getToken = getToken; + } + + private async request( + endpoint: string, + options: RequestInit = {} + ): Promise { + const token = await this.getToken(); + + const config: RequestInit = { + ...options, + headers: { + 'Content-Type': 'application/json', + ...(token && { 'Authorization': `Bearer ${token}` }), + ...options.headers, + }, + }; + + const response = await fetch(`${this.baseUrl}${endpoint}`, config); + + if (!response.ok) { + const error = await response.json(); + throw new Error(error.message || 'API request failed'); + } + + return response.json(); + } + + // File operations + async uploadFile(file: File, metadata?: any) { + const formData = new FormData(); + formData.append('file', file); + if (metadata) { + Object.entries(metadata).forEach(([key, value]) => { + formData.append(key, value as string); + }); + } + + const token = await this.getToken(); + return fetch(`${this.baseUrl}/files/upload`, { + method: 'POST', + headers: { + ...(token && { 'Authorization': `Bearer ${token}` }) + }, + body: formData + }).then(res => res.json()); + } + + async getFiles(params?: any) { + const query = params ? `?${new URLSearchParams(params)}` : ''; + return this.request(`/files${query}`); + } + + // Video operations + async generateVideo(prompt: string, options?: any) { + return this.request('/videos/generate', { + method: 'POST', + body: JSON.stringify({ prompt, ...options }) + }); + } + + async getJobStatus(jobId: string) { + return this.request(`/jobs/${jobId}`); + } +} + +// Hook for using API client +export function useApiClient() { + const { getToken } = useAuth(); + + const apiClient = new ApiClient( + process.env.NEXT_PUBLIC_API_URL || '/api/v1', + () => getToken({ template: "t2m-api" }) + ); + + return apiClient; +} +``` + +### 3. Backend Token Validation + +```typescript +// Backend API token validation (if using proxy) +// app/api/auth/validate-token.ts +import { verifyToken } from "@clerk/backend"; + +export async function validateClerkToken(token: string) { + try { + const payload = await verifyToken(token, { + jwtKey: process.env.CLERK_JWT_KEY, + authorizedParties: [process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY] + }); + + return { + userId: payload.sub, + email: payload.email, + role: payload.role, + permissions: payload.permissions, + subscriptionTier: payload.subscription_tier + }; + } catch (error) { + throw new Error('Invalid token'); + } +} + +// Usage in API routes +export async function POST(request: NextRequest) { + const authHeader = request.headers.get('authorization'); + const token = authHeader?.replace('Bearer ', ''); + + if (!token) { + return NextResponse.json({ error: 'No token provided' }, { status: 401 }); + } + + try { + const user = await validateClerkToken(token); + // Continue with authenticated logic + } catch (error) { + return NextResponse.json({ error: 'Invalid token' }, { status: 401 }); + } +} +``` + +## Secure File Access + +### 1. 
Signed URL Generation + +```typescript +// Backend: Generate signed URLs for secure file access +// app/api/files/[fileId]/signed-url/route.ts +import { auth } from "@clerk/nextjs"; +import { createHmac } from "crypto"; + +export async function POST( + request: NextRequest, + { params }: { params: { fileId: string } } +) { + const { userId } = auth(); + if (!userId) { + return NextResponse.json({ error: "Unauthorized" }, { status: 401 }); + } + + const { fileId } = params; + const { expiresIn = 3600 } = await request.json(); // Default 1 hour + + // Verify user owns the file + const file = await getFileById(fileId); + if (!file || file.userId !== userId) { + return NextResponse.json({ error: "File not found" }, { status: 404 }); + } + + // Generate signed URL + const expires = Math.floor(Date.now() / 1000) + expiresIn; + const signature = createHmac('sha256', process.env.FILE_SIGNING_SECRET!) + .update(`${fileId}:${userId}:${expires}`) + .digest('hex'); + + const signedUrl = `${process.env.NEXT_PUBLIC_API_URL}/files/secure/${fileId}?` + + `user_id=${userId}&expires=${expires}&signature=${signature}`; + + return NextResponse.json({ + success: true, + data: { + url: signedUrl, + expires_at: new Date(expires * 1000).toISOString() + } + }); +} +``` + +### 2. Signed URL Validation + +```typescript +// Backend: Validate signed URLs +// app/api/files/secure/[fileId]/route.ts +import { createHmac } from "crypto"; +import { NextRequest, NextResponse } from "next/server"; + +export async function GET( + request: NextRequest, + { params }: { params: { fileId: string } } +) { + const { fileId } = params; + const { searchParams } = new URL(request.url); + + const userId = searchParams.get('user_id'); + const expires = searchParams.get('expires'); + const signature = searchParams.get('signature'); + + if (!userId || !expires || !signature) { + return NextResponse.json({ error: "Invalid signed URL" }, { status: 400 }); + } + + // Check expiration + const expiresTimestamp = parseInt(expires); + if (Date.now() / 1000 > expiresTimestamp) { + return NextResponse.json({ error: "Signed URL expired" }, { status: 410 }); + } + + // Verify signature + const expectedSignature = createHmac('sha256', process.env.FILE_SIGNING_SECRET!) + .update(`${fileId}:${userId}:${expires}`) + .digest('hex'); + + if (signature !== expectedSignature) { + return NextResponse.json({ error: "Invalid signature" }, { status: 403 }); + } + + // Serve file + const file = await getFileById(fileId); + if (!file || file.userId !== userId) { + return NextResponse.json({ error: "File not found" }, { status: 404 }); + } + + // Return file stream + const fileStream = await getFileStream(file.path); + return new NextResponse(fileStream, { + headers: { + 'Content-Type': file.contentType, + 'Content-Disposition': `attachment; filename="${file.filename}"`, + 'Cache-Control': 'private, max-age=3600' + } + }); +} +``` + +### 3. 
Frontend: Secure Video Player + +```typescript +// components/VideoPlayer.tsx +import { useApiClient } from "@/lib/api-client"; +import { useEffect, useState } from "react"; + +interface VideoPlayerProps { + jobId: string; + autoplay?: boolean; +} + +export function VideoPlayer({ jobId, autoplay = false }: VideoPlayerProps) { + const [signedUrl, setSignedUrl] = useState(null); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); + const apiClient = useApiClient(); + + useEffect(() => { + async function getSignedUrl() { + try { + setLoading(true); + + // Get signed URL for video + const response = await apiClient.request(`/videos/${jobId}/signed-url`, { + method: 'POST', + body: JSON.stringify({ expiresIn: 3600 }) // 1 hour + }); + + setSignedUrl(response.data.url); + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to load video'); + } finally { + setLoading(false); + } + } + + getSignedUrl(); + }, [jobId, apiClient]); + + if (loading) return
<div>Loading video...</div>
; + if (error) return
<div>Error: {error}</div>
; + if (!signedUrl) return
<div>Video not available</div>
; + + return ( + + ); +} +``` + +## Session Management + +### 1. Session Configuration + +```typescript +// lib/session-config.ts +export const SESSION_CONFIG = { + // Session duration + maxAge: 7 * 24 * 60 * 60, // 7 days in seconds + + // Token refresh threshold + refreshThreshold: 5 * 60, // Refresh if expires in 5 minutes + + // Automatic logout on inactivity + inactivityTimeout: 30 * 60, // 30 minutes + + // Remember me option + rememberMe: { + enabled: true, + duration: 30 * 24 * 60 * 60 // 30 days + } +}; +``` + +### 2. Session Monitoring Hook + +```typescript +// hooks/useSessionMonitor.ts +import { useAuth } from "@clerk/nextjs"; +import { useEffect, useRef } from "react"; + +export function useSessionMonitor() { + const { isSignedIn, signOut } = useAuth(); + const lastActivityRef = useRef(Date.now()); + const inactivityTimerRef = useRef(); + + const resetInactivityTimer = () => { + lastActivityRef.current = Date.now(); + + if (inactivityTimerRef.current) { + clearTimeout(inactivityTimerRef.current); + } + + inactivityTimerRef.current = setTimeout(() => { + if (isSignedIn) { + signOut(); + alert('You have been logged out due to inactivity.'); + } + }, SESSION_CONFIG.inactivityTimeout * 1000); + }; + + useEffect(() => { + if (!isSignedIn) return; + + const events = ['mousedown', 'mousemove', 'keypress', 'scroll', 'touchstart']; + + events.forEach(event => { + document.addEventListener(event, resetInactivityTimer, true); + }); + + resetInactivityTimer(); // Initialize timer + + return () => { + events.forEach(event => { + document.removeEventListener(event, resetInactivityTimer, true); + }); + + if (inactivityTimerRef.current) { + clearTimeout(inactivityTimerRef.current); + } + }; + }, [isSignedIn]); +} +``` + +### 3. Token Refresh Management + +```typescript +// hooks/useTokenRefresh.ts +import { useAuth } from "@clerk/nextjs"; +import { useEffect, useCallback } from "react"; + +export function useTokenRefresh() { + const { getToken, isSignedIn } = useAuth(); + + const checkTokenExpiry = useCallback(async () => { + if (!isSignedIn) return; + + try { + const token = await getToken({ template: "t2m-api" }); + if (!token) return; + + // Decode JWT to check expiry + const payload = JSON.parse(atob(token.split('.')[1])); + const expiryTime = payload.exp * 1000; // Convert to milliseconds + const currentTime = Date.now(); + const timeUntilExpiry = expiryTime - currentTime; + + // Refresh if token expires within threshold + if (timeUntilExpiry < SESSION_CONFIG.refreshThreshold * 1000) { + await getToken({ template: "t2m-api", skipCache: true }); + } + } catch (error) { + console.error('Token refresh failed:', error); + } + }, [getToken, isSignedIn]); + + useEffect(() => { + if (!isSignedIn) return; + + // Check token expiry every minute + const interval = setInterval(checkTokenExpiry, 60 * 1000); + + return () => clearInterval(interval); + }, [isSignedIn, checkTokenExpiry]); +} +``` + +## Security Best Practices + +### 1. 
Token Security + +```typescript +// Security guidelines for token handling + +// ✅ DO: Use secure token storage +const { getToken } = useAuth(); +const token = await getToken({ template: "t2m-api" }); + +// ❌ DON'T: Store tokens in localStorage or sessionStorage +localStorage.setItem('token', token); // NEVER DO THIS + +// ✅ DO: Use tokens for API calls only +const response = await fetch('/api/files', { + headers: { 'Authorization': `Bearer ${token}` } +}); + +// ❌ DON'T: Embed tokens in URLs or HTML +const videoUrl = `/video?token=${token}`; // NEVER DO THIS +``` + +### 2. CSRF Protection + +```typescript +// middleware.ts - CSRF protection +import { NextRequest, NextResponse } from "next/server"; + +export function middleware(request: NextRequest) { + // CSRF protection for state-changing operations + if (['POST', 'PUT', 'DELETE', 'PATCH'].includes(request.method)) { + const origin = request.headers.get('origin'); + const host = request.headers.get('host'); + + if (origin && !origin.includes(host!)) { + return NextResponse.json( + { error: 'CSRF protection: Invalid origin' }, + { status: 403 } + ); + } + } + + return NextResponse.next(); +} +``` + +### 3. Rate Limiting + +```typescript +// lib/rate-limiter.ts +import { Ratelimit } from "@upstash/ratelimit"; +import { Redis } from "@upstash/redis"; + +const redis = new Redis({ + url: process.env.UPSTASH_REDIS_REST_URL!, + token: process.env.UPSTASH_REDIS_REST_TOKEN!, +}); + +export const rateLimiter = new Ratelimit({ + redis, + limiter: Ratelimit.slidingWindow(100, "1 h"), // 100 requests per hour + analytics: true, +}); + +// Usage in API routes +export async function POST(request: NextRequest) { + const { userId } = auth(); + if (!userId) { + return NextResponse.json({ error: "Unauthorized" }, { status: 401 }); + } + + const { success, limit, reset, remaining } = await rateLimiter.limit(userId); + + if (!success) { + return NextResponse.json( + { error: "Rate limit exceeded" }, + { + status: 429, + headers: { + 'X-RateLimit-Limit': limit.toString(), + 'X-RateLimit-Remaining': remaining.toString(), + 'X-RateLimit-Reset': reset.toString(), + } + } + ); + } + + // Continue with request processing +} +``` + +## Implementation Examples + +### 1. Complete Authentication Flow + +```typescript +// app/dashboard/page.tsx +import { ProtectedRoute } from "@/components/auth/ProtectedRoute"; +import { useSessionMonitor } from "@/hooks/useSessionMonitor"; +import { useTokenRefresh } from "@/hooks/useTokenRefresh"; + +export default function DashboardPage() { + useSessionMonitor(); // Monitor for inactivity + useTokenRefresh(); // Handle token refresh + + return ( + +
+      <ProtectedRoute>
+        <div>
+          <h1>Welcome to T2M Dashboard</h1>
+          {/* Dashboard content */}
+        </div>
+      </ProtectedRoute>
+ ); +} +``` + +### 2. File Upload with Progress + +```typescript +// components/FileUpload.tsx +import { useApiClient } from "@/lib/api-client"; +import { useState } from "react"; + +export function FileUpload() { + const [uploading, setUploading] = useState(false); + const [progress, setProgress] = useState(0); + const apiClient = useApiClient(); + + const handleUpload = async (file: File) => { + setUploading(true); + setProgress(0); + + try { + // Create XMLHttpRequest for progress tracking + const formData = new FormData(); + formData.append('file', file); + + const token = await apiClient.getApiToken(); + + const xhr = new XMLHttpRequest(); + + xhr.upload.addEventListener('progress', (e) => { + if (e.lengthComputable) { + setProgress((e.loaded / e.total) * 100); + } + }); + + xhr.addEventListener('load', () => { + if (xhr.status === 200) { + const response = JSON.parse(xhr.responseText); + console.log('Upload successful:', response); + } + }); + + xhr.open('POST', '/api/files/upload'); + xhr.setRequestHeader('Authorization', `Bearer ${token}`); + xhr.send(formData); + + } catch (error) { + console.error('Upload failed:', error); + } finally { + setUploading(false); + } + }; + + return ( +
+      <div>
+        <input
+          type="file"
+          onChange={(e) => {
+            const file = e.target.files?.[0];
+            if (file) handleUpload(file);
+          }}
+          disabled={uploading}
+        />
+
+        {uploading && (
+          <div>
+            <span>{Math.round(progress)}%</span>
+          </div>
+        )}
+      </div>
+ ); +} +``` + +### 3. Video Generation with Real-time Updates + +```typescript +// components/VideoGenerator.tsx +import { useApiClient } from "@/lib/api-client"; +import { useState, useEffect } from "react"; + +export function VideoGenerator() { + const [prompt, setPrompt] = useState(""); + const [jobId, setJobId] = useState(null); + const [status, setStatus] = useState("idle"); + const [progress, setProgress] = useState(0); + const apiClient = useApiClient(); + + const generateVideo = async () => { + try { + setStatus("starting"); + + const response = await apiClient.generateVideo(prompt, { + duration: 10, + quality: "1080p" + }); + + setJobId(response.data.job_id); + setStatus("processing"); + } catch (error) { + console.error('Video generation failed:', error); + setStatus("error"); + } + }; + + // Poll for job status + useEffect(() => { + if (!jobId || status === "completed" || status === "error") return; + + const pollStatus = async () => { + try { + const response = await apiClient.getJobStatus(jobId); + setStatus(response.data.status); + setProgress(response.data.progress || 0); + } catch (error) { + console.error('Status check failed:', error); + } + }; + + const interval = setInterval(pollStatus, 2000); // Poll every 2 seconds + return () => clearInterval(interval); + }, [jobId, status, apiClient]); + + return ( +
+