Spaces:
Runtime error
Runtime error
# Standard library
import os
import random

# Third-party clients (Groq and OpenAI-compatible endpoints)
from gradio_client import Client
from groq import Groq
from openai import OpenAI
class VideoLLMInferenceNode:
    """Generate detailed, director-style video prompts from a short concept.

    Wraps three LLM providers behind one method: Hugging Face's serverless
    inference API and SambaNova (both OpenAI-compatible, reached through the
    OpenAI client), plus Groq via its native SDK.

    Credentials are read from the environment at construction time:
    HUGGINGFACE_TOKEN, GROQ_API_KEY, SAMBANOVA_API_KEY. Missing variables
    yield None keys; the failure then surfaces at request time.
    """

    def __init__(self):
        self.huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        self.sambanova_api_key = os.getenv("SAMBANOVA_API_KEY")

        # Hugging Face and SambaNova expose OpenAI-compatible endpoints,
        # so both reuse the OpenAI client with a custom base_url.
        self.huggingface_client = OpenAI(
            base_url="https://api-inference.huggingface.co/v1/",
            api_key=self.huggingface_token,
        )
        self.groq_client = Groq(api_key=self.groq_api_key)
        self.sambanova_client = OpenAI(
            api_key=self.sambanova_api_key,
            base_url="https://api.sambanova.ai/v1",
        )

    def generate_video_prompt(
        self,
        input_concept,
        duration,
        style,
        camera_style,
        pacing,
        special_effects,
        custom_elements,
        provider="Hugging Face",
        model=None,
    ):
        """Expand ``input_concept`` into a full, scene-by-scene video prompt.

        Args:
            input_concept: Core idea for the video, appended to the template.
            duration: Target length in seconds (interpolated into the template).
            style: Template selector ("cinematic", "documentary", "animation",
                "action", "experimental"; case-insensitive). Unknown styles
                fall back to "cinematic".
            camera_style: Camera-work description interpolated into the template.
            pacing: Pacing/rhythm description interpolated into the template.
            special_effects: Effects description interpolated into the template.
            custom_elements: Optional extra elements; falsy values are replaced
                by per-template defaults.
            provider: "Hugging Face", "Groq", or "SambaNova".
            model: Optional model override; each provider has a default.

        Returns:
            The generated prompt text (stripped), or an
            ``"Error generating video prompt: ..."`` string on any failure —
            this method never raises.
        """
        try:
            # Video prompt templates (all evaluated eagerly; cheap string work).
            prompt_templates = {
                "cinematic": f"""Create a detailed cinematic prompt for a {duration}-second video. Include:
- 3-5 distinct scenes with smooth transitions
- Camera movements: {camera_style}
- Lighting design for {style} style
- Special effects: {special_effects}
- Color grading and film grain details
- Pacing: {pacing}
- Add {custom_elements if custom_elements else 'unique atmospheric elements'}
Format: Timestamped scene descriptions with shot types and transition notes.""",
                "documentary": f"""Develop a documentary-style video prompt for {duration} seconds. Include:
- Interview setup with lighting and background
- B-roll sequences (3-5 locations)
- Archival footage integration
- Text overlay and info-graphics
- Narration style and tone
- {camera_style} camera work
- {pacing} rhythm for topic exploration
- {special_effects} for historical recreations""",
                "animation": f"""Create a {style} animation prompt for {duration} seconds. Specify:
- Animation technique (2D/3D/stop-motion)
- Key action sequences (3-5)
- Character design elements
- Background art style
- Motion blur and frame rate considerations
- Camera zooms/pans for {pacing} pacing
- Special effects: {special_effects}
- {custom_elements if custom_elements else 'unique stylistic flourishes'}""",
                "action": f"""Generate intense action sequence prompt ({duration} seconds). Include:
- 3 escalating action beats
- Camera angles for {style} impact
- Stunt choreography details
- Slow-motion/fast-cut ratios
- Explosion/sfx elements: {special_effects}
- Pacing structure: {pacing}
- {camera_style} camera movements
- Hero shot composition""",
                "experimental": f"""Design avant-garde video prompt ({duration} seconds) with:
- Unconventional narrative structure
- {style} visual treatments
- Abstract transitions between {random.randint(5,8)} concepts
- Experimental sound/image relationships
- {camera_style} capture techniques
- {special_effects} digital manipulations
- Pacing: {pacing} with {custom_elements if custom_elements else 'temporal distortions'}""",
            }

            # Unknown styles fall back to the cinematic template.
            base_prompt = prompt_templates.get(style.lower(), prompt_templates["cinematic"])

            system_message = """You are a professional video director and cinematography expert.
Generate rich, technical video prompts that include:
1. Scene-by-scene breakdowns with timestamps
2. Camera movements and lens specifications
3. Lighting setups and color palettes
4. Transition types and durations
5. Special effects implementation
6. Pacing and rhythm markers
7. Technical specifications when relevant"""

            # Select provider and a sensible default model for it.
            if provider == "Hugging Face":
                client = self.huggingface_client
                model = model or "meta-llama/Meta-Llama-3.1-70B-Instruct"
            elif provider == "Groq":
                client = self.groq_client
                model = model or "llama-3.1-70b-versatile"
            elif provider == "SambaNova":
                client = self.sambanova_client
                model = model or "Meta-Llama-3.1-70B-Instruct"
            else:
                # BUG FIX: previously an unknown provider left `client` and
                # `model` unbound, so the broad except below reported a
                # confusing NameError. Fail with an explicit message instead
                # (still returned as an error string by the handler).
                raise ValueError(f"Unsupported provider: {provider}")

            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": f"{base_prompt}\nCore Concept: {input_concept}"},
                ],
                temperature=1.2,
                max_tokens=1500,
                top_p=0.95,
                # Randomized seed keeps successive generations varied.
                seed=random.randint(0, 10000),
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            # Caller-facing contract: all failures come back as text, not
            # exceptions (suits the Gradio UI this feeds).
            return f"Error generating video prompt: {str(e)}"