import gradio as gr from google import genai from google.genai import types from PIL import Image from io import BytesIO import base64 import os import json import random import urllib.parse import time # Initialize the Google Generative AI client with the API key from environment variables try: api_key = os.environ['GEMINI_API_KEY'] except KeyError: raise ValueError("Please set the GEMINI_API_KEY environment variable.") client = genai.Client(api_key=api_key) def clean_response_text(response_text): """ Clean the API response by removing Markdown code block markers. Args: response_text (str): The raw response text from the API. Returns: str: The cleaned response text. """ cleaned_text = response_text.strip() if cleaned_text.startswith("```json"): cleaned_text = cleaned_text[len("```json"):].strip() if cleaned_text.endswith("```"): cleaned_text = cleaned_text[:-len("```")].strip() return cleaned_text def generate_ideas(user_input): """ Generate a diverse set of ideas based on the user's input concept using the LLM. Args: user_input (str): The user's input concept or idea (e.g., "blindfolded Rubik's Cube challenge"). Returns: list: A list of ideas as strings. """ prompt = f""" The user has provided the concept: "{user_input}". Based on this concept, generate a list of 5 diverse and creative ideas for a TikTok video that directly relate to "{user_input}". Each idea should be a short sentence describing a specific scene or concept, ensuring the core theme of "{user_input}" is central to each idea. Return the response as a JSON object with a single key 'ideas' containing a list of 5 ideas. Ensure the response is strictly in JSON format. Example: {{"ideas": ["A neon-lit Rubik's Cube challenge in the dark", "A blindfolded speedcubing competition with dramatic music"]}} """ try: response = client.models.generate_content( model='gemini-2.0-flash', contents=[prompt], config=types.GenerateContentConfig(temperature=1.2) ) print(f"Raw response for ideas: {response.text}") # Debugging if not response.text or response.text.isspace(): raise ValueError("Empty response from API") cleaned_text = clean_response_text(response.text) response_json = json.loads(cleaned_text) if 'ideas' not in response_json or not isinstance(response_json['ideas'], list): raise ValueError("Invalid JSON format: 'ideas' key missing or not a list") return response_json['ideas'] except Exception as e: print(f"Error generating ideas: {e}") return [ f"A dramatic {user_input} scene with cinematic lighting", f"A close-up of {user_input} in a futuristic setting", f"A high-energy {user_input} moment with vibrant colors", f"A serene {user_input} scene with soft focus", f"An action-packed {user_input} challenge with dynamic angles" ] def generate_item(user_input, ideas, generate_video=False, max_retries=3): """ Generate a single feed item (image and optionally video) using one of the ideas. Args: user_input (str): The user's input concept or idea. ideas (list): List of ideas to choose from. generate_video (bool): Whether to generate a video from the image. max_retries (int): Maximum number of retries if image generation fails. Returns: dict: A dictionary with 'text' (str), 'image_base64' (str), 'video_base64_list' (list of str), and 'ideas' (list). """ video_base64_list = [] for attempt in range(max_retries): selected_idea = random.choice(ideas) prompt = f""" The user has provided the concept: "{user_input}". Based on this concept and the specific idea "{selected_idea}", create content for a TikTok video. Return a JSON object with two keys: - 'caption': A short, viral TikTok-style caption with hashtags that reflects "{user_input}". - 'image_prompt': A detailed image prompt for generating a high-quality visual scene, ensuring the theme of "{user_input}" is central. The image prompt should describe the scene vividly, specify a perspective and style, and ensure no text or letters are included. Ensure the response is strictly in JSON format. Example: {{"caption": "Blindfolded Rubik's Cube MAGIC! 🤯 #rubiks", "image_prompt": "A close-up view of a person solving a Rubik's Cube blindfolded, in a dramatic style, no text or letters"}} """ try: response = client.models.generate_content( model='gemini-2.0-flash', contents=[prompt], config=types.GenerateContentConfig(temperature=1.2) ) print(f"Raw response for item (attempt {attempt + 1}): {response.text}") # Debugging if not response.text or response.text.isspace(): raise ValueError("Empty response from API") cleaned_text = clean_response_text(response.text) response_json = json.loads(cleaned_text) if 'caption' not in response_json or 'image_prompt' not in response_json: raise ValueError("Invalid JSON format: 'caption' or 'image_prompt' key missing") text = response_json['caption'] image_prompt = response_json['image_prompt'] except Exception as e: print(f"Error generating item (attempt {attempt + 1}): {e}") text = f"Amazing {user_input}! 🔥 #{user_input.replace(' ', '')}" image_prompt = f"A vivid scene of {selected_idea} related to {user_input}, in a vibrant pop art style, no text or letters" # Attempt to generate the image try: imagen = client.models.generate_images( model='imagen-3.0-generate-002', prompt=image_prompt, config=types.GenerateImagesConfig( aspect_ratio="9:16", number_of_images=1 ) ) if imagen.generated_images and len(imagen.generated_images) > 0: generated_image = imagen.generated_images[0] image = Image.open(BytesIO(generated_image.image.image_bytes)) # Ensure the image matches the desired aspect ratio (9:16 = 0.5625) target_width = 360 target_height = int(target_width / 9 * 16) # 9:16 aspect ratio image = image.resize((target_width, target_height), Image.LANCZOS) # Convert image to base64 buffered = BytesIO() image.save(buffered, format="PNG") img_str = base64.b64encode(buffered.getvalue()).decode() # Generate video if enabled if generate_video: try: # Enhance the image prompt for video generation video_prompt = f""" The user concept is "{user_input}". Based on this and the scene: {image_prompt}, create a video. Use a close-up shot with a slow dolly shot circling around the subject, using shallow focus on the main subject to emphasize details, in a realistic style with cinematic lighting. """ operation = client.models.generate_videos( model="veo-2.0-generate-001", prompt=video_prompt, image=generated_image.image, config=types.GenerateVideosConfig( aspect_ratio="9:16", number_of_videos=2, duration_seconds=8, negative_prompt="blurry, low quality, text, letters", enhance_prompt=True ) ) # Wait for videos to generate while not operation.done: time.sleep(20) operation = client.operations.get(operation) # Check if operation succeeded and has a valid response if operation.response is None: raise ValueError("Video generation operation failed: No response") if not hasattr(operation.response, 'generated_videos'): raise ValueError("Video generation operation failed: No generated_videos in response") for n, video in enumerate(operation.response.generated_videos): fname = f'with_image_input{n}.mp4' print(f"Generated video: {fname}") client.files.download(file=video.video) video_buffer = BytesIO() video.video.save(video_buffer) video_base64 = base64.b64encode(video_buffer.getvalue()).decode() video_base64_list.append(video_base64) except Exception as e: print(f"Error generating video: {e}") video_base64_list = [] # Proceed without video if generation fails return { 'text': text, 'image_base64': img_str, 'video_base64_list': video_base64_list, 'ideas': ideas } else: print(f"Image generation failed (attempt {attempt + 1}): No images returned") if attempt == max_retries - 1: # Last attempt, use a gray placeholder image = Image.new('RGB', (360, 640), color='gray') buffered = BytesIO() image.save(buffered, format="PNG") img_str = base64.b64encode(buffered.getvalue()).decode() return { 'text': text, 'image_base64': img_str, 'video_base64_list': [], 'ideas': ideas } # Retry with new ideas ideas = generate_ideas(user_input) continue except Exception as e: print(f"Error generating image (attempt {attempt + 1}): {e}") if attempt == max_retries - 1: # Last attempt, use a gray placeholder image = Image.new('RGB', (360, 640), color='gray') buffered = BytesIO() image.save(buffered, format="PNG") img_str = base64.b64encode(buffered.getvalue()).decode() return { 'text': text, 'image_base64': img_str, 'video_base64_list': [], 'ideas': ideas } # Retry with new ideas ideas = generate_ideas(user_input) continue def start_feed(user_input, generate_video, current_index, feed_items): """ Start or update the feed based on the user input. Args: user_input (str): The user's input concept or idea. generate_video (bool): Whether to generate a video. current_index (int): The current item index. feed_items (list): The current list of feed items. Returns: tuple: (current_user_input, current_index, feed_items, html_content, share_links, is_loading) """ if not user_input.strip(): user_input = "trending" # Set loading state is_loading = True html_content = generate_html([], False, 0, user_input, is_loading) share_links = "" try: ideas = generate_ideas(user_input) item = generate_item(user_input, ideas, generate_video=generate_video) feed_items = [item] current_index = 0 share_links = generate_share_links( item['image_base64'], item['video_base64_list'], item['text'] ) except Exception as e: print(f"Error in start_feed: {e}") feed_items = [] current_index = 0 html_content = """

Error generating content. Please try again!

""" is_loading = False return user_input, current_index, feed_items, html_content, share_links, is_loading # Set loading state to False and update UI is_loading = False html_content = generate_html(feed_items, False, current_index, user_input, is_loading) return user_input, current_index, feed_items, html_content, share_links, is_loading def load_next(user_input, generate_video, current_index, feed_items): """ Load the next item in the feed. Args: user_input (str): The user's input concept or idea. generate_video (bool): Whether to generate a video. current_index (int): The current item index. feed_items (list): The current list of feed items. Returns: tuple: (current_user_input, current_index, feed_items, html_content, share_links, is_loading) """ is_loading = True html_content = generate_html(feed_items, False, current_index, user_input, is_loading) share_links = "" try: if current_index + 1 < len(feed_items): current_index += 1 else: ideas = feed_items[-1]['ideas'] if feed_items else generate_ideas(user_input) new_item = generate_item(user_input, ideas, generate_video=generate_video) feed_items.append(new_item) current_index = len(feed_items) - 1 share_links = generate_share_links( feed_items[current_index]['image_base64'], feed_items[current_index]['video_base64_list'], feed_items[current_index]['text'] ) except Exception as e: print(f"Error in load_next: {e}") html_content = """

Error generating content. Please try again!

""" is_loading = False return user_input, current_index, feed_items, html_content, share_links, is_loading is_loading = False html_content = generate_html(feed_items, False, current_index, user_input, is_loading) return user_input, current_index, feed_items, html_content, share_links, is_loading def load_previous(user_input, generate_video, current_index, feed_items): """ Load the previous item in the feed. Args: user_input (str): The user's input concept or idea. generate_video (bool): Whether to generate a video (not used here). current_index (int): The current item index. feed_items (list): The current list of feed items. Returns: tuple: (current_user_input, current_index, feed_items, html_content, share_links, is_loading) """ if current_index > 0: current_index -= 1 html_content = generate_html(feed_items, False, current_index, user_input, False) share_links = generate_share_links( feed_items[current_index]['image_base64'], feed_items[current_index]['video_base64_list'], feed_items[current_index]['text'] ) return user_input, current_index, feed_items, html_content, share_links, False def generate_share_links(image_base64, video_base64_list, caption): """ Generate share links for social media platforms with download links for image and video. Args: image_base64 (str): The base64-encoded image data. video_base64_list (list): List of base64-encoded video data. caption (str): The caption to share. Returns: str: HTML string with share links and download instructions. """ image_data_url = f"data:image/png;base64,{image_base64}" encoded_caption = urllib.parse.quote(caption) # Generate download links for image and videos download_links = f"""

Download and attach the image/video to share:

Download Image """ for i, video_base64 in enumerate(video_base64_list): video_data_url = f"data:video/mp4;base64,{video_base64}" download_links += f""" Download Video {i+1} """ # Generate share links using only the caption share_links = f"""
{download_links}
Share on TikTok Share on Instagram Share on Facebook Share on X Share on Pinterest
""" return share_links def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_input="", is_loading=False): """ Generate an HTML string to display the current feed item with click navigation. Args: feed_items (list): List of dictionaries containing 'text' and 'image_base64'. scroll_to_latest (bool): Whether to auto-scroll to the latest item (not used here). current_index (int): The index of the item to display. user_input (str): The user's input concept or idea for loading messages. is_loading (bool): Whether the feed is currently loading. Returns: str: HTML string representing the feed. """ loading_messages = [ f"Cooking up a {user_input} masterpiece... 🍳", f"Snapping a vibrant {user_input} moment... 📸", f"Creating a {user_input} vibe that pops... ✨", f"Getting that perfect {user_input} shot... 🎥", f"Bringing {user_input} to life... 🌟" ] if is_loading: return f"""
{loading_messages[0]}
""" if not feed_items or current_index >= len(feed_items): return """

Enter a concept or idea to start your feed!

""" item = feed_items[current_index] html_str = """
{text}
""".format(image_base64=item['image_base64'], text=item['text']) return html_str # Define the Gradio interface with gr.Blocks( css=""" body { background-color: #000; color: #fff; font-family: Arial, sans-serif; } .gradio-container { max-width: 400px; margin: 0 auto; padding: 10px; } input, select, button, .gr-checkbox { border-radius: 5px; background-color: #222; color: #fff; border: 1px solid #444; } button { background-color: #ff2d55; border: none; } button:hover { background-color: #e0264b; } .gr-button { width: 100%; margin-top: 10px; } .gr-form { background-color: #111; padding: 15px; border-radius: 10px; } """, title="Create Your Feed" ) as demo: # State variables current_user_input = gr.State(value="") current_index = gr.State(value=0) feed_items = gr.State(value=[]) is_loading = gr.State(value=False) share_links = gr.State(value="") # Input section with gr.Column(elem_classes="gr-form"): gr.Markdown("### Create Your Feed") user_input = gr.Textbox( label="Enter Concept or Ideas", value="", placeholder="e.g., sushi adventure, neon tech", submit_btn=False ) generate_video_checkbox = gr.Checkbox( label="Generate Video (may take longer)", value=False ) magic_button = gr.Button("✨ Generate Next Item", elem_classes="gr-button") # Output display feed_html = gr.HTML() share_html = gr.HTML(label="Share this item:") # Event handlers # Handle Enter keypress in the concept input user_input.submit( fn=start_feed, inputs=[user_input, generate_video_checkbox, current_index, feed_items], outputs=[current_user_input, current_index, feed_items, feed_html, share_html, is_loading] ) # Handle magic button click to generate next item magic_button.click( fn=load_next, inputs=[current_user_input, generate_video_checkbox, current_index, feed_items], outputs=[current_user_input, current_index, feed_items, feed_html, share_html, is_loading] ) # Hidden button for previous item navigation previous_button = gr.Button("Previous", elem_id="previous-button", visible=False) # Handle click to go to previous item previous_button.click( fn=load_previous, inputs=[current_user_input, generate_video_checkbox, current_index, feed_items], outputs=[current_user_input, current_index, feed_items, feed_html, share_html, is_loading] ) # Launch the app demo.launch()