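"""Gradio app that builds a TikTok-style content feed from a user concept.

Gemini generates video ideas and captions, Imagen renders a 9:16 image, and
Veo can optionally produce a short video clip. The GEMINI_API_KEY environment
variable must be set before launching.
"""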
import gradio as gr
from google import genai
from google.genai import types
from PIL import Image
from io import BytesIO
import base64
import os
import json
import random
import urllib.parse
import time

# Check Gradio version (compare numeric components, not raw strings)
required_version = "4.44.0"
current_version = gr.__version__

def _version_tuple(version):
    """Parse a dotted version string into a tuple of ints for comparison."""
    return tuple(int(part) for part in version.split(".") if part.isdigit())

if _version_tuple(current_version) < _version_tuple(required_version):
    raise ValueError(f"Gradio version {current_version} is outdated. Please upgrade to {required_version} or later using 'pip install gradio=={required_version}'.")

# Initialize the Google Generative AI client
try:
    api_key = os.environ['GEMINI_API_KEY']
except KeyError:
    raise ValueError("Please set the GEMINI_API_KEY environment variable.")
client = genai.Client(api_key=api_key)

# Safety settings to disable all filters
SAFETY_SETTINGS = [
    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=types.HarmBlockThreshold.BLOCK_NONE),
    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY, threshold=types.HarmBlockThreshold.BLOCK_NONE),
]

def clean_response_text(response_text):
    """Clean API response by removing Markdown code block markers."""
    cleaned_text = response_text.strip()
    if cleaned_text.startswith("```json"):
        cleaned_text = cleaned_text[len("```json"):].strip()
    if cleaned_text.endswith("```"):
        cleaned_text = cleaned_text[:-len("```")].strip()
    return cleaned_text

def generate_ideas(user_input):
    """Generate 5 creative TikTok video ideas based on user input.

    Yields (progress, message) tuples for the progress bar, then yields the
    final list of ideas as its last item.
    """
    yield (10, f"Brainstorming epic ideas for {user_input}...")
    prompt = f"""
    The user has provided the concept: "{user_input}". Generate 5 diverse and creative ideas for a TikTok video explicitly related to "{user_input}".
    Each idea should be a short sentence describing a specific scene or concept.
    Return the response as a JSON object with a key 'ideas' containing a list of 5 ideas.
    Example for "blindfolded Rubik's Cube challenge":
    {{"ideas": [
        "A blindfolded speedcubing competition with dramatic music",
        "A close-up of a person solving a Rubik's Cube blindfolded under a spotlight",
        "A time-lapse of a blindfolded Rubik's Cube solve with colorful lighting",
        "A blindfolded Rubik's Cube challenge in a futuristic setting",
        "A split-screen of two people racing to solve a Rubik's Cube blindfolded"
    ]}}
    """
    try:
        response = client.models.generate_content(
            model='gemini-2.0-flash-lite',
            contents=[prompt],
            config=types.GenerateContentConfig(temperature=1.2, safety_settings=SAFETY_SETTINGS)
        )
        cleaned_text = clean_response_text(response.text)
        response_json = json.loads(cleaned_text)
        if 'ideas' not in response_json or len(response_json['ideas']) != 5:
            raise ValueError("Invalid JSON format or incorrect number of ideas")
        ideas = response_json['ideas']
        yield (20, f"Ideas locked in for {user_input}!")
        # Yield (not return) the result so the consuming for-loop receives it.
        yield ideas
    except Exception as e:
        print(f"Error generating ideas: {e}")
        yield (20, f"Oops, tweaking the plan for {user_input}... 🔧")
        # Fallback ideas when the API call or JSON parsing fails.
        yield [
            f"A dramatic {user_input} scene with cinematic lighting",
            f"A close-up of {user_input} in a futuristic setting",
            f"A high-energy {user_input} moment with vibrant colors",
            f"A serene {user_input} scene with soft focus",
            f"An action-packed {user_input} challenge with dynamic angles"
        ]
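
# The feed-item generator below follows the same protocol as generate_ideas:
# it yields (progress, message) tuples for the progress bar and yields the
# final payload (a dict with 'text', 'image_base64', 'video_base64', and
# 'ideas') as its last item.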
def generate_item(user_input, ideas, generate_video=False):
    """Generate a feed item (image and optionally video) with progress updates."""
    video_base64 = None
    max_attempts = 3
    total_attempts = 0
    while total_attempts < max_attempts:
        total_attempts += 1
        yield (20 + total_attempts * 10, f"Attempt {total_attempts} to craft your {user_input} masterpiece... 🎨")
        selected_idea = random.choice(ideas)
        prompt = f"""
        The user has provided the concept: "{user_input}". Based on this and the idea "{selected_idea}", create content for a TikTok video.
        Return a JSON object with:
        - 'caption': A short, viral TikTok-style caption with hashtags reflecting "{user_input}".
        - 'image_prompt': A detailed image prompt for a high-quality visual scene, no text or letters.
        Example: {{"caption": "Blindfolded Rubik's Cube MAGIC! 🤯 #rubiks", "image_prompt": "A close-up view of a person solving a Rubik's Cube blindfolded, dramatic style"}}
        """
        try:
            response = client.models.generate_content(
                model='gemini-2.0-flash-lite',
                contents=[prompt],
                config=types.GenerateContentConfig(temperature=1.2, safety_settings=SAFETY_SETTINGS)
            )
            cleaned_text = clean_response_text(response.text)
            response_json = json.loads(cleaned_text)
            text = response_json['caption']
            image_prompt = response_json['image_prompt']
        except Exception as e:
            print(f"Error generating item: {e}")
            # Fallback caption and image prompt when the API call or parsing fails.
            text = f"Amazing {user_input}! 🔥 #{user_input.replace(' ', '')}"
            image_prompt = f"A vivid scene of {selected_idea} related to {user_input}, vibrant pop art style, no text"
        # Generate image
        try:
            yield (40, f"Rendering your {user_input} vision... ✨")
            imagen = client.models.generate_images(
                model='imagen-3.0-generate-002',
                prompt=image_prompt,
                config=types.GenerateImagesConfig(aspect_ratio="9:16", number_of_images=1)
            )
            if not imagen.generated_images:
                # Treat an empty result like a failure so the retry/placeholder path runs.
                raise ValueError("No image returned by the API")
            image = Image.open(BytesIO(imagen.generated_images[0].image.image_bytes))
            image = image.resize((360, 640), Image.LANCZOS)
            buffered = BytesIO()
            image.save(buffered, format="PNG")
            img_str = base64.b64encode(buffered.getvalue()).decode()
            yield (50, f"Image for {user_input} is ready!")
            break
        except Exception as e:
            print(f"Error generating image: {e}")
            if total_attempts == max_attempts:
                # Out of attempts: fall back to a gray placeholder image.
                image = Image.new('RGB', (360, 640), color='gray')
                buffered = BytesIO()
                image.save(buffered, format="PNG")
                img_str = base64.b64encode(buffered.getvalue()).decode()
                yield (60, f"Using a placeholder for {user_input}... 🖼️")
                yield {'text': text, 'image_base64': img_str, 'video_base64': None, 'ideas': ideas}
                return
            continue
    # Generate video if requested
    if generate_video:
        try:
            yield (60, f"Filming a viral video for {user_input}... 🎥")
            operation = client.models.generate_videos(
                model="veo-2.0-generate-001",
                prompt=f"A close-up slow dolly shot of {image_prompt}, realistic style",
                image=imagen.generated_images[0].image,
                config=types.GenerateVideosConfig(aspect_ratio="9:16", duration_seconds=8)
            )
            # Poll the long-running operation until the video is ready.
            while not operation.done:
                time.sleep(20)
                operation = client.operations.get(operation)
            if operation.response and operation.response.generated_videos:
                video_data = client.files.download(file=operation.response.generated_videos[0].video)
                video_bytes = video_data if isinstance(video_data, bytes) else BytesIO(video_data).getvalue()
                video_base64 = base64.b64encode(video_bytes).decode()
                yield (90, f"Video for {user_input} is a wrap! 🎬")
                yield {'text': text, 'image_base64': img_str, 'video_base64': video_base64, 'ideas': ideas}
                return
        except Exception as e:
            print(f"Error generating video: {e}")
            yield (70, f"Skipping video for {user_input}...")
    yield (95, f"Polishing your {user_input} masterpiece... ✨")
    yield {'text': text, 'image_base64': img_str, 'video_base64': video_base64, 'ideas': ideas}

def generate_progress_html(progress, message, user_input):
    """Generate HTML for the progress bar."""
    return f"""
    <div id="progress-container" style="
        display: flex; flex-direction: column; align-items: center; justify-content: center;
        max-width: 360px; margin: 0 auto; background-color: #000; height: 200px;
        border: 1px solid #333; border-radius: 10px; color: white; font-family: Arial, sans-serif;">
        <div id="loading-message" style="font-size: 18px; font-weight: bold; text-align: center; margin-bottom: 20px;">
            {message}
        </div>
        <div style="width: 80%; height: 10px; background-color: #333; border-radius: 5px; overflow: hidden;">
            <div id="progress-bar" style="width: {progress}%; height: 100%; background: linear-gradient(to right, #ff2d55, #ff5e78);"></div>
        </div>
        <div style="margin-top: 10px; font-size: 14px; color: #ccc;">{int(progress)}% Complete</div>
        <style>
            @keyframes pulse {{ 0% {{ opacity: 1; }} 50% {{ opacity: 0.5; }} 100% {{ opacity: 1; }} }}
            #loading-message {{ animation: pulse 2s infinite; }}
        </style>
    </div>
    """

def generate_html(feed_items, manual_upload, current_index, user_input, is_loading):
    """Generate HTML for the feed display."""
    if not feed_items or current_index >= len(feed_items):
        return "<div style='color: white; text-align: center;'>No content yet!</div>"
    item = feed_items[current_index]
    media_html = f'<img src="data:image/png;base64,{item["image_base64"]}" style="width: 360px; height: 640px; object-fit: cover;" />'
    if item['video_base64']:
        media_html += f'<video controls src="data:video/mp4;base64,{item["video_base64"]}" style="width: 360px; height: 640px;"></video>'
    return f"""
    <div style="max-width: 360px; margin: 0 auto; background-color: #000; border: 1px solid #333; border-radius: 10px; color: white;">
        {media_html}
        <p style="padding: 10px;">{item['text']}</p>
    </div>
    """

def generate_share_links(image_base64, video_base64, text):
    """Generate YouTube share links."""
    share_text = urllib.parse.quote(f"{text} Check out this cool content!")
    return f'<a href="https://www.youtube.com/upload?description={share_text}" target="_blank">Share on YouTube</a>'
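
# Each Gradio event handler below yields 7-tuples matching its registered
# outputs: (current_user_input, current_index, feed_items, feed_html,
# share_html, is_loading, progress_html).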
def start_feed(user_input, generate_video, current_index, feed_items):
    """Start or reset the feed with a new item."""
    user_input = user_input.strip() or "trending"
    current_user_input = user_input
    is_loading = True
    progress_html = generate_progress_html(0, f"Getting started with {user_input}...", user_input)
    yield (current_user_input, current_index, feed_items, gr.update(), gr.update(), is_loading, progress_html)
    try:
        ideas_gen = generate_ideas(user_input)
        ideas = None
        for update in ideas_gen:
            if isinstance(update, tuple):
                progress, message = update
                progress_html = generate_progress_html(progress, message, user_input)
                yield (gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), is_loading, progress_html)
            else:
                # The final non-tuple item yielded by generate_ideas is the idea list.
                ideas = update
        item_gen = generate_item(user_input, ideas, generate_video)
        item = None
        for update in item_gen:
            if isinstance(update, tuple):
                progress, message = update
                progress_html = generate_progress_html(progress, message, user_input)
                yield (gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), is_loading, progress_html)
            else:
                item = update
        feed_items = [item]
        current_index = 0
        feed_html = generate_html(feed_items, False, current_index, user_input, is_loading=False)
        share_html = generate_share_links(item['image_base64'], item['video_base64'], item['text'])
        yield (current_user_input, current_index, feed_items, feed_html, share_html, False, "")
    except Exception as e:
        print(f"Error in start_feed: {e}")
        feed_html = "<div style='color: white; text-align: center;'>Error generating content. Try again!</div>"
        yield (current_user_input, current_index, feed_items, feed_html, "", False, generate_progress_html(100, "Oops, something went wrong!", user_input))

def load_next(user_input, generate_video, current_index, feed_items):
    """Load the next item in the feed."""
    current_user_input = user_input.strip() or "trending"
    is_loading = True
    progress_html = generate_progress_html(0, f"Loading next {current_user_input} vibe...", current_user_input)
    yield (current_user_input, current_index, feed_items, gr.update(), gr.update(), is_loading, progress_html)
    try:
        if current_index + 1 < len(feed_items):
            # An item further along the feed already exists; just move forward.
            current_index += 1
            progress_html = generate_progress_html(50, f"Switching to the next {current_user_input} moment...", current_user_input)
            yield (gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), is_loading, progress_html)
        else:
            # Reuse the ideas stored on the last item, or generate a fresh set.
            ideas = feed_items[-1]['ideas'] if feed_items else None
            if not ideas:
                ideas_gen = generate_ideas(current_user_input)
                for update in ideas_gen:
                    if isinstance(update, tuple):
                        progress, message = update
                        progress_html = generate_progress_html(progress, message, current_user_input)
                        yield (gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), is_loading, progress_html)
                    else:
                        ideas = update
            item_gen = generate_item(current_user_input, ideas, generate_video)
            for update in item_gen:
                if isinstance(update, tuple):
                    progress, message = update
                    progress_html = generate_progress_html(progress, message, current_user_input)
                    yield (gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), is_loading, progress_html)
                else:
                    feed_items.append(update)
                    current_index = len(feed_items) - 1
        feed_html = generate_html(feed_items, False, current_index, current_user_input, is_loading=False)
        share_html = generate_share_links(feed_items[current_index]['image_base64'], feed_items[current_index]['video_base64'], feed_items[current_index]['text'])
        yield (current_user_input, current_index, feed_items, feed_html, share_html, False, "")
    except Exception as e:
        print(f"Error in load_next: {e}")
        feed_html = "<div style='color: white; text-align: center;'>Error generating content. Try again!</div>"
        yield (current_user_input, current_index, feed_items, feed_html, "", False, generate_progress_html(100, "Oops, something went wrong!", current_user_input))

def load_previous(user_input, generate_video, current_index, feed_items):
    """Load the previous item in the feed."""
    current_user_input = user_input.strip() or "trending"
    if current_index > 0:
        current_index -= 1
    feed_html = generate_html(feed_items, False, current_index, current_user_input, is_loading=False)
    share_html = ""
    if feed_items and current_index < len(feed_items):
        item = feed_items[current_index]
        share_html = generate_share_links(item['image_base64'], item['video_base64'], item['text'])
    # Always return a full 7-tuple so Gradio's outputs stay consistent, even
    # when there is no earlier item to go back to.
    return current_user_input, current_index, feed_items, feed_html, share_html, False, ""

# Gradio Interface
with gr.Blocks(css="""
    body { background-color: #000; color: #fff; font-family: Arial, sans-serif; }
    .gradio-container { max-width: 400px; margin: 0 auto; padding: 10px; }
    input, button { border-radius: 5px; background-color: #222; color: #fff; border: 1px solid #444; }
    button { background-color: #ff2d55; }
    button:hover { background-color: #e0264b; }
    .gr-button { width: 100%; margin-top: 10px; }
    .gr-form { background-color: #111; padding: 15px; border-radius: 10px; }
""", title="Create Your Feed") as demo:
    current_user_input = gr.State(value="")
    current_index = gr.State(value=0)
    feed_items = gr.State(value=[])
    is_loading = gr.State(value=False)
    with gr.Column(elem_classes="gr-form"):
        gr.Markdown("### Create Your Feed")
        user_input = gr.Textbox(label="Enter Concept or Ideas", placeholder="e.g., jogging in gardens by the bay")
        generate_video_checkbox = gr.Checkbox(label="Generate Video (may take longer)", value=False)
        magic_button = gr.Button("✨ Generate Next Item", elem_classes="gr-button")
        progress_html = gr.HTML(label="Progress")
        feed_html = gr.HTML()
        share_html = gr.HTML(label="Share this item:")
    user_input.submit(start_feed, [user_input, generate_video_checkbox, current_index, feed_items],
                      [current_user_input, current_index, feed_items, feed_html, share_html, is_loading, progress_html])
    magic_button.click(load_next, [user_input, generate_video_checkbox, current_index, feed_items],
                       [current_user_input, current_index, feed_items, feed_html, share_html, is_loading, progress_html])
    previous_button = gr.Button("Previous", visible=False)
    previous_button.click(load_previous,
                          [user_input, generate_video_checkbox, current_index, feed_items],
                          [current_user_input, current_index, feed_items, feed_html, share_html, is_loading, progress_html])

demo.launch()