import os
import re
import math
import random
import shutil
import tempfile

import requests
import numpy as np
import cv2
import soundfile as sf
import gradio as gr
from bs4 import BeautifulSoup
from urllib.parse import quote
from PIL import Image
from pydub import AudioSegment
from gtts import gTTS
from kokoro import KPipeline
import moviepy.config as mpy_config
import moviepy.video.fx.all as vfx
from moviepy.editor import (
    VideoFileClip, concatenate_videoclips, concatenate_audioclips,
    AudioFileClip, ImageClip, CompositeVideoClip, TextClip, CompositeAudioClip
)
# Initialize the Kokoro TTS pipeline ('a' selects the American English voice set)
pipeline = KPipeline(lang_code='a')
# Point MoviePy's TextClip rendering at the ImageMagick binary
mpy_config.change_settings({"IMAGEMAGICK_BINARY": "/usr/bin/convert"})
# Global Configuration
# Read API keys from the environment instead of hardcoding secrets in source
PEXELS_API_KEY = os.environ.get("PEXELS_API_KEY", "")
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
OPENROUTER_MODEL = "mistralai/mistral-small-3.1-24b-instruct:free"
TEMP_FOLDER = "temp_video_processing"
OUTPUT_VIDEO_FILENAME = "final_video.mp4"
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
# Helper Functions
def generate_script(user_input):
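    """Ask the OpenRouter chat API to turn the user's topic into a bracketed script; returns the script text, or None on failure."""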
headers = {
'Authorization': f'Bearer {OPENROUTER_API_KEY}',
'HTTP-Referer': 'https://your-domain.com',
'X-Title': 'AI Documentary Maker'
}
prompt = f"""Short Documentary Script GeneratorInstructions:
If I say "use this," just output the script exactly as I gave it.
If I only give topics, generate a script based on them.
If I provide a full script, rewrite it without any changes. Make everything short simple and humarous funny and act as serious but humarous. And don't say anything off topic. Also alway say a funny statement to subscribe based on the video topic at the end. Use normal conversational text like a normal person talking and avoid AI phase make the statements humanize and normal conversational
And always talk negative views don't really wants any positive statement and the full script will be one video about one topic so keep in mind
Formatting Rules:
Title in Square Brackets:
Each section starts with a one-word title inside [ ] (max two words if necessary).
This title will be used as a search term for Pexels footage.
Casual & Funny Narration:
Each section has 5-10 words of narration.
Keep it natural, funny, and unpredictable (not robotic, poetic, or rhythmic).
No Special Formatting:
No bold, italics, or special characters. You are a assistant AI your task is to create script. You aren't a chatbot. So, don't write extra text
Generalized Search Terms:
If a term is too specific, make it more general for Pexels search.
Scene-Specific Writing:
Each section describes only what should be shown in the video.
Output Only the Script, and also make it funny and humarous and helirous and also add to subscribe with a funny statement like subscribe now or .....
No extra text, just the script.
Example Output:
[North Korea]
Top 5 unknown facts about North Korea.
[Invisibility]
North Korea’s internet speed is so fast… it doesn’t exist.
[Leadership]
Kim Jong-un once won an election with 100% votes… against himself.
[Magic]
North Korea discovered time travel. That’s why their news is always from the past.
[Warning]
Subscribe now, or Kim Jong-un will send you a free one-way ticket… to North Korea.
[Freedom]
North Korean citizens can do anything… as long as it's government-approved.
Now here is the Topic/script: {user_input}
"""
data = {
'model': OPENROUTER_MODEL,
'messages': [{'role': 'user', 'content': prompt}],
'temperature': 0.4,
'max_tokens': 5000
}
try:
response = requests.post(
'https://openrouter.ai/api/v1/chat/completions',
headers=headers,
json=data,
timeout=30
)
if response.status_code == 200:
response_data = response.json()
if 'choices' in response_data and len(response_data['choices']) > 0:
return response_data['choices'][0]['message']['content']
return None
except Exception:
return None
def parse_script(script_text):
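    """Parse the bracketed script into alternating media/tts elements, one pair per [Title] section."""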
sections = {}
current_title = None
current_text = ""
try:
for line in script_text.splitlines():
line = line.strip()
if line.startswith("[") and "]" in line:
bracket_start = line.find("[")
bracket_end = line.find("]", bracket_start)
if bracket_start != -1 and bracket_end != -1:
if current_title is not None:
sections[current_title] = current_text.strip()
current_title = line[bracket_start+1:bracket_end]
current_text = line[bracket_end+1:].strip()
elif current_title:
current_text += line + " "
if current_title:
sections[current_title] = current_text.strip()
elements = []
for title, narration in sections.items():
if not title or not narration:
continue
media_element = {"type": "media", "prompt": title, "effects": "fade-in"}
words = narration.split()
duration = max(3, len(words) * 0.5)
tts_element = {"type": "tts", "text": narration, "voice": "en", "duration": duration}
elements.append(media_element)
elements.append(tts_element)
return elements
except Exception:
return []
def search_pexels_videos(query, pexels_api_key):
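    """Search the Pexels video API across a few pages and return one random HD video link, or None."""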
headers = {'Authorization': pexels_api_key}
base_url = "https://api.pexels.com/videos/search"
num_pages = 3
videos_per_page = 15
all_videos = []
for page in range(1, num_pages + 1):
try:
params = {"query": query, "per_page": videos_per_page, "page": page}
response = requests.get(base_url, headers=headers, params=params, timeout=10)
if response.status_code == 200:
data = response.json()
videos = data.get("videos", [])
for video in videos:
video_files = video.get("video_files", [])
for file in video_files:
if file.get("quality") == "hd":
all_videos.append(file.get("link"))
break
except Exception:
continue
return random.choice(all_videos) if all_videos else None
def search_pexels_images(query, pexels_api_key):
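    """Search the Pexels photo API and return one random landscape image URL, or None."""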
headers = {'Authorization': pexels_api_key}
url = "https://api.pexels.com/v1/search"
params = {"query": query, "per_page": 5, "orientation": "landscape"}
try:
response = requests.get(url, headers=headers, params=params, timeout=10)
if response.status_code == 200:
data = response.json()
photos = data.get("photos", [])
if photos:
photo = random.choice(photos[:min(5, len(photos))])
return photo.get("src", {}).get("original")
return None
except Exception:
return None
def search_google_images(query):
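    """Scrape Google Images results for thumbnail URLs and return one at random, or None."""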
try:
search_url = f"https://www.google.com/search?q={quote(query)}&tbm=isch"
headers = {"User-Agent": USER_AGENT}
response = requests.get(search_url, headers=headers, timeout=10)
soup = BeautifulSoup(response.text, "html.parser")
img_tags = soup.find_all("img")
image_urls = [img.get("src", "") for img in img_tags if img.get("src", "").startswith("http") and "gstatic" not in img.get("src", "")]
return random.choice(image_urls[:5]) if image_urls else None
except Exception:
return None
def download_image(image_url, filename):
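    """Download an image, verify it opens with PIL, and normalize it to RGB; returns the path, or None on failure."""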
try:
headers = {"User-Agent": USER_AGENT}
response = requests.get(image_url, headers=headers, stream=True, timeout=15)
response.raise_for_status()
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
img = Image.open(filename)
img.verify()
img = Image.open(filename)
if img.mode != 'RGB':
img = img.convert('RGB')
img.save(filename)
return filename
except Exception:
if os.path.exists(filename):
os.remove(filename)
return None
def download_video(video_url, filename):
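    """Stream a video download to disk; returns the path, or None on failure."""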
try:
response = requests.get(video_url, stream=True, timeout=30)
response.raise_for_status()
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
return filename
except Exception:
if os.path.exists(filename):
os.remove(filename)
return None
def generate_media(prompt, current_index=0, total_segments=1):
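    """Fetch a media asset for the prompt: Google Images for news topics, otherwise Pexels video or photo, with generic fallback search terms."""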
safe_prompt = re.sub(r'[^\w\s-]', '', prompt).strip().replace(' ', '_')
if "news" in prompt.lower():
image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_news.jpg")
image_url = search_google_images(prompt)
if image_url and download_image(image_url, image_file):
return {"path": image_file, "asset_type": "image"}
if random.random() < 0.25:
video_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}_video.mp4")
video_url = search_pexels_videos(prompt, PEXELS_API_KEY)
if video_url and download_video(video_url, video_file):
return {"path": video_file, "asset_type": "video"}
image_file = os.path.join(TEMP_FOLDER, f"{safe_prompt}.jpg")
image_url = search_pexels_images(prompt, PEXELS_API_KEY)
if image_url and download_image(image_url, image_file):
return {"path": image_file, "asset_type": "image"}
fallback_terms = ["nature", "people", "landscape", "technology", "business"]
for term in fallback_terms:
fallback_file = os.path.join(TEMP_FOLDER, f"fallback_{term}.jpg")
fallback_url = search_pexels_images(term, PEXELS_API_KEY)
if fallback_url and download_image(fallback_url, fallback_file):
return {"path": fallback_file, "asset_type": "image"}
return None
def generate_tts(text, voice):
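    """Synthesize narration with Kokoro, falling back to gTTS and finally to silence; returns a 24 kHz WAV path."""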
safe_text = re.sub(r'[^\w\s-]', '', text[:10]).strip().replace(' ', '')
file_path = os.path.join(TEMP_FOLDER, f"tts{safe_text}.wav")
if os.path.exists(file_path):
return file_path
try:
kokoro_voice = 'af_heart' if voice == 'en' else voice
generator = pipeline(text, voice=kokoro_voice, speed=0.9, split_pattern=r'\n+')
audio_segments = [audio for _, _, audio in generator]
full_audio = np.concatenate(audio_segments) if len(audio_segments) > 1 else audio_segments[0]
sf.write(file_path, full_audio, 24000)
return file_path
except Exception:
try:
tts = gTTS(text=text, lang='en')
mp3_path = os.path.join(TEMP_FOLDER, f"tts_{safe_text}.mp3")
tts.save(mp3_path)
audio = AudioSegment.from_mp3(mp3_path)
audio.export(file_path, format="wav")
os.remove(mp3_path)
return file_path
except Exception:
num_samples = int(max(3, len(text.split()) * 0.5) * 24000)
silence = np.zeros(num_samples, dtype=np.float32)
sf.write(file_path, silence, 24000)
return file_path
def apply_kenburns_effect(clip, target_resolution, effect_type=None):
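    """Apply a randomized Ken Burns (slow zoom/pan) effect by cropping an oversized frame along an eased path."""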
target_w, target_h = target_resolution
clip_aspect = clip.w / clip.h
target_aspect = target_w / target_h
if clip_aspect > target_aspect:
new_height = target_h
new_width = int(new_height * clip_aspect)
else:
new_width = target_w
new_height = int(new_width / clip_aspect)
clip = clip.resize(newsize=(new_width, new_height))
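    # Overscan by 15% so the moving crop window never reveals the frame edge.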
base_scale = 1.15
new_width = int(new_width * base_scale)
new_height = int(new_height * base_scale)
clip = clip.resize(newsize=(new_width, new_height))
max_offset_x = new_width - target_w
max_offset_y = new_height - target_h
available_effects = ["zoom-in", "zoom-out", "pan-left", "pan-right", "up-left"]
effect_type = random.choice(available_effects) if not effect_type or effect_type == "random" else effect_type
if effect_type == "zoom-in":
start_zoom, end_zoom = 0.9, 1.1
start_center = end_center = (new_width / 2, new_height / 2)
elif effect_type == "zoom-out":
start_zoom, end_zoom = 1.1, 0.9
start_center = end_center = (new_width / 2, new_height / 2)
elif effect_type == "pan-left":
start_zoom = end_zoom = 1.0
start_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
end_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
elif effect_type == "pan-right":
start_zoom = end_zoom = 1.0
start_center = (target_w / 2, (max_offset_y // 2) + target_h / 2)
end_center = (max_offset_x + target_w / 2, (max_offset_y // 2) + target_h / 2)
elif effect_type == "up-left":
start_zoom = end_zoom = 1.0
start_center = (max_offset_x + target_w / 2, max_offset_y + target_h / 2)
end_center = (target_w / 2, target_h / 2)
else:
raise ValueError(f"Unsupported effect_type: {effect_type}")
def transform_frame(get_frame, t):
frame = get_frame(t)
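        # Cosine easing: ratio sweeps smoothly from 0 to 1 over the clip's duration.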
ratio = 0.5 - 0.5 * math.cos(math.pi * t / clip.duration) if clip.duration > 0 else 0
current_zoom = start_zoom + (end_zoom - start_zoom) * ratio
crop_w, crop_h = int(target_w / current_zoom), int(target_h / current_zoom)
current_center_x = start_center[0] + (end_center[0] - start_center[0]) * ratio
current_center_y = start_center[1] + (end_center[1] - start_center[1]) * ratio
min_center_x, max_center_x = crop_w / 2, new_width - crop_w / 2
min_center_y, max_center_y = crop_h / 2, new_height - crop_h / 2
current_center_x = max(min_center_x, min(current_center_x, max_center_x))
current_center_y = max(min_center_y, min(current_center_y, max_center_y))
cropped_frame = cv2.getRectSubPix(frame, (crop_w, crop_h), (current_center_x, current_center_y))
return cv2.resize(cropped_frame, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)
return clip.fl(transform_frame)
def resize_to_fill(clip, target_resolution):
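    """Scale and center-crop a clip so it exactly fills the target resolution."""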
target_w, target_h = target_resolution
clip_aspect = clip.w / clip.h
target_aspect = target_w / target_h
if clip_aspect > target_aspect:
clip = clip.resize(height=target_h)
crop_amount = (clip.w - target_w) / 2
clip = clip.crop(x1=crop_amount, x2=clip.w - crop_amount, y1=0, y2=clip.h)
else:
clip = clip.resize(width=target_w)
crop_amount = (clip.h - target_h) / 2
clip = clip.crop(x1=0, x2=clip.w, y1=crop_amount, y2=clip.h - crop_amount)
return clip
def add_background_music(final_video, bg_music_volume=0.08):
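    """Mix looped background_music.mp3 under the video's narration, if the file exists."""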
bg_music_path = "background_music.mp3"
if os.path.exists(bg_music_path):
bg_music = AudioFileClip(bg_music_path)
if bg_music.duration < final_video.duration:
loops_needed = math.ceil(final_video.duration / bg_music.duration)
bg_segments = [bg_music] * loops_needed
bg_music = concatenate_audioclips(bg_segments)
bg_music = bg_music.subclip(0, final_video.duration)
bg_music = bg_music.volumex(bg_music_volume)
video_audio = final_video.audio
mixed_audio = CompositeAudioClip([video_audio, bg_music])
final_video = final_video.set_audio(mixed_audio)
return final_video
def create_clip(media_path, asset_type, tts_path, duration=None, effects=None, narration_text=None, segment_index=0):
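    """Build one video segment: fit the media to frame, add Ken Burns/fades, timed caption chunks, and the TTS audio."""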
try:
if not os.path.exists(media_path) or not os.path.exists(tts_path):
return None
audio_clip = AudioFileClip(tts_path).audio_fadeout(0.2)
target_duration = audio_clip.duration + 0.2
if asset_type == "video":
clip = VideoFileClip(media_path)
clip = resize_to_fill(clip, TARGET_RESOLUTION)
            clip = clip.fx(vfx.loop, duration=target_duration) if clip.duration < target_duration else clip.subclip(0, target_duration)
elif asset_type == "image":
img = Image.open(media_path)
if img.mode != 'RGB':
with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp:
img.convert('RGB').save(temp.name)
media_path = temp.name
img.close()
clip = ImageClip(media_path).set_duration(target_duration)
clip = apply_kenburns_effect(clip, TARGET_RESOLUTION)
clip = clip.fadein(0.3).fadeout(0.3)
else:
return None
subtitle_clips = []
if narration_text and CAPTION_COLOR != "transparent":
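            # Split the narration into 5-word caption chunks spread evenly across the audio.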
words = narration_text.split()
chunks = [' '.join(words[i:i+5]) for i in range(0, len(words), 5)]
            chunk_duration = audio_clip.duration / max(1, len(chunks))
subtitle_y_position = int(TARGET_RESOLUTION[1] * 0.70)
for i, chunk_text in enumerate(chunks):
start_time = i * chunk_duration
end_time = (i + 1) * chunk_duration
txt_clip = TextClip(
chunk_text,
fontsize=45,
font='Arial-Bold',
color=CAPTION_COLOR,
bg_color='rgba(0, 0, 0, 0.25)',
method='caption',
align='center',
stroke_width=2,
stroke_color=CAPTION_COLOR,
                    size=(int(TARGET_RESOLUTION[0] * 0.8), None)
).set_start(start_time).set_end(end_time).set_position(('center', subtitle_y_position))
subtitle_clips.append(txt_clip)
clip = CompositeVideoClip([clip] + subtitle_clips)
clip = clip.set_audio(audio_clip)
return clip
except Exception:
return None
# Main Gradio Function
def generate_video(video_concept, resolution, caption_option):
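    """End-to-end pipeline: script -> parse -> per-segment media/TTS/clips -> concatenate -> background music -> MP4."""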
global TARGET_RESOLUTION, CAPTION_COLOR
TARGET_RESOLUTION = (1920, 1080) if resolution == "Full" else (1080, 1920)
CAPTION_COLOR = "white" if caption_option == "Yes" else "transparent"
if os.path.exists(TEMP_FOLDER):
shutil.rmtree(TEMP_FOLDER)
os.makedirs(TEMP_FOLDER)
script = generate_script(video_concept)
if not script:
return "Failed to generate script."
elements = parse_script(script)
if not elements:
return "Failed to parse script."
paired_elements = [(elements[i], elements[i+1]) for i in range(0, len(elements), 2) if i+1 < len(elements)]
if not paired_elements:
return "No valid script segments found."
clips = []
for idx, (media_elem, tts_elem) in enumerate(paired_elements):
media_asset = generate_media(media_elem['prompt'], current_index=idx, total_segments=len(paired_elements))
if not media_asset:
continue
tts_path = generate_tts(tts_elem['text'], tts_elem['voice'])
if not tts_path:
continue
clip = create_clip(
media_path=media_asset['path'],
asset_type=media_asset['asset_type'],
tts_path=tts_path,
duration=tts_elem['duration'],
effects=media_elem.get('effects', 'fade-in'),
narration_text=tts_elem['text'],
segment_index=idx
)
if clip:
clips.append(clip)
if not clips:
return "No clips were successfully created."
final_video = concatenate_videoclips(clips, method="compose")
final_video = add_background_music(final_video, bg_music_volume=0.08)
final_video.write_videofile(OUTPUT_VIDEO_FILENAME, codec='libx264', fps=24, preset='veryfast')
shutil.rmtree(TEMP_FOLDER)
return OUTPUT_VIDEO_FILENAME
# Gradio Interface
with gr.Blocks() as demo:
gr.Markdown("# AI Documentary Video Generator")
with gr.Row():
video_concept = gr.Textbox(label="Video Concept", placeholder="Enter your video concept here...")
resolution = gr.Dropdown(["Full", "Short"], label="Resolution", value="Full")
caption_option = gr.Dropdown(["Yes", "No"], label="Caption", value="Yes")
generate_btn = gr.Button("Generate Video")
output_video = gr.Video(label="Generated Video")
generate_btn.click(generate_video, inputs=[video_concept, resolution, caption_option], outputs=output_video)
demo.launch()