File size: 2,958 Bytes
3d42847 b9dc6c1 3d42847 b9dc6c1 3d42847 b9dc6c1 3d42847 b9dc6c1 3d42847 b9dc6c1 965d9f2 3d42847 965d9f2 3d42847 b9dc6c1 3d42847 b9dc6c1 3d42847 965d9f2 3d42847 965d9f2 3d42847 b9dc6c1 3d42847 b9dc6c1 3d42847 965d9f2 3d42847 b9dc6c1 3d42847 965d9f2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
import os
import re
import shutil
import uuid

import requests
# Searching for the videos
def search_pexels(keyword, api_key, orientation='potrait', size='medium', endpoint='videos', num_pages=50):
    """Query the Pexels search API and return the decoded JSON payload.

    Parameters
    ----------
    keyword : str
        Search term, sent as the ``query`` parameter.
    api_key : str
        Pexels API key, sent in the ``Authorization`` header.
    orientation : str
        'portrait', 'landscape' or 'square'. The historical misspelling
        'potrait' is still accepted for backward compatibility and is
        normalized to 'portrait' before the request is made.
    size : str
        One of 'medium', 'small', 'large'.
    endpoint : str
        API endpoint to search, e.g. 'videos'.
    num_pages : int
        Value for the ``per_page`` query parameter.

    Returns
    -------
    dict or None
        Parsed JSON response on HTTP 200; ``None`` (after printing the
        status code) on any other status.

    Raises
    ------
    ValueError
        If ``orientation`` or ``size`` is not an accepted value.
    """
    if orientation not in ('potrait', 'portrait', 'landscape', 'square'):
        raise ValueError("Error! orientation must be one of {'square', 'landscape', 'potrait'}")
    if size not in ('medium', 'small', 'large'):
        raise ValueError("Error! size must be one of ['medium', 'small', 'large']")
    # BUG FIX: the API documents 'portrait'; sending 'potrait' is not a
    # valid value. Normalize the legacy spelling before building the request.
    if orientation == 'potrait':
        orientation = 'portrait'
    headers = {
        'Authorization': f'{api_key}'
    }
    # Let requests build and URL-escape the query string (the old f-string
    # did not escape the keyword).
    params = {
        'query': keyword,
        'per_page': num_pages,
        'orientation': orientation,
        'size': size,
    }
    url = f'https://api.pexels.com/{endpoint}/search'
    response = requests.get(url, headers=headers, params=params)
    # Check if request was successful (status code 200)
    if response.status_code == 200:
        return response.json()
    print(f'Error: {response.status_code}')
    return None
# Video download function
def download_video(data, parent_path, height, width, links, i):
    """Download the first not-yet-seen video in ``data`` that exactly
    matches the requested resolution.

    Parameters
    ----------
    data : dict
        Pexels search response; ``data['videos']`` is iterated.
    parent_path : str
        Directory the ``.mp4`` file is written into.
    height, width : int
        Exact resolution a candidate video file must have.
    links : list[str]
        String ids of already-downloaded videos; matching videos are skipped.
    i : int
        Index used as the filename prefix (``"<i>_<id>.mp4"``).

    Returns
    -------
    str or None
        The downloaded video's id (as a string), or ``None`` when no
        candidate matched.
    """
    for video in data['videos']:
        video_id = str(video['id'])
        # BUG FIX: ``links`` stores the *string* ids this function returns,
        # but the old code compared the raw int ``x['id']`` against it, so
        # the duplicate check never matched and videos were re-downloaded.
        if video_id in links:
            continue
        for candidate in video['video_files']:
            if candidate['height'] == height and candidate['width'] == width:
                video_path = os.path.join(parent_path, f"{i}_{video_id}.mp4")
                with open(video_path, 'wb') as f:
                    f.write(requests.get(candidate['link']).content)
                print("Successfully saved video in", video_path)
                return video_id
    # Nothing matched the requested resolution (or everything was a duplicate).
    return None
# Utilizing the LLMs to find the relevant videos
def generate_videos(product, api_key, orientation, height, width, llm_chain=None, sum_llm_chain=None):
    """Generate a folder of stock videos matching a product description.

    Uses ``llm_chain`` to expand ``product`` into a paragraph, splits it
    into sentences, summarizes each sentence into a search keyword with
    ``sum_llm_chain``, then searches Pexels and downloads one matching
    video per sentence. Errors are reported (best-effort) rather than
    raised, and whatever was produced so far is returned.

    Parameters
    ----------
    product : str
        Product description fed to the LLM chain.
    api_key : str
        Pexels API key.
    orientation : str
        Video orientation; lowercased before being passed to the API.
    height, width : int
        Exact resolution the downloaded files must have.
    llm_chain, sum_llm_chain
        Objects exposing ``.run(text) -> str`` (paragraph generator and
        keyword summarizer respectively).

    Returns
    -------
    tuple[str | None, list[str]]
        The created folder name (``None`` if creation never happened)
        and the list of extracted sentences.
    """
    links = []          # string ids of downloaded videos, to avoid duplicates
    # BUG FIX: initialize before the try-block so the final return cannot
    # raise NameError when an error occurs early (e.g. in llm_chain.run).
    sentences = []
    folder_name = None
    try:
        # Expand the product description into a paragraph of sentences.
        paragraph = llm_chain.run(product.strip())
        print('Sentences:', paragraph)
        # Split into sentences, dropping fragments of 6 characters or fewer.
        sentences = [s.strip() for s in re.split(r'[.!?]', paragraph) if len(s.strip()) > 6]
        # Short, unique working directory for this run.
        # (Requires ``import uuid`` — missing from the original file.)
        folder_name = f"video_{uuid.uuid4().hex}"
        os.makedirs(folder_name, exist_ok=True)
        # NOTE(review): videos land in an "images" subfolder; the name is
        # kept as-is for backward compatibility with existing callers.
        folder_path = os.path.join(folder_name, "images")
        os.makedirs(folder_path, exist_ok=True)
        # Generate one video per sentence.
        print("Keywords:")
        for idx, sentence in enumerate(sentences):
            # Condense the sentence into a short search keyword.
            keyword = sum_llm_chain.run(sentence)
            print(idx + 1, ":", keyword)
            data = search_pexels(keyword, api_key, orientation.lower())
            video_id = download_video(data, folder_path, height, width, links, idx)
            if video_id:
                links.append(video_id)
        print("Success! Videos have been generated")
    except Exception as e:
        # Best-effort: report the failure and fall through so partial
        # results (folder name, sentences) are still returned.
        print("Error! Failed generating videos")
        print(e)
    return folder_name, sentences