# components/pexels.py
import os
import re
import uuid

import requests
# Search Pexels for videos matching a keyword
def search_pexels(keyword, api_key, orientation='portrait', size='medium', endpoint='videos', num_pages=50):
    if orientation not in ['portrait', 'landscape', 'square']:
        raise ValueError("Error! orientation must be one of {'square', 'landscape', 'portrait'}")
    if size not in ['medium', 'small', 'large']:
        raise ValueError("Error! size must be one of ['medium', 'small', 'large']")

    base_url = 'https://api.pexels.com/'
    headers = {
        'Authorization': f'{api_key}'
    }
    # num_pages is sent to the API as per_page (number of results per request)
    url = f'{base_url}{endpoint}/search?query={keyword}&per_page={num_pages}&orientation={orientation}&size={size}'
    response = requests.get(url, headers=headers)

    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        return response.json()
    print(f'Error: {response.status_code}')
    return None
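
# Rough shape of the Pexels /videos/search response that download_video relies on.
# This is an illustrative sketch reconstructed from the fields accessed below,
# not the full Pexels schema:
#
#   {
#       "videos": [
#           {
#               "id": 1234567,
#               "video_files": [
#                   {"height": 1080, "width": 1920, "link": "https://..."},
#                   ...
#               ]
#           },
#           ...
#       ]
#   }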
# Download the first video file that matches the requested resolution
def download_video(data, parent_path, height, width, links, i):
    # search_pexels returns None on failure, so guard before indexing
    if not data or 'videos' not in data:
        return None
    for x in data['videos']:
        # Skip videos that have already been downloaded (links holds string ids)
        if str(x['id']) in links:
            continue
        for v in x['video_files']:
            if v['height'] == height and v['width'] == width:
                video_id = str(x['id'])
                video_path = os.path.join(parent_path, f"{i}_{video_id}.mp4")
                with open(video_path, 'wb') as f:
                    f.write(requests.get(v['link']).content)
                print("Successfully saved video in", video_path)
                return video_id
    # No video matched the requested height/width
    return None
# Use the LLM chains to find relevant videos for each sentence of the script
def generate_videos(product, api_key, orientation, height, width, llm_chain=None, sum_llm_chain=None):
    prod = product.strip().replace(" ", "_")
    links = []
    folder_name = None
    sentences = []
    try:
        # Generate the script text with the LLM chain
        sentences = llm_chain.run(product.strip())
        print('Sentences:', sentences)
        # Split the generated text into sentences
        sentences = [x.strip() for x in re.split(r'[.!?]', sentences) if len(x.strip()) > 6]

        # Use a UUID to create a short, unique folder name
        folder_name = f"video_{uuid.uuid4().hex}"
        os.makedirs(folder_name, exist_ok=True)
        folder_path = os.path.join(folder_name, "images")
        os.makedirs(folder_path, exist_ok=True)

        # Download a video for every sentence
        print("Keywords:")
        for i, s in enumerate(sentences):
            keyword = sum_llm_chain.run(s)
            print(i + 1, ":", keyword)
            data = search_pexels(keyword, api_key, orientation.lower())
            link = download_video(data, folder_path, height, width, links, i)
            if link:
                links.append(link)
        print("Success! Videos have been generated")
    except Exception as e:
        print("Error! Failed generating videos")
        print(e)
    return folder_name, sentences
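

# Minimal usage sketch. Assumptions not taken from the original module: the
# PEXELS_API_KEY environment variable, the example product string, and the
# DummyChain stub, which only stands in for the LangChain-style chains whose
# .run() method generate_videos expects.
if __name__ == "__main__":
    class DummyChain:
        """Placeholder chain that echoes its input; replace with real LLM chains."""
        def run(self, text):
            return text

    pexels_key = os.environ.get("PEXELS_API_KEY")
    if pexels_key:
        folder, sents = generate_videos(
            "wireless headphones",   # example product description
            pexels_key,
            "Landscape",             # orientation, lower-cased inside generate_videos
            1080, 1920,              # height, width of the desired video files
            llm_chain=DummyChain(),
            sum_llm_chain=DummyChain(),
        )
        print("Folder:", folder)
        print("Sentences:", sents)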