Create model.py
model.py
ADDED
@@ -0,0 +1,72 @@
import numpy as np
import cv2
import torch
import moviepy.editor as mp
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load AI Pose Model
tokenizer = AutoTokenizer.from_pretrained("CompVis/posegpt")
model = AutoModelForCausalLM.from_pretrained("CompVis/posegpt")

# Memory Storage for Characters (keyed by character name)
character_memory = {}

def generate_stick_animation(story, character_name):
    global character_memory

    # Register the character with a default pose and size on first use
    if character_name not in character_memory:
        character_memory[character_name] = {"pose": (250, 200), "size": 20}

    char_pose = character_memory[character_name]["pose"]

    # Convert story to keyframes using AI
    # (the generated tokens are not yet mapped to poses; the motion below is a fixed downward drift)
    inputs = tokenizer(story, return_tensors="pt")
    output = model.generate(**inputs, max_length=50)

    # Generate Stick Figure Animation
    frames = []
    for i in range(20):  # More frames for smoother motion
        img = np.ones((500, 500, 3), dtype=np.uint8) * 255  # White background

        # Stick Figure Body
        cv2.line(img, (char_pose[0], char_pose[1] + i * 3), (char_pose[0], char_pose[1] + 100 + i * 3), (0, 0, 0), 5)  # Torso
        cv2.circle(img, (char_pose[0], char_pose[1] - 20 + i * 3), 20, (0, 0, 0), -1)  # Head
        cv2.line(img, (char_pose[0] - 50, char_pose[1] + 50 + i * 3), (char_pose[0] + 50, char_pose[1] + 50 + i * 3), (0, 0, 0), 5)  # Arms

        # Black & White Filter
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        frames.append(gray)

    return frames

# Extra Features
def add_camera_effects(frames, effect):
    if effect == "Shake":
        # Random per-frame translation of up to 5 px in each direction
        return [cv2.warpAffine(frame, np.float32([[1, 0, np.random.randint(-5, 5)], [0, 1, np.random.randint(-5, 5)]]), (500, 500)) for frame in frames]
    elif effect == "Zoom":
        # Upscale to 600x600, then centre-crop back to 500x500
        return [cv2.resize(frame, (600, 600))[50:550, 50:550] for frame in frames]
    elif effect == "Slow Motion":
        # Repeat each frame in place so playback takes twice as long
        return [frame for frame in frames for _ in range(2)]
    return frames

def add_background(frames, background):
    if background == "Dark Forest":
        bg = cv2.imread("dark_forest.jpg", cv2.IMREAD_GRAYSCALE)
    elif background == "Haunted House":
        bg = cv2.imread("haunted_house.jpg", cv2.IMREAD_GRAYSCALE)
    else:
        return frames
    if bg is None:
        return frames  # Background asset not found; leave frames unchanged
    bg = cv2.resize(bg, (frames[0].shape[1], frames[0].shape[0]))  # Match frame size for blending
    return [cv2.addWeighted(frame, 0.8, bg, 0.2, 0) for frame in frames]

def add_sound(video_path, sound_type):
    if sound_type == "Horror":
        sound = mp.AudioFileClip("horror_music.mp3")
    elif sound_type == "Action":
        sound = mp.AudioFileClip("action_music.mp3")
    else:
        return video_path
    video = mp.VideoFileClip(video_path)
    final = video.set_audio(sound)
    final_path = video_path.replace(".mp4", "_sound.mp4")
    final.write_videofile(final_path)
    return final_path
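For reference, a minimal usage sketch of how these functions could be chained. It is not part of the committed file: the story text, character name, output file name, and the image/audio assets referenced above are placeholders, and cv2.VideoWriter is used here only as one possible way to turn the grayscale frames into an MP4 that add_sound() can then re-encode with moviepy.

# Usage sketch (illustrative only; file names below are placeholders)
if __name__ == "__main__":
    frames = generate_stick_animation("A lone figure walks into the dark forest.", "Hero")
    frames = add_camera_effects(frames, "Shake")
    frames = add_background(frames, "Dark Forest")

    # Write the 500x500 grayscale frames to an MP4 so add_sound() can attach audio
    out_path = "stick_animation.mp4"
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), 10, (500, 500), isColor=False)
    for frame in frames:
        writer.write(frame)
    writer.release()

    final_path = add_sound(out_path, "Horror")
    print(f"Animation written to {final_path}")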