# 🔹 model.py (Complete Animation Engine with AI Integration)
import cv2
import numpy as np
import torch
import moviepy.editor as mp  # requires moviepy < 2.0 (moviepy.editor was removed in 2.x)
from transformers import pipeline
from PIL import Image, ImageDraw
import os


class AnimationGenerator:
    def __init__(self):
        # Seq2seq model that turns a story script into keyframe descriptions
        self.pose_analyzer = pipeline("text2text-generation",
                                      model="google/pegasus-x-base")
        self.character_db = {}        # persistent per-character appearance state
        self.resolution = (720, 480)  # (width, height) in pixels
        self.fps = 24
    def create_animation(self, script, character, bg, camera_effect, sound, tmp_dir):
        # Process script into animation sequences
        keyframes = self.parse_script(script, character)
        # Generate frames with persistent character
        frames = self.render_frames(keyframes, character, bg)
        # Apply cinematic effects
        frames = self.apply_camera_effects(frames, camera_effect)
        # Save and add audio
        video_path = os.path.join(tmp_dir, "animation.mp4")
        self.save_video(frames, video_path)
        return self.add_audio_track(video_path, sound, tmp_dir)
    def parse_script(self, script, character):
        # AI-based script analysis
        prompt = f"Convert this story into animation keyframes: {script}"
        analysis = self.pose_analyzer(prompt, max_length=400)
        return self.extract_motion_data(analysis[0]['generated_text'], character)
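    # Note: the transformers text2text-generation pipeline returns a list of
    # candidate dicts shaped like [{'generated_text': '...'}]; parse_script
    # keeps only the top candidate.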
    def extract_motion_data(self, text, character):
        # TODO: implement actual NLP parsing of the generated text; for now,
        # emit one second of keyframes alternating between walking and standing
        return [{
            'position': (100 + i * 20, 200),
            'pose': 'walking' if i % 2 == 0 else 'standing',
            'expression': 'neutral'
        } for i in range(24)]  # 24 frames = 1 second at self.fps
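    # Keyframe schema consumed by render_frames/draw_character:
    #   'position'   - (x, y) pixel anchor of the figure on the canvas
    #   'pose'       - free-form label, reserved for a richer renderer
    #   'expression' - free-form label, likewise unused by the stick figure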
    def render_frames(self, keyframes, character, bg):
        # Character memory system: remember appearance across scenes
        if character not in self.character_db:
            self.character_db[character] = {
                'color': (0, 0, 0),
                'scale': 1.0,
                'last_position': (100, 200)
            }
        frames = []
        for frame_data in keyframes:
            canvas = self.create_background(bg)
            self.draw_character(canvas, frame_data, character)
            # PIL draws in RGB; convert to BGR for the OpenCV effects below
            frames.append(cv2.cvtColor(np.array(canvas), cv2.COLOR_RGB2BGR))
        return frames
    def create_background(self, bg_name):
        # TODO: load actual background images; solid colors for now
        if bg_name == "Dark Forest":
            return Image.new('RGB', self.resolution, (34, 139, 34))
        elif bg_name == "Haunted House":
            return Image.new('RGB', self.resolution, (28, 28, 28))
        return Image.new('RGB', self.resolution, (255, 255, 255))
    def draw_character(self, canvas, data, character):
        # Stick-figure renderer driven by the keyframe position
        draw = ImageDraw.Draw(canvas)
        x, y = data['position']
        # Head
        draw.ellipse((x - 15, y - 40, x + 15, y - 10), outline=(0, 0, 0), width=2)
        # Body
        draw.line((x, y, x, y + 60), fill=(0, 0, 0), width=3)
        # Arms
        draw.line((x - 30, y + 30, x + 30, y + 30), fill=(0, 0, 0), width=3)
        # Legs
        draw.line((x - 20, y + 90, x, y + 60), fill=(0, 0, 0), width=3)
        draw.line((x + 20, y + 90, x, y + 60), fill=(0, 0, 0), width=3)
    def apply_camera_effects(self, frames, effect):
        # Cinematic camera effects dispatched by preset name
        if effect == "Dynamic Shake":
            return [self.apply_shake(frame) for frame in frames]
        elif effect == "Cinematic Zoom":
            return self.create_zoom_effect(frames)
        return frames
    def apply_shake(self, frame):
        # Shift the frame by a small random offset to simulate camera shake
        dx, dy = np.random.randint(-7, 7), np.random.randint(-5, 5)
        M = np.float32([[1, 0, dx], [0, 1, dy]])
        return cv2.warpAffine(frame, M, self.resolution)  # dsize is (width, height)
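    # The shake is a pure-translation affine transform:
    #     [[1, 0, dx],
    #      [0, 1, dy]]
    # which maps each pixel (x, y) to (x + dx, y + dy); borders vacated by the
    # shift are filled with black under cv2.warpAffine's default border mode.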
    def create_zoom_effect(self, frames):
        # Slow push-in: scale up to 1.3x over the clip, then center-crop back
        # to the working resolution
        zoomed = []
        for i, frame in enumerate(frames):
            scale = 1.0 + (i / len(frames)) * 0.3
            new_frame = cv2.resize(frame, None, fx=scale, fy=scale)
            y_start = (new_frame.shape[0] - self.resolution[1]) // 2
            x_start = (new_frame.shape[1] - self.resolution[0]) // 2
            zoomed.append(new_frame[y_start:y_start + self.resolution[1],
                                    x_start:x_start + self.resolution[0]])
        return zoomed
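    # Worked example: at the final scale of 1.3 a 720x480 frame is resized to
    # 936x624, so the crop starts at x_start = (936 - 720) // 2 = 108 and
    # y_start = (624 - 480) // 2 = 72, keeping the push-in centered.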
    def save_video(self, frames, path):
        # MoviePy expects RGB arrays, but the frames are BGR after the OpenCV
        # pipeline, so convert back before encoding
        rgb_frames = [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in frames]
        clip = mp.ImageSequenceClip(rgb_frames, fps=self.fps)
        clip.write_videofile(path, codec='libx264', audio=False)
        clip.close()
    def add_audio_track(self, video_path, sound, tmp_dir):
        # TODO: implement actual audio mixing; for now this just re-encodes
        # the silent video so the final file name is stable
        final_path = os.path.join(tmp_dir, "final.mp4")
        video = mp.VideoFileClip(video_path)
        video.write_videofile(final_path)
        video.close()
        return final_path
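# Minimal usage sketch (not part of the original module; the script text and
# the "output" directory are illustrative assumptions, while the preset names
# match the branches above):
if __name__ == "__main__":
    os.makedirs("output", exist_ok=True)
    generator = AnimationGenerator()
    result = generator.create_animation(
        script="A lone hiker walks into a dark forest at dusk.",
        character="hiker",
        bg="Dark Forest",
        camera_effect="Dynamic Shake",
        sound=None,  # audio mixing is not implemented yet
        tmp_dir="output",
    )
    print("Animation written to", result)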