Spaces:
Running
Running
Update video.py
Browse files
video.py
CHANGED
@@ -1,74 +1,152 @@
|
|
1 |
-
import cv2
|
2 |
-
import numpy as np
|
3 |
-
import random
|
4 |
-
import tempfile
|
5 |
-
import os
|
6 |
-
from moviepy.video.io.VideoFileClip import VideoFileClip
|
7 |
-
|
8 |
-
def add_and_detect_watermark_video(video_path, watermark_text, num_watermarks=5):
    """Embed copies of ``watermark_text`` into every frame of a video via the
    DCT domain and render a companion video highlighting the placements.

    Args:
        video_path: Path to the input video file.
        watermark_text: Text drawn (faintly) into the DCT coefficients.
        num_watermarks: How many randomly placed copies per frame.

    Returns:
        A ``(watermarked_path, highlight_path)`` tuple of temporary ``.mp4``
        files, or ``(None, None)`` if anything fails.
    """

    def add_watermark_to_frame(frame):
        """Watermark a single frame; return (marked, highlighted) images."""
        # Trim both dimensions to multiples of 8 so the DCT is well-formed.
        height, width = frame.shape[:2]
        resized = cv2.resize(frame, ((width // 8) * 8, (height // 8) * 8))

        # Only the luma channel carries the watermark; chroma is untouched.
        ycrcb = cv2.cvtColor(resized, cv2.COLOR_BGR2YCrCb)
        luma, _, _ = cv2.split(ycrcb)
        coeffs = cv2.dct(np.float32(luma))

        n_rows, n_cols = coeffs.shape
        font = cv2.FONT_HERSHEY_SIMPLEX
        placements = []
        for _ in range(num_watermarks):
            text_w, text_h = cv2.getTextSize(watermark_text, font, 0.5, 1)[0]
            x = random.randint(0, n_cols - text_w)
            y = random.randint(text_h, n_rows)
            # Draw the text onto a zero canvas and add it at low amplitude.
            stamp = np.zeros_like(coeffs)
            stamp = cv2.putText(stamp, watermark_text, (x, y), font, 0.5,
                                (1, 1, 1), 1, cv2.LINE_AA)
            coeffs += stamp * 0.01
            placements.append((x, y, text_w, text_h))

        # Invert the transform and rebuild the BGR frame.
        ycrcb[:, :, 0] = cv2.idct(coeffs)
        marked = cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)

        # Visualization copy: red text plus a bounding box per placement.
        highlighted = marked.copy()
        for x, y, text_w, text_h in placements:
            cv2.putText(highlighted, watermark_text, (x, y), font, 0.5,
                        (0, 0, 255), 1, cv2.LINE_AA)
            cv2.rectangle(highlighted, (x, y - text_h), (x + text_w, y),
                          (0, 0, 255), 2)

        return marked, highlighted

    try:
        clip = VideoFileClip(video_path)

        # Two passes over the clip: one for each output video.
        marked_clip = clip.fl_image(lambda f: add_watermark_to_frame(f)[0])
        highlight_clip = clip.fl_image(lambda f: add_watermark_to_frame(f)[1])

        # mkstemp hands back open descriptors; close them so the writer can
        # reopen the paths.
        fd_marked, marked_path = tempfile.mkstemp(suffix=".mp4")
        fd_highlight, highlight_path = tempfile.mkstemp(suffix=".mp4")
        os.close(fd_marked)
        os.close(fd_highlight)

        marked_clip.write_videofile(marked_path, codec='libx264')
        highlight_clip.write_videofile(highlight_path, codec='libx264')

        return marked_path, highlight_path

    except Exception as e:
        print(f"An error occurred: {e}")
        return None, None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
import random
|
4 |
+
import tempfile
|
5 |
+
import os # Ensure os is imported
|
6 |
+
from moviepy.video.io.VideoFileClip import VideoFileClip
|
7 |
+
|
8 |
+
def add_and_detect_watermark_video(video_path, watermark_text, num_watermarks=5):
    """Embed copies of ``watermark_text`` into every frame of a video via the
    DCT domain and render a companion video highlighting the placements.

    Args:
        video_path: Path to the input video file.
        watermark_text: Text drawn (faintly) into the DCT coefficients.
        num_watermarks: How many randomly placed copies per frame.
            Defaults to 5.

    Returns:
        A ``(watermarked_path, highlight_path)`` tuple of temporary ``.mp4``
        files, or ``(None, None)`` if anything fails.
    """

    def add_watermark_to_frame(frame):
        """Watermark a single frame; return (marked, highlighted) images."""
        watermark_positions = []

        # Trim both dimensions to multiples of 8 (required for the DCT).
        h, w, _ = frame.shape
        h_new = (h // 8) * 8
        w_new = (w // 8) * 8
        frame_resized = cv2.resize(frame, (w_new, h_new))

        # Embed in the luma (Y) channel only, where changes are least visible.
        # NOTE(review): MoviePy delivers RGB frames, so COLOR_BGR2YCrCb is
        # really operating on RGB data here — the round trip is consistent,
        # but confirm the intended color handling.
        ycrcb_image = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2YCrCb)
        y_channel = ycrcb_image[:, :, 0]

        # Draw the watermark text directly onto the DCT coefficients at a
        # small amplitude so it stays imperceptible in the spatial domain.
        dct_y = cv2.dct(np.float32(y_channel))
        rows, cols = dct_y.shape
        font = cv2.FONT_HERSHEY_SIMPLEX
        for _ in range(num_watermarks):
            text_size = cv2.getTextSize(watermark_text, font, 0.5, 1)[0]
            # Guard against text larger than the frame: randint raises
            # ValueError when its upper bound is below the lower bound.
            text_x = random.randint(0, max(cols - text_size[0], 0))
            text_y = random.randint(min(text_size[1], rows), rows)
            watermark = np.zeros_like(dct_y)
            watermark = cv2.putText(watermark, watermark_text, (text_x, text_y),
                                    font, 0.5, (1, 1, 1), 1, cv2.LINE_AA)
            dct_y += watermark * 0.01
            watermark_positions.append((text_x, text_y, text_size[0], text_size[1]))

        # Back to the spatial domain. Clip before storing: the inverse DCT can
        # produce values outside [0, 255], and assigning raw floats into the
        # uint8 image would wrap around instead of saturating.
        idct_y = cv2.idct(dct_y)
        ycrcb_image[:, :, 0] = np.clip(idct_y, 0, 255).astype(np.uint8)
        watermarked_frame = cv2.cvtColor(ycrcb_image, cv2.COLOR_YCrCb2BGR)

        # Visualization copy: red text plus a bounding box per placement.
        watermark_highlight = watermarked_frame.copy()
        for (text_x, text_y, text_w, text_h) in watermark_positions:
            cv2.putText(watermark_highlight, watermark_text, (text_x, text_y),
                        font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
            cv2.rectangle(watermark_highlight, (text_x, text_y - text_h),
                          (text_x + text_w, text_y), (0, 0, 255), 2)

        return watermarked_frame, watermark_highlight

    try:
        # Load video using MoviePy.
        video = VideoFileClip(video_path)

        # NOTE(review): each fl_image pass re-runs the randomized embedding,
        # so the highlight video marks different positions than the
        # watermarked video, and the per-frame work is done twice.
        video_with_watermark = video.fl_image(lambda frame: add_watermark_to_frame(frame)[0])
        video_with_highlight = video.fl_image(lambda frame: add_watermark_to_frame(frame)[1])

        # mkstemp returns open descriptors; close them so write_videofile can
        # reopen the paths (required on Windows).
        temp_fd, watermarked_video_path = tempfile.mkstemp(suffix=".mp4")
        temp_fd_highlight, highlight_video_path = tempfile.mkstemp(suffix=".mp4")
        os.close(temp_fd)
        os.close(temp_fd_highlight)

        # Write output videos.
        video_with_watermark.write_videofile(watermarked_video_path, codec='libx264')
        video_with_highlight.write_videofile(highlight_video_path, codec='libx264')

        return watermarked_video_path, highlight_video_path

    except Exception as e:
        print(f"An error occurred: {e}")
        return None, None
|
75 |
+
|
76 |
+
def detect_watermark_video(video_path, watermark_text="WATERMARK"):
    """Detect watermarks in a video file using OpenCV.

    Scans each frame's DCT mid-frequency energy and writes an annotated copy
    of the video where detected frames are labeled "WATERMARK DETECTED".

    Args:
        video_path (str): Path to the video file
        watermark_text (str): The watermark text to detect.
            NOTE(review): currently unused — detection is a generic
            DCT-energy threshold, not a match on this specific text.

    Returns:
        str: Path to the output video with detected watermarks, or None on
        failure.
    """
    try:
        # Use OpenCV directly for frame processing.
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print(f"Error: Could not open video file {video_path}")
            return None

        try:
            # Get video properties.
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            if not fps or fps <= 0:
                # Some containers report 0 FPS; fall back to a sane default
                # so VideoWriter does not produce an unplayable file.
                fps = 30.0

            # Create output video file; close the descriptor so the writer
            # can reopen the path itself.
            temp_fd, output_path = tempfile.mkstemp(suffix=".mp4")
            os.close(temp_fd)

            # Initialize video writer (MP4 codec).
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            try:
                # Track detection results.
                frame_count = 0
                detected_frames = 0

                # Process each frame.
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    frame_count += 1

                    # Inspect the luma channel only.
                    ycrcb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb)
                    y_channel, _, _ = cv2.split(ycrcb_image)

                    # cv2.dct needs even dimensions; crop to multiples of 8.
                    h, w = y_channel.shape[:2]
                    if h % 8 != 0 or w % 8 != 0:
                        y_channel = cv2.resize(y_channel, ((w // 8) * 8, (h // 8) * 8))

                    dct_y = cv2.dct(np.float32(y_channel))

                    # Heuristic: embedded text inflates mid-frequency energy.
                    mid_freq_sum = np.sum(np.abs(dct_y[2:6, 2:6]))
                    detected = mid_freq_sum > 1000  # empirical threshold

                    if detected:
                        detected_frames += 1
                        # Add visual indicator of detection.
                        frame = cv2.putText(frame, "WATERMARK DETECTED", (30, 30),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

                    out.write(frame)
            finally:
                # Always finalize the output container, even on error —
                # otherwise a mid-loop exception leaks the writer and leaves
                # a corrupt file.
                out.release()
        finally:
            cap.release()

        print(f"Processed {frame_count} frames, detected watermarks in {detected_frames} frames")

        return output_path

    except Exception as e:
        print(f"Error detecting watermark in video: {e}")
        return None
|