pragnakalp committed
Commit 5a09824 · Parent: 0eec857

Update app.py

Files changed (1): app.py (+142, -3)
app.py CHANGED
@@ -23,6 +23,147 @@ import ffmpeg
 
 block = gr.Blocks()
 
+def merge_frames():
+    path = '/content/video_results/restored_imgs'
+    image_folder = os.fsencode(path)
+    print(image_folder)
+    filenames = []
+
+    for file in os.listdir(image_folder):
+        filename = os.fsdecode(file)
+        if filename.endswith(('.jpg', '.png', '.gif')):
+            filenames.append(filename)
+
+    filenames.sort()  # os.listdir() has no guaranteed order, so sort the frames
+    images = list(map(lambda filename: imageio.imread("/content/video_results/restored_imgs/" + filename), filenames))
+
+    imageio.mimsave('/content/video_output.mp4', images, fps=25.0)  # adjust the frame rate as needed
+
+
+block = gr.Blocks()
+
+
+def audio_video():
+    input_video = ffmpeg.input('/content/video_output.mp4')
+    input_audio = ffmpeg.input('/content/audio.wav')
+    ffmpeg.concat(input_video, input_audio, v=1, a=1).output('final_output.mp4').run()
+    return "final_output.mp4"
+
+
+def compute_aspect_preserved_bbox(bbox, increase_area, h, w):
+    left, top, right, bot = bbox
+    width = right - left
+    height = bot - top
+
+    # Expand so both sides reach (1 + 2 * increase_area) * max(width, height), i.e. a square region
+    width_increase = max(increase_area, ((1 + 2 * increase_area) * height - width) / (2 * width))
+    height_increase = max(increase_area, ((1 + 2 * increase_area) * width - height) / (2 * height))
+
+    left_t = int(left - width_increase * width)
+    top_t = int(top - height_increase * height)
+    right_t = int(right + width_increase * width)
+    bot_t = int(bot + height_increase * height)
+
+    left_oob = -min(0, left_t)
+    right_oob = right - min(right_t, w)
+    top_oob = -min(0, top_t)
+    bot_oob = bot - min(bot_t, h)
+
+    # If the expanded box falls outside the image, shrink it uniformly to fit
+    if max(left_oob, right_oob, top_oob, bot_oob) > 0:
+        max_w = max(left_oob, right_oob)
+        max_h = max(top_oob, bot_oob)
+        if max_w > max_h:
+            return left_t + max_w, top_t + max_w, right_t - max_w, bot_t - max_w
+        else:
+            return left_t + max_h, top_t + max_h, right_t - max_h, bot_t - max_h
+    else:
+        return (left_t, top_t, right_t, bot_t)
+
+def crop_src_image(src_img, detector=None):
+    # Detect the face, expand its box, then crop and resize to 256x256
+    if detector is None:
+        detector = dlib.get_frontal_face_detector()
+    save_img = '/content/image_pre.png'
+    img = cv2.imread(src_img)
+    faces = detector(img, 0)
+    h, width, _ = img.shape
+    if len(faces) > 0:
+        bbox = [faces[0].left(), faces[0].top(), faces[0].right(), faces[0].bottom()]
+        l = bbox[3] - bbox[1]
+        bbox[1] = bbox[1] - l * 0.1
+        bbox[3] = bbox[3] - l * 0.1
+        bbox[1] = max(0, bbox[1])
+        bbox[3] = min(h, bbox[3])
+        bbox = compute_aspect_preserved_bbox(tuple(bbox), 0.5, img.shape[0], img.shape[1])
+        img = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
+        img = cv2.resize(img, (256, 256))
+        cv2.imwrite(save_img, img)
+    else:
+        img = cv2.resize(img, (256, 256))
+        cv2.imwrite(save_img, img)
+
+
+def pad_image(image):
+    # Pad with black borders to make the image square, keeping the content centered
+    w, h = image.size
+    if w == h:
+        return image
+    elif w > h:
+        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+        new_image.paste(image, (0, (w - h) // 2))
+        return new_image
+    else:
+        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+        new_image.paste(image, ((h - w) // 2, 0))
+        return new_image
+
+def calculate(image_in, audio_in):
+    # Re-encode the audio as 16-bit PCM WAV and pad the image to a square
+    waveform, sample_rate = torchaudio.load(audio_in)
+    torchaudio.save("/content/audio.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
+    image = Image.open(image_in)
+    image = pad_image(image)
+    image.save("image.png")
+
+    # Extract word/phoneme timings with pocketsphinx and reshape them with jq
+    pocketsphinx_run = subprocess.run(['pocketsphinx', '-phone_align', 'yes', 'single', '/content/audio.wav'], check=True, capture_output=True)
+    jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
+    with open("test.json", "w") as f:
+        f.write(jq_run.stdout.decode('utf-8').strip())
+
+    os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/results/restored_imgs/image_pre.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
+    return "/content/train/image_audio.mp4"
+
+def one_shot_talking(image_in, audio_in):
+    # Pre-process the input image (face detection and cropping)
+    crop_src_image(image_in)
+
+    # Improve the quality of the input image with GFPGAN
+    os.system(f"python /content/GFPGAN/inference_gfpgan.py --upscale 2 -i /content/image_pre.png -o /content/results --bg_upsampler realesrgan")
+
+    image_in_one_shot = '/content/results/restored_imgs/image_pre.png'
+
+    # Run the One-Shot Talking Face algorithm
+    calculate(image_in_one_shot, audio_in)
+
+    # Video quality improvement:
+    # 1. Extract the frames from the video file using PyVideoFramesExtractor
+    os.system(f"python /content/PyVideoFramesExtractor/extract.py --video=/content/train/image_pre_audio.mp4")
+
+    # 2. Improve the quality of each frame using GFPGAN
+    os.system(f"python /content/GFPGAN/inference_gfpgan.py --upscale 2 -i /content/extracted_frames/image_pre_audio_frames -o /content/video_results --bg_upsampler realesrgan")
+
+    # 3. Merge all the frames into one video using imageio
+    merge_frames()
+
+    audio_video()
+    return "Successful"
+
+
 def one_shot(image,input_text,gender):
     if gender == 'Female' or gender == 'female':
         tts = gTTS(input_text)
@@ -31,9 +172,7 @@ def one_shot(image,input_text,gender):
         f.seek(0)
         sound = AudioSegment.from_file(f.name, format="mp3")
         sound.export("/content/audio.wav", format="wav")
-        return "/content/audio.wav"
-        exit()
-        one_shot_talking(image,'audio.wav')
+        one_shot_talking(image,'/content/audio.wav')
 
     elif gender == 'Male' or gender == 'male':
         print(gender)
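
One caveat in the merge_frames added above: filenames.sort() is lexicographic, so a name like frame_10.png sorts before frame_2.png unless the extractor zero-pads frame numbers. A minimal sketch of a numeric sort key that guards against this (the frame-name pattern is an assumption, not something PyVideoFramesExtractor guarantees):

import re

def numeric_key(filename):
    # Sort by the first run of digits in the name, e.g. "frame_10.png" -> 10
    match = re.search(r"\d+", filename)
    return int(match.group()) if match else -1

# drop-in replacement for filenames.sort() in merge_frames:
# filenames.sort(key=numeric_key)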
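audio_video uses ffmpeg-python's concat filter with v=1, a=1, which pairs the single video stream with the single audio stream in one output. A standalone sketch of the same mux, assuming the ffmpeg-python package and the two files produced earlier in the pipeline:

import ffmpeg

def mux(video_path="/content/video_output.mp4", audio_path="/content/audio.wav", out_path="final_output.mp4"):
    video = ffmpeg.input(video_path)
    audio = ffmpeg.input(audio_path)
    # overwrite_output=True keeps a second run from failing on an existing output file,
    # which the in-app version would do
    ffmpeg.concat(video, audio, v=1, a=1).output(out_path).run(overwrite_output=True)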
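A quick worked check of compute_aspect_preserved_bbox: with increase_area=0.5, the two max(...) terms push both sides of the expanded box to (1 + 2 * 0.5) * max(width, height), i.e. a square with twice the longer side. With the function defined as above:

# bbox (100, 100, 200, 180) in a 500x500 image: width=100, height=80
# width_increase  = max(0.5, (2*80 - 100) / (2*100)) = 0.5   -> left/right move by 50
# height_increase = max(0.5, (2*100 - 80) / (2*80))  = 0.75  -> top/bottom move by 60
print(compute_aspect_preserved_bbox((100, 100, 200, 180), 0.5, 500, 500))
# (50, 40, 250, 240) -- a 200x200 square, fully inside the image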
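pad_image letterboxes a PIL image to a square with black borders instead of stretching it, so the face keeps its aspect ratio before the later 256x256 resize. A usage sketch:

from PIL import Image

img = Image.new("RGB", (300, 200), (255, 255, 255))  # stand-in for a loaded photo
square = pad_image(img)
print(square.size)  # (300, 300): the original is centered with 50 px black bars top and bottom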
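The jq program in calculate reshapes pocketsphinx's word/phone alignment into a list of words, each carrying phones with begin/end times in centiseconds (bg and ed come from (.b*100)|floor and (.b*100+.d*100)|floor). A sketch for inspecting the result; the actual words and phone values depend on the audio, so the sample entry below is illustrative only:

import json

with open("test.json") as f:
    words = json.load(f)

# Each entry has the shape the jq filter builds:
# {"word": "HELLO", "phones": [{"ph": "HH", "bg": 12, "ed": 18}, ...]}
for w in words:
    for p in w["phones"]:
        print(w["word"], p["ph"], p["bg"], p["ed"])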
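The GFPGAN, frame-extraction and test_script.py steps are all launched with os.system, which discards failures, so one_shot_talking returns "Successful" even if a step dies. A hedged alternative for one of the calls, using subprocess.run with check=True and the same flags as in the diff:

import subprocess

def run_gfpgan(input_path, output_dir):
    # Same invocation as in one_shot_talking, but a non-zero exit raises CalledProcessError
    subprocess.run([
        "python", "/content/GFPGAN/inference_gfpgan.py",
        "--upscale", "2",
        "-i", input_path,
        "-o", output_dir,
        "--bg_upsampler", "realesrgan",
    ], check=True)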