Update app.py

app.py CHANGED

@@ -19,6 +19,12 @@ from tasks.mm_tasks.caption import CaptionTask
 from tasks.mm_tasks.refcoco import RefcocoTask
 from tasks.mm_tasks.vqa_gen import VqaGenTask
 
+# video
+from data.video_utils import VIDEO_READER_FUNCS
+
+# audio
+import torchaudio
+from data.audio_utils import get_audio_features, int16_to_float32, float32_to_int16, AUDIO_CFG
 
 def move2gpu(models, cfg):
     for model in models:
@@ -48,6 +54,10 @@ def construct_transform(patch_image_size):
 tasks.register_task('caption', CaptionTask)
 tasks.register_task('refcoco', RefcocoTask)
 tasks.register_task('vqa_gen', VqaGenTask)
+tasks.register_task('video_caption', CaptionTask)
+tasks.register_task('audio_caption', CaptionTask)
+
+
 # turn on cuda if GPU is available
 use_cuda = torch.cuda.is_available()
 # use fp16 only when GPU is available
@@ -56,16 +66,19 @@ use_fp16 = False
 # download checkpoints
 os.system('mkdir -p checkpoints; ')
 
-os.system('wget https://data.isir.upmc.fr/unival/models/unival_s2_hs/checkpoint1.pt; '
-          'mkdir -p checkpoints/unival_s2_hs; mv checkpoint1.pt checkpoints/unival_s2_hs/')
+# os.system('wget https://data.isir.upmc.fr/unival/models/unival_s2_hs/checkpoint1.pt; '
+#           'mkdir -p checkpoints/unival_s2_hs; mv checkpoint1.pt checkpoints/unival_s2_hs/')
 
 os.system('wget https://data.isir.upmc.fr/unival/models/unival_vqa/checkpoint_best.pt; '
           'mkdir -p checkpoints/unival_vqa; mv checkpoint_best.pt checkpoints/unival_vqa/')
-os.system('wget https://data.isir.upmc.fr/unival/models/unival_caption_stage_1/checkpoint_best_test.pt; '
-          'mkdir -p checkpoints/unival_caption_stage_1; mv checkpoint_best_test.pt checkpoints/unival_caption_stage_1/')
-os.system('wget https://data.isir.upmc.fr/unival/models/unival_refcocog/checkpoint_best.pt; '
-          'mkdir -p checkpoints/unival_refcocog; mv checkpoint_best.pt checkpoints/unival_refcocog/')
-
+# os.system('wget https://data.isir.upmc.fr/unival/models/unival_caption_stage_1/checkpoint_best_test.pt; '
+#           'mkdir -p checkpoints/unival_caption_stage_1; mv checkpoint_best_test.pt checkpoints/unival_caption_stage_1/')
+# os.system('wget https://data.isir.upmc.fr/unival/models/unival_refcocog/checkpoint_best.pt; '
+#           'mkdir -p checkpoints/unival_refcocog; mv checkpoint_best.pt checkpoints/unival_refcocog/')
+# os.system('wget https://data.isir.upmc.fr/unival/models/unival_video_caption_stage_1/checkpoint_best.pt; '
+#           'mkdir -p checkpoints/unival_video_caption_stage_1; mv checkpoint_best.pt checkpoints/unival_video_caption_stage_1/')
+# os.system('wget https://data.isir.upmc.fr/unival/models/unival_audio_caption/checkpoint_best.pt; '
+#           'mkdir -p checkpoints/unival_audio_caption; mv checkpoint_best.pt checkpoints/unival_audio_caption/')
 
 # Load ckpt & config for Image Captioning
 checkpoint_path = 'checkpoints/unival_caption_stage_1/checkpoint_best_test.pt'
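Note: the commit leaves every download except the VQA one commented out, yet the loaders below still expect the files on disk, so the new video and audio checkpoints must be fetched beforehand. A sketch that simply mirrors the commented lines above (same URLs, uncommented):

    # Fetch the two new checkpoints that app.py now loads (mirrors the commented wget lines).
    os.system('wget https://data.isir.upmc.fr/unival/models/unival_video_caption_stage_1/checkpoint_best.pt; '
              'mkdir -p checkpoints/unival_video_caption_stage_1; mv checkpoint_best.pt checkpoints/unival_video_caption_stage_1/')
    os.system('wget https://data.isir.upmc.fr/unival/models/unival_audio_caption/checkpoint_best.pt; '
              'mkdir -p checkpoints/unival_audio_caption; mv checkpoint_best.pt checkpoints/unival_audio_caption/')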
@@ -78,6 +91,29 @@ caption_models, caption_cfg, caption_task = checkpoint_utils.load_model_ensemble_and_task(
     arg_overrides=caption_overrides
 )
 
+# Load ckpt & config for Video Captioning
+checkpoint_path = 'checkpoints/unival_video_caption_stage_1/checkpoint_best.pt'
+
+caption_overrides = {"eval_cider": False, "beam": 5, "max_len_b": 22, "no_repeat_ngram_size": 3, "seed": 7, "unnormalized": False,
+                     "bpe_dir": "utils/BPE", "video_model_path": None, "resnet_model_path": None}
+
+video_caption_models, video_caption_cfg, video_caption_task = checkpoint_utils.load_model_ensemble_and_task(
+    utils.split_paths(checkpoint_path),
+    arg_overrides=caption_overrides
+)
+
+
+# Load ckpt & config for Audio Captioning
+checkpoint_path = 'checkpoints/unival_audio_caption/checkpoint_best.pt'
+
+caption_overrides = {"eval_cider": False, "beam": 5, "max_len_b": 22, "no_repeat_ngram_size": 3, "seed": 7, "unnormalized": False,
+                     "bpe_dir": "utils/BPE", "video_model_path": None, "resnet_model_path": None, "audio_model_path": None}
+
+audio_caption_models, audio_caption_cfg, audio_caption_task = checkpoint_utils.load_model_ensemble_and_task(
+    utils.split_paths(checkpoint_path),
+    arg_overrides=caption_overrides
+)
+
 # Load ckpt & config for Refcoco
 checkpoint_path = 'checkpoints/unival_refcocog/checkpoint_best.pt'
@@ -132,6 +168,8 @@ move2gpu(caption_models, caption_cfg)
 move2gpu(refcoco_models, refcoco_cfg)
 move2gpu(vqa_models, vqa_cfg)
 move2gpu(general_models, general_cfg)
+move2gpu(video_caption_models, general_cfg)
+move2gpu(audio_caption_models, general_cfg)
 
 # # Initialize generator
 caption_generator = caption_task.build_generator(caption_models, caption_cfg.generation)
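For reference, fairseq ensembles need the usual inference preparation before decoding; a generic sketch of what a helper like move2gpu typically covers (illustrative only, not the function's actual body):

    # Generic fairseq-style preparation for the new ensembles (illustration).
    for model in video_caption_models + audio_caption_models:
        model.eval()      # disable dropout for deterministic decoding
        if use_fp16:
            model.half()  # matches the apply_half sample preprocessing used later
        if use_cuda:
            model.cuda()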
@@ -141,6 +179,9 @@ vqa_generator.zero_shot = True
 vqa_generator.constraint_trie = None
 general_generator = general_task.build_generator(general_models, general_cfg.generation)
 
+video_caption_generator = caption_task.build_generator(video_caption_models, video_caption_cfg.generation)
+audio_caption_generator = caption_task.build_generator(audio_caption_models, audio_caption_cfg.generation)
+
 # Construct image transforms
 caption_transform = construct_transform(caption_cfg.task.patch_image_size)
 refcoco_transform = construct_transform(refcoco_cfg.task.patch_image_size)
@@ -153,6 +194,97 @@ bos_item = torch.LongTensor([general_task.src_dict.bos()])
 eos_item = torch.LongTensor([general_task.src_dict.eos()])
 pad_idx = general_task.src_dict.pad()
 
+# Video process
+
+mean = [0.5, 0.5, 0.5]
+std = [0.5, 0.5, 0.5]
+
+type_transform = transforms.Lambda(lambda x: x.float().div(255.0))
+patch_video_resize_transform = transforms.Compose([
+    transforms.CenterCrop(video_caption_cfg.task.patch_frame_size),
+    type_transform,
+    transforms.Normalize(mean=mean, std=std),
+])
+
+video_reader = VIDEO_READER_FUNCS['decord']
+
+def process_video(video_path, max_num_frames=16, num_frames=16, sample_type='rand'):
+    # read and subsample frames from the video file
+    data_path = os.path.join(video_path)
+
+    frames, frame_indices, video_duration = video_reader(
+        data_path, num_frames, sample_type, max_num_frames=max_num_frames
+    )
+
+    patch_video = patch_video_resize_transform(frames)
+    patch_video = patch_video.permute(1, 0, 2, 3)  # -> (C, T, h, w)
+
+    return patch_video.unsqueeze(0)
+
+def construct_video_sample(video_path):
+    patch_video = process_video(video_path, max_num_frames=16, num_frames=video_caption_cfg.task.num_frames, sample_type=video_caption_cfg.task.sample_type)
+    patch_image = torch.zeros((3, video_caption_cfg.task.patch_image_size, video_caption_cfg.task.patch_image_size))
+
+    patch_type = torch.tensor([1])
+    patch_mask = torch.tensor([True])
+    src_text = encode_text(" what does the video describe?", append_bos=True, append_eos=True).unsqueeze(0)
+    src_length = torch.LongTensor([s.ne(pad_idx).long().sum() for s in src_text])
+    sample = {
+        "id": np.array(['42']),
+        "net_input": {
+            "src_tokens": src_text,
+            "src_lengths": src_length,
+            "patch_videos": patch_video,
+            "patch_images": patch_image,
+            "patch_masks": patch_mask,
+            "patch_types": patch_type,
+        }
+    }
+    return sample
+
+#####
+
+# audio process
+
+def process_audio(audio_path, sample_rate=48000, max_audio_len=480000, audio_cfg=AUDIO_CFG):
+    # load the waveform and resample it to the model's sample rate
+    audio_data, orig_sr = torchaudio.load(audio_path)
+    audio_data = torchaudio.transforms.Resample(orig_sr, sample_rate)(audio_data[0])
+
+    sample = {}
+    sample = get_audio_features(
+        sample, audio_data, max_audio_len,
+        data_truncating='rand_trunc',
+        data_filling='repeatpad',
+        audio_cfg=audio_cfg
+    )
+
+    patch_audio = sample['waveform']
+
+    return patch_audio.unsqueeze(0)
+
+def construct_audio_sample(audio_path):
+    patch_audio = process_audio(audio_path, sample_rate=48000, max_audio_len=480000, audio_cfg=AUDIO_CFG)
+    patch_image = torch.zeros((3, audio_caption_cfg.task.patch_image_size, audio_caption_cfg.task.patch_image_size))
+
+    patch_type = torch.tensor([2])
+    patch_mask = torch.tensor([True])
+    src_text = encode_text(" what does the audio describe?", append_bos=True, append_eos=True).unsqueeze(0)
+    src_length = torch.LongTensor([s.ne(pad_idx).long().sum() for s in src_text])
+    sample = {
+        "id": np.array(['42']),
+        "net_input": {
+            "src_tokens": src_text,
+            "src_lengths": src_length,
+            "patch_images": patch_image,
+            "patch_audios": patch_audio,
+            "patch_masks": patch_mask,
+            "patch_types": patch_type,
+        }
+    }
+    return sample
+
+#####
 
 def get_symbols_to_strip_from_output(generator):
     if hasattr(generator, "symbols_to_strip_from_output"):
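A minimal end-to-end sketch of how these samples feed a generator ('demo.mp4' is a placeholder path; inference_step is fairseq's standard decoding entry point):

    sample = construct_video_sample('demo.mp4')  # placeholder input file
    sample = utils.move_to_cuda(sample) if use_cuda else sample
    with torch.no_grad():
        hypos = video_caption_task.inference_step(
            video_caption_generator, video_caption_models, sample)
    tokens = hypos[0][0]["tokens"]  # best hypothesis for the single sample in the batch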
@@ -214,7 +346,7 @@ def encode_text(text, length=None, append_bos=False, append_eos=False):
     s = torch.cat([s, eos_item])
     return s
 
-
+# image
 def construct_sample(image: Image, instruction: str, transform):
     patch_image = transform(image).unsqueeze(0)
     patch_mask = torch.tensor([True])
@@ -248,6 +380,18 @@ def inference(image, task_type, instruction):
         instruction = 'what does the image describe?'
         transform = caption_transform
         cfg = caption_cfg
+    elif task_type == 'Video Captioning':
+        task = video_caption_task
+        models = video_caption_models
+        generator = video_caption_generator
+        instruction = 'what does the video describe?'
+        cfg = video_caption_cfg
+    elif task_type == 'Audio Captioning':
+        task = audio_caption_task
+        models = audio_caption_models
+        generator = audio_caption_generator
+        instruction = 'what does the audio describe?'
+        cfg = audio_caption_cfg
     elif task_type == 'Visual Question Answering':
         task = vqa_task
         models = vqa_models
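Each branch binds the same handful of names, so the growing elif chain could equally be table-driven; a hypothetical refactor sketch (not part of this commit), mirroring the captioning branches above:

    # Hypothetical table-driven dispatch; each entry mirrors one elif branch.
    CAPTION_BRANCHES = {
        'Image Captioning': (caption_task, caption_models, caption_generator,
                             'what does the image describe?', caption_cfg),
        'Video Captioning': (video_caption_task, video_caption_models, video_caption_generator,
                             'what does the video describe?', video_caption_cfg),
        'Audio Captioning': (audio_caption_task, audio_caption_models, audio_caption_generator,
                             'what does the audio describe?', audio_caption_cfg),
    }
    task, models, generator, instruction, cfg = CAPTION_BRANCHES[task_type]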
@@ -267,11 +411,22 @@ def inference(image, task_type, instruction):
         generator = general_generator
         transform = general_transform
         cfg = general_cfg
+    elif task_type == 'General Video':
+        task = video_general_task
+        models = video_general_models
+        generator = video_general_generator
+        transform = general_transform
+        cfg = video_general_cfg
     else:
         raise NotImplementedError
 
     # Construct input sample & preprocess for GPU if cuda available
-    sample = construct_sample(image, instruction, transform)
+    if "Video" in task_type:
+        sample = construct_video_sample(video)
+    elif "Audio" in task_type:
+        sample = construct_audio_sample(audio)
+    else:
+        sample = construct_sample(image, instruction, transform)
     sample = utils.move_to_cuda(sample) if use_cuda else sample
     sample = utils.apply_to_sample(apply_half, sample) if use_fp16 else sample
 
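The new branches read video and audio, which are not defined in the hunks shown; given the widget order in the inputs list below, inference presumably receives them as parameters. A hypothetical signature consistent with that order (an assumption; the def line is not part of this diff):

    def inference(image, audio, video, task_type, instruction):
        # hypothetical: parameter order mirrors the Gradio inputs list below
        ...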
@@ -297,7 +452,7 @@ def inference(image, task_type, instruction):
     else:
         return None, tokens
 
-inputs = [gr.inputs.Image(type='pil'), gr.inputs.Radio(choices=['Image Captioning', "Visual Question Answering", "Visual Grounding", "General"], type="value", default="Image Captioning", label="Task"), gr.inputs.Textbox(lines=1, label="Instruction")]
+inputs = [gr.inputs.Image(type='pil'), gr.Audio(source="upload", type="filepath"), gr.Video(source="upload", type="filepath"), gr.inputs.Radio(choices=['Image Captioning', 'Video Captioning', 'Audio Captioning', "Visual Question Answering", "Visual Grounding", "General", "General Video"], type="value", default="Image Captioning", label="Task"), gr.inputs.Textbox(lines=1, label="Instruction")]
 outputs = [gr.outputs.Image(type='pil'), 'text']
 examples = [
     # ['examples/caption/soccer.jpg', 'Image Captioning', None],
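For context, a minimal sketch of the standard Gradio wiring for the objects defined above (the actual launch call sits beyond this excerpt; gr.inputs/gr.outputs here are the legacy pre-3.x style API):

    # Hedged sketch: hand the pieces above to a Gradio Interface and serve it.
    demo = gr.Interface(fn=inference, inputs=inputs, outputs=outputs, examples=examples)
    demo.launch()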