Update app.py
Browse files
app.py
CHANGED
|
@@ -128,15 +128,20 @@ move2gpu(general_models, general_cfg)
|
|
| 128 |
general_generator = general_task.build_generator(general_models, general_cfg.generation)
|
| 129 |
|
| 130 |
# Construct image transforms
|
| 131 |
-
caption_transform = construct_transform(caption_cfg.task.patch_image_size)
|
| 132 |
-
refcoco_transform = construct_transform(refcoco_cfg.task.patch_image_size)
|
| 133 |
-
vqa_transform = construct_transform(vqa_cfg.task.patch_image_size)
|
| 134 |
general_transform = construct_transform(general_cfg.task.patch_image_size)
|
| 135 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 136 |
# Text preprocess
|
| 137 |
-
bos_item = torch.LongTensor([caption_task.src_dict.bos()])
|
| 138 |
-
eos_item = torch.LongTensor([caption_task.src_dict.eos()])
|
| 139 |
-
pad_idx = caption_task.src_dict.pad()
|
| 140 |
|
| 141 |
|
| 142 |
def get_symbols_to_strip_from_output(generator):
|
|
|
|
| 128 |
general_generator = general_task.build_generator(general_models, general_cfg.generation)
|
| 129 |
|
| 130 |
# Construct image transforms
|
| 131 |
+
# caption_transform = construct_transform(caption_cfg.task.patch_image_size)
|
| 132 |
+
# refcoco_transform = construct_transform(refcoco_cfg.task.patch_image_size)
|
| 133 |
+
# vqa_transform = construct_transform(vqa_cfg.task.patch_image_size)
|
| 134 |
general_transform = construct_transform(general_cfg.task.patch_image_size)
|
| 135 |
|
| 136 |
+
# # Text preprocess
|
| 137 |
+
# bos_item = torch.LongTensor([caption_task.src_dict.bos()])
|
| 138 |
+
# eos_item = torch.LongTensor([caption_task.src_dict.eos()])
|
| 139 |
+
# pad_idx = caption_task.src_dict.pad()
|
| 140 |
+
|
| 141 |
# Text preprocess
|
| 142 |
+
bos_item = torch.LongTensor([general_task.src_dict.bos()])
|
| 143 |
+
eos_item = torch.LongTensor([general_task.src_dict.eos()])
|
| 144 |
+
pad_idx = general_task.src_dict.pad()
|
| 145 |
|
| 146 |
|
| 147 |
def get_symbols_to_strip_from_output(generator):
|