python_code | repo_name | file_path
---|---|---
import gzip
import json
import os
from typing import Dict, List, Optional, Union
import attr
from habitat.config import Config
from habitat.core.dataset import Dataset
from habitat.core.registry import registry
from habitat.core.utils import not_none_validator
from habitat.datasets.pointnav.pointnav_dataset import ALL_SCENES_MASK
from habitat.datasets.utils import VocabDict
from habitat.tasks.nav.nav import NavigationGoal
from habitat.tasks.vln.vln import InstructionData, VLNEpisode
import random
random.seed(0)
DEFAULT_SCENE_PATH_PREFIX = "data/scene_datasets/"
ALL_LANGUAGES_MASK = "*"
ALL_ROLES_MASK = "*"
@attr.s(auto_attribs=True)
class ExtendedInstructionData:
instruction_text: str = attr.ib(default=None, validator=not_none_validator)
instruction_id: Optional[str] = attr.ib(default=None)
language: Optional[str] = attr.ib(default=None)
annotator_id: Optional[str] = attr.ib(default=None)
edit_distance: Optional[float] = attr.ib(default=None)
timed_instruction: Optional[List[Dict[str, Union[float, str]]]] = attr.ib(
default=None
)
instruction_tokens: Optional[List[str]] = attr.ib(default=None)
split: Optional[str] = attr.ib(default=None)
@attr.s(auto_attribs=True, kw_only=True)
class VLNExtendedEpisode(VLNEpisode):
goals: Optional[List[NavigationGoal]] = attr.ib(default=None)
reference_path: Optional[List[List[float]]] = attr.ib(default=None)
instruction: ExtendedInstructionData = attr.ib(
default=None, validator=not_none_validator
)
trajectory_id: Optional[Union[int, str]] = attr.ib(default=None)
@registry.register_dataset(name="VLN-CE-v1")
class VLNCEDatasetV1(Dataset):
r"""Class inherited from Dataset that loads a Vision and Language
Navigation dataset.
"""
episodes: List[VLNEpisode]
instruction_vocab: VocabDict
@staticmethod
def check_config_paths_exist(config: Config) -> bool:
return os.path.exists(
config.DATA_PATH.format(split=config.SPLIT)
) and os.path.exists(config.SCENES_DIR)
@staticmethod
def _scene_from_episode(episode: VLNEpisode) -> str:
r"""Helper method to get the scene name from an episode. Assumes
the scene_id is formatted as /path/to/<scene_name>.<ext>
"""
return os.path.splitext(os.path.basename(episode.scene_id))[0]
@classmethod
def get_scenes_to_load(cls, config: Config) -> List[str]:
r"""Return a sorted list of scenes"""
assert cls.check_config_paths_exist(config)
dataset = cls(config)
return sorted(
{cls._scene_from_episode(episode) for episode in dataset.episodes}
)
def __init__(self, config: Optional[Config] = None) -> None:
self.episodes = []
if config is None:
return
dataset_filename = config.DATA_PATH.format(split=config.SPLIT)
with gzip.open(dataset_filename, "rt") as f:
self.from_json(f.read(), scenes_dir=config.SCENES_DIR)
if ALL_SCENES_MASK not in config.CONTENT_SCENES:
scenes_to_load = set(config.CONTENT_SCENES)
self.episodes = [
episode
for episode in self.episodes
if self._scene_from_episode(episode) in scenes_to_load
]
if config.EPISODES_ALLOWED is not None:
ep_ids_before = {ep.episode_id for ep in self.episodes}
ep_ids_to_purge = ep_ids_before - {int(ep_id) for ep_id in config.EPISODES_ALLOWED}
self.episodes = [
episode
for episode in self.episodes
if episode.episode_id not in ep_ids_to_purge
]
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:
deserialized = json.loads(json_str)
self.instruction_vocab = VocabDict(
word_list=deserialized["instruction_vocab"]["word_list"]
)
for episode in deserialized["episodes"]:
episode = VLNExtendedEpisode(**episode)
if scenes_dir is not None:
if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX):
episode.scene_id = episode.scene_id[
len(DEFAULT_SCENE_PATH_PREFIX) :
]
episode.scene_id = os.path.join(scenes_dir, episode.scene_id)
episode.instruction = InstructionData(**episode.instruction)
if episode.goals is not None:
for g_index, goal in enumerate(episode.goals):
episode.goals[g_index] = NavigationGoal(**goal)
self.episodes.append(episode)
random.shuffle(self.episodes)
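# --- Illustrative usage sketch (added for exposition; all values below are hypothetical) ---
# VLNCEDatasetV1 expects a config providing DATA_PATH (with a "{split}"
# placeholder), SPLIT, SCENES_DIR, CONTENT_SCENES and EPISODES_ALLOWED, e.g.:
#
#   config.DATA_PATH        = "data/datasets/R2R_VLNCE_v1/{split}/{split}.json.gz"
#   config.SPLIT            = "val_seen"
#   config.SCENES_DIR       = "data/scene_datasets/"
#   config.CONTENT_SCENES   = ["*"]   # ALL_SCENES_MASK: keep every scene
#   config.EPISODES_ALLOWED = None    # or a list of episode ids to keep
#
#   scenes = VLNCEDatasetV1.get_scenes_to_load(config)
#   dataset = VLNCEDatasetV1(config)  # loads, filters and shuffles the episodes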
@registry.register_dataset(name="RxR-VLN-CE-v1")
class RxRVLNCEDatasetV1(Dataset):
r"""Loads the RxR VLN-CE Dataset."""
episodes: List[VLNEpisode]
instruction_vocab: VocabDict
annotation_roles: List[str] = ["guide", "follower"]
languages: List[str] = ["en-US", "en-IN", "hi-IN", "te-IN"]
@staticmethod
def _scene_from_episode(episode: VLNEpisode) -> str:
r"""Helper method to get the scene name from an episode. Assumes
the scene_id is formatted as /path/to/<scene_name>.<ext>
"""
return os.path.splitext(os.path.basename(episode.scene_id))[0]
@staticmethod
def _language_from_episode(episode: VLNExtendedEpisode) -> str:
return episode.instruction.language
@classmethod
def get_scenes_to_load(cls, config: Config) -> List[str]:
r"""Return a sorted list of scenes"""
assert cls.check_config_paths_exist(config)
dataset = cls(config)
return sorted(
{cls._scene_from_episode(episode) for episode in dataset.episodes}
)
@classmethod
def extract_roles_from_config(cls, config: Config) -> List[str]:
if ALL_ROLES_MASK in config.ROLES:
return cls.annotation_roles
assert set(config.ROLES).issubset(set(cls.annotation_roles))
return config.ROLES
@classmethod
def check_config_paths_exist(cls, config: Config) -> bool:
return all(
os.path.exists(
config.DATA_PATH.format(split=config.SPLIT, role=role)
)
for role in cls.extract_roles_from_config(config)
) and os.path.exists(config.SCENES_DIR)
def __init__(self, config: Optional[Config] = None) -> None:
self.episodes = []
self.config = config
if config is None:
return
for role in self.extract_roles_from_config(config):
with gzip.open(
config.DATA_PATH.format(split=config.SPLIT, role=role), "rt"
) as f:
self.from_json(f.read(), scenes_dir=config.SCENES_DIR)
if ALL_SCENES_MASK not in config.CONTENT_SCENES:
scenes_to_load = set(config.CONTENT_SCENES)
self.episodes = [
episode
for episode in self.episodes
if self._scene_from_episode(episode) in scenes_to_load
]
if ALL_LANGUAGES_MASK not in config.LANGUAGES:
languages_to_load = set(config.LANGUAGES)
self.episodes = [
episode
for episode in self.episodes
if self._language_from_episode(episode) in languages_to_load
]
if config.EPISODES_ALLOWED is not None:
ep_ids_before = {ep.episode_id for ep in self.episodes}
ep_ids_to_purge = ep_ids_before - set(config.EPISODES_ALLOWED)
self.episodes = [
episode
for episode in self.episodes
if episode.episode_id not in ep_ids_to_purge
]
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:
deserialized = json.loads(json_str)
for episode in deserialized["episodes"]:
episode = VLNExtendedEpisode(**episode)
if scenes_dir is not None:
if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX):
episode.scene_id = episode.scene_id[
len(DEFAULT_SCENE_PATH_PREFIX) :
]
episode.scene_id = os.path.join(scenes_dir, episode.scene_id)
episode.instruction = ExtendedInstructionData(
**episode.instruction
)
episode.instruction.split = self.config.SPLIT
if episode.goals is not None:
for g_index, goal in enumerate(episode.goals):
episode.goals[g_index] = NavigationGoal(**goal)
self.episodes.append(episode)
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/task.py |
from typing import Dict, List, Optional, Tuple, Union
import networkx as nx
import numpy as np
from habitat.core.simulator import Simulator
from habitat.core.utils import try_cv2_import
from habitat.tasks.vln.vln import VLNEpisode
from habitat.utils.visualizations import maps as habitat_maps
cv2 = try_cv2_import()
AGENT_SPRITE = habitat_maps.AGENT_SPRITE
MAP_THICKNESS_SCALAR: int = 128
MAP_INVALID_POINT = 0
MAP_VALID_POINT = 1
MAP_BORDER_INDICATOR = 2
MAP_SOURCE_POINT_INDICATOR = 3
MAP_TARGET_POINT_INDICATOR = 4
MAP_MP3D_WAYPOINT = 5
MAP_VIEW_POINT_INDICATOR = 6
MAP_TARGET_BOUNDING_BOX = 7
MAP_REFERENCE_POINT = 8
MAP_MP3D_REFERENCE_PATH = 9
MAP_SHORTEST_PATH_WAYPOINT = 10
TOP_DOWN_MAP_COLORS = np.full((256, 3), 150, dtype=np.uint8)
TOP_DOWN_MAP_COLORS[12:] = cv2.applyColorMap(
np.arange(244, dtype=np.uint8), cv2.COLORMAP_JET
).squeeze(1)[:, ::-1]
TOP_DOWN_MAP_COLORS[MAP_INVALID_POINT] = [255, 255, 255] # White
TOP_DOWN_MAP_COLORS[MAP_VALID_POINT] = [150, 150, 150] # Light Grey
TOP_DOWN_MAP_COLORS[MAP_BORDER_INDICATOR] = [50, 50, 50] # Grey
TOP_DOWN_MAP_COLORS[MAP_SOURCE_POINT_INDICATOR] = [0, 0, 200] # Blue
TOP_DOWN_MAP_COLORS[MAP_TARGET_POINT_INDICATOR] = [200, 0, 0] # Red
TOP_DOWN_MAP_COLORS[MAP_MP3D_WAYPOINT] = [0, 200, 0] # Green
TOP_DOWN_MAP_COLORS[MAP_VIEW_POINT_INDICATOR] = [245, 150, 150] # Light Red
TOP_DOWN_MAP_COLORS[MAP_TARGET_BOUNDING_BOX] = [0, 175, 0] # Dark Green
TOP_DOWN_MAP_COLORS[MAP_REFERENCE_POINT] = [0, 0, 0] # Black
TOP_DOWN_MAP_COLORS[MAP_MP3D_REFERENCE_PATH] = [0, 0, 0] # Black
TOP_DOWN_MAP_COLORS[MAP_SHORTEST_PATH_WAYPOINT] = [0, 150, 0] # Dark Green
def get_top_down_map(sim, map_resolution, meters_per_pixel):
base_height = sim.get_agent(0).state.position[1]
td_map = habitat_maps.get_topdown_map(
sim.pathfinder,
base_height,
map_resolution,
False,
meters_per_pixel,
)
return td_map
def colorize_topdown_map(
top_down_map: np.ndarray,
fog_of_war_mask: Optional[np.ndarray] = None,
fog_of_war_desat_amount: float = 0.5,
) -> np.ndarray:
r"""Same as `maps.colorize_topdown_map` in Habitat-Lab, but with different map
colors.
"""
_map = TOP_DOWN_MAP_COLORS[top_down_map]
if fog_of_war_mask is not None:
fog_of_war_desat_values = np.array([[fog_of_war_desat_amount], [1.0]])
# Only desaturate things that are valid points as only valid points get revealed
desat_mask = top_down_map != MAP_INVALID_POINT
_map[desat_mask] = (
_map * fog_of_war_desat_values[fog_of_war_mask]
).astype(np.uint8)[desat_mask]
return _map
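# --- Indexing sketch (added for exposition) ---
# TOP_DOWN_MAP_COLORS is a 256 x 3 palette, so the colorization above is a
# single fancy-indexing lookup: an (H, W) uint8 map of cell categories becomes
# an (H, W, 3) RGB image. For example:
#   colorize_topdown_map(
#       np.array([[MAP_INVALID_POINT, MAP_VALID_POINT]], dtype=np.uint8)
#   )
#   # -> [[[255, 255, 255], [150, 150, 150]]]  (white, light grey)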
def static_to_grid(
realworld_x: float,
realworld_y: float,
grid_resolution: Tuple[int, int],
bounds: Dict[str, Tuple[float, float]],
):
r"""Return gridworld index of realworld coordinates assuming top-left corner
is the origin. The real world coordinates of lower left corner are
(coordinate_min, coordinate_min) and of top right corner are
(coordinate_max, coordinate_max). Same as the Habitat-Lab maps.to_grid function
but with a static `bounds` instead of requiring a SIM/pathfinder instance.
"""
grid_size = (
abs(bounds["upper"][2] - bounds["lower"][2]) / grid_resolution[0],
abs(bounds["upper"][0] - bounds["lower"][0]) / grid_resolution[1],
)
grid_x = int((realworld_x - bounds["lower"][2]) / grid_size[0])
grid_y = int((realworld_y - bounds["lower"][0]) / grid_size[1])
return grid_x, grid_y
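# --- Worked example (added for exposition; the bounds are made up) ---
# With bounds = {"lower": (-5.0, 0.0, -10.0), "upper": (5.0, 0.0, 10.0)} and
# grid_resolution = (200, 100):
#   grid_size = (|10 - (-10)| / 200, |5 - (-5)| / 100) = (0.1, 0.1)
# so the real-world point (realworld_x=0.0, realworld_y=0.0) maps to
#   grid_x = int((0.0 - (-10.0)) / 0.1) = 100
#   grid_y = int((0.0 - (-5.0)) / 0.1) = 50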
def drawline(
img: np.ndarray,
pt1: Union[Tuple[float], List[float]],
pt2: Union[Tuple[float], List[float]],
color: List[int],
thickness: int = 1,
style: str = "dotted",
gap: int = 15,
) -> None:
"""https://stackoverflow.com/questions/26690932/opencv-rectangle-with-dotted-or-dashed-lines
style: "dotted", "dashed", or "filled"
"""
assert style in ["dotted", "dashed", "filled"]
if style == "filled":
cv2.line(img, pt1, pt2, color, thickness)
return
dist = ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5
pts = []
for i in np.arange(0, dist, gap):
r = i / dist
x = int((pt1[0] * (1 - r) + pt2[0] * r) + 0.5)
y = int((pt1[1] * (1 - r) + pt2[1] * r) + 0.5)
pts.append((x, y))
if style == "dotted":
for p in pts:
cv2.circle(img, p, thickness, color, -1)
else:
s = pts[0]
e = pts[0]
for i, p in enumerate(pts):
s = e
e = p
if i % 2 == 1:
cv2.line(img, s, e, color, thickness)
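# --- Usage sketch (added for exposition) ---
# Draw a dashed red (BGR) line on a blank canvas; style="filled" falls back to
# a plain cv2.line, while "dotted"/"dashed" sample points every `gap` pixels:
#
#   canvas = np.zeros((128, 128, 3), dtype=np.uint8)
#   drawline(canvas, (10, 10), (118, 118), [0, 0, 255],
#            thickness=2, style="dashed", gap=10)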
def drawpoint(
img: np.ndarray,
position: Union[Tuple[int], List[int]],
color: List[int],
meters_per_px: float,
pad: float = 0.3,
) -> None:
point_padding = int(pad / meters_per_px)
img[
position[0] - point_padding : position[0] + point_padding + 1,
position[1] - point_padding : position[1] + point_padding + 1,
] = color
def draw_reference_path(
img: np.ndarray,
sim: Simulator,
episode: VLNEpisode,
map_resolution: int,
meters_per_px: float,
):
r"""Draws lines between each waypoint in the reference path."""
shortest_path_points = [
habitat_maps.to_grid(
p[2],
p[0],
img.shape[0:2],
sim,
)[::-1]
for p in episode.reference_path
]
pt_from = None
for i, pt_to in enumerate(shortest_path_points):
if i != 0:
drawline(
img,
(pt_from[0], pt_from[1]),
(pt_to[0], pt_to[1]),
MAP_SHORTEST_PATH_WAYPOINT,
thickness=int(0.4 * map_resolution / MAP_THICKNESS_SCALAR),
style="dashed",
gap=10,
)
pt_from = pt_to
for pt in shortest_path_points:
drawpoint(
img, (pt[1], pt[0]), MAP_SHORTEST_PATH_WAYPOINT, meters_per_px
)
def draw_straight_shortest_path_points(
img: np.ndarray,
sim: Simulator,
map_resolution: int,
shortest_path_points: List[List[float]],
):
r"""Draws the shortest path from start to goal assuming a standard
discrete action space.
"""
shortest_path_points = [
habitat_maps.to_grid(p[2], p[0], img.shape[0:2], sim)[::-1]
for p in shortest_path_points
]
habitat_maps.draw_path(
img,
[(p[1], p[0]) for p in shortest_path_points],
MAP_SHORTEST_PATH_WAYPOINT,
int(0.4 * map_resolution / MAP_THICKNESS_SCALAR),
)
def draw_source_and_target(
img: np.ndarray, sim: Simulator, episode: VLNEpisode, meters_per_px: float
):
s_x, s_y = habitat_maps.to_grid(
episode.start_position[2],
episode.start_position[0],
img.shape[0:2],
sim,
)
drawpoint(img, (s_x, s_y), MAP_SOURCE_POINT_INDICATOR, meters_per_px)
# mark target point
t_x, t_y = habitat_maps.to_grid(
episode.goals[0].position[2],
episode.goals[0].position[0],
img.shape[0:2],
sim,
)
drawpoint(img, (t_x, t_y), MAP_TARGET_POINT_INDICATOR, meters_per_px)
def get_nearest_node(graph: nx.Graph, current_position: List[float]) -> str:
"""Determine the closest MP3D node to the agent's start position as given
by a [x,z] position vector.
Returns:
node ID
"""
nearest = None
dist = float("inf")
for node in graph:
node_pos = graph.nodes[node]["position"]
node_pos = np.take(node_pos, (0, 2))
cur_dist = np.linalg.norm(
np.array(node_pos) - np.array(current_position), ord=2
)
if cur_dist < dist:
dist = cur_dist
nearest = node
return nearest
def update_nearest_node(
graph: nx.Graph, nearest_node: str, current_position: np.array
) -> str:
"""Determine the closest MP3D node to the agent's current position as
given by a [x,z] position vector. The selected node must be reachable
from the previous MP3D node as specified in the nav-graph edges.
Returns:
node ID
"""
nearest = None
dist = float("inf")
for node in [nearest_node] + [e[1] for e in graph.edges(nearest_node)]:
node_pos = graph.nodes[node]["position"]
node_pos = np.take(node_pos, (0, 2))
cur_dist = np.linalg.norm(
np.array(node_pos) - np.array(current_position), ord=2
)
if cur_dist < dist:
dist = cur_dist
nearest = node
return nearest
def draw_mp3d_nodes(
img: np.ndarray,
sim: Simulator,
episode: VLNEpisode,
graph: nx.Graph,
meters_per_px: float,
):
n = get_nearest_node(
graph, (episode.start_position[0], episode.start_position[2])
)
starting_height = graph.nodes[n]["position"][1]
for node in graph:
pos = graph.nodes[node]["position"]
# no obvious way to differentiate between floors. Use this for now.
if abs(pos[1] - starting_height) < 1.0:
r_x, r_y = habitat_maps.to_grid(
pos[2], pos[0], img.shape[0:2], sim
)
# only paint if over a valid point
if img[r_x, r_y]:
drawpoint(img, (r_x, r_y), MAP_MP3D_WAYPOINT, meters_per_px)
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/maps.py |
from typing import Any, Dict
import numpy as np
from gym import spaces
from habitat.config import Config
from habitat.core.registry import registry
from habitat.core.simulator import Observations, Sensor, SensorTypes, Simulator
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
from habitat_extensions.shortest_path_follower import (
ShortestPathFollowerCompat,
)
from habitat_extensions.task import VLNExtendedEpisode
@registry.register_sensor(name="GlobalGPSSensor")
class GlobalGPSSensor(Sensor):
r"""The agents current location in the global coordinate frame
Args:
sim: reference to the simulator for calculating task observations.
config: Contains the DIMENSIONALITY field for the number of dimensions
to express the agents position
Attributes:
_dimensionality: number of dimensions used to specify the agents position
"""
cls_uuid: str = "globalgps"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._dimensionality = getattr(config, "DIMENSIONALITY", 2)
assert self._dimensionality in [2, 3]
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.POSITION
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(self._dimensionality,),
dtype=np.float32,
)
def get_observation(self, *args: Any, **kwargs: Any):
return self._sim.get_agent_state().position.astype(np.float32)
@registry.register_sensor
class ShortestPathSensor(Sensor):
r"""Sensor for observing the action to take that follows the shortest path
to the goal.
Args:
sim: reference to the simulator for calculating task observations.
config: config for the sensor.
"""
cls_uuid: str = "shortest_path_sensor"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
super().__init__(config=config)
if config.USE_ORIGINAL_FOLLOWER:
self.follower = ShortestPathFollowerCompat(
sim, config.GOAL_RADIUS, return_one_hot=False
)
self.follower.mode = "geodesic_path"
else:
self.follower = ShortestPathFollower(
sim, config.GOAL_RADIUS, return_one_hot=False
)
# self._sim = sim
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.TACTILE
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(low=0.0, high=100, shape=(1,), dtype=float)
def get_observation(self, *args: Any, episode, **kwargs: Any):
best_action = self.follower.get_next_action(episode.goals[0].position)
return np.array(
[
best_action
if best_action is not None
else HabitatSimActions.STOP
]
)
@registry.register_sensor
class VLNOracleProgressSensor(Sensor):
r"""Sensor for observing how much progress has been made towards the goal.
Args:
sim: reference to the simulator for calculating task observations.
config: config for the sensor.
"""
cls_uuid: str = "progress"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
# TODO: what is the correct sensor type?
return SensorTypes.MEASUREMENT
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=float)
def get_observation(
self, observations, *args: Any, episode, **kwargs: Any
):
current_position = self._sim.get_agent_state().position.tolist()
distance_to_target = self._sim.geodesic_distance(
current_position, episode.goals[0].position
)
if "geodesic_distance" not in episode.info.keys():
distance_from_start = self._sim.geodesic_distance(
episode.start_position, episode.goals[0].position
)
episode.info["geodesic_distance"] = distance_from_start
distance_from_start = episode.info["geodesic_distance"]
progress = (distance_from_start - distance_to_target) / distance_from_start
return np.array(progress, dtype=np.float32)
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/sensors.py |
from habitat_extensions import measures, obs_transformers, sensors, nav
from habitat_extensions.config.default import get_extended_config
from habitat_extensions.task import VLNCEDatasetV1
from habitat_extensions.habitat_simulator import Simulator
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/__init__.py |
import copy
import numbers
from typing import Dict, List, Tuple, Union
import torch
from gym import spaces
from habitat.config import Config
from habitat.core.logging import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.obs_transformers import ObservationTransformer
from habitat_baselines.utils.common import (
center_crop,
get_image_height_width,
overwrite_gym_box_shape,
)
from torch import Tensor
@baseline_registry.register_obs_transformer()
class CenterCropperPerSensor(ObservationTransformer):
"""An observation transformer that center crops your input on a per-sensor basis."""
sensor_crops: Dict[str, Union[int, Tuple[int, int]]]
channels_last: bool
def __init__(
self,
sensor_crops: List[Tuple[str, Union[int, Tuple[int, int]]]],
channels_last: bool = True,
):
"""Args:
size: A sequence (h, w) or int of the size you wish to resize/center_crop.
If int, assumes square crop
channels_list: indicates if channels is the last dimension
trans_keys: The list of sensors it will try to centercrop.
"""
super().__init__()
self.sensor_crops = dict(sensor_crops)
for k in self.sensor_crops:
size = self.sensor_crops[k]
if isinstance(size, numbers.Number):
self.sensor_crops[k] = (int(size), int(size))
assert len(size) == 2, "forced input size must be len of 2 (h, w)"
self.channels_last = channels_last
def transform_observation_space(
self,
observation_space: spaces.Dict,
):
observation_space = copy.deepcopy(observation_space)
for key in observation_space.spaces:
if (
key in self.sensor_crops
and observation_space.spaces[key].shape[-3:-1]
!= self.sensor_crops[key]
):
h, w = get_image_height_width(
observation_space.spaces[key], channels_last=True
)
logger.info(
"Center cropping observation size of %s from %s to %s"
% (key, (h, w), self.sensor_crops[key])
)
observation_space.spaces[key] = overwrite_gym_box_shape(
observation_space.spaces[key], self.sensor_crops[key]
)
return observation_space
@torch.no_grad()
def forward(self, observations: Dict[str, Tensor]) -> Dict[str, Tensor]:
observations.update(
{
sensor: center_crop(
observations[sensor],
self.sensor_crops[sensor],
channels_last=self.channels_last,
)
for sensor in self.sensor_crops
if sensor in observations
}
)
return observations
@classmethod
def from_config(cls, config: Config):
cc_config = config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR
return cls(cc_config.SENSOR_CROPS)
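# --- Configuration sketch (added for exposition; values are illustrative) ---
# SENSOR_CROPS is a list of (sensor_uuid, size) pairs; an int is expanded to a
# square (size, size) crop while an (h, w) tuple is used as-is, e.g.:
#
#   RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = [
#       ("rgb", 224),
#       ("depth", (256, 256)),
#   ]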
@baseline_registry.register_obs_transformer()
class ResizerPerSensor(ObservationTransformer):
r"""An nn module the resizes images to any aspect ratio.
This module assumes that all images in the batch are of the same size.
"""
def __init__(
self,
sizes: int,
channels_last: bool = True,
trans_keys: Tuple[str] = ("rgb", "depth", "semantic"),
):
super().__init__()
"""Args:
size: The size you want to resize
channels_last: indicates if channels is the last dimension
"""
self.sensor_resizes = dict(sizes)
for k in self.sensor_resizes:
size = self.sensor_resizes[k]
if isinstance(size, numbers.Number):
self.sensor_resizes[k] = (int(size), int(size))
assert len(size) == 2, "forced input size must be len of 2 (h, w)"
self.channels_last = channels_last
def transform_observation_space(
self,
observation_space: spaces.Dict,
):
for key in observation_space.spaces:
if (
key in self.sensor_resizes
and observation_space.spaces[key].shape[-3:-1]
!= self.sensor_resizes[key]
):
h, w = get_image_height_width(
observation_space.spaces[key], channels_last=True
)
logger.info(
"Resizing observation size of %s from %s to %s"
% (key, (h, w), self.sensor_resizes[key])
)
observation_space.spaces[key] = overwrite_gym_box_shape(
observation_space.spaces[key], self.sensor_resizes[key]
)
return observation_space
def _transform_obs(self, obs: torch.Tensor, size) -> torch.Tensor:
img = torch.as_tensor(obs)
no_batch_dim = len(img.shape) == 3
if len(img.shape) < 3 or len(img.shape) > 5:
raise NotImplementedError()
if no_batch_dim:
img = img.unsqueeze(0) # Adds a batch dimension
h, w = get_image_height_width(img, channels_last=self.channels_last)
if self.channels_last:
if len(img.shape) == 4:
# NHWC -> NCHW
img = img.permute(0, 3, 1, 2)
else:
# NDHWC -> NDCHW
img = img.permute(0, 1, 4, 2, 3)
h, w = size
img = torch.nn.functional.interpolate(
img.float(), size=(h, w), mode="area"
).to(dtype=img.dtype)
if self.channels_last:
if len(img.shape) == 4:
# NCHW -> NHWC
img = img.permute(0, 2, 3, 1)
else:
# NDCHW -> NDHWC
img = img.permute(0, 1, 3, 4, 2)
if no_batch_dim:
img = img.squeeze(dim=0) # Removes the batch dimension
return img
@torch.no_grad()
def forward(
self, observations: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
observations.update(
{
sensor: self._transform_obs(
observations[sensor], self.sensor_resizes[sensor])
for sensor in self.sensor_resizes
if sensor in observations
}
)
return observations
@classmethod
def from_config(cls, config: Config):
r_config = config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR
return cls(r_config.SIZES)
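# --- Shape-handling sketch (added for exposition) ---
# _transform_obs accepts channels-last images with or without a batch
# dimension: a single (H, W, C) frame is temporarily promoted to NCHW for
# torch.nn.functional.interpolate(mode="area") and then restored, e.g.:
#
#   resizer = ResizerPerSensor(sizes=[("rgb", (224, 224))])
#   frame = torch.zeros(480, 640, 3, dtype=torch.uint8)  # H, W, C
#   out = resizer({"rgb": frame})["rgb"]                  # -> shape (224, 224, 3)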
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/obs_transformers.py |
import gzip
import json
import pickle
from typing import Any, List, Union
import numpy as np
from dtw import dtw
from fastdtw import fastdtw
from habitat.config import Config
from habitat.core.embodied_task import EmbodiedTask, Measure
from habitat.core.registry import registry
from habitat.core.simulator import Simulator
from habitat.tasks.nav.nav import DistanceToGoal, Success
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat.utils.visualizations import fog_of_war
from habitat.utils.visualizations import maps as habitat_maps
from habitat_extensions import maps
from habitat_extensions.task import RxRVLNCEDatasetV1
@registry.register_measure
class Position(Measure):
r"""Path Length (PL)
PL = sum(geodesic_distance(agent_prev_position, agent_position)
over all agent positions.
"""
cls_uuid: str = "position"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__(**kwargs)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, episode, *args: Any, **kwargs: Any):
self._metric = {'distance':[], 'position':[]}
self.update_metric(episode)
def update_metric(self, episode, *args: Any, **kwargs: Any):
current_position = self._sim.get_agent_state().position
if len(self._metric['position']) > 0:
if (current_position == self._metric['position'][-1]).all():
return
distance = self._sim.geodesic_distance(
current_position,
[goal.position for goal in episode.goals],
episode,
)
self._metric['position'].append(self._sim.get_agent_state().position)
self._metric['distance'].append(distance)
@registry.register_measure
class PathLength(Measure):
r"""Path Length (PL)
PL = sum(euclidean_distance(agent_prev_position, agent_position))
over all consecutive agent positions.
"""
cls_uuid: str = "path_length"
@staticmethod
def euclidean_distance(
position_a: np.ndarray, position_b: np.ndarray
) -> float:
return np.linalg.norm(position_b - position_a, ord=2)
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__(**kwargs)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, *args: Any, **kwargs: Any):
self._previous_position = self._sim.get_agent_state().position
self._metric = 0.0
def update_metric(self, *args: Any, **kwargs: Any):
current_position = self._sim.get_agent_state().position
self._metric += self.euclidean_distance(
current_position, self._previous_position
)
self._previous_position = current_position
@registry.register_measure
class OracleNavigationError(Measure):
r"""Oracle Navigation Error (ONE)
ONE = min(geodesic_distance(agent_pos, goal))
over all locations in the agent's path.
"""
cls_uuid: str = "oracle_navigation_error"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
task.measurements.check_measure_dependencies(
self.uuid, [DistanceToGoal.cls_uuid]
)
self._metric = float("inf")
self.update_metric(episode, task)
def update_metric(self, episode, task: EmbodiedTask, **kwargs: Any):
distance_to_target = task.measurements.measures[
DistanceToGoal.cls_uuid
].get_metric()
self._metric = min(self._metric, distance_to_target)
@registry.register_measure
class OracleSuccess(Measure):
r"""Oracle Success Rate (OSR)
OSR = I(ONE <= goal_radius),
where ONE is Oracle Navigation Error.
"""
cls_uuid: str = "oracle_success"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
task.measurements.check_measure_dependencies(
self.uuid, [DistanceToGoal.cls_uuid]
)
self._metric = 0
self.update_metric(episode, task)
def update_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
d = task.measurements.measures[DistanceToGoal.cls_uuid].get_metric()
self._metric = float(self._metric or d < self._config.SUCCESS_DISTANCE)
@registry.register_measure
class OracleSPL(Measure):
r"""OracleSPL (Oracle Success weighted by Path Length)
OracleSPL = max(SPL) over all points in the agent path
"""
cls_uuid: str = "oracle_spl"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(
self, *args: Any, episode, task: EmbodiedTask, **kwargs: Any
):
task.measurements.check_measure_dependencies(self.uuid, ["spl"])
self._metric = 0.0
def update_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
spl = task.measurements.measures["spl"].get_metric()
self._metric = max(self._metric, spl)
@registry.register_measure
class StepsTaken(Measure):
r"""Counts the number of times update_metric() is called. This is equal to
the number of times that the agent takes an action. STOP counts as an
action.
"""
cls_uuid: str = "steps_taken"
def __init__(self, *args: Any, **kwargs: Any):
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, *args: Any, **kwargs: Any):
self._metric = 0.0
def update_metric(self, *args: Any, **kwargs: Any):
self._metric += 1.0
@registry.register_measure
class NDTW(Measure):
r"""NDTW (Normalized Dynamic Time Warping)
ref: Effective and General Evaluation for Instruction
Conditioned Navigation using Dynamic Time
Warping - Magalhaes et al.
https://arxiv.org/pdf/1907.05446.pdf
"""
cls_uuid: str = "ndtw"
@staticmethod
def euclidean_distance(
position_a: Union[List[float], np.ndarray],
position_b: Union[List[float], np.ndarray],
) -> float:
return np.linalg.norm(
np.array(position_b) - np.array(position_a), ord=2
)
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
self.dtw_func = fastdtw if config.FDTW else dtw
if "{role}" in config.GT_PATH:
self.gt_json = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
with gzip.open(
config.GT_PATH.format(split=config.SPLIT, role=role), "rt"
) as f:
self.gt_json.update(json.load(f))
else:
with gzip.open(
config.GT_PATH.format(split=config.SPLIT), "rt"
) as f:
self.gt_json = json.load(f)
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, episode, *args: Any, **kwargs: Any):
self.locations = []
self.gt_locations = self.gt_json[str(episode.episode_id)]["locations"]
self.update_metric()
def update_metric(self, *args: Any, **kwargs: Any):
current_position = self._sim.get_agent_state().position.tolist()
if len(self.locations) == 0:
self.locations.append(current_position)
else:
if current_position == self.locations[-1]:
return
self.locations.append(current_position)
dtw_distance = self.dtw_func(
self.locations, self.gt_locations, dist=self.euclidean_distance
)[0]
nDTW = np.exp(
-dtw_distance
/ (len(self.gt_locations) * self._config.SUCCESS_DISTANCE)
)
self._metric = nDTW
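# --- Worked example (added for exposition; the numbers are hypothetical) ---
# nDTW = exp(-DTW(agent_path, gt_path) / (|gt_path| * d_th)), where d_th is the
# configured SUCCESS_DISTANCE. With a DTW cost of 6.0, a ground-truth path of
# 10 locations and d_th = 3.0:
#   nDTW = exp(-6.0 / (10 * 3.0)) = exp(-0.2) ≈ 0.82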
@registry.register_measure
class SDTW(Measure):
r"""SDTW (Success Weighted be nDTW)
ref: Effective and General Evaluation for Instruction
Conditioned Navigation using Dynamic Time
Warping - Magalhaes et al.
https://arxiv.org/pdf/1907.05446.pdf
"""
cls_uuid: str = "sdtw"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, episode, task, *args: Any, **kwargs: Any):
task.measurements.check_measure_dependencies(
self.uuid, [NDTW.cls_uuid, Success.cls_uuid]
)
self.update_metric(episode, task)
def update_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
ep_success = task.measurements.measures[Success.cls_uuid].get_metric()
nDTW = task.measurements.measures[NDTW.cls_uuid].get_metric()
self._metric = ep_success * nDTW
@registry.register_measure
class TopDownMapVLNCE(Measure):
r"""A top down map that optionally shows VLN-related visual information
such as MP3D node locations and MP3D agent traversals.
"""
cls_uuid: str = "top_down_map_vlnce"
def __init__(
self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
):
self._sim = sim
self._config = config
with open(self._config.GRAPHS_FILE, "rb") as f:
self._conn_graphs = pickle.load(f)
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def get_original_map(self):
habitat_maps.get_topdown_map_from_sim
top_down_map = maps.get_top_down_map(
self._sim,
self._config.MAP_RESOLUTION,
self._meters_per_pixel,
)
if self._config.FOG_OF_WAR.DRAW:
self._fog_of_war_mask = np.zeros_like(top_down_map)
else:
self._fog_of_war_mask = None
return top_down_map
def reset_metric(self, *args: Any, episode, **kwargs: Any):
self._scene_id = episode.scene_id.split("/")[-2]
self._step_count = 0
self._metric = None
self._meters_per_pixel = habitat_maps.calculate_meters_per_pixel(
self._config.MAP_RESOLUTION, self._sim
)
self._top_down_map = self.get_original_map()
agent_position = self._sim.get_agent_state().position
scene_id = episode.scene_id.split("/")[-1].split(".")[0]
a_x, a_y = habitat_maps.to_grid(
agent_position[2],
agent_position[0],
self._top_down_map.shape[0:2],
sim=self._sim,
)
self._previous_xy_location = (a_y, a_x)
if self._config.FOG_OF_WAR.DRAW:
self._fog_of_war_mask = fog_of_war.reveal_fog_of_war(
self._top_down_map,
self._fog_of_war_mask,
np.array([a_x, a_y]),
self.get_polar_angle(),
fov=self._config.FOG_OF_WAR.FOV,
max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST
/ habitat_maps.calculate_meters_per_pixel(
self._config.MAP_RESOLUTION, sim=self._sim
),
)
if self._config.DRAW_FIXED_WAYPOINTS:
maps.draw_mp3d_nodes(
self._top_down_map,
self._sim,
episode,
self._conn_graphs[scene_id],
self._meters_per_pixel,
)
if self._config.DRAW_SHORTEST_PATH:
shortest_path_points = self._sim.get_straight_shortest_path_points(
agent_position, episode.goals[0].position
)
maps.draw_straight_shortest_path_points(
self._top_down_map,
self._sim,
self._config.MAP_RESOLUTION,
shortest_path_points,
)
if self._config.DRAW_REFERENCE_PATH:
maps.draw_reference_path(
self._top_down_map,
self._sim,
episode,
self._config.MAP_RESOLUTION,
self._meters_per_pixel,
)
# draw source and target points last to avoid overlap
if self._config.DRAW_SOURCE_AND_TARGET:
maps.draw_source_and_target(
self._top_down_map,
self._sim,
episode,
self._meters_per_pixel,
)
# MP3D START NODE
self._nearest_node = maps.get_nearest_node(
self._conn_graphs[scene_id], np.take(agent_position, (0, 2))
)
nn_position = self._conn_graphs[self._scene_id].nodes[
self._nearest_node
]["position"]
self.s_x, self.s_y = habitat_maps.to_grid(
nn_position[2],
nn_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
self.update_metric(episode, action=None)
def update_metric(self, *args: Any, **kwargs: Any):
self._step_count += 1
(
house_map,
map_agent_pos,
) = self.update_map(self._sim.get_agent_state().position)
self._metric = {
"map": house_map,
"fog_of_war_mask": self._fog_of_war_mask,
"agent_map_coord": map_agent_pos,
"agent_angle": self.get_polar_angle(),
"bounds": {
k: v
for k, v in zip(
["lower", "upper"],
self._sim.pathfinder.get_bounds(),
)
},
"meters_per_px": self._meters_per_pixel,
}
def get_polar_angle(self):
agent_state = self._sim.get_agent_state()
# quaternion is in x, y, z, w format
ref_rotation = agent_state.rotation
heading_vector = quaternion_rotate_vector(
ref_rotation.inverse(), np.array([0, 0, -1])
)
phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
z_neg_z_flip = np.pi
return np.array(phi) + z_neg_z_flip
def update_map(self, agent_position):
a_x, a_y = habitat_maps.to_grid(
agent_position[2],
agent_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
# Don't draw over the source point
gradient_color = 15 + min(
self._step_count * 245 // self._config.MAX_EPISODE_STEPS, 245
)
if self._top_down_map[a_x, a_y] != maps.MAP_SOURCE_POINT_INDICATOR:
maps.drawline(
self._top_down_map,
self._previous_xy_location,
(a_y, a_x),
gradient_color,
thickness=int(
self._config.MAP_RESOLUTION
* 1.4
/ maps.MAP_THICKNESS_SCALAR
),
style="filled",
)
if self._config.FOG_OF_WAR.DRAW:
self._fog_of_war_mask = fog_of_war.reveal_fog_of_war(
self._top_down_map,
self._fog_of_war_mask,
np.array([a_x, a_y]),
self.get_polar_angle(),
self._config.FOG_OF_WAR.FOV,
max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST
/ habitat_maps.calculate_meters_per_pixel(
self._config.MAP_RESOLUTION, sim=self._sim
),
)
point_padding = int(0.2 / self._meters_per_pixel)
prev_nearest_node = self._nearest_node
self._nearest_node = maps.update_nearest_node(
self._conn_graphs[self._scene_id],
self._nearest_node,
np.take(agent_position, (0, 2)),
)
if (
self._nearest_node != prev_nearest_node
and self._config.DRAW_MP3D_AGENT_PATH
):
nn_position = self._conn_graphs[self._scene_id].nodes[
self._nearest_node
]["position"]
(prev_s_x, prev_s_y) = (self.s_x, self.s_y)
self.s_x, self.s_y = habitat_maps.to_grid(
nn_position[2],
nn_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
self._top_down_map[
self.s_x
- int(2.0 / 3.0 * point_padding) : self.s_x
+ int(2.0 / 3.0 * point_padding)
+ 1,
self.s_y
- int(2.0 / 3.0 * point_padding) : self.s_y
+ int(2.0 / 3.0 * point_padding)
+ 1,
] = gradient_color
maps.drawline(
self._top_down_map,
(prev_s_y, prev_s_x),
(self.s_y, self.s_x),
gradient_color,
thickness=int(
1.0
/ 2.0
* np.round(
self._config.MAP_RESOLUTION / maps.MAP_THICKNESS_SCALAR
)
),
)
self._previous_xy_location = (a_y, a_x)
map_agent_pos = (a_x, a_y)
return self._top_down_map, map_agent_pos
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/measures.py |
from typing import Dict
import numpy as np
from habitat.core.utils import try_cv2_import
from habitat.utils.visualizations import maps as habitat_maps
from habitat.utils.visualizations.utils import draw_collision
from habitat_extensions import maps
cv2 = try_cv2_import()
def observations_to_image(observation: Dict, info: Dict) -> np.ndarray:
r"""Generate image of single frame from observation and info
returned from a single environment step().
Args:
observation: observation returned from an environment step().
info: info returned from an environment step().
Returns:
generated image of a single frame.
"""
egocentric_view = []
observation_size = -1
if "rgb" in observation:
observation_size = observation["rgb"].shape[0]
rgb = observation["rgb"][:, :, :3]
egocentric_view.append(rgb)
# draw depth map if observation has depth info. resize to rgb size.
if "depth" in observation:
if observation_size == -1:
observation_size = observation["depth"].shape[0]
depth_map = (observation["depth"].squeeze() * 255).astype(np.uint8)
depth_map = np.stack([depth_map for _ in range(3)], axis=2)
depth_map = cv2.resize(
depth_map,
dsize=(observation_size, observation_size),
interpolation=cv2.INTER_CUBIC,
)
egocentric_view.append(depth_map)
assert (
len(egocentric_view) > 0
), "Expected at least one visual sensor enabled."
egocentric_view = np.concatenate(egocentric_view, axis=1)
# draw collision
if "collisions" in info and info["collisions"]["is_collision"]:
egocentric_view = draw_collision(egocentric_view)
frame = egocentric_view
map_k = None
if "top_down_map_vlnce" in info:
map_k = "top_down_map_vlnce"
elif "top_down_map" in info:
map_k = "top_down_map"
if map_k is not None:
td_map = info[map_k]["map"]
td_map = maps.colorize_topdown_map(
td_map,
info[map_k]["fog_of_war_mask"],
fog_of_war_desat_amount=0.75,
)
td_map = habitat_maps.draw_agent(
image=td_map,
agent_center_coord=info[map_k]["agent_map_coord"],
agent_rotation=info[map_k]["agent_angle"],
agent_radius_px=min(td_map.shape[0:2]) // 24,
)
if td_map.shape[1] < td_map.shape[0]:
td_map = np.rot90(td_map, 1)
if td_map.shape[0] > td_map.shape[1]:
td_map = np.rot90(td_map, 1)
# scale top down map to align with rgb view
old_h, old_w, _ = td_map.shape
top_down_height = observation_size
top_down_width = int(float(top_down_height) / old_h * old_w)
# cv2 resize (dsize is width first)
td_map = cv2.resize(
td_map,
(top_down_width, top_down_height),
interpolation=cv2.INTER_CUBIC,
)
frame = np.concatenate((egocentric_view, td_map), axis=1)
return frame
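# --- Layout sketch (added for exposition) ---
# With RGB and depth enabled plus a top-down map in `info`, the returned frame
# is a horizontal concatenation:
#   [ rgb | depth (tiled to 3 channels, resized to a square of the rgb height)
#     | top-down map (rescaled to the same height) ]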
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/utils.py |
from typing import Any
import math
import numpy as np
from habitat.core.embodied_task import (
SimulatorTaskAction,
)
from habitat.core.registry import registry
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
@registry.register_task_action
class MoveHighToLowAction(SimulatorTaskAction):
def turn(self, angle):
''' angle: 0 ~ 360 degree '''
left_action = HabitatSimActions.TURN_LEFT
right_action = HabitatSimActions.TURN_RIGHT
turn_unit = self._sim.get_agent(0).agent_config.action_space[left_action].actuation.amount
states = []
if 180 < angle <= 360:
angle -= 360
if angle >=0:
turn_actions = [left_action] * int(angle // turn_unit)
else:
turn_actions = [right_action] * int(-angle // turn_unit)
for turn_action in turn_actions:
self._sim.step_without_obs(turn_action)
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
return states
def step(self, *args: Any,
angle: float, distance: float,
**kwargs: Any):
r"""This control method is called from ``Env`` on each ``step``.
"""
init_state = self._sim.get_agent_state()
forward_action = HabitatSimActions.MOVE_FORWARD
init_forward = self._sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
angle = math.degrees(angle)
states = self.turn(angle)
states.append((init_state.position,rotation))
self._sim.set_agent_state(init_state.position, rotation)
ksteps = int(distance//init_forward)
for k in range(ksteps):
if k == ksteps - 1:
output = self._sim.step(forward_action)
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
else:
self._sim.step_without_obs(forward_action)
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
output['states'] = states
return output
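# --- Worked example (added for exposition; turn_unit depends on the sim config) ---
# turn() maps a waypoint heading in [0, 360) degrees onto repeated discrete
# TURN_LEFT / TURN_RIGHT actions of `turn_unit` degrees each; headings above
# 180 are wrapped to the negative side so the agent takes the shorter turn:
#   angle = 90,  turn_unit = 15  ->  6 x TURN_LEFT
#   angle = 270, turn_unit = 15  ->  wrapped to -90  ->  6 x TURN_RIGHT
# step() rebuilds the continuous heading as a y-axis quaternion
#   rotation = quaternion(cos(theta), 0, sin(theta), 0), with
#   theta = arctan2(q.imag[1], q.real) + angle / 2   (angle still in radians here)
# and then takes int(distance // forward_step_size) MOVE_FORWARD steps.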
@registry.register_task_action
class MoveHighToLowActionEval(SimulatorTaskAction):
def turn(self, angle):
''' angle: 0 ~ 360 degree '''
left_action = HabitatSimActions.TURN_LEFT
right_action = HabitatSimActions.TURN_RIGHT
turn_unit = self._sim.get_agent(0).agent_config.action_space[left_action].actuation.amount
states = []
if 180 < angle <= 360:
angle -= 360
if angle >=0:
turn_actions = [left_action] * int(angle // turn_unit)
else:
turn_actions = [right_action] * int(-angle // turn_unit)
for turn_action in turn_actions:
self._sim.step_without_obs(turn_action)
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
return states
def step(self, *args: Any,
angle: float, distance: float,
**kwargs: Any):
r"""This control method is called from ``Env`` on each ``step``.
"""
init_state = self._sim.get_agent_state()
positions = []
collisions = []
forward_action = HabitatSimActions.MOVE_FORWARD
init_forward = self._sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
angle = math.degrees(angle)
states = self.turn(angle)
states.append((init_state.position,rotation))
self._sim.set_agent_state(init_state.position, rotation)
ksteps = int(distance//init_forward)
for k in range(ksteps):
if k == ksteps - 1:
output = self._sim.step(forward_action)
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
else:
self._sim.step_without_obs(forward_action)
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
positions.append(self._sim.get_agent_state().position)
collisions.append(self._sim.previous_step_collided)
output['positions'] = positions
output['collisions'] = collisions
output['states'] = states
return output
@registry.register_task_action
class MoveHighToLowActionInference(SimulatorTaskAction):
def turn(self, angle):
''' angle: 0 ~ 360 degree '''
left_action = HabitatSimActions.TURN_LEFT
right_action = HabitatSimActions.TURN_RIGHT
turn_unit = self._sim.get_agent(0).agent_config.action_space[left_action].actuation.amount
states = []
if 180 < angle <= 360:
angle -= 360
if angle >=0:
turn_actions = [left_action] * int(angle // turn_unit)
else:
turn_actions = [right_action] * int(-angle // turn_unit)
for turn_action in turn_actions:
self._sim.step_without_obs(turn_action)
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
return states
def step(self, *args: Any,
angle: float, distance: float,
**kwargs: Any):
r"""This control method is called from ``Env`` on each ``step``.
"""
init_state = self._sim.get_agent_state()
cur_path = []
forward_action = HabitatSimActions.MOVE_FORWARD
init_forward = self._sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
angle = math.degrees(angle)
states = self.turn(angle)
states.append((init_state.position,rotation))
self._sim.set_agent_state(init_state.position, rotation)
ksteps = int(distance//init_forward)
for k in range(ksteps):
if k == ksteps - 1:
output = self._sim.step(forward_action)
cur_path.append(self.get_agent_info())
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
else:
self._sim.step_without_obs(forward_action)
cur_path.append(self.get_agent_info())
state = self._sim.get_agent_state()
states.append((state.position,state.rotation))
output['cur_path'] = cur_path
output['states'] = states
return output
def get_agent_info(self):
agent_state = self._sim.get_agent_state()
heading_vector = quaternion_rotate_vector(
agent_state.rotation.inverse(), np.array([0, 0, -1])
)
heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
return {
"position": agent_state.position.tolist(),
"heading": heading,
"stop": False,
}
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/nav.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Union,
cast,
)
import numpy as np
from gym import spaces
from gym.spaces.box import Box
from numpy import ndarray
if TYPE_CHECKING:
from torch import Tensor
import habitat_sim
from habitat_sim.simulator import MutableMapping, MutableMapping_T
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.core.dataset import Episode
from habitat.core.registry import registry
from habitat.core.simulator import (
AgentState,
Config,
DepthSensor,
Observations,
RGBSensor,
SemanticSensor,
Sensor,
SensorSuite,
ShortestPathPoint,
Simulator,
VisualObservation,
)
from habitat.core.spaces import Space
from collections import OrderedDict
# inherit habitat-lab/habitat/sims/habitat_simulator/habitat_simulator.py
@registry.register_simulator(name="Sim-v1")
class Simulator(HabitatSim):
r"""Simulator wrapper over habitat-sim
habitat-sim repo: https://github.com/facebookresearch/habitat-sim
Args:
config: configuration for initializing the simulator.
"""
def __init__(self, config: Config) -> None:
super().__init__(config)
def step_without_obs(self,
action: Union[str, int, MutableMapping_T[int, Union[str, int]]],
dt: float = 1.0 / 60.0,):
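# Descriptive note (added for exposition): this mirrors HabitatSim.step() for
# the agent/action bookkeeping but skips sensor rendering entirely; only the
# per-agent "collided" flag is recorded in self._prev_sim_obs, which keeps the
# intermediate sub-steps of the custom high-to-low navigation actions cheap.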
self._num_total_frames += 1
if isinstance(action, MutableMapping):
return_single = False
else:
action = cast(Dict[int, Union[str, int]], {self._default_agent_id: action})
return_single = True
collided_dict: Dict[int, bool] = {}
for agent_id, agent_act in action.items():
agent = self.get_agent(agent_id)
collided_dict[agent_id] = agent.act(agent_act)
self.__last_state[agent_id] = agent.get_state()
# # step physics by dt
# step_start_Time = time.time()
# super().step_world(dt)
# self._previous_step_time = time.time() - step_start_Time
multi_observations = {}
for agent_id in action.keys():
agent_observation = {}
agent_observation["collided"] = collided_dict[agent_id]
multi_observations[agent_id] = agent_observation
if return_single:
sim_obs = multi_observations[self._default_agent_id]
else:
sim_obs = multi_observations
self._prev_sim_obs = sim_obs
def step_with_specific_sensors(self,
sensors,
action: Union[str, int, MutableMapping_T[int, Union[str, int]]],
dt: float = 1.0 / 60.0,):
self._num_total_frames += 1
if isinstance(action, MutableMapping):
return_single = False
else:
action = cast(Dict[int, Union[str, int]], {self._default_agent_id: action})
return_single = True
collided_dict: Dict[int, bool] = {}
for agent_id, agent_act in action.items():
agent = self.get_agent(agent_id)
collided_dict[agent_id] = agent.act(agent_act)
self.__last_state[agent_id] = agent.get_state()
# # step physics by dt
# step_start_Time = time.time()
# super().step_world(dt)
# self._previous_step_time = time.time() - step_start_Time
multi_observations = self.get_specific_sensors_observations(sensors = sensors)
for agent_id in action.keys():
agent_observation = {}
agent_observation["collided"] = collided_dict[agent_id]
multi_observations[agent_id] = agent_observation
if return_single:
sim_obs = multi_observations[self._default_agent_id]
else:
sim_obs = multi_observations
self._prev_sim_obs = sim_obs
return multi_observations
def get_specific_sensors_observations(
self, sensors, agent_ids: Union[int, List[int]] = 0,
) -> Union[
Dict[str, Union[ndarray, "Tensor"]],
Dict[int, Dict[str, Union[ndarray, "Tensor"]]],
]:
if isinstance(agent_ids, int):
agent_ids = [agent_ids]
return_single = True
else:
return_single = False
for agent_id in agent_ids:
agent_sensorsuite = sensors[agent_id]
for _sensor_uuid, sensor in agent_sensorsuite.items():
sensor.draw_observation()
# As backport. All Dicts are ordered in Python >= 3.7
observations: Dict[int, Dict[str, Union[ndarray, "Tensor"]]] = OrderedDict()
for agent_id in agent_ids:
agent_observations: Dict[str, Union[ndarray, "Tensor"]] = {}
for sensor_uuid, sensor in sensors[agent_id].items():
agent_observations[sensor_uuid] = sensor.get_observation()
observations[agent_id] = agent_observations
if return_single:
return next(iter(observations.values()))
return observations
# def render_specific_sensors(self, sensors, mode: str = "rgb") -> Any:
# r"""
# Args:
# mode: sensor whose observation is used for returning the frame,
# eg: "rgb", "depth", "semantic"
# Returns:
# rendered frame according to the mode
# """
# sim_obs = self.get_specific_sensors_observations(sensors = sensors)
# observations = self._sensor_suite.get_observations(sim_obs)
# output = observations.get(mode)
# assert output is not None, "mode {} sensor is not active".format(mode)
# if not isinstance(output, np.ndarray):
# # If it is not a numpy array, it is a torch tensor
# # The function expects the result to be a numpy array
# output = output.to("cpu").numpy()
# return output
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/habitat_simulator.py |
# Copied from https://github.com/facebookresearch/habitat-lab/blob/v0.1.4/habitat/tasks/nav/shortest_path_follower.py
# Use the Habitat v0.1.4 ShortestPathFollower for compatibility with
# the dataset generation oracle.
from typing import Optional, Union
import habitat_sim
import numpy as np
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.utils.geometry_utils import (
angle_between_quaternions,
quaternion_from_two_vectors,
)
EPSILON = 1e-6
def action_to_one_hot(action: int) -> np.array:
one_hot = np.zeros(len(HabitatSimActions), dtype=np.float32)
one_hot[action] = 1
return one_hot
class ShortestPathFollowerCompat:
r"""Utility class for extracting the action on the shortest path to the
goal.
Args:
sim: HabitatSim instance.
goal_radius: Distance between the agent and the goal for it to be
considered successful.
return_one_hot: If true, returns a one-hot encoding of the action
(useful for training ML agents). If false, returns the
SimulatorAction.
"""
def __init__(
self, sim: HabitatSim, goal_radius: float, return_one_hot: bool = True
):
assert (
getattr(sim, "geodesic_distance", None) is not None
), "{} must have a method called geodesic_distance".format(
type(sim).__name__
)
self._sim = sim
self._max_delta = sim.habitat_config.FORWARD_STEP_SIZE - EPSILON
self._goal_radius = goal_radius
self._step_size = sim.habitat_config.FORWARD_STEP_SIZE
self._mode = (
"geodesic_path"
if getattr(sim, "get_straight_shortest_path_points", None)
is not None
else "greedy"
)
self._return_one_hot = return_one_hot
def _get_return_value(self, action) -> Union[int, np.array]:
if self._return_one_hot:
return action_to_one_hot(action)
else:
return action
def get_next_action(
self, goal_pos: np.array
) -> Optional[Union[int, np.array]]:
"""Returns the next action along the shortest path."""
if (
self._sim.geodesic_distance(
self._sim.get_agent_state().position, goal_pos
)
<= self._goal_radius
):
return None
max_grad_dir = self._est_max_grad_dir(goal_pos)
if max_grad_dir is None:
return self._get_return_value(HabitatSimActions.MOVE_FORWARD)
return self._step_along_grad(max_grad_dir)
def _step_along_grad(
self, grad_dir: np.quaternion
) -> Union[int, np.array]:
current_state = self._sim.get_agent_state()
alpha = angle_between_quaternions(grad_dir, current_state.rotation)
if alpha <= np.deg2rad(self._sim.habitat_config.TURN_ANGLE) + EPSILON:
return self._get_return_value(HabitatSimActions.MOVE_FORWARD)
else:
sim_action = HabitatSimActions.TURN_LEFT
self._sim.step(sim_action)
best_turn = (
HabitatSimActions.TURN_LEFT
if (
angle_between_quaternions(
grad_dir, self._sim.get_agent_state().rotation
)
< alpha
)
else HabitatSimActions.TURN_RIGHT
)
self._reset_agent_state(current_state)
return self._get_return_value(best_turn)
def _reset_agent_state(self, state: habitat_sim.AgentState) -> None:
self._sim.set_agent_state(
state.position, state.rotation, reset_sensors=False
)
def _geo_dist(self, goal_pos: np.array) -> float:
return self._sim.geodesic_distance(
self._sim.get_agent_state().position, goal_pos
)
def _est_max_grad_dir(self, goal_pos: np.array) -> np.array:
current_state = self._sim.get_agent_state()
current_pos = current_state.position
if self.mode == "geodesic_path":
points = self._sim.get_straight_shortest_path_points(
self._sim.get_agent_state().position, goal_pos
)
# Add a little offset as things get weird if
# points[1] - points[0] is anti-parallel with forward
if len(points) < 2:
return None
max_grad_dir = quaternion_from_two_vectors(
self._sim.forward_vector,
points[1]
- points[0]
+ EPSILON
* np.cross(self._sim.up_vector, self._sim.forward_vector),
)
max_grad_dir.x = 0
max_grad_dir = np.normalized(max_grad_dir)
else:
current_rotation = self._sim.get_agent_state().rotation
current_dist = self._geo_dist(goal_pos)
best_geodesic_delta = -2 * self._max_delta
best_rotation = current_rotation
for _ in range(0, 360, self._sim.habitat_config.TURN_ANGLE):
sim_action = HabitatSimActions.MOVE_FORWARD
self._sim.step(sim_action)
new_delta = current_dist - self._geo_dist(goal_pos)
if new_delta > best_geodesic_delta:
best_rotation = self._sim.get_agent_state().rotation
best_geodesic_delta = new_delta
                # If the best delta is within a relative tolerance of
                # (1 - cos(TURN_ANGLE)) of the maximum possible delta (the
                # forward step size), we have almost certainly found the
                # max-gradient direction and can exit the search early
if np.isclose(
best_geodesic_delta,
self._max_delta,
rtol=1
- np.cos(np.deg2rad(self._sim.habitat_config.TURN_ANGLE)),
):
break
self._sim.set_agent_state(
current_pos,
self._sim.get_agent_state().rotation,
reset_sensors=False,
)
sim_action = HabitatSimActions.TURN_LEFT
self._sim.step(sim_action)
self._reset_agent_state(current_state)
max_grad_dir = best_rotation
return max_grad_dir
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, new_mode: str):
r"""Sets the mode for how the greedy follower determines the best next
step.
Args:
new_mode: geodesic_path indicates using the simulator's shortest
path algorithm to find points on the map to navigate between.
greedy indicates trying to move forward at all possible
orientations and selecting the one which reduces the geodesic
distance the most.
"""
assert new_mode in {"geodesic_path", "greedy"}
if new_mode == "geodesic_path":
assert (
getattr(self._sim, "get_straight_shortest_path_points", None)
is not None
)
self._mode = new_mode
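# --- Illustrative usage sketch (not part of the original file; `sim` and `goal_pos`
# are assumed to be a HabitatSim instance and an (x, y, z) goal position) ---
#   follower = ShortestPathFollowerCompat(sim, goal_radius=0.5, return_one_hot=False)
#   follower.mode = "geodesic_path"   # "greedy" is used when straight-path queries are unavailable
#   action = follower.get_next_action(goal_pos)   # returns None once the agent is within goal_radius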
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/shortest_path_follower.py |
InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/config/__init__.py |
|
from typing import List, Optional, Union
from habitat.config.default import Config as CN
from habitat.config.default import get_config
_C = get_config()
_C.defrost()
# ----------------------------------------------------------------------------
# CUSTOM ACTION: HIGHTOLOWINFERENCE ACTION
# ----------------------------------------------------------------------------
_C.TASK.ACTIONS.HIGHTOLOWINFERENCE = CN()
_C.TASK.ACTIONS.HIGHTOLOWINFERENCE.TYPE = 'MoveHighToLowActionInference'
# ----------------------------------------------------------------------------
# CUSTOM ACTION: HIGHTOLOWEVAL ACTION
# ----------------------------------------------------------------------------
_C.TASK.ACTIONS.HIGHTOLOWEVAL = CN()
_C.TASK.ACTIONS.HIGHTOLOWEVAL.TYPE = 'MoveHighToLowActionEval'
# ----------------------------------------------------------------------------
# CUSTOM ACTION: HIGHTOLOW ACTION
# ----------------------------------------------------------------------------
_C.TASK.ACTIONS.HIGHTOLOW = CN()
_C.TASK.ACTIONS.HIGHTOLOW.TYPE = 'MoveHighToLowAction'
# ----------------------------------------------------------------------------
# GPS SENSOR
# ----------------------------------------------------------------------------
_C.TASK.GLOBAL_GPS_SENSOR = CN()
_C.TASK.GLOBAL_GPS_SENSOR.TYPE = "GlobalGPSSensor"
_C.TASK.GLOBAL_GPS_SENSOR.DIMENSIONALITY = 3
# ----------------------------------------------------------------------------
# # RXR INSTRUCTION SENSOR
# ----------------------------------------------------------------------------
_C.TASK.RXR_INSTRUCTION_SENSOR = CN()
_C.TASK.RXR_INSTRUCTION_SENSOR.TYPE = "RxRInstructionSensor"
_C.TASK.RXR_INSTRUCTION_SENSOR.features_path = "data/datasets/RxR_VLNCE_v0/text_features/rxr_{split}/{id:06}_{lang}_text_features.npz"
_C.TASK.RXR_INSTRUCTION_SENSOR.max_text_len = 512
_C.TASK.INSTRUCTION_SENSOR_UUID = "rxr_instruction"
# ----------------------------------------------------------------------------
# SHORTEST PATH SENSOR (previously: VLN_ORACLE_ACTION_SENSOR)
# ----------------------------------------------------------------------------
_C.TASK.SHORTEST_PATH_SENSOR = CN()
_C.TASK.SHORTEST_PATH_SENSOR.TYPE = "ShortestPathSensor"
# all goals can be navigated to within 0.5m.
_C.TASK.SHORTEST_PATH_SENSOR.GOAL_RADIUS = 0.5
# compatibility with the dataset generation oracle and paper results.
# if False, use the ShortestPathFollower in Habitat
_C.TASK.SHORTEST_PATH_SENSOR.USE_ORIGINAL_FOLLOWER = False
# ----------------------------------------------------------------------------
# VLN ORACLE PROGRESS SENSOR
# ----------------------------------------------------------------------------
_C.TASK.VLN_ORACLE_PROGRESS_SENSOR = CN()
_C.TASK.VLN_ORACLE_PROGRESS_SENSOR.TYPE = "VLNOracleProgressSensor"
# ----------------------------------------------------------------------------
# NDTW MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.NDTW = CN()
_C.TASK.NDTW.TYPE = "NDTW"
_C.TASK.NDTW.SPLIT = "val_seen"
_C.TASK.NDTW.FDTW = True # False: DTW
_C.TASK.NDTW.GT_PATH = (
"data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}_gt.json"
)
_C.TASK.NDTW.SUCCESS_DISTANCE = 3.0
# ----------------------------------------------------------------------------
# SDTW MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.SDTW = CN()
_C.TASK.SDTW.TYPE = "SDTW"
# ----------------------------------------------------------------------------
# PATH_LENGTH MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.PATH_LENGTH = CN()
_C.TASK.PATH_LENGTH.TYPE = "PathLength"
# ----------------------------------------------------------------------------
# ORACLE_NAVIGATION_ERROR MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.ORACLE_NAVIGATION_ERROR = CN()
_C.TASK.ORACLE_NAVIGATION_ERROR.TYPE = "OracleNavigationError"
# ----------------------------------------------------------------------------
# ORACLE_SUCCESS MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.ORACLE_SUCCESS = CN()
_C.TASK.ORACLE_SUCCESS.TYPE = "OracleSuccess"
_C.TASK.ORACLE_SUCCESS.SUCCESS_DISTANCE = 3.0
# ----------------------------------------------------------------------------
# ORACLE_SPL MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.ORACLE_SPL = CN()
_C.TASK.ORACLE_SPL.TYPE = "OracleSPL"
# ----------------------------------------------------------------------------
# STEPS_TAKEN MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.STEPS_TAKEN = CN()
_C.TASK.STEPS_TAKEN.TYPE = "StepsTaken"
# ----------------------------------------------------------------------------
# POSITION MEASUREMENT For faster eval
# ----------------------------------------------------------------------------
_C.TASK.POSITION = CN()
_C.TASK.POSITION.TYPE = 'Position'
# -----------------------------------------------------------------------------
# TOP_DOWN_MAP_VLNCE MEASUREMENT
# -----------------------------------------------------------------------------
_C.TASK.TOP_DOWN_MAP_VLNCE = CN()
_C.TASK.TOP_DOWN_MAP_VLNCE.TYPE = "TopDownMapVLNCE"
_C.TASK.TOP_DOWN_MAP_VLNCE.MAX_EPISODE_STEPS = _C.ENVIRONMENT.MAX_EPISODE_STEPS
_C.TASK.TOP_DOWN_MAP_VLNCE.MAP_RESOLUTION = 1024
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_SOURCE_AND_TARGET = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_BORDER = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_SHORTEST_PATH = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_REFERENCE_PATH = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_FIXED_WAYPOINTS = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_MP3D_AGENT_PATH = True
_C.TASK.TOP_DOWN_MAP_VLNCE.GRAPHS_FILE = "data/connectivity_graphs.pkl"
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR = CN()
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.DRAW = True
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.FOV = 79
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.VISIBILITY_DIST = 5.0
# ----------------------------------------------------------------------------
# DATASET EXTENSIONS
# ----------------------------------------------------------------------------
_C.DATASET.ROLES = ["guide"] # options: "*", "guide", "follower"
# language options by region: "*", "te-IN", "hi-IN", "en-US", "en-IN"
_C.DATASET.LANGUAGES = ["*"]
# a list or set of episode IDs to allow in dataset creation. None allows all.
_C.DATASET.EPISODES_ALLOWED = None
def get_extended_config(
config_paths: Optional[Union[List[str], str]] = None,
opts: Optional[list] = None,
) -> CN:
r"""Create a unified config with default values overwritten by values from
:p:`config_paths` and overwritten by options from :p:`opts`.
:param config_paths: List of config paths or string that contains comma
separated list of config paths.
:param opts: Config options (keys, values) in a list (e.g., passed from
command line into the config. For example,
:py:`opts = ['FOO.BAR', 0.5]`. Argument can be used for parameter
sweeping or quick tests.
"""
config = _C.clone()
if config_paths:
if isinstance(config_paths, str):
config_paths = [config_paths]
for config_path in config_paths:
config.merge_from_file(config_path)
if opts:
config.merge_from_list(opts)
config.freeze()
return config
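# Illustrative usage sketch (the path and override key below are assumptions, not required values):
#   config = get_extended_config(
#       "habitat_extensions/config/vlnce_task.yaml",
#       ["TASK.NDTW.SPLIT", "val_unseen"],
#   )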
| InternVideo-main | Downstream/Visual-Language-Navigation/habitat_extensions/config/default.py |
import gc
import os
import io
import sys
import random
import warnings
from collections import defaultdict
from typing import Dict, List
import jsonlines
import lmdb
import msgpack_numpy
import numpy as np
import math
import time
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parallel import DistributedDataParallel as DDP
import tqdm
from gym import Space
from habitat import Config, logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.base_il_trainer import BaseVLNCETrainer
from vlnce_baselines.common.env_utils import construct_envs, construct_envs_for_rl, is_slurm_batch_job
from vlnce_baselines.common.utils import extract_instruction_tokens
from vlnce_baselines.utils import reduce_loss
from habitat.utils.visualizations.utils import images_to_video
from .utils import get_camera_orientations12
from .utils import (
length2mask, dir_angle_feature_with_ele,
)
from vlnce_baselines.common.utils import dis_to_con, gather_list_and_concat
from habitat_extensions.measures import NDTW
from fastdtw import fastdtw
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
import torch.distributed as distr
import gzip
import json
from copy import deepcopy
from torch.cuda.amp import autocast, GradScaler
@baseline_registry.register_trainer(name="HAMT")
class RLTrainer(BaseVLNCETrainer):
def __init__(self, config=None):
super().__init__(config)
        self.max_len = int(config.IL.max_traj_len)  # * 0.97: the transferred GT path reached 0.96 SPL
        # check Ceph (object storage) I/O settings
if config.CEPH_IO:
from petrel_client.client import Client
conf_path = '~/petreloss.conf'
self.client = Client(conf_path)
def _make_dirs(self) -> None:
if self.config.local_rank == 0:
self._make_ckpt_dir()
# os.makedirs(self.lmdb_features_dir, exist_ok=True)
if self.config.EVAL.SAVE_RESULTS:
self._make_results_dir()
def save_checkpoint(self, iteration: int,) -> None:
if not self.config.CEPH_IO:
torch.save(
obj={
"state_dict": self.policy.state_dict(),
"config": self.config,
"optim_state": self.optimizer.state_dict(),
"iteration": iteration,
},
f=os.path.join(self.config.CHECKPOINT_FOLDER, f"ckpt.iter{iteration}.pth"),
)
else:
save_dict = {
"state_dict": self.policy.state_dict(),
"config": self.config,
"optim_state": self.optimizer.state_dict(),
"iteration": iteration,
}
path = os.path.join(self.config.CEPH_URL, f"ckpt.iter{iteration}.pth")
with io.BytesIO() as buffer:
torch.save(save_dict, buffer)
self.client.put(path, buffer.getvalue())
def _set_config(self):
self.split = self.config.TASK_CONFIG.DATASET.SPLIT
self.config.defrost()
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.split
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.split
self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['DISTANCE_TO_GOAL', 'NDTW'] # for RL reward
self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = self.config.IL.max_traj_len
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1
self.config.SIMULATOR_GPU_IDS = self.config.SIMULATOR_GPU_IDS[self.config.local_rank]
self.config.use_pbar = True # not is_slurm_batch_job()
        ''' register extra panoramic RGB/DEPTH cameras (one per 30-degree heading) '''
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations12()
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
cropper_size = dict(crop_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
crop_config.append((camera_template.lower(), cropper_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
if self.config.IL.progress_monitor == True:
self.config.MODEL.progress_monitor = True
self.config.MODEL.max_len = self.config.IL.max_text_len
else:
self.config.MODEL.progress_monitor = False
self.config.freeze()
self.world_size = self.config.GPU_NUMBERS
self.local_rank = self.config.local_rank
self.batch_size = self.config.IL.batch_size
torch.cuda.set_device(self.device)
if self.world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.defrost()
self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.freeze()
torch.cuda.set_device(self.device)
def _init_envs(self):
self.config.defrost()
self.config.TASK_CONFIG.SEED = self.config.TASK_CONFIG.SEED + self.local_rank
self.config.freeze()
self.envs = construct_envs(
self.config,
get_env_class(self.config.ENV_NAME),
auto_reset_done=False
)
env_num = self.envs.num_envs
dataset_len = sum(self.envs.number_of_episodes)
logger.info(f'LOCAL RANK: {self.local_rank}, ENV NUM: {env_num}, DATASET LEN: {dataset_len}')
observation_space = self.envs.observation_spaces[0]
action_space = self.envs.action_spaces[0]
self.obs_transforms = get_active_obs_transforms(self.config)
observation_space = apply_obs_transforms_obs_space(
observation_space, self.obs_transforms
)
return observation_space, action_space
def _get_iter(self, x):
x_iter = int(x.split('.')[-2][4:])
return x_iter
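    # e.g. _get_iter("ckpt.iter10000.pth") -> 10000; used to sort checkpoints by training iteration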
def _initialize_policy(
self,
config: Config,
load_from_ckpt: bool,
observation_space: Space,
action_space: Space,
) -> int:
start_iter = 0
policy = baseline_registry.get_policy(self.config.MODEL.policy_name)
self.policy = policy.from_config(
config=config,
observation_space=observation_space,
action_space=action_space,
)
''' initialize the waypoint predictor here '''
from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM
self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device)
self.waypoint_predictor.load_state_dict(
torch.load('pretrained/wp_pred/waypoint_predictor', map_location = torch.device('cpu'))['predictor']['state_dict']
)
for param in self.waypoint_predictor.parameters():
param.requires_grad_(False)
self.policy.to(self.device)
self.waypoint_predictor.to(self.device)
self.num_recurrent_layers = self.policy.net.num_recurrent_layers
if self.config.GPU_NUMBERS > 1:
print('Using', self.config.GPU_NUMBERS,'GPU!')
            # find_unused_parameters=False fixes a DDP bug
self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device],
output_device=self.device, find_unused_parameters=False, broadcast_buffers=False)
self.optimizer = torch.optim.AdamW(self.policy.parameters(), lr=self.config.IL.lr)
if config.IL.resume:
import glob
if not self.config.CEPH_IO:
ckpt_list = list(filter(os.path.isfile, glob.glob(config.CHECKPOINT_FOLDER + "/*")) )
else:
ckpt_list = [os.path.join(self.config.CEPH_URL,p) for p in self.client.list(self.config.CEPH_URL)]
ckpt_list.sort(key=self._get_iter)
if len(ckpt_list) > 0:
config.defrost()
config.IL.ckpt_to_load = ckpt_list[-1]
load_from_ckpt = True
config.IL.is_requeue = True
config.freeze()
else:
load_from_ckpt = False
if load_from_ckpt:
ckpt_path = config.IL.ckpt_to_load
if not self.config.CEPH_IO:
ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu")
else:
with io.BytesIO(self.client.get(ckpt_path)) as buffer:
ckpt_dict = torch.load(buffer, map_location="cpu")
if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1:
self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device),
device_ids=[self.device], output_device=self.device)
self.policy.load_state_dict(ckpt_dict["state_dict"], strict=False)
self.policy.net = self.policy.net.module
self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device),
device_ids=[self.device], output_device=self.device)
else:
self.policy.load_state_dict(ckpt_dict["state_dict"], strict=False)
if config.IL.is_requeue:
start_iter = ckpt_dict["iteration"]
self.optimizer.load_state_dict(ckpt_dict["optim_state"])
logger.info(f"Loaded weights from checkpoint: {ckpt_path}, iteration: {start_iter}")
params = sum(param.numel() for param in self.policy.parameters())
params_t = sum(
p.numel() for p in self.policy.parameters() if p.requires_grad
)
logger.info(f"Agent parameters: {params/1e6:.2f} MB. Trainable: {params_t/1e6:.2f} MB.")
logger.info("Finished setting up policy.")
return start_iter
def _teacher_action(self, batch_angles, batch_distances, candidate_lengths):
if self.config.MODEL.task_type == 'r2r':
cand_dists_to_goal = [[] for _ in range(len(batch_angles))]
oracle_cand_idx = []
for j in range(len(batch_angles)):
for k in range(len(batch_angles[j])):
angle_k = batch_angles[j][k]
forward_k = batch_distances[j][k]
dist_k = self.envs.call_at(j, "cand_dist_to_goal", {"angle": angle_k, "forward": forward_k})
cand_dists_to_goal[j].append(dist_k)
curr_dist_to_goal = self.envs.call_at(j, "current_dist_to_goal")
                # stop if within the goal range (a 1.5 m threshold here, stricter than the 3.0 m success distance)
if curr_dist_to_goal < 1.5:
oracle_cand_idx.append(candidate_lengths[j] - 1)
else:
oracle_cand_idx.append(np.argmin(cand_dists_to_goal[j]))
return oracle_cand_idx
elif self.config.MODEL.task_type == 'rxr':
kargs = []
current_episodes = self.envs.current_episodes()
for i in range(self.envs.num_envs):
kargs.append({
'ref_path':self.gt_data[str(current_episodes[i].episode_id)]['locations'],
'angles':batch_angles[i],
'distances':batch_distances[i],
'candidate_length':candidate_lengths[i]
})
outputs = self.envs.call(["get_cand_idx"]*self.envs.num_envs,kargs)
oracle_cand_idx, progresses = [list(x) for x in zip(*outputs)]
return oracle_cand_idx, progresses
def _cand_pano_feature_variable(self, obs):
batch_size = len(obs['cand_angles'])
ob_cand_lens = [len(x)+1 for x in obs['cand_angles']] # +1 is for the end
ob_lens = []
ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, ob_nav_types = [], [], [], [], []
# Note: The candidate_feat at len(ob['candidate']) is the feature for the END
# which is zero in my implementation
for i in range(batch_size):
cand_nav_types = []
            cand_idxes = np.zeros(12, dtype=bool)  # np.bool is removed in recent NumPy
cand_idxes[obs['cand_img_idxes'][i]] = True
# cand
cand_rgb_fts = obs['cand_rgb'][i]
cand_dep_fts = obs['cand_depth'][i]
cand_ang_fts = obs['cand_angle_fts'][i]
cand_dis_fts = obs['cand_dis_fts'][i]
cand_nav_types += [1] * cand_ang_fts.shape[0]
# stop
stop_rgb_fts = torch.zeros([1, 768])
# stop_rgb_fts = torch.zeros([1, 2048])
stop_dep_fts = torch.zeros([1, 128])
stop_ang_fts = torch.zeros([1, 4])
stop_dis_fts = torch.zeros([1, 4])
cand_nav_types += [2]
# pano context
pano_rgb_fts = obs['pano_rgb'][i][~cand_idxes]
pano_dep_fts = obs['pano_depth'][i][~cand_idxes]
pano_ang_fts = obs['pano_angle_fts'][~cand_idxes]
pano_dis_fts = obs['pano_dis_fts'][~cand_idxes]
cand_nav_types += [0] * (12-np.sum(cand_idxes))
cand_pano_rgb = torch.cat([cand_rgb_fts, stop_rgb_fts, pano_rgb_fts], dim=0)
cand_pano_dep = torch.cat([cand_dep_fts, stop_dep_fts, pano_dep_fts], dim=0)
cand_pano_ang = torch.cat([cand_ang_fts, stop_ang_fts, pano_ang_fts], dim=0)
cand_pano_dis = torch.cat([cand_dis_fts, stop_dis_fts, pano_dis_fts], dim=0)
ob_rgb_fts.append(cand_pano_rgb)
ob_dep_fts.append(cand_pano_dep)
ob_ang_fts.append(cand_pano_ang)
ob_dis_fts.append(cand_pano_dis)
ob_nav_types.append(cand_nav_types)
ob_lens.append(len(cand_nav_types))
# pad features to max_len
max_len = max(ob_lens)
for i in range(batch_size):
num_pads = max_len - ob_lens[i]
ob_rgb_fts[i] = torch.cat([ob_rgb_fts[i], torch.zeros(num_pads, 768)], dim=0)
# ob_rgb_fts[i] = torch.cat([ob_rgb_fts[i], torch.zeros(num_pads, 2048)], dim=0)
ob_dep_fts[i] = torch.cat([ob_dep_fts[i], torch.zeros(num_pads, 128)], dim=0)
ob_ang_fts[i] = torch.cat([ob_ang_fts[i], torch.zeros(num_pads, 4)], dim=0)
ob_dis_fts[i] = torch.cat([ob_dis_fts[i], torch.zeros(num_pads, 4)], dim=0)
ob_nav_types[i] = np.array(ob_nav_types[i] + [0]*num_pads)
ob_rgb_fts = torch.stack(ob_rgb_fts, dim=0).cuda()
ob_dep_fts = torch.stack(ob_dep_fts, dim=0).cuda()
ob_ang_fts = torch.stack(ob_ang_fts, dim=0).cuda()
ob_dis_fts = torch.stack(ob_dis_fts, dim=0).cuda()
ob_nav_types = torch.from_numpy(np.stack(ob_nav_types, 0)).cuda()
return ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, ob_nav_types, ob_lens, ob_cand_lens
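    # Returned tensors have shape [B, L, D] with L = #candidates + 1 (stop) + remaining pano views,
    # D = 768 (rgb), 128 (depth), 4 (angle/distance); ob_nav_types uses 1 = candidate, 2 = stop,
    # 0 = panorama context or padding.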
def _history_variable(self, obs):
batch_size = obs['pano_rgb'].shape[0]
hist_rgb_fts = obs['pano_rgb'][:, 0, ...].cuda()
hist_depth_fts = obs['pano_depth'][:, 0, ...].cuda()
hist_pano_rgb_fts = obs['pano_rgb'].cuda()
hist_pano_depth_fts = obs['pano_depth'].cuda()
hist_pano_ang_fts = obs['pano_angle_fts'].unsqueeze(0).expand(batch_size, -1, -1).cuda()
return hist_rgb_fts, hist_depth_fts, hist_pano_rgb_fts, hist_pano_depth_fts, hist_pano_ang_fts
@staticmethod
def _pause_envs(envs, batch, envs_to_pause, *args):
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
for arg in args:
arg.pop(idx)
for k, v in batch.items():
if k != 'video_rgbs':
batch[k] = v[state_index]
# else:
# batch[k] = [v[state_i] for state_i in state_index]
return envs, batch
@torch.no_grad()
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
if self.local_rank < 1:
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")["config"]
)
else:
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1
config.IL.ckpt_to_load = checkpoint_path
if config.IL.progress_monitor == True:
config.MODEL.progress_monitor = True
config.MODEL.max_len = config.IL.max_text_len
else:
config.MODEL.progress_monitor = False
config.freeze()
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json",
)
if os.path.exists(fname):
print("skipping -- evaluation exists.")
return
envs = construct_envs(
config,
get_env_class(config.ENV_NAME),
auto_reset_done=False, # unseen: 11006
)
dataset_length = sum(envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
state_episodes = {}
if config.EVAL.EPISODE_COUNT == -1:
episodes_to_eval = sum(envs.number_of_episodes)
else:
episodes_to_eval = min(
config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes)
)
pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None
while len(state_episodes) < episodes_to_eval:
envs.resume_all()
observations = envs.reset()
instr_max_len = self.config.IL.max_text_len # r2r 80, rxr 200
instr_pad_id = 1 if self.config.MODEL.task_type == 'rxr' else 0
observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
max_length=instr_max_len, pad_id=instr_pad_id)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
keys = ['rgb', 'rgb_30', 'rgb_60', 'rgb_90', 'rgb_120', 'rgb_150', 'rgb_180', 'rgb_210', 'rgb_240', 'rgb_270', 'rgb_300', 'rgb_330']
# states = [[envs.call_at(i,"get_agent_state",{})] for i in range(envs.num_envs)]
history_images = [{k:observations[i][k][None,...] for k in keys} for i in range(envs.num_envs)]
video_inputs = [{k:observations[i][k][None,...].repeat(16,0) for k in keys} for i in range(envs.num_envs)]
batch['video_rgbs'] = video_inputs
envs_to_pause = [i for i, ep in enumerate(envs.current_episodes()) if ep.episode_id in state_episodes]
envs, batch = self._pause_envs(envs, batch, envs_to_pause, history_images, video_inputs)
if envs.num_envs == 0:
break
# encode instructions
all_txt_ids = batch['instruction']
all_txt_masks = (all_txt_ids != instr_pad_id)
all_txt_embeds = self.policy.net(
mode='language',
txt_ids=all_txt_ids,
txt_masks=all_txt_masks,
)
not_done_index = list(range(envs.num_envs))
hist_lens = np.ones(envs.num_envs, dtype=np.int64)
hist_embeds = [self.policy.net('history').expand(envs.num_envs, -1)]
for stepk in range(self.max_len):
txt_embeds = all_txt_embeds[not_done_index]
txt_masks = all_txt_masks[not_done_index]
# cand waypoint prediction
wp_outputs = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, \
ob_nav_types, ob_lens, ob_cand_lens = self._cand_pano_feature_variable(wp_outputs)
ob_masks = length2mask(ob_lens).logical_not()
# navigation
visual_inputs = {
'mode': 'navigation',
'txt_embeds': txt_embeds,
'txt_masks': txt_masks,
'hist_embeds': hist_embeds, # history before t step
'hist_lens': hist_lens,
'ob_rgb_fts': ob_rgb_fts,
'ob_dep_fts': ob_dep_fts,
'ob_ang_fts': ob_ang_fts,
'ob_dis_fts': ob_dis_fts,
'ob_nav_types': ob_nav_types,
'ob_masks': ob_masks,
'return_states': False,
}
t_outputs = self.policy.net(**visual_inputs)
logits = t_outputs[0]
# sample action
a_t = logits.argmax(dim=-1, keepdim=True)
cpu_a_t = a_t.squeeze(1).cpu().numpy()
# update history
if stepk != self.max_len-1:
hist_rgb_fts, hist_depth_fts, hist_pano_rgb_fts, hist_pano_depth_fts, hist_pano_ang_fts = self._history_variable(wp_outputs)
prev_act_ang_fts = torch.zeros([envs.num_envs, 4]).cuda()
for i, next_id in enumerate(cpu_a_t):
prev_act_ang_fts[i] = ob_ang_fts[i, next_id]
t_hist_inputs = {
'mode': 'history',
'hist_rgb_fts': hist_rgb_fts,
'hist_depth_fts': hist_depth_fts,
'hist_ang_fts': prev_act_ang_fts,
'hist_pano_rgb_fts': hist_pano_rgb_fts,
'hist_pano_depth_fts': hist_pano_depth_fts,
'hist_pano_ang_fts': hist_pano_ang_fts,
'ob_step': stepk,
}
t_hist_embeds = self.policy.net(**t_hist_inputs)
hist_embeds.append(t_hist_embeds)
hist_lens = hist_lens + 1
# make equiv action
env_actions = []
for j in range(envs.num_envs):
if cpu_a_t[j].item()==ob_cand_lens[j]-1 or stepk==self.max_len-1:
env_actions.append({'action':{'action': 0, 'action_args':{}}})
else:
t_angle = wp_outputs['cand_angles'][j][cpu_a_t[j]]
if self.config.EVAL.ANG30:
t_angle = round(t_angle / math.radians(30)) * math.radians(30)
t_distance = wp_outputs['cand_distances'][j][cpu_a_t[j]]
env_actions.append({'action':{'action': 4, 'action_args':{'angle': t_angle, 'distance': t_distance,
'niu1niu': not self.config.TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING}
}
})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for j, ob in enumerate(observations):
if env_actions[j]['action']['action'] == 0:
continue
else:
envs.call_at(
j, 'change_current_path', # to update and record low-level path
{'new_path': ob.pop('positions'),
'collisions': ob.pop('collisions')}
)
# calculate metric
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if not dones[i]:
continue
info = infos[i]
metric = {}
metric['steps_taken'] = info['steps_taken']
ep_id = str(current_episodes[i].episode_id)
                    gt_path = np.array(self.gt_data[ep_id]['locations']).astype(np.float64)
if 'current_path' in current_episodes[i].info.keys():
                        positions_ = np.array(current_episodes[i].info['current_path']).astype(np.float64)
                        collisions_ = np.array(current_episodes[i].info['collisions'])
                        assert collisions_.shape[0] == positions_.shape[0] - 1
                    else:
                        positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(np.float64)
                        collisions_ = np.array([])  # no low-level collision record on this branch
                    distance = np.array(info['position']['distance']).astype(np.float64)
metric['distance_to_goal'] = distance[-1]
metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0.
metric['oracle_success'] = 1. if (distance <= 3.).any() else 0.
metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum()
if collisions_.size == 0:
metric['collisions'] = 0
else:
metric['collisions'] = collisions_.mean()
gt_length = distance[0]
metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length'])
act_con_path = positions_
                    gt_con_path = np.array(dis_to_con(gt_path)).astype(np.float64)
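                    # nDTW = exp(-DTW(pred, gt) / (|gt| * d_th)) with d_th = SUCCESS_DISTANCE (3.0 m)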
dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0]
nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE))
metric['ndtw'] = nDTW
state_episodes[current_episodes[i].episode_id] = metric
if len(state_episodes)%300 == 0:
aggregated_states = {}
num_episodes = len(state_episodes)
for stat_key in next(iter(state_episodes.values())).keys():
aggregated_states[stat_key] = (
sum(v[stat_key] for v in state_episodes.values()) / num_episodes
)
print(aggregated_states)
if config.use_pbar:
pbar.update()
# pause env
if sum(dones) > 0:
for i in reversed(list(range(envs.num_envs))):
if dones[i]:
not_done_index.pop(i)
envs.pause_at(i)
observations.pop(i)
video_inputs.pop(i)
history_images.pop(i)
if envs.num_envs == 0:
break
for i in range(len(observations)):
states_i = observations[i].pop('states')
# states[i] += states_i
new_images_i = {k:[] for k in keys}
for position, rotation in states_i[:-1]:
new_image = envs.call_at(i,'get_pano_rgbs_observations_at', {'source_position':position,'source_rotation':rotation})
for k in keys:
new_images_i[k].append(new_image[k][None,...])
for k in keys:
new_images_i[k].append(observations[i][k][None,...])
history_images[i][k] = np.vstack((history_images[i][k], np.vstack(new_images_i[k])))
if len(history_images[i][k]) < 16:
video_inputs[i][k][16-len(history_images[i][k]):] = history_images[i][k]
else:
video_inputs[i][k] = history_images[i][k][-16:]
# print(i,stepk,len(new_images_i[k]))
position, rotation = states_i[-1]
envs.call_at(i,'set_agent_state', {'position':position,'rotation':rotation})
hist_lens = hist_lens[np.array(dones)==False]
for j in range(len(hist_embeds)):
hist_embeds[j] = hist_embeds[j][np.array(dones)==False]
observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
batch['video_rgbs'] = video_inputs
envs.close()
if self.world_size > 1:
distr.barrier()
aggregated_states = {}
num_episodes = len(state_episodes)
for stat_key in next(iter(state_episodes.values())).keys():
aggregated_states[stat_key] = (
sum(v[stat_key] for v in state_episodes.values()) / num_episodes
)
print(aggregated_states)
total = torch.tensor(num_episodes).cuda()
if self.world_size > 1:
distr.reduce(total,dst=0)
total = total.item()
if self.world_size > 1:
logger.info(f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_states}")
for k,v in aggregated_states.items():
v = torch.tensor(v*num_episodes).cuda()
cat_v = gather_list_and_concat(v,self.world_size)
v = (sum(cat_v)/total).item()
aggregated_states[k] = v
split = config.TASK_CONFIG.DATASET.SPLIT
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json",
)
if config.EVAL.SAVE_RESULTS:
with open(fname, "w") as f:
json.dump(state_episodes, f, indent=4)
if self.local_rank < 1:
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{split}.json",
)
with open(fname, "w") as f:
json.dump(aggregated_states, f, indent=4)
logger.info(f"Episodes evaluated: {total}")
if config.EVAL.SAVE_RESULTS:
checkpoint_num = int(checkpoint_index[4:])
for k, v in aggregated_states.items():
logger.info(f"Average episode {k}: {v:.6f}")
if config.EVAL.SAVE_RESULTS:
writer.add_scalar(f"eval_{k}/{split}", v, checkpoint_num)
@torch.no_grad()
def inference(self) -> None:
checkpoint_path = self.config.INFERENCE.CKPT_PATH
logger.info(f"checkpoint_path: {checkpoint_path}")
self.config.defrost()
self.config.IL.ckpt_to_load = checkpoint_path
self.config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT
self.config.TASK_CONFIG.DATASET.ROLES = ["guide"]
self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.INFERENCE.LANGUAGES
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1
if self.config.IL.progress_monitor == True:
self.config.MODEL.progress_monitor = True
            self.config.MODEL.max_len = self.config.IL.max_text_len
else:
self.config.MODEL.progress_monitor = False
self.config.TASK_CONFIG.TASK.MEASUREMENTS = []
self.config.TASK_CONFIG.TASK.SENSORS = [s for s in self.config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s]
self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]]
if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFERENCE'
        # register extra panoramic RGB/DEPTH cameras (one per 30-degree heading)
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations12()
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
cropper_size = dict(crop_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
crop_config.append((camera_template.lower(), cropper_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
self.config.INFERENCE.EPISODE_COUNT = -1
self.config.INFERENCE.ANG30 = False
self.config.freeze()
if self.config.INFERENCE.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")["config"]
)
else:
config = self.config.clone()
envs = construct_envs(
config,
get_env_class(config.ENV_NAME),
auto_reset_done=False,
)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
episode_predictions = defaultdict(list)
# episode ID --> instruction ID for rxr predictions format
instruction_ids: Dict[str, int] = {}
if config.INFERENCE.EPISODE_COUNT == -1:
episodes_to_infer = sum(envs.number_of_episodes)
else:
episodes_to_infer = min(
config.INFERENCE.EPISODE_COUNT, sum(envs.number_of_episodes)
)
pbar = tqdm.tqdm(total=episodes_to_infer)
while len(episode_predictions) < episodes_to_infer:
envs.resume_all()
observations = envs.reset()
instr_max_len = self.config.IL.max_text_len # r2r 80, rxr 300
instr_pad_id = 1 if self.config.MODEL.task_type == 'rxr' else 0
observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
max_length=instr_max_len, pad_id=instr_pad_id)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
keys = ['rgb', 'rgb_30', 'rgb_60', 'rgb_90', 'rgb_120', 'rgb_150', 'rgb_180', 'rgb_210', 'rgb_240', 'rgb_270', 'rgb_300', 'rgb_330']
# states = [[envs.call_at(i,"get_agent_state",{})] for i in range(envs.num_envs)]
history_images = [{k:observations[i][k][None,...] for k in keys} for i in range(envs.num_envs)]
video_inputs = [{k:observations[i][k][None,...].repeat(16,0) for k in keys} for i in range(envs.num_envs)]
batch['video_rgbs'] = video_inputs
envs_to_pause = [i for i, ep in enumerate(envs.current_episodes()) if ep.episode_id in episode_predictions]
envs, batch = self._pause_envs(envs, batch, envs_to_pause, history_images, video_inputs)
if envs.num_envs == 0:
break
            # initialize each episode's prediction with the agent's start state
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
episode_predictions[current_episodes[i].episode_id].append(
envs.call_at(i, "get_agent_info", {})
)
if config.INFERENCE.FORMAT == "rxr":
ep_id = current_episodes[i].episode_id
k = current_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
# encode instructions
all_txt_ids = batch['instruction']
all_txt_masks = (all_txt_ids != instr_pad_id)
all_txt_embeds = self.policy.net(
mode='language',
txt_ids=all_txt_ids,
txt_masks=all_txt_masks,
)
not_done_index = list(range(envs.num_envs))
hist_lens = np.ones(envs.num_envs, dtype=np.int64)
hist_embeds = [self.policy.net('history').expand(envs.num_envs, -1)]
for stepk in range(self.max_len):
txt_embeds = all_txt_embeds[not_done_index]
txt_masks = all_txt_masks[not_done_index]
# cand waypoint prediction
wp_outputs = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, \
ob_nav_types, ob_lens, ob_cand_lens = self._cand_pano_feature_variable(wp_outputs)
ob_masks = length2mask(ob_lens).logical_not()
# navigation
visual_inputs = {
'mode': 'navigation',
'txt_embeds': txt_embeds,
'txt_masks': txt_masks,
'hist_embeds': hist_embeds, # history before t step
'hist_lens': hist_lens,
'ob_rgb_fts': ob_rgb_fts,
'ob_dep_fts': ob_dep_fts,
'ob_ang_fts': ob_ang_fts,
'ob_dis_fts': ob_dis_fts,
'ob_nav_types': ob_nav_types,
'ob_masks': ob_masks,
'return_states': False,
}
t_outputs = self.policy.net(**visual_inputs)
logits = t_outputs[0]
# sample action
a_t = logits.argmax(dim=-1, keepdim=True)
cpu_a_t = a_t.squeeze(1).cpu().numpy()
# update history
if stepk != self.max_len-1:
hist_rgb_fts, hist_depth_fts, hist_pano_rgb_fts, hist_pano_depth_fts, hist_pano_ang_fts = self._history_variable(wp_outputs)
prev_act_ang_fts = torch.zeros([envs.num_envs, 4]).cuda()
for i, next_id in enumerate(cpu_a_t):
prev_act_ang_fts[i] = ob_ang_fts[i, next_id]
t_hist_inputs = {
'mode': 'history',
'hist_rgb_fts': hist_rgb_fts,
'hist_depth_fts': hist_depth_fts,
'hist_ang_fts': prev_act_ang_fts,
'hist_pano_rgb_fts': hist_pano_rgb_fts,
'hist_pano_depth_fts': hist_pano_depth_fts,
'hist_pano_ang_fts': hist_pano_ang_fts,
'ob_step': stepk,
}
t_hist_embeds = self.policy.net(**t_hist_inputs)
hist_embeds.append(t_hist_embeds)
hist_lens = hist_lens + 1
# make equiv action
env_actions = []
for j in range(envs.num_envs):
if cpu_a_t[j].item()==ob_cand_lens[j]-1 or stepk==self.max_len-1:
env_actions.append({'action':{'action': 0, 'action_args':{}}})
else:
t_angle = wp_outputs['cand_angles'][j][cpu_a_t[j]]
if self.config.INFERENCE.ANG30:
t_angle = round(t_angle / math.radians(30)) * math.radians(30)
t_distance = wp_outputs['cand_distances'][j][cpu_a_t[j]]
env_actions.append({'action':{'action': 4, 'action_args':{'angle': t_angle, 'distance': t_distance,
'niu1niu': not self.config.TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING}
}
})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for j, ob in enumerate(observations):
if env_actions[j]['action']['action'] == 0:
continue
else:
envs.call_at(
j, 'update_cur_path', {'new_path': ob.pop('cur_path')}
)
# record path
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if not dones[i]:
continue
ep_id = current_episodes[i].episode_id
if 'cur_path' in current_episodes[i].info:
episode_predictions[ep_id] += current_episodes[i].info['cur_path']
episode_predictions[ep_id][-1]['stop'] = True
pbar.update()
# pause env
if sum(dones) > 0:
for i in reversed(list(range(envs.num_envs))):
if dones[i]:
not_done_index.pop(i)
envs.pause_at(i)
observations.pop(i)
video_inputs.pop(i)
history_images.pop(i)
if envs.num_envs == 0:
break
for i in range(len(observations)):
states_i = observations[i].pop('states')
# states[i] += states_i
new_images_i = {k:[] for k in keys}
for position, rotation in states_i[:-1]:
new_image = envs.call_at(i,'get_pano_rgbs_observations_at', {'source_position':position,'source_rotation':rotation})
for k in keys:
new_images_i[k].append(new_image[k][None,...])
for k in keys:
new_images_i[k].append(observations[i][k][None,...])
history_images[i][k] = np.vstack((history_images[i][k], np.vstack(new_images_i[k])))
if len(history_images[i][k]) < 16:
video_inputs[i][k][16-len(history_images[i][k]):] = history_images[i][k]
else:
video_inputs[i][k] = history_images[i][k][-16:]
# print(i,stepk,len(new_images_i[k]))
position, rotation = states_i[-1]
envs.call_at(i,'set_agent_state', {'position':position,'rotation':rotation})
hist_lens = hist_lens[np.array(dones)==False]
for j in range(len(hist_embeds)):
hist_embeds[j] = hist_embeds[j][np.array(dones)==False]
observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
batch['video_rgbs'] = video_inputs
envs.close()
if config.INFERENCE.FORMAT == "r2r":
with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f:
json.dump(episode_predictions, f, indent=2)
logger.info(f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}")
else: # use 'rxr' format for rxr-habitat leaderboard
predictions_out = []
for k,v in episode_predictions.items():
# save only positions that changed
path = [v[0]["position"]]
for p in v[1:]:
if path[-1] != p["position"]:
path.append(p["position"])
predictions_out.append(
{
"instruction_id": instruction_ids[k],
"path": path,
}
)
predictions_out.sort(key=lambda x: x["instruction_id"])
with jsonlines.open(
config.INFERENCE.PREDICTIONS_FILE, mode="w"
) as writer:
writer.write_all(predictions_out)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
) | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/trainer_HAMT.py |
from vlnce_baselines import trainer_HAMT
from vlnce_baselines.common import environments
from vlnce_baselines.models import (
Policy_ViewSelection_CMA,
Policy_ViewSelection_HAMT,
)
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/__init__.py |
import torch
import torch.distributed as dist
import numpy as np
import math
import copy
class ARGS():
def __init__(self):
self.local_rank = 0
def reduce_loss(tensor, rank, world_size):
with torch.no_grad():
dist.reduce(tensor, dst=0)
if rank == 0:
# print(tensor)
tensor /= world_size
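# reduce_loss: sums `tensor` onto rank 0, then divides by world_size there,
# so rank 0 ends up holding the cross-process mean of the loss.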
def gather_list_and_concat(list_of_nums,world_size):
if not torch.is_tensor(list_of_nums):
tensor = torch.Tensor(list_of_nums).cuda()
else:
if list_of_nums.is_cuda == False:
tensor = list_of_nums.cuda()
else:
tensor = list_of_nums
gather_t = [torch.ones_like(tensor) for _ in
range(world_size)]
dist.all_gather(gather_t, tensor)
return gather_t
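# Note: despite its name, gather_list_and_concat returns the list of per-rank tensors
# produced by all_gather; any concatenation is left to the caller.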
def repeat_allocation(allocations, max_number):
if torch.is_tensor(max_number):
max_number = max_number.long().item()
else:
max_number = max_number.long()
allocation_number = len(allocations)
repeat_time, res = max_number // allocation_number, max_number % allocation_number
allocations_ = []
for i in range(repeat_time):
allocations_ += copy.deepcopy(allocations)
allocations_ += copy.deepcopy(allocations)[:res]
return allocations_
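# repeat_allocation: tiles `allocations` (whole copies plus a partial slice) so the
# returned list has exactly `max_number` entries.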
def allocate(number, ep_length, size_per_time):
length_to_indexes = {ep_length[i]: [] for i in
range(len(ep_length))}
for i in range(len(ep_length)):
length_to_indexes[ep_length[i]] += [i]*number[i]
values = []
for i in range(len(number)):
values += [ep_length[i]] * number[i]
groups = int((len(values) - 0.01) // size_per_time + 1)
values.sort(reverse=True)
load_balance_groups = [[] for grp in range(groups)]
for v in values:
load_balance_groups.sort(key=lambda x: sum(x))
load_balance_groups[0].append(v)
indexes = []
set_length = list(set(ep_length))
for i in range(groups):
index = np.zeros(len(load_balance_groups[i]),dtype=int)
for j in range(len(set_length)):
length_indexes = length_to_indexes[set_length[j]]
position = np.where(np.array(load_balance_groups[i]) ==
set_length[j])[0]
position_length = len(position)
# print(position_length,j)
index[position] = length_indexes[:position_length]
# print(length_indexes)
length_to_indexes[set_length[j]] = length_indexes[position_length:]
indexes.append((index).tolist())
return indexes
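# allocate: longest-processing-time greedy bin packing. Episode lengths are sorted in
# descending order and each value is placed into the currently lightest of
# ceil(total / size_per_time) groups, then mapped back to episode indexes so every
# group ends up with a roughly equal total path length.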
def allocate_instructions(instruction_lengths, allocations,ep_length, instruction_ids):
instruction_ids_copy = copy.deepcopy(instruction_ids)
allocations_copy = copy.deepcopy(allocations)
instruction_lengths_copy = copy.deepcopy(instruction_lengths)
values = []
value_indexes = []
weights = []
for i in range(len(instruction_lengths)):
instruction_length = instruction_lengths[i]
values += instruction_length
value_indexes += len(instruction_length)*[i]
weights += [ep_length[i]] * len(instruction_length)
# values = np.array(values)
# value_indexes = np.array(value_indexes)
values = np.array(values)
weights = np.array(weights)
value_indexes = np.array(value_indexes)
sorted_index = np.argsort(values*weights)[::-1]
values = values[sorted_index]
value_indexes = value_indexes[sorted_index]
weights = weights[sorted_index]
groups = len(allocations)
load_balance_groups = [[] for grp in range(groups)]
group_weights = [[] for grp in range(groups)]
instruction_allocations = [[] for grp in range(groups)]
for j in range(len(values)):
summation = np.array([np.sum(np.array(load_balance_groups[i])*np.array(group_weights[i])) for i in range(groups)])
sorted_index = np.argsort(summation)
for i in sorted_index:
index = value_indexes[j]
value = values[j]
if index in allocations_copy[i]:
allocations_copy[i].remove(index)
load_balance_groups[i].append(value)
group_weights[i].append(weights[j])
# check[i].append(index)
index_in_length = np.where(np.array(instruction_lengths_copy[index]) == value)[0][0]
instruction_lengths_copy[index].pop(index_in_length)
instruction_allocations[i].append(instruction_ids_copy[index].pop(index_in_length))
break
return instruction_allocations
def allocate_by_scene_for_ddp(number, ep_length, size_per_time):
length_to_indexes = {ep_length[i]: [] for i in
range(len(ep_length))}
for i in range(len(ep_length)):
length_to_indexes[ep_length[i]] += [i]*number[i]
values = []
for i in range(len(number)):
values += [ep_length[i]] * number[i]
groups = int((len(values) - 0.01) // size_per_time + 1)
values.sort(reverse=True)
load_balance_groups = [[] for grp in range(groups)]
for v in values:
load_balance_groups.sort(key=lambda x: sum(x))
load_balance_groups[0].append(v)
indexes = []
set_length = list(set(ep_length))
for i in range(groups):
index = np.zeros(len(load_balance_groups[i]),dtype=int)
for j in range(len(set_length)):
length_indexes = length_to_indexes[set_length[j]]
position = np.where(np.array(load_balance_groups[i]) ==
set_length[j])[0]
position_length = len(position)
# print(position_length,j)
index[position] = length_indexes[:position_length]
# print(length_indexes)
length_to_indexes[set_length[j]] = length_indexes[position_length:]
indexes.append((index).tolist())
return indexes
def get_camera_orientations12():
base_angle_deg = 30
base_angle_rad = math.pi / 6
orient_dict = {}
for k in range(1,12):
orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0]
return orient_dict
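# e.g. get_camera_orientations12() -> {'30': [0.0, pi/6, 0.0], ..., '330': [0.0, 11*pi/6, 0.0]}
# (11 extra headings at 30-degree steps; the forward-facing 0-degree camera is the default sensor)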
def get_camera_orientations24():
base_angle_deg = 15
base_angle_rad = math.pi / 12
orient_dict = {}
for k in range(1,24):
orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0]
return orient_dict
def length2mask(length, size=None):
batch_size = len(length)
size = int(max(length)) if size is None else size
mask = (torch.arange(size, dtype=torch.int64).unsqueeze(0).repeat(batch_size, 1)
> (torch.LongTensor(length) - 1).unsqueeze(1)).cuda()
return mask
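# e.g. length2mask([2, 3]) -> [[False, False, True], [False, False, False]]
# (True marks padded positions beyond each sequence length)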
def dir_angle_feature(angle_list, device=None):
feature_dim = 64
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[math.sin(angle_rad),
math.cos(angle_rad)] * (feature_dim // 2))
return heading_enc
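# dir_angle_feature: 64-d heading encoding per candidate, [sin(a), cos(a)] tiled 32 times;
# the extra (+1) slot is left all-zero and stands for the STOP action.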
def dir_angle_feature_with_ele(angle_list, device=None):
feature_dim = 128
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[
math.sin(angle_rad), math.cos(angle_rad),
math.sin(0.0), math.cos(0.0), # elevation
] * (128 // 4))
return heading_enc | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/utils.py |
InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/config/__init__.py |
|
from typing import List, Optional, Union
import habitat_baselines.config.default
from habitat.config.default import CONFIG_FILE_SEPARATOR
from habitat.config.default import Config as CN
from habitat_extensions.config.default import (
get_extended_config as get_task_config,
)
# -----------------------------------------------------------------------------
# EXPERIMENT CONFIG
# -----------------------------------------------------------------------------
_C = CN()
_C.BASE_TASK_CONFIG_PATH = "habitat_extensions/config/vlnce_task.yaml"
_C.TASK_CONFIG = CN() # task_config will be stored as a config node
_C.TRAINER_NAME = "dagger"
_C.ENV_NAME = "VLNCEDaggerEnv"
_C.SIMULATOR_GPU_IDS = [0]
_C.VIDEO_OPTION = [] # options: "disk", "tensorboard"
_C.VIDEO_DIR = "videos/debug"
_C.TENSORBOARD_DIR = "data/tensorboard_dirs/debug"
_C.RESULTS_DIR = "data/checkpoints/pretrained/evals"
# -----------------------------------------------------------------------------
# EVAL CONFIG
# -----------------------------------------------------------------------------
_C.EVAL = CN()
# The split to evaluate on
_C.EVAL.SPLIT = "val_seen"
_C.EVAL.EPISODE_COUNT = -1
_C.EVAL.LANGUAGES = ["en-US", "en-IN"]
_C.EVAL.SAMPLE = False
_C.EVAL.SAVE_RESULTS = True
_C.EVAL.EVAL_NONLEARNING = False
_C.EVAL.NONLEARNING = CN()
_C.EVAL.NONLEARNING.AGENT = "RandomAgent"
# -----------------------------------------------------------------------------
# INFERENCE CONFIG
# -----------------------------------------------------------------------------
_C.INFERENCE = CN()
_C.INFERENCE.SPLIT = "test"
_C.INFERENCE.LANGUAGES = ["en-US", "en-IN"]
_C.INFERENCE.SAMPLE = False
_C.INFERENCE.USE_CKPT_CONFIG = True
_C.INFERENCE.CKPT_PATH = "data/checkpoints/CMA_PM_DA_Aug.pth"
_C.INFERENCE.PREDICTIONS_FILE = "predictions.json"
_C.INFERENCE.INFERENCE_NONLEARNING = False
_C.INFERENCE.NONLEARNING = CN()
_C.INFERENCE.NONLEARNING.AGENT = "RandomAgent"
_C.INFERENCE.FORMAT = "rxr" # either 'rxr' or 'r2r'
# -----------------------------------------------------------------------------
# IMITATION LEARNING CONFIG
# -----------------------------------------------------------------------------
_C.IL = CN()
_C.IL.lr = 2.5e-4
_C.IL.batch_size = 5
_C.IL.epochs = 4
_C.IL.use_iw = True
# inflection coefficient for RxR training set GT trajectories (guide): 1.9
# inflection coefficient for R2R training set GT trajectories: 3.2
_C.IL.inflection_weight_coef = 3.2
_C.IL.waypoint_aug = False
# load an already-trained model for fine-tuning
_C.IL.load_from_ckpt = False
_C.IL.ckpt_to_load = "data/checkpoints/ckpt.0.pth"
# if True, loads the optimizer state, epoch, and step_id from the ckpt dict.
_C.IL.is_requeue = False
# if True, start training from the saved epoch
# -----------------------------------------------------------------------------
# IL: RXR TRAINER CONFIG
# -----------------------------------------------------------------------------
_C.IL.RECOLLECT_TRAINER = CN()
_C.IL.RECOLLECT_TRAINER.preload_trajectories_file = True
_C.IL.RECOLLECT_TRAINER.trajectories_file = (
"data/trajectories_dirs/debug/trajectories.json.gz"
)
# if set to a positive int, episodes with longer paths are ignored in training
_C.IL.RECOLLECT_TRAINER.max_traj_len = -1
# if set to a positive int, effective_batch_size must be some multiple of
# IL.batch_size. Gradient accumulation enables an arbitrarily high "effective"
# batch size.
_C.IL.RECOLLECT_TRAINER.effective_batch_size = -1
_C.IL.RECOLLECT_TRAINER.preload_size = 30
_C.IL.RECOLLECT_TRAINER.use_iw = True
_C.IL.RECOLLECT_TRAINER.gt_file = (
"data/datasets/RxR_VLNCE_v0_enc_xlmr/{split}/{split}_{role}_gt.json.gz"
)
# -----------------------------------------------------------------------------
# IL: DAGGER CONFIG
# -----------------------------------------------------------------------------
_C.IL.DAGGER = CN()
_C.IL.DAGGER.iterations = 10
_C.IL.DAGGER.update_size = 5000
_C.IL.DAGGER.p = 0.75
_C.IL.DAGGER.expert_policy_sensor = "SHORTEST_PATH_SENSOR"
_C.IL.DAGGER.expert_policy_sensor_uuid = "shortest_path_sensor"
_C.IL.DAGGER.load_space = False
# if True, load saved observation space and action space
_C.IL.DAGGER.lmdb_map_size = 1.0e12
# if True, saves data to disk in fp16 and converts back to fp32 when loading.
_C.IL.DAGGER.lmdb_fp16 = False
# How often to commit writes to the DB; fewer commits are better,
# but everything must be held in memory until a commit happens.
_C.IL.DAGGER.lmdb_commit_frequency = 500
# If True, load precomputed features directly from lmdb_features_dir.
_C.IL.DAGGER.preload_lmdb_features = False
_C.IL.DAGGER.lmdb_features_dir = (
"data/trajectories_dirs/debug/trajectories.lmdb"
)
# -----------------------------------------------------------------------------
# RL CONFIG
# -----------------------------------------------------------------------------
_C.RL = CN()
_C.RL.POLICY = CN()
_C.RL.POLICY.OBS_TRANSFORMS = CN()
_C.RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS = [
"CenterCropperPerSensor",
]
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = [
("rgb", (224, 224)),
("depth", (256, 256)),
]
_C.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR = CN()
_C.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = [
("rgb", (224, 298)),
("depth", (256, 341)),
]
# -----------------------------------------------------------------------------
# MODELING CONFIG
# -----------------------------------------------------------------------------
_C.MODEL = CN()
_C.MODEL.policy_name = "CMAPolicy" # or "Seq2SeqPolicy"
_C.MODEL.ablate_depth = False
_C.MODEL.ablate_rgb = False
_C.MODEL.ablate_instruction = False
_C.MODEL.INSTRUCTION_ENCODER = CN()
_C.MODEL.INSTRUCTION_ENCODER.sensor_uuid = "instruction"
_C.MODEL.INSTRUCTION_ENCODER.vocab_size = 2504
_C.MODEL.INSTRUCTION_ENCODER.use_pretrained_embeddings = True
_C.MODEL.INSTRUCTION_ENCODER.embedding_file = (
"data/datasets/R2R_VLNCE_v1-2_preprocessed/embeddings.json.gz"
)
_C.MODEL.INSTRUCTION_ENCODER.dataset_vocab = (
"data/datasets/R2R_VLNCE_v1-2_preprocessed/train/train.json.gz"
)
_C.MODEL.INSTRUCTION_ENCODER.fine_tune_embeddings = False
_C.MODEL.INSTRUCTION_ENCODER.embedding_size = 50
_C.MODEL.INSTRUCTION_ENCODER.hidden_size = 128
_C.MODEL.INSTRUCTION_ENCODER.rnn_type = "LSTM"
_C.MODEL.INSTRUCTION_ENCODER.final_state_only = True
_C.MODEL.INSTRUCTION_ENCODER.bidirectional = False
_C.MODEL.spatial_output = True
_C.MODEL.RGB_ENCODER = CN()
_C.MODEL.RGB_ENCODER.backbone_type = "TorchVisionResNet50"
_C.MODEL.RGB_ENCODER.output_size = 256
_C.MODEL.DEPTH_ENCODER = CN()
_C.MODEL.DEPTH_ENCODER.backbone_type = "VlnResnetDepthEncoder"
_C.MODEL.DEPTH_ENCODER.output_size = 128
# type of resnet to use
_C.MODEL.DEPTH_ENCODER.backbone = "resnet50"
# path to DDPPO resnet weights
_C.MODEL.DEPTH_ENCODER.ddppo_checkpoint = (
"pretrained/ddppo-models/gibson-2plus-resnet50.pth"
)
_C.MODEL.STATE_ENCODER = CN()
_C.MODEL.STATE_ENCODER.hidden_size = 512
_C.MODEL.STATE_ENCODER.rnn_type = "GRU"
_C.MODEL.SEQ2SEQ = CN()
_C.MODEL.SEQ2SEQ.use_prev_action = False
_C.MODEL.PROGRESS_MONITOR = CN()
_C.MODEL.PROGRESS_MONITOR.use = False
_C.MODEL.PROGRESS_MONITOR.alpha = 1.0 # loss multiplier
def purge_keys(config: CN, keys: List[str]) -> None:
for k in keys:
del config[k]
config.register_deprecated_key(k)
def get_config(
config_paths: Optional[Union[List[str], str]] = None,
opts: Optional[list] = None,
) -> CN:
r"""Create a unified config with default values. Initialized from the
habitat_baselines default config. Overwritten by values from
`config_paths` and overwritten by options from `opts`.
Args:
config_paths: List of config paths or string that contains comma
separated list of config paths.
opts: Config options (keys, values) in a list (e.g., passed from
command line into the config. For example, `opts = ['FOO.BAR',
0.5]`. Argument can be used for parameter sweeping or quick tests.
"""
config = CN()
config.merge_from_other_cfg(habitat_baselines.config.default._C)
purge_keys(config, ["SIMULATOR_GPU_ID", "TEST_EPISODE_COUNT"])
config.merge_from_other_cfg(_C.clone())
if config_paths:
if isinstance(config_paths, str):
if CONFIG_FILE_SEPARATOR in config_paths:
config_paths = config_paths.split(CONFIG_FILE_SEPARATOR)
else:
config_paths = [config_paths]
prev_task_config = ""
for config_path in config_paths:
config.merge_from_file(config_path)
if config.BASE_TASK_CONFIG_PATH != prev_task_config:
config.TASK_CONFIG = get_task_config(
config.BASE_TASK_CONFIG_PATH
)
prev_task_config = config.BASE_TASK_CONFIG_PATH
if opts:
config.CMD_TRAILING_OPTS = opts
config.merge_from_list(opts)
config.freeze()
return config
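# Example usage (illustrative only; the YAML path and override below are hypothetical):
#   config = get_config(
#       "vlnce_baselines/config/r2r_baselines/example.yaml",
#       opts=["IL.batch_size", "4"],
#   )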
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/config/default.py |
import abc
from typing import Any
from habitat_baselines.rl.ppo.policy import Policy
from habitat_baselines.utils.common import (
CategoricalNet,
CustomFixedCategorical,
)
from torch.distributions import Categorical
class ILPolicy(Policy, metaclass=abc.ABCMeta):
def __init__(self, net, dim_actions):
r"""Defines an imitation learning policy as having functions act() and
build_distribution().
"""
super(Policy, self).__init__()
self.net = net
self.dim_actions = dim_actions
# self.action_distribution = CategoricalNet(
# self.net.output_size, self.dim_actions
# )
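        # NOTE: act() and build_distribution() below reference
        # self.action_distribution; since the CategoricalNet above is left
        # commented out, that attribute is presumably expected to be provided
        # by subclasses or downstream code.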
def forward(self, *x):
raise NotImplementedError
def act(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
):
print('need to revise for CMA and VLNBERT')
import pdb; pdb.set_trace()
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution = self.action_distribution(features)
# if distribution.logit
if deterministic:
action = distribution.mode()
else:
action = distribution.sample()
return action, rnn_hidden_states
def get_value(self, *args: Any, **kwargs: Any):
raise NotImplementedError
def evaluate_actions(self, *args: Any, **kwargs: Any):
raise NotImplementedError
def build_distribution(
self, observations, rnn_hidden_states, prev_actions, masks
) -> CustomFixedCategorical:
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
return self.action_distribution(features)
def act2(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
):
print('need to revise for CMA and VLNBERT')
import pdb; pdb.set_trace()
feature_rgb, feature_depth, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution_rgb = self.action_distribution(feature_rgb)
distribution_depth = self.action_distribution(feature_depth)
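        # simple late fusion: average the per-modality action distributions
        # predicted from the RGB and depth features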
probs = (distribution_rgb.probs + distribution_depth.probs)/2
# if distribution.logit
if deterministic:
action = probs.argmax(dim=-1, keepdim=True)
else:
action = Categorical(probs).sample().unsqueeze(-1)
return action, rnn_hidden_states
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/policy.py |
from copy import deepcopy
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import Space
from habitat import Config
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.models.rnn_state_encoder import (
build_rnn_state_encoder,
)
from habitat_baselines.rl.ppo.policy import Net
from vlnce_baselines.models.hamt.vlnbert_init import get_vlnbert_models
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.models.encoders.instruction_encoder import (
InstructionEncoder,
)
from vlnce_baselines.models.encoders.image_encoders import (
TorchVisionResNet50,
VlnResnetDepthEncoder,
CLIPEncoder,
)
from vlnce_baselines.models.encoders.video_encoder import VideoRGBEcnoder
from vlnce_baselines.models.policy import ILPolicy
from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM
from vlnce_baselines.waypoint_pred.utils import nms
from vlnce_baselines.models.utils import (
angle_feature_with_ele, dir_angle_feature_with_ele, angle_feature_torch, length2mask)
import math
@baseline_registry.register_policy
class PolicyViewSelectionHAMT(ILPolicy):
def __init__(
self,
observation_space: Space,
action_space: Space,
model_config: Config,
):
super().__init__(
HAMT(
observation_space=observation_space,
model_config=model_config,
num_actions=action_space.n,
),
action_space.n,
)
@classmethod
def from_config(
cls, config: Config, observation_space: Space, action_space: Space
):
config.defrost()
config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_ID
config.MODEL.use_critic = (config.IL.feedback == 'sample')
config.freeze()
return cls(
observation_space=observation_space,
action_space=action_space,
model_config=config.MODEL,
)
class Critic(nn.Module):
def __init__(self, drop_ratio):
super(Critic, self).__init__()
self.state2value = nn.Sequential(
nn.Linear(768, 512),
nn.ReLU(),
nn.Dropout(drop_ratio),
nn.Linear(512, 1),
)
def forward(self, state):
return self.state2value(state).squeeze()
class HAMT(Net):
def __init__(
self, observation_space: Space, model_config: Config, num_actions,
):
super().__init__()
device = (
torch.device("cuda", model_config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.device = device
        print('\nInitializing the HAMT model ...')
self.vln_bert = get_vlnbert_models(config=model_config)
if model_config.task_type == 'r2r':
self.rgb_projection = nn.Linear(model_config.RGB_ENCODER.output_size, 768)
elif model_config.task_type == 'rxr':
self.rgb_projection = nn.Linear(model_config.RGB_ENCODER.output_size, 512)
        # self.rgb_projection = nn.Linear(2048, 768)  # for ViT 768 compatibility
self.drop_env = nn.Dropout(p=0.4)
if model_config.use_critic:
self.critic = Critic(drop_ratio=0.5)
# Init the depth encoder
assert model_config.DEPTH_ENCODER.backbone_type in [
"VlnResnetDepthEncoder"
], "DEPTH_ENCODER.backbone_type must be VlnResnetDepthEncoder"
self.depth_encoder = VlnResnetDepthEncoder(
observation_space,
output_size=model_config.DEPTH_ENCODER.output_size,
checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint,
backbone=model_config.DEPTH_ENCODER.backbone,
spatial_output=model_config.spatial_output,
)
self.space_pool_depth = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)), nn.Flatten(start_dim=2))
# Init the RGB encoder
# assert model_config.RGB_ENCODER.backbone_type in [
# "TorchVisionResNet152", "TorchVisionResNet50"
# ], "RGB_ENCODER.backbone_type must be TorchVisionResNet152 or TorchVisionResNet50"
if model_config.RGB_ENCODER.backbone_type == "TorchVisionResNet50":
self.rgb_encoder = TorchVisionResNet50(
observation_space,
model_config.RGB_ENCODER.output_size,
device,
spatial_output=model_config.spatial_output,
)
elif model_config.RGB_ENCODER.backbone_type == "CLIP":
self.rgb_encoder = CLIPEncoder(self.device)
elif model_config.RGB_ENCODER.backbone_type.startswith("VideoIntern"):
self.rgb_encoder = VideoRGBEcnoder(
observation_space,
model_config.RGB_ENCODER.output_size,
model_config.RGB_ENCODER.backbone_type,
self.device
)
self.clip_encoder = CLIPEncoder(self.device)
if "Base" in model_config.RGB_ENCODER.backbone_type:
self.rgb_embedding_projection = nn.Linear(512+768, 768)
elif "Large" in model_config.RGB_ENCODER.backbone_type:
self.rgb_embedding_projection = nn.Linear(512+1024, 768)
self.space_pool_rgb = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)), nn.Flatten(start_dim=2))
        self.pano_img_idxes = np.arange(0, 12, dtype=np.int64)  # counter-clockwise view order
        pano_angle_rad_c = (1-self.pano_img_idxes/12) * 2 * math.pi  # corresponding counter-clockwise angles
self.pano_angle_fts = angle_feature_torch(torch.from_numpy(pano_angle_rad_c))
if model_config.progress_monitor:
self.state2 = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(768,512),
nn.Tanh()
)
self.progress_monitor = nn.Sequential(
nn.Linear(model_config.max_len + 512, 1),
nn.Sigmoid()
)
@property # trivial argument, just for init with habitat
def output_size(self):
return 1
@property
def is_blind(self):
return self.rgb_encoder.is_blind or self.depth_encoder.is_blind
@property
def num_recurrent_layers(self):
return 1
def forward(self, mode=None,
waypoint_predictor=None, observations=None, in_train=True,
txt_ids=None, txt_masks=None, txt_embeds=None,
hist_rgb_fts=None, hist_depth_fts=None, hist_ang_fts=None, embeddings = None,
hist_pano_rgb_fts=None, hist_pano_depth_fts=None, hist_pano_ang_fts=None,
hist_embeds=None, hist_lens=None, ob_step=None,
ob_rgb_fts=None, ob_dep_fts=None, ob_ang_fts=None, ob_dis_fts=None,
ob_nav_types=None, ob_masks=None, return_states=False, critic_states=None,
h_t=None,language_attention=None):
if mode == 'language':
encoded_sentence = self.vln_bert(mode, txt_ids=txt_ids, txt_masks=txt_masks)
return encoded_sentence
elif mode == 'waypoint':
# batch_size = observations['instruction'].size(0)
batch_size = observations['rgb'].shape[0]
''' encoding rgb/depth at all directions ----------------------------- '''
NUM_ANGLES = 120 # 120 angles 3 degrees each
NUM_IMGS = 12
NUM_CLASSES = 12 # 12 distances at each sector
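            # the waypoint predictor thus outputs a (batch, 120, 12) heatmap:
            # 120 heading bins (3 degrees each) x 12 distance bins, converted
            # below via (distance_idxes + 1) * 0.25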
depth_batch = torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1)
rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1)
# reverse the order of input images to clockwise
a_count = 0
for i, (k, v) in enumerate(observations.items()):
if 'depth' in k: # You might need to double check the keys order
for bi in range(v.size(0)):
ra_count = (NUM_IMGS - a_count) % NUM_IMGS
depth_batch[ra_count + bi*NUM_IMGS] = v[bi]
rgb_batch[ra_count + bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi]
a_count += 1
obs_view12 = {}
rgb_batch = rgb_batch/255
obs_view12['depth'] = depth_batch
obs_view12['video_rgbs'] = observations['video_rgbs']
depth_embedding = self.depth_encoder(obs_view12) # torch.Size([bs, 128, 4, 4])
video_embedding = self.rgb_encoder(obs_view12) # torch.Size([bs, 2048, 7, 7])
clip_embedding = self.clip_encoder({'rgb':rgb_batch})
rgb_embedding_cated = torch.cat([video_embedding,clip_embedding],1)
rgb_embedding = self.rgb_embedding_projection(rgb_embedding_cated)
''' waypoint prediction ----------------------------- '''
waypoint_heatmap_logits = waypoint_predictor(
rgb_embedding, depth_embedding)
# reverse the order of images back to counter-clockwise
rgb_embed_reshape = rgb_embedding.reshape(
batch_size, NUM_IMGS, 768, 1, 1)
# rgb_embed_reshape = rgb_embedding.reshape(
# batch_size, NUM_IMGS, 2048, 7, 7)
depth_embed_reshape = depth_embedding.reshape(
batch_size, NUM_IMGS, 128, 4, 4)
rgb_feats = torch.cat((
rgb_embed_reshape[:,0:1,:],
torch.flip(rgb_embed_reshape[:,1:,:], [1]),
), dim=1)
depth_feats = torch.cat((
depth_embed_reshape[:,0:1,:],
torch.flip(depth_embed_reshape[:,1:,:], [1]),
), dim=1)
# way_feats = torch.cat((
# way_feats[:,0:1,:],
# torch.flip(way_feats[:,1:,:], [1]),
# ), dim=1)
# from heatmap to points
batch_x_norm = torch.softmax(
waypoint_heatmap_logits.reshape(
batch_size, NUM_ANGLES*NUM_CLASSES,
), dim=1
)
batch_x_norm = batch_x_norm.reshape(
batch_size, NUM_ANGLES, NUM_CLASSES,
)
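            # pad one heading bin on each side so the NMS below treats the
            # heading dimension as circular (wrap-around at 0/360 degrees)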
batch_x_norm_wrap = torch.cat((
batch_x_norm[:,-1:,:],
batch_x_norm,
batch_x_norm[:,:1,:]),
dim=1)
batch_output_map = nms(
batch_x_norm_wrap.unsqueeze(1),
max_predictions=5,
sigma=(7.0,5.0))
# predicted waypoints before sampling
batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:]
# candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist()
# if isinstance(candidate_lengths, int):
# candidate_lengths = [candidate_lengths]
# max_candidate = max(candidate_lengths) # including stop
# cand_mask = length2mask(candidate_lengths, device=self.device)
if in_train:
# Waypoint augmentation
# parts of heatmap for sampling (fix offset first)
HEATMAP_OFFSET = 5
batch_way_heats_regional = torch.cat(
(waypoint_heatmap_logits[:,-HEATMAP_OFFSET:,:],
waypoint_heatmap_logits[:,:-HEATMAP_OFFSET,:],
), dim=1)
batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12)
batch_sample_angle_idxes = []
batch_sample_distance_idxes = []
# batch_way_log_prob = []
for j in range(batch_size):
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# clockwise image indexes (same as batch_x_norm)
img_idxes = ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
# # candidate waypoint states
# way_feats_regional = way_feats[j][img_idxes]
# heatmap regions for sampling
way_heats_regional = batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1)
way_heats_probs = F.softmax(way_heats_regional, 1)
probs_c = torch.distributions.Categorical(way_heats_probs)
way_heats_act = probs_c.sample().detach()
sample_angle_idxes = []
sample_distance_idxes = []
for k, way_act in enumerate(way_heats_act):
if img_idxes[k] != 0:
angle_pointer = (img_idxes[k] - 1) * 10 + 5
else:
angle_pointer = 0
sample_angle_idxes.append(way_act//12+angle_pointer)
sample_distance_idxes.append(way_act%12)
batch_sample_angle_idxes.append(sample_angle_idxes)
batch_sample_distance_idxes.append(sample_distance_idxes)
# batch_way_log_prob.append(
# probs_c.log_prob(way_heats_act))
else:
# batch_way_log_prob = None
                pass
rgb_feats = self.space_pool_rgb(rgb_feats).cpu()
depth_feats = self.space_pool_depth(depth_feats).cpu()
# for cand
cand_rgb = []
cand_depth = []
cand_angle_fts = []
cand_dis_fts = []
cand_img_idxes = []
cand_angles = []
cand_distances = []
for j in range(batch_size):
if in_train:
angle_idxes = torch.tensor(batch_sample_angle_idxes[j])
distance_idxes = torch.tensor(batch_sample_distance_idxes[j])
else:
angle_idxes = batch_output_map[j].nonzero()[:, 0]
distance_idxes = batch_output_map[j].nonzero()[:, 1]
# for angle & distance
                angle_rad_c = angle_idxes.cpu().float()/120*2*math.pi  # clockwise
                angle_rad_cc = 2*math.pi-angle_idxes.float()/120*2*math.pi  # counter-clockwise
cand_angle_fts.append( angle_feature_torch(angle_rad_c) )
cand_angles.append(angle_rad_cc.tolist())
cand_distances.append( ((distance_idxes + 1)*0.25).tolist() )
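                # distance feature: metric distance (0.25 per bin) rescaled by
                # 1/3 and tiled into a 4-dim vector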
cand_dis_fts.append((((distance_idxes + 1)*0.25/3).repeat(4,1).T).cpu())
# for img idxes
                img_idxes = 12 - (angle_idxes.cpu().numpy()+5) // 10  # counter-clockwise
img_idxes[img_idxes==12] = 0
cand_img_idxes.append(img_idxes)
# for rgb & depth
cand_rgb.append(rgb_feats[j, img_idxes, ...])
cand_depth.append(depth_feats[j, img_idxes, ...])
# for pano
pano_rgb = rgb_feats # B x 12 x 2048
pano_depth = depth_feats # B x 12 x 128
pano_angle_fts = deepcopy(self.pano_angle_fts) # 12 x 4
pano_dis_fts = torch.zeros_like(pano_angle_fts) # 12 x 4
pano_img_idxes = deepcopy(self.pano_img_idxes) # 12
            # cand_angle_fts: clockwise
            # cand_angles: counter-clockwise
outputs = {
'cand_rgb': cand_rgb, # [K x 2048]
'cand_depth': cand_depth, # [K x 128]
'cand_angle_fts': cand_angle_fts, # [K x 4]
'cand_dis_fts': cand_dis_fts, # [K x 4]
'cand_img_idxes': cand_img_idxes, # [K]
'cand_angles': cand_angles, # [K]
'cand_distances': cand_distances, # [K]
'pano_rgb': pano_rgb, # B x 12 x 2048
'pano_depth': pano_depth, # B x 12 x 128
'pano_angle_fts': pano_angle_fts, # 12 x 4
'pano_dis_fts': pano_dis_fts, # 12 x 4
'pano_img_idxes': pano_img_idxes, # 12
}
return outputs
elif mode == 'navigation':
hist_embeds = torch.stack(hist_embeds, 1)
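            # hist_masks: True for valid history steps, False for padding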
hist_masks = length2mask(hist_lens, size=hist_embeds.size(1)).logical_not()
ob_rgb_fts = self.drop_env(ob_rgb_fts)
ob_rgb_fts = self.rgb_projection(ob_rgb_fts)
act_logits, txt_embeds, hist_embeds, ob_embeds, lang_attention_score = self.vln_bert(
mode, txt_embeds=txt_embeds, txt_masks=txt_masks,
hist_embeds=hist_embeds, hist_masks=hist_masks,
ob_rgb_fts=ob_rgb_fts, ob_dep_fts=ob_dep_fts, ob_ang_fts=ob_ang_fts, ob_dis_fts=ob_dis_fts,
ob_nav_types=ob_nav_types, ob_masks=ob_masks)
if return_states:
# if self.args.no_lang_ca:
# states = hist_embeds[:, 0]
# else:
# states = txt_embeds[:, 0] * hist_embeds[:, 0] # [CLS]
states = txt_embeds[:, 0] * hist_embeds[:, 0]
return act_logits, states, lang_attention_score
return (act_logits, )
elif mode == 'history':
if hist_rgb_fts is not None:
hist_rgb_fts = self.drop_env(hist_rgb_fts)
hist_rgb_fts = self.rgb_projection(hist_rgb_fts)
if hist_pano_rgb_fts is not None:
hist_pano_rgb_fts = self.drop_env(hist_pano_rgb_fts)
hist_pano_rgb_fts = self.rgb_projection(hist_pano_rgb_fts)
if ob_step is not None:
ob_step_ids = torch.LongTensor([ob_step]).cuda()
else:
ob_step_ids = None
hist_embeds = self.vln_bert(mode, hist_rgb_fts=hist_rgb_fts,
hist_ang_fts=hist_ang_fts, hist_depth_fts=hist_depth_fts, ob_step_ids=ob_step_ids,
hist_pano_rgb_fts=hist_pano_rgb_fts, hist_pano_depth_fts=hist_pano_depth_fts,
hist_pano_ang_fts=hist_pano_ang_fts)
return hist_embeds
elif mode == 'critic':
return self.critic(critic_states)
elif mode == 'progress':
pm_in = torch.cat((self.state2(h_t),language_attention.sum(1)),1)
progresses = self.progress_monitor(pm_in)
return progresses
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_HAMT.py |
InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/__init__.py |
|
import math
import torch
def angle_feature(headings, device=None):
# twopi = math.pi * 2
# heading = (heading + twopi) % twopi # From 0 ~ 2pi
# It will be the same
heading_enc = torch.zeros(len(headings), 64, dtype=torch.float32)
for i, head in enumerate(headings):
heading_enc[i] = torch.tensor(
[math.sin(head), math.cos(head)] * (64 // 2))
return heading_enc.to(device)
def dir_angle_feature(angle_list, device=None):
feature_dim = 64
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[math.sin(angle_rad),
math.cos(angle_rad)] * (feature_dim // 2))
return heading_enc
def angle_feature_with_ele(headings, device=None):
# twopi = math.pi * 2
# heading = (heading + twopi) % twopi # From 0 ~ 2pi
# It will be the same
heading_enc = torch.zeros(len(headings), 128, dtype=torch.float32)
for i, head in enumerate(headings):
heading_enc[i] = torch.tensor(
[
math.sin(head), math.cos(head),
math.sin(0.0), math.cos(0.0), # elevation
] * (128 // 4))
return heading_enc.to(device)
def angle_feature_torch(headings: torch.Tensor):
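    # returns an (N, 4) tensor: [sin(h), cos(h), sin(0), cos(0)] per heading h
    # (the elevation terms are fixed at zero)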
return torch.stack(
[
torch.sin(headings),
torch.cos(headings),
torch.sin(torch.zeros_like(headings)),
torch.cos(torch.zeros_like(headings))
]
).float().T
def dir_angle_feature_with_ele(angle_list, device=None):
feature_dim = 128
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[
math.sin(angle_rad), math.cos(angle_rad),
math.sin(0.0), math.cos(0.0), # elevation
] * (128 // 4))
return heading_enc
def length2mask(length, size=None, device=None):
    batch_size = len(length)
    size = int(max(length)) if size is None else size
    mask = (torch.arange(size, dtype=torch.int64).unsqueeze(0).repeat(batch_size, 1)
            > (torch.LongTensor(length) - 1).unsqueeze(1))
    # move to the requested device if given; otherwise keep the original .cuda() behaviour
    mask = mask.to(device) if device is not None else mask.cuda()
return mask | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/utils.py |
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import Space
from habitat import Config
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.models.rnn_state_encoder import (
build_rnn_state_encoder,
)
from habitat_baselines.rl.ppo.policy import Net
from habitat_baselines.utils.common import CustomFixedCategorical
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.models.encoders.instruction_encoder import (
InstructionEncoder,
)
from vlnce_baselines.models.encoders.image_encoders import (
TorchVisionResNet50,
VlnResnetDepthEncoder
)
from vlnce_baselines.models.policy import ILPolicy
from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM
from vlnce_baselines.waypoint_pred.utils import nms
from vlnce_baselines.models.utils import (
length2mask, angle_feature, dir_angle_feature)
import math
@baseline_registry.register_policy
class PolicyViewSelectionCMA(ILPolicy):
def __init__(
self,
observation_space: Space,
action_space: Space,
model_config: Config,
):
super().__init__(
CMANet(
observation_space=observation_space,
model_config=model_config,
num_actions=action_space.n,
),
action_space.n,
)
@classmethod
def from_config(
cls, config: Config, observation_space: Space, action_space: Space
):
config.defrost()
config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_ID
config.freeze()
return cls(
observation_space=observation_space,
action_space=action_space,
model_config=config.MODEL,
)
class CMANet(Net):
r"""A cross-modal attention (CMA) network that contains:
Instruction encoder
Depth encoder
RGB encoder
CMA state encoder
"""
def __init__(
self, observation_space: Space, model_config: Config, num_actions
):
super().__init__()
self.model_config = model_config
model_config.defrost()
model_config.INSTRUCTION_ENCODER.final_state_only = False
model_config.freeze()
device = (
torch.device("cuda", model_config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.device = device
# Init the instruction encoder
self.instruction_encoder = InstructionEncoder(
model_config.INSTRUCTION_ENCODER
)
# Init the depth encoder
assert model_config.DEPTH_ENCODER.backbone_type in [
"VlnResnetDepthEncoder"
], "DEPTH_ENCODER.backbone_type must be VlnResnetDepthEncoder"
self.depth_encoder = VlnResnetDepthEncoder(
observation_space,
output_size=model_config.DEPTH_ENCODER.output_size,
checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint,
backbone=model_config.DEPTH_ENCODER.backbone,
spatial_output=model_config.spatial_output,
)
# Init the RGB encoder
assert model_config.RGB_ENCODER.backbone_type in [
"TorchVisionResNet152", "TorchVisionResNet50"
], "RGB_ENCODER.backbone_type must be TorchVisionResNet152 or TorchVisionResNet50"
if model_config.RGB_ENCODER.backbone_type == "TorchVisionResNet50":
self.rgb_encoder = TorchVisionResNet50(
observation_space,
model_config.RGB_ENCODER.output_size,
device,
spatial_output=model_config.spatial_output,
)
hidden_size = model_config.STATE_ENCODER.hidden_size
self._hidden_size = hidden_size
# merging visual inputs
self.rgb_linear = nn.Sequential(
nn.Linear(
2048,
model_config.RGB_ENCODER.output_size, # 256
),
nn.ReLU(True),
)
if self.depth_encoder.spatial_output:
            pass
else:
self.depth_linear = nn.Sequential(
nn.Linear(
128, # 128
model_config.DEPTH_ENCODER.output_size, # 128
),
nn.ReLU(True),
)
self.vismerge_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.output_size + model_config.RGB_ENCODER.output_size + model_config.VISUAL_DIM.directional,
model_config.VISUAL_DIM.vis_hidden,
),
nn.ReLU(True),
)
self.enc_prev_act = nn.Sequential(
nn.Linear(model_config.VISUAL_DIM.directional, model_config.VISUAL_DIM.directional),
nn.Tanh(),
)
# Init the RNN state decoder
self.state_encoder = build_rnn_state_encoder(
input_size=model_config.VISUAL_DIM.vis_hidden + model_config.VISUAL_DIM.directional,
hidden_size=model_config.STATE_ENCODER.hidden_size,
rnn_type=model_config.STATE_ENCODER.rnn_type,
num_layers=1,
)
self.prev_state_vis_attn = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.VISUAL_DIM.vis_hidden,
output_tilde=False
)
self.text_vis_attn = SoftDotAttention(
self.instruction_encoder.output_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.VISUAL_DIM.vis_hidden,
output_tilde=False
)
self.state_text_attn = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size,
self.instruction_encoder.output_size,
self.instruction_encoder.output_size,
output_tilde=False
)
self.state_vis_logits = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size+model_config.VISUAL_DIM.vis_hidden+self.instruction_encoder.output_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.STATE_ENCODER.hidden_size,
output_tilde=False
)
self.register_buffer(
"_scale", torch.tensor(1.0 / ((hidden_size // 2) ** 0.5))
)
self.space_pool = nn.Sequential(
nn.AdaptiveAvgPool2d((1,1)),
nn.Flatten(start_dim=2),)
# self.critic = nn.Sequential(
# nn.Linear(model_config.STATE_ENCODER.hidden_size, model_config.STATE_ENCODER.hidden_size),
# nn.ReLU(),
# nn.Dropout(0.5),
# nn.Linear(model_config.STATE_ENCODER.hidden_size, 1),
# )
# self.drop = nn.Dropout(p=0.50)
# self.drop_env = nn.Dropout(p=0.40)
self.train()
self.rgb_encoder.cnn.eval()
self.depth_encoder.eval()
# self.waypoint_predictor.eval()
@property
def is_blind(self):
return self.rgb_encoder.is_blind or self.depth_encoder.is_blind
@property # trivial argument, just for init with habitat
def output_size(self):
return 1
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
def forward(self, mode=None,
waypoint_predictor=None,
observations=None,
instruction=None, text_mask=None,
rnn_states=None,
cand_rgb=None, cand_depth=None,
cand_direction=None, cand_mask=None,
headings=None, masks=None,
post_states=None, in_train=True):
r"""
instruction_embedding: [batch_size x INSTRUCTION_ENCODER.output_size]
depth_embedding: [batch_size x DEPTH_ENCODER.output_size]
rgb_embedding: [batch_size x RGB_ENCODER.output_size]
"""
if mode == 'language':
ctx, all_lang_masks = self.instruction_encoder(observations)
return ctx, all_lang_masks
elif mode == 'waypoint':
# batch_size = observations['instruction'].size(0)
batch_size = observations['rgb'].shape[0]
''' encoding rgb/depth at all directions ----------------------------- '''
NUM_ANGLES = 120 # 120 angles 3 degrees each
NUM_IMGS = 12
NUM_CLASSES = 12 # 12 distances at each sector
depth_batch = torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1)
rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1)
# reverse the order of input images to clockwise
a_count = 0
for i, (k, v) in enumerate(observations.items()):
if 'depth' in k: # You might need to double check the keys order
for bi in range(v.size(0)):
ra_count = (NUM_IMGS - a_count) % NUM_IMGS
depth_batch[ra_count + bi*NUM_IMGS] = v[bi]
rgb_batch[ra_count + bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi]
a_count += 1
obs_view12 = {}
obs_view12['depth'] = depth_batch
obs_view12['rgb'] = rgb_batch
depth_embedding = self.depth_encoder(obs_view12) # torch.Size([bs, 128, 4, 4])
rgb_embedding = self.rgb_encoder(obs_view12) # torch.Size([bs, 2048, 7, 7])
''' waypoint prediction ----------------------------- '''
waypoint_heatmap_logits = waypoint_predictor(
rgb_embedding, depth_embedding)
# reverse the order of images back to counter-clockwise
rgb_embed_reshape = rgb_embedding.reshape(
batch_size, NUM_IMGS, 2048, 7, 7)
depth_embed_reshape = depth_embedding.reshape(
batch_size, NUM_IMGS, 128, 4, 4)
rgb_feats = torch.cat((
rgb_embed_reshape[:,0:1,:],
torch.flip(rgb_embed_reshape[:,1:,:], [1]),
), dim=1)
depth_feats = torch.cat((
depth_embed_reshape[:,0:1,:],
torch.flip(depth_embed_reshape[:,1:,:], [1]),
), dim=1)
# way_feats = torch.cat((
# way_feats[:,0:1,:],
# torch.flip(way_feats[:,1:,:], [1]),
# ), dim=1)
# from heatmap to points
batch_x_norm = torch.softmax(
waypoint_heatmap_logits.reshape(
batch_size, NUM_ANGLES*NUM_CLASSES,
), dim=1
)
batch_x_norm = batch_x_norm.reshape(
batch_size, NUM_ANGLES, NUM_CLASSES,
)
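            # pad one heading bin on each side so the NMS below treats the
            # heading dimension as circular (wrap-around at 0/360 degrees)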
batch_x_norm_wrap = torch.cat((
batch_x_norm[:,-1:,:],
batch_x_norm,
batch_x_norm[:,:1,:]),
dim=1)
batch_output_map = nms(
batch_x_norm_wrap.unsqueeze(1),
max_predictions=5,
sigma=(7.0,5.0))
# predicted waypoints before sampling
batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:]
candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist()
if isinstance(candidate_lengths, int):
candidate_lengths = [candidate_lengths]
max_candidate = max(candidate_lengths) # including stop
cand_mask = length2mask(candidate_lengths, device=self.device)
if in_train:
# Waypoint augmentation
# parts of heatmap for sampling (fix offset first)
batch_way_heats_regional = torch.cat(
(waypoint_heatmap_logits[:,-waypoint_predictor.HEATMAP_OFFSET:,:],
waypoint_heatmap_logits[:,:-waypoint_predictor.HEATMAP_OFFSET,:],
), dim=1)
batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12)
batch_sample_angle_idxes = []
batch_sample_distance_idxes = []
batch_way_log_prob = []
for j in range(batch_size):
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# clockwise image indexes (same as batch_x_norm)
img_idxes = ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
# # candidate waypoint states
# way_feats_regional = way_feats[j][img_idxes]
# heatmap regions for sampling
way_heats_regional = batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1)
way_heats_probs = F.softmax(way_heats_regional, 1)
probs_c = torch.distributions.Categorical(way_heats_probs)
way_heats_act = probs_c.sample().detach()
sample_angle_idxes = []
sample_distance_idxes = []
for k, way_act in enumerate(way_heats_act):
if img_idxes[k] != 0:
angle_pointer = (img_idxes[k] - 1) * 10 + 5
else:
angle_pointer = 0
sample_angle_idxes.append(way_act//12+angle_pointer)
sample_distance_idxes.append(way_act%12)
batch_sample_angle_idxes.append(sample_angle_idxes)
batch_sample_distance_idxes.append(sample_distance_idxes)
batch_way_log_prob.append(
probs_c.log_prob(way_heats_act))
else:
# batch_way_log_prob = None
                pass
cand_rgb = torch.zeros(
(batch_size, max_candidate, 2048, 7, 7),
dtype=torch.float32, device=self.device)
cand_depth = torch.zeros(
(batch_size, max_candidate, 128, 4, 4),
dtype=torch.float32, device=self.device)
batch_angles = []
batch_distances = []
batch_img_idxes = []
for j in range(batch_size):
if in_train:
angle_idxes = torch.tensor(batch_sample_angle_idxes[j])
distance_idxes = torch.tensor(batch_sample_distance_idxes[j])
else:
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# distance indexes for candidates
distance_idxes = batch_output_map[j].nonzero()[:, 1]
                # 2*pi minus the angle, because counter-clockwise is the positive direction
angle_rad = 2*math.pi-angle_idxes.float()/120*2*math.pi
batch_angles.append(angle_rad.tolist())
batch_distances.append(
((distance_idxes + 1)*0.25).tolist())
# counter-clockwise image indexes
img_idxes = 12 - ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
batch_img_idxes.append(img_idxes)
for k in range(len(img_idxes)):
cand_rgb[j][k] = rgb_feats[j][img_idxes[k]]
cand_depth[j][k] = depth_feats[j][img_idxes[k]]
cand_direction = dir_angle_feature(batch_angles).to(self.device)
if in_train:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances #, batch_way_log_prob
else:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances
elif mode == 'navigation':
cand_rgb_feats_pool = self.space_pool(cand_rgb)
# cand_rgb_feats_pool = self.drop_env(cand_rgb_feats_pool)
rgb_in = self.rgb_linear(cand_rgb_feats_pool)
cand_depth_feats_pool = self.space_pool(cand_depth)
# cand_depth_feats_pool = self.drop_env(cand_depth_feats_pool)
depth_in = self.depth_linear(cand_depth_feats_pool)
vis_in = self.vismerge_linear(
torch.cat((rgb_in, depth_in, cand_direction), dim=2),)
''' aggregate visual features by agent's previous state -------------- '''
prev_state = rnn_states[:, 0:self.state_encoder.num_recurrent_layers].squeeze(1)
vis_prev_state, _ = self.prev_state_vis_attn(
prev_state, vis_in, cand_mask)
''' first state encoder for new visual features '''
prev_actions = angle_feature(headings, device=self.device)
prev_actions = self.enc_prev_act(prev_actions)
# prev_actions = self.drop(prev_actions)
state_in = torch.cat([vis_prev_state, prev_actions], dim=1)
rnn_states_out = rnn_states.detach().clone()
(
state,
rnn_states_out[:, 0 : self.state_encoder.num_recurrent_layers],
) = self.state_encoder(
state_in,
rnn_states[:, 0 : self.state_encoder.num_recurrent_layers],
masks,
)
''' language attention using state '''
text_state, _ = self.state_text_attn(
state, instruction, text_mask)
''' visual attention using attended language '''
vis_text_feats, _ = self.text_vis_attn(
text_state, vis_in, cand_mask)
x = torch.cat((state, vis_text_feats, text_state), dim=1)
_, logits = self.state_vis_logits(
x, vis_in, cand_mask, output_prob=False)
return logits, rnn_states_out
elif mode == 'waypoint_actor':
            pass
elif mode == 'critic':
return self.critic(post_states)
class SoftDotAttention(nn.Module):
def __init__(self, q_dim, kv_dim, hidden_dim, output_tilde=False):
'''Initialize layer.'''
super(SoftDotAttention, self).__init__()
self.linear_q = nn.Linear(q_dim, hidden_dim, bias=True)
self.linear_kv = nn.Linear(kv_dim, hidden_dim, bias=True)
self.sm = nn.Softmax(dim=1)
self.output_tilde = output_tilde
if output_tilde:
self.linear_out = nn.Linear(q_dim + hidden_dim, hidden_dim, bias=False)
self.tanh = nn.Tanh()
def forward(self, q, kv, mask=None, output_prob=True):
'''Propagate h through the network.
q: (query) batch x dim
kv: (keys and values) batch x seq_len x dim
mask: batch x seq_len indices to be masked
'''
x_q = self.linear_q(q).unsqueeze(2) # batch x dim x 1
x_kv = self.linear_kv(kv)
# Get attention
attn = torch.bmm(x_kv, x_q).squeeze(2) # batch x seq_len
logit = attn
if mask is not None:
# -Inf masking prior to the softmax
attn.masked_fill_(mask, -float('inf'))
        attn = self.sm(attn)  # Note: a bug can surface here, but it originates in the torch source code rather than in this module.
attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x seq_len
weighted_x_kv = torch.bmm(attn3, x_kv).squeeze(1) # batch x dim
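        # when output_prob is False, return the pre-softmax logits instead of
        # the attention probabilities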
if not output_prob:
attn = logit
if self.output_tilde:
h_tilde = torch.cat((weighted_x_kv, q), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
else:
return weighted_x_kv, attn
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_CMA.py |
import argparse
def get_args():
parser = argparse.ArgumentParser('VideoMAE fine-tuning and evaluation script for video classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=100, type=int)
# Model parameters
parser.add_argument('--model', default='vit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--tubelet_size', type=int, default= 2)
parser.add_argument('--input_size', default=224, type=int,
help='videos input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
help='learning rate (default: 1e-3)')
parser.add_argument('--layer_decay', type=float, default=0.75)
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--num_sample', type=int, default=2,
help='Repeated_aug (default: 2)')
parser.add_argument('--aa', type=str, default='rand-m7-n4-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m7-n4-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
parser.add_argument('--short_side_size', type=int, default=224)
parser.add_argument('--test_num_segment', type=int, default=5)
parser.add_argument('--test_num_crop', type=int, default=3)
# Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling')
# Dataset parameters
parser.add_argument('--data_path', default='/path/to/list_kinetics-400', type=str,
help='dataset path')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes', default=400, type=int,
help='number of the classification types')
parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true')
parser.add_argument('--num_segments', type=int, default= 1)
parser.add_argument('--num_frames', type=int, default= 16)
parser.add_argument('--sampling_rate', type=int, default= 4)
parser.add_argument('--data_set', default='Kinetics-400', choices=['Kinetics-400', 'SSV2', 'UCF101', 'HMDB51','image_folder'],
type=str, help='dataset')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--enable_deepspeed', action='store_true', default=False)
parser.add_argument(
"--exp_name",
type=str,
default="test",
required=True,
help="experiment id that matches to exp-id in Notion log",
)
parser.add_argument(
"--run-type",
choices=["train", "eval", "inference"],
required=True,
help="run type of the experiment (train, eval, inference)",
)
parser.add_argument(
"--exp-config",
type=str,
required=True,
help="path to config yaml containing info about experiment",
)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
try:
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
        except ImportError:
            print("Please 'pip install deepspeed'")
            exit(1)
else:
ds_init = None
return parser.parse_args(), ds_init | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/get_args.py |
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py
pulished under an Apache License 2.0.
COMMENT FROM ORIGINAL:
AutoAugment, RandAugment, and AugMix for PyTorch
This code implements the searched ImageNet policies with various tweaks and
improvements and does not include any of the search code. AA and RA
Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
AugMix adapted from:
https://github.com/google-research/augmix
Papers:
AutoAugment: Learning Augmentation Policies from Data
https://arxiv.org/abs/1805.09501
Learning Data Augmentation Strategies for Object Detection
https://arxiv.org/abs/1906.11172
RandAugment: Practical automated data augmentation...
https://arxiv.org/abs/1909.13719
AugMix: A Simple Data Processing Method to Improve Robustness and
Uncertainty https://arxiv.org/abs/1912.02781
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.0
_HPARAMS_DEFAULT = {
"translate_const": 250,
"img_mean": _FILL,
}
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop("resample", Image.BILINEAR)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
if "fillcolor" in kwargs and _PIL_VER < (5, 0):
kwargs.pop("fillcolor")
kwargs["resample"] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs
)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs
)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
elif _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0],
-rotn_center[1] - post_trans[1],
matrix,
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs["resample"])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level, _hparams):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.0
level = _randomly_negate(level)
return (level,)
def _enhance_level_to_arg(level, _hparams):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _enhance_increasing_level_to_arg(level, _hparams):
# the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend
# range [0.1, 1.9]
level = (level / _MAX_LEVEL) * 0.9
level = 1.0 + _randomly_negate(level)
return (level,)
def _shear_level_to_arg(level, _hparams):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg(level, hparams):
translate_const = hparams["translate_const"]
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return (level,)
def _translate_rel_level_to_arg(level, hparams):
# default range [-0.45, 0.45]
translate_pct = hparams.get("translate_pct", 0.45)
level = (level / _MAX_LEVEL) * translate_pct
level = _randomly_negate(level)
return (level,)
def _posterize_level_to_arg(level, _hparams):
# As per Tensorflow TPU EfficientNet impl
# range [0, 4], 'keep 0 up to 4 MSB of original image'
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 4),)
def _posterize_increasing_level_to_arg(level, hparams):
# As per Tensorflow models research and UDA impl
# range [4, 0], 'keep 4 down to 0 MSB of original image',
# intensity/severity of augmentation increases with level
return (4 - _posterize_level_to_arg(level, hparams)[0],)
def _posterize_original_level_to_arg(level, _hparams):
# As per original AutoAugment paper description
# range [4, 8], 'keep 4 up to 8 MSB of image'
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 4) + 4,)
def _solarize_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 256),)
def _solarize_increasing_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation increases with level
return (256 - _solarize_level_to_arg(level, _hparams)[0],)
def _solarize_add_level_to_arg(level, _hparams):
# range [0, 110]
return (int((level / _MAX_LEVEL) * 110),)
LEVEL_TO_ARG = {
"AutoContrast": None,
"Equalize": None,
"Invert": None,
"Rotate": _rotate_level_to_arg,
# There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
"Posterize": _posterize_level_to_arg,
"PosterizeIncreasing": _posterize_increasing_level_to_arg,
"PosterizeOriginal": _posterize_original_level_to_arg,
"Solarize": _solarize_level_to_arg,
"SolarizeIncreasing": _solarize_increasing_level_to_arg,
"SolarizeAdd": _solarize_add_level_to_arg,
"Color": _enhance_level_to_arg,
"ColorIncreasing": _enhance_increasing_level_to_arg,
"Contrast": _enhance_level_to_arg,
"ContrastIncreasing": _enhance_increasing_level_to_arg,
"Brightness": _enhance_level_to_arg,
"BrightnessIncreasing": _enhance_increasing_level_to_arg,
"Sharpness": _enhance_level_to_arg,
"SharpnessIncreasing": _enhance_increasing_level_to_arg,
"ShearX": _shear_level_to_arg,
"ShearY": _shear_level_to_arg,
"TranslateX": _translate_abs_level_to_arg,
"TranslateY": _translate_abs_level_to_arg,
"TranslateXRel": _translate_rel_level_to_arg,
"TranslateYRel": _translate_rel_level_to_arg,
}
NAME_TO_OP = {
"AutoContrast": auto_contrast,
"Equalize": equalize,
"Invert": invert,
"Rotate": rotate,
"Posterize": posterize,
"PosterizeIncreasing": posterize,
"PosterizeOriginal": posterize,
"Solarize": solarize,
"SolarizeIncreasing": solarize,
"SolarizeAdd": solarize_add,
"Color": color,
"ColorIncreasing": color,
"Contrast": contrast,
"ContrastIncreasing": contrast,
"Brightness": brightness,
"BrightnessIncreasing": brightness,
"Sharpness": sharpness,
"SharpnessIncreasing": sharpness,
"ShearX": shear_x,
"ShearY": shear_y,
"TranslateX": translate_x_abs,
"TranslateY": translate_y_abs,
"TranslateXRel": translate_x_rel,
"TranslateYRel": translate_y_rel,
}
class AugmentOp:
"""
Apply for video.
"""
def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
hparams = hparams or _HPARAMS_DEFAULT
self.aug_fn = NAME_TO_OP[name]
self.level_fn = LEVEL_TO_ARG[name]
self.prob = prob
self.magnitude = magnitude
self.hparams = hparams.copy()
self.kwargs = {
"fillcolor": hparams["img_mean"]
if "img_mean" in hparams
else _FILL,
"resample": hparams["interpolation"]
if "interpolation" in hparams
else _RANDOM_INTERPOLATION,
}
# If magnitude_std is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from a normal distribution
# with mean `magnitude` and std-dev of `magnitude_std`.
# NOTE This is my own hack, being tested, not in papers or reference impls.
self.magnitude_std = self.hparams.get("magnitude_std", 0)
def __call__(self, img_list):
if self.prob < 1.0 and random.random() > self.prob:
return img_list
magnitude = self.magnitude
if self.magnitude_std and self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range
level_args = (
self.level_fn(magnitude, self.hparams)
if self.level_fn is not None
else ()
)
if isinstance(img_list, list):
return [
self.aug_fn(img, *level_args, **self.kwargs) for img in img_list
]
else:
return self.aug_fn(img_list, *level_args, **self.kwargs)
_RAND_TRANSFORMS = [
"AutoContrast",
"Equalize",
"Invert",
"Rotate",
"Posterize",
"Solarize",
"SolarizeAdd",
"Color",
"Contrast",
"Brightness",
"Sharpness",
"ShearX",
"ShearY",
"TranslateXRel",
"TranslateYRel",
]
_RAND_INCREASING_TRANSFORMS = [
"AutoContrast",
"Equalize",
"Invert",
"Rotate",
"PosterizeIncreasing",
"SolarizeIncreasing",
"SolarizeAdd",
"ColorIncreasing",
"ContrastIncreasing",
"BrightnessIncreasing",
"SharpnessIncreasing",
"ShearX",
"ShearY",
"TranslateXRel",
"TranslateYRel",
]
# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to do so.
_RAND_CHOICE_WEIGHTS_0 = {
"Rotate": 0.3,
"ShearX": 0.2,
"ShearY": 0.2,
"TranslateXRel": 0.1,
"TranslateYRel": 0.1,
"Color": 0.025,
"Sharpness": 0.025,
"AutoContrast": 0.025,
"Solarize": 0.005,
"SolarizeAdd": 0.005,
"Contrast": 0.005,
"Brightness": 0.005,
"Equalize": 0.005,
"Posterize": 0,
"Invert": 0,
}
def _select_rand_weights(weight_idx=0, transforms=None):
transforms = transforms or _RAND_TRANSFORMS
assert weight_idx == 0 # only one set of weights currently
rand_weights = _RAND_CHOICE_WEIGHTS_0
probs = [rand_weights[k] for k in transforms]
probs /= np.sum(probs)
return probs
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
hparams = hparams or _HPARAMS_DEFAULT
transforms = transforms or _RAND_TRANSFORMS
return [
AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams)
for name in transforms
]
class RandAugment:
def __init__(self, ops, num_layers=2, choice_weights=None):
self.ops = ops
self.num_layers = num_layers
self.choice_weights = choice_weights
def __call__(self, img):
# no replacement when using weighted choice
ops = np.random.choice(
self.ops,
self.num_layers,
replace=self.choice_weights is None,
p=self.choice_weights,
)
for op in ops:
img = op(img)
return img
def rand_augment_transform(config_str, hparams):
"""
RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
Create a RandAugment transform
:param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, which are not order specific, determine:
'm' - integer magnitude of rand augment
'n' - integer num layers (number of transform ops selected per image)
        'w' - integer probability weight index (index of a set of weights to influence choice of op)
'mstd' - float std deviation of magnitude noise applied
'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
:param hparams: Other hparams (kwargs) for the RandAugmentation scheme
:return: A PyTorch compatible Transform
"""
magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10)
num_layers = 2 # default to 2 ops per image
weight_idx = None # default to no probability weights for op choice
transforms = _RAND_TRANSFORMS
config = config_str.split("-")
assert config[0] == "rand"
config = config[1:]
for c in config:
cs = re.split(r"(\d.*)", c)
if len(cs) < 2:
continue
key, val = cs[:2]
if key == "mstd":
# noise param injected via hparams for now
hparams.setdefault("magnitude_std", float(val))
elif key == "inc":
if bool(val):
transforms = _RAND_INCREASING_TRANSFORMS
elif key == "m":
magnitude = int(val)
elif key == "n":
num_layers = int(val)
elif key == "w":
weight_idx = int(val)
else:
            raise NotImplementedError(f"Unknown RandAugment config section: {c}")
ra_ops = rand_augment_ops(
magnitude=magnitude, hparams=hparams, transforms=transforms
)
choice_weights = (
None if weight_idx is None else _select_rand_weights(weight_idx)
)
return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
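# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Builds a video RandAugment pipeline from a config string and applies it to a list of
# PIL frames. The frame size, config string and hparams below are assumptions chosen
# purely for demonstration.
def _example_rand_augment_usage():
    from PIL import Image
    frames = [Image.new("RGB", (224, 224)) for _ in range(8)]  # dummy 8-frame clip
    aa_params = {"translate_const": 100, "interpolation": Image.BILINEAR}
    augment = rand_augment_transform("rand-m9-n4-mstd0.5-inc1", aa_params)
    # AugmentOp.__call__ transparently handles a list of frames, so each selected op
    # (with its sampled magnitude) is applied consistently to every frame of the clip.
    return augment(frames)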
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/rand_augment.py |
import numpy as np
from PIL import Image
import torch
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
class ClipToTensor(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image'
                            ' but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
                raise TypeError('Expected numpy.ndarray or PIL.Image'
                                ' but got {0}'.format(type(img)))
img = convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = np_clip / 255.0
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = torch.div(tensor_clip, 255)
return tensor_clip
# Note this normalizes data to the range [-1, 1]
class ClipToTensor_K(object):
    """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
    to a torch.FloatTensor of shape (C x m x H x W) in the range [-1.0, 1.0]
    """
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image'
                            ' but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
                raise TypeError('Expected numpy.ndarray or PIL.Image'
                                ' but got {0}'.format(type(img)))
img = convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = (np_clip - 127.5) / 127.5
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = torch.div(torch.sub(tensor_clip, 127.5), 127.5)
return tensor_clip
class ToTensor(object):
"""Converts numpy array to tensor
"""
def __call__(self, array):
tensor = torch.from_numpy(array)
return tensor
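# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Converts a list of H x W x 3 uint8 frames into a (C, T, H, W) float tensor. The clip
# length and resolution below are assumptions chosen purely for demonstration.
def _example_clip_to_tensor_usage():
    clip = [np.random.randint(0, 256, (112, 112, 3), dtype=np.uint8) for _ in range(16)]
    clip_01 = ClipToTensor(channel_nb=3, div_255=True)(clip)     # values in [0, 1]
    clip_pm1 = ClipToTensor_K(channel_nb=3, div_255=True)(clip)  # values in [-1, 1]
    # Both results are torch.FloatTensor of shape (3, 16, 112, 112).
    return clip_01, clip_pm1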
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/volume_transforms.py |
import numbers
import cv2
import numpy as np
import PIL
import torch
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
cropped = [
img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
]
else:
        raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        ' but got list of {0}'.format(type(clip[0])))
return cropped
def resize_clip(clip, size, interpolation='bilinear'):
if isinstance(clip[0], np.ndarray):
if isinstance(size, numbers.Number):
im_h, im_w, im_c = clip[0].shape
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[0], size[1]
if interpolation == 'bilinear':
np_inter = cv2.INTER_LINEAR
else:
np_inter = cv2.INTER_NEAREST
scaled = [
cv2.resize(img, size, interpolation=np_inter) for img in clip
]
elif isinstance(clip[0], PIL.Image.Image):
if isinstance(size, numbers.Number):
im_w, im_h = clip[0].size
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
if interpolation == 'bilinear':
pil_inter = PIL.Image.BILINEAR
else:
pil_inter = PIL.Image.NEAREST
scaled = [img.resize(size, pil_inter) for img in clip]
else:
        raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        ' but got list of {0}'.format(type(clip[0])))
return scaled
def get_resize_sizes(im_h, im_w, size):
if im_w < im_h:
ow = size
oh = int(size * im_h / im_w)
else:
oh = size
ow = int(size * im_w / im_h)
return oh, ow
def normalize(clip, mean, std, inplace=False):
if not _is_tensor_clip(clip):
raise TypeError('tensor is not a torch clip.')
if not inplace:
clip = clip.clone()
dtype = clip.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=clip.device)
std = torch.as_tensor(std, dtype=dtype, device=clip.device)
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
return clip
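# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Resizes a clip of numpy frames so the short side is 128, center-crops 112 x 112 and
# normalizes the resulting (C, T, H, W) tensor. Sizes and statistics are assumptions.
def _example_clip_preprocessing():
    clip = [np.random.randint(0, 256, (120, 160, 3), dtype=np.uint8) for _ in range(8)]
    resized = resize_clip(clip, 128, interpolation='bilinear')          # short side -> 128
    im_h, im_w = resized[0].shape[:2]
    cropped = crop_clip(resized, (im_h - 112) // 2, (im_w - 112) // 2, 112, 112)
    tensor_clip = torch.from_numpy(
        np.stack(cropped).transpose(3, 0, 1, 2).copy()).float() / 255.0  # (C, T, H, W)
    return normalize(tensor_clip,
                     mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])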
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/functional.py |
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from torch.utils.data._utils.collate import default_collate
from pathlib import Path
import subprocess
import torch
import torch.distributed as dist
from torch._six import inf
import random
from tensorboardX import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
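# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Typical training-loop logging: smoothed stats are printed every `print_freq` steps.
# The dummy loader and loss below are stand-ins for a real dataloader / training step.
def _example_metric_logger_usage():
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
    for _ in metric_logger.log_every(range(100), print_freq=10, header='Epoch: [0]'):
        loss = random.random()                      # stand-in for a real loss value
        metric_logger.update(loss=loss, lr=1e-4)
    metric_logger.synchronize_between_processes()   # no-op outside distributed runs
    print("Averaged stats:", metric_logger)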
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = int(os.environ['SLURM_LOCALID'])
args.world_size = int(os.environ['SLURM_NTASKS'])
os.environ['RANK'] = str(args.rank)
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['WORLD_SIZE'] = str(args.world_size)
node_list = os.environ['SLURM_NODELIST']
addr = subprocess.getoutput(
f'scontrol show hostname {node_list} | head -n1')
if 'MASTER_ADDR' not in os.environ:
os.environ['MASTER_ADDR'] = addr
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
# assert torch.distributed.is_initialized()
setup_for_distributed(args.rank == 0)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
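# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# One AMP training step with gradient clipping. The model, batch, criterion and
# optimizer arguments are assumptions: any standard nn.Module / optimizer works, and
# loss_scaler is a NativeScalerWithGradNormCount created once outside the loop.
def _example_amp_training_step(model, samples, targets, criterion, optimizer,
                               loss_scaler, max_norm=1.0):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        outputs = model(samples)
        loss = criterion(outputs, targets)
    # Scales the loss, backpropagates, unscales, clips and steps the optimizer.
    grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
                            parameters=model.parameters(), update_grad=True)
    return loss.item(), grad_norm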
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
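# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Builds a per-iteration learning-rate schedule with 5 warmup epochs; one value is
# consumed per optimizer step. All hyperparameters below are assumptions.
def _example_cosine_schedule():
    lr_schedule = cosine_scheduler(base_value=1e-3, final_value=1e-5, epochs=100,
                                   niter_per_ep=500, warmup_epochs=5)
    assert len(lr_schedule) == 100 * 500
    return lr_schedule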
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch}
if model_ema is not None:
client_state['model_ema'] = get_state_dict(model_ema)
model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
# torch.amp
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if hasattr(args, 'model_ema') and args.model_ema:
_load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
# deepspeed, only support '--auto_resume'.
if args.auto_resume:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
print("Auto resume checkpoint: %d" % latest_ckpt)
_, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt)
args.start_epoch = client_states['epoch'] + 1
if model_ema is not None:
if args.model_ema:
_load_checkpoint_for_ema(model_ema, client_states['model_ema'])
def create_ds_config(args):
args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json")
with open(args.deepspeed_config, mode="w") as writer:
ds_config = {
"train_batch_size": args.batch_size * args.update_freq * get_world_size(),
"train_micro_batch_size_per_gpu": args.batch_size,
"steps_per_print": 1000,
"optimizer": {
"type": "Adam",
"adam_w_mode": True,
"params": {
"lr": args.lr,
"weight_decay": args.weight_decay,
"bias_correction": True,
"betas": [
0.9,
0.999
],
"eps": 1e-8
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 7,
"loss_scale_window": 128
}
}
writer.write(json.dumps(ds_config, indent=2))
def multiple_samples_collate(batch, fold=False):
"""
Collate function for repeated augmentation. Each instance in the batch has
more than one sample.
Args:
batch (tuple or list): data batch to collate.
Returns:
(tuple): collated data batch.
"""
inputs, labels, video_idx, extra_data = zip(*batch)
inputs = [item for sublist in inputs for item in sublist]
labels = [item for sublist in labels for item in sublist]
video_idx = [item for sublist in video_idx for item in sublist]
inputs, labels, video_idx, extra_data = (
default_collate(inputs),
default_collate(labels),
default_collate(video_idx),
default_collate(extra_data),
)
if fold:
return [inputs], labels, video_idx, extra_data
else:
return inputs, labels, video_idx, extra_data
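# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Plugs the collate function into a DataLoader for repeated augmentation, i.e. when
# each dataset item returns lists of (inputs, labels, video_idx, extra_data). The
# dataset argument is an assumption: any dataset with that item structure works.
def _example_repeated_aug_loader(dataset, batch_size=8, num_workers=4):
    from functools import partial
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=True,
        collate_fn=partial(multiple_samples_collate, fold=False),
    )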
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/utils.py |
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py
published under an Apache License 2.0.
"""
import math
import random
import torch
def _get_pixels(
per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda"
):
# NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
# paths, flip the order so normal is run on CPU if this becomes a problem
# Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
if per_pixel:
return torch.empty(patch_size, dtype=dtype, device=device).normal_()
elif rand_color:
return torch.empty(
(patch_size[0], 1, 1), dtype=dtype, device=device
).normal_()
else:
return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
class RandomErasing:
"""Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
This variant of RandomErasing is intended to be applied to either a batch
or single image tensor after it has been normalized by dataset mean and std.
Args:
probability: Probability that the Random Erasing operation will be performed.
min_area: Minimum percentage of erased area wrt input image area.
max_area: Maximum percentage of erased area wrt input image area.
min_aspect: Minimum aspect ratio of erased area.
mode: pixel color mode, one of 'const', 'rand', or 'pixel'
'const' - erase block is constant color of 0 for all channels
'rand' - erase block is same per-channel random (normal) color
'pixel' - erase block is per-pixel random (normal) color
max_count: maximum number of erasing blocks per image, area per box is scaled by count.
per-image count is randomly chosen between 1 and this value.
"""
def __init__(
self,
probability=0.5,
min_area=0.02,
max_area=1 / 3,
min_aspect=0.3,
max_aspect=None,
mode="const",
min_count=1,
max_count=None,
num_splits=0,
device="cuda",
cube=True,
):
self.probability = probability
self.min_area = min_area
self.max_area = max_area
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
self.min_count = min_count
self.max_count = max_count or min_count
self.num_splits = num_splits
mode = mode.lower()
self.rand_color = False
self.per_pixel = False
self.cube = cube
if mode == "rand":
self.rand_color = True # per block random normal
elif mode == "pixel":
self.per_pixel = True # per pixel random normal
else:
assert not mode or mode == "const"
self.device = device
def _erase(self, img, chan, img_h, img_w, dtype):
if random.random() > self.probability:
return
area = img_h * img_w
count = (
self.min_count
if self.min_count == self.max_count
else random.randint(self.min_count, self.max_count)
)
for _ in range(count):
for _ in range(10):
target_area = (
random.uniform(self.min_area, self.max_area) * area / count
)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
img[:, top : top + h, left : left + w] = _get_pixels(
self.per_pixel,
self.rand_color,
(chan, h, w),
dtype=dtype,
device=self.device,
)
break
def _erase_cube(
self,
img,
batch_start,
batch_size,
chan,
img_h,
img_w,
dtype,
):
if random.random() > self.probability:
return
area = img_h * img_w
count = (
self.min_count
if self.min_count == self.max_count
else random.randint(self.min_count, self.max_count)
)
for _ in range(count):
for _ in range(100):
target_area = (
random.uniform(self.min_area, self.max_area) * area / count
)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
for i in range(batch_start, batch_size):
img_instance = img[i]
img_instance[
:, top : top + h, left : left + w
] = _get_pixels(
self.per_pixel,
self.rand_color,
(chan, h, w),
dtype=dtype,
device=self.device,
)
break
def __call__(self, input):
if len(input.size()) == 3:
self._erase(input, *input.size(), input.dtype)
else:
batch_size, chan, img_h, img_w = input.size()
# skip first slice of batch if num_splits is set (for clean portion of samples)
batch_start = (
batch_size // self.num_splits if self.num_splits > 1 else 0
)
if self.cube:
self._erase_cube(
input,
batch_start,
batch_size,
chan,
img_h,
img_w,
input.dtype,
)
else:
for i in range(batch_start, batch_size):
self._erase(input[i], chan, img_h, img_w, input.dtype)
return input
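# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Applies cube-mode random erasing to a normalized clip laid out as (T, C, H, W), so
# the same region is erased in every frame. Shape and parameters are assumptions.
def _example_random_erasing_usage():
    clip = torch.randn(16, 3, 224, 224)  # (T, C, H, W), already mean/std normalized
    eraser = RandomErasing(probability=0.25, mode="pixel", max_count=1,
                           num_splits=0, device="cpu", cube=True)
    return eraser(clip)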
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/random_erasing.py |
#!/usr/bin/env python3
import math
import numpy as np
import random
import torch
import torchvision.transforms.functional as F
from PIL import Image
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
import numbers
import PIL
import torchvision
from . import functional as FF
_pil_interpolation_to_str = {
Image.NEAREST: "PIL.Image.NEAREST",
Image.BILINEAR: "PIL.Image.BILINEAR",
Image.BICUBIC: "PIL.Image.BICUBIC",
Image.LANCZOS: "PIL.Image.LANCZOS",
Image.HAMMING: "PIL.Image.HAMMING",
Image.BOX: "PIL.Image.BOX",
}
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _pil_interp(method):
if method == "bicubic":
return Image.BICUBIC
elif method == "lanczos":
return Image.LANCZOS
elif method == "hamming":
return Image.HAMMING
else:
return Image.BILINEAR
def random_short_side_scale_jitter(
images, min_size, max_size, boxes=None, inverse_uniform_sampling=False
):
"""
Perform a spatial short scale jittering on the given images and
corresponding boxes.
Args:
images (tensor): images to perform scale jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
min_size (int): the minimal size to scale the frames.
max_size (int): the maximal size to scale the frames.
boxes (ndarray): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale, max_scale].
Returns:
(tensor): the scaled images with dimension of
`num frames` x `channel` x `new height` x `new width`.
(ndarray or None): the scaled boxes with dimension of
`num boxes` x 4.
"""
if inverse_uniform_sampling:
size = int(
round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
)
else:
size = int(round(np.random.uniform(min_size, max_size)))
height = images.shape[2]
width = images.shape[3]
if (width <= height and width == size) or (
height <= width and height == size
):
return images, boxes
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
if boxes is not None:
boxes = boxes * float(new_height) / height
else:
new_width = int(math.floor((float(width) / height) * size))
if boxes is not None:
boxes = boxes * float(new_width) / width
return (
torch.nn.functional.interpolate(
images,
size=(new_height, new_width),
mode="bilinear",
align_corners=False,
),
boxes,
)
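# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Jitters the short side of a (T, C, H, W) float clip to a random size in [256, 320].
# The clip shape and size range are assumptions chosen purely for demonstration.
def _example_scale_jitter_usage():
    frames = torch.rand(8, 3, 240, 320)  # num frames x channel x height x width
    frames, _ = random_short_side_scale_jitter(frames, min_size=256, max_size=320)
    return frames  # short side now lies in [256, 320], aspect ratio preserved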
def crop_boxes(boxes, x_offset, y_offset):
"""
    Perform crop on the bounding boxes given the offsets.
Args:
boxes (ndarray or None): bounding boxes to peform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
def random_crop(images, size, boxes=None):
"""
Perform random spatial crop on the given images and corresponding boxes.
Args:
images (tensor): images to perform random crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): the size of height and width to crop on the image.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
cropped (tensor): cropped images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
if images.shape[2] == size and images.shape[3] == size:
return images
height = images.shape[2]
width = images.shape[3]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
return cropped, cropped_boxes
def horizontal_flip(prob, images, boxes=None):
"""
Perform horizontal flip on the given images and corresponding boxes.
Args:
        prob (float): probability to flip the images.
images (tensor): images to perform horizontal flip, the dimension is
`num frames` x `channel` x `height` x `width`.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
images (tensor): images with dimension of
`num frames` x `channel` x `height` x `width`.
flipped_boxes (ndarray or None): the flipped boxes with dimension of
`num boxes` x 4.
"""
if boxes is None:
flipped_boxes = None
else:
flipped_boxes = boxes.copy()
if np.random.uniform() < prob:
images = images.flip((-1))
if len(images.shape) == 3:
width = images.shape[2]
elif len(images.shape) == 4:
width = images.shape[3]
else:
raise NotImplementedError("Dimension does not supported")
if boxes is not None:
flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1
return images, flipped_boxes
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
            performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
if ndim == 3:
cropped = cropped.squeeze(0)
return cropped, cropped_boxes
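# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Takes the three standard test-time crops (left/centre/right, or top/centre/bottom
# when the clip is taller than wide). The clip shape and crop size are assumptions.
def _example_uniform_crop_usage():
    frames = torch.rand(8, 3, 256, 320)  # num frames x channel x height x width
    crops = [uniform_crop(frames, size=224, spatial_idx=idx)[0] for idx in range(3)]
    return crops  # three tensors of shape (8, 3, 224, 224)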
def clip_boxes_to_image(boxes, height, width):
"""
Clip an array of boxes to an image with the given height and width.
Args:
boxes (ndarray): bounding boxes to perform clipping.
Dimension is `num boxes` x 4.
height (int): given image height.
width (int): given image width.
Returns:
clipped_boxes (ndarray): the clipped boxes with dimension of
`num boxes` x 4.
"""
clipped_boxes = boxes.copy()
clipped_boxes[:, [0, 2]] = np.minimum(
width - 1.0, np.maximum(0.0, boxes[:, [0, 2]])
)
clipped_boxes[:, [1, 3]] = np.minimum(
height - 1.0, np.maximum(0.0, boxes[:, [1, 3]])
)
return clipped_boxes
def blend(images1, images2, alpha):
"""
Blend two images with a given weight alpha.
Args:
images1 (tensor): the first images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
images2 (tensor): the second images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
alpha (float): the blending weight.
Returns:
(tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
return images1 * alpha + images2 * (1 - alpha)
def grayscale(images):
"""
Get the grayscale for the input images. The channels of images should be
in order BGR.
Args:
images (tensor): the input images for getting grayscale. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
img_gray (tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
# R -> 0.299, G -> 0.587, B -> 0.114.
img_gray = torch.tensor(images)
gray_channel = (
0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0]
)
img_gray[:, 0] = gray_channel
img_gray[:, 1] = gray_channel
img_gray[:, 2] = gray_channel
return img_gray
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
"""
    Perform color jittering on the input images. The channels of images
should be in order BGR.
Args:
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
img_brightness (float): jitter ratio for brightness.
img_contrast (float): jitter ratio for contrast.
img_saturation (float): jitter ratio for saturation.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
jitter = []
if img_brightness != 0:
jitter.append("brightness")
if img_contrast != 0:
jitter.append("contrast")
if img_saturation != 0:
jitter.append("saturation")
if len(jitter) > 0:
order = np.random.permutation(np.arange(len(jitter)))
for idx in range(0, len(jitter)):
if jitter[order[idx]] == "brightness":
images = brightness_jitter(img_brightness, images)
elif jitter[order[idx]] == "contrast":
images = contrast_jitter(img_contrast, images)
elif jitter[order[idx]] == "saturation":
images = saturation_jitter(img_saturation, images)
return images
def brightness_jitter(var, images):
"""
    Perform brightness jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for brightness.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_bright = torch.zeros(images.shape)
images = blend(images, img_bright, alpha)
return images
def contrast_jitter(var, images):
"""
    Perform contrast jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for contrast.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)
images = blend(images, img_gray, alpha)
return images
def saturation_jitter(var, images):
"""
    Perform saturation jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for saturation.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
images = blend(images, img_gray, alpha)
return images
def lighting_jitter(images, alphastd, eigval, eigvec):
"""
Perform AlexNet-style PCA jitter on the given images.
Args:
images (tensor): images to perform lighting jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
alphastd (float): jitter ratio for PCA jitter.
eigval (list): eigenvalues for PCA jitter.
eigvec (list[list]): eigenvectors for PCA jitter.
Returns:
out_images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if alphastd == 0:
return images
# generate alpha1, alpha2, alpha3.
alpha = np.random.normal(0, alphastd, size=(1, 3))
eig_vec = np.array(eigvec)
eig_val = np.reshape(eigval, (1, 3))
rgb = np.sum(
eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
axis=1,
)
out_images = torch.zeros_like(images)
if len(images.shape) == 3:
# C H W
channel_dim = 0
elif len(images.shape) == 4:
# T C H W
channel_dim = 1
else:
raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
for idx in range(images.shape[channel_dim]):
# C H W
if len(images.shape) == 3:
out_images[idx] = images[idx] + rgb[2 - idx]
# T C H W
elif len(images.shape) == 4:
out_images[:, idx] = images[:, idx] + rgb[2 - idx]
else:
raise NotImplementedError(
f"Unsupported dimension {len(images.shape)}"
)
return out_images
def color_normalization(images, mean, stddev):
"""
    Perform color normalization on the given images.
Args:
images (tensor): images to perform color normalization. Dimension is
`num frames` x `channel` x `height` x `width`.
mean (list): mean values for normalization.
stddev (list): standard deviations for normalization.
Returns:
        out_images (tensor): the normalized images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if len(images.shape) == 3:
assert (
len(mean) == images.shape[0]
), "channel mean not computed properly"
assert (
len(stddev) == images.shape[0]
), "channel stddev not computed properly"
elif len(images.shape) == 4:
assert (
len(mean) == images.shape[1]
), "channel mean not computed properly"
assert (
len(stddev) == images.shape[1]
), "channel stddev not computed properly"
else:
raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
out_images = torch.zeros_like(images)
for idx in range(len(mean)):
# C H W
if len(images.shape) == 3:
out_images[idx] = (images[idx] - mean[idx]) / stddev[idx]
elif len(images.shape) == 4:
out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
else:
raise NotImplementedError(
f"Unsupported dimension {len(images.shape)}"
)
return out_images
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def random_resized_crop(
images,
target_height,
target_width,
scale=(0.8, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
Crop the given images to random size and aspect ratio. A crop of random
    size (default: of 0.8 to 1.0) of the original size and a random aspect
ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This
crop is finally resized to given size. This is popularly used to train the
Inception networks.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
cropped = images[:, :, i : i + h, j : j + w]
return torch.nn.functional.interpolate(
cropped,
size=(target_height, target_width),
mode="bilinear",
align_corners=False,
)
def random_resized_crop_with_shift(
images,
target_height,
target_width,
scale=(0.8, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
This is similar to random_resized_crop. However, it samples two different
boxes (for cropping) for the first and last frame. It then linearly
interpolates the two boxes for other frames.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
t = images.shape[1]
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width)
i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()]
j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()]
h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()]
w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()]
out = torch.zeros((3, t, target_height, target_width))
for ind in range(t):
out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate(
images[
:,
ind : ind + 1,
i_s[ind] : i_s[ind] + h_s[ind],
j_s[ind] : j_s[ind] + w_s[ind],
],
size=(target_height, target_width),
mode="bilinear",
align_corners=False,
)
return out
def create_random_augment(
input_size,
auto_augment=None,
interpolation="bilinear",
):
"""
Get video randaug transform.
Args:
input_size: The size of the input video in tuple.
auto_augment: Parameters for randaug. An example:
"rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number
of operations to apply).
interpolation: Interpolation method.
"""
if isinstance(input_size, tuple):
img_size = input_size[-2:]
else:
img_size = input_size
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, tuple):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = {"translate_const": int(img_size_min * 0.45)}
if interpolation and interpolation != "random":
aa_params["interpolation"] = _pil_interp(interpolation)
if auto_augment.startswith("rand"):
return transforms.Compose(
[rand_augment_transform(auto_augment, aa_params)]
)
raise NotImplementedError
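# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Builds the video RandAugment pipeline and applies it to a list of PIL frames. The
# augmentation string and frame size below are assumptions for demonstration.
def _example_create_random_augment_usage():
    frames = [Image.new("RGB", (224, 224)) for _ in range(16)]
    aug = create_random_augment(input_size=(224, 224),
                                auto_augment="rand-m7-n4-mstd0.5-inc1",
                                interpolation="bicubic")
    return aug(frames)  # list of consistently augmented PIL frames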
def random_sized_crop_img(
im,
size,
jitter_scale=(0.08, 1.0),
jitter_aspect=(3.0 / 4.0, 4.0 / 3.0),
max_iter=10,
):
"""
Performs Inception-style cropping (used for training).
"""
assert (
len(im.shape) == 3
), "Currently only support image for random_sized_crop"
h, w = im.shape[1:3]
i, j, h, w = _get_param_spatial_crop(
scale=jitter_scale,
ratio=jitter_aspect,
height=h,
width=w,
num_repeat=max_iter,
log_scale=False,
switch_hw=True,
)
cropped = im[:, i : i + h, j : j + w]
return torch.nn.functional.interpolate(
cropped.unsqueeze(0),
size=(size, size),
mode="bilinear",
align_corners=False,
).squeeze(0)
# The following code are modified based on timm lib, we will replace the following
# contents with dependency from PyTorchVideo.
# https://github.com/facebookresearch/pytorchvideo
class RandomResizedCropAndInterpolation:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation="bilinear",
):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
print("range should be of kind (min, max)")
if interpolation == "random":
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for _ in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = " ".join(
[_pil_interpolation_to_str[x] for x in self.interpolation]
)
else:
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + "(size={0}".format(self.size)
format_string += ", scale={0}".format(
tuple(round(s, 4) for s in self.scale)
)
format_string += ", ratio={0}".format(
tuple(round(r, 4) for r in self.ratio)
)
format_string += ", interpolation={0})".format(interpolate_str)
return format_string
def transforms_imagenet_train(
img_size=224,
scale=None,
ratio=None,
hflip=0.5,
vflip=0.0,
color_jitter=0.4,
auto_augment=None,
interpolation="random",
use_prefetcher=False,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
re_prob=0.0,
re_mode="const",
re_count=1,
re_num_splits=0,
separate=False,
):
"""
If separate==True, the transforms are returned as a tuple of 3 separate transforms
for use in a mixing dataset that passes
* all data through the first (primary) transform, called the 'clean' data
* a portion of the data through the secondary transform
* normalizes and converts the branches above with the third, final transform
"""
if isinstance(img_size, tuple):
img_size = img_size[-2:]
else:
img_size = img_size
scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range
ratio = tuple(
ratio or (3.0 / 4.0, 4.0 / 3.0)
) # default imagenet ratio range
primary_tfl = [
RandomResizedCropAndInterpolation(
img_size, scale=scale, ratio=ratio, interpolation=interpolation
)
]
if hflip > 0.0:
primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
if vflip > 0.0:
primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
secondary_tfl = []
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, tuple):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = dict(
translate_const=int(img_size_min * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
if interpolation and interpolation != "random":
aa_params["interpolation"] = _pil_interp(interpolation)
if auto_augment.startswith("rand"):
secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
elif auto_augment.startswith("augmix"):
raise NotImplementedError("Augmix not implemented")
else:
raise NotImplementedError("Auto aug not implemented")
elif color_jitter is not None:
# color jitter is enabled when not using AA
if isinstance(color_jitter, (list, tuple)):
# color jitter should be a 3-tuple/list if spec brightness/contrast/saturation
# or 4 if also augmenting hue
assert len(color_jitter) in (3, 4)
else:
# if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
color_jitter = (float(color_jitter),) * 3
secondary_tfl += [transforms.ColorJitter(*color_jitter)]
final_tfl = []
final_tfl += [
transforms.ToTensor(),
transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
]
if re_prob > 0.0:
final_tfl.append(
RandomErasing(
re_prob,
mode=re_mode,
max_count=re_count,
num_splits=re_num_splits,
device="cpu",
cube=False,
)
)
if separate:
return (
transforms.Compose(primary_tfl),
transforms.Compose(secondary_tfl),
transforms.Compose(final_tfl),
)
else:
return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
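# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Builds the frame-level ImageNet-style training transform with RandAugment and random
# erasing, then applies it to a single PIL image. All parameters are assumptions.
def _example_imagenet_train_transform_usage():
    transform = transforms_imagenet_train(
        img_size=224,
        auto_augment="rand-m7-n4-mstd0.5-inc1",
        interpolation="bicubic",
        re_prob=0.25,
        re_mode="pixel",
        re_count=1,
    )
    img = Image.new("RGB", (256, 320))
    return transform(img)  # normalized tensor of shape (3, 224, 224)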
############################################################################################################
############################################################################################################
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
class RandomHorizontalFlip(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __call__(self, clip):
"""
Args:
        img (PIL.Image or numpy.ndarray): List of images to be flipped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Randomly flipped clip
"""
if random.random() < 0.5:
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [
img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
return clip
class RandomResize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
self.ratio = ratio
self.interpolation = interpolation
def __call__(self, clip):
scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
new_w = int(im_w * scaling_factor)
new_h = int(im_h * scaling_factor)
new_size = (new_w, new_h)
resized = FF.resize_clip(
clip, new_size, interpolation=self.interpolation)
return resized
class Resize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
    The larger the original image is, the longer the interpolation takes.
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
    size (tuple): (width, height)
"""
def __init__(self, size, interpolation='nearest'):
self.size = size
self.interpolation = interpolation
def __call__(self, clip):
resized = FF.resize_clip(
clip, self.size, interpolation=self.interpolation)
return resized
class RandomCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, im_w - w)
y1 = random.randint(0, im_h - h)
cropped = FF.crop_clip(clip, y1, x1, h, w)
return cropped
class ThreeCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
        if w != im_w and h != im_h:
            clip = FF.resize_clip(clip, self.size, interpolation="bilinear")
            im_h, im_w, im_c = clip[0].shape
        # spacing between the three crops along the longer side (clamped at 0)
        step = max((max(im_w, im_h) - self.size[0]) // 2, 0)
cropped = []
for i in range(3):
if (im_h > self.size[0]):
x1 = 0
y1 = i * step
cropped.extend(FF.crop_clip(clip, y1, x1, h, w))
else:
x1 = i * step
y1 = 0
cropped.extend(FF.crop_clip(clip, y1, x1, h, w))
return cropped
class RandomRotation(object):
"""Rotate entire clip randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
        If degrees is a number instead of a sequence like (min, max),
        the range of degrees will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
                raise ValueError('If degrees is a single number, '
                                 'it must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
                raise ValueError('If degrees is a sequence, '
                                 'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
        clip (list of PIL.Image or numpy.ndarray): List of images to be
        rotated, each in format (h, w, c) for numpy.ndarray
        Returns:
        list of PIL.Image or numpy.ndarray: Rotated list of images
        """
        import skimage.transform
angle = random.uniform(self.degrees[0], self.degrees[1])
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angle) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angle) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class CenterCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
        if w > im_w or h > im_h:
            error_msg = (
                'Initial image size should be larger than '
                'cropped size but got cropped sizes: ({w}, {h}) while '
                'initial image is ({im_w}, {im_h})'.format(
                    im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
cropped = FF.crop_clip(clip, y1, x1, h, w)
return cropped
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
clip (list): list of PIL.Image
Returns:
list PIL.Image : list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
# Create img transform function sequence
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
jittered_clip = []
for img in clip:
for func in img_transforms:
jittered_img = func(img)
jittered_clip.append(jittered_img)
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
return jittered_clip
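# Hedged usage sketch (added for illustration, not part of the original file):
# applying ColorJitter to a short clip of PIL images. It relies on the
# module-level torchvision/random imports used by ColorJitter above;
# `_color_jitter_demo` is a hypothetical helper name.
def _color_jitter_demo():
    import numpy as _np
    import PIL.Image as _Image
    frames = [
        _Image.fromarray(_np.random.randint(0, 255, (64, 64, 3), dtype=_np.uint8))
        for _ in range(4)
    ]
    jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
    jittered = jitter(frames)
    assert len(jittered) == len(frames)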
class Normalize(object):
"""Normalize a clip with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, clip):
"""
Args:
clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor clip.
"""
return FF.normalize(clip, self.mean, self.std)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
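# Hedged sketch (added for illustration, not part of the original file): the
# per-channel formula the Normalize transform applies, written out with plain
# torch so the docstring above is easy to verify; `_normalize_formula_check`
# is a hypothetical helper name.
def _normalize_formula_check():
    import torch  # local import so the sketch is self-contained
    clip = torch.rand(8, 3, 224, 224)                # (T, C, H, W)
    mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
    normalized = (clip - mean) / std                 # input[c] = (input[c] - mean[c]) / std[c]
    assert normalized.shape == clip.shape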
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/video_transforms.py |
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 400, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
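# Hedged sketch (added for illustration, not part of the original file):
# DropPath randomly zeroes whole residual branches per sample while training
# and is the identity in eval mode; `_drop_path_demo` is a hypothetical helper.
def _drop_path_demo():
    dp = DropPath(drop_prob=0.2)
    x = torch.ones(4, 196, 768)
    dp.train()
    _ = dp(x)                      # stochastic: some samples are zeroed, the rest rescaled
    dp.eval()
    assert torch.equal(dp(x), x)   # no-op at inference time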
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
        # commented out to follow the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv_bias = None
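        # the key projection intentionally has no bias: when qkv_bias is enabled,
        # a zero tensor stands in for the k bias (q_bias | zeros | v_bias)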
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.tubelet_size = int(tubelet_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv3d(in_channels=in_chans, out_channels=embed_dim,
kernel_size = (self.tubelet_size, patch_size[0],patch_size[1]),
stride=(self.tubelet_size, patch_size[0], patch_size[1]))
def forward(self, x, **kwargs):
B, C, T, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
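# Hedged sketch (added for illustration, not part of the original file): with
# the defaults above (224x224 frames, 16x16 patches, 16 frames, tubelet size 2)
# the tubelet embedding produces 14 * 14 * 8 = 1568 tokens of width 768;
# `_patch_embed_demo` is a hypothetical helper name.
def _patch_embed_demo():
    embed = PatchEmbed()
    clip = torch.zeros(2, 3, 16, 224, 224)   # (B, C, T, H, W)
    tokens = embed(clip)
    assert embed.num_patches == 1568
    assert tokens.shape == (2, 1568, 768)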
# sin-cos position encoding
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
def get_sinusoid_encoding_table(n_position, d_hid):
''' Sinusoid position encoding table '''
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
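# Hedged sketch (added for illustration, not part of the original file): the
# table has shape (1, n_position, d_hid) with sin on even and cos on odd
# dimensions, so position 0 maps to 0 and 1 respectively;
# `_sincos_table_demo` is a hypothetical helper name.
def _sincos_table_demo():
    table = get_sinusoid_encoding_table(n_position=1568, d_hid=768)
    assert table.shape == (1, 1568, 768)
    assert torch.allclose(table[0, 0, 0::2], torch.zeros(384))  # sin(0) = 0
    assert torch.allclose(table[0, 0, 1::2], torch.ones(384))   # cos(0) = 1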
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
init_values=0.,
use_learnable_pos_emb=False,
init_scale=0.,
all_frames=16,
tubelet_size=2,
use_mean_pooling=True):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.tubelet_size = tubelet_size
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=all_frames, tubelet_size=self.tubelet_size)
num_patches = self.patch_embed.num_patches
if use_learnable_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
else:
            # fixed sine-cosine positional embeddings (not learned)
self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values)
for i in range(depth)])
self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if use_learnable_pos_emb:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.head.weight, std=.02)
self.apply(self._init_weights)
self.head.weight.data.mul_(init_scale)
self.head.bias.data.mul_(init_scale)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
B, _, _ = x.size()
if self.pos_embed is not None:
x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach()
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
if self.fc_norm is not None:
return self.fc_norm(x.mean(1))
else:
return x[:, 0]
def get_spatial_features(self, x):
x = self.patch_embed(x)
B, _, _ = x.size()
if self.pos_embed is not None:
x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach()
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
        # tokens back to (B, C=768, T=8, H=14, W=14) for ViT-B/16 on 16-frame
        # 224x224 clips, then averaged over the temporal axis
        x = x.transpose(1, 2).reshape(-1, 768, 8, 14, 14).mean(2)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_512(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/modeling_finetune.py |
# PREVALENT, 2020, [email protected]
# Modified in Recurrent VLN-BERT, 2020, [email protected]
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers import BertPreTrainedModel, BertConfig
import pdb
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except (ImportError, AttributeError) as e:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = True
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_scores) if self.output_attentions else (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertXAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
self.att = BertOutAttention(config, ctx_dim=ctx_dim)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):
output, attention_scores = self.att(input_tensor, ctx_tensor, ctx_att_mask)
attention_output = self.output(output, input_tensor)
return attention_output, attention_scores
class BertOutAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
ctx_dim =config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores
class LXRTXLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# Lang self-att and FFN layer
self.lang_self_att = BertAttention(config)
self.lang_inter = BertIntermediate(config)
self.lang_output = BertOutput(config)
# Visn self-att and FFN layer
self.visn_self_att = BertAttention(config)
self.visn_inter = BertIntermediate(config)
self.visn_output = BertOutput(config)
# The cross attention layer
self.visual_attention = BertXAttention(config)
def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
''' Cross Attention -- cross for vision not for language '''
visn_att_output, attention_scores = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask)
return visn_att_output, attention_scores
def self_att(self, visn_input, visn_attention_mask):
''' Self Attention -- on visual features with language clues '''
visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)
return visn_att_output
def output_fc(self, visn_input):
''' Feed forward '''
visn_inter_output = self.visn_inter(visn_input)
visn_output = self.visn_output(visn_inter_output, visn_input)
return visn_output
def forward(self, lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask, tdx):
''' visual self-attention with state '''
visn_att_output = torch.cat((lang_feats[:, 0:1, :], visn_feats), dim=1)
state_vis_mask = torch.cat((lang_attention_mask[:,:,:,0:1], visn_attention_mask), dim=-1)
''' state and vision attend to language '''
visn_att_output, cross_attention_scores = self.cross_att(lang_feats[:, 1:, :], lang_attention_mask[:, :, :, 1:], visn_att_output, state_vis_mask)
language_attention_scores = cross_attention_scores[:, :, 0, :]
state_visn_att_output = self.self_att(visn_att_output, state_vis_mask)
state_visn_output = self.output_fc(state_visn_att_output[0])
visn_att_output = state_visn_output[:, 1:, :]
lang_att_output = torch.cat((state_visn_output[:, 0:1, :], lang_feats[:,1:,:]), dim=1)
visual_attention_scores = state_visn_att_output[1][:, :, 0, 1:]
return lang_att_output, visn_att_output, language_attention_scores, visual_attention_scores
class VisionEncoder(nn.Module):
def __init__(self, vision_size, config):
super().__init__()
feat_dim = vision_size
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visn_input):
feats = visn_input
x = self.visn_fc(feats)
x = self.visn_layer_norm(x)
output = self.dropout(x)
return output
class VLNBert(BertPreTrainedModel):
def __init__(self, config):
super(VLNBert, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.pooler = BertPooler(config)
self.img_dim = config.img_feature_dim # 2176
logger.info('VLNBert Image Dimension: {}'.format(self.img_dim))
self.img_feature_type = config.img_feature_type # ''
self.vl_layers = config.vl_layers # 4
self.la_layers = config.la_layers # 9
self.lalayer = nn.ModuleList(
[BertLayer(config) for _ in range(self.la_layers)])
self.addlayer = nn.ModuleList(
[LXRTXLayer(config) for _ in range(self.vl_layers)])
# self.vision_encoder = VisionEncoder(self.config.img_feature_dim, self.config)
# self.apply(self.init_weights)
self.init_weights()
# del self.img_dim
# del self.vision_encoder
# del self.embeddings
def forward(self, mode, input_ids, token_type_ids=None,
attention_mask=None, lang_mask=None, vis_mask=None,
position_ids=None, head_mask=None, img_feats=None):
attention_mask = lang_mask
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
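        # additive mask: kept positions contribute 0, padded positions -10000,
        # which drives their scores to zero after the softmax in every attention layer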
head_mask = [None] * self.config.num_hidden_layers
if mode == 'language':
''' LXMERT language branch (in VLN only perform this at initialization) '''
if input_ids.shape[-1] == 768: # rxr instruction
text_embeds = input_ids
else: # r2r instruction
embedding_output = self.embeddings(input_ids,
position_ids=position_ids, token_type_ids=token_type_ids)
text_embeds = embedding_output
for layer_module in self.lalayer:
temp_output = layer_module(text_embeds, extended_attention_mask)
text_embeds = temp_output[0]
sequence_output = text_embeds
pooled_output = self.pooler(sequence_output)
return pooled_output, sequence_output
elif mode == 'visual':
''' LXMERT visual branch (no language processing during navigation) '''
text_embeds = input_ids
text_mask = extended_attention_mask
# img_embedding_output = self.vision_encoder(img_feats)
img_embedding_output = img_feats
img_seq_len = img_feats.shape[1]
batch_size = text_embeds.size(0)
img_seq_mask = vis_mask
extended_img_mask = img_seq_mask.unsqueeze(1).unsqueeze(2)
extended_img_mask = extended_img_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_img_mask = (1.0 - extended_img_mask) * -10000.0
img_mask = extended_img_mask
lang_output = text_embeds
visn_output = img_embedding_output
for tdx, layer_module in enumerate(self.addlayer):
lang_output, visn_output, language_attention_scores, visual_attention_scores = layer_module(lang_output, text_mask, visn_output, img_mask, tdx)
sequence_output = lang_output
pooled_output = self.pooler(sequence_output)
visual_action_scores = visual_attention_scores.mean(dim=1)
return pooled_output, visual_action_scores
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_PREVALENT.py |
# Recurrent VLN-BERT, 2020, by [email protected]
from pytorch_transformers import (BertConfig, BertTokenizer)
def get_vlnbert_models(config=None):
config_class = BertConfig
from vlnce_baselines.models.vlnbert.vlnbert_PREVALENT import VLNBert
model_class = VLNBert
# model_name_or_path = 'data/mydata/snap/VLNBERT-PREVALENT-final/state_dict/best_val_unseen'
model_name_or_path = 'pretrained/Prevalent/pretrained_model/pytorch_model.bin'
vis_config = config_class.from_pretrained('pretrained/Prevalent/bert-base-uncased')
vis_config.img_feature_dim = 2176
vis_config.img_feature_type = ""
vis_config.vl_layers = 4
vis_config.la_layers = 9
visual_model = model_class.from_pretrained(model_name_or_path, config=vis_config)
return visual_model
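# Hedged usage sketch (added for illustration, not part of the original file;
# it assumes the Prevalent checkpoint paths above exist on disk). The returned
# VLNBert runs in two modes: 'language' once per episode to encode the
# instruction, then 'visual' at every step with candidate image features.
# `_vlnbert_usage_sketch` and its arguments are hypothetical.
def _vlnbert_usage_sketch(input_ids, lang_mask, img_feats, vis_mask):
    model = get_vlnbert_models()
    state, language_features = model(
        'language', input_ids, lang_mask=lang_mask)
    pooled_state, visual_action_scores = model(
        'visual', language_features, lang_mask=lang_mask,
        vis_mask=vis_mask, img_feats=img_feats)
    return pooled_state, visual_action_scores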
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_init.py |
import json
import jsonlines
import os
import sys
import time
import glob
import warnings
from collections import defaultdict
from typing import Dict, List
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as distr
import torch.multiprocessing as mp
import gzip
import math
from copy import deepcopy
import tqdm
from gym import Space
from habitat import Config, logger
from habitat.utils.visualizations.utils import append_text_to_image
from habitat_baselines.common.base_il_trainer import BaseILTrainer
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_extensions.measures import Position
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs, generate_video
from habitat_baselines.utils.common import (
get_checkpoint_id,
poll_checkpoint_folder,
)
from habitat_extensions.utils import observations_to_image
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.env_utils import (
construct_envs_auto_reset_false,
construct_envs,
is_slurm_batch_job,
)
from vlnce_baselines.common.utils import *
from habitat_extensions.measures import NDTW
from fastdtw import fastdtw
from ..utils import get_camera_orientations12
from ..utils import (
length2mask, dir_angle_feature, dir_angle_feature_with_ele,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
class BaseVLNCETrainer(BaseILTrainer):
r"""A base trainer for VLN-CE imitation learning."""
supported_tasks: List[str] = ["VLN-v0"]
def __init__(self, config=None):
super().__init__(config)
self.policy = None
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.obs_transforms = []
self.start_epoch = 0
self.step_id = 0
def _initialize_policy(
self,
config: Config,
load_from_ckpt: bool,
observation_space: Space,
action_space: Space,
) -> None:
policy = baseline_registry.get_policy(self.config.MODEL.policy_name)
self.policy = policy.from_config(
config=config,
observation_space=observation_space,
action_space=action_space,
)
''' initialize the waypoint predictor here '''
from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM
self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device)
self.waypoint_predictor.load_state_dict(
torch.load(
'pretrained/wp_pred/waypoint_predictor',
map_location = torch.device('cpu'),
)['predictor']['state_dict']
)
for param in self.waypoint_predictor.parameters():
param.requires_grad_(False)
self.policy.to(self.device)
self.waypoint_predictor.to(self.device)
self.num_recurrent_layers = self.policy.net.num_recurrent_layers
if self.config.GPU_NUMBERS > 1:
print('Using', self.config.GPU_NUMBERS,'GPU!')
self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device],
output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
# self.waypoint_predictor = DDP(self.waypoint_predictor.to(self.device), device_ids=[self.device],
# output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
self.optimizer = torch.optim.AdamW(
self.policy.parameters(), lr=self.config.IL.lr,
)
if load_from_ckpt:
ckpt_path = config.IL.ckpt_to_load
ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu")
if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1:
self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device),
device_ids=[self.device], output_device=self.device)
self.policy.load_state_dict(ckpt_dict["state_dict"])
self.policy.net = self.policy.net.module
self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device),
device_ids=[self.device], output_device=self.device)
# self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"])
# self.waypoint_predictor = self.waypoint_predictor.module
else:
self.policy.load_state_dict(ckpt_dict["state_dict"])
# self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"])
if config.IL.is_requeue:
self.optimizer.load_state_dict(ckpt_dict["optim_state"])
self.start_epoch = ckpt_dict["epoch"] + 1
self.step_id = ckpt_dict["step_id"]
logger.info(f"Loaded weights from checkpoint: {ckpt_path}")
self.waypoint_predictor.eval()
params = sum(param.numel() for param in self.policy.parameters())
params_t = sum(
p.numel() for p in self.policy.parameters() if p.requires_grad
)
        logger.info(f"Agent parameters: {params/1e6:.2f}M. Trainable: {params_t/1e6:.2f}M")
logger.info("Finished setting up policy.")
# def save_checkpoint(self, file_name) -> None:
# r"""Save checkpoint with specified name.
# Args:
# file_name: file name for checkpoint
# Returns:
# None
# """
# checkpoint = {
# "state_dict": self.policy.state_dict(),
# "config": self.config,
# }
# torch.save(
# checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
# )
def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
return torch.load(checkpoint_path, *args, **kwargs)
# def _update_agent(
# self,
# observations,
# prev_actions,
# not_done_masks,
# corrected_actions,
# weights,
# step_grad: bool = True,
# loss_accumulation_scalar: int = 1,
# ):
# T, N = corrected_actions.size()
# recurrent_hidden_states = torch.zeros(
# N,
# self.num_recurrent_layers,
# self.config.MODEL.STATE_ENCODER.hidden_size,
# device=self.device,
# )
# AuxLosses.clear()
# # observations['rgb'] = observations['rgb'][0:2]
# # observations['depth'] = observations['depth'][0:2]
# # observations['rxr_instruction'] = observations['rxr_instruction'][0:2]
# # not_done_masks = not_done_masks[0:2]
# # prev_actions = prev_actions[0:2]
# distribution = self.policy.build_distribution(
# observations, recurrent_hidden_states, prev_actions, not_done_masks)
# logits = distribution.logits
# logits = logits.view(T, N, -1)
# action_loss = F.cross_entropy(
# logits.permute(0, 2, 1), corrected_actions, reduction="none"
# )
# action_loss = ((weights * action_loss).sum(0) / weights.sum(0)).mean()
# aux_mask = (weights > 0).view(-1)
# aux_loss = AuxLosses.reduce(aux_mask)
# loss = action_loss + aux_loss
# loss = loss / loss_accumulation_scalar
# loss.backward()
# if step_grad:
# self.optimizer.step()
# self.optimizer.zero_grad()
# # if isinstance(aux_loss, torch.Tensor):
# # aux_loss = aux_loss.item()
# # return loss.item(), action_loss.item(), aux_loss
# return loss, action_loss, aux_loss
@staticmethod
def _pause_envs(
envs_to_pause,
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames=None,
# positions=None
):
# pausing envs with no new episode
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
# positions.pop(idx)
# indexing along the batch dimensions
recurrent_hidden_states = recurrent_hidden_states[state_index]
not_done_masks = not_done_masks[state_index]
prev_actions = prev_actions[state_index]
for k, v in batch.items():
batch[k] = v[state_index]
if rgb_frames is not None:
rgb_frames = [rgb_frames[i] for i in state_index]
return (
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames,
# positions
)
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object
checkpoint_index: index of the current checkpoint
Returns:
None
"""
if self.local_rank < 1:
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
# config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.DATASET.ROLES = ["guide"]
# config.TASK_CONFIG.DATASET.LANGUAGES = config.EVAL.LANGUAGES
# config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = checkpoint_path
if len(config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP_VLNCE")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json",
)
if os.path.exists(fname):
print("skipping -- evaluation exists.")
return
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=self.traj # split by rank
)
dataset_length = sum(envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
stats_episodes = {}
rgb_frames = [[] for _ in range(envs.num_envs)]
if len(config.VIDEO_OPTION) > 0:
os.makedirs(config.VIDEO_DIR, exist_ok=True)
if config.EVAL.EPISODE_COUNT == -1:
episodes_to_eval = sum(envs.number_of_episodes)
else:
episodes_to_eval = min(
config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes)
)
pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None
log_str = (
f"[Ckpt: {checkpoint_index}]"
" [Episodes evaluated: {evaluated}/{total}]"
" [Time elapsed (s): {time}]"
)
start_time = time.time()
# number = 0
total_weight = 0.
ml_loss = 0.
bpositions = [[] for _ in range(envs.num_envs)]
while envs.num_envs > 0 and len(stats_episodes) < episodes_to_eval:
current_episodes = envs.current_episodes()
positions = []; headings = []
for ob_i in range(len(current_episodes)):
agent_state_i = envs.call_at(ob_i, "get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
elif 'VLNBERT' in self.config.MODEL.policy_name:
if 'R2R' in self.config.TASK_CONFIG.DATASET.DATA_PATH:
lang_idx_tokens = batch['instruction']
padding_idx = 0
lang_masks = (lang_idx_tokens != padding_idx)
lang_lengths = lang_masks.sum(1)
lang_token_type_ids = torch.zeros_like(lang_masks,
dtype=torch.long, device=self.device)
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=lang_masks)
elif 'RxR' in self.config.TASK_CONFIG.DATASET.DATA_PATH:
to_be_masked = ((torch.abs(batch['rxr_instruction']) == 0)*1.).mean(-1)
lang_masks = torch.ones_like(to_be_masked) - to_be_masked
# lang_lengths = all_lang_masks.sum(1)
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
observations=batch,
lang_masks=lang_masks,
)
else:
raise NotImplementedError
h_t[h_t_flag] = h_t_init[h_t_flag]
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
# lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
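            # the last candidate slot is the STOP action (env action 0); any other
            # index triggers a HIGHTOLOW step towards the chosen waypoint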
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for j, ob in enumerate(observations):
if env_actions[j]['action']['action'] == 0:
continue
else:
envs.call_at(j,
'change_current_path', # to update and record low-level path
{'new_path': ob.pop('positions'),
'collisions': ob.pop('collisions')}
)
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8, device=self.device)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if len(config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations[i], infos[i])
frame = append_text_to_image(
frame, current_episodes[i].instruction.instruction_text
)
rgb_frames[i].append(frame)
if not dones[i]:
continue
# ep done, calculate metrics
info = infos[i]
metric = {}
metric['steps_taken'] = info['steps_taken']
ep_id = str(envs.current_episodes()[i].episode_id)
                gt_path = np.array(self.gt_data[ep_id]['locations']).astype(float)
                if 'current_path' in envs.current_episodes()[i].info.keys():
                    positions_ = np.array(envs.current_episodes()[i].info['current_path']).astype(float)
                    collisions_ = np.array(envs.current_episodes()[i].info['collisions'])
                    assert collisions_.shape[0] == positions_.shape[0] - 1
                else:
                    positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(float)
                distance = np.array(info['position']['distance']).astype(float)
metric['distance_to_goal'] = distance[-1]
metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0.
metric['oracle_success'] = 1. if (distance <= 3.).any() else 0.
metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum()
                try:
                    metric['collisions'] = collisions_.mean()
                except:  # collisions_ is unset when the episode info lacks 'current_path'
                    metric['collisions'] = 0
gt_length = distance[0]
metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length'])
act_con_path = positions_
                gt_con_path = np.array(dis_to_con(gt_path)).astype(float)
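                # nDTW = exp(-DTW(agent path, gt path) / (|gt path| * success distance))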
dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0]
nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE))
metric['ndtw'] = nDTW
stats_episodes[current_episodes[i].episode_id] = metric
observations[i] = envs.reset_at(i)[0] # envs[i] change to next episode
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
if config.use_pbar:
pbar.update()
else:
logger.info(
log_str.format(
evaluated=len(stats_episodes),
total=episodes_to_eval,
time=round(time.time() - start_time),
)
)
if len(config.VIDEO_OPTION) > 0:
generate_video(
video_option=config.VIDEO_OPTION,
video_dir=config.VIDEO_DIR,
images=rgb_frames[i],
episode_id=current_episodes[i].episode_id,
checkpoint_idx=checkpoint_index,
metrics={
"spl": stats_episodes[
current_episodes[i].episode_id
]["spl"]
},
tb_writer=writer,
fps=1,
)
# del stats_episodes[current_episodes[i].episode_id][
# "top_down_map_vlnce"
# ]
# del stats_episodes[current_episodes[i].episode_id][
# "collisions"
# ]
rgb_frames[i] = []
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
                if next_episodes[i].episode_id in stats_episodes:  # a repeated episode id means this env has already cycled through all of its episodes
envs_to_pause.append(i)
if 'VLNBERT' in self.config.MODEL.policy_name:
rnn_states = h_t
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
# positions
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
# positions
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.use_pbar:
pbar.close()
if self.world_size > 1:
distr.barrier()
aggregated_stats = {}
num_episodes = len(stats_episodes)
# print('rank', self.local_rank, 'evaluated',num_episodes, 'episodes')
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum(v[stat_key] for v in stats_episodes.values())
/ num_episodes
)
# print(self.local_rank, aggregated_stats)
total = torch.tensor(num_episodes).cuda()
if self.world_size > 1:
            distr.reduce(total, dst=0)
total = total.item()
if self.world_size > 1:
logger.info(
f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_stats}")
for k,v in aggregated_stats.items():
v = torch.tensor(v*num_episodes).cuda()
# print(self.local_rank, k+':', v.item(), num_episodes, 'before reduce')
cat_v = gather_list_and_concat(v,self.world_size)
# print(self.local_rank, k+':', cat_v, num_episodes, 'after_reduce')
v = (sum(cat_v)/total).item()
# print(self.local_rank, k+':', v, num_episodes, 'after divide total')
aggregated_stats[k] = v
split = config.TASK_CONFIG.DATASET.SPLIT
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json",
)
with open(fname, "w") as f:
json.dump(stats_episodes, f, indent=4)
if self.local_rank < 1:
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{split}.json",
)
with open(fname, "w") as f:
json.dump(aggregated_stats, f, indent=4)
logger.info(f"Episodes evaluated: {total}")
checkpoint_num = checkpoint_index + 1
for k, v in aggregated_stats.items():
logger.info(f"Average episode {k}: {v:.6f}")
writer.add_scalar(f"eval_{k}/{split}", v, checkpoint_num)
def collect_val_traj(self):
from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1
trajectories = defaultdict(list)
split = self.config.TASK_CONFIG.DATASET.SPLIT
if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file:
gt_data = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
if (
ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES
and role not in self.config.TASK_CONFIG.DATASET.ROLES
):
continue
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_file.format(
split=split, role=role
),
"rt",
) as f:
gt_data.update(json.load(f))
else:
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_path.format(
split=split)
) as f:
gt_data = json.load(f)
else:
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(split=split)
) as f:
gt_data = json.load(f)
self.gt_data = gt_data
trajectories = gt_data
self.trajectories = gt_data
trajectories = list(trajectories.keys())[self.config.local_rank::self.config.GPU_NUMBERS]
return trajectories
def eval(self) -> None:
r"""Main method of trainer evaluation. Calls _eval_checkpoint() that
is specified in Trainer class that inherits from BaseRLTrainer
or BaseILTrainer
Returns:
None
"""
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if "tensorboard" in self.config.VIDEO_OPTION:
assert (
len(self.config.TENSORBOARD_DIR) > 0
), "Must specify a tensorboard directory for video display"
os.makedirs(self.config.TENSORBOARD_DIR, exist_ok=True)
if "disk" in self.config.VIDEO_OPTION:
assert (
len(self.config.VIDEO_DIR) > 0
), "Must specify a directory for storing videos on disk"
world_size = self.config.GPU_NUMBERS
self.world_size = world_size
self.local_rank = self.config.local_rank
self.config.defrost()
# split = self.config.TASK_CONFIG.DATASET.SPLIT
# self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
# self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.DATASET.ROLES = ["guide"]
self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['POSITION', 'STEPS_TAKEN']
self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]]
if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWEVAL'
self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.EVAL.LANGUAGES
self.config.TASK_CONFIG.DATASET.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.config.EVAL.SPLIT
self.config.use_pbar = not is_slurm_batch_job()
# if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
# self.config.EVAL.trajectories_file = \
# self.config.EVAL.trajectories_file[:-8] + '_w' + \
# str(self.world_size) + '_r' + str(self.local_rank) + '.json.gz'
# if choosing image
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations12()
# sensor_uuids = []
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
cropper_size = dict(crop_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
crop_config.append((camera_template.lower(), cropper_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
self.config.freeze()
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
# -1
# )
torch.cuda.set_device(self.device)
if world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
torch.cuda.set_device(self.device)
self.config.defrost()
self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.freeze()
#
# if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
self.traj = self.collect_val_traj()
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
if os.path.isfile(self.config.EVAL.CKPT_PATH_DIR):
                # evaluate single checkpoint
proposed_index = get_checkpoint_id(
self.config.EVAL.CKPT_PATH_DIR
)
if proposed_index is not None:
ckpt_idx = proposed_index
else:
ckpt_idx = 0
self._eval_checkpoint(
self.config.EVAL.CKPT_PATH_DIR,
writer,
checkpoint_index=ckpt_idx,
)
else:
# evaluate multiple checkpoints in order
# prev_ckpt_ind = -1 #TODO eval start index
evaluated = []
while True:
current_ckpt = None
while current_ckpt is None:
checkpoint_folder = self.config.EVAL_CKPT_PATH_DIR
if not self.config.CEPH_IO:
models_paths = [p for p in filter(os.path.isfile, glob.glob(checkpoint_folder + "/*")) if p not in evaluated]
else:
models_paths = [os.path.join(self.config.CEPH_URL,p) for p in self.client.list(self.config.CEPH_URL) if os.path.join(self.config.CEPH_URL,p) not in evaluated]
if len(models_paths) > 0:
models_paths.sort(key=self._get_iter)
current_ckpt = models_paths[0]
prev_ckpt_ind = current_ckpt.split('.')[-2]
else:
current_ckpt = None
time.sleep(2) # sleep for 2 secs before polling again
# time.sleep(10)
if self.local_rank < 1:
logger.info(f"=======current_ckpt: {current_ckpt}=======")
# prev_ckpt_ind += 1
self._eval_checkpoint(
checkpoint_path=current_ckpt,
writer=writer,
checkpoint_index=prev_ckpt_ind,
)
evaluated.append(current_ckpt)
def inference(self) -> None:
r"""Runs inference on a single checkpoint, creating a path predictions file."""
checkpoint_path = self.config.INFERENCE.CKPT_PATH
logger.info(f"checkpoint_path: {checkpoint_path}")
self.config.defrost()
self.config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT
self.config.TASK_CONFIG.DATASET.ROLES = ["guide"]
self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.INFERENCE.LANGUAGES
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
self.config.IL.ckpt_to_load = self.config.INFERENCE.CKPT_PATH
self.config.TASK_CONFIG.TASK.MEASUREMENTS = []
self.config.TASK_CONFIG.TASK.SENSORS = [
s for s in self.config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s
]
########### Additional Config ###########
self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]]
if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFERENCE'
# if choosing image
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations12()
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
cropper_size = dict(crop_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
crop_config.append((camera_template.lower(), cropper_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
# self.config.ENV_NAME = "VLNCEInferenceEnv" #TODO is this necessary?
self.config.freeze()
if self.config.INFERENCE.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = checkpoint_path
config.freeze()
eps = self.collect_val_traj()
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=eps[:10] if sys.gettrace() else None # for debug, ep subset
)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
episode_predictions = defaultdict(list)
# episode ID --> instruction ID for rxr predictions format
instruction_ids: Dict[str, int] = {}
# populate episode_predictions with the starting state
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
episode_predictions[current_episodes[i].episode_id].append(
envs.call_at(i, "get_agent_info", {})
)
if config.INFERENCE.FORMAT == "rxr":
ep_id = current_episodes[i].episode_id
k = current_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
with tqdm.tqdm(
total=sum(envs.count_episodes()),
desc=f"[inference:{self.config.INFERENCE.SPLIT}]",
) as pbar:
while envs.num_envs > 0:
current_episodes = envs.current_episodes()
positions = []; headings = []
for i in range(envs.num_envs):
agent_state_i = envs.call_at(i,"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for i, ob in enumerate(observations):
if env_actions[i]['action']['action'] == 0:
continue
else:
envs.call_at(
i, 'update_cur_path', {'new_path': ob.pop('cur_path')}
) # to update and record low-level path
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8,
device=self.device,
)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if not dones[i]:
continue
ep_id = envs.current_episodes()[i].episode_id
if 'cur_path' in envs.current_episodes()[i].info:
episode_predictions[ep_id] += envs.current_episodes()[i].info['cur_path']
episode_predictions[ep_id][-1]['stop'] = True
# assert len(episode_predictions[ep_id]) <= 500
observations[i] = envs.reset_at(i)[0]
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
# prev_actions[i] = torch.zeros(1, dtype=torch.long)
pbar.update()
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if not dones[i]:
continue
if next_episodes[i].episode_id in episode_predictions:
envs_to_pause.append(i)
else:
episode_predictions[next_episodes[i].episode_id].append(
envs.call_at(i, "get_agent_info", {})
)
if config.INFERENCE.FORMAT == "rxr":
ep_id = next_episodes[i].episode_id
k = next_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
# number += 1
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
# positions
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
# rgb_frames,
# positions
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.INFERENCE.FORMAT == "r2r":
with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f:
json.dump(episode_predictions, f, indent=2)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
else: # use 'rxr' format for rxr-habitat leaderboard
predictions_out = []
for k,v in episode_predictions.items():
# save only positions that changed
path = [v[0]["position"]]
for p in v[1:]:
if path[-1] != p["position"]:
path.append(p["position"])
predictions_out.append(
{
"instruction_id": instruction_ids[k],
"path": path,
}
)
predictions_out.sort(key=lambda x: x["instruction_id"])
with jsonlines.open(
config.INFERENCE.PREDICTIONS_FILE, mode="w"
) as writer:
writer.write_all(predictions_out)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/base_il_trainer.py |
import json
import logging
import math
import os
import sys
from io import open
from typing import Callable, List, Tuple
import numpy as np
import copy
import torch
from torch import nn
from torch import Tensor, device, dtype
from transformers import BertPreTrainedModel
logger = logging.getLogger(__name__)
BertLayerNorm = torch.nn.LayerNorm
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
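# A minimal sketch of the tanh-based approximation mentioned in the docstring above
# (not used by this module; kept as a comment purely for comparison):
# def gelu_tanh_approx(x):
#     return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))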
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
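    # Shape sketch for transpose_for_scores (assuming batch size B and sequence length L):
    #   input  x: (B, L, all_head_size)
    #   output  : (B, num_attention_heads, L, attention_head_size)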
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
# recurrent vlnbert use attention scores
outputs = (context_layer, attention_scores) if self.output_attentions else (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
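    # Layer flow sketch: self-attention (residual + LayerNorm applied inside BertSelfOutput),
    # then BertIntermediate (up-projection + activation), then BertOutput
    # (down-projection + residual + LayerNorm).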
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, head_mask=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask,
None if head_mask is None else head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOutAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
if ctx_dim is None:
ctx_dim =config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_probs
class BertXAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
self.att = BertOutAttention(config, ctx_dim=ctx_dim)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):
output, attention_scores = self.att(input_tensor, ctx_tensor, ctx_att_mask)
attention_output = self.output(output, input_tensor)
return attention_output, attention_scores
class LXRTXLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.no_lang_ca = config.no_lang_ca # do not update language embeds
# Lang self-att and FFN layer
self.lang_self_att = BertAttention(config)
self.lang_inter = BertIntermediate(config)
self.lang_output = BertOutput(config)
# Visn self-att and FFN layer
self.visn_self_att = BertAttention(config)
self.visn_inter = BertIntermediate(config)
self.visn_output = BertOutput(config)
# The cross attention layer
self.visual_attention = BertXAttention(config)
def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
# Cross Attention
if self.no_lang_ca:
lang_att_output = lang_input
else:
lang_att_output, _ = self.visual_attention(lang_input, visn_input, ctx_att_mask=visn_attention_mask)
visn_att_output, cross_attention_score = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask)
return lang_att_output, visn_att_output, cross_attention_score
def self_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
# Self Attention
if self.no_lang_ca:
lang_att_output = (lang_input, )
else:
lang_att_output = self.lang_self_att(lang_input, lang_attention_mask)
visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)
return lang_att_output, visn_att_output
def output_fc(self, lang_input, visn_input):
# FC layers
if not self.no_lang_ca:
lang_inter_output = self.lang_inter(lang_input)
visn_inter_output = self.visn_inter(visn_input)
# Layer output
if self.no_lang_ca:
lang_output = lang_input
else:
lang_output = self.lang_output(lang_inter_output, lang_input)
visn_output = self.visn_output(visn_inter_output, visn_input)
return lang_output, visn_output
def forward(self, lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask):
lang_att_output = lang_feats
visn_att_output = visn_feats
lang_att_output, visn_att_output, cross_attention_score = self.cross_att(lang_att_output, lang_attention_mask,
visn_att_output, visn_attention_mask)
lang_attention_score = cross_attention_score[:,:,0,:]
lang_att_output, visn_att_output = self.self_att(lang_att_output, lang_attention_mask,
visn_att_output, visn_attention_mask)
lang_output, visn_output = self.output_fc(lang_att_output[0], visn_att_output[0])
return lang_output, visn_output, lang_attention_score
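    # Cross-modal layer sketch: (1) cross-attention between the language and visual streams,
    # (2) per-stream self-attention, (3) per-stream FFN. When no_lang_ca is set, the language
    # stream bypasses all three stages and is returned unchanged.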
class LxmertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.num_l_layers = config.num_l_layers
self.num_r_layers = config.num_r_layers
self.num_h_layers = config.num_h_layers
self.num_x_layers = config.num_x_layers
self.update_lang_bert = config.update_lang_bert
# Using self.layer instead of self.l_layers to support loading BERT weights.
self.layer = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_l_layers)]
)
if not self.update_lang_bert:
for name, param in self.layer.named_parameters():
param.requires_grad_(False)
self.h_layers = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_h_layers)]
) if self.num_h_layers > 0 else None
self.r_layers = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_r_layers)]
) if self.num_r_layers > 0 else None
self.x_layers = nn.ModuleList(
[LXRTXLayer(config) for _ in range(self.num_x_layers)]
)
def forward(self, txt_embeds, extended_txt_masks, hist_embeds,
extended_hist_masks, img_embeds=None, extended_img_masks=None):
# text encoding
for layer_module in self.layer:
temp_output = layer_module(txt_embeds, extended_txt_masks)
txt_embeds = temp_output[0]
# if not self.update_lang_bert:
# txt_embeds = txt_embeds.detach()
# image encoding
if img_embeds is not None:
if self.r_layers is not None:
for layer_module in self.r_layers:
temp_output = layer_module(img_embeds, extended_img_masks)
img_embeds = temp_output[0]
# history encoding
if self.h_layers is not None:
for layer_module in self.h_layers:
temp_output = layer_module(hist_embeds, extended_hist_masks)
hist_embeds = temp_output[0]
hist_max_len = hist_embeds.size(1)
# cross-modal encoding
if img_embeds is None:
hist_img_embeds = hist_embeds
extended_hist_img_masks = extended_hist_masks
else:
hist_img_embeds = torch.cat([hist_embeds, img_embeds], 1)
extended_hist_img_masks = torch.cat([extended_hist_masks, extended_img_masks], -1)
for layer_module in self.x_layers:
txt_embeds, hist_img_embeds = layer_module(
txt_embeds, extended_txt_masks,
hist_img_embeds, extended_hist_img_masks)
hist_embeds = hist_img_embeds[:, :hist_max_len]
if img_embeds is not None:
img_embeds = hist_img_embeds[:, hist_max_len:]
return txt_embeds, hist_embeds, img_embeds
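    # Encoding order sketch: (1) text-only BERT layers, (2) optional r_layers / h_layers for
    # image and history self-attention, (3) cross-modal x_layers applied to the concatenation
    # [history; image] against the text stream, after which the two parts are split again.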
class ImageEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.img_linear = nn.Linear(config.image_feat_size, config.hidden_size)
self.img_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dep_linear = nn.Linear(config.depth_feat_size, config.hidden_size)
self.dep_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.ang_linear = nn.Linear(config.angle_feat_size, config.hidden_size)
self.ang_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dis_linear = nn.Linear(config.angle_feat_size, config.hidden_size)
self.dis_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
# 0: non-navigable, 1: navigable, 2: stop
self.nav_type_embedding = nn.Embedding(3, config.hidden_size)
# tf naming convention for layer norm
self.layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, rgb_feat, dep_feat, ang_feat, dis_feat, type_embeddings, nav_types=None):
transformed_im = self.img_layer_norm(self.img_linear(rgb_feat))
transformed_dep = self.dep_layer_norm(self.dep_linear(dep_feat))
transformed_ang = self.ang_layer_norm(self.ang_linear(ang_feat))
transformed_dis = self.dis_layer_norm(self.dis_linear(dis_feat))
embeddings = transformed_im + transformed_dep + transformed_ang + transformed_dis + type_embeddings
if nav_types is not None:
nav_embeddings = self.nav_type_embedding(nav_types)
embeddings = embeddings + nav_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
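    # Fusion sketch: RGB, depth, angle and distance features are each projected to hidden_size
    # and layer-normed, summed together with the token-type (and, if given, nav-type)
    # embeddings, then passed through a final LayerNorm + dropout.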
class HistoryEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.img_linear = nn.Linear(config.image_feat_size, config.hidden_size)
self.img_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
# self.dep_linear = nn.Linear(config.depth_feat_size, config.hidden_size)
# self.dep_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.ang_linear = nn.Linear(config.angle_feat_size, config.hidden_size)
self.ang_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.position_embeddings = nn.Embedding(config.max_action_steps, config.hidden_size)
# special type embedding for history
self.type_embedding = nn.Embedding(1, config.hidden_size)
# tf naming convention for layer norm
self.layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.hist_enc_pano = config.hist_enc_pano
if config.hist_enc_pano:
self.pano_img_linear = nn.Linear(config.image_feat_size, config.hidden_size)
self.pano_img_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
# self.pano_dep_linear = nn.Linear(config.depth_feat_size, config.hidden_size)
# self.pano_dep_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.pano_ang_linear = nn.Linear(config.angle_feat_size, config.hidden_size)
self.pano_ang_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
pano_enc_config = copy.copy(config)
pano_enc_config.num_hidden_layers = config.num_h_pano_layers
self.pano_encoder = BertEncoder(pano_enc_config)
else:
self.pano_encoder = None
def forward(self, img_feats, dep_feats, ang_feats, pos_ids,
pano_img_feats=None, pano_dep_feats=None, pano_ang_feats=None):
'''Args:
- img_feats: (batch_size, dim_feat)
- pos_ids: (batch_size, )
- pano_img_feats: (batch_size, pano_len, dim_feat)
'''
device = next(iter(self.parameters())).device
if img_feats is not None:
batch_size = img_feats.size(0)
else:
batch_size = 1
type_ids = torch.zeros((batch_size, )).long().to(device)
type_embeddings = self.type_embedding(type_ids)
if img_feats is None:
cls_embeddings = self.dropout(self.layer_norm(
self.cls_token.expand(batch_size, -1, -1)[:, 0] + type_embeddings))
return cls_embeddings
# history embedding per step
embeddings = self.img_layer_norm(self.img_linear(img_feats)) + \
self.ang_layer_norm(self.ang_linear(ang_feats)) + \
self.position_embeddings(pos_ids) + \
type_embeddings
if self.pano_encoder is not None:
pano_embeddings = self.pano_img_layer_norm(self.pano_img_linear(pano_img_feats)) + \
self.pano_ang_layer_norm(self.pano_ang_linear(pano_ang_feats))
pano_embeddings = self.dropout(pano_embeddings)
# TODO: mask is always True
batch_size, pano_len, _ = pano_img_feats.size()
extended_pano_masks = torch.zeros(batch_size, pano_len).float().to(device).unsqueeze(1).unsqueeze(2)
pano_embeddings = self.pano_encoder(pano_embeddings, extended_pano_masks)[0]
pano_embeddings = torch.mean(pano_embeddings, 1)
            embeddings = embeddings + pano_embeddings  # the history embedding covers both the oriented-view image and the panoramic context
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class NextActionPrediction(nn.Module):
def __init__(self, hidden_size, dropout_rate):
super().__init__()
self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
BertLayerNorm(hidden_size, eps=1e-12),
nn.Dropout(dropout_rate),
nn.Linear(hidden_size, 1))
def forward(self, x):
return self.net(x)
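# Scoring sketch: NextActionPrediction maps each hidden_size token embedding to a single
# action logit via Linear -> ReLU -> LayerNorm -> Dropout -> Linear(hidden_size, 1);
# non-candidate positions are masked to -inf by the caller before argmax/softmax.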
class NavCMT(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = BertEmbeddings(config)
self.img_embeddings = ImageEmbeddings(config)
self.hist_embeddings = HistoryEmbeddings(config)
self.encoder = LxmertEncoder(config)
self.next_action = NextActionPrediction(config.hidden_size, config.pred_head_dropout_prob)
self.init_weights()
if self.config.fix_lang_embedding:
for name, param in self.embeddings.named_parameters():
if 'token_type_embeddings' not in name:
param.requires_grad_(False)
        if self.config.fix_hist_embedding:
            for param in self.hist_embeddings.parameters():
                param.requires_grad_(False)
        if self.config.fix_obs_embedding:
            for param in self.img_embeddings.parameters():
                param.requires_grad_(False)
def forward(self, mode, txt_ids=None, txt_embeds=None, txt_masks=None,
hist_rgb_fts=None, hist_depth_fts=None, hist_ang_fts=None,
hist_pano_rgb_fts=None, hist_pano_depth_fts=None, hist_pano_ang_fts=None,
hist_embeds=None, ob_step_ids=None, hist_masks=None,
ob_rgb_fts=None, ob_dep_fts=None, ob_ang_fts=None, ob_dis_fts=None, ob_nav_types=None,
ob_masks=None):
# text embedding
if mode == 'language':
''' LXMERT language branch (in VLN only perform this at initialization) '''
extended_txt_masks = txt_masks.unsqueeze(1).unsqueeze(2)
extended_txt_masks = extended_txt_masks.to(dtype=self.dtype)
extended_txt_masks = (1.0 - extended_txt_masks) * -10000.0
txt_token_type_ids = torch.zeros_like(txt_ids)
txt_embeds = self.embeddings(txt_ids, token_type_ids=txt_token_type_ids)
for layer_module in self.encoder.layer:
temp_output = layer_module(txt_embeds, extended_txt_masks)
txt_embeds = temp_output[0]
# if self.config.fix_lang_embedding:
# txt_embeds = txt_embeds.detach()
if self.config.no_lang_ca: # run self-attn layers for lang
all_txt_embeds = [txt_embeds]
for layer_module in self.encoder.x_layers:
lang_att_output = layer_module.lang_self_att(txt_embeds, extended_txt_masks)[0]
lang_inter_output = layer_module.lang_inter(lang_att_output)
lang_output = layer_module.lang_output(lang_inter_output, lang_att_output)
all_txt_embeds.append(lang_output)
return all_txt_embeds
return txt_embeds
# history embedding per step
if mode == 'history':
hist_embeds = self.hist_embeddings(hist_rgb_fts, hist_depth_fts, hist_ang_fts, ob_step_ids,
pano_img_feats=hist_pano_rgb_fts, pano_dep_feats=hist_pano_depth_fts, pano_ang_feats=hist_pano_ang_fts)
# if self.config.fix_hist_embedding:
# hist_embeds = hist_embeds.detach()
return hist_embeds
# cross-modal encoding per step
elif mode == 'navigation':
''' LXMERT visual branch'''
# history embedding
extended_hist_masks = hist_masks.unsqueeze(1).unsqueeze(2)
extended_hist_masks = extended_hist_masks.to(dtype=self.dtype)
extended_hist_masks = (1.0 - extended_hist_masks) * -10000.0
# if self.encoder.h_layers is not None:
# for layer_module in self.encoder.h_layers:
# temp_output = layer_module(hist_embeds, extended_hist_masks)
# hist_embeds = temp_output[0]
# image embedding
extended_ob_masks = ob_masks.unsqueeze(1).unsqueeze(2)
extended_ob_masks = extended_ob_masks.to(dtype=self.dtype)
extended_ob_masks = (1.0 - extended_ob_masks) * -10000.0
ob_token_type_ids = torch.ones(ob_rgb_fts.size(0), ob_rgb_fts.size(1), dtype=torch.long, device=self.device)
ob_embeds = self.img_embeddings(ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts,
self.embeddings.token_type_embeddings(ob_token_type_ids),
nav_types=ob_nav_types)
# if self.encoder.r_layers is not None:
# for layer_module in self.encoder.r_layers:
# temp_output = layer_module(ob_embeds, extended_ob_masks)
# ob_embeds = temp_output[0]
# if self.config.fix_obs_embedding:
# ob_embeds = ob_embeds.detach()
# multi-modal encoding
hist_max_len = hist_embeds.size(1)
hist_ob_embeds = torch.cat([hist_embeds, ob_embeds], 1)
extended_hist_ob_masks = torch.cat([extended_hist_masks, extended_ob_masks], -1)
extended_txt_masks = txt_masks.unsqueeze(1).unsqueeze(2)
extended_txt_masks = extended_txt_masks.to(dtype=self.dtype)
extended_txt_masks = (1.0 - extended_txt_masks) * -10000.0
if self.config.no_lang_ca:
all_txt_embeds = txt_embeds
for l, layer_module in enumerate(self.encoder.x_layers):
if self.config.no_lang_ca:
txt_embeds = all_txt_embeds[l]
txt_embeds, hist_ob_embeds, lang_attention_score = layer_module(
txt_embeds, extended_txt_masks,
hist_ob_embeds, extended_hist_ob_masks,
)
hist_embeds = hist_ob_embeds[:, :hist_max_len]
ob_embeds = hist_ob_embeds[:, hist_max_len:]
# TODO
if self.config.no_lang_ca:
act_logits = self.next_action(ob_embeds).squeeze(-1)
else:
if self.config.act_pred_token == 'ob_txt':
act_logits = self.next_action(ob_embeds * txt_embeds[:, :1]).squeeze(-1)
elif self.config.act_pred_token == 'ob':
act_logits = self.next_action(ob_embeds).squeeze(-1)
elif self.config.act_pred_token == 'ob_hist':
act_logits = self.next_action(ob_embeds * hist_embeds[:, :1]).squeeze(-1)
elif self.config.act_pred_token == 'ob_txt_hist':
act_logits = self.next_action(ob_embeds * (txt_embeds[:, :1] + hist_embeds[:, :1])).squeeze(-1)
act_logits.masked_fill_(ob_nav_types==0, -float('inf'))
            # shape of ob_nav_types, e.g.: 1,1,1,1,2,0,0,0,0,0 (1: navigable candidate, 2: stop, 0: padding / non-navigable)
return act_logits, txt_embeds, hist_embeds, ob_embeds, lang_attention_score
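        # act_pred_token sketch: 'ob' scores the observation tokens directly; 'ob_txt',
        # 'ob_hist' and 'ob_txt_hist' first modulate them with the first-position text
        # and/or history embeddings before the NextActionPrediction head (see the branch above).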
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vilmodel_cmt.py |
import torch
def get_tokenizer(args):
from transformers import AutoTokenizer
if args.dataset == 'rxr' or args.tokenizer == 'xlm':
cfg_name = 'xlm-roberta-base'
else:
cfg_name = 'bert-base-uncased'
tokenizer = AutoTokenizer.from_pretrained(cfg_name)
return tokenizer
def get_vlnbert_models(config=None):
from transformers import PretrainedConfig
from vlnce_baselines.models.hamt.vilmodel_cmt import NavCMT
model_class = NavCMT
model_name_or_path = config.pretrained_path
new_ckpt_weights = {}
if model_name_or_path is not None:
ckpt_weights = torch.load(model_name_or_path, map_location='cpu')
for k, v in ckpt_weights.items():
if k.startswith('module'):
new_ckpt_weights[k[7:]] = v
else:
# add next_action in weights
if k.startswith('next_action'):
k = 'bert.' + k
new_ckpt_weights[k] = v
if config.task_type == 'r2r':
cfg_name = 'pretrained/Prevalent/bert-base-uncased'
elif config.task_type == 'rxr':
cfg_name = 'pretrained/xlm-roberta-base'
vis_config = PretrainedConfig.from_pretrained(cfg_name)
if config.task_type == 'r2r':
vis_config.image_feat_size = 768
vis_config.max_action_steps = 50
elif config.task_type == 'rxr':
vis_config.type_vocab_size = 2
vis_config.image_feat_size = 512
vis_config.max_action_steps = 100
# vis_config.image_feat_size = 768
vis_config.depth_feat_size = 128
vis_config.angle_feat_size = 4
vis_config.num_l_layers = 9
vis_config.num_r_layers = 0
vis_config.num_h_layers = 0
vis_config.num_x_layers = 4
vis_config.hist_enc_pano = True
vis_config.num_h_pano_layers = 2
vis_config.fix_lang_embedding = config.fix_lang_embedding
vis_config.fix_hist_embedding = config.fix_hist_embedding
vis_config.fix_obs_embedding = config.fix_obs_embedding
vis_config.update_lang_bert = not vis_config.fix_lang_embedding
vis_config.output_attentions = True
vis_config.pred_head_dropout_prob = 0.1
vis_config.no_lang_ca = False
vis_config.act_pred_token = 'ob_txt'
# vis_config.max_action_steps = 50
# vis_config.max_action_steps = 100
visual_model = model_class.from_pretrained(
pretrained_model_name_or_path=None,
config=vis_config,
state_dict=new_ckpt_weights)
return visual_model
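# Minimal usage sketch (the argument and config field names below follow this file's own
# assumptions and are not a fixed public API):
#   tokenizer = get_tokenizer(args)  # 'xlm-roberta-base' for RxR, else 'bert-base-uncased'
#   vln_bert = get_vlnbert_models(config=model_config)  # NavCMT initialised from config.pretrained_path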
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vlnbert_init.py |
from vlnce_baselines.models.videomae import volume_transforms, video_transforms, modeling_finetune, utils
from vlnce_baselines.models.videomae.get_args import get_args
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm import create_model
from habitat import logger
from collections import OrderedDict
class VideoRGBEcnoder(nn.Module):
r"""
Takes in observations and produces an embedding of the rgb component.
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
device: torch.device
"""
def __init__(
self,
observation_space,
output_size,
model_name,
device,
spatial_output: bool = False,
):
super().__init__()
self.device = device
args, ds_init = get_args()
if "Large" in model_name:
args.model = 'vit_large_patch16_224'
args.finetune = 'pretrained/VideoMAE/vit_l_hybrid_pt_800e.pth'
elif "Base" in model_name:
args.model = 'vit_base_patch16_224'
args.finetune = 'pretrained/VideoMAE/vit_b_hybrid_pt_800e.pth'
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
all_frames=args.num_frames * args.num_segments,
tubelet_size=args.tubelet_size,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
)
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
all_keys = list(checkpoint_model.keys())
new_dict = OrderedDict()
for key in all_keys:
if key.startswith('backbone.'):
new_dict[key[9:]] = checkpoint_model[key]
elif key.startswith('encoder.'):
new_dict[key[8:]] = checkpoint_model[key]
else:
new_dict[key] = checkpoint_model[key]
checkpoint_model = new_dict
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1] # channel dim
num_patches = model.patch_embed.num_patches #
num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1
# height (== width) for the checkpoint position embedding
orig_size = int(((pos_embed_checkpoint.shape[-2] - num_extra_tokens)//(args.num_frames // model.patch_embed.tubelet_size)) ** 0.5)
# height (== width) for the new position embedding
new_size = int((num_patches // (args.num_frames // model.patch_embed.tubelet_size) )** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(-1, args.num_frames // model.patch_embed.tubelet_size, orig_size, orig_size, embedding_size)
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, args.num_frames // model.patch_embed.tubelet_size, new_size, new_size, embedding_size)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
self.model = model
# disable gradients for resnet, params frozen
for param in self.model.parameters():
param.requires_grad = False
self.model.eval()
self.resnet_layer_size = 768
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
# self.fc = nn.Linear(768, output_size)
# self.activation = nn.ReLU()
else:
class SpatialAvgPool(nn.Module):
def forward(self, x):
x = F.adaptive_avg_pool2d(x, (4, 4))
return x
self.model.avgpool = SpatialAvgPool()
self.model.fc = nn.Sequential()
self.spatial_embeddings = nn.Embedding(4 * 4, 64)
self.output_shape = (
self.resnet_layer_size + self.spatial_embeddings.embedding_dim,
4,
4,
)
self.data_transform = video_transforms.Compose([
video_transforms.Resize(args.short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(size=(args.input_size, args.input_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
@property
def is_blind(self):
return self._n_input_rgb == 0
def forward(self, observations):
r"""Sends RGB observation through the TorchVision ResNet50 pre-trained
on ImageNet. Sends through fully connected layer, activates, and
returns final embedding.
"""
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH]
# rgb_observations = observations["rgb"].permute(0, 3, 1, 2)
video_rgb_batch = torch.vstack([self.data_transform(obs[k])[None,...] for obs in observations["video_rgbs"] for k in obs.keys()]).cuda()
if self.spatial_output:
features = self.model.get_spatial_features(video_rgb_batch)
features = self.model.avgpool(features)
b, c, h, w = features.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=features.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([features, spatial_features], dim=1)#.to(self.device)
else:
return self.model.forward_features(video_rgb_batch) | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/video_encoder.py |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from gym import spaces
from habitat import logger
from habitat_baselines.rl.ddppo.policy import resnet
from habitat_baselines.rl.ddppo.policy.resnet_policy import ResNetEncoder
import clip
import torchvision
class VlnResnetDepthEncoder(nn.Module):
def __init__(
self,
observation_space,
output_size=128,
checkpoint="NONE",
backbone="resnet50",
resnet_baseplanes=32,
normalize_visual_inputs=False,
trainable=False,
spatial_output: bool = False,
):
super().__init__()
self.visual_encoder = ResNetEncoder(
spaces.Dict({"depth": observation_space.spaces["depth"]}),
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
normalize_visual_inputs=normalize_visual_inputs,
)
for param in self.visual_encoder.parameters():
param.requires_grad_(trainable)
if checkpoint != "NONE":
ddppo_weights = torch.load(checkpoint)
weights_dict = {}
for k, v in ddppo_weights["state_dict"].items():
split_layer_name = k.split(".")[2:]
if split_layer_name[0] != "visual_encoder":
continue
layer_name = ".".join(split_layer_name[1:])
weights_dict[layer_name] = v
del ddppo_weights
self.visual_encoder.load_state_dict(weights_dict, strict=True)
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
# self.visual_fc = nn.Sequential(
# nn.Flatten(),
# nn.Linear(
# np.prod(self.visual_encoder.output_shape), output_size
# ),
# nn.ReLU(True),
# )
None
else:
self.spatial_embeddings = nn.Embedding(
self.visual_encoder.output_shape[1]
* self.visual_encoder.output_shape[2],
64,
)
self.output_shape = list(self.visual_encoder.output_shape)
self.output_shape[0] += self.spatial_embeddings.embedding_dim
self.output_shape = tuple(self.output_shape)
def forward(self, observations):
"""
Args:
observations: [BATCH, HEIGHT, WIDTH, CHANNEL]
Returns:
[BATCH, OUTPUT_SIZE]
"""
if "depth_features" in observations:
x = observations["depth_features"]
else:
x = self.visual_encoder(observations)
if self.spatial_output:
b, c, h, w = x.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=x.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([x, spatial_features], dim=1)
else:
# return self.visual_fc(x)
return x
class TorchVisionResNet50(nn.Module):
r"""
Takes in observations and produces an embedding of the rgb component.
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
device: torch.device
"""
def __init__(
self,
observation_space,
output_size,
device,
spatial_output: bool = False,
):
super().__init__()
self.device = device
self.resnet_layer_size = 2048
linear_layer_input_size = 0
if "rgb" in observation_space.spaces:
self._n_input_rgb = observation_space.spaces["rgb"].shape[2]
obs_size_0 = observation_space.spaces["rgb"].shape[0]
obs_size_1 = observation_space.spaces["rgb"].shape[1]
if obs_size_0 != 224 or obs_size_1 != 224:
logger.warn(
"TorchVisionResNet50: observation size is not conformant to expected ResNet input size [3x224x224]"
)
linear_layer_input_size += self.resnet_layer_size
else:
self._n_input_rgb = 0
if self.is_blind:
self.cnn = nn.Sequential()
return
rgb_resnet = models.resnet50(pretrained=True)
rgb_modules = list(rgb_resnet.children())[:-2]
self.cnn = torch.nn.Sequential(*rgb_modules)
# disable gradients for resnet, params frozen
for param in self.cnn.parameters():
param.requires_grad_(False)
self.cnn.eval()
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
# self.fc = nn.Linear(linear_layer_input_size, output_size)
# self.activation = nn.ReLU()
None
else:
class SpatialAvgPool(nn.Module):
def forward(self, x):
x = F.adaptive_avg_pool2d(x, (4, 4))
return x
self.cnn.avgpool = SpatialAvgPool()
self.cnn.fc = nn.Sequential()
self.spatial_embeddings = nn.Embedding(4 * 4, 64)
self.output_shape = (
self.resnet_layer_size + self.spatial_embeddings.embedding_dim,
4,
4,
)
# self.layer_extract = self.cnn._modules.get("avgpool")
from torchvision import transforms
self.rgb_transform = torch.nn.Sequential(
# transforms.ConvertImageDtype(torch.float),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
)
@property
def is_blind(self):
return self._n_input_rgb == 0
def forward(self, observations):
r"""Sends RGB observation through the TorchVision ResNet50 pre-trained
on ImageNet. Sends through fully connected layer, activates, and
returns final embedding.
"""
def resnet_forward(observation):
# resnet_output = torch.zeros(
# 1, dtype=torch.float32, device=observation.device
# )
# def hook(m, i, o):
# resnet_output.set_(o)
# output: [BATCH x RESNET_DIM]
# h = self.layer_extract.register_forward_hook(hook)
resnet_output = self.cnn(observation)
# h.remove()
return resnet_output
if "rgb_features" in observations:
resnet_output = observations["rgb_features"]
else:
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH]
rgb_observations = observations["rgb"].permute(0, 3, 1, 2)
rgb_observations = self.rgb_transform(rgb_observations)
# rgb_observations = rgb_observations / 255.0 # normalize RGB
resnet_output = resnet_forward(rgb_observations.contiguous())
if self.spatial_output:
b, c, h, w = resnet_output.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=resnet_output.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([resnet_output, spatial_features], dim=1)#.to(self.device)
else:
# return self.activation(
# self.fc(torch.flatten(resnet_output, 1))
# ) # [BATCH x OUTPUT_DIM]
return resnet_output
class CLIPEncoder(nn.Module):
r"""
Takes in observations and produces an embedding of the rgb component.
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
device: torch.device
"""
def __init__(
self, device,
):
super().__init__()
self.model, _ = clip.load("ViT-B/32", device=device)
for param in self.model.parameters():
param.requires_grad_(False)
self.model.eval()
from torchvision import transforms
self.rgb_transform = torch.nn.Sequential(
# transforms.ConvertImageDtype(torch.float),
transforms.Normalize([0.48145466, 0.4578275, 0.40821073], [0.26862954, 0.26130258, 0.27577711]),
)
def forward(self, observations):
r"""Sends RGB observation through the TorchVision ResNet50 pre-trained
on ImageNet. Sends through fully connected layer, activates, and
returns final embedding.
"""
rgb_observations = observations["rgb"].permute(0, 3, 1, 2)
rgb_observations = self.rgb_transform(rgb_observations)
output = self.model.encode_image(rgb_observations.contiguous())
return output.float() | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/image_encoders.py |
import gzip
import json
import torch
import torch.nn as nn
from habitat import Config
class InstructionEncoder(nn.Module):
def __init__(self, config: Config):
r"""An encoder that uses RNN to encode an instruction. Returns
the final hidden state after processing the instruction sequence.
Args:
config: must have
embedding_size: The dimension of each embedding vector
hidden_size: The hidden (output) size
rnn_type: The RNN cell type. Must be GRU or LSTM
final_state_only: Whether or not to return just the final state
"""
super().__init__()
self.config = config
# lang_drop_ratio = 0.50
# self.drop = nn.Dropout(p=lang_drop_ratio)
rnn = nn.GRU if self.config.rnn_type == "GRU" else nn.LSTM
self.encoder_rnn = rnn(
input_size=config.embedding_size,
hidden_size=config.hidden_size,
bidirectional=config.bidirectional,
)
if config.sensor_uuid == "instruction":
if self.config.use_pretrained_embeddings:
self.embedding_layer = nn.Embedding.from_pretrained(
embeddings=self._load_embeddings(),
freeze=not self.config.fine_tune_embeddings,
)
else: # each embedding initialized to sampled Gaussian
self.embedding_layer = nn.Embedding(
num_embeddings=config.vocab_size,
embedding_dim=config.embedding_size,
padding_idx=0,
)
@property
def output_size(self):
return self.config.hidden_size * (1 + int(self.config.bidirectional))
def _load_embeddings(self):
"""Loads word embeddings from a pretrained embeddings file.
PAD: index 0. [0.0, ... 0.0]
UNK: index 1. mean of all R2R word embeddings: [mean_0, ..., mean_n]
why UNK is averaged: https://bit.ly/3u3hkYg
Returns:
embeddings tensor of size [num_words x embedding_dim]
"""
with gzip.open(self.config.embedding_file, "rt") as f:
embeddings = torch.tensor(json.load(f))
return embeddings
def forward(self, observations):
"""
Tensor sizes after computation:
instruction: [batch_size x seq_length]
lengths: [batch_size]
hidden_state: [batch_size x hidden_size]
"""
if self.config.sensor_uuid == "instruction":
instruction = observations["instruction"].long()
lengths = (instruction != 0.0).long().sum(dim=1)
instruction = self.embedding_layer(instruction)
# instruction = self.drop(instruction)
else:
instruction = observations["rxr_instruction"]
lengths = (instruction != 0.0).long().sum(dim=2)
lengths = (lengths != 0.0).long().sum(dim=1)
packed_seq = nn.utils.rnn.pack_padded_sequence(
instruction, lengths.cpu(), batch_first=True, enforce_sorted=False
)
output, final_state = self.encoder_rnn(packed_seq)
if self.config.rnn_type == "LSTM":
final_state = final_state[0]
if self.config.final_state_only: # default False
return final_state.squeeze(0)
else:
ctx = nn.utils.rnn.pad_packed_sequence(output,
batch_first=True)[0].permute(0, 2, 1)
all_lang_masks = (ctx == 0.0).all(dim=1)
ctx = ctx.permute(0, 2, 1)
# ctx = self.drop(ctx)
return ctx, all_lang_masks
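# --- Hedged usage sketch (added for illustration only). It reproduces the
# pad/pack pattern used in InstructionEncoder.forward with a throwaway
# embedding layer and bidirectional GRU; the vocabulary size, dimensions, and
# token ids below are made up.
if __name__ == "__main__":
    tokens = torch.tensor([[5, 8, 2, 0, 0],         # 0 is the padding index
                           [3, 9, 4, 7, 1]])
    lengths = (tokens != 0).long().sum(dim=1)        # tensor([3, 5])
    embed = nn.Embedding(num_embeddings=10, embedding_dim=16, padding_idx=0)
    rnn = nn.GRU(input_size=16, hidden_size=32, bidirectional=True)
    packed = nn.utils.rnn.pack_padded_sequence(
        embed(tokens), lengths.cpu(), batch_first=True, enforce_sorted=False
    )
    output, final_state = rnn(packed)
    ctx, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
    print(ctx.shape)                                 # torch.Size([2, 5, 64]) for the bi-GRU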
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/instruction_encoder.py |
import torch
import numpy as np
import sys
import glob
import json
def neighborhoods(mu, x_range, y_range, sigma, circular_x=True, gaussian=False):
""" Generate masks centered at mu of the given x and y range with the
origin in the centre of the output
Inputs:
mu: tensor (N, 2)
Outputs:
tensor (N, y_range, s_range)
"""
x_mu = mu[:,0].unsqueeze(1).unsqueeze(1)
y_mu = mu[:,1].unsqueeze(1).unsqueeze(1)
# Generate bivariate Gaussians centered at position mu
x = torch.arange(start=0,end=x_range, device=mu.device, dtype=mu.dtype).unsqueeze(0).unsqueeze(0)
y = torch.arange(start=0,end=y_range, device=mu.device, dtype=mu.dtype).unsqueeze(1).unsqueeze(0)
y_diff = y - y_mu
x_diff = x - x_mu
if circular_x:
x_diff = torch.min(torch.abs(x_diff), torch.abs(x_diff + x_range))
if gaussian:
output = torch.exp(-0.5 * ((x_diff/sigma[0])**2 + (y_diff/sigma[1])**2 ))
else:
output = torch.logical_and(
torch.abs(x_diff) <= sigma[0], torch.abs(y_diff) <= sigma[1]
).type(mu.dtype)
return output
def nms(pred, max_predictions=10, sigma=(1.0,1.0), gaussian=False):
''' Input (batch_size, 1, height, width) '''
shape = pred.shape
output = torch.zeros_like(pred)
flat_pred = pred.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48)
supp_pred = pred.clone()
flat_output = output.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48)
for i in range(max_predictions):
# Find and save max over the entire map
flat_supp_pred = supp_pred.reshape((shape[0],-1))
val, ix = torch.max(flat_supp_pred, dim=1)
indices = torch.arange(0,shape[0])
flat_output[indices,ix] = flat_pred[indices,ix]
# Suppression
        y = ix // shape[-1]  # floor division: integer row index of the flat argmax
x = ix % shape[-1]
mu = torch.stack([x,y], dim=1).float()
g = neighborhoods(mu, shape[-1], shape[-2], sigma, gaussian=gaussian)
supp_pred *= (1-g.unsqueeze(1))
output[output < 0] = 0
return output
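# Hedged illustration (added for exposition, not part of the original file):
# running the suppression above on a toy 6x8 heatmap with two well-separated
# peaks keeps exactly `max_predictions` maxima and zeroes out a box of
# half-width `sigma` around each one (via the `neighborhoods` helper).
def _demo_nms():
    pred = torch.zeros(1, 1, 6, 8)
    pred[0, 0, 1, 2] = 0.9
    pred[0, 0, 4, 6] = 0.8
    out = nms(pred, max_predictions=2, sigma=(1.0, 1.0))
    return int((out > 0).sum())   # 2: only the two peak cells remain non-zero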
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=50):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def get_attention_mask(num_imgs=12, neighbor=1):
    """Build a circulant [1 x 1 x num_imgs x num_imgs] attention mask in which
    each panorama image attends to itself and `neighbor` images on each side
    (wrapping around the panorama)."""
    assert neighbor <= 5
mask = np.zeros((num_imgs,num_imgs))
t = np.zeros(num_imgs)
t[:neighbor+1] = np.ones(neighbor+1)
if neighbor != 0:
t[-neighbor:] = np.ones(neighbor)
for ri in range(num_imgs):
mask[ri] = t
t = np.roll(t, 1)
return torch.from_numpy(mask).reshape(1,1,num_imgs,num_imgs).long() | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/utils.py |
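# --- Hedged usage sketch for the waypoint_pred/utils.py helpers above
# (appended for illustration). get_attention_mask builds a circulant
# [1 x 1 x num_imgs x num_imgs] mask in which each panorama image attends to
# itself and `neighbor` images on each side, wrapping around the panorama.
if __name__ == "__main__":
    m = get_attention_mask(num_imgs=6, neighbor=1)
    print(m.shape)      # torch.Size([1, 1, 6, 6])
    print(m[0, 0, 0])   # tensor([1, 1, 0, 0, 0, 1]) -- the wrap-around neighbourhood of image 0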
import torch
import torch.nn as nn
import numpy as np
import vlnce_baselines.waypoint_pred.utils as utils
from .transformer.waypoint_bert import WaypointBert
from pytorch_transformers import BertConfig
class BinaryDistPredictor_TRM(nn.Module):
def __init__(self, hidden_dim=768, n_classes=12, device=None):
super(BinaryDistPredictor_TRM, self).__init__()
self.device = device
self.num_angles = 120
self.num_imgs = 12
self.n_classes = 12 # num of distances
self.TRM_LAYER = 2
self.TRM_NEIGHBOR = 1
self.HEATMAP_OFFSET = 5
# self.visual_fc_rgb = nn.Sequential(
# nn.Flatten(),
# nn.Linear(np.prod([2048,7,7]), hidden_dim),
# nn.ReLU(True),
# )
self.visual_fc_depth = nn.Sequential(
nn.Flatten(),
nn.Linear(np.prod([128,4,4]), hidden_dim),
nn.ReLU(True),
)
self.visual_merge = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim),
nn.ReLU(True),
)
config = BertConfig()
config.model_type = 'visual'
config.finetuning_task = 'waypoint_predictor'
config.hidden_dropout_prob = 0.3
config.hidden_size = 768
config.num_attention_heads = 12
config.num_hidden_layers = self.TRM_LAYER
self.waypoint_TRM = WaypointBert(config=config)
layer_norm_eps = config.layer_norm_eps
self.mergefeats_LayerNorm = BertLayerNorm(
hidden_dim,
eps=layer_norm_eps
)
self.mask = utils.get_attention_mask(
num_imgs=self.num_imgs,
neighbor=self.TRM_NEIGHBOR).to(self.device)
self.vis_classifier = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim,
int(n_classes*(self.num_angles/self.num_imgs))),
)
    def forward(self, rgb_feats, depth_feats):
        """Predicts waypoint heatmap logits of shape [batch x num_angles x n_classes]
        from per-image depth features; rgb_feats is currently only used to
        recover the batch size (the RGB branch is commented out above)."""
        bsi = rgb_feats.size(0) // self.num_imgs
# rgb_x = self.visual_fc_rgb(rgb_feats).reshape(
# bsi, self.num_imgs, -1)
depth_x = self.visual_fc_depth(depth_feats).reshape(
bsi, self.num_imgs, -1)
# vis_x = self.visual_merge(
# torch.cat((rgb_x, depth_x), dim=-1)
# )
vis_x = depth_x
attention_mask = self.mask.repeat(bsi,1,1,1)
vis_rel_x = self.waypoint_TRM(
vis_x, attention_mask=attention_mask
)
vis_logits = self.vis_classifier(vis_rel_x)
vis_logits = vis_logits.reshape(
bsi, self.num_angles, self.n_classes)
# heatmap offset (each image is pointing at the middle)
vis_logits = torch.cat(
(vis_logits[:,self.HEATMAP_OFFSET:,:], vis_logits[:,:self.HEATMAP_OFFSET,:]),
dim=1)
return vis_logits
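# Hedged illustration (added for exposition, not from the original repo): the
# "heatmap offset" above is a circular shift along the angle axis that moves
# the first HEATMAP_OFFSET angle bins to the end, so each camera's logits are
# centred on the direction it faces. The toy tensor below makes the rotation
# explicit.
def _demo_heatmap_offset(offset=5):
    angles = torch.arange(120).view(1, 120, 1).expand(1, 120, 12)
    rotated = torch.cat((angles[:, offset:, :], angles[:, :offset, :]), dim=1)
    return rotated[0, :3, 0]   # tensor([5, 6, 7]): bins 0..4 were moved to the end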
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
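# --- Hedged numerical check (added for illustration only): the TF-style
# BertLayerNorm above computes (x - mean) / sqrt(var + eps) with a biased
# variance, which is the same formula torch.nn.LayerNorm uses, so the two
# agree up to floating-point error when weight=1 and bias=0.
if __name__ == "__main__":
    x = torch.randn(4, 7, 16)
    eps = 1e-12
    u = x.mean(-1, keepdim=True)
    s = (x - u).pow(2).mean(-1, keepdim=True)
    manual = (x - u) / torch.sqrt(s + eps)
    builtin = nn.LayerNorm(16, eps=eps)(x)
    print(torch.allclose(manual, builtin, atol=1e-6))   # expected: True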
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/TRM_net.py |
# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
# Modified in Recurrent VLN-BERT, 2020, [email protected]
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from .pytorch_transformer.modeling_bert import (BertEmbeddings,
BertSelfAttention, BertAttention, BertEncoder, BertLayer,
BertSelfOutput, BertIntermediate, BertOutput,
BertPooler, BertLayerNorm, BertPreTrainedModel,
BertPredictionHeadTransform)
logger = logging.getLogger(__name__)
class VisPosEmbeddings(nn.Module):
def __init__(self, config):
super(VisPosEmbeddings, self).__init__()
self.position_embeddings = nn.Embedding(24, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_vis_feats, position_ids=None):
seq_length = input_vis_feats.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_vis_feats.device)
position_ids = position_ids.unsqueeze(0).repeat(input_vis_feats.size(0), 1)
vis_embeddings = input_vis_feats
position_embeddings = self.position_embeddings(position_ids)
embeddings = vis_embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
# embeddings = self.dropout(embeddings)
return embeddings
class CaptionBertSelfAttention(BertSelfAttention):
"""
Modified from BertSelfAttention to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertSelfAttention, self).__init__(config)
self.config = config
def forward(self, hidden_states, attention_mask, head_mask=None,
history_state=None):
if history_state is not None:
x_states = torch.cat([history_state, hidden_states], dim=1)
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(x_states)
mixed_value_layer = self.value(x_states)
else:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
        ''' language features only provide Keys and Values '''
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_scores)
return outputs
class CaptionBertAttention(BertAttention):
"""
Modified from BertAttention to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertAttention, self).__init__(config)
self.self = CaptionBertSelfAttention(config)
self.output = BertSelfOutput(config)
self.config = config
def forward(self, input_tensor, attention_mask, head_mask=None,
history_state=None):
''' transformer processing '''
self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state)
        ''' feed-forward network with residual '''
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class CaptionBertLayer(BertLayer):
"""
Modified from BertLayer to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertLayer, self).__init__(config)
self.attention = CaptionBertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None,
history_state=None):
attention_outputs = self.attention(hidden_states, attention_mask,
head_mask, history_state)
        ''' feed-forward network with residual '''
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:]
return outputs
class CaptionBertEncoder(BertEncoder):
"""
Modified from BertEncoder to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertEncoder, self).__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
        # stack of config.num_hidden_layers Transformer layers (2 in the waypoint predictor)
self.layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_hidden_layers)])
self.config = config
def forward(self, hidden_states, attention_mask, head_mask=None,
encoder_history_states=None):
for i, layer_module in enumerate(self.layer):
history_state = None if encoder_history_states is None else encoder_history_states[i] # default None
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i],
history_state)
hidden_states = layer_outputs[0]
if i == self.config.num_hidden_layers - 1:
slang_attention_score = layer_outputs[1]
outputs = (hidden_states, slang_attention_score)
return outputs
class BertImgModel(nn.Module):
""" Expand from BertModel to handle image region features as input
"""
def __init__(self, config):
super(BertImgModel, self).__init__()
self.config = config
# self.vis_pos_embeds = VisPosEmbeddings(config)
self.encoder = CaptionBertEncoder(config)
def forward(self, input_x, attention_mask=None):
extended_attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None] * self.config.num_hidden_layers
''' positional encodings '''
# input_x = self.vis_pos_embeds(input_x)
''' pass to the Transformer layers '''
encoder_outputs = self.encoder(input_x,
extended_attention_mask, head_mask=head_mask)
outputs = (encoder_outputs[0],) + encoder_outputs[1:]
return outputs
class WaypointBert(nn.Module):
"""
Modified from BertForMultipleChoice to support oscar training.
"""
def __init__(self, config=None):
super(WaypointBert, self).__init__()
self.config = config
self.bert = BertImgModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_x, attention_mask=None):
outputs = self.bert(input_x, attention_mask=attention_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
return sequence_output | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/waypoint_bert.py |
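# --- Hedged illustration for BertImgModel above (appended for exposition):
# a {0, 1} attention mask is converted to additive attention logits via
# (1.0 - mask) * -10000.0, so masked positions receive a large negative score
# and essentially zero probability after the softmax.
if __name__ == "__main__":
    import torch
    mask = torch.tensor([[[[1.0, 1.0, 0.0]]]])        # [batch x 1 x 1 x seq_len]
    extended = (1.0 - mask) * -10000.0
    scores = torch.zeros(1, 3) + extended.view(1, 3)  # uniform scores plus the mask
    print(torch.softmax(scores, dim=-1))              # ~[0.5, 0.5, 0.0]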
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
from io import open
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .file_utils import cached_path
logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
TF_WEIGHTS_NAME = 'model.ckpt'
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, input):
return input
if not six.PY2:
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = ''.join(docstr) + fn.__doc__
return fn
return docstring_decorator
else:
# Not possible to update class docstrings on python2
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
return fn
return docstring_decorator
class PretrainedConfig(object):
""" Base class for all configuration classes.
Handle a few common parameters and methods for loading/downloading/saving configurations.
"""
pretrained_config_archive_map = {}
def __init__(self, **kwargs):
self.finetuning_task = kwargs.pop('finetuning_task', None)
self.num_labels = kwargs.pop('num_labels', 2)
self.output_attentions = kwargs.pop('output_attentions', False)
self.output_hidden_states = kwargs.pop('output_hidden_states', False)
self.torchscript = kwargs.pop('torchscript', False)
def save_pretrained(self, save_directory):
""" Save a configuration object to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r""" Instantiate a PretrainedConfig from a pre-trained model configuration.
Params:
**pretrained_model_name_or_path**: either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache
or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
- a path to a `directory` containing a configuration file saved
using the `save_pretrained(save_directory)` method.
- a path or url to a saved configuration `file`.
**cache_dir**: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
**return_unused_kwargs**: (`optional`) bool:
- If False, then this function returns just the final configuration object.
- If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes:
ie the part of kwargs which has not been used to update `config` and is otherwise ignored.
**kwargs**: (`optional`) dict:
Dictionary of key/value pairs with which to update the configuration object after loading.
- The values in kwargs of any keys which are configuration attributes will be used
to override the loaded values.
- Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
>>> config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
>>> config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
>>> config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
>>> assert config.output_attention == True
>>> config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
>>> foo=False, return_unused_kwargs=True)
>>> assert config.output_attention == True
>>> assert unused_kwargs == {'foo': False}
"""
cache_dir = kwargs.pop('cache_dir', None)
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
else:
config_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
logger.error(
"Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_config_archive_map.keys()),
config_file))
return None
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = cls.from_json_file(resolved_config_file)
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", config)
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_dict(cls, json_object):
"""Constructs a `Config` from a Python dictionary of parameters."""
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class PreTrainedModel(nn.Module):
""" Base class for all models. Handle loading/storing model config and
        a simple interface for downloading and loading pretrained models.
"""
config_class = PretrainedConfig
pretrained_model_archive_map = {}
load_tf_weights = lambda model, config, path: None
base_model_prefix = ""
input_embeddings = None
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
# Save config in model
self.config = config
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self.init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: does nothing and just returns a pointer to the input tokens Embedding Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embedding Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
if hasattr(self, 'tie_weights'):
self.tie_weights()
return model_embeds
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Args:
heads_to_prune: dict of {layer_num (int): list of heads to prune in this layer (list of int)}
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
""" Save a model with its configuration file to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model it-self if we are using distributed training
model_to_save = self.module if hasattr(self, 'module') else self
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
**pretrained_model_name_or_path**: either:
- a string with the `shortcut name` of a pre-trained model to load from cache
or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
- a path to a `directory` containing a configuration file saved
using the `save_pretrained(save_directory)` method.
- a path or url to a tensorflow index checkpoint `file` (e.g. `./tf_model/model.ckpt.index`).
In this case, ``from_tf`` should be set to True and a configuration object should be
provided as `config` argument. This loading option is slower than converting the TensorFlow
checkpoint in a PyTorch model using the provided conversion scripts and loading
the PyTorch model afterwards.
**model_args**: (`optional`) Sequence:
                All remaining positional arguments will be passed to the underlying model's __init__ function
            **config**: an optional configuration for the model to use instead of an automatically loaded configuration.
                Configuration can be automatically loaded when:
                - the model is a model provided by the library (loaded with a `shortcut name` of a pre-trained model), or
                - the model was saved using the `save_pretrained(save_directory)` method (loaded by supplying the save directory).
            **state_dict**: an optional state dictionary for the model to use instead of a state dictionary loaded
                from a saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using `save_pretrained(dir)` and `from_pretrained(save_directory)` is not
a simpler option.
**cache_dir**: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
**output_loading_info**: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
**kwargs**: (`optional`) dict:
Dictionary of key, values to update the configuration object after loading.
Can be used to override selected configuration parameters. E.g. ``output_attention=True``.
- If a configuration is provided with `config`, **kwargs will be directly passed
to the underlying model's __init__ method.
- If a configuration is not provided, **kwargs will be first passed to the pretrained
model configuration class loading function (`PretrainedConfig.from_pretrained`).
Each key of **kwargs that corresponds to a configuration attribute
will be used to override said attribute with the supplied **kwargs value.
Remaining keys that do not correspond to any configuration attribute will
be passed to the underlying model's __init__ function.
Examples::
>>> model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
>>> model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
>>> assert model.config.output_attention == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop('config', None)
state_dict = kwargs.pop('state_dict', None)
cache_dir = kwargs.pop('cache_dir', None)
from_tf = kwargs.pop('from_tf', False)
output_loading_info = kwargs.pop('output_loading_info', False)
# Load config
if config is None:
config, model_kwargs = cls.config_class.from_pretrained(
pretrained_model_name_or_path, *model_args,
cache_dir=cache_dir, return_unused_kwargs=True,
**kwargs
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
logger.error(
"Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_model_archive_map.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# Load from a PyTorch state_dict
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ''
model_to_load = model
if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
start_prefix = cls.base_model_prefix + '.'
if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
print(" Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
print(" Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
if hasattr(model, 'tie_weights'):
model.tie_weights() # make sure word embedding weights are still tied
        # Set model in evaluation mode to deactivate Dropout modules by default
model.eval()
if output_loading_info:
loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs}
return model, loading_info
return model
class Conv1D(nn.Module):
def __init__(self, nf, nx):
""" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
class PoolerStartLogits(nn.Module):
""" Compute SQuAD start_logits from sequence hidden states. """
def __init__(self, config):
super(PoolerStartLogits, self).__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, p_mask=None):
""" Args:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
invalid position mask such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerEndLogits(nn.Module):
""" Compute SQuAD end_logits from sequence hidden states and start token hidden state.
"""
def __init__(self, config):
super(PoolerEndLogits, self).__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
""" Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to hidden_states
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
""" Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """
def __init__(self, config):
super(PoolerAnswerClass, self).__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
"""
Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
note(Original repo):
no dependency on end_feature so that we can obtain one single `cls_logits`
for each sample
"""
hsz = hidden_states.shape[-1]
assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
class SQuADHead(nn.Module):
r""" A SQuAD head inspired by XLNet.
Parameters:
config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Inputs:
**hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
hidden states of sequence tokens
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the last token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
**is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
Whether the question has a possible answer in the paragraph or not.
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
**start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
**start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
Indices for the top config.start_n_top start token possibilities (beam-search).
**end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size,)``
Log probabilities for the ``is_impossible`` label of the answers.
"""
def __init__(self, config):
super(SQuADHead, self).__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
def forward(self, hidden_states, start_positions=None, end_positions=None,
cls_index=None, is_impossible=None, p_mask=None):
outputs = ()
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
class SequenceSummary(nn.Module):
r""" Compute a single vector summary of a sequence hidden states according to various possibilities:
Args of the config class:
summary_type:
- 'last' => [default] take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'token_ids' => supply a Tensor of classification token indices (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj: Add a projection after the vector extraction
summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default
summary_first_dropout: Add a dropout before the projection and activation
summary_last_dropout: Add a dropout after the projection and activation
"""
def __init__(self, config):
super(SequenceSummary, self).__init__()
        self.summary_type = config.summary_type if hasattr(config, 'summary_type') else 'last'  # guard the attribute that is actually read
if config.summary_type == 'attn':
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, 'summary_use_proj') and config.summary_use_proj:
if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
self.activation = Identity()
if hasattr(config, 'summary_activation') and config.summary_activation == 'tanh':
self.activation = nn.Tanh()
self.first_dropout = Identity()
if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(self, hidden_states, token_ids=None):
""" hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
token_ids: [optional] index of the classification token if summary_type == 'token_ids',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'token_ids' and token_ids is None:
we take the last token of the sequence as classification token
"""
if self.summary_type == 'last':
output = hidden_states[:, -1]
elif self.summary_type == 'first':
output = hidden_states[:, 0]
elif self.summary_type == 'mean':
output = hidden_states.mean(dim=1)
elif self.summary_type == 'token_ids':
if token_ids is None:
token_ids = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long)
else:
token_ids = token_ids.unsqueeze(-1).unsqueeze(-1)
token_ids = token_ids.expand((-1,) * (token_ids.dim()-1) + (hidden_states.size(-1),))
# shape of token_ids: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, token_ids).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == 'attn':
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
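# Hedged illustration (added for exposition, not part of the original library
# code): the three simple summary modes above reduce hidden states of shape
# [bsz x seq_len x hidden] to [bsz x hidden] as follows.
def _demo_sequence_summary_modes():
    hidden_states = torch.randn(2, 5, 8)
    last = hidden_states[:, -1]          # 'last'  -> final token state (XLNet-style)
    first = hidden_states[:, 0]          # 'first' -> first token state (BERT [CLS]-style)
    mean = hidden_states.mean(dim=1)     # 'mean'  -> average over the sequence
    return last.shape, first.shape, mean.shape   # each torch.Size([2, 8])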
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
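# Hedged illustration (added for exposition): pruning a Linear(4, 6) layer to
# the three output units indexed below yields a Linear(4, 3) whose weights and
# bias are copies of the kept rows.
def _demo_prune_linear_layer():
    layer = nn.Linear(4, 6)
    index = torch.tensor([0, 2, 5])
    pruned = prune_linear_layer(layer, index, dim=0)
    same_rows = torch.allclose(pruned.weight, layer.weight[index])
    return pruned.weight.shape, same_rows   # (torch.Size([3, 4]), True)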
def prune_conv1d_layer(layer, index, dim=1):
""" Prune a Conv1D layer (a model parameters) to keep only entries in index.
A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(layer, index, dim=None):
""" Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel,
prune_linear_layer, add_start_docstrings)
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
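# Illustrative conversion sketch (comment only; the checkpoint and file names below are
# hypothetical, the classes are the ones defined in this file):
#
#     config = BertConfig.from_json_file("bert_config.json")
#     model = BertForPreTraining(config)
#     load_tf_weights_in_bert(model, config, "/path/to/bert_model.ckpt")
#     torch.save(model.state_dict(), "pytorch_model.bin")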
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
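# ACT2FN maps the activation names accepted by BertConfig.hidden_act to callables,
# e.g. ACT2FN["gelu"](torch.zeros(3)) applies the erf-based gelu above elementwise
# (returning a zero tensor here, since gelu(0) == 0).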
class BertConfig(PretrainedConfig):
r"""
:class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
`BertModel`.
Arguments:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size_or_config_json_file=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs):
super(BertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
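# Note: this pure-PyTorch fallback is numerically equivalent to
# torch.nn.LayerNorm(hidden_size, eps=eps); apex's FusedLayerNorm above is only a speed optimization.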
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
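# Shape summary for BertEmbeddings.forward: input_ids (batch_size, seq_length) ->
# embeddings (batch_size, seq_length, hidden_size); word, position and token-type
# embeddings are summed, then layer-normalized and passed through dropout.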
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)  # weight shape (hidden_size, all_head_size), e.g. 768 x 768 for BERT-base
        self.key = nn.Linear(config.hidden_size, self.all_head_size)    # weight shape (hidden_size, all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)  # weight shape (hidden_size, all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
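    # transpose_for_scores reshapes (batch_size, seq_length, all_head_size)
    #   -> (batch_size, num_attention_heads, seq_length, attention_head_size)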
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
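# In BertSelfAttention.forward, attention_scores and attention_probs have shape
# (batch_size, num_attention_heads, seq_length, seq_length); context_layer is reshaped
# back to (batch_size, seq_length, all_head_size) before being returned.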
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, head_mask=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # outputs, (hidden states), (attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def __init__(self, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
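# Note: each concrete model below calls self.apply(self.init_weights) in its __init__,
# so this initializer runs recursively over every nn.Linear, nn.Embedding and
# BertLayerNorm submodule of a freshly constructed model.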
BERT_START_DOCSTRING = r""" The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.
"""
BERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertModel(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
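        # e.g. an attention_mask row of [1, 1, 0] becomes [0.0, 0.0, -10000.0]: real tokens
        # are left untouched while padding positions are pushed far below the softmax range.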
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
                head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size - 1]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-1`` are ignored (masked); the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size - 1]``
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForPreTraining(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores, seq_relationship_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size - 1]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-1`` are ignored (masked); the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size - 1]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMaskedLM(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, masked_lm_labels=input_ids)
>>> loss, prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # add hidden states and attentions if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
r"""
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForNextSentencePrediction(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> seq_relationship_scores = outputs[0]
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForSequenceClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMultipleChoice(config)
>>> choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> labels = torch.tensor(1).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMultipleChoice, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
num_choices = input_ids.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
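        # Each choice is scored independently by the shared encoder: inputs of shape
        # (batch_size, num_choices, seq_length) are flattened to (batch_size * num_choices, seq_length)
        # here, and the per-choice logits are reshaped back to (batch_size, num_choices) below.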
outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForTokenClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, scores = outputs[:2]
"""
def __init__(self, config):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForQuestionAnswering(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        >>> loss, start_scores, end_scores = outputs[:3]
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
end_positions=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the gathered start/end positions may carry an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_bert.py |
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
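# The resulting cache filename is "<sha256(url)>" or "<sha256(url)>.<sha256(etag)>";
# get_from_cache() below additionally writes a "<filename>.json" sidecar holding the
# original url and etag, which filename_to_url() reads back.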
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
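# For example, split_s3_path("s3://my-bucket/models/weights.bin") returns
# ("my-bucket", "models/weights.bin"); the key's leading '/' is stripped.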
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
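# Cache layout: each entry lives at <cache_dir>/<hashed filename> with a sibling
# "<hashed filename>.json" holding {"url": ..., "etag": ...}; if the host cannot
# be reached (etag is None), an already-downloaded matching entry is reused.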
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/file_utils.py |
import os
import random
import sys
from typing import List, Optional, Type, Union
import habitat
from habitat import logger
from habitat import Config, Env, RLEnv, VectorEnv, make_dataset
from habitat_baselines.utils.env_utils import make_env_fn
random.seed(0)
SLURM_JOBID = os.environ.get("SLURM_JOB_ID", None)
def is_slurm_job() -> bool:
return SLURM_JOBID is not None
def is_slurm_batch_job() -> bool:
r"""Heuristic to determine if a slurm job is a batch job or not. Batch jobs
will have a job name that is not a shell unless the user specifically set the job
name to that of a shell. Interactive jobs have a shell name as their job name.
"""
return is_slurm_job() and os.environ.get("SLURM_JOB_NAME", None) not in (
None,
"bash",
"zsh",
"fish",
"tcsh",
"sh",
)
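# Illustrative example: under `sbatch` the job name typically defaults to the
# submitted script (e.g. "train.sh"), so this returns True; in an interactive
# `srun --pty bash` session SLURM_JOB_NAME is usually "bash" and this returns False.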
def construct_envs(
config: Config,
env_class: Type[Union[Env, RLEnv]],
workers_ignore_signals: bool = False,
auto_reset_done: bool = True,
episodes_allowed: Optional[List[str]] = None,
) -> VectorEnv:
r"""Create VectorEnv object with specified config and env class type.
To allow better performance, dataset are split into small ones for
each individual env, grouped by scenes.
:param config: configs that contain num_environments as well as information
:param necessary to create individual environments.
:param env_class: class type of the envs to be created.
:param workers_ignore_signals: Passed to :ref:`habitat.VectorEnv`'s constructor
:param auto_reset_done: Whether or not to automatically reset the env on done
:return: VectorEnv object created according to specification.
"""
num_envs_per_gpu = config.NUM_ENVIRONMENTS
if isinstance(config.SIMULATOR_GPU_IDS, list):
gpus = config.SIMULATOR_GPU_IDS
else:
gpus = [config.SIMULATOR_GPU_IDS]
num_gpus = len(gpus)
num_envs = num_gpus * num_envs_per_gpu
if episodes_allowed is not None:
config.defrost()
config.TASK_CONFIG.DATASET.EPISODES_ALLOWED = episodes_allowed
config.freeze()
configs = []
env_classes = [env_class for _ in range(num_envs)]
dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE)
scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES
if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:
scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)
logger.info(f"SPLTI: {config.TASK_CONFIG.DATASET.SPLIT}, NUMBER OF SCENES: {len(scenes)}")
if num_envs > 1:
if len(scenes) == 0:
raise RuntimeError(
"No scenes to load, multi-process logic relies on being able"
" to split scenes uniquely between processes"
)
if len(scenes) < num_envs and len(scenes) != 1:
            raise RuntimeError(
                "reduce the number of GPUs or envs as there"
                " aren't enough scenes to split between them"
            )
random.shuffle(scenes)
if len(scenes) == 1:
scene_splits = [[scenes[0]] for _ in range(num_envs)]
else:
scene_splits = [[] for _ in range(num_envs)]
for idx, scene in enumerate(scenes):
scene_splits[idx % len(scene_splits)].append(scene)
assert sum(map(len, scene_splits)) == len(scenes)
for i in range(num_gpus):
for j in range(num_envs_per_gpu):
proc_config = config.clone()
proc_config.defrost()
proc_id = (i * num_envs_per_gpu) + j
task_config = proc_config.TASK_CONFIG
task_config.SEED += proc_id
if len(scenes) > 0:
task_config.DATASET.CONTENT_SCENES = scene_splits[proc_id]
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpus[i]
task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS
proc_config.freeze()
configs.append(proc_config)
# is_debug = True if sys.gettrace() else False
is_debug = False
env_entry = habitat.ThreadedVectorEnv if is_debug else habitat.VectorEnv
envs = env_entry(
make_env_fn=make_env_fn,
env_fn_args=tuple(zip(configs, env_classes)),
auto_reset_done=auto_reset_done,
workers_ignore_signals=workers_ignore_signals,
)
return envs
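# Scene-split example (illustrative): with a shuffled scene list ['A', 'B', 'C', 'D']
# and num_envs = 2, the round-robin assignment above gives scene_splits
# [['A', 'C'], ['B', 'D']], so each env iterates over a disjoint subset of scenes.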
def construct_envs_auto_reset_false(
config: Config, env_class: Type[Union[Env, RLEnv]]
) -> VectorEnv:
return construct_envs(config, env_class, auto_reset_done=False)
def construct_envs_for_rl(
config: Config,
env_class: Type[Union[Env, RLEnv]],
workers_ignore_signals: bool = False,
auto_reset_done: bool = True,
episodes_allowed: Optional[List[str]] = None,
) -> VectorEnv:
r"""Create VectorEnv object with specified config and env class type.
To allow better performance, dataset are split into small ones for
each individual env, grouped by scenes.
:param config: configs that contain num_environments as well as information
:param necessary to create individual environments.
:param env_class: class type of the envs to be created.
:param workers_ignore_signals: Passed to :ref:`habitat.VectorEnv`'s constructor
:param auto_reset_done: Whether or not to automatically reset the env on done
:return: VectorEnv object created according to specification.
"""
num_envs_per_gpu = config.NUM_ENVIRONMENTS
if isinstance(config.SIMULATOR_GPU_IDS, list):
gpus = config.SIMULATOR_GPU_IDS
else:
gpus = [config.SIMULATOR_GPU_IDS]
num_gpus = len(gpus)
num_envs = num_gpus * num_envs_per_gpu
if episodes_allowed is not None:
config.defrost()
config.TASK_CONFIG.DATASET.EPISODES_ALLOWED = episodes_allowed
config.freeze()
configs = []
env_classes = [env_class for _ in range(num_envs)]
dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE)
scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES
if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:
scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)
if num_envs > 1:
if len(scenes) == 0:
raise RuntimeError(
"No scenes to load, multi-process logic relies on being able"
" to split scenes uniquely between processes"
)
if len(scenes) < num_envs and len(scenes) != 1:
            raise RuntimeError(
                "reduce the number of GPUs or envs as there"
                " aren't enough scenes to split between them"
            )
random.shuffle(scenes)
if len(scenes) == 1:
scene_splits = [[scenes[0]] for _ in range(num_envs)]
else:
scene_splits = [[] for _ in range(num_envs)]
for idx, scene in enumerate(scenes):
scene_splits[idx % len(scene_splits)].append(scene)
assert sum(map(len, scene_splits)) == len(scenes)
for i in range(num_gpus):
for j in range(num_envs_per_gpu):
proc_config = config.clone()
proc_config.defrost()
proc_id = (i * num_envs_per_gpu) + j
task_config = proc_config.TASK_CONFIG
task_config.SEED += proc_id
if len(scenes) > 0:
task_config.DATASET.CONTENT_SCENES = scene_splits[proc_id]
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpus[i]
task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS
proc_config.freeze()
configs.append(proc_config)
    is_debug = bool(sys.gettrace())
env_entry = habitat.ThreadedVectorEnv if is_debug else habitat.VectorEnv
envs = env_entry(
make_env_fn=make_env_fn,
env_fn_args=tuple(zip(configs, env_classes)),
auto_reset_done=auto_reset_done,
workers_ignore_signals=workers_ignore_signals,
)
return envs
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/common/env_utils.py |
import json
import jsonlines
import os
import sys
import time
import glob
import warnings
from collections import defaultdict
from typing import Dict, List
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as distr
import torch.multiprocessing as mp
import gzip
import math
import numpy as np
from copy import deepcopy
import tqdm
from gym import Space
from habitat import Config, logger
from habitat.utils.visualizations.utils import append_text_to_image
from habitat_baselines.common.base_il_trainer import BaseILTrainer
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_extensions.measures import Position
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs, generate_video
from habitat_baselines.utils.common import (
get_checkpoint_id,
poll_checkpoint_folder,
)
from habitat_extensions.utils import observations_to_image
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.env_utils import (
construct_envs_auto_reset_false,
construct_envs,
is_slurm_batch_job,
)
from vlnce_baselines.common.utils import *
from habitat_extensions.measures import NDTW
from fastdtw import fastdtw
from ..utils import get_camera_orientations12
from ..utils import (
length2mask, dir_angle_feature, dir_angle_feature_with_ele,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
class BaseVLNCETrainer(BaseILTrainer):
r"""A base trainer for VLN-CE imitation learning."""
supported_tasks: List[str] = ["VLN-v0"]
def __init__(self, config=None):
super().__init__(config)
self.policy = None
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.obs_transforms = []
self.start_epoch = 0
self.step_id = 0
def _initialize_policy(
self,
config: Config,
load_from_ckpt: bool,
observation_space: Space,
action_space: Space,
) -> None:
policy = baseline_registry.get_policy(self.config.MODEL.policy_name)
self.policy = policy.from_config(
config=config,
observation_space=observation_space,
action_space=action_space,
)
''' initialize the waypoint predictor here '''
from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM
self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device)
self.waypoint_predictor.load_state_dict(
torch.load(
'pretrained/wp_pred/waypoint_predictor',
map_location = torch.device('cpu'),
)['predictor']['state_dict']
)
for param in self.waypoint_predictor.parameters():
param.requires_grad_(False)
self.policy.to(self.device)
self.waypoint_predictor.to(self.device)
self.num_recurrent_layers = self.policy.net.num_recurrent_layers
if self.config.GPU_NUMBERS > 1:
            print('Using', self.config.GPU_NUMBERS, 'GPUs!')
self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device],
output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
# self.waypoint_predictor = DDP(self.waypoint_predictor.to(self.device), device_ids=[self.device],
# output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
self.optimizer = torch.optim.AdamW(
self.policy.parameters(), lr=self.config.IL.lr,
)
if load_from_ckpt:
ckpt_path = config.IL.ckpt_to_load
ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu")
if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1:
self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device),
device_ids=[self.device], output_device=self.device)
self.policy.load_state_dict(ckpt_dict["state_dict"])
self.policy.net = self.policy.net.module
self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device),
device_ids=[self.device], output_device=self.device)
# self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"])
# self.waypoint_predictor = self.waypoint_predictor.module
else:
self.policy.load_state_dict(ckpt_dict["state_dict"])
# self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"])
if config.IL.is_requeue:
self.optimizer.load_state_dict(ckpt_dict["optim_state"])
self.start_epoch = ckpt_dict["epoch"] + 1
self.step_id = ckpt_dict["step_id"]
logger.info(f"Loaded weights from checkpoint: {ckpt_path}")
self.waypoint_predictor.eval()
params = sum(param.numel() for param in self.policy.parameters())
params_t = sum(
p.numel() for p in self.policy.parameters() if p.requires_grad
)
logger.info(f"Agent parameters: {params/1e6} MB. Trainable: {params_t/1e6} MB")
logger.info("Finished setting up policy.")
# def save_checkpoint(self, file_name) -> None:
# r"""Save checkpoint with specified name.
# Args:
# file_name: file name for checkpoint
# Returns:
# None
# """
# checkpoint = {
# "state_dict": self.policy.state_dict(),
# "config": self.config,
# }
# torch.save(
# checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
# )
def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
return torch.load(checkpoint_path, *args, **kwargs)
# def _update_agent(
# self,
# observations,
# prev_actions,
# not_done_masks,
# corrected_actions,
# weights,
# step_grad: bool = True,
# loss_accumulation_scalar: int = 1,
# ):
# T, N = corrected_actions.size()
# recurrent_hidden_states = torch.zeros(
# N,
# self.num_recurrent_layers,
# self.config.MODEL.STATE_ENCODER.hidden_size,
# device=self.device,
# )
# AuxLosses.clear()
# # observations['rgb'] = observations['rgb'][0:2]
# # observations['depth'] = observations['depth'][0:2]
# # observations['rxr_instruction'] = observations['rxr_instruction'][0:2]
# # not_done_masks = not_done_masks[0:2]
# # prev_actions = prev_actions[0:2]
# distribution = self.policy.build_distribution(
# observations, recurrent_hidden_states, prev_actions, not_done_masks)
# logits = distribution.logits
# logits = logits.view(T, N, -1)
# action_loss = F.cross_entropy(
# logits.permute(0, 2, 1), corrected_actions, reduction="none"
# )
# action_loss = ((weights * action_loss).sum(0) / weights.sum(0)).mean()
# aux_mask = (weights > 0).view(-1)
# aux_loss = AuxLosses.reduce(aux_mask)
# loss = action_loss + aux_loss
# loss = loss / loss_accumulation_scalar
# loss.backward()
# if step_grad:
# self.optimizer.step()
# self.optimizer.zero_grad()
# # if isinstance(aux_loss, torch.Tensor):
# # aux_loss = aux_loss.item()
# # return loss.item(), action_loss.item(), aux_loss
# return loss, action_loss, aux_loss
@staticmethod
def _pause_envs(
envs_to_pause,
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames=None,
# positions=None
):
# pausing envs with no new episode
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
# positions.pop(idx)
# indexing along the batch dimensions
recurrent_hidden_states = recurrent_hidden_states[state_index]
not_done_masks = not_done_masks[state_index]
prev_actions = prev_actions[state_index]
for k, v in batch.items():
batch[k] = v[state_index]
if rgb_frames is not None:
rgb_frames = [rgb_frames[i] for i in state_index]
return (
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames,
# positions
)
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object
checkpoint_index: index of the current checkpoint
Returns:
None
"""
if self.local_rank < 1:
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
# config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.DATASET.ROLES = ["guide"]
# config.TASK_CONFIG.DATASET.LANGUAGES = config.EVAL.LANGUAGES
# config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = checkpoint_path
if len(config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP_VLNCE")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json",
)
if os.path.exists(fname):
print("skipping -- evaluation exists.")
return
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=self.traj # split by rank
)
dataset_length = sum(envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
stats_episodes = {}
rgb_frames = [[] for _ in range(envs.num_envs)]
if len(config.VIDEO_OPTION) > 0:
os.makedirs(config.VIDEO_DIR, exist_ok=True)
if config.EVAL.EPISODE_COUNT == -1:
episodes_to_eval = sum(envs.number_of_episodes)
else:
episodes_to_eval = min(
config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes)
)
pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None
log_str = (
f"[Ckpt: {checkpoint_index}]"
" [Episodes evaluated: {evaluated}/{total}]"
" [Time elapsed (s): {time}]"
)
start_time = time.time()
# number = 0
total_weight = 0.
ml_loss = 0.
bpositions = [[] for _ in range(envs.num_envs)]
while envs.num_envs > 0 and len(stats_episodes) < episodes_to_eval:
current_episodes = envs.current_episodes()
positions = []; headings = []
for ob_i in range(len(current_episodes)):
agent_state_i = envs.call_at(ob_i, "get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
elif 'VLNBERT' in self.config.MODEL.policy_name:
if 'R2R' in self.config.TASK_CONFIG.DATASET.DATA_PATH:
lang_idx_tokens = batch['instruction']
padding_idx = 0
lang_masks = (lang_idx_tokens != padding_idx)
lang_lengths = lang_masks.sum(1)
lang_token_type_ids = torch.zeros_like(lang_masks,
dtype=torch.long, device=self.device)
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=lang_masks)
elif 'RxR' in self.config.TASK_CONFIG.DATASET.DATA_PATH:
to_be_masked = ((torch.abs(batch['rxr_instruction']) == 0)*1.).mean(-1)
lang_masks = torch.ones_like(to_be_masked) - to_be_masked
# lang_lengths = all_lang_masks.sum(1)
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
observations=batch,
lang_masks=lang_masks,
)
else:
raise NotImplementedError
h_t[h_t_flag] = h_t_init[h_t_flag]
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
# lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for j, ob in enumerate(observations):
if env_actions[j]['action']['action'] == 0:
continue
else:
envs.call_at(j,
'change_current_path', # to update and record low-level path
{'new_path': ob.pop('positions'),
'collisions': ob.pop('collisions')}
)
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8, device=self.device)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if len(config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations[i], infos[i])
frame = append_text_to_image(
frame, current_episodes[i].instruction.instruction_text
)
rgb_frames[i].append(frame)
if not dones[i]:
continue
# ep done, calculate metrics
info = infos[i]
metric = {}
metric['steps_taken'] = info['steps_taken']
ep_id = str(envs.current_episodes()[i].episode_id)
gt_path = np.array(self.gt_data[ep_id]['locations']).astype(np.float)
if 'current_path' in envs.current_episodes()[i].info.keys():
positions_ = np.array(envs.current_episodes()[i].info['current_path']).astype(np.float)
collisions_ = np.array(envs.current_episodes()[i].info['collisions'])
assert collisions_.shape[0] == positions_.shape[0] - 1
else:
positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(np.float)
distance = np.array(info['position']['distance']).astype(np.float)
metric['distance_to_goal'] = distance[-1]
metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0.
metric['oracle_success'] = 1. if (distance <= 3.).any() else 0.
metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum()
try:
metric['collisions'] = collisions_.mean()
except:
metric['collisions'] = 0
pass
gt_length = distance[0]
metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length'])
act_con_path = positions_
gt_con_path = np.array(dis_to_con(gt_path)).astype(np.float)
dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0]
nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE))
metric['ndtw'] = nDTW
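                    # The metrics above follow the usual VLN-CE definitions:
                    # SPL = success * d_gt / max(d_gt, path_length), where d_gt is the
                    # starting geodesic distance to the goal, and
                    # nDTW = exp(-DTW(agent_path, gt_path) / (num GT points * SUCCESS_DISTANCE)).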
stats_episodes[current_episodes[i].episode_id] = metric
observations[i] = envs.reset_at(i)[0] # envs[i] change to next episode
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
if config.use_pbar:
pbar.update()
else:
logger.info(
log_str.format(
evaluated=len(stats_episodes),
total=episodes_to_eval,
time=round(time.time() - start_time),
)
)
if len(config.VIDEO_OPTION) > 0:
generate_video(
video_option=config.VIDEO_OPTION,
video_dir=config.VIDEO_DIR,
images=rgb_frames[i],
episode_id=current_episodes[i].episode_id,
checkpoint_idx=checkpoint_index,
metrics={
"spl": stats_episodes[
current_episodes[i].episode_id
]["spl"]
},
tb_writer=writer,
fps=1,
)
# del stats_episodes[current_episodes[i].episode_id][
# "top_down_map_vlnce"
# ]
# del stats_episodes[current_episodes[i].episode_id][
# "collisions"
# ]
rgb_frames[i] = []
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
                if next_episodes[i].episode_id in stats_episodes:  # a repeated episode id means every episode in this env has already been evaluated
envs_to_pause.append(i)
if 'VLNBERT' in self.config.MODEL.policy_name:
rnn_states = h_t
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
# positions
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
# positions
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.use_pbar:
pbar.close()
if self.world_size > 1:
distr.barrier()
aggregated_stats = {}
num_episodes = len(stats_episodes)
# print('rank', self.local_rank, 'evaluated',num_episodes, 'episodes')
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum(v[stat_key] for v in stats_episodes.values())
/ num_episodes
)
# print(self.local_rank, aggregated_stats)
total = torch.tensor(num_episodes).cuda()
if self.world_size > 1:
            distr.reduce(total, dst=0)
total = total.item()
if self.world_size > 1:
logger.info(
f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_stats}")
for k,v in aggregated_stats.items():
v = torch.tensor(v*num_episodes).cuda()
# print(self.local_rank, k+':', v.item(), num_episodes, 'before reduce')
cat_v = gather_list_and_concat(v,self.world_size)
# print(self.local_rank, k+':', cat_v, num_episodes, 'after_reduce')
v = (sum(cat_v)/total).item()
# print(self.local_rank, k+':', v, num_episodes, 'after divide total')
aggregated_stats[k] = v
split = config.TASK_CONFIG.DATASET.SPLIT
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json",
)
with open(fname, "w") as f:
json.dump(stats_episodes, f, indent=4)
if self.local_rank < 1:
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{split}.json",
)
with open(fname, "w") as f:
json.dump(aggregated_stats, f, indent=4)
logger.info(f"Episodes evaluated: {total}")
checkpoint_num = checkpoint_index + 1
for k, v in aggregated_stats.items():
logger.info(f"Average episode {k}: {v:.6f}")
writer.add_scalar(f"eval_{k}/{split}", v, checkpoint_num)
def collect_val_traj(self):
from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1
trajectories = defaultdict(list)
split = self.config.TASK_CONFIG.DATASET.SPLIT
if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file:
gt_data = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
if (
ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES
and role not in self.config.TASK_CONFIG.DATASET.ROLES
):
continue
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_file.format(
split=split, role=role
),
"rt",
) as f:
gt_data.update(json.load(f))
else:
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_path.format(
split=split)
) as f:
gt_data = json.load(f)
else:
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(split=split)
) as f:
gt_data = json.load(f)
self.gt_data = gt_data
trajectories = gt_data
self.trajectories = gt_data
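        # Shard episodes across workers by interleaving the GT keys: rank r keeps
        # keys r, r+N, r+2N, ... with N = GPU_NUMBERS (e.g. with N=2, rank 0 gets
        # keys 0, 2, 4, ... and rank 1 gets keys 1, 3, 5, ...).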
trajectories = list(trajectories.keys())[self.config.local_rank::self.config.GPU_NUMBERS]
return trajectories
def eval(self) -> None:
r"""Main method of trainer evaluation. Calls _eval_checkpoint() that
is specified in Trainer class that inherits from BaseRLTrainer
or BaseILTrainer
Returns:
None
"""
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if "tensorboard" in self.config.VIDEO_OPTION:
assert (
len(self.config.TENSORBOARD_DIR) > 0
), "Must specify a tensorboard directory for video display"
os.makedirs(self.config.TENSORBOARD_DIR, exist_ok=True)
if "disk" in self.config.VIDEO_OPTION:
assert (
len(self.config.VIDEO_DIR) > 0
), "Must specify a directory for storing videos on disk"
world_size = self.config.GPU_NUMBERS
self.world_size = world_size
self.local_rank = self.config.local_rank
self.config.defrost()
# split = self.config.TASK_CONFIG.DATASET.SPLIT
# self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
# self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.DATASET.ROLES = ["guide"]
self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['POSITION', 'STEPS_TAKEN']
self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]]
if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWEVAL'
self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.EVAL.LANGUAGES
self.config.TASK_CONFIG.DATASET.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.config.EVAL.SPLIT
self.config.use_pbar = not is_slurm_batch_job()
# if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
# self.config.EVAL.trajectories_file = \
# self.config.EVAL.trajectories_file[:-8] + '_w' + \
# str(self.world_size) + '_r' + str(self.local_rank) + '.json.gz'
# if choosing image
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations12()
# sensor_uuids = []
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
cropper_size = dict(crop_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
crop_config.append((camera_template.lower(), cropper_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
self.config.freeze()
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
# -1
# )
torch.cuda.set_device(self.device)
if world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
torch.cuda.set_device(self.device)
self.config.defrost()
self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.freeze()
#
# if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
self.traj = self.collect_val_traj()
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
if os.path.isfile(self.config.EVAL.CKPT_PATH_DIR):
# evaluate singe checkpoint
proposed_index = get_checkpoint_id(
self.config.EVAL.CKPT_PATH_DIR
)
if proposed_index is not None:
ckpt_idx = proposed_index
else:
ckpt_idx = 0
self._eval_checkpoint(
self.config.EVAL.CKPT_PATH_DIR,
writer,
checkpoint_index=ckpt_idx,
)
else:
# evaluate multiple checkpoints in order
# prev_ckpt_ind = -1 #TODO eval start index
evaluated = []
while True:
current_ckpt = None
while current_ckpt is None:
checkpoint_folder = self.config.EVAL_CKPT_PATH_DIR
if not self.config.CEPH_IO:
models_paths = [p for p in filter(os.path.isfile, glob.glob(checkpoint_folder + "/*")) if p not in evaluated]
else:
models_paths = [os.path.join(self.config.CEPH_URL,p) for p in self.client.list(self.config.CEPH_URL) if os.path.join(self.config.CEPH_URL,p) not in evaluated]
if len(models_paths) > 0:
models_paths.sort(key=self._get_iter)
current_ckpt = models_paths[0]
prev_ckpt_ind = current_ckpt.split('.')[-2]
else:
current_ckpt = None
time.sleep(2) # sleep for 2 secs before polling again
# time.sleep(10)
if self.local_rank < 1:
logger.info(f"=======current_ckpt: {current_ckpt}=======")
# prev_ckpt_ind += 1
self._eval_checkpoint(
checkpoint_path=current_ckpt,
writer=writer,
checkpoint_index=prev_ckpt_ind,
)
evaluated.append(current_ckpt)
def inference(self) -> None:
r"""Runs inference on a single checkpoint, creating a path predictions file."""
checkpoint_path = self.config.INFERENCE.CKPT_PATH
logger.info(f"checkpoint_path: {checkpoint_path}")
self.config.defrost()
self.config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT
self.config.TASK_CONFIG.DATASET.ROLES = ["guide"]
self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.INFERENCE.LANGUAGES
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
self.config.IL.ckpt_to_load = self.config.INFERENCE.CKPT_PATH
self.config.TASK_CONFIG.TASK.MEASUREMENTS = []
self.config.TASK_CONFIG.TASK.SENSORS = [
s for s in self.config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s
]
########### Additional Config ###########
self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]]
if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFERENCE'
# if choosing image
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations12()
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
cropper_size = dict(crop_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
crop_config.append((camera_template.lower(), cropper_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
# self.config.ENV_NAME = "VLNCEInferenceEnv" #TODO is this necessary?
self.config.freeze()
if self.config.INFERENCE.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = checkpoint_path
config.freeze()
eps = self.collect_val_traj()
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=eps[:10] if sys.gettrace() else None # for debug, ep subset
)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
episode_predictions = defaultdict(list)
# episode ID --> instruction ID for rxr predictions format
instruction_ids: Dict[str, int] = {}
# populate episode_predictions with the starting state
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
episode_predictions[current_episodes[i].episode_id].append(
envs.call_at(i, "get_agent_info", {})
)
if config.INFERENCE.FORMAT == "rxr":
ep_id = current_episodes[i].episode_id
k = current_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
with tqdm.tqdm(
total=sum(envs.count_episodes()),
desc=f"[inference:{self.config.INFERENCE.SPLIT}]",
) as pbar:
while envs.num_envs > 0:
current_episodes = envs.current_episodes()
positions = []; headings = []
for i in range(envs.num_envs):
agent_state_i = envs.call_at(i,"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for i, ob in enumerate(observations):
if env_actions[i]['action']['action'] == 0:
continue
else:
envs.call_at(
i, 'update_cur_path', {'new_path': ob.pop('cur_path')}
) # to update and record low-level path
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8,
device=self.device,
)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if not dones[i]:
continue
ep_id = envs.current_episodes()[i].episode_id
if 'cur_path' in envs.current_episodes()[i].info:
episode_predictions[ep_id] += envs.current_episodes()[i].info['cur_path']
episode_predictions[ep_id][-1]['stop'] = True
# assert len(episode_predictions[ep_id]) <= 500
observations[i] = envs.reset_at(i)[0]
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
# prev_actions[i] = torch.zeros(1, dtype=torch.long)
pbar.update()
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if not dones[i]:
continue
if next_episodes[i].episode_id in episode_predictions:
envs_to_pause.append(i)
else:
episode_predictions[next_episodes[i].episode_id].append(
envs.call_at(i, "get_agent_info", {})
)
if config.INFERENCE.FORMAT == "rxr":
ep_id = next_episodes[i].episode_id
k = next_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
# number += 1
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
# positions
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
# rgb_frames,
# positions
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.INFERENCE.FORMAT == "r2r":
with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f:
json.dump(episode_predictions, f, indent=2)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
else: # use 'rxr' format for rxr-habitat leaderboard
predictions_out = []
for k,v in episode_predictions.items():
# save only positions that changed
path = [v[0]["position"]]
for p in v[1:]:
if path[-1] != p["position"]:
path.append(p["position"])
predictions_out.append(
{
"instruction_id": instruction_ids[k],
"path": path,
}
)
predictions_out.sort(key=lambda x: x["instruction_id"])
with jsonlines.open(
config.INFERENCE.PREDICTIONS_FILE, mode="w"
) as writer:
writer.write_all(predictions_out)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/common/base_il_trainer.py |
import gzip
import json
from collections import defaultdict, deque
import numpy as np
import torch
import tqdm
from gym import Space
from habitat.config.default import Config
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1
from vlnce_baselines.common.env_utils import construct_envs
from vlnce_baselines.common.utils import extract_instruction_tokens
class TeacherRecollectionDataset(torch.utils.data.IterableDataset):
def __init__(self, config: Config):
super().__init__()
self.config = config
# self._preload = []
self._preload = deque()
self.world_size = self.config.GPU_NUMBERS
self.rank = self.config.local_rank
assert (
config.IL.RECOLLECT_TRAINER.preload_size >= config.IL.batch_size
), "preload size must be greater than batch size."
self.envs = None
self._env_observations = None
if config.IL.use_iw:
self.inflec_weights = torch.tensor(
[1.0, config.IL.inflection_weight_coef]
)
else:
self.inflec_weights = torch.tensor([1.0, 1.0])
if self.config.IL.RECOLLECT_TRAINER.preload_trajectories_file:
self.config.defrost()
self.config.IL.RECOLLECT_TRAINER.trajectories_file = \
self.config.IL.RECOLLECT_TRAINER.trajectories_file[
:-8] + '_w' + \
str(self.world_size) + '_r' + str(self.rank) + '.json.gz'
self.config.freeze()
with gzip.open(
config.IL.RECOLLECT_TRAINER.trajectories_file, "rt"
) as f:
self.trajectories = json.load(f)
else:
self.trajectories = self.collect_dataset()
self.initialize_sims()
def initialize_sims(self):
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.MEASUREMENTS = []
config.freeze()
self.envs = construct_envs(
config,
get_env_class(config.ENV_NAME),
episodes_allowed=list(self.trajectories.keys()),
)
self.length = sum(self.envs.number_of_episodes)
self.obs_transforms = get_active_obs_transforms(self.config)
self._observation_space = apply_obs_transforms_obs_space(
self.envs.observation_spaces[0], self.obs_transforms
)
self.env_step = [0 for _ in range(self.envs.num_envs)]
self._env_observations = [[] for _ in range(self.envs.num_envs)]
observations = self.envs.reset()
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
for i, ep in enumerate(self.envs.current_episodes()):
path_step = self.trajectories[str(ep.episode_id)][0]
self._env_observations[i].append(
(
observations[i],
path_step[0], # prev_action
path_step[2], # oracle_action
)
)
@property
def batch_size(self):
return self.config.IL.batch_size
@property
def observation_space(self) -> Space:
assert self.envs is not None, "Simulator must first be loaded."
assert self._observation_space is not None
return self._observation_space
@property
def action_space(self) -> Space:
assert self.envs is not None, "Simulator must first be loaded."
return self.envs.action_spaces[0]
def close_sims(self):
self.envs.close()
del self.envs
del self._env_observations
self.envs = None
self._env_observations = None
def collect_dataset(self):
r"""Uses the ground truth trajectories to create a teacher forcing
        dataset for a given split. Loads both guide and follower episodes.
"""
trajectories = defaultdict(list)
split = self.config.TASK_CONFIG.DATASET.SPLIT
if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file:
gt_data = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
if (
ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES
and role not in self.config.TASK_CONFIG.DATASET.ROLES
):
continue
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_file.format(
split=split, role=role
),
"rt",
) as f:
gt_data.update(json.load(f))
else:
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_path.format(split=split)
) as f:
gt_data = json.load(f)
t = (
tqdm.tqdm(gt_data.items(), "GT Collection")
if self.config.use_pbar
else gt_data.items()
)
for episode_id, trajectory in t:
if (
self.config.IL.RECOLLECT_TRAINER.max_traj_len != -1
and len(trajectory["actions"])
> self.config.IL.RECOLLECT_TRAINER.max_traj_len
) or (
self.config.IL.RECOLLECT_TRAINER.min_traj_len != -1
and len(trajectory["actions"])
< self.config.IL.RECOLLECT_TRAINER.min_traj_len
):
continue
for i, action in enumerate(trajectory["actions"]):
prev_action = (
trajectories[episode_id][i - 1][1]
if i
else HabitatSimActions.STOP
)
# [prev_action, action, oracle_action]
trajectories[episode_id].append([prev_action, action, action])
trajectories = dict(list(trajectories.items())[self.rank::self.world_size])
self.config.defrost()
self.config.IL.RECOLLECT_TRAINER.trajectories_file = \
self.config.IL.RECOLLECT_TRAINER.trajectories_file[:-8]+'_w'+ \
str(self.world_size)+'_r'+str(self.rank) + '.json.gz'
self.config.freeze()
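        # The '[:-8]' above strips the '.json.gz' suffix before appending the world
        # size and rank, e.g. 'trajectories.json.gz' becomes 'trajectories_w2_r0.json.gz'
        # for rank 0 of 2, so each worker writes its own shard.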
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.trajectories_file, "wt"
) as f:
f.write(json.dumps(trajectories))
return trajectories
def _load_next(self):
"""
Episode length is currently not considered. We were previously batching episodes
together with similar lengths. Not sure if we need to bring that back.
"""
# self.rank = 0
if len(self._preload):
# out = self._preload[self.rank]
# self._preload = self._preload[self.world_size:]
# return out
return self._preload.popleft()
while (
len(self._preload) < self.config.IL.RECOLLECT_TRAINER.preload_size
):
current_episodes = self.envs.current_episodes()
prev_eps = current_episodes
# get the next action for each env
actions = [
self.trajectories[str(ep.episode_id)][self.env_step[i]][1]
for i, ep in enumerate(current_episodes)
]
outputs = self.envs.step(actions)
observations, _, dones, _ = [list(x) for x in zip(*outputs)]
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
current_episodes = self.envs.current_episodes()
for i in range(self.envs.num_envs):
self.env_step[i] += 1
if dones[i]:
assert len(self._env_observations[i]) == len(
self.trajectories[str(prev_eps[i].episode_id)]
), "Collected episode does not match the step count of trajectory"
self._preload.append(
(
[o[0] for o in self._env_observations[i]],
[o[1] for o in self._env_observations[i]],
[o[2] for o in self._env_observations[i]],
)
)
self._env_observations[i] = []
self.env_step[i] = 0
path_step = self.trajectories[
str(current_episodes[i].episode_id)
][self.env_step[i]]
self._env_observations[i].append(
(
observations[i],
path_step[0], # prev_action
path_step[2], # oracle_action
)
)
assert (
len(self._env_observations[i])
<= self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS
), "Trajectories should be no more than the maximum episode steps."
# out = self._preload[self.rank]
# self._preload = self._preload[self.world_size:]
# return out
return self._preload.popleft()
def __next__(self):
"""Takes about 1s to once self._load_next() has finished with a batch
size of 5. For this reason, we probably don't need to use extra workers.
"""
x = self._load_next()
obs, prev_actions, oracle_actions = x
# transpose obs
obs_t = defaultdict(list)
for k in obs[0]:
for i in range(len(obs)):
obs_t[k].append(obs[i][k])
obs_t[k] = np.array(obs_t[k])
for k, v in obs_t.items():
obs_t[k] = torch.from_numpy(np.copy(v))
prev_actions = torch.from_numpy(np.copy(prev_actions))
oracle_actions = torch.from_numpy(np.copy(oracle_actions))
inflections = torch.cat(
[
torch.tensor([1], dtype=torch.long),
(oracle_actions[1:] != oracle_actions[:-1]).long(),
]
)
return (
obs_t,
prev_actions,
oracle_actions,
self.inflec_weights[inflections],
)
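    # Inflection weighting: a step counts as an inflection when the oracle action
    # differs from the previous one (the first step always does). With IL.use_iw,
    # inflection steps get weight IL.inflection_weight_coef and all others get 1.0.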
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
assert (
worker_info.num_workers == 1
), "multiple workers not supported."
return self
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/common/recollection_dataset.py |
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import numpy as np
import copy
import math
def extract_instruction_tokens(
observations: List[Dict],
instruction_sensor_uuid: str,
tokens_uuid: str = "tokens",
max_length: int = 512,
pad_id: int = 0,
) -> Dict[str, Any]:
r"""Extracts instruction tokens from an instruction sensor if the tokens
exist and are in a dict structure.
"""
for i in range(len(observations)):
if (
isinstance(observations[i][instruction_sensor_uuid], dict)
and tokens_uuid in observations[i][instruction_sensor_uuid]
):
# observations[i][instruction_sensor_uuid] = observations[i][
# instruction_sensor_uuid
# ]["tokens"]
token = observations[i][instruction_sensor_uuid]["tokens"][:max_length]
if len(token) < max_length:
token += [pad_id] * (max_length - len(token))
observations[i][instruction_sensor_uuid] = token
else:
break
return observations
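# Example: an instruction observation such as {"tokens": [5, 9, 2], ...} with
# max_length=6 and pad_id=0 is replaced in place by [5, 9, 2, 0, 0, 0]; token
# lists longer than max_length are truncated.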
def gather_list_and_concat(list_of_nums,world_size):
if not torch.is_tensor(list_of_nums):
tensor = torch.Tensor(list_of_nums).cuda()
else:
        if not list_of_nums.is_cuda:
tensor = list_of_nums.cuda()
else:
tensor = list_of_nums
gather_t = [torch.ones_like(tensor) for _ in
range(world_size)]
dist.all_gather(gather_t, tensor)
return gather_t
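# Illustrative usage: with world_size=2, each rank passes its own metric tensor
# and receives a list of two tensors (one per rank); despite the name, the caller
# is expected to sum or concatenate the returned list itself.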
def dis_to_con(path, amount=0.25):
starts = path[:-1]
ends = path[1:]
new_path = [path[0]]
for s, e in zip(starts,ends):
vec = np.array(e) - np.array(s)
ratio = amount/np.linalg.norm(vec[[0,2]])
unit = vec*ratio
times = int(1/ratio)
for i in range(times):
if i != times - 1:
location = np.array(new_path[-1])+unit
new_path.append(location.tolist())
new_path.append(e)
return new_path
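# Example: dis_to_con([[0, 0, 0], [1, 0, 0]], amount=0.25) inserts a waypoint
# every 0.25m along the x-z plane, returning
# [[0, 0, 0], [0.25, 0.0, 0.0], [0.5, 0.0, 0.0], [0.75, 0.0, 0.0], [1, 0, 0]].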
def get_camera_orientations12():
base_angle_deg = 30
base_angle_rad = math.pi / 6
orient_dict = {}
for k in range(1,12):
orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0]
return orient_dict | InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/common/utils.py |
from collections import defaultdict
from typing import Any, Dict, Optional, Tuple, List, Union
import habitat
import numpy as np
from habitat import Config, Dataset
from habitat.core.simulator import Observations
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.core.simulator import SensorSuite
from habitat.core.registry import registry
@baseline_registry.register_env(name="VLNCEDaggerEnv")
class VLNCEDaggerEnv(habitat.RLEnv):
def __init__(self, config: Config, dataset: Optional[Dataset] = None):
super().__init__(config.TASK_CONFIG, dataset)
self.prev_episode_id = "something different"
self.keys = ['rgb', 'rgb_30', 'rgb_60', 'rgb_90', 'rgb_120', 'rgb_150', 'rgb_180', 'rgb_210', 'rgb_240', 'rgb_270', 'rgb_300', 'rgb_330']
self.pano_rgbs_sensors = [{k:self._env.sim._sensors[k] for k in self.keys}]
sim_sensors = []
for sensor_name in ['RGB_SENSOR'] + [k.upper() for k in self.keys if k != 'rgb']:
sensor_cfg = getattr(self._env.sim.habitat_config, sensor_name)
sensor_type = registry.get_sensor(sensor_cfg.TYPE)
assert sensor_type is not None, "invalid sensor type {}".format(
sensor_cfg.TYPE
)
sim_sensors.append(sensor_type(sensor_cfg))
self.sensor_suite = SensorSuite(sim_sensors)
self.current_scene = self._env.sim._current_scene
def reset(self) -> Observations:
observations = self._env.reset()
if self.current_scene != self._env.sim._current_scene:
self.pano_rgbs_sensors = [{k:self._env.sim._sensors[k] for k in self.keys}]
sim_sensors = []
for sensor_name in ['RGB_SENSOR'] + [k.upper() for k in self.keys if k != 'rgb']:
sensor_cfg = getattr(self._env.sim.habitat_config, sensor_name)
sensor_type = registry.get_sensor(sensor_cfg.TYPE)
assert sensor_type is not None, "invalid sensor type {}".format(
sensor_cfg.TYPE
)
sim_sensors.append(sensor_type(sensor_cfg))
self.sensor_suite = SensorSuite(sim_sensors)
self.current_scene = self._env.sim._current_scene
return observations
def get_reward_range(self) -> Tuple[float, float]:
# We don't use a reward for DAgger, but the baseline_registry requires
# we inherit from habitat.RLEnv.
return (0.0, 0.0)
def get_reward(self, observations: Observations) -> float:
return 0.0
def get_done(self, observations: Observations) -> bool:
return self._env.episode_over
def get_info(self, observations: Observations) -> Dict[Any, Any]:
return self.habitat_env.get_metrics()
def get_metrics(self):
return self.habitat_env.get_metrics()
def get_geodesic_dist(self,
node_a: List[float], node_b: List[float]):
return self._env.sim.geodesic_distance(node_a, node_b)
def check_navigability(self, node: List[float]):
return self._env.sim.is_navigable(node)
def get_agent_info(self):
agent_state = self._env.sim.get_agent_state()
heading_vector = quaternion_rotate_vector(
agent_state.rotation.inverse(), np.array([0, 0, -1])
)
heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
return {
"position": agent_state.position.tolist(),
"heading": heading,
"stop": self._env.task.is_stop_called,
}
def get_observation_at(self,
source_position: List[float],
source_rotation: List[Union[int, np.float64]],
keep_agent_at_new_pose: bool = False):
return self._env.sim.get_observations_at(
source_position,
source_rotation,
keep_agent_at_new_pose)
def observations_by_angles(self, angle_list: List[float]):
        r'''Get observations at the desired heading angles.
        Angles are in radians; positive values rotate anticlockwise.'''
obs = []
sim = self._env.sim
init_state = sim.get_agent_state()
prev_angle = 0
left_action = HabitatSimActions.TURN_LEFT
init_amount = sim.get_agent(0).agent_config.action_space[left_action].actuation.amount # turn left
for angle in angle_list:
sim.get_agent(0).agent_config.action_space[left_action].actuation.amount = (angle-prev_angle)*180/np.pi
obs.append(sim.step(left_action))
prev_angle = angle
sim.set_agent_state(init_state.position, init_state.rotation)
sim.get_agent(0).agent_config.action_space[left_action].actuation.amount = init_amount
return obs
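    # Usage sketch (hypothetical `env` instance of this class; angles in radians, anticlockwise):
    # pano_obs = env.observations_by_angles([np.pi / 6, np.pi / 3, np.pi / 2])
    # # one observation per requested heading; the agent state is restored afterwards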
def current_dist_to_goal(self):
sim = self._env.sim
init_state = sim.get_agent_state()
init_distance = self._env.sim.geodesic_distance(
init_state.position, self._env.current_episode.goals[0].position,
)
return init_distance
def current_dist_to_refpath(self, path):
sim = self._env.sim
init_state = sim.get_agent_state()
current_pos = init_state.position
circle_dists = []
for pos in path:
circle_dists.append(
self._env.sim.geodesic_distance(current_pos, pos)
)
# circle_dists = np.linalg.norm(np.array(path)-current_pos, axis=1).tolist()
return circle_dists
def get_cand_idx(self,ref_path,angles,distances,candidate_length):
episode_id = self._env.current_episode.episode_id
if episode_id != self.prev_episode_id:
self.progress = 0
self.prev_sub_goal_pos = [0.0,0.0,0.0]
progress = self.progress
# ref_path = self.envs.current_episodes()[j].reference_path
circle_dists = self.current_dist_to_refpath(ref_path)
circle_bool = np.array(circle_dists) <= 3.0
cand_dists_to_goal = []
if circle_bool.sum() == 0: # no gt point within 3.0m
sub_goal_pos = self.prev_sub_goal_pos
else:
cand_idxes = np.where(circle_bool * (np.arange(0,len(ref_path))>=progress))[0]
if len(cand_idxes) == 0:
sub_goal_pos = ref_path[progress] #prev_sub_goal_pos[perm_index]
else:
compare = np.array(list(range(cand_idxes[0],cand_idxes[0]+len(cand_idxes)))) == cand_idxes
if np.all(compare):
sub_goal_idx = cand_idxes[-1]
else:
sub_goal_idx = np.where(compare==False)[0][0]-1
sub_goal_pos = ref_path[sub_goal_idx]
self.progress = sub_goal_idx
self.prev_sub_goal_pos = sub_goal_pos
for k in range(len(angles)):
angle_k = angles[k]
forward_k = distances[k]
dist_k = self.cand_dist_to_subgoal(angle_k, forward_k, sub_goal_pos)
# distance to subgoal
cand_dists_to_goal.append(dist_k)
# distance to final goal
curr_dist_to_goal = self.current_dist_to_goal()
        # if within the stopping range of the goal (1.5 m), pick the last candidate
        if curr_dist_to_goal < 1.5:
oracle_cand_idx = candidate_length - 1
else:
oracle_cand_idx = np.argmin(cand_dists_to_goal)
self.prev_episode_id = episode_id
# if curr_dist_to_goal == np.inf:
progress100 = self.progress/len(ref_path)
return oracle_cand_idx, progress100#, sub_goal_pos
def cand_dist_to_goal(self, angle: float, forward: float):
r'''get resulting distance to goal by executing
a candidate action'''
sim = self._env.sim
init_state = sim.get_agent_state()
forward_action = HabitatSimActions.MOVE_FORWARD
init_forward = sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
sim.set_agent_state(init_state.position, rotation)
ksteps = int(forward//init_forward)
for k in range(ksteps):
sim.step_without_obs(forward_action)
post_state = sim.get_agent_state()
post_distance = self._env.sim.geodesic_distance(
post_state.position, self._env.current_episode.goals[0].position,
)
# reset agent state
sim.set_agent_state(init_state.position, init_state.rotation)
return post_distance
def cand_dist_to_subgoal(self,
angle: float, forward: float,
sub_goal: Any):
        r'''get resulting distance to the sub-goal (plus the distance travelled)
        by executing a candidate action'''
sim = self._env.sim
init_state = sim.get_agent_state()
forward_action = HabitatSimActions.MOVE_FORWARD
init_forward = sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
sim.set_agent_state(init_state.position, rotation)
ksteps = int(forward//init_forward)
prev_pos = init_state.position
dis = 0.
for k in range(ksteps):
sim.step_without_obs(forward_action)
pos = sim.get_agent_state().position
dis += np.linalg.norm(prev_pos - pos)
prev_pos = pos
post_state = sim.get_agent_state()
post_distance = self._env.sim.geodesic_distance(
post_state.position, sub_goal,
) + dis
# reset agent state
sim.set_agent_state(init_state.position, init_state.rotation)
return post_distance
def change_current_path(self, new_path: Any, collisions: Any):
        '''Record the current path (used in the high-to-low setting).'''
if self._env.current_episode.info is None:
self._env.current_episode.info = {}
if 'current_path' not in self._env.current_episode.info.keys():
self._env.current_episode.info['current_path'] = [np.array(self._env.current_episode.start_position)]
self._env.current_episode.info['current_path'] += new_path
if 'collisions' not in self._env.current_episode.info.keys():
self._env.current_episode.info['collisions'] = []
self._env.current_episode.info['collisions'] += collisions
# def draw_point(self,point,type,map):
# from scripts.draw_map_utils import drawpoint
# drawpoint(point,type,map,self._env.sim)
def update_cur_path(self, new_path: Dict):
if self._env.current_episode.info is None:
self._env.current_episode.info = defaultdict(list)
if 'cur_path' not in self._env.current_episode.info:
self._env.current_episode.info['cur_path'] = []
self._env.current_episode.info['cur_path'] += new_path
def stop_cur_path(self): # not used
assert self._env.current_episode.info is not None
assert 'cur_path' in self._env.current_episode.info.keys()
self._env.current_episode.info['cur_path'][-1]['stop'] = True
def get_pano_rgbs_observations_at(self,
source_position: List,
source_rotation: List,):
self._env.sim.set_agent_state(source_position,source_rotation)
pano_rgbs = self.sensor_suite.get_observations(self._env.sim.get_specific_sensors_observations(self.pano_rgbs_sensors))
return pano_rgbs
def get_agent_state(self):
agent_state = self._env.sim.get_agent_state()
return (agent_state.position,agent_state.rotation)
def set_agent_state(self, position, rotation):
self._env.sim.set_agent_state(position,rotation)
@baseline_registry.register_env(name="VLNCEInferenceEnv")
class VLNCEInferenceEnv(habitat.RLEnv):
def __init__(self, config: Config, dataset: Optional[Dataset] = None):
super().__init__(config.TASK_CONFIG, dataset)
def get_reward_range(self):
return (0.0, 0.0)
def get_reward(self, observations: Observations):
return 0.0
def get_done(self, observations: Observations):
return self._env.episode_over
def get_info(self, observations: Observations):
agent_state = self._env.sim.get_agent_state()
heading_vector = quaternion_rotate_vector(
agent_state.rotation.inverse(), np.array([0, 0, -1])
)
heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
return {
"position": agent_state.position.tolist(),
"heading": heading,
"stop": self._env.task.is_stop_called,
}
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/common/environments.py |
import torch
class _AuxLosses:
def __init__(self):
self._losses = {}
self._loss_alphas = {}
self._is_active = False
def clear(self):
self._losses.clear()
self._loss_alphas.clear()
def register_loss(self, name, loss, alpha=1.0):
assert self.is_active()
assert name not in self._losses
self._losses[name] = loss
self._loss_alphas[name] = alpha
def get_loss(self, name):
return self._losses[name]
def reduce(self, mask):
assert self.is_active()
total = torch.tensor(0.0).cuda()
for k in self._losses.keys():
k_loss = torch.masked_select(self._losses[k], mask).mean()
total = total + self._loss_alphas[k] * k_loss
return total
def is_active(self):
return self._is_active
def activate(self):
self._is_active = True
def deactivate(self):
self._is_active = False
AuxLosses = _AuxLosses()
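# Usage sketch for the AuxLosses singleton (the loss tensors and mask are illustrative):
# AuxLosses.activate()
# AuxLosses.register_loss("progress", progress_loss, alpha=1.0)
# total_aux = AuxLosses.reduce(valid_step_mask)  # masked, alpha-weighted sum of registered losses
# AuxLosses.clear()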
| InternVideo-main | Downstream/Visual-Language-Navigation/vlnce_baselines/common/aux_losses.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
import numpy as np
import random
import os
import sys  # used by init_distributed_mode for sys.exit()
from metrics import compute_metrics, tensor_text_to_video_metrics, tensor_video_to_text_sim
import time
import argparse
from modules.tokenization_clip import SimpleTokenizer as ClipTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import CLIP4Clip
from modules.optimization import BertAdam
from util import parallel_apply, get_logger
from dataloaders.data_dataloaders import DATALOADER_DICT
import torch.distributed as dist
import subprocess
import torch.nn as nn
from scipy.special import softmax
# torch.distributed.init_process_group(backend="nccl")
global logger
def get_args(description='CLIP4Clip on Retrieval Task'):
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument('--train_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_train.9k.csv', help='')
parser.add_argument('--val_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv', help='')
parser.add_argument('--data_path', type=str, default='data/caption.pickle', help='data pickle file path')
# parser.add_argument('--features_path', type=str, default='data/videos_feature.pickle', help='feature path')
parser.add_argument('--features_path', type=str, default='s3://video_pub/MSR-VTT/videos', help='feature path')
parser.add_argument('--num_thread_reader', type=int, default=4, help='')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequency')
parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
parser.add_argument('--seed', type=int, default=42, help='random seed')
parser.add_argument('--max_words', type=int, default=20, help='')
parser.add_argument('--max_frames', type=int, default=100, help='')
parser.add_argument('--feature_framerate', type=int, default=1, help='')
parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
parser.add_argument("--output_dir", default='/path/to/save/your/experiments/', type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
parser.add_argument("--resume_model", default=None, type=str, required=False, help="Resume train model.")
parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--task_type", default="retrieval", type=str, help="Point the task `retrieval` to finetune.")
parser.add_argument("--datatype", default="msrvtt", type=str, help="Point the dataset to finetune.")
parser.add_argument("--world_size", default=0, type=int, help="distribted training")
parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
parser.add_argument("--rank", default=0, type=int, help="distribted training")
parser.add_argument('--coef_lr', type=float, default=1., help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether to use MIL as in Miech et al. (2020).")
parser.add_argument('--sampled_use_mil', action='store_true', help="Whether MIL, has a high priority than use_mil.")
parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help="Layer NO. of visual.")
parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help="Layer NO. of cross.")
parser.add_argument('--loose_type', action='store_true', help="Default using tight type for retrieval.")
parser.add_argument('--expand_msrvtt_sentences', action='store_true', help="")
parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2],
help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.")
parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2],
help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.")
parser.add_argument('--freeze_layer_num', type=int, default=0, help="Layer NO. of CLIP need to freeze.")
parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2],
help="0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.")
parser.add_argument('--linear_patch', type=str, default="2d", choices=["2d", "3d"],
help="linear projection of flattened patches.")
parser.add_argument('--sim_header', type=str, default="meanP",
choices=["meanP", "seqLSTM", "seqTransf", "tightTransf"],
help="choice a similarity header.")
#### CLIP EVL ######
parser.add_argument("--pretrained_clip_name", default="ViT-B/32", type=str, help="ViT Base or Large")
parser.add_argument("--pretrained_path", type=str, default=None, help="Give the upstream pretrained checkpoint (not the finetuned one)")
parser.add_argument("--clip_evl", action='store_true', help="whether to activate clip_evl")
parser.add_argument("--mergeclip", type=bool, default=False, help="whether to merge clip weight")
parser.add_argument("--mergeweight", type=float, default=0.5, help="merge weight from 0 to 1")
### DRL ###
parser.add_argument("--interaction", type=str, default='no', help="interaction mode, refer to DRL")
parser.add_argument("--wti_arch", type=int, default=0, help="wti architecture, refer to DRL")
parser.add_argument("--cdcr", type=int, default=0, help="which cdcr type, refer to DRL")
args = parser.parse_args()
if args.sim_header == "tightTransf":
args.loose_type = False
    # Check parameters
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
return args
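# Example invocation (a sketch; the dataset and checkpoint paths are placeholders):
# python main_task_retrieval.py --do_eval --datatype msrvtt \
#     --val_csv data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv \
#     --features_path <path/to/MSRVTT/videos> \
#     --pretrained_clip_name ViT-B/32 --output_dir <path/to/experiment/dir>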
def setup_for_distributed(is_master):
"""
    This function disables printing when not in the master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
# launched with torch.distributed.launch
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
# launched with submitit on a slurm cluster
elif 'SLURM_PROCID' in os.environ:
#args.rank = int(os.environ['SLURM_PROCID'])
#args.gpu = args.rank % torch.cuda.device_count()
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = os.environ['SLURM_NTASKS']
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list)
)
master_port = os.environ.get('MASTER_PORT', '29491')
## manually set is also ok ##
master_port = "29411"
os.environ['MASTER_PORT'] = master_port
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
os.environ['LOCAL_SIZE'] = str(num_gpus)
args.dist_url = 'env://'
args.world_size = int(ntasks)
args.rank = int(proc_id)
args.gpu = int(proc_id % num_gpus)
print(f'SLURM MODE: proc_id: {proc_id}, ntasks: {ntasks}, node_list: {node_list}, num_gpus:{num_gpus}, addr:{addr}, master port:{master_port}' )
# launched naively with `python main_dino.py`
# we manually add MASTER_ADDR and MASTER_PORT to env variables
elif torch.cuda.is_available():
print('Will run the code on one GPU.')
args.rank, args.gpu, args.world_size = 0, 0, 1
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
else:
print('Does not support training without GPU.')
sys.exit(1)
dist.init_process_group(
backend="nccl",
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.cuda.set_device(args.gpu)
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
dist.barrier()
setup_for_distributed(args.rank == 0)
def set_seed_logger(args):
global logger
# predefining random initial seeds
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
init_distributed_mode(args)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
logger = get_logger(os.path.join(args.output_dir, "log.txt"))
if args.rank == 0:
logger.info("Effective parameters:")
for key in sorted(args.__dict__):
logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
return args
def init_device(args, local_rank):
global logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu", args.rank)
n_gpu = torch.cuda.device_count()
logger.info("device: {} n_gpu: {}".format(device, n_gpu))
args.n_gpu = n_gpu
if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
if args.init_model:
model_state_dict = torch.load(args.init_model, map_location='cpu')
else:
model_state_dict = None
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
# model.to(device)
model.cuda()
return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
if hasattr(model, 'module'):
model = model.module
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)]
no_decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)]
decay_clip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." in n]
decay_noclip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." not in n]
no_decay_clip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." in n]
no_decay_noclip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." not in n]
weight_decay = 0.2
optimizer_grouped_parameters = [
{'params': [p for n, p in decay_clip_param_tp], 'weight_decay': weight_decay, 'lr': args.lr * coef_lr},
{'params': [p for n, p in decay_noclip_param_tp], 'weight_decay': weight_decay},
{'params': [p for n, p in no_decay_clip_param_tp], 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
{'params': [p for n, p in no_decay_noclip_param_tp], 'weight_decay': 0.0}
]
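    # The four groups above apply weight decay only to non-bias/LayerNorm parameters and
    # use a separate learning rate (args.lr * coef_lr) for the CLIP backbone ("clip." params).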
scheduler = None
optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-6,
t_total=num_train_optimization_steps, weight_decay=weight_decay,
max_grad_norm=1.0)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[torch.cuda.current_device()], output_device=torch.cuda.current_device(),
find_unused_parameters=False)
return optimizer, scheduler, model
def save_model(epoch, args, model, optimizer, tr_loss, type_name=""):
    # Only save the model itself
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(
args.output_dir, "pytorch_model.bin")
optimizer_state_file = os.path.join(
args.output_dir, "pytorch_opt.bin")
torch.save(model_to_save.state_dict(), output_model_file)
torch.save({
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict(),
'loss': tr_loss,
}, optimizer_state_file)
logger.info("Model saved to %s", output_model_file)
logger.info("Optimizer saved to %s", optimizer_state_file)
return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
if model_file is None or len(model_file) == 0:
model_file = os.path.join(args.output_dir, "pytorch_model.bin")
if os.path.exists(model_file):
model_state_dict = torch.load(model_file, map_location='cpu')
if args.rank == 0:
logger.info("Model loaded from %s", model_file)
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
model.to(device)
else:
model = None
return model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0):
global logger
torch.cuda.empty_cache()
model.train()
log_step = args.n_display
start_time = time.time()
total_loss = 0
for step, batch in enumerate(train_dataloader):
if n_gpu == 1:
            # multi-gpu does the scattering itself
batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
input_ids, input_mask, segment_ids, video, video_mask = batch
loss = model(input_ids, segment_ids, input_mask, video, video_mask)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
total_loss += float(loss)
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
if scheduler is not None:
scheduler.step() # Update learning rate schedule
optimizer.step()
optimizer.zero_grad()
# https://github.com/openai/CLIP/issues/46
if hasattr(model, 'module'):
torch.clamp_(model.module.clip.logit_scale.data, max=np.log(100))
else:
torch.clamp_(model.clip.logit_scale.data, max=np.log(100))
global_step += 1
if global_step % log_step == 0 and args.rank == 0:
logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1,
args.epochs, step + 1,
len(train_dataloader), "-".join([str('%.9f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),
float(loss),
(time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
start_time = time.time()
total_loss = total_loss / len(train_dataloader)
return total_loss, global_step
def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list):
sim_matrix = []
for idx1, b1 in enumerate(batch_list_t):
input_mask, segment_ids, *_tmp = b1
sequence_output = batch_sequence_output_list[idx1]
each_row = []
for idx2, b2 in enumerate(batch_list_v):
video_mask, *_tmp = b2
visual_output = batch_visual_output_list[idx2]
b1b2_logits, *_tmp = model.get_similarity_logits(sequence_output, visual_output, input_mask, video_mask,
loose_type=model.loose_type)
b1b2_logits = b1b2_logits.cpu().detach().numpy()
each_row.append(b1b2_logits)
each_row = np.concatenate(tuple(each_row), axis=-1)
sim_matrix.append(each_row)
return sim_matrix
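# Each entry of the list returned above is a (text_batch_i x total_videos) block; callers
# concatenate the blocks along axis 0 to obtain the full text-to-video similarity matrix.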
def eval_epoch(args, model, test_dataloader, device, n_gpu):
if hasattr(model, 'module'):
model = model.module.to(device)
else:
model = model.to(device)
# #################################################################
    ## the variables below are used for multi-sentence retrieval
    # multi_sentence_: important flag for eval
    # cut_off_points: used to tag the labels when calculating the metrics
# sentence_num: used to cut the sentence representation
# video_num: used to cut the video representation
# #################################################################
multi_sentence_ = False
cut_off_points_, sentence_num_, video_num_ = [], -1, -1
if hasattr(test_dataloader.dataset, 'multi_sentence_per_video') \
and test_dataloader.dataset.multi_sentence_per_video:
multi_sentence_ = True
cut_off_points_ = test_dataloader.dataset.cut_off_points
sentence_num_ = test_dataloader.dataset.sentence_num
video_num_ = test_dataloader.dataset.video_num
cut_off_points_ = [itm - 1 for itm in cut_off_points_]
if multi_sentence_:
logger.warning("Eval under the multi-sentence per video clip setting.")
logger.warning("sentence num: {}, video num: {}".format(sentence_num_, video_num_))
model.eval()
with torch.no_grad():
batch_list_t = []
batch_list_v = []
batch_sequence_output_list, batch_visual_output_list = [], []
total_video_num = 0
# ----------------------------
# 1. cache the features
# ----------------------------
for bid, batch in enumerate(test_dataloader):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, video, video_mask = batch
if multi_sentence_:
                # multi-sentence retrieval: one clip has two or more descriptions
b, *_t = video.shape
sequence_output = model.get_sequence_output(input_ids, segment_ids, input_mask)
batch_sequence_output_list.append(sequence_output)
batch_list_t.append((input_mask, segment_ids,))
s_, e_ = total_video_num, total_video_num + b
filter_inds = [itm - s_ for itm in cut_off_points_ if itm >= s_ and itm < e_]
if len(filter_inds) > 0:
video, video_mask = video[filter_inds, ...], video_mask[filter_inds, ...]
visual_output = model.get_visual_output(video, video_mask)
batch_visual_output_list.append(visual_output)
batch_list_v.append((video_mask,))
total_video_num += b
else:
sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
batch_sequence_output_list.append(sequence_output)
batch_list_t.append((input_mask, segment_ids,))
batch_visual_output_list.append(visual_output)
batch_list_v.append((video_mask,))
print("{}/{}\r".format(bid, len(test_dataloader)), end="")
# ----------------------------------
# 2. calculate the similarity
# ----------------------------------
if n_gpu < 1:
device_ids = list(range(n_gpu))
batch_list_t_splits = []
batch_list_v_splits = []
batch_t_output_splits = []
batch_v_output_splits = []
            batch_len = len(batch_list_t)
            split_len = (batch_len + n_gpu - 1) // n_gpu
for dev_id in device_ids:
s_, e_ = dev_id * split_len, (dev_id + 1) * split_len
if dev_id == 0:
batch_list_t_splits.append(batch_list_t[s_:e_])
batch_list_v_splits.append(batch_list_v)
batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
batch_v_output_splits.append(batch_visual_output_list)
else:
devc = torch.device('cuda:{}'.format(str(dev_id)))
devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_t[s_:e_]]
batch_list_t_splits.append(devc_batch_list)
devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_v]
batch_list_v_splits.append(devc_batch_list)
devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
batch_t_output_splits.append(devc_batch_list)
devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
batch_v_output_splits.append(devc_batch_list)
parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id],
batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
sim_matrix = []
for idx in range(len(parallel_outputs)):
sim_matrix += parallel_outputs[idx]
sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
else:
sim_matrix = _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list)
sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
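        # Dual-softmax re-weighting applied at inference time: each similarity is scaled by
        # the softmax over the opposite axis (a DSL-style prior), used for the "DSL" metrics below.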
sim_matrix_dsl = sim_matrix * softmax(sim_matrix, axis=0)
sim_matrix_dsl_T = sim_matrix.T * softmax(sim_matrix.T, axis=0)
if multi_sentence_:
logger.info("before reshape, sim matrix size: {} x {}".format(sim_matrix.shape[0], sim_matrix.shape[1]))
cut_off_points2len_ = [itm + 1 for itm in cut_off_points_]
max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)])
sim_matrix_new = []
for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_):
sim_matrix_new.append(np.concatenate((sim_matrix[s_:e_],
np.full((max_length-e_+s_, sim_matrix.shape[1]), -np.inf)), axis=0))
sim_matrix = np.stack(tuple(sim_matrix_new), axis=0)
logger.info("after reshape, sim matrix size: {} x {} x {}".
format(sim_matrix.shape[0], sim_matrix.shape[1], sim_matrix.shape[2]))
tv_metrics = tensor_text_to_video_metrics(sim_matrix)
vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix))
# dsl
cut_off_points2len_ = [itm + 1 for itm in cut_off_points_]
max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)])
sim_matrix_new = []
for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_):
sim_matrix_new.append(np.concatenate((sim_matrix_dsl[s_:e_],
np.full((max_length-e_+s_, sim_matrix_dsl.shape[1]), -np.inf)), axis=0))
sim_matrix_dsl = np.stack(tuple(sim_matrix_new), axis=0)
logger.info("after reshape, sim matrix size: {} x {} x {}".
format(sim_matrix_dsl.shape[0], sim_matrix_dsl.shape[1], sim_matrix_dsl.shape[2]))
dsl_tv_metrics = tensor_text_to_video_metrics(sim_matrix_dsl)
dsl_vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix_dsl))
else:
logger.info("sim matrix size: {}, {}".format(sim_matrix.shape[0], sim_matrix.shape[1]))
tv_metrics = compute_metrics(sim_matrix)
vt_metrics = compute_metrics(sim_matrix.T)
dsl_tv_metrics = compute_metrics(sim_matrix_dsl)
# dsl_vt_metrics = compute_metrics(sim_matrix_dsl.T)
dsl_vt_metrics = compute_metrics(sim_matrix_dsl_T)
logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0])))
# dsl output
logger.info("------------------------------------------------------------")
logger.info("DSL Text-to-Video:")
logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.
format(dsl_tv_metrics['R1'], dsl_tv_metrics['R5'], dsl_tv_metrics['R10'], dsl_tv_metrics['MR'], dsl_tv_metrics['MeanR']))
logger.info("DSL Video-to-Text:")
logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'.
format(dsl_vt_metrics['R1'], dsl_vt_metrics['R5'], dsl_vt_metrics['R10'], dsl_vt_metrics['MR'], dsl_vt_metrics['MeanR']))
logger.info("------------------------------------------------------------")
logger.info("Text-to-Video:")
logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.
format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR']))
logger.info("Video-to-Text:")
logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'.
format(vt_metrics['R1'], vt_metrics['R5'], vt_metrics['R10'], vt_metrics['MR'], vt_metrics['MeanR']))
R1 = tv_metrics['R1']
return R1
def main():
global logger
args = get_args()
args = set_seed_logger(args)
device, n_gpu = init_device(args, args.rank)
tokenizer = ClipTokenizer()
assert args.task_type == "retrieval"
model = init_model(args, device, n_gpu, args.rank)
## ####################################
# freeze testing
## ####################################
assert args.freeze_layer_num <= 12 and args.freeze_layer_num >= -1
if hasattr(model, "clip") and args.freeze_layer_num > -1:
for name, param in model.clip.named_parameters():
# top layers always need to train
if name.find("ln_final.") == 0 or name.find("text_projection") == 0 or name.find("logit_scale") == 0 \
or name.find("visual.ln_post.") == 0 or name.find("visual.proj") == 0:
continue # need to train
elif name.find("visual.transformer.resblocks.") == 0 or name.find("transformer.resblocks.") == 0:
layer_num = int(name.split(".resblocks.")[1].split(".")[0])
if layer_num >= args.freeze_layer_num:
continue # need to train
if name.find('.dpe.') >=0 or name.find('.dec.') >= 0:
continue # need to train
if args.linear_patch == "3d" and name.find("conv2."):
continue # need to train
else:
# paramenters which < freeze_layer_num will be freezed
param.requires_grad = False
print('freezing: ', name, name.find('dpe.'), name.find('dec.'), param.shape)
# exit(0)
## ####################################
# dataloader loading
## ####################################
assert args.datatype in DATALOADER_DICT
assert DATALOADER_DICT[args.datatype]["test"] is not None \
or DATALOADER_DICT[args.datatype]["val"] is not None
test_dataloader, test_length = None, 0
if DATALOADER_DICT[args.datatype]["test"] is not None:
test_dataloader, test_length = DATALOADER_DICT[args.datatype]["test"](args, tokenizer)
if DATALOADER_DICT[args.datatype]["val"] is not None:
val_dataloader, val_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer, subset="val")
else:
val_dataloader, val_length = test_dataloader, test_length
## report validation results if the ["test"] is None
if test_dataloader is None:
test_dataloader, test_length = val_dataloader, val_length
if args.rank == 0:
logger.info("***** Running test *****")
logger.info(" Num examples = %d", test_length)
logger.info(" Batch size = %d", args.batch_size_val)
logger.info(" Num steps = %d", len(test_dataloader))
logger.info("***** Running val *****")
logger.info(" Num examples = %d", val_length)
## ####################################
# train and eval
## ####################################
if args.do_train:
train_dataloader, train_length, train_sampler = DATALOADER_DICT[args.datatype]["train"](args, tokenizer)
        num_train_optimization_steps = int((len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                           / args.gradient_accumulation_steps) * args.epochs
coef_lr = args.coef_lr
optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.rank, coef_lr=coef_lr)
if args.rank == 0:
logger.info("***** Running training *****")
logger.info(" Num examples = %d", train_length)
logger.info(" Batch size = %d", args.batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
best_score = 0.00001
best_output_model_file = "None"
## ##############################################################
# resume optimizer state besides loss to continue train
## ##############################################################
resumed_epoch = 0
if args.resume_model:
checkpoint = torch.load(args.resume_model, map_location='cpu')
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
resumed_epoch = checkpoint['epoch']+1
resumed_loss = checkpoint['loss']
print('begin training!!!!!!!!')
global_step = 0
for epoch in range(resumed_epoch, args.epochs):
train_sampler.set_epoch(epoch)
tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer,
scheduler, global_step, local_rank=args.rank)
if args.rank == 0:
logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss)
########## here we save the last ckpt #############
########## feel free to modify this for saving the best ckpt #######
output_model_file = save_model(epoch, args, model, optimizer, tr_loss, type_name="")
## Run on val dataset, this process is *TIME-consuming*.
# logger.info("Eval on val dataset")
# R1 = eval_epoch(args, model, val_dataloader, device, n_gpu)
R1 = eval_epoch(args, model, test_dataloader, device, n_gpu)
if best_score <= R1:
best_score = R1
best_output_model_file = output_model_file
logger.info("The best model is: {}, the R1 is: {:.4f}".format(best_output_model_file, best_score))
## Uncomment if want to test on the best checkpoint
# if args.local_rank == 0:
# model = load_model(-1, args, n_gpu, device, model_file=best_output_model_file)
# eval_epoch(args, model, test_dataloader, device, n_gpu)
elif args.do_eval:
if args.rank == 0:
eval_epoch(args, model, test_dataloader, device, n_gpu)
if __name__ == "__main__":
main()
| InternVideo-main | Downstream/Video-Text-Retrieval/main_task_retrieval.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import numpy as np
import torch
def compute_metrics(x):
sx = np.sort(-x, axis=1)
d = np.diag(-x)
d = d[:, np.newaxis]
ind = sx - d
ind = np.where(ind == 0)
ind = ind[1]
metrics = {}
metrics['R1'] = float(np.sum(ind == 0)) * 100 / len(ind)
metrics['R5'] = float(np.sum(ind < 5)) * 100 / len(ind)
metrics['R10'] = float(np.sum(ind < 10)) * 100 / len(ind)
metrics['MR'] = np.median(ind) + 1
metrics["MedianR"] = metrics['MR']
metrics["MeanR"] = np.mean(ind) + 1
metrics["cols"] = [int(i) for i in list(ind)]
return metrics
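# Usage sketch for compute_metrics (the 3x3 similarity matrix is illustrative; in this repo
# rows are texts and columns are videos, with matched pairs on the diagonal):
# sims = np.array([[0.9, 0.1, 0.2],
#                  [0.3, 0.8, 0.1],
#                  [0.2, 0.4, 0.7]])
# m = compute_metrics(sims)  # m['R1'] == 100.0 since every diagonal entry ranks first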
def print_computed_metrics(metrics):
r1 = metrics['R1']
r5 = metrics['R5']
r10 = metrics['R10']
mr = metrics['MR']
print('R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}'.format(r1, r5, r10, mr))
# below two functions directly come from: https://github.com/Deferf/Experiments
def tensor_text_to_video_metrics(sim_tensor, top_k = [1,5,10]):
if not torch.is_tensor(sim_tensor):
sim_tensor = torch.tensor(sim_tensor)
# Permute sim_tensor so it represents a sequence of text-video similarity matrices.
# Then obtain the double argsort to position the rank on the diagonal
stacked_sim_matrices = sim_tensor.permute(1, 0, 2)
first_argsort = torch.argsort(stacked_sim_matrices, dim = -1, descending= True)
second_argsort = torch.argsort(first_argsort, dim = -1, descending= False)
# Extracts ranks i.e diagonals
ranks = torch.flatten(torch.diagonal(second_argsort, dim1 = 1, dim2 = 2))
# Now we need to extract valid ranks, as some belong to inf padding values
permuted_original_data = torch.flatten(torch.diagonal(sim_tensor, dim1 = 0, dim2 = 2))
mask = ~ torch.logical_or(torch.isinf(permuted_original_data), torch.isnan(permuted_original_data))
valid_ranks = ranks[mask]
    # A quick dimension check validates our results; there may be other correctness tests pending,
    # such as dot-product localization, but that is for another time.
#assert int(valid_ranks.shape[0]) == sum([len(text_dict[k]) for k in text_dict])
if not torch.is_tensor(valid_ranks):
valid_ranks = torch.tensor(valid_ranks)
results = {f"R{k}": float(torch.sum(valid_ranks < k) * 100 / len(valid_ranks)) for k in top_k}
results["MedianR"] = float(torch.median(valid_ranks + 1))
results["MeanR"] = float(np.mean(valid_ranks.numpy() + 1))
results["Std_Rank"] = float(np.std(valid_ranks.numpy() + 1))
results['MR'] = results["MedianR"]
return results
def tensor_video_to_text_sim(sim_tensor):
if not torch.is_tensor(sim_tensor):
sim_tensor = torch.tensor(sim_tensor)
# Code to avoid nans
sim_tensor[sim_tensor != sim_tensor] = float('-inf')
# Forms a similarity matrix for use with rank at k
values, _ = torch.max(sim_tensor, dim=1, keepdim=True)
return torch.squeeze(values).T
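# Note: both helpers above operate on the padded 3-D similarity tensor built by the retrieval
# scripts for the multi-sentence-per-video setting; the -inf/NaN padding entries are masked out
# (text-to-video) or suppressed by the max over the sentence dimension (video-to-text).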
| InternVideo-main | Downstream/Video-Text-Retrieval/metrics.py |
import torch
import torch.nn as nn
import threading
from torch._utils import ExceptionWrapper
import logging
def get_a_var(obj):
if isinstance(obj, torch.Tensor):
return obj
if isinstance(obj, list) or isinstance(obj, tuple):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def parallel_apply(fct, model, inputs, device_ids):
modules = nn.parallel.replicate(model, device_ids)
assert len(modules) == len(inputs)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input):
torch.set_grad_enabled(grad_enabled)
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = fct(module, *input)
with lock:
results[i] = output
except Exception:
with lock:
results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device))
if len(modules) > 1:
threads = [threading.Thread(target=_worker, args=(i, module, input))
for i, (module, input) in enumerate(zip(modules, inputs))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
return outputs
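# Usage sketch (mirrors how the retrieval scripts call this helper; names come from there):
# outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
# # one worker thread per device; each calls fct(replica, *inputs[i]) and re-raises exceptions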
def get_logger(filename=None):
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
if filename is not None:
handler = logging.FileHandler(filename)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger | InternVideo-main | Downstream/Video-Text-Retrieval/util.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
import numpy as np
import random
import os
import sys  # used by init_distributed_mode for sys.exit()
from metrics import compute_metrics, tensor_text_to_video_metrics, tensor_video_to_text_sim
import time
import argparse
from modules.tokenization_clip import SimpleTokenizer as ClipTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import CLIP4Clip
from modules.optimization import BertAdam
from util import parallel_apply, get_logger
from dataloaders.data_dataloaders import DATALOADER_DICT
import torch.distributed as dist
import subprocess
from ipdb import set_trace
import torch.nn as nn
from scipy.special import softmax
# torch.distributed.init_process_group(backend="nccl")
global logger
def get_args(description='CLIP4Clip on Retrieval Task'):
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument('--train_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_train.9k.csv', help='')
parser.add_argument('--val_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv', help='')
parser.add_argument('--data_path', type=str, default='data/caption.pickle', help='data pickle file path')
# parser.add_argument('--features_path', type=str, default='data/videos_feature.pickle', help='feature path')
parser.add_argument('--features_path', type=str, default='s3://video_pub/MSR-VTT/videos', help='feature path')
parser.add_argument('--num_thread_reader', type=int, default=4, help='')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequency')
parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
parser.add_argument('--seed', type=int, default=42, help='random seed')
parser.add_argument('--max_words', type=int, default=20, help='')
parser.add_argument('--max_frames', type=int, default=100, help='')
parser.add_argument('--feature_framerate', type=int, default=1, help='')
parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
#parser.add_argument("--output_dir", default=None, type=str, required=True,
# help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--output_dir", default='/mnt/lustre/xujilan.vendor/exps/clip4clip', type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
parser.add_argument("--resume_model", default=None, type=str, required=False, help="Resume train model.")
parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--task_type", default="retrieval", type=str, help="Point the task `retrieval` to finetune.")
parser.add_argument("--datatype", default="msrvtt", type=str, help="Point the dataset to finetune.")
parser.add_argument("--world_size", default=0, type=int, help="distribted training")
parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
parser.add_argument("--rank", default=0, type=int, help="distribted training")
parser.add_argument('--coef_lr', type=float, default=1., help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether to use MIL as in Miech et al. (2020).")
parser.add_argument('--sampled_use_mil', action='store_true', help="Whether MIL, has a high priority than use_mil.")
parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help="Layer NO. of visual.")
parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help="Layer NO. of cross.")
parser.add_argument('--loose_type', action='store_true', help="Default using tight type for retrieval.")
parser.add_argument('--expand_msrvtt_sentences', action='store_true', help="")
parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2],
help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.")
parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2],
help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.")
parser.add_argument('--freeze_layer_num', type=int, default=0, help="Layer NO. of CLIP need to freeze.")
parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2],
help="0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.")
parser.add_argument('--linear_patch', type=str, default="2d", choices=["2d", "3d"],
help="linear projection of flattened patches.")
parser.add_argument('--sim_header', type=str, default="meanP",
choices=["meanP", "seqLSTM", "seqTransf", "tightTransf"],
help="choice a similarity header.")
#### CLIP KC/EVL ######
parser.add_argument("--zeroshot", action='store_true', help="Choose a CLIP version")
parser.add_argument("--pretrained_clip_name", default="ViT-B/32", type=str, help="Choose a CLIP version")
parser.add_argument("--clip_evl", action='store_true', help="Choose a CLIP version")
parser.add_argument("--clip_kc", action='store_true', help="Choose a CLIP version")
parser.add_argument("--use_dsl", action='store_true', help="Choose a CLIP version")
parser.add_argument("--clip_kc2", action='store_true', help="This is for ViT-B/16")
parser.add_argument("--clip_kc4", action='store_true', help="This is for ViT-L/14")
### DRL ###
parser.add_argument("--interaction", type=str, default='no', help="Choose a CLIP version")
parser.add_argument("--wti_arch", type=int, default=0, help="Choose a CLIP version")
parser.add_argument("--cdcr", type=int, default=0, help="Choose a CLIP version")
parser.add_argument("--pretrained_path", type=str, default=None, help="Choose a CLIP version")
parser.add_argument("--mergeclip", type=bool, default=False, help="Choose a CLIP version")
parser.add_argument("--mergeweight", type=float, default=0.5, help="Choose a CLIP version")
parser.add_argument("--use_capdecoder", type=bool, default=False, help="Choose a CLIP version")
parser.add_argument("--finetuned_path", type=str, default=None, help="Choose a CLIP version")
args = parser.parse_args()
if args.sim_header == "tightTransf":
args.loose_type = False
    # Check parameters
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
return args
def setup_for_distributed(is_master):
"""
    This function disables printing when not in the master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
# launched with torch.distributed.launch
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
# launched with submitit on a slurm cluster
elif 'SLURM_PROCID' in os.environ:
#args.rank = int(os.environ['SLURM_PROCID'])
#args.gpu = args.rank % torch.cuda.device_count()
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = os.environ['SLURM_NTASKS']
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list)
)
master_port = os.environ.get('MASTER_PORT', '29498')
master_port = "29481"
os.environ['MASTER_PORT'] = master_port
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
os.environ['LOCAL_SIZE'] = str(num_gpus)
args.dist_url = 'env://'
args.world_size = int(ntasks)
args.rank = int(proc_id)
args.gpu = int(proc_id % num_gpus)
print(f'SLURM MODE: proc_id: {proc_id}, ntasks: {ntasks}, node_list: {node_list}, num_gpus:{num_gpus}, addr:{addr}, master port:{master_port}' )
# launched naively with `python main_dino.py`
# we manually add MASTER_ADDR and MASTER_PORT to env variables
elif torch.cuda.is_available():
print('Will run the code on one GPU.')
args.rank, args.gpu, args.world_size = 0, 0, 1
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
else:
print('Does not support training without GPU.')
sys.exit(1)
dist.init_process_group(
backend="nccl",
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.cuda.set_device(args.gpu)
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
dist.barrier()
setup_for_distributed(args.rank == 0)
def set_seed_logger(args):
global logger
# predefining random initial seeds
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# world_size = torch.distributed.get_world_size()
# torch.cuda.set_device(args.local_rank)
# args.world_size = world_size
# rank = torch.distributed.get_rank()
# args.rank = rank
init_distributed_mode(args)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
logger = get_logger(os.path.join(args.output_dir, "log.txt"))
if args.rank == 0:
logger.info("Effective parameters:")
for key in sorted(args.__dict__):
logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
return args
def init_device(args, local_rank):
global logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu", args.rank)
n_gpu = torch.cuda.device_count()
logger.info("device: {} n_gpu: {}".format(device, n_gpu))
args.n_gpu = n_gpu
if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
if args.init_model:
model_state_dict = torch.load(args.init_model, map_location='cpu')
else:
model_state_dict = None
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
# set_trace()
### here we load the finetuned model (if needed) ###
if args.finetuned_path is not None:
finetuned_ckpt = torch.load(args.finetuned_path, map_location='cpu')
model.load_state_dict(finetuned_ckpt)
# set_trace()
model.cuda()
return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
if hasattr(model, 'module'):
model = model.module
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
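    # Split parameters into four groups: (weight decay vs. no decay) x (CLIP backbone
    # vs. newly added modules). The "clip." groups are given a scaled learning rate
    # (args.lr * coef_lr) so the pretrained backbone is updated more gently.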
decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)]
no_decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)]
decay_clip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." in n]
decay_noclip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." not in n]
no_decay_clip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." in n]
no_decay_noclip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." not in n]
weight_decay = 0.2
optimizer_grouped_parameters = [
{'params': [p for n, p in decay_clip_param_tp], 'weight_decay': weight_decay, 'lr': args.lr * coef_lr},
{'params': [p for n, p in decay_noclip_param_tp], 'weight_decay': weight_decay},
{'params': [p for n, p in no_decay_clip_param_tp], 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
{'params': [p for n, p in no_decay_noclip_param_tp], 'weight_decay': 0.0}
]
scheduler = None
optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-6,
t_total=num_train_optimization_steps, weight_decay=weight_decay,
max_grad_norm=1.0)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[torch.cuda.current_device()], output_device=torch.cuda.current_device(),
find_unused_parameters=False)
#model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
# output_device=local_rank, find_unused_parameters=True)
return optimizer, scheduler, model
def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list):
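    # Builds the text-to-video similarity matrix block by block: every cached text
    # batch is scored against every cached video batch, each row block is concatenated
    # along the video axis, and the list of row blocks is returned to the caller.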
sim_matrix = []
for idx1, b1 in enumerate(batch_list_t):
input_mask, segment_ids, *_tmp = b1
sequence_output = batch_sequence_output_list[idx1]
each_row = []
for idx2, b2 in enumerate(batch_list_v):
video_mask, *_tmp = b2
visual_output = batch_visual_output_list[idx2]
b1b2_logits, *_tmp = model.get_similarity_logits(sequence_output, visual_output, input_mask, video_mask,
loose_type=model.loose_type)
b1b2_logits = b1b2_logits.cpu().detach().numpy()
each_row.append(b1b2_logits)
each_row = np.concatenate(tuple(each_row), axis=-1)
sim_matrix.append(each_row)
return sim_matrix
def eval_epoch(args, model, test_dataloader, device, n_gpu):
if hasattr(model, 'module'):
model = model.module.to(device)
else:
model = model.to(device)
# #################################################################
    ## the variables below are used for multi-sentence retrieval
    # multi_sentence_: important flag for eval
    # cut_off_points: used to tag the labels when calculating the metrics
    # sentence_num: used to cut the sentence representations
    # video_num: used to cut the video representations
# #################################################################
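    # Illustration with hypothetical numbers: three videos carrying 2, 1 and 3 captions
    # give cut_off_points = [2, 3, 6]; after the "-1" shift below this becomes
    # [1, 2, 5], i.e. the index of the last caption belonging to each video.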
multi_sentence_ = False
cut_off_points_, sentence_num_, video_num_ = [], -1, -1
if hasattr(test_dataloader.dataset, 'multi_sentence_per_video') \
and test_dataloader.dataset.multi_sentence_per_video:
multi_sentence_ = True
cut_off_points_ = test_dataloader.dataset.cut_off_points
sentence_num_ = test_dataloader.dataset.sentence_num
video_num_ = test_dataloader.dataset.video_num
cut_off_points_ = [itm - 1 for itm in cut_off_points_]
if multi_sentence_:
logger.warning("Eval under the multi-sentence per video clip setting.")
logger.warning("sentence num: {}, video num: {}".format(sentence_num_, video_num_))
model.eval()
with torch.no_grad():
batch_list_t = []
batch_list_v = []
batch_sequence_output_list, batch_visual_output_list = [], []
total_video_num = 0
# ----------------------------
# 1. cache the features
# ----------------------------
for bid, batch in enumerate(test_dataloader):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, video, video_mask = batch
if multi_sentence_:
                # multi-sentence retrieval: one clip has two or more descriptions.
b, *_t = video.shape
sequence_output = model.get_sequence_output(input_ids, segment_ids, input_mask)
batch_sequence_output_list.append(sequence_output)
batch_list_t.append((input_mask, segment_ids,))
s_, e_ = total_video_num, total_video_num + b
filter_inds = [itm - s_ for itm in cut_off_points_ if itm >= s_ and itm < e_]
if len(filter_inds) > 0:
video, video_mask = video[filter_inds, ...], video_mask[filter_inds, ...]
visual_output = model.get_visual_output(video, video_mask)
batch_visual_output_list.append(visual_output)
batch_list_v.append((video_mask,))
total_video_num += b
else:
sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
batch_sequence_output_list.append(sequence_output)
batch_list_t.append((input_mask, segment_ids,))
batch_visual_output_list.append(visual_output)
batch_list_v.append((video_mask,))
print("{}/{}\r".format(bid, len(test_dataloader)), end="")
# ----------------------------------
# 2. calculate the similarity
# ----------------------------------
        if n_gpu > 1:
device_ids = list(range(n_gpu))
batch_list_t_splits = []
batch_list_v_splits = []
batch_t_output_splits = []
batch_v_output_splits = []
            batch_len = len(batch_list_t)
            split_len = (batch_len + n_gpu - 1) // n_gpu
for dev_id in device_ids:
s_, e_ = dev_id * split_len, (dev_id + 1) * split_len
if dev_id == 0:
batch_list_t_splits.append(batch_list_t[s_:e_])
batch_list_v_splits.append(batch_list_v)
batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
batch_v_output_splits.append(batch_visual_output_list)
else:
devc = torch.device('cuda:{}'.format(str(dev_id)))
devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_t[s_:e_]]
batch_list_t_splits.append(devc_batch_list)
devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_v]
batch_list_v_splits.append(devc_batch_list)
devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
batch_t_output_splits.append(devc_batch_list)
devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
batch_v_output_splits.append(devc_batch_list)
parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id],
batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
sim_matrix = []
for idx in range(len(parallel_outputs)):
sim_matrix += parallel_outputs[idx]
sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
else:
sim_matrix = _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list)
sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
sim_matrix_dsl = sim_matrix * softmax(sim_matrix, axis=0)
sim_matrix_dsl_T = sim_matrix.T * softmax(sim_matrix.T, axis=0)
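    # DSL (dual softmax) re-weighting, applied only at inference time: each similarity
    # is multiplied by a softmax taken over the opposite axis, which sharpens the score
    # matrix before ranking. A toy sketch of the same operation (hypothetical numbers):
    #   s = np.array([[0.9, 0.1], [0.2, 0.8]])
    #   s_dsl = s * softmax(s, axis=0)    # t2v scores re-weighted by a text-axis prior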
# np.save('result_lsmdc.npy', sim_matrix)
if multi_sentence_:
logger.info("before reshape, sim matrix size: {} x {}".format(sim_matrix.shape[0], sim_matrix.shape[1]))
cut_off_points2len_ = [itm + 1 for itm in cut_off_points_]
max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)])
sim_matrix_new = []
for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_):
sim_matrix_new.append(np.concatenate((sim_matrix[s_:e_],
np.full((max_length-e_+s_, sim_matrix.shape[1]), -np.inf)), axis=0))
sim_matrix = np.stack(tuple(sim_matrix_new), axis=0)
logger.info("after reshape, sim matrix size: {} x {} x {}".
format(sim_matrix.shape[0], sim_matrix.shape[1], sim_matrix.shape[2]))
tv_metrics = tensor_text_to_video_metrics(sim_matrix)
vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix))
# dsl
cut_off_points2len_ = [itm + 1 for itm in cut_off_points_]
max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)])
sim_matrix_new = []
for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_):
sim_matrix_new.append(np.concatenate((sim_matrix_dsl[s_:e_],
np.full((max_length-e_+s_, sim_matrix_dsl.shape[1]), -np.inf)), axis=0))
sim_matrix_dsl = np.stack(tuple(sim_matrix_new), axis=0)
logger.info("after reshape, sim matrix size: {} x {} x {}".
format(sim_matrix_dsl.shape[0], sim_matrix_dsl.shape[1], sim_matrix_dsl.shape[2]))
dsl_tv_metrics = tensor_text_to_video_metrics(sim_matrix_dsl)
dsl_vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix_dsl))
else:
logger.info("sim matrix size: {}, {}".format(sim_matrix.shape[0], sim_matrix.shape[1]))
tv_metrics = compute_metrics(sim_matrix)
vt_metrics = compute_metrics(sim_matrix.T)
dsl_tv_metrics = compute_metrics(sim_matrix_dsl)
# dsl_vt_metrics = compute_metrics(sim_matrix_dsl.T)
dsl_vt_metrics = compute_metrics(sim_matrix_dsl_T)
logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0])))
# dsl output
logger.info("------------------------------------------------------------")
logger.info("DSL Text-to-Video:")
logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.
format(dsl_tv_metrics['R1'], dsl_tv_metrics['R5'], dsl_tv_metrics['R10'], dsl_tv_metrics['MR'], dsl_tv_metrics['MeanR']))
logger.info("DSL Video-to-Text:")
logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'.
format(dsl_vt_metrics['R1'], dsl_vt_metrics['R5'], dsl_vt_metrics['R10'], dsl_vt_metrics['MR'], dsl_vt_metrics['MeanR']))
logger.info("------------------------------------------------------------")
logger.info("Text-to-Video:")
logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.
format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR']))
logger.info("Video-to-Text:")
logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'.
format(vt_metrics['R1'], vt_metrics['R5'], vt_metrics['R10'], vt_metrics['MR'], vt_metrics['MeanR']))
R1 = tv_metrics['R1']
return R1
def main():
global logger
args = get_args()
args = set_seed_logger(args)
device, n_gpu = init_device(args, args.rank)
tokenizer = ClipTokenizer()
assert args.task_type == "retrieval"
model = init_model(args, device, n_gpu, args.rank)
## ####################################
# dataloader loading
## ####################################
assert args.datatype in DATALOADER_DICT
assert DATALOADER_DICT[args.datatype]["test"] is not None \
or DATALOADER_DICT[args.datatype]["val"] is not None
test_dataloader, test_length = None, 0
if DATALOADER_DICT[args.datatype]["test"] is not None:
test_dataloader, test_length = DATALOADER_DICT[args.datatype]["test"](args, tokenizer)
if DATALOADER_DICT[args.datatype]["val"] is not None:
val_dataloader, val_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer, subset="val")
else:
val_dataloader, val_length = test_dataloader, test_length
    ## report validation results if the "test" split is None
if test_dataloader is None:
test_dataloader, test_length = val_dataloader, val_length
if args.rank == 0:
logger.info("***** Running test *****")
logger.info(" Num examples = %d", test_length)
logger.info(" Batch size = %d", args.batch_size_val)
logger.info(" Num steps = %d", len(test_dataloader))
logger.info("***** Running val *****")
logger.info(" Num examples = %d", val_length)
## ####################################
### test ####
#######################################
if args.rank == 0:
eval_epoch(args, model, test_dataloader, device, n_gpu)
if __name__ == "__main__":
main()
| InternVideo-main | Downstream/Video-Text-Retrieval/inference.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from torch.utils.data import Dataset
import numpy as np
import pickle
import json
from dataloaders.rawvideo_util import RawVideoExtractor
import io
from decord import VideoReader, cpu
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:  # petrel_client unavailable; client stays None and local paths are used
client = None
class VATEX_DataLoader(Dataset):
"""VATEX dataloader"""
def __init__(
self,
subset,
data_path,
features_path,
tokenizer,
max_words=30,
feature_framerate=1.0,
max_frames=100,
image_resolution=224,
frame_order=0,
slice_framepos=0,
):
self.data_path = data_path
self.features_path = features_path
self.feature_framerate = feature_framerate
self.image_resolution = image_resolution
self.max_words = max_words
self.max_frames = max_frames
self.tokenizer = tokenizer
# 0: ordinary order; 1: reverse order; 2: random order.
self.frame_order = frame_order
assert self.frame_order in [0, 1, 2]
# 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.
self.slice_framepos = slice_framepos
assert self.slice_framepos in [0, 1, 2]
self.subset = subset
assert self.subset in ["train", "val", "test"]
video_id_path_dict = {}
video_id_path_dict["train"] = os.path.join(self.data_path, "train_list.txt")
video_id_path_dict["val"] = os.path.join(self.data_path, "test_list.txt")
video_id_path_dict["test"] = os.path.join(self.data_path, "test_list.txt")
with open(video_id_path_dict[self.subset], 'r') as fp:
video_ids = [itm.strip() for itm in fp.readlines()]
# =============================================================================================
video_json_path_dict = {}
video_json_path_dict["train"] = os.path.join(self.data_path, "vatex_training_v1.0_ceph.json")
video_json_path_dict["val"] = os.path.join(self.data_path, "vatex_validation_v1.0_ceph.json")
video_json_path_dict["test"] = os.path.join(self.data_path, "vatex_validation_v1.0_ceph.json")
captions = {}
video_dict = {}
with open(video_json_path_dict[self.subset], 'r') as f:
json_data = json.load(f)
for itm in json_data:
video_id = itm["videoID"]
if video_id not in video_ids:
continue
if "path" not in itm:
continue
path = itm["path"]
caption = itm["enCap"]
video_dict[video_id] = path
captions[video_id] = caption
# ==============================================================================================================================
self.video_dict = video_dict
self.sample_len = 0
self.sentences_dict = {}
self.cut_off_points = []
for video_id in video_ids:
if video_id not in captions:
continue
for cap_txt in captions[video_id]:
self.sentences_dict[len(self.sentences_dict)] = (video_id, cap_txt)
self.cut_off_points.append(len(self.sentences_dict))
print(f"sentence dict len: {len(self.sentences_dict)}")
print(f"video dict len: {len(video_dict)}")
print(f"video ids len: {len(video_ids)}")
        ## the variables below are used for multi-sentence retrieval
        # self.cut_off_points: used to tag the labels when calculating the metrics
        # self.sentence_num: used to cut the sentence representations
        # self.video_num: used to cut the video representations
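        # Hypothetical example: two videos with 10 English captions each give a
        # sentences_dict of length 20 and cut_off_points = [10, 20].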
self.multi_sentence_per_video = True # !!! important tag for eval
if self.subset == "val" or self.subset == "test":
self.sentence_num = len(self.sentences_dict)
self.video_num = len(video_dict)
assert len(self.cut_off_points) == self.video_num
self.sample_len = len(self.sentences_dict)
self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution)
self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
"MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}
self.transform = Compose([
Resize(image_resolution, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __len__(self):
return self.sample_len
def _get_text(self, video_id, caption):
k = 1
choice_video_ids = [video_id]
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
for i, video_id in enumerate(choice_video_ids):
words = self.tokenizer.tokenize(caption)
words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words
total_length_with_CLS = self.max_words - 1
if len(words) > total_length_with_CLS:
words = words[:total_length_with_CLS]
words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]]
input_ids = self.tokenizer.convert_tokens_to_ids(words)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < self.max_words:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_words
assert len(input_mask) == self.max_words
assert len(segment_ids) == self.max_words
pairs_text[i] = np.array(input_ids)
pairs_mask[i] = np.array(input_mask)
pairs_segment[i] = np.array(segment_ids)
return pairs_text, pairs_mask, pairs_segment, choice_video_ids
def _get_rawvideo(self, choice_video_ids):
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
max_video_length = [0] * len(choice_video_ids)
# Pair x L x T x 3 x H x W
        video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
                          self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float64)
for i, video_id in enumerate(choice_video_ids):
video_path = self.video_dict[video_id]
raw_video_data = self.rawVideoExtractor.get_video_data(video_path)
raw_video_data = raw_video_data['video']
if len(raw_video_data.shape) > 3:
raw_video_data_clip = raw_video_data
# L x T x 3 x H x W
raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip)
if self.max_frames < raw_video_slice.shape[0]:
if self.slice_framepos == 0:
video_slice = raw_video_slice[:self.max_frames, ...]
elif self.slice_framepos == 1:
video_slice = raw_video_slice[-self.max_frames:, ...]
else:
sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int)
video_slice = raw_video_slice[sample_indx, ...]
else:
video_slice = raw_video_slice
video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order)
slice_len = video_slice.shape[0]
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = video_slice
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
return video, video_mask
def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None):
# speed up video decode via decord.
# video_mask = np.zeros(self.max_frames, dtype=np.long)
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
# max_video_length = 0
max_video_length = [0] * len(choice_video_ids)
# T x 3 x H x W
# video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float)
        video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
                          self.image_resolution, self.image_resolution), dtype=np.float64)
if s is None:
start_time, end_time = None, None
else:
start_time = int(s)
end_time = int(e)
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = start_time + 1
# video_path = self.video_dict[video_id]
for i, video_id in enumerate(choice_video_ids):
video_path = self.video_dict[video_id]
# video_path = os.path.join(self.features_path, "{}.mp4 ".format(video_id))
if video_path.startswith("s3://"):
video_bytes = client.get(video_path)
if video_bytes is None:
print("Get video failed from {}".format(video_path))
continue
video_path = io.BytesIO(video_bytes)
vreader = VideoReader(video_path, ctx=cpu(0))
fps = vreader.get_avg_fps()
f_start = 0 if start_time is None else int(start_time * fps)
f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
num_frames = f_end - f_start + 1
if num_frames > 0:
# T x 3 x H x W
# sample_fps = int(self.video_framerate)
sample_fps = int(self.feature_framerate)
t_stride = int(round(float(fps) / sample_fps))
all_pos = list(range(f_start, f_end + 1, t_stride))
if len(all_pos) > self.max_frames:
sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)]
else:
sample_pos = all_pos
patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]
patch_images = torch.stack([self.transform(img) for img in patch_images])
patch_images = patch_images.unsqueeze(1)
slice_len = patch_images.shape[0]
# max_video_length = max_video_length if max_video_length > slice_len else slice_len
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = patch_images
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
# video_mask[:max_video_length] = [1] * max_video_length
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
#print(video.shape, video_mask.shape)
return video, video_mask
def __getitem__(self, idx):
video_id, caption = self.sentences_dict[idx]
pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, caption)
video, video_mask = self._get_rawvideo_dec(choice_video_ids)
return pairs_text, pairs_mask, pairs_segment, video, video_mask | InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/dataloader_vatex_retrieval.py |
import torch
from torch.utils.data import DataLoader
from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_DataLoader
from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_TrainDataLoader
from dataloaders.dataloader_msvd_retrieval import MSVD_DataLoader
from dataloaders.dataloader_lsmdc_retrieval import LSMDC_DataLoader
from dataloaders.dataloader_activitynet_retrieval import ActivityNet_DataLoader
from dataloaders.dataloader_didemo_retrieval import DiDeMo_DataLoader
from dataloaders.dataloader_vatex_retrieval import VATEX_DataLoader
def dataloader_msrvtt_train(args, tokenizer):
msrvtt_dataset = MSRVTT_TrainDataLoader(
csv_path=args.train_csv,
json_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
unfold_sentences=args.expand_msrvtt_sentences,
frame_order=args.train_frame_order,
slice_framepos=args.slice_framepos,
)
train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
dataloader = DataLoader(
msrvtt_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last=True,
persistent_workers=True,
)
return dataloader, len(msrvtt_dataset), train_sampler
def dataloader_msrvtt_test(args, tokenizer, subset="test"):
msrvtt_testset = MSRVTT_DataLoader(
csv_path=args.val_csv,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.eval_frame_order,
slice_framepos=args.slice_framepos,
)
dataloader_msrvtt = DataLoader(
msrvtt_testset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
persistent_workers=True,
)
return dataloader_msrvtt, len(msrvtt_testset)
def dataloader_msvd_train(args, tokenizer):
msvd_dataset = MSVD_DataLoader(
subset="train",
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.train_frame_order,
slice_framepos=args.slice_framepos,
)
train_sampler = torch.utils.data.distributed.DistributedSampler(msvd_dataset)
dataloader = DataLoader(
msvd_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last=True,
persistent_workers=True,
)
return dataloader, len(msvd_dataset), train_sampler
def dataloader_msvd_test(args, tokenizer, subset="test"):
msvd_testset = MSVD_DataLoader(
subset=subset,
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.eval_frame_order,
slice_framepos=args.slice_framepos,
)
dataloader_msrvtt = DataLoader(
msvd_testset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
persistent_workers=True,
)
return dataloader_msrvtt, len(msvd_testset)
def dataloader_lsmdc_train(args, tokenizer):
lsmdc_dataset = LSMDC_DataLoader(
subset="train",
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.train_frame_order,
slice_framepos=args.slice_framepos,
)
train_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_dataset)
dataloader = DataLoader(
lsmdc_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last=True,
persistent_workers=True,
)
return dataloader, len(lsmdc_dataset), train_sampler
def dataloader_lsmdc_test(args, tokenizer, subset="test"):
lsmdc_testset = LSMDC_DataLoader(
subset=subset,
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.eval_frame_order,
slice_framepos=args.slice_framepos,
)
dataloader_msrvtt = DataLoader(
lsmdc_testset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
persistent_workers=True,
)
return dataloader_msrvtt, len(lsmdc_testset)
def dataloader_activity_train(args, tokenizer):
activity_dataset = ActivityNet_DataLoader(
subset="train",
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.train_frame_order,
slice_framepos=args.slice_framepos,
)
train_sampler = torch.utils.data.distributed.DistributedSampler(activity_dataset)
dataloader = DataLoader(
activity_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last=True,
persistent_workers=True,
)
return dataloader, len(activity_dataset), train_sampler
def dataloader_activity_test(args, tokenizer, subset="test"):
activity_testset = ActivityNet_DataLoader(
subset=subset,
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.eval_frame_order,
slice_framepos=args.slice_framepos,
)
dataloader_msrvtt = DataLoader(
activity_testset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
persistent_workers=True,
)
return dataloader_msrvtt, len(activity_testset)
def dataloader_didemo_train(args, tokenizer):
didemo_dataset = DiDeMo_DataLoader(
subset="train",
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.train_frame_order,
slice_framepos=args.slice_framepos,
)
train_sampler = torch.utils.data.distributed.DistributedSampler(didemo_dataset)
dataloader = DataLoader(
didemo_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last=True,
persistent_workers=True,
)
return dataloader, len(didemo_dataset), train_sampler
def dataloader_didemo_test(args, tokenizer, subset="test"):
didemo_testset = DiDeMo_DataLoader(
subset=subset,
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.eval_frame_order,
slice_framepos=args.slice_framepos,
)
dataloader_didemo = DataLoader(
didemo_testset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
persistent_workers=True,
)
return dataloader_didemo, len(didemo_testset)
def dataloader_vatex_train(args, tokenizer):
vatex_dataset = VATEX_DataLoader(
subset="train",
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.train_frame_order,
slice_framepos=args.slice_framepos,
)
train_sampler = torch.utils.data.distributed.DistributedSampler(vatex_dataset)
dataloader = DataLoader(
vatex_dataset,
batch_size=args.batch_size // args.n_gpu,
num_workers=args.num_thread_reader,
pin_memory=False,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last=True,
)
return dataloader, len(vatex_dataset), train_sampler
def dataloader_vatex_test(args, tokenizer, subset="test"):
vatex_testset = VATEX_DataLoader(
subset=subset,
data_path=args.data_path,
features_path=args.features_path,
max_words=args.max_words,
feature_framerate=args.feature_framerate,
tokenizer=tokenizer,
max_frames=args.max_frames,
frame_order=args.eval_frame_order,
slice_framepos=args.slice_framepos,
)
dataloader_msrvtt = DataLoader(
vatex_testset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
)
return dataloader_msrvtt, len(vatex_testset)
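# Registry mapping each dataset name to its train/val/test dataloader builders; a None
# entry means that split is not provided and callers fall back to the other split.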
DATALOADER_DICT = {}
DATALOADER_DICT["msrvtt"] = {"train":dataloader_msrvtt_train, "val":dataloader_msrvtt_test, "test":None}
DATALOADER_DICT["msvd"] = {"train":dataloader_msvd_train, "val":dataloader_msvd_test, "test":dataloader_msvd_test}
DATALOADER_DICT["lsmdc"] = {"train":dataloader_lsmdc_train, "val":dataloader_lsmdc_test, "test":dataloader_lsmdc_test}
DATALOADER_DICT["activity"] = {"train":dataloader_activity_train, "val":dataloader_activity_test, "test":None}
DATALOADER_DICT["didemo"] = {"train":dataloader_didemo_train, "val":dataloader_didemo_test, "test":dataloader_didemo_test}
DATALOADER_DICT["vatex"] = {"train":dataloader_vatex_train, "val":dataloader_vatex_test, "test":dataloader_vatex_test} | InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/data_dataloaders.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
import io
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
from collections import defaultdict
import json
import random
from dataloaders.rawvideo_util import RawVideoExtractor
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:  # petrel_client unavailable; client stays None and local paths are used
client = None
from decord import VideoReader, cpu
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
class MSRVTT_DataLoader(Dataset):
"""MSRVTT dataset loader."""
def __init__(
self,
csv_path,
features_path,
tokenizer,
max_words=30,
feature_framerate=1.0,
max_frames=100,
image_resolution=224,
frame_order=0,
slice_framepos=0,
):
self.data = pd.read_csv(csv_path)
self.features_path = features_path
self.feature_framerate = feature_framerate
self.max_words = max_words
self.max_frames = max_frames
self.tokenizer = tokenizer
self.image_resolution = image_resolution
# 0: ordinary order; 1: reverse order; 2: random order.
self.frame_order = frame_order
assert self.frame_order in [0, 1, 2]
# 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.
self.slice_framepos = slice_framepos
assert self.slice_framepos in [0, 1, 2]
self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution)
self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
"MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}
self.transform = Compose([
Resize(image_resolution, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __len__(self):
return len(self.data)
def _get_text(self, video_id, sentence):
choice_video_ids = [video_id]
n_caption = len(choice_video_ids)
k = n_caption
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
for i, video_id in enumerate(choice_video_ids):
words = self.tokenizer.tokenize(sentence)
words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words
total_length_with_CLS = self.max_words - 1
if len(words) > total_length_with_CLS:
words = words[:total_length_with_CLS]
words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]]
input_ids = self.tokenizer.convert_tokens_to_ids(words)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < self.max_words:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_words
assert len(input_mask) == self.max_words
assert len(segment_ids) == self.max_words
pairs_text[i] = np.array(input_ids)
pairs_mask[i] = np.array(input_mask)
pairs_segment[i] = np.array(segment_ids)
return pairs_text, pairs_mask, pairs_segment, choice_video_ids
def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None):
# speed up video decode via decord.
# video_mask = np.zeros(self.max_frames, dtype=np.long)
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
# max_video_length = 0
max_video_length = [0] * len(choice_video_ids)
# T x 3 x H x W
# video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float)
        video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
                          self.image_resolution, self.image_resolution), dtype=np.float64)
if s is None:
start_time, end_time = None, None
else:
start_time = int(s)
end_time = int(e)
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = start_time + 1
# video_path = self.video_dict[video_id]
for i, video_id in enumerate(choice_video_ids):
video_path = os.path.join(self.features_path, "{}.mp4".format(video_id))
if video_path.startswith("s3://"):
video_path = video_path.replace('videos', 'MSRVTT_Videos')
video_bytes = client.get(video_path, enable_stream=True)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
video_path = video_bytes
if isinstance(video_path, bytes):
video_path = io.BytesIO(video_bytes)
vreader = VideoReader(video_path, ctx=cpu(0))
fps = vreader.get_avg_fps()
f_start = 0 if start_time is None else int(start_time * fps)
f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
num_frames = f_end - f_start + 1
if num_frames > 0:
# T x 3 x H x W
# sample_fps = int(self.video_framerate)
sample_fps = int(self.feature_framerate)
t_stride = int(round(float(fps) / sample_fps))
all_pos = list(range(f_start, f_end + 1, t_stride))
if len(all_pos) > self.max_frames:
sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)]
else:
sample_pos = all_pos
patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]
patch_images = torch.stack([self.transform(img) for img in patch_images])
patch_images = patch_images.unsqueeze(1)
slice_len = patch_images.shape[0]
# max_video_length = max_video_length if max_video_length > slice_len else slice_len
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = patch_images
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
# video_mask[:max_video_length] = [1] * max_video_length
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
#print(video.shape, video_mask.shape)
return video, video_mask
def _get_rawvideo(self, choice_video_ids):
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
max_video_length = [0] * len(choice_video_ids)
# Pair x L x T x 3 x H x W
        video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
                          self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float64)
for i, video_id in enumerate(choice_video_ids):
            # Handled individually for the YouCookII dataset, due to its video format
video_path = os.path.join(self.features_path, "{}.mp4".format(video_id))
#if os.path.exists(video_path) is False:
# video_path = video_path.replace(".mp4", ".webm")
raw_video_data = self.rawVideoExtractor.get_video_data(video_path)
raw_video_data = raw_video_data['video']
if len(raw_video_data.shape) > 3:
raw_video_data_clip = raw_video_data
# L x T x 3 x H x W
raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip)
if self.max_frames < raw_video_slice.shape[0]:
if self.slice_framepos == 0:
video_slice = raw_video_slice[:self.max_frames, ...]
elif self.slice_framepos == 1:
video_slice = raw_video_slice[-self.max_frames:, ...]
else:
sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int)
video_slice = raw_video_slice[sample_indx, ...]
else:
video_slice = raw_video_slice
video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order)
slice_len = video_slice.shape[0]
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = video_slice
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
return video, video_mask
def __getitem__(self, idx):
video_id = self.data['video_id'].values[idx]
sentence = self.data['sentence'].values[idx]
pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, sentence)
# video, video_mask = self._get_rawvideo(choice_video_ids)
video, video_mask = self._get_rawvideo_dec(choice_video_ids)
return pairs_text, pairs_mask, pairs_segment, video, video_mask
class MSRVTT_TrainDataLoader(Dataset):
"""MSRVTT train dataset loader."""
def __init__(
self,
csv_path,
json_path,
features_path,
tokenizer,
max_words=30,
feature_framerate=1.0,
max_frames=100,
unfold_sentences=False,
image_resolution=224,
frame_order=0,
slice_framepos=0,
):
self.csv = pd.read_csv(csv_path)
self.data = json.load(open(json_path, 'r'))
self.features_path = features_path
self.feature_framerate = feature_framerate
self.max_words = max_words
self.max_frames = max_frames
self.tokenizer = tokenizer
self.image_resolution = image_resolution
# 0: ordinary order; 1: reverse order; 2: random order.
self.frame_order = frame_order
assert self.frame_order in [0, 1, 2]
# 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.
self.slice_framepos = slice_framepos
assert self.slice_framepos in [0, 1, 2]
self.unfold_sentences = unfold_sentences
self.sample_len = 0
if self.unfold_sentences:
train_video_ids = list(self.csv['video_id'].values)
self.sentences_dict = {}
for itm in self.data['sentences']:
if itm['video_id'] in train_video_ids:
self.sentences_dict[len(self.sentences_dict)] = (itm['video_id'], itm['caption'])
self.sample_len = len(self.sentences_dict)
else:
num_sentences = 0
self.sentences = defaultdict(list)
s_video_id_set = set()
for itm in self.data['sentences']:
self.sentences[itm['video_id']].append(itm['caption'])
num_sentences += 1
s_video_id_set.add(itm['video_id'])
            # Used to find the clips in the same video
self.parent_ids = {}
self.children_video_ids = defaultdict(list)
for itm in self.data['videos']:
vid = itm["video_id"]
url_posfix = itm["url"].split("?v=")[-1]
self.parent_ids[vid] = url_posfix
self.children_video_ids[url_posfix].append(vid)
self.sample_len = len(self.csv)
self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution)
self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
"MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}
self.transform = Compose([
Resize(image_resolution, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __len__(self):
return self.sample_len
def _get_text(self, video_id, caption=None):
k = 1
choice_video_ids = [video_id]
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
for i, video_id in enumerate(choice_video_ids):
if caption is not None:
words = self.tokenizer.tokenize(caption)
else:
words = self._get_single_text(video_id)
words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words
total_length_with_CLS = self.max_words - 1
if len(words) > total_length_with_CLS:
words = words[:total_length_with_CLS]
words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]]
input_ids = self.tokenizer.convert_tokens_to_ids(words)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < self.max_words:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_words
assert len(input_mask) == self.max_words
assert len(segment_ids) == self.max_words
pairs_text[i] = np.array(input_ids)
pairs_mask[i] = np.array(input_mask)
pairs_segment[i] = np.array(segment_ids)
return pairs_text, pairs_mask, pairs_segment, choice_video_ids
def _get_single_text(self, video_id):
rind = random.randint(0, len(self.sentences[video_id]) - 1)
caption = self.sentences[video_id][rind]
words = self.tokenizer.tokenize(caption)
return words
def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None):
# speed up video decode via decord.
# video_mask = np.zeros(self.max_frames, dtype=np.long)
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
# max_video_length = 0
max_video_length = [0] * len(choice_video_ids)
# T x 3 x H x W
# video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float)
        video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
                          self.image_resolution, self.image_resolution), dtype=np.float64)
if s is None:
start_time, end_time = None, None
else:
start_time = int(s)
end_time = int(e)
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = start_time + 1
# video_path = self.video_dict[video_id]
for i, video_id in enumerate(choice_video_ids):
video_path = os.path.join(self.features_path, "{}.mp4".format(video_id))
if video_path.startswith("s3://"):
video_path = video_path.replace('videos', 'MSRVTT_Videos')
video_bytes = client.get(video_path, enable_stream=True)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
video_path = video_bytes
if isinstance(video_path, bytes):
video_path = io.BytesIO(video_bytes)
vreader = VideoReader(video_path, ctx=cpu(0))
fps = vreader.get_avg_fps()
f_start = 0 if start_time is None else int(start_time * fps)
f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
num_frames = f_end - f_start + 1
if num_frames > 0:
# T x 3 x H x W
# sample_fps = int(self.video_framerate)
sample_fps = int(self.feature_framerate)
t_stride = int(round(float(fps) / sample_fps))
all_pos = list(range(f_start, f_end + 1, t_stride))
if len(all_pos) > self.max_frames:
sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)]
else:
sample_pos = all_pos
patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]
patch_images = torch.stack([self.transform(img) for img in patch_images])
patch_images = patch_images.unsqueeze(1)
slice_len = patch_images.shape[0]
# max_video_length = max_video_length if max_video_length > slice_len else slice_len
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = patch_images
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
# video_mask[:max_video_length] = [1] * max_video_length
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
#print(video.shape, video_mask.shape)
return video, video_mask
def _get_rawvideo(self, choice_video_ids):
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
max_video_length = [0] * len(choice_video_ids)
# Pair x L x T x 3 x H x W
        video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
                          self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float64)
for i, video_id in enumerate(choice_video_ids):
            # Handled individually for the YouCookII dataset, due to its video format
video_path = os.path.join(self.features_path, "{}.mp4".format(video_id))
if os.path.exists(video_path) is False:
video_path = video_path.replace(".mp4", ".webm")
raw_video_data = self.rawVideoExtractor.get_video_data(video_path)
raw_video_data = raw_video_data['video']
if len(raw_video_data.shape) > 3:
raw_video_data_clip = raw_video_data
# L x T x 3 x H x W
raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip)
if self.max_frames < raw_video_slice.shape[0]:
if self.slice_framepos == 0:
video_slice = raw_video_slice[:self.max_frames, ...]
elif self.slice_framepos == 1:
video_slice = raw_video_slice[-self.max_frames:, ...]
else:
sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int)
video_slice = raw_video_slice[sample_indx, ...]
else:
video_slice = raw_video_slice
video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order)
slice_len = video_slice.shape[0]
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = video_slice
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
return video, video_mask
def __getitem__(self, idx):
if self.unfold_sentences:
video_id, caption = self.sentences_dict[idx]
else:
video_id, caption = self.csv['video_id'].values[idx], None
pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, caption)
# video, video_mask = self._get_rawvideo(choice_video_ids)
video, video_mask = self._get_rawvideo_dec(choice_video_ids)
return pairs_text, pairs_mask, pairs_segment, video, video_mask
| InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/dataloader_msrvtt_retrieval.py |
import torch as th
import numpy as np
from PIL import Image
# pytorch=1.7.1
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
# pip install opencv-python
import cv2
import io
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:  # petrel_client unavailable; client stays None and local paths are used
client = None
class RawVideoExtractorCV2():
def __init__(self, centercrop=False, size=224, framerate=-1, ):
self.centercrop = centercrop
self.size = size
self.framerate = framerate
self.transform = self._transform(self.size)
def _transform(self, n_px):
return Compose([
            Resize(n_px, interpolation=InterpolationMode.BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
assert isinstance(start_time, int) and isinstance(end_time, int) \
and start_time > -1 and end_time > start_time
assert sample_fp > -1
        # Sample sample_fp frames per second (every frame when sample_fp <= 0).
        if video_file.startswith("s3://"):
            presigned_url = client.generate_presigned_url(video_file, client_method='get_object', expires_in=36000)
            assert presigned_url is not None, "Get video failed from {}".format(video_file)
            cap = cv2.VideoCapture(presigned_url)
        else:
            cap = cv2.VideoCapture(video_file)
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
total_duration = (frameCount + fps - 1) // fps
start_sec, end_sec = 0, total_duration
if start_time is not None:
start_sec, end_sec = start_time, end_time if end_time <= total_duration else total_duration
cap.set(cv2.CAP_PROP_POS_FRAMES, int(start_time * fps))
interval = 1
if sample_fp > 0:
interval = fps // sample_fp
else:
sample_fp = fps
if interval == 0: interval = 1
inds = [ind for ind in np.arange(0, fps, interval)]
assert len(inds) >= sample_fp
inds = inds[:sample_fp]
ret = True
images, included = [], []
for sec in np.arange(start_sec, end_sec + 1):
if not ret: break
sec_base = int(sec * fps)
for ind in inds:
cap.set(cv2.CAP_PROP_POS_FRAMES, sec_base + ind)
ret, frame = cap.read()
if not ret: break
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
images.append(preprocess(Image.fromarray(frame_rgb).convert("RGB")))
cap.release()
if len(images) > 0:
video_data = th.tensor(np.stack(images))
else:
video_data = th.zeros(1)
return {'video': video_data}
def get_video_data(self, video_path, start_time=None, end_time=None):
image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time)
return image_input
def process_raw_data(self, raw_video_data):
tensor_size = raw_video_data.size()
tensor = raw_video_data.view(-1, 1, tensor_size[-3], tensor_size[-2], tensor_size[-1])
return tensor
def process_frame_order(self, raw_video_data, frame_order=0):
# 0: ordinary order; 1: reverse order; 2: random order.
if frame_order == 0:
pass
elif frame_order == 1:
reverse_order = np.arange(raw_video_data.size(0) - 1, -1, -1)
raw_video_data = raw_video_data[reverse_order, ...]
elif frame_order == 2:
random_order = np.arange(raw_video_data.size(0))
np.random.shuffle(random_order)
raw_video_data = raw_video_data[random_order, ...]
return raw_video_data
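# Minimal usage sketch (the path is a placeholder): extract frames at 1 fps and 224 px.
#   extractor = RawVideoExtractorCV2(size=224, framerate=1)
#   data = extractor.get_video_data("clips/example.mp4")    # {'video': T x 3 x 224 x 224}
#   clips = extractor.process_raw_data(data['video'])       # T x 1 x 3 x 224 x 224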
# An ordinary video frame extractor based on CV2
RawVideoExtractor = RawVideoExtractorCV2 | InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/rawvideo_util.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from torch.utils.data import Dataset
import numpy as np
import json
from dataloaders.rawvideo_util import RawVideoExtractor
import io
from decord import VideoReader, cpu
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:  # petrel_client unavailable; client stays None and local paths are used
client = None
class DiDeMo_DataLoader(Dataset):
def __init__(
self,
subset,
data_path,
features_path,
tokenizer,
max_words=30,
feature_framerate=1.0,
max_frames=100,
image_resolution=224,
frame_order=0,
slice_framepos=0,
):
self.data_path = data_path
self.features_path = features_path
self.feature_framerate = feature_framerate
self.max_words = max_words
self.max_frames = max_frames
self.image_resolution = image_resolution
self.tokenizer = tokenizer
# 0: ordinary order; 1: reverse order; 2: random order.
self.frame_order = frame_order
assert self.frame_order in [0, 1, 2]
# 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.
self.slice_framepos = slice_framepos
assert self.slice_framepos in [0, 1, 2]
self.subset = subset
assert self.subset in ["train", "val", "test"]
video_id_path_dict = {}
video_id_path_dict["train"] = os.path.join(self.data_path, "train_list.txt")
video_id_path_dict["val"] = os.path.join(self.data_path, "val_list.txt")
video_id_path_dict["test"] = os.path.join(self.data_path, "test_list.txt")
with open(video_id_path_dict[self.subset], 'r') as fp:
video_ids = [itm.strip()+".mp4" for itm in fp.readlines()]
video_json_path_dict = {}
video_json_path_dict["train"] = os.path.join(self.data_path, "train_data.json")
video_json_path_dict["val"] = os.path.join(self.data_path, "val_data.json")
video_json_path_dict["test"] = os.path.join(self.data_path, "test_data.json")
caption_dict = {}
with open(video_json_path_dict[self.subset], 'r') as f:
json_data = json.load(f)
for itm in json_data:
description = itm["description"]
times = itm["times"]
video = itm["video"] + ".mp4"
if video not in video_ids:
continue
# each video is split into 5-second temporal chunks
# average the points from each annotator
start_ = np.mean([t_[0] for t_ in times]) * 5
end_ = (np.mean([t_[1] for t_ in times]) + 1) * 5
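# e.g. times=[[1, 2], [1, 3]] -> start_ = 1.0 * 5 = 5.0s, end_ = (2.5 + 1) * 5 = 17.5s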
if video in caption_dict:
caption_dict[video]["start"].append(start_)
caption_dict[video]["end"].append(end_)
caption_dict[video]["text"].append(description)
else:
caption_dict[video] = {}
caption_dict[video]["start"] = [start_]
caption_dict[video]["end"] = [end_]
caption_dict[video]["text"] = [description]
for k_ in caption_dict.keys():
caption_dict[k_]["start"] = [0]
# trick to save time on obtaining each video length
# [https://github.com/LisaAnne/LocalizingMoments/blob/master/README.md]:
# Some videos are longer than 30 seconds. These videos were truncated to 30 seconds during annotation.
caption_dict[k_]["end"] = [31]
caption_dict[k_]["text"] = [" ".join(caption_dict[k_]["text"])]
video_dict = {}
# for root, dub_dir, video_files in os.walk(self.features_path):
# for video_file in video_files:
# video_id_ = video_file
# if video_id_ not in video_ids:
# continue
# file_path_ = os.path.join(root, video_file)
# video_dict[video_id_] = file_path_
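# The commented-out os.walk branch above enumerates videos on local disk; the petrel
# client below lists them from object storage instead (and requires the petrel_client
# import above to have succeeded, otherwise `client` is None and this loop fails).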
for video_file in client.list(self.features_path):
video_id_ = video_file
if video_id_ not in video_ids:
continue
file_path_ = os.path.join(self.features_path, video_file)
video_dict[video_id_] = file_path_
self.caption_dict = caption_dict
self.video_dict = video_dict
video_ids = list(set(video_ids) & set(self.caption_dict.keys()) & set(self.video_dict.keys()))
# Get all captions
self.iter2video_pairs_dict = {}
for video_id in self.caption_dict.keys():
if video_id not in video_ids:
continue
caption = self.caption_dict[video_id]
n_caption = len(caption['start'])
for sub_id in range(n_caption):
self.iter2video_pairs_dict[len(self.iter2video_pairs_dict)] = (video_id, sub_id)
print(f"caption dict len: {len(self.caption_dict)}")
print(f"video ids len: {len(video_ids)}")
print(f"iter2video pairs dict len: {len(self.iter2video_pairs_dict)}")
print(f"video dict len: {len(self.video_dict)}")
self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution)
self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
"MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}
self.transform = Compose([
Resize(image_resolution, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __len__(self):
return len(self.iter2video_pairs_dict)
def _get_text(self, video_id, sub_id):
caption = self.caption_dict[video_id]
k = 1
r_ind = [sub_id]
starts = np.zeros(k, dtype=np.long)
ends = np.zeros(k, dtype=np.long)
pairs_text = np.zeros((k, self.max_words), dtype=np.long)
pairs_mask = np.zeros((k, self.max_words), dtype=np.long)
pairs_segment = np.zeros((k, self.max_words), dtype=np.long)
for i in range(k):
ind = r_ind[i]
start_, end_ = caption['start'][ind], caption['end'][ind]
words = self.tokenizer.tokenize(caption['text'][ind])
starts[i], ends[i] = start_, end_
words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words
total_length_with_CLS = self.max_words - 1
if len(words) > total_length_with_CLS:
words = words[:total_length_with_CLS]
words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]]
input_ids = self.tokenizer.convert_tokens_to_ids(words)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < self.max_words:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_words
assert len(input_mask) == self.max_words
assert len(segment_ids) == self.max_words
pairs_text[i] = np.array(input_ids)
pairs_mask[i] = np.array(input_mask)
pairs_segment[i] = np.array(segment_ids)
return pairs_text, pairs_mask, pairs_segment, starts, ends
def _get_rawvideo(self, idx, s, e):
video_mask = np.zeros((len(s), self.max_frames), dtype=np.long)
max_video_length = [0] * len(s)
# Pair x L x T x 3 x H x W
video = np.zeros((len(s), self.max_frames, 1, 3,
self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float)
video_path = self.video_dict[idx]
try:
for i in range(len(s)):
start_time = int(s[i])
end_time = int(e[i])
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = end_time + 1
cache_id = "{}_{}_{}".format(video_path, start_time, end_time)
# Could be optimized by gathering all requests for this video into a single decode
raw_video_data = self.rawVideoExtractor.get_video_data(video_path, start_time, end_time)
raw_video_data = raw_video_data['video']
# print(f"video path: {video_path}, raw video data shape: {raw_video_data.shape}")
if len(raw_video_data.shape) > 3:
raw_video_data_clip = raw_video_data
# L x T x 3 x H x W
raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip)
if self.max_frames < raw_video_slice.shape[0]:
if self.slice_framepos == 0:
video_slice = raw_video_slice[:self.max_frames, ...]
elif self.slice_framepos == 1:
video_slice = raw_video_slice[-self.max_frames:, ...]
else:
sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int)
video_slice = raw_video_slice[sample_indx, ...]
else:
video_slice = raw_video_slice
video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order)
slice_len = video_slice.shape[0]
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = video_slice
else:
print("Dimension error! video path: {} error. video id: {}, start: {}, end: {}".format(video_path, idx, start_time, end_time))
except Exception as excep:
print("video path: {} error. video id: {}, start: {}, end: {}, Error: {}".format(video_path, idx, s, e, excep))
pass
# raise e
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
return video, video_mask
def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None):
# speed up video decode via decord.
video_mask = np.zeros((1, self.max_frames), dtype=np.long)
max_video_length = [0]
# T x 3 x H x W
# video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float)
video = np.zeros((len(s), self.max_frames, 1, 3,
self.image_resolution, self.image_resolution), dtype=np.float)
if s is None:
start_time, end_time = None, None
else:
start_time = int(s)
end_time = int(e)
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = start_time + 1
video_path = self.video_dict[choice_video_ids]
try:
if video_path.startswith("s3://"):
video_bytes = client.get(video_path)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
video_path = io.BytesIO(video_bytes)
vreader = VideoReader(video_path, ctx=cpu(0))
fps = vreader.get_avg_fps()
f_start = 0 if start_time is None else int(start_time * fps)
f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
num_frames = f_end - f_start + 1
if num_frames > 0:
# T x 3 x H x W
# sample_fps = int(self.video_framerate)
sample_fps = int(self.feature_framerate)
t_stride = int(round(float(fps) / sample_fps))
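# t_stride converts the native fps into the target sampling rate,
# e.g. fps≈30, feature_framerate=1 -> t_stride=30, i.e. roughly one frame per second.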
all_pos = list(range(f_start, f_end + 1, t_stride))
if len(all_pos) > self.max_frames:
sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)]
else:
sample_pos = all_pos
patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]
patch_images = torch.stack([self.transform(img) for img in patch_images])
patch_images = patch_images.unsqueeze(1)
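# unsqueeze(1) adds a singleton clip dimension so the frames match the
# (max_frames, 1, 3, H, W) layout of the preallocated `video` buffer.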
slice_len = patch_images.shape[0]
# max_video_length = max_video_length if max_video_length > slice_len else slice_len
max_video_length[0] = max_video_length[0] if max_video_length[0] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[0][:slice_len, ...] = patch_images
else:
print("Error. video id: {}".format(choice_video_ids))
except Exception as excep:
print("Error. video id: {}, start: {}, end: {}, Error: {}".format(choice_video_ids, s, e, excep))
print(video.shape)
pass
# raise e
v_length = max_video_length[0]
video_mask[0][:v_length] = [1] * v_length
# print(video.shape, video_mask.shape)
return video, video_mask
def __getitem__(self, feature_idx):
video_id, sub_id = self.iter2video_pairs_dict[feature_idx]
pairs_text, pairs_mask, pairs_segment, starts, ends = self._get_text(video_id, sub_id)
video, video_mask = self._get_rawvideo_dec(video_id, starts, ends)
# print(video.shape, video_mask.shape)
return pairs_text, pairs_mask, pairs_segment, video, video_mask
| InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/dataloader_didemo_retrieval.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
import io
from torch.utils.data import Dataset
import numpy as np
import pickle
from dataloaders.rawvideo_util import RawVideoExtractor
from decord import VideoReader, cpu
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:
client = None
class MSVD_DataLoader(Dataset):
"""MSVD dataset loader."""
def __init__(
self,
subset,
data_path,
features_path,
tokenizer,
max_words=30,
feature_framerate=1.0,
max_frames=100,
image_resolution=224,
frame_order=0,
slice_framepos=0,
):
self.data_path = data_path
self.features_path = features_path
self.feature_framerate = feature_framerate
self.image_resolution = image_resolution
self.max_words = max_words
self.max_frames = max_frames
self.tokenizer = tokenizer
# 0: ordinary order; 1: reverse order; 2: random order.
self.frame_order = frame_order
assert self.frame_order in [0, 1, 2]
# 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.
self.slice_framepos = slice_framepos
assert self.slice_framepos in [0, 1, 2]
self.subset = subset
assert self.subset in ["train", "val", "test"]
video_id_path_dict = {}
video_id_path_dict["train"] = os.path.join(self.data_path, "train_list.txt")
video_id_path_dict["val"] = os.path.join(self.data_path, "val_list.txt")
video_id_path_dict["test"] = os.path.join(self.data_path, "test_list.txt")
caption_file = os.path.join(self.data_path, "raw-captions.pkl")
with open(video_id_path_dict[self.subset], 'r') as fp:
video_ids = [itm.strip() for itm in fp.readlines()]
with open(caption_file, 'rb') as f:
captions = pickle.load(f)
video_dict = {}
for video_file in client.list(self.features_path):
video_id_ = ".".join(video_file.split(".")[:-1])
if video_id_ not in video_ids:
continue
file_path_ = os.path.join(self.features_path, video_file)
video_dict[video_id_] = file_path_
self.video_dict = video_dict
self.sample_len = 0
self.sentences_dict = {}
self.cut_off_points = []
for video_id in video_ids:
assert video_id in captions
for cap in captions[video_id]:
cap_txt = " ".join(cap)
self.sentences_dict[len(self.sentences_dict)] = (video_id, cap_txt)
self.cut_off_points.append(len(self.sentences_dict))
## the variables below are used for multi-sentence retrieval
# self.cut_off_points: used to tag the labels when calculating the metric
# self.sentence_num: used to cut the sentence representation
# self.video_num: used to cut the video representation
self.multi_sentence_per_video = True # !!! important tag for eval
if self.subset == "val" or self.subset == "test":
self.sentence_num = len(self.sentences_dict)
self.video_num = len(video_ids)
assert len(self.cut_off_points) == self.video_num
print("For {}, sentence number: {}".format(self.subset, self.sentence_num))
print("For {}, video number: {}".format(self.subset, self.video_num))
print("Video number: {}".format(len(self.video_dict)))
print("Total Paire: {}".format(len(self.sentences_dict)))
self.sample_len = len(self.sentences_dict)
self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution)
self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
"MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}
self.transform = Compose([
Resize(image_resolution, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __len__(self):
return self.sample_len
def _get_text(self, video_id, caption):
k = 1
choice_video_ids = [video_id]
pairs_text = np.zeros((k, self.max_words), dtype=np.long)
pairs_mask = np.zeros((k, self.max_words), dtype=np.long)
pairs_segment = np.zeros((k, self.max_words), dtype=np.long)
for i, video_id in enumerate(choice_video_ids):
words = self.tokenizer.tokenize(caption)
words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words
total_length_with_CLS = self.max_words - 1
if len(words) > total_length_with_CLS:
words = words[:total_length_with_CLS]
words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]]
input_ids = self.tokenizer.convert_tokens_to_ids(words)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < self.max_words:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_words
assert len(input_mask) == self.max_words
assert len(segment_ids) == self.max_words
pairs_text[i] = np.array(input_ids)
pairs_mask[i] = np.array(input_mask)
pairs_segment[i] = np.array(segment_ids)
return pairs_text, pairs_mask, pairs_segment, choice_video_ids
def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None):
# speed up video decode via decord.
# video_mask = np.zeros(self.max_frames, dtype=np.long)
video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long)
# max_video_length = 0
max_video_length = [0] * len(choice_video_ids)
# T x 3 x H x W
# video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float)
video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
self.image_resolution, self.image_resolution), dtype=np.float)
if s is None:
start_time, end_time = None, None
else:
start_time = int(s)
end_time = int(e)
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = start_time + 1
# video_path = self.video_dict[video_id]
for i, video_id in enumerate(choice_video_ids):
video_path = os.path.join(self.features_path, "{}.avi".format(video_id))
if video_path.startswith("s3://"):
video_bytes = client.get(video_path)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
video_path = io.BytesIO(video_bytes)
vreader = VideoReader(video_path, ctx=cpu(0))
fps = vreader.get_avg_fps()
f_start = 0 if start_time is None else int(start_time * fps)
f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
num_frames = f_end - f_start + 1
if num_frames > 0:
# T x 3 x H x W
# sample_fps = int(self.video_framerate)
sample_fps = int(self.feature_framerate)
t_stride = int(round(float(fps) / sample_fps))
all_pos = list(range(f_start, f_end + 1, t_stride))
if len(all_pos) > self.max_frames:
sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)]
else:
sample_pos = all_pos
patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]
patch_images = torch.stack([self.transform(img) for img in patch_images])
patch_images = patch_images.unsqueeze(1)
slice_len = patch_images.shape[0]
# max_video_length = max_video_length if max_video_length > slice_len else slice_len
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = patch_images
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
# video_mask[:max_video_length] = [1] * max_video_length
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
#print(video.shape, video_mask.shape)
return video, video_mask
def _get_rawvideo(self, choice_video_ids):
video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long)
max_video_length = [0] * len(choice_video_ids)
# Pair x L x T x 3 x H x W
video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float)
for i, video_id in enumerate(choice_video_ids):
video_path = self.video_dict[video_id]
raw_video_data = self.rawVideoExtractor.get_video_data(video_path)
raw_video_data = raw_video_data['video']
if len(raw_video_data.shape) > 3:
raw_video_data_clip = raw_video_data
# L x T x 3 x H x W
raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip)
if self.max_frames < raw_video_slice.shape[0]:
if self.slice_framepos == 0:
video_slice = raw_video_slice[:self.max_frames, ...]
elif self.slice_framepos == 1:
video_slice = raw_video_slice[-self.max_frames:, ...]
else:
sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int)
video_slice = raw_video_slice[sample_indx, ...]
else:
video_slice = raw_video_slice
video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order)
slice_len = video_slice.shape[0]
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = video_slice
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
return video, video_mask
def __getitem__(self, idx):
video_id, caption = self.sentences_dict[idx]
pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, caption)
# video, video_mask = self._get_rawvideo_dec(choice_video_ids)
video, video_mask = self._get_rawvideo(choice_video_ids)
return pairs_text, pairs_mask, pairs_segment, video, video_mask
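# Illustrative usage sketch (hypothetical paths and tokenizer; not part of the original file):
#   dataset = MSVD_DataLoader(subset="test", data_path="data/msvd", features_path="s3://msvd/videos",
#                             tokenizer=clip_tokenizer, max_frames=12)
#   pairs_text, pairs_mask, pairs_segment, video, video_mask = dataset[0]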
| InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/dataloader_msvd_retrieval.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import io
import os
from torch.utils.data import Dataset
import numpy as np
import json
import math
from dataloaders.rawvideo_util import RawVideoExtractor
from decord import VideoReader, cpu
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:
client = None
class ActivityNet_DataLoader(Dataset):
def __init__(
self,
subset,
data_path,
features_path,
tokenizer,
max_words=30,
feature_framerate=1.0,
max_frames=100,
image_resolution=224,
frame_order=0,
slice_framepos=0,
):
self.data_path = data_path
self.features_path = features_path
self.feature_framerate = feature_framerate
self.image_resolution = image_resolution
self.max_words = max_words
self.max_frames = max_frames
self.tokenizer = tokenizer
# 0: ordinary order; 1: reverse order; 2: random order.
self.frame_order = frame_order
assert self.frame_order in [0, 1, 2]
# 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.
self.slice_framepos = slice_framepos
assert self.slice_framepos in [0, 1, 2]
self.subset = subset
assert self.subset in ["train", "val"]
video_id_path_dict = {}
video_id_path_dict["train"] = os.path.join(self.data_path, "train_ids.json")
video_id_path_dict["val"] = os.path.join(self.data_path, "val_ids.json")
video_json_path_dict = {}
video_json_path_dict["train"] = os.path.join(self.data_path, "train.json")
video_json_path_dict["val"] = os.path.join(self.data_path, "val_1.json")
pseudo_video_id_list, video_id_list = self._get_video_id_single(video_id_path_dict[self.subset])
pseudo_caption_dict = self._get_captions_single(video_json_path_dict[self.subset])
# print("video id list: {}".format(len(video_id_list)))
# print("pseudo caption dict: {}".format(len(pseudo_caption_dict.keys())))
video_dict = {}
for content in client.list(self.features_path):
if content.endswith('/'):
video_cur_path = os.path.join(self.features_path, content)
video_files = client.list(video_cur_path)
for video_file in video_files:
video_id_ = ".".join(video_file.split(".")[:-1])
if video_id_ not in pseudo_video_id_list:
continue
file_path_ = os.path.join(video_cur_path, video_file)
video_dict[video_id_] = file_path_
self.video_dict = video_dict
# print("video dict: {}".format(len(video_dict)))
self.pseudo_video_id_list = pseudo_video_id_list
self.video_id_list = video_id_list
self.pseudo_caption_dict = pseudo_caption_dict
# Get iterator video ids
self.video_id2idx_dict = {pseudo_video_id: id for id, pseudo_video_id in enumerate(self.pseudo_video_id_list)}
# Get all captions
self.iter2video_pairs_dict = {}
for pseudo_video_id in self.pseudo_video_id_list:
if pseudo_video_id not in self.pseudo_caption_dict:
continue
caption = self.pseudo_caption_dict[pseudo_video_id]
n_caption = len(caption['start'])
for sub_id in range(n_caption):
self.iter2video_pairs_dict[len(self.iter2video_pairs_dict)] = (pseudo_video_id, sub_id)
# print(f"{subset} iter2video pairs dict: {len(self.iter2video_pairs_dict)}")
# print(f"{subset} pseudo caption dict: {len(pseudo_caption_dict)}")
self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution)
self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
"MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}
self.transform = Compose([
Resize(image_resolution, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __len__(self):
return len(self.iter2video_pairs_dict)
def _get_video_id_from_pseduo(self, pseudo_video_id):
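# e.g. a hypothetical "v_abc123" -> "abc123" (strips the leading "v_" prefix)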
video_id = pseudo_video_id[2:]
return video_id
def _get_video_id_single(self, path):
pseudo_video_id_list = []
video_id_list = []
# print('Loading json: {}'.format(path))
with open(path, 'r') as f:
json_data = json.load(f)
for pseudo_video_id in json_data:
if pseudo_video_id in pseudo_video_id_list:
# print("reduplicate.")
pass
else:
video_id = self._get_video_id_from_pseduo(pseudo_video_id)
pseudo_video_id_list.append(pseudo_video_id)
video_id_list.append(video_id)
return pseudo_video_id_list, video_id_list
def _get_captions_single(self, path):
pseudo_caption_dict = {}
with open(path, 'r') as f:
json_data = json.load(f)
for pseudo_video_id, v_ in json_data.items():
pseudo_caption_dict[pseudo_video_id] = {}
duration = v_["duration"]
pseudo_caption_dict[pseudo_video_id]["start"] = np.array([0], dtype=object)
pseudo_caption_dict[pseudo_video_id]["end"] = np.array([int(math.ceil(float(duration)))], dtype=object)
pseudo_caption_dict[pseudo_video_id]["text"] = np.array([" ".join(v_["sentences"])], dtype=object)
return pseudo_caption_dict
def _get_text(self, pseudo_video_id, sub_id):
caption = self.pseudo_caption_dict[pseudo_video_id]
k = 1
r_ind = [sub_id]
starts = np.zeros(k, dtype=np.long)
ends = np.zeros(k, dtype=np.long)
pairs_text = np.zeros((k, self.max_words), dtype=np.long)
pairs_mask = np.zeros((k, self.max_words), dtype=np.long)
pairs_segment = np.zeros((k, self.max_words), dtype=np.long)
for i in range(k):
ind = r_ind[i]
start_, end_ = caption['start'][ind], caption['end'][ind]
words = self.tokenizer.tokenize(caption['text'][ind])
starts[i], ends[i] = start_, end_
words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words
total_length_with_CLS = self.max_words - 1
if len(words) > total_length_with_CLS:
words = words[:total_length_with_CLS]
words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]]
input_ids = self.tokenizer.convert_tokens_to_ids(words)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < self.max_words:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_words
assert len(input_mask) == self.max_words
assert len(segment_ids) == self.max_words
pairs_text[i] = np.array(input_ids)
pairs_mask[i] = np.array(input_mask)
pairs_segment[i] = np.array(segment_ids)
return pairs_text, pairs_mask, pairs_segment, starts, ends
def _get_rawvideo(self, idx, s, e):
video_mask = np.zeros((len(s), self.max_frames), dtype=np.long)
max_video_length = [0] * len(s)
# Pair x L x T x 3 x H x W
video = np.zeros((len(s), self.max_frames, 1, 3,
self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float)
# video_path = self.video_dict[idx]
video_path = os.path.join(self.features_path, self.subset, "{}.mp4".format("v_"+idx))
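# `idx` is the bare video id (the "v_" prefix was stripped earlier), so the prefix is
# re-added here to match the on-disk ActivityNet file names under the <subset>/ folder.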
try:
for i in range(len(s)):
start_time = int(s[i])
end_time = int(e[i])
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = end_time + 1
# Could be optimized by gathering all requests for this video into a single decode
raw_video_data = self.rawVideoExtractor.get_video_data(video_path, start_time, end_time)
raw_video_data = raw_video_data['video']
if len(raw_video_data.shape) > 3:
raw_video_data_clip = raw_video_data
# L x T x 3 x H x W
raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip)
if self.max_frames < raw_video_slice.shape[0]:
if self.slice_framepos == 0:
video_slice = raw_video_slice[:self.max_frames, ...]
elif self.slice_framepos == 1:
video_slice = raw_video_slice[-self.max_frames:, ...]
else:
sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int)
video_slice = raw_video_slice[sample_indx, ...]
else:
video_slice = raw_video_slice
video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order)
slice_len = video_slice.shape[0]
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = video_slice
else:
print("video path: {} error. video id: {}, start: {}, end: {}".format(video_path, idx, start_time, end_time))
except Exception as excep:
print("video path: {} error. video id: {}, start: {}, end: {}, Error: {}".format(video_path, idx, s, e, excep))
raise excep
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
return video, video_mask
def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None):
# speed up video decode via decord.
# video_mask = np.zeros(self.max_frames, dtype=np.long)
choice_video_ids = [choice_video_ids]
video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long)
# max_video_length = 0
max_video_length = [0] * len(choice_video_ids)
# T x 3 x H x W
# video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float)
video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
self.image_resolution, self.image_resolution), dtype=np.float)
if s is None:
start_time, end_time = None, None
else:
start_time = int(s)
end_time = int(e)
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = start_time + 1
# video_path = self.video_dict[video_id]
for i, video_id in enumerate(choice_video_ids):
video_path = os.path.join(self.features_path, self.subset, "{}.mp4".format("v_"+video_id))
if video_path.startswith("s3://"):
video_bytes = client.get(video_path)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
video_path = io.BytesIO(video_bytes)
vreader = VideoReader(video_path, ctx=cpu(0))
fps = vreader.get_avg_fps()
f_start = 0 if start_time is None else int(start_time * fps)
f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
num_frames = f_end - f_start + 1
if num_frames > 0:
# T x 3 x H x W
# sample_fps = int(self.video_framerate)
sample_fps = int(self.feature_framerate)
t_stride = int(round(float(fps) / sample_fps))
all_pos = list(range(f_start, f_end + 1, t_stride))
if len(all_pos) > self.max_frames:
sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)]
else:
sample_pos = all_pos
patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]
patch_images = torch.stack([self.transform(img) for img in patch_images])
patch_images = patch_images.unsqueeze(1)
slice_len = patch_images.shape[0]
# max_video_length = max_video_length if max_video_length > slice_len else slice_len
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = patch_images
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
# video_mask[:max_video_length] = [1] * max_video_length
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
# print(video.shape, video_mask.shape)
return video, video_mask
def __getitem__(self, feature_idx):
# print(f"cur idx: {feature_idx}")
pseudo_video_id, sub_id = self.iter2video_pairs_dict[feature_idx]
idx = self.video_id2idx_dict[pseudo_video_id]
pairs_text, pairs_mask, pairs_segment, starts, ends = self._get_text(pseudo_video_id, sub_id)
# video, video_mask = self._get_rawvideo_dec(self.video_id_list, starts, ends)
video, video_mask = self._get_rawvideo(self.video_id_list[idx], starts, ends)
return pairs_text, pairs_mask, pairs_segment, video, video_mask
| InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/dataloader_activitynet_retrieval.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from torch.utils.data import Dataset
import numpy as np
import json
import math
from dataloaders.rawvideo_util import RawVideoExtractor
import io
from decord import VideoReader, cpu
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:
client = None
class LSMDC_DataLoader(Dataset):
"""LSMDC dataset loader."""
def __init__(
self,
subset,
data_path,
features_path,
tokenizer,
max_words=30,
feature_framerate=1.0,
max_frames=100,
image_resolution=224,
frame_order=0,
slice_framepos=0,
):
self.data_path = data_path
self.features_path = features_path
self.feature_framerate = feature_framerate
self.max_words = max_words
self.max_frames = max_frames
self.image_resolution = image_resolution
self.tokenizer = tokenizer
# 0: ordinary order; 1: reverse order; 2: random order.
self.frame_order = frame_order
assert self.frame_order in [0, 1, 2]
# 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.
self.slice_framepos = slice_framepos
assert self.slice_framepos in [0, 1, 2]
self.subset = subset
assert self.subset in ["train", "val", "test"]
video_json_path_dict = {}
video_json_path_dict["train"] = os.path.join(self.data_path, "LSMDC16_annos_training.csv")
video_json_path_dict["val"] = os.path.join(self.data_path, "LSMDC16_annos_val.csv")
video_json_path_dict["test"] = os.path.join(self.data_path, "LSMDC16_challenge_1000_publictect.csv")
# <CLIP_ID>\t<START_ALIGNED>\t<END_ALIGNED>\t<START_EXTRACTED>\t<END_EXTRACTED>\t<SENTENCE>
# <CLIP_ID> is not a unique identifier, i.e. the same <CLIP_ID> can be associated with multiple sentences.
# However, LSMDC16_challenge_1000_publictect.csv contains no repeated <CLIP_ID>s
video_id_list = []
caption_dict = {}
with open(video_json_path_dict[self.subset], 'r') as fp:
for line in fp:
line = line.strip()
line_split = line.split("\t")
assert len(line_split) == 6
clip_id, start_aligned, end_aligned, start_extracted, end_extracted, sentence = line_split
caption_dict[len(caption_dict)] = (clip_id, sentence)
if clip_id not in video_id_list: video_id_list.append(clip_id)
if subset=='train':
video_id_list = video_id_list[:58200] + video_id_list[58300:]
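# Note: a contiguous block of 100 training clips (indices 58200-58300) is dropped here;
# the reason is not documented in this file (presumably problematic or missing videos).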
video_dict = {}
for content in client.list(self.features_path):
if content.endswith('/'):
video_cur_path = os.path.join(self.features_path, content)
video_files = client.list(video_cur_path)
for video_file in video_files:
video_id_ = ".".join(video_file.split(".")[:-1])
if video_id_ not in video_id_list:
continue
file_path_ = os.path.join(video_cur_path, video_file)
video_dict[video_id_] = file_path_
self.video_dict = video_dict
# Get all captions
self.iter2video_pairs_dict = {}
for clip_id, sentence in caption_dict.values():
if clip_id not in self.video_dict:
continue
self.iter2video_pairs_dict[len(self.iter2video_pairs_dict)] = (clip_id, sentence)
self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution)
self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
"MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}
self.transform = Compose([
Resize(image_resolution, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_resolution),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __len__(self):
return len(self.iter2video_pairs_dict)
def _get_video_id_from_pseduo(self, pseudo_video_id):
video_id = pseudo_video_id[2:]
return video_id
def _get_video_id_single(self, path):
pseudo_video_id_list = []
video_id_list = []
print('Loading json: {}'.format(path))
with open(path, 'r') as f:
json_data = json.load(f)
for pseudo_video_id in json_data:
if pseudo_video_id in pseudo_video_id_list:
print("reduplicate.")
else:
video_id = self._get_video_id_from_pseduo(pseudo_video_id)
pseudo_video_id_list.append(pseudo_video_id)
video_id_list.append(video_id)
return pseudo_video_id_list, video_id_list
def _get_captions_single(self, path):
pseudo_caption_dict = {}
with open(path, 'r') as f:
json_data = json.load(f)
for pseudo_video_id, v_ in json_data.items():
pseudo_caption_dict[pseudo_video_id] = {}
timestamps = v_["timestamps"]
pseudo_caption_dict[pseudo_video_id]["start"] = \
np.array([int(math.floor(float(itm[0]))) for itm in timestamps], dtype=object)
pseudo_caption_dict[pseudo_video_id]["end"] = \
np.array([int(math.ceil(float(itm[1]))) for itm in timestamps], dtype=object)
pseudo_caption_dict[pseudo_video_id]["text"] = np.array(v_["sentences"], dtype=object)
return pseudo_caption_dict
def _get_text(self, video_id, caption):
k = 1
choice_video_ids = [video_id]
pairs_text = np.zeros((k, self.max_words), dtype=np.long)
pairs_mask = np.zeros((k, self.max_words), dtype=np.long)
pairs_segment = np.zeros((k, self.max_words), dtype=np.long)
for i, video_id in enumerate(choice_video_ids):
words = self.tokenizer.tokenize(caption)
words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words
total_length_with_CLS = self.max_words - 1
if len(words) > total_length_with_CLS:
words = words[:total_length_with_CLS]
words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]]
input_ids = self.tokenizer.convert_tokens_to_ids(words)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < self.max_words:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.max_words
assert len(input_mask) == self.max_words
assert len(segment_ids) == self.max_words
pairs_text[i] = np.array(input_ids)
pairs_mask[i] = np.array(input_mask)
pairs_segment[i] = np.array(segment_ids)
return pairs_text, pairs_mask, pairs_segment, choice_video_ids
def _get_rawvideo(self, choice_video_ids):
video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long)
max_video_length = [0] * len(choice_video_ids)
# Pair x L x T x 3 x H x W
video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float)
try:
for i, video_id in enumerate(choice_video_ids):
video_path = self.video_dict[video_id]
raw_video_data = self.rawVideoExtractor.get_video_data(video_path)
raw_video_data = raw_video_data['video']
if len(raw_video_data.shape) > 3:
raw_video_data_clip = raw_video_data
# L x T x 3 x H x W
raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip)
if self.max_frames < raw_video_slice.shape[0]:
if self.slice_framepos == 0:
video_slice = raw_video_slice[:self.max_frames, ...]
elif self.slice_framepos == 1:
video_slice = raw_video_slice[-self.max_frames:, ...]
else:
sample_indx = np.linspace(0, raw_video_slice.shape[0]-1, num=self.max_frames, dtype=int)
video_slice = raw_video_slice[sample_indx, ...]
else:
video_slice = raw_video_slice
video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order)
slice_len = video_slice.shape[0]
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = video_slice
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
except Exception as excep:
print("Video ids: {}".format(choice_video_ids))
raise excep
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
return video, video_mask
def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None):
# speed up video decode via decord.
# video_mask = np.zeros(self.max_frames, dtype=np.long)
video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long)
# max_video_length = 0
max_video_length = [0] * len(choice_video_ids)
# T x 3 x H x W
# video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float)
video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3,
self.image_resolution, self.image_resolution), dtype=np.float)
if s is None:
start_time, end_time = None, None
else:
start_time = int(s)
end_time = int(e)
start_time = start_time if start_time >= 0. else 0.
end_time = end_time if end_time >= 0. else 0.
if start_time > end_time:
start_time, end_time = end_time, start_time
elif start_time == end_time:
end_time = start_time + 1
for i, video_id in enumerate(choice_video_ids):
video_path = self.video_dict[video_id]
# video_path = os.path.join(self.features_path, "{}.avi".format(video_id))
if video_path.startswith("s3://"):
video_bytes = client.get(video_path)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
video_path = io.BytesIO(video_bytes)
vreader = VideoReader(video_path, ctx=cpu(0))
fps = vreader.get_avg_fps()
f_start = 0 if start_time is None else int(start_time * fps)
f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1))
num_frames = f_end - f_start + 1
if num_frames > 0:
# T x 3 x H x W
# sample_fps = int(self.video_framerate)
sample_fps = int(self.feature_framerate)
t_stride = int(round(float(fps) / sample_fps))
all_pos = list(range(f_start, f_end + 1, t_stride))
if len(all_pos) > self.max_frames:
sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)]
else:
sample_pos = all_pos
patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()]
patch_images = torch.stack([self.transform(img) for img in patch_images])
patch_images = patch_images.unsqueeze(1)
slice_len = patch_images.shape[0]
# max_video_length = max_video_length if max_video_length > slice_len else slice_len
max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len
if slice_len < 1:
pass
else:
video[i][:slice_len, ...] = patch_images
else:
print("video path: {} error. video id: {}".format(video_path, video_id))
# video_mask[:max_video_length] = [1] * max_video_length
for i, v_length in enumerate(max_video_length):
video_mask[i][:v_length] = [1] * v_length
#print(video.shape, video_mask.shape)
return video, video_mask
def __getitem__(self, feature_idx):
clip_id, sentence = self.iter2video_pairs_dict[feature_idx]
pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(clip_id, sentence)
video, video_mask = self._get_rawvideo_dec(choice_video_ids)
return pairs_text, pairs_mask, pairs_segment, video, video_mask
| InternVideo-main | Downstream/Video-Text-Retrieval/dataloaders/dataloader_lsmdc_retrieval.py |
"""
Script to compress videos, used in: https://github.com/ArrowLuo/CLIP4Clip
Author: ArrowLuo
"""
import os
import argparse
import ffmpeg
import subprocess
import time
import multiprocessing
from multiprocessing import Pool
import shutil
try:
from psutil import cpu_count
except ImportError:
from multiprocessing import cpu_count
# multiprocessing.freeze_support()
def compress(paras):
input_video_path, output_video_path = paras
try:
command = ['ffmpeg',
'-y', # (optional) overwrite output file if it exists
'-i', input_video_path,
'-filter:v',
'scale=\'if(gt(a,1),trunc(oh*a/2)*2,224)\':\'if(gt(a,1),224,trunc(ow*a/2)*2)\'', # scale to 224
'-map', '0:v',
'-r', '3', # frames per second
output_video_path,
]
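# The scale filter rescales each frame so that one side is fixed to 224 px (with
# even-rounded dimensions), and `-r 3` resamples the output to 3 frames per second.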
ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ffmpeg.communicate()
retcode = ffmpeg.poll()
# `out`, `err`, and `retcode` above can be printed when debugging a failed conversion
except Exception as e:
raise e
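# Example invocation (paths are hypothetical):
#   python compress_video.py --input_root ./raw_videos --output_root ./videos_compressed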
def prepare_input_output_pairs(input_root, output_root):
input_video_path_list = []
output_video_path_list = []
for root, dirs, files in os.walk(input_root):
for file_name in files:
input_video_path = os.path.join(root, file_name)
output_video_path = os.path.join(output_root, file_name)
if os.path.exists(output_video_path) and os.path.getsize(output_video_path) > 0:
pass
else:
input_video_path_list.append(input_video_path)
output_video_path_list.append(output_video_path)
return input_video_path_list, output_video_path_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Compress video for speed-up')
parser.add_argument('--input_root', type=str, help='input root')
parser.add_argument('--output_root', type=str, help='output root')
args = parser.parse_args()
input_root = args.input_root
output_root = args.output_root
assert input_root != output_root
if not os.path.exists(output_root):
os.makedirs(output_root, exist_ok=True)
input_video_path_list, output_video_path_list = prepare_input_output_pairs(input_root, output_root)
print("Total video need to process: {}".format(len(input_video_path_list)))
num_works = cpu_count()
print("Begin with {}-core logical processor.".format(num_works))
pool = Pool(num_works)
data_dict_list = pool.map(compress,
[(input_video_path, output_video_path) for
input_video_path, output_video_path in
zip(input_video_path_list, output_video_path_list)])
pool.close()
pool.join()
print("Compress finished, wait for checking files...")
for input_video_path, output_video_path in zip(input_video_path_list, output_video_path_list):
if os.path.exists(input_video_path):
if os.path.exists(output_video_path) is False or os.path.getsize(output_video_path) < 1.:
shutil.copyfile(input_video_path, output_video_path)
print("Copy and replace file: {}".format(output_video_path)) | InternVideo-main | Downstream/Video-Text-Retrieval/preprocess/compress_video.py |
"""
Adapted from: https://github.com/openai/CLIP/blob/main/clip/clip.py
"""
from collections import OrderedDict
from typing import Tuple, Union
import hashlib
import os
import urllib
import warnings
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch import nn
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
_PT_NAME = {
"RN50": "RN50.pt",
"RN101": "RN101.pt",
"RN50x4": "RN50x4.pt",
"RN50x16": "RN50x16.pt",
"ViT-B/32": "ViT-B-32.pt",
"ViT-B/16": "ViT-B-16.pt",
"ViT-L/14": "ViT-L-14.pt",
}
# Debugging helper; unused below and requires the optional ipdb package.
from ipdb import set_trace
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def available_models():
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
# =============================
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
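# A mean-pooled token is prepended as the query; its attended output (x[0]) is
# returned below as the pooled image feature.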
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
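# CLIP's fast GELU approximation: x * sigmoid(1.702 * x).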
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask=None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
attn_mask_ = self.attn_mask
if self.attn_mask is not None and hasattr(self.attn_mask, '__call__'):
attn_mask_ = self.attn_mask(x.size(0)) # LND
attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device) if attn_mask_ is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]
def forward(self, x_tuple:tuple):
x, video_frame = x_tuple
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return (x, video_frame)
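# The (x, video_frame) tuple is threaded through every block so that the plain
# nn.Sequential in Transformer can pass the frame count alongside the activations.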
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor, video_frame=-1):
return self.resblocks((x, video_frame))[0]
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int,
linear_patch: str = '2d',):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
# For 3D
assert linear_patch in ['2d', '3d']
self.linear_patch = linear_patch
if self.linear_patch == '3d':
self.conv2 = nn.Conv3d(in_channels=3, out_channels=width, kernel_size=(3, patch_size, patch_size),
stride=(1, patch_size, patch_size), padding=(1, 0, 0), bias=False)
def forward(self, x: torch.Tensor, video_frame=-1):
if self.linear_patch == '3d':
assert video_frame != -1
x_3d = x.reshape(-1, video_frame, x.shape[-3], x.shape[-2], x.shape[-1])
x_3d = x_3d.permute(0, 2, 1, 3, 4)
x_3d = self.conv2(x_3d) # shape = [*, width, frame, grid, grid]
x_3d = x_3d.permute(0, 2, 1, 3, 4) # shape = [*, frame, width, grid, grid]
x = x_3d.reshape(-1, x_3d.shape[-3], x_3d.shape[-2], x_3d.shape[-1]).contiguous() # shape = [*, width, grid, grid]
else:
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, video_frame=video_frame)
x = x.permute(1, 0, 2) # LND -> NLD
# The three lines below were moved to `encode_image` so the entire hidden sequence is returned here
# x = self.ln_post(x[:, 0, :])
# if self.proj is not None:
# x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
# vision linear of patch
linear_patch: str = '2d',
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim,
linear_patch=linear_patch
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
@staticmethod
def get_config(pretrained_clip_name="ViT-B/32"):
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "ViT-B-32.pt")
if pretrained_clip_name in _MODELS and pretrained_clip_name in _PT_NAME:
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), _PT_NAME[pretrained_clip_name])
if pretrained_clip_name in ["ViT-B/32", "ViT-B/16"] and os.path.exists(model_path):
pass
else:
if pretrained_clip_name in _MODELS:
model_path = _download(_MODELS[pretrained_clip_name])
elif os.path.isfile(pretrained_clip_name):
model_path = pretrained_clip_name
else:
raise RuntimeError(f"Model {pretrained_clip_name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location="cpu").eval()
state_dict = model.state_dict()
except RuntimeError:
state_dict = torch.load(model_path, map_location="cpu")
return state_dict
def build_attention_mask(self, context_length):
        # lazily create the additive causal attention mask used by the text transformer
        # pytorch expects an additive mask, so blocked (future) positions are filled with -inf
mask = torch.zeros(context_length, context_length)
mask.fill_(float("-inf"))
        mask.triu_(1)  # keep -inf only above the diagonal; the diagonal and lower triangle become 0
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image, return_hidden=False, video_frame=-1):
hidden = self.visual(image.type(self.dtype), video_frame=video_frame)
hidden = self.visual.ln_post(hidden) @ self.visual.proj
x = hidden[:, 0, :]
if return_hidden:
return x, hidden
return x
def encode_text(self, text, return_hidden=False):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
pos_emd = self.positional_embedding[:x.size(1), :].type(self.dtype)
x = x + pos_emd
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
hidden = self.ln_final(x).type(self.dtype) @ self.text_projection
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = hidden[torch.arange(hidden.shape[0]), text.argmax(dim=-1)]
if return_hidden:
return x, hidden
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
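# A small sketch of the two text-side details above, using a toy context length and
# made-up token ids: build_attention_mask produces an additive causal mask, and
# encode_text pools the position of the largest token id (the end-of-text token).
def _text_mask_and_eot_sketch():
    import torch
    mask = CLIP.build_attention_mask(None, context_length=4)  # self is unused by the method
    assert torch.isinf(mask[0, 1]) and mask[1, 0] == 0 and mask[2, 2] == 0
    toy_text = torch.tensor([[1, 5, 49407, 0]])  # 49407 stands in for the eot token id
    eot_index = toy_text.argmax(dim=-1)          # the highest id marks the eot position
    assert int(eot_index) == 2
    return mask, eot_index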
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
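# A quick check of what convert_weights touches, using a made-up toy module: Linear /
# Conv / MultiheadAttention weights (plus any "proj" / "text_projection" attributes)
# become fp16, while e.g. LayerNorm parameters stay in fp32.
def _convert_weights_sketch():
    import torch
    from torch import nn
    toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    convert_weights(toy)
    assert toy[0].weight.dtype == torch.float16  # Linear weight converted
    assert toy[1].weight.dtype == torch.float32  # LayerNorm weight untouched
    return toy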
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
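# The shape arithmetic used by build_model, spelled out for a ViT-B/32-style checkpoint;
# the tensor shapes below are assumptions chosen to match that case, not values read
# from an actual state dict.
def _vit_b32_shape_inference_sketch():
    conv1_shape = (768, 3, 32, 32)  # visual.conv1.weight: [width, 3, patch, patch]
    pos_embed_rows = 50             # visual.positional_embedding: grid**2 + 1 rows
    vision_width = conv1_shape[0]                     # 768
    vision_patch_size = conv1_shape[-1]               # 32
    grid_size = round((pos_embed_rows - 1) ** 0.5)    # 7
    image_resolution = vision_patch_size * grid_size  # 224
    return vision_width, vision_patch_size, grid_size, image_resolution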
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/module_clip.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
from torch import nn
import torch.nn.functional as F
from modules.until_module import PreTrainedModel, AllGather, CrossEn
from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip
from modules.module_clip import CLIP, convert_weights
from modules import clip_evl
from modules import clip_kc
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from ipdb import set_trace
logger = logging.getLogger(__name__)
allgather = AllGather.apply
class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, cross_config, *inputs, **kwargs):
super(CLIP4ClipPreTrainedModel, self).__init__(cross_config)
self.cross_config = cross_config
self.clip = None
self.cross = None
@classmethod
def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None: state_dict = {}
pretrained_clip_name = "ViT-B/32"
if hasattr(task_config, 'pretrained_clip_name'):
pretrained_clip_name = task_config.pretrained_clip_name
clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name)
# set_trace()
############# add ################
clip_state_dict = clip_state_dict['state_dict'] if 'state_dict' in clip_state_dict else clip_state_dict
for key, val in clip_state_dict.items():
if key not in state_dict:
state_dict[key] = val.clone()
new_key = key.replace('clip.', '')
if new_key not in state_dict:
state_dict[new_key] = val.clone()
############## add ####################
for key, val in clip_state_dict.items():
new_key = "clip." + key
if new_key not in state_dict:
state_dict[new_key] = val.clone()
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
model = cls(cross_config, clip_state_dict, *inputs, **kwargs)
## ===> Initialization trick [HARD CODE]
if model.linear_patch == "3d":
contain_conv2 = False
for key in state_dict.keys():
if key.find("visual.conv2.weight") > -1:
contain_conv2 = True
break
if contain_conv2 is False and hasattr(model.clip.visual, "conv2"):
cp_weight = state_dict["clip.visual.conv1.weight"].clone()
kernel_size = model.clip.visual.conv2.weight.size(2)
conv2_size = model.clip.visual.conv2.weight.size()
conv2_size = list(conv2_size)
left_conv2_size = conv2_size.copy()
right_conv2_size = conv2_size.copy()
left_conv2_size[2] = (kernel_size - 1) // 2
right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2]
left_zeros, right_zeros = None, None
if left_conv2_size[2] > 0:
left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
if right_conv2_size[2] > 0:
right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
cat_list = []
                if left_zeros is not None: cat_list.append(left_zeros)
                cat_list.append(cp_weight.unsqueeze(2))
                if right_zeros is not None: cat_list.append(right_zeros)
cp_weight = torch.cat(cat_list, dim=2)
state_dict["clip.visual.conv2.weight"] = cp_weight
if model.sim_header == 'tightTransf':
contain_cross = False
for key in state_dict.keys():
if key.find("cross.transformer") > -1:
contain_cross = True
break
if contain_cross is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["cross.embeddings.position_embeddings.weight"] = val.clone()
continue
if key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict["cross."+key] = val.clone()
continue
if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf":
contain_frame_position = False
for key in state_dict.keys():
if key.find("frame_position_embeddings") > -1:
contain_frame_position = True
break
if contain_frame_position is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["frame_position_embeddings.weight"] = val.clone()
continue
if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict[key.replace("transformer.", "transformerClip.")] = val.clone()
continue
## <=== End of initialization trick
if state_dict is not None:
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
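# The 2D-to-3D "initialization trick" above, reproduced standalone with assumed shapes:
# the pretrained conv1 kernel is placed in the centre temporal slice of the conv2 kernel
# and the other slices are zero, so the inflated Conv3d initially matches the Conv2d
# applied frame by frame.
def _inflate_conv1_to_conv2_sketch():
    import torch
    cp_weight = torch.randn(768, 3, 32, 32)  # stand-in for clip.visual.conv1.weight
    kernel_size = 3                           # temporal extent of conv2
    left = (kernel_size - 1) // 2
    right = kernel_size - 1 - left
    pieces = []
    if left > 0:
        pieces.append(torch.zeros(768, 3, left, 32, 32))
    pieces.append(cp_weight.unsqueeze(2))     # centre temporal slice
    if right > 0:
        pieces.append(torch.zeros(768, 3, right, 32, 32))
    inflated = torch.cat(pieces, dim=2)
    assert inflated.shape == (768, 3, kernel_size, 32, 32)
    return inflated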
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
class dual_softmax_loss(nn.Module):
def __init__(self,):
super(dual_softmax_loss, self).__init__()
def forward(self, sim_matrix, temp=1000):
        sim_matrix = sim_matrix * F.softmax(sim_matrix / temp, dim=0) * len(sim_matrix)  # with an appropriate temperature, this dual-softmax re-weighting yields higher retrieval performance (see the sketch after this class)
logpt = F.log_softmax(sim_matrix, dim=-1)
logpt = torch.diag(logpt)
loss = -logpt
return loss
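# A toy run of dual_softmax_loss on a made-up 3x3 similarity matrix: the matrix is first
# re-weighted by a softmax along the opposite axis, then the usual diagonal negative
# log-softmax is taken, giving one loss value per query.
def _dual_softmax_loss_sketch():
    import torch
    sim = torch.tensor([[0.9, 0.1, 0.0],
                        [0.2, 0.8, 0.1],
                        [0.0, 0.3, 0.7]])
    loss = dual_softmax_loss()(sim)
    assert loss.shape == (3,)
    return loss.mean()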
class CLIP4Clip(CLIP4ClipPreTrainedModel):
def __init__(self, cross_config, clip_state_dict, task_config):
super(CLIP4Clip, self).__init__(cross_config)
self.task_config = task_config
self.ignore_video_index = -1
assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
self._stage_one = True
self._stage_two = False
show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
self.loose_type = False
if self._stage_one and check_attr('loose_type', self.task_config):
self.loose_type = True
show_log(task_config, "Test retrieval by loose type.")
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
##############add###################
if 'clip.visual.proj' in clip_state_dict:
new_dict = {}
for k, v in clip_state_dict.items():
new_k = k.replace('clip.', '')
new_dict[new_k] = v.clone()
clip_state_dict = new_dict
##############add###################
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in
[1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.linear_patch = '2d'
if hasattr(task_config, "linear_patch"):
self.linear_patch = task_config.linear_patch
show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch))
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
#set_trace()
if hasattr(task_config, "clip_evl") and task_config.clip_evl == True:
ckpt_path = '/mnt/cache/share_data/liyizhuo.vendor/projects/all-in-one/outputs/outputs_k400/models/clip_evl_contrastive_mlm_webvid_howto_k400_lr1e5/last.ckpt'
# ckpt_path = '/mnt/cache/share_data/liyizhuo.vendor/projects/all-in-one/outputs/outputs_k400/models/clip_evl_contrastive_webvid_howto_k400_lr1e5/last.ckpt'
self.clip, _ = clip_evl.load(ckpt_path, t_size=8)
#set_trace()
self.clip = self.clip.float()
self.clip_evl = True
#set_trace()
elif hasattr(task_config, "clip_kc") and task_config.clip_kc == True:
# ckpt_path = '/mnt/lustre/share_data/liyizhuo/projects/all-in-one/outputs/outputs_msrvtt_choice/models/clip_kc_contrastive_mlm_webvid_howto_msrvtt_choice_lr5e6_bz256_cosine_step25k_sanity/epoch=0-step=359.ckpt'
ckpt_path = '/mnt/lustre/share_data/liyizhuo.vendor/projects/all-in-one/outputs/outputs_msrvtt_choice/models/clip_kc_contrastive_webvid_howto_clip_lm_01_msrvtt_choice_lr5e6_bz1024_cosine_step100k_freeze_text_ced00/epoch=13-step=47028.ckpt'
self.clip, _ = clip_kc.load(ckpt_path)
self.clip = self.clip.float()
self.clip_evl = True ### also use temporal
else:
self.clip_evl = False
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer,
linear_patch=self.linear_patch
).float()
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
###################### Note this change ###############################
convert_weights(self.clip)
#######################################################################
# <=== End of CLIP Encoders
self.sim_header = 'meanP'
if hasattr(task_config, "sim_header"):
self.sim_header = task_config.sim_header
show_log(task_config, "\t sim_header: {}".format(self.sim_header))
if self.sim_header == "tightTransf": assert self.loose_type is False
cross_config.max_position_embeddings = context_length
if self.loose_type is False:
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers")
self.cross = CrossModel(cross_config)
# <=== End of Cross Encoder
self.similarity_dense = nn.Linear(cross_config.hidden_size, 1)
if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf":
self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size)
if self.sim_header == "seqTransf":
self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers,
heads=transformer_heads, )
if self.sim_header == "seqLSTM":
self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size,
batch_first=True, bidirectional=False, num_layers=1)
######## add wti #########
self.config.interaction = 'wti'
#self.config.interaction = 'no'
self.config.wti_arch = 2
self.config.cdcr = 0
self.config.dsl = False
if self.config.cdcr:
self.config.cdcr_alpha1 = 0.16
self.config.cdcr_alpha2 = 0
self.config.cdcr_lambda = 0.001
if self.config.interaction == 'wti' or self.config.interaction == 'ti':
if self.config.wti_arch == 1:
self.text_weight_fc = nn.Linear(transformer_width, 1)
self.video_weight_fc = nn.Linear(transformer_width, 1)
elif self.config.wti_arch == 2:
self.text_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.video_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
elif self.config.wti_arch == 3:
self.text_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.video_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.loss_fct = CrossEn()
self.loss_dsl = dual_softmax_loss()
############## this is important, has to be deactivated for clipevl ##############
if not self.clip_evl:
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None):
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
# T x 3 x H x W
# set_trace()
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
#set_trace()
sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
video, video_mask, shaped=True, video_frame=video_frame)
#set_trace()
if self.training:
loss = 0.
cdcr_loss = 0.
            if self.config.interaction != 'no' and self.config.cdcr:
sim_matrix, _, cdcr_loss = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
shaped=True, loose_type=self.loose_type)
else:
sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
shaped=True, loose_type=self.loose_type)
#sim_matrix = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
# shaped=True, loose_type=self.loose_type)
#set_trace()
sim_loss1 = self.loss_fct(sim_matrix)
sim_loss2 = self.loss_fct(sim_matrix.T)
sim_loss = (sim_loss1 + sim_loss2) / 2
loss += sim_loss
if self.config.dsl:
dsl_loss1 = self.loss_dsl(sim_matrix).mean()
dsl_loss2 = self.loss_dsl(sim_matrix.T).mean()
loss += (dsl_loss1 + dsl_loss2) / 2
if self.config.cdcr:
loss += self.config.cdcr_lambda * cdcr_loss
return loss
else:
return None
def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
bs_pair = input_ids.size(0)
### for naive
# sequence_hidden = self.clip.encode_text(input_ids).float()
### for wti
if self.config.interaction == 'wti' or self.config.interaction == 'ti':
if self.clip_evl:
sequence_hidden = self.clip.encode_text(input_ids, return_all_feats=True)[1].float()
else:
sequence_hidden = self.clip.encode_text(input_ids, return_hidden=True)[1].float()
else:
#set_trace()
sequence_hidden = self.clip.encode_text(input_ids).float()
#set_trace()
sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1))
return sequence_hidden
def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
bs_pair = video_mask.size(0)
#set_trace()
if self.clip_evl:
visual_hidden = self.clip.encode_video(video).float()
else:
visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float()
visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1))
#set_trace()
return visual_hidden
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True)
visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame)
return sequence_output, visual_output
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
        concat_features = torch.cat((sequence_output, visual_output), dim=1)  # concatenate text tokens and video frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,):
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask)
video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
return text_out, video_out
    def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP", use_dsl=True):
        # use_dsl is accepted for interface compatibility with get_similarity_logits; the DSL re-weighting below is kept commented out
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
if sim_header == "meanP":
# Default: Parameter-free type
pass
elif sim_header == "seqLSTM":
# Sequential type: LSTM
visual_output_original = visual_output
visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(),
batch_first=True, enforce_sorted=False)
visual_output, _ = self.lstm_visual(visual_output)
if self.training: self.lstm_visual.flatten_parameters()
visual_output, _ = pad_packed_sequence(visual_output, batch_first=True)
visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1)
visual_output = visual_output + visual_output_original
elif sim_header == "seqTransf":
# Sequential type: Transformer Encoder
visual_output_original = visual_output
seq_length = visual_output.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device)
position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1)
frame_position_embeddings = self.frame_position_embeddings(position_ids)
visual_output = visual_output + frame_position_embeddings
extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0
extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1)
visual_output = visual_output.permute(1, 0, 2) # NLD -> LND
visual_output = self.transformerClip(visual_output, extended_video_mask)
visual_output = visual_output.permute(1, 0, 2) # LND -> NLD
visual_output = visual_output + visual_output_original
if self.training:
visual_output = allgather(visual_output, self.task_config)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config)
torch.distributed.barrier()
visual_output = visual_output.squeeze(1)
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
        #### no mean pooling over frames is needed for evl and kc
#set_trace()
# if not self.clip_evl:
# visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
# visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
# else:
# visual_output = visual_output.squeeze(1)
#set_trace()
sequence_output = sequence_output.squeeze(1)
sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True)
logit_scale = self.clip.logit_scale.exp()
# if self.config.dsl:
# # temp = 1 is best for zero-shot inference
# temp = 1000
# retrieve_logits = torch.matmul(sequence_output, visual_output.t())
# retrieve_logits = retrieve_logits * F.softmax(retrieve_logits/temp, dim=0) * len(retrieve_logits)
# # retrieve_logits = retrieve_logits * F.softmax(retrieve_logits/temp, dim=1) * len(retrieve_logits)
# # retrieve_logits = F.softmax(retrieve_logits/temp, dim=1) * F.softmax(retrieve_logits/temp, dim=0) * len(retrieve_logits) * len(retrieve_logits)
# retrieve_logits = logit_scale * retrieve_logits
# else:
# retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
return retrieve_logits
def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
b_text, s_text, h_text = sequence_output.size()
b_visual, s_visual, h_visual = visual_output.size()
retrieve_logits_list = []
step_size = b_text # set smaller to reduce memory cost
split_size = [step_size] * (b_text // step_size)
release_size = b_text - sum(split_size)
if release_size > 0:
split_size += [release_size]
        # the CLIP text branch returns only the last hidden state, so the text attention mask collapses to a single token
attention_mask = torch.ones(sequence_output.size(0), 1)\
.to(device=attention_mask.device, dtype=attention_mask.dtype)
sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
for i in range(len(split_size)):
sequence_output_row = sequence_output_splits[i]
attention_mask_row = attention_mask_splits[i]
sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
attention_mask_l = attention_mask_l.view(-1, s_text)
step_truth = sequence_output_row.size(0)
visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
video_mask_r = video_mask_r.view(-1, s_visual)
cross_output, pooled_output, concat_mask = \
self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
retrieve_logits_list.append(retrieve_logits_row)
retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False, use_dsl=True):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
contrastive_direction = ()
### add wti ###
if self.config.interaction == 'wti' or self.config.interaction == 'ti':
if self.config.cdcr == 0:
retrieve_logits, _, _ = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask)
return retrieve_logits, contrastive_direction
# return retrieve_logits
else:
retrieve_logits, _, cdcr_loss = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask)
return retrieve_logits, contrastive_direction, cdcr_loss
#set_trace()
################
if loose_type:
assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"]
retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header, use_dsl=use_dsl)
else:
assert self.sim_header in ["tightTransf"]
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, )
#set_trace()
return retrieve_logits, contrastive_direction
# return retrieve_logits
def wti_interaction(self, text_feat, video_feat, text_mask, video_mask):
#set_trace()
text_feat, video_feat = text_feat.contiguous(), video_feat.contiguous()
if self.training and torch.cuda.is_available(): # batch merge here
# text_feat = allgather(text_feat, self.config)
# video_feat = allgather(video_feat, self.config)
# text_mask = allgather(text_mask, self.config)
# video_mask = allgather(video_mask, self.config)
text_feat = allgather(text_feat, self.task_config)
video_feat = allgather(video_feat, self.task_config)
text_mask = allgather(text_mask, self.task_config)
video_mask = allgather(video_mask, self.task_config)
torch.distributed.barrier() # force sync
if self.config.interaction == 'wti':
text_weight = self.text_weight_fc(text_feat).squeeze(2) # B x N_t x D -> B x N_t
text_weight.masked_fill_(torch.tensor((1 - text_mask), dtype=torch.bool), float("-inf"))
text_weight = torch.softmax(text_weight, dim=-1) # B x N_t
video_weight = self.video_weight_fc(video_feat).squeeze(2) # B x N_v x D -> B x N_v
video_weight.masked_fill_(torch.tensor((1 - video_mask), dtype=torch.bool), float("-inf"))
video_weight = torch.softmax(video_weight, dim=-1) # B x N_v
text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True)
video_feat = video_feat / video_feat.norm(dim=-1, keepdim=True)
retrieve_logits = torch.einsum('atd,bvd->abtv', [text_feat, video_feat])
retrieve_logits = torch.einsum('abtv,at->abtv', [retrieve_logits, text_mask])
retrieve_logits = torch.einsum('abtv,bv->abtv', [retrieve_logits, video_mask])
text_sum = text_mask.sum(-1)
video_sum = video_mask.sum(-1)
# max for video token
if self.config.interaction == 'ti': # token-wise interaction
t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt
v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv
t2v_logits = torch.sum(t2v_logits, dim=2) / (text_sum.unsqueeze(1))
v2t_logits = torch.sum(v2t_logits, dim=2) / (video_sum.unsqueeze(0))
retrieve_logits = (t2v_logits + v2t_logits) / 2.0
elif self.config.interaction == 'wti': # weighted token-wise interaction
t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt
t2v_logits = torch.einsum('abt,at->ab', [t2v_logits, text_weight])
v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv
v2t_logits = torch.einsum('abv,bv->ab', [v2t_logits, video_weight])
retrieve_logits = (t2v_logits + v2t_logits) / 2.0
if self.training:
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * retrieve_logits
if self.config.cdcr == 1:
# simple random
_text_feat = text_feat[torch.arange(text_feat.shape[0]),
torch.randint_like(text_sum, 0, 10000) % text_sum, :]
_video_feat = video_feat[torch.arange(video_feat.shape[0]),
torch.randint_like(video_sum, 0, 10000) % video_sum, :]
z_a_norm = (_text_feat - _text_feat.mean(0)) / _text_feat.std(0) # NxN_sxD
z_b_norm = (_video_feat - _video_feat.mean(0)) / _video_feat.std(0) # NxN_txD
# cross-correlation matrix
B, D = z_a_norm.shape
c = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / B # DxD
# loss
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum()
cdcr_loss = (on_diag * self.config.cdcr_alpha1 + off_diag * self.config.cdcr_alpha2)
return retrieve_logits, retrieve_logits.T, cdcr_loss
elif self.config.cdcr == 2:
                # select the max-matching counterpart token for each token
max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])]
max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])]
max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]),
max_idx2.flatten()]
max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]),
max_idx1.flatten()]
t_feat = text_feat.reshape(-1, text_feat.shape[-1])
t_mask = text_mask.flatten().type(torch.bool)
v_feat = video_feat.reshape(-1, text_feat.shape[-1])
v_mask = video_mask.flatten().type(torch.bool)
t_feat = t_feat[t_mask]
v_feat = v_feat[v_mask]
max_t_feat = max_t_feat[v_mask]
max_v_feat = max_v_feat[t_mask]
z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD
z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD
x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD
x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD
# cross-correlation matrix
N, D = z_a_norm.shape
c1 = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / N # DxD
N, D = x_a_norm.shape
c2 = torch.einsum('ac,ad->cd', x_a_norm, x_b_norm) / N # DxD
c = (c1 + c2) / 2.0
# loss
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum()
cdcr_loss = (on_diag * self.config.cdcr_alpha1 + off_diag * self.config.cdcr_alpha2)
return retrieve_logits, retrieve_logits.T, cdcr_loss
elif self.config.cdcr == 3:
                # select the max-matching counterpart token for each token
max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])]
max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])]
max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]),
max_idx2.flatten()].squeeze(1)
max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]),
max_idx1.flatten()].squeeze(1)
t_feat = text_feat.reshape(-1, text_feat.shape[-1])
t_mask = text_mask.flatten().type(torch.bool)
v_feat = video_feat.reshape(-1, video_feat.shape[-1])
v_mask = video_mask.flatten().type(torch.bool)
t_feat = t_feat[t_mask]
v_feat = v_feat[v_mask]
max_t_feat = max_t_feat[v_mask]
max_v_feat = max_v_feat[t_mask]
text_weight = text_weight.flatten()[t_mask]
video_weight = video_weight.flatten()[v_mask]
z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD
z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD
x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD
x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD
# cross-correlation matrix
N, D = z_a_norm.shape
B = text_feat.shape[0]
c1 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', z_a_norm, z_b_norm),
text_weight) / B # DxD
c2 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', x_a_norm, x_b_norm),
video_weight) / B # DxD
c = (c1 + c2) / 2.0
# loss
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum()
cdcr_loss = (on_diag * self.config.cdcr_alpha1 + off_diag * self.config.cdcr_alpha2)
return retrieve_logits, retrieve_logits.T, cdcr_loss
else:
return retrieve_logits, retrieve_logits.T, 0.0
else:
            return retrieve_logits, retrieve_logits.T, 0.0
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/modeling_backup.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
from collections import OrderedDict
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'cross_config.json'
WEIGHTS_NAME = 'cross_pytorch_model.bin'
class CrossConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `CrossModel`.
"""
pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
config_name = CONFIG_NAME
weights_name = WEIGHTS_NAME
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs CrossConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `CrossModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`CrossModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.n_head = n_head
def attention(self, x: torch.Tensor, attn_mask: torch.Tensor):
attn_mask_ = attn_mask.repeat_interleave(self.n_head, dim=0)
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]
def forward(self, para_tuple: tuple):
# x: torch.Tensor, attn_mask: torch.Tensor
# print(para_tuple)
x, attn_mask = para_tuple
x = x + self.attention(self.ln_1(x), attn_mask)
x = x + self.mlp(self.ln_2(x))
return (x, attn_mask)
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads) for _ in range(layers)])
def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
return self.resblocks((x, attn_mask))[0]
class CrossEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(CrossEmbeddings, self).__init__()
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, concat_embeddings, concat_type=None):
batch_size, seq_length = concat_embeddings.size(0), concat_embeddings.size(1)
# if concat_type is None:
# concat_type = torch.zeros(batch_size, concat_type).to(concat_embeddings.device)
position_ids = torch.arange(seq_length, dtype=torch.long, device=concat_embeddings.device)
position_ids = position_ids.unsqueeze(0).expand(concat_embeddings.size(0), -1)
# token_type_embeddings = self.token_type_embeddings(concat_type)
position_embeddings = self.position_embeddings(position_ids)
embeddings = concat_embeddings + position_embeddings # + token_type_embeddings
# embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class CrossPooler(nn.Module):
def __init__(self, config):
super(CrossPooler, self).__init__()
self.ln_pool = LayerNorm(config.hidden_size)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = QuickGELU()
def forward(self, hidden_states, hidden_mask):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
hidden_states = self.ln_pool(hidden_states)
pooled_output = hidden_states[:, 0]
pooled_output = self.dense(pooled_output)
pooled_output = self.activation(pooled_output)
return pooled_output
class CrossModel(PreTrainedModel):
def initialize_parameters(self):
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
def __init__(self, config):
super(CrossModel, self).__init__(config)
self.embeddings = CrossEmbeddings(config)
transformer_width = config.hidden_size
transformer_layers = config.num_hidden_layers
transformer_heads = config.num_attention_heads
self.transformer = Transformer(width=transformer_width, layers=transformer_layers, heads=transformer_heads,)
self.pooler = CrossPooler(config)
self.apply(self.init_weights)
def build_attention_mask(self, attention_mask):
extended_attention_mask = attention_mask.unsqueeze(1)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -1000000.0
extended_attention_mask = extended_attention_mask.expand(-1, attention_mask.size(1), -1)
return extended_attention_mask
def forward(self, concat_input, concat_type=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones(concat_input.size(0), concat_input.size(1))
if concat_type is None:
concat_type = torch.zeros_like(attention_mask)
extended_attention_mask = self.build_attention_mask(attention_mask)
embedding_output = self.embeddings(concat_input, concat_type)
embedding_output = embedding_output.permute(1, 0, 2) # NLD -> LND
embedding_output = self.transformer(embedding_output, extended_attention_mask)
embedding_output = embedding_output.permute(1, 0, 2) # LND -> NLD
pooled_output = self.pooler(embedding_output, hidden_mask=attention_mask)
return embedding_output, pooled_output
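# A minimal CrossModel usage sketch; the config values and feature shapes below are
# assumptions for illustration and do not come from the shipped cross_config.json.
def _cross_model_sketch():
    import torch
    config = CrossConfig(vocab_size_or_config_json_file=512, hidden_size=512,
                         num_hidden_layers=2, num_attention_heads=8,
                         max_position_embeddings=128)
    model = CrossModel(config)
    feats = torch.randn(2, 10, 512)  # e.g. 6 text tokens + 4 frame features, already concatenated
    mask = torch.ones(2, 10)
    hidden, pooled = model(feats, attention_mask=mask)
    assert hidden.shape == (2, 10, 512) and pooled.shape == (2, 512)
    return pooled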
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/module_cross.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import math
from modules.until_config import PretrainedConfig
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
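# A quick numeric check, on a handful of points, that the erf-based gelu above agrees
# with the tanh approximation quoted in its docstring to within about 1e-3.
def _gelu_approximation_check():
    import math
    import torch
    x = torch.linspace(-3, 3, steps=7)
    approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    assert torch.allclose(gelu(x), approx, atol=1e-3)
    return gelu(x)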
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
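# A small equivalence check: with matching epsilon and default affine parameters, the
# TF-style LayerNorm above matches torch.nn.LayerNorm numerically on an arbitrary tensor.
def _layernorm_equivalence_check():
    import torch
    x = torch.randn(2, 5, 16)
    ours = LayerNorm(16, eps=1e-12)
    ref = torch.nn.LayerNorm(16, eps=1e-12)
    assert torch.allclose(ours(x), ref(x), atol=1e-5)
    return ours(x)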
class PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
if 'beta' in dir(module) and 'gamma' in dir(module):
module.beta.data.zero_()
module.gamma.data.fill_(1.0)
else:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def resize_token_embeddings(self, new_num_tokens=None):
raise NotImplementedError
@classmethod
def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
if prefix is not None:
old_keys = []
new_keys = []
for key in state_dict.keys():
old_keys.append(key)
new_keys.append(prefix + key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='')
if prefix is None and (task_config is None or task_config.local_rank == 0):
logger.info("-" * 20)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(missing_keys)))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(unexpected_keys)))
if len(error_msgs) > 0:
logger.error("Weights from pretrained model cause errors in {}: {}"
.format(model.__class__.__name__, "\n " + "\n ".join(error_msgs)))
return model
@property
def dtype(self):
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module):
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
@classmethod
def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
"""
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
return model
model = cls.init_preweight(model, state_dict)
return model
##################################
###### LOSS FUNCTION #############
##################################
class CrossEn(nn.Module):
def __init__(self,):
super(CrossEn, self).__init__()
def forward(self, sim_matrix):
logpt = F.log_softmax(sim_matrix, dim=-1)
logpt = torch.diag(logpt)
nce_loss = -logpt
sim_loss = nce_loss.mean()
return sim_loss
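# A toy run of CrossEn, the contrastive cross-entropy applied to the text-video similarity
# matrix (rows: texts, columns: videos, matches on the diagonal); the 2x2 matrix below is
# made up to show that a diagonal-dominant matrix yields a near-zero loss.
def _cross_en_sketch():
    import torch
    sim = torch.tensor([[10.0, 0.0], [0.0, 10.0]])  # near-perfect retrieval scores
    loss = CrossEn()(sim)
    assert loss < 1e-3
    return loss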
class MILNCELoss(nn.Module):
def __init__(self, batch_size=1, n_pair=1,):
super(MILNCELoss, self).__init__()
self.batch_size = batch_size
self.n_pair = n_pair
        torch_v = tuple(int(v) for v in torch.__version__.split("+")[0].split(".")[:2])
        self.bool_dtype = torch.bool if torch_v >= (1, 3) else torch.uint8
def forward(self, sim_matrix):
mm_mask = np.eye(self.batch_size)
mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
from_text_matrix = sim_matrix + mm_mask * -1e12
from_video_matrix = sim_matrix.transpose(1, 0)
new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1)
logpt = F.log_softmax(new_sim_matrix, dim=-1)
mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1)
masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * -1e12
new_logpt = -torch.logsumexp(masked_logpt, dim=-1)
logpt_choice = torch.zeros_like(new_logpt)
mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair//2)
logpt_choice[mark_ind] = 1
sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
return sim_loss
class MaxMarginRankingLoss(nn.Module):
def __init__(self,
margin=1.0,
negative_weighting=False,
batch_size=1,
n_pair=1,
hard_negative_rate=0.5,
):
super(MaxMarginRankingLoss, self).__init__()
self.margin = margin
self.n_pair = n_pair
self.batch_size = batch_size
easy_negative_rate = 1 - hard_negative_rate
self.easy_negative_rate = easy_negative_rate
self.negative_weighting = negative_weighting
if n_pair > 1 and batch_size > 1:
alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
mm_mask = torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))
self.mm_mask = mm_mask.float()
def forward(self, x):
d = torch.diag(x)
max_margin = F.relu(self.margin + x - d.view(-1, 1)) + \
F.relu(self.margin + x - d.view(1, -1))
if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
max_margin = max_margin * self.mm_mask.to(max_margin.device)
return max_margin.mean()
class AllGather(torch.autograd.Function):
"""An autograd function that performs allgather on a tensor."""
@staticmethod
def forward(ctx, tensor, args):
output = [torch.empty_like(tensor) for _ in range(args.world_size)]
torch.distributed.all_gather(output, tensor)
ctx.rank = args.rank
ctx.batch_size = tensor.shape[0]
return torch.cat(output, dim=0)
@staticmethod
def backward(ctx, grad_output):
return (
grad_output[ctx.batch_size * ctx.rank : ctx.batch_size * (ctx.rank + 1)],
None,
)
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/until_module.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
    """ Linearly increases the learning rate over `warmup`*`t_total` training steps,
    then decays it following 0.5 * (1 + cos(pi * x)), reaching 0 at the end of training. """
    if x < warmup:
        return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
""" Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.
Learning rate is 1. afterwards. """
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
""" Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
After `t_total`-th training step, learning rate is zero. """
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
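# Illustrative sketch (editorial addition, not part of the original file):
# each schedule maps training progress x = step / t_total to a multiplier on the
# base learning rate; e.g. warmup_linear ramps up to 1.0 at x == warmup and then
# decays linearly to 0.0 at x == 1.0.
def _demo_schedules(warmup=0.1):
    for name, schedule_fct in SCHEDULES.items():
        multipliers = [round(schedule_fct(x, warmup), 3) for x in (0.05, 0.1, 0.5, 1.0)]
        print(name, multipliers)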
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
# next_m.mul_(beta1).add_(1 - beta1, grad) --> pytorch 1.7
next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
# next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) --> pytorch 1.7
next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
progress = state['step']/group['t_total']
lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
return loss | InternVideo-main | Downstream/Video-Text-Retrieval/modules/optimization.py |
InternVideo-main | Downstream/Video-Text-Retrieval/modules/__init__.py |
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import logging
import tarfile
import tempfile
import shutil
import torch
from .file_utils import cached_path
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
pretrained_model_archive_map = {}
config_name = ""
weights_name = ""
@classmethod
def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
if os.path.exists(archive_file) is False:
if pretrained_model_name in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
if task_config is None or task_config.local_rank == 0:
logger.error(
"Model name '{}' was not found in model name list. "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
archive_file))
return None
if resolved_archive_file == archive_file:
if task_config is None or task_config.local_rank == 0:
logger.info("loading archive file {}".format(archive_file))
else:
if task_config is None or task_config.local_rank == 0:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
if task_config is None or task_config.local_rank == 0:
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, cls.config_name)
config = cls.from_json_file(config_file)
config.type_vocab_size = type_vocab_size
if task_config is None or task_config.local_rank == 0:
logger.info("Model config {}".format(config))
if state_dict is None:
weights_path = os.path.join(serialization_dir, cls.weights_name)
if os.path.exists(weights_path):
state_dict = torch.load(weights_path, map_location='cpu')
else:
if task_config is None or task_config.local_rank == 0:
logger.info("Weight doesn't exsits. {}".format(weights_path))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return config, state_dict
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" | InternVideo-main | Downstream/Video-Text-Retrieval/modules/until_config.py |
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
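# Illustrative sketch (editorial addition, not part of the original file):
# the cache filename is simply the SHA-256 hash of the URL, optionally suffixed
# with the hash of the ETag, so a changed remote resource gets a new cache entry.
def _demo_url_to_filename():
    name = url_to_filename("https://example.com/model.tar.gz", etag='"abc123"')
    assert "." in name  # the etag hash is appended after a period
    return name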
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
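# Illustrative sketch (editorial addition, not part of the original file):
# existing local paths are returned unchanged, while http(s)/s3 URLs go through
# the download cache in get_from_cache() below.
def _demo_cached_path():
    # an existing local file resolves to itself without touching the network
    assert cached_path(__file__) == __file__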
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/file_utils.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
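# Illustrative sketch (editorial addition, not part of the original file):
# the mapping above is a bijection between the 256 byte values and 256 printable
# unicode characters, so any UTF-8 byte sequence can be round-tripped without
# mapping onto whitespace or control characters.
def _demo_bytes_to_unicode():
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    assert len(byte_encoder) == 256
    assert all(byte_decoder[c] == b for b, c in byte_encoder.items())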
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
self.vocab = self.encoder
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def tokenize(self, text):
tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
return tokens
def convert_tokens_to_ids(self, tokens):
return [self.encoder[bpe_token] for bpe_token in tokens] | InternVideo-main | Downstream/Video-Text-Retrieval/modules/tokenization_clip.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
from torch import nn
import torch.nn.functional as F
from modules.until_module import PreTrainedModel, AllGather, CrossEn
from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip
from modules.module_clip import CLIP, convert_weights
from modules import clip_evl
from modules.clip_evl.model_no_freeze_only_global import vit_only_global_l_sparse8_k400, vit_only_global_b_sparse8_k400
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
logger = logging.getLogger(__name__)
allgather = AllGather.apply
from einops import rearrange
class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, cross_config, *inputs, **kwargs):
super(CLIP4ClipPreTrainedModel, self).__init__(cross_config)
self.cross_config = cross_config
self.clip = None
self.cross = None
@classmethod
def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None: state_dict = {}
pretrained_clip_name = "ViT-B/32"
if hasattr(task_config, 'pretrained_clip_name'):
pretrained_clip_name = task_config.pretrained_clip_name
clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name)
############# add ################
clip_state_dict = clip_state_dict['state_dict'] if 'state_dict' in clip_state_dict else clip_state_dict
for key, val in clip_state_dict.items():
if key not in state_dict:
state_dict[key] = val.clone()
new_key = key.replace('clip.', '')
if new_key not in state_dict:
state_dict[new_key] = val.clone()
############## add ####################
for key, val in clip_state_dict.items():
new_key = "clip." + key
if new_key not in state_dict:
state_dict[new_key] = val.clone()
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
model = cls(cross_config, clip_state_dict, *inputs, **kwargs)
## ===> Initialization trick [HARD CODE]
if model.linear_patch == "3d":
contain_conv2 = False
for key in state_dict.keys():
if key.find("visual.conv2.weight") > -1:
contain_conv2 = True
break
if contain_conv2 is False and hasattr(model.clip.visual, "conv2"):
cp_weight = state_dict["clip.visual.conv1.weight"].clone()
kernel_size = model.clip.visual.conv2.weight.size(2)
conv2_size = model.clip.visual.conv2.weight.size()
conv2_size = list(conv2_size)
left_conv2_size = conv2_size.copy()
right_conv2_size = conv2_size.copy()
left_conv2_size[2] = (kernel_size - 1) // 2
right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2]
left_zeros, right_zeros = None, None
if left_conv2_size[2] > 0:
left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
if right_conv2_size[2] > 0:
right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
cat_list = []
if left_zeros != None: cat_list.append(left_zeros)
cat_list.append(cp_weight.unsqueeze(2))
if right_zeros != None: cat_list.append(right_zeros)
cp_weight = torch.cat(cat_list, dim=2)
state_dict["clip.visual.conv2.weight"] = cp_weight
if model.sim_header == 'tightTransf':
contain_cross = False
for key in state_dict.keys():
if key.find("cross.transformer") > -1:
contain_cross = True
break
if contain_cross is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["cross.embeddings.position_embeddings.weight"] = val.clone()
continue
if key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict["cross."+key] = val.clone()
continue
if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf":
contain_frame_position = False
for key in state_dict.keys():
if key.find("frame_position_embeddings") > -1:
contain_frame_position = True
break
if contain_frame_position is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["frame_position_embeddings.weight"] = val.clone()
continue
if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict[key.replace("transformer.", "transformerClip.")] = val.clone()
continue
## <=== End of initialization trick
############ evl model should not use this #############
if state_dict is not None and task_config.clip_evl is False:
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
class dual_softmax_loss(nn.Module):
def __init__(self,):
super(dual_softmax_loss, self).__init__()
def forward(self, sim_matrix, temp=1000):
        sim_matrix = sim_matrix * F.softmax(sim_matrix/temp, dim=0) * len(sim_matrix)  # with an appropriate temperature parameter, the model achieves higher performance
logpt = F.log_softmax(sim_matrix, dim=-1)
logpt = torch.diag(logpt)
loss = -logpt
return loss
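# Illustrative usage sketch (editorial addition, not part of the original file):
# dual_softmax_loss re-weights each similarity score by a softmax taken over the
# opposite retrieval direction before the usual InfoNCE term; note that forward()
# returns a per-sample loss vector, so callers typically reduce it with .mean().
def _demo_dual_softmax_loss():
    loss_fct = dual_softmax_loss()
    sim_matrix = torch.randn(4, 4)  # hypothetical text-video similarity matrix
    return loss_fct(sim_matrix).mean()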
class CLIP4Clip(CLIP4ClipPreTrainedModel):
def __init__(self, cross_config, clip_state_dict, task_config):
super(CLIP4Clip, self).__init__(cross_config)
self.task_config = task_config
self.ignore_video_index = -1
### comment this for now
# assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
self._stage_one = True
self._stage_two = False
show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
self.loose_type = False
if self._stage_one and check_attr('loose_type', self.task_config):
self.loose_type = True
show_log(task_config, "Test retrieval by loose type.")
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
##############add###################
if 'clip.visual.proj' in clip_state_dict:
new_dict = {}
for k, v in clip_state_dict.items():
new_k = k.replace('clip.', '')
new_dict[new_k] = v.clone()
clip_state_dict = new_dict
##############add###################
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in
[1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.linear_patch = '2d'
if hasattr(task_config, "linear_patch"):
self.linear_patch = task_config.linear_patch
show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch))
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
#### paramters for DRL, we do not use them for now ####
self.interaction = task_config.interaction if hasattr(task_config, 'interaction') else 'no'
self.wti_arch = task_config.wti_arch if hasattr(task_config, 'wti_arch') else 0
self.cdcr = task_config.cdcr if hasattr(task_config, 'cdcr') else 0
if hasattr(task_config, "clip_evl") and task_config.clip_evl == True:
self.clip, _ = clip_evl.load(task_config.pretrained_path, t_size=task_config.max_frames, mergeclip=task_config.mergeclip, mergeweight=task_config.mergeweight, clip_state_dict=clip_state_dict)
self.clip = self.clip.float()
self.clip_evl = True
else:
self.clip_evl = False
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer,
linear_patch=self.linear_patch
).float()
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
###################### Note this change ###############################
if not self.clip_evl:
convert_weights(self.clip)
#######################################################################
# <=== End of CLIP Encoders
self.sim_header = 'meanP'
if hasattr(task_config, "sim_header"):
self.sim_header = task_config.sim_header
show_log(task_config, "\t sim_header: {}".format(self.sim_header))
if self.sim_header == "tightTransf": assert self.loose_type is False
cross_config.max_position_embeddings = context_length
if self.loose_type is False:
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers")
self.cross = CrossModel(cross_config)
# <=== End of Cross Encoder
self.similarity_dense = nn.Linear(cross_config.hidden_size, 1)
if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf":
self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size)
if self.sim_header == "seqTransf":
self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers,
heads=transformer_heads, )
if self.sim_header == "seqLSTM":
self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size,
batch_first=True, bidirectional=False, num_layers=1)
######## add wti #########
if self.cdcr:
self.cdcr_alpha1 = 0.16
self.cdcr_alpha2 = 0
self.cdcr_lambda = 0.001
if self.interaction == 'wti' or self.interaction == 'ti':
if self.wti_arch == 1:
self.text_weight_fc = nn.Linear(transformer_width, 1)
self.video_weight_fc = nn.Linear(transformer_width, 1)
elif self.wti_arch == 2:
self.text_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.video_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
elif self.wti_arch == 3:
self.text_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.video_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.loss_fct = CrossEn()
self.loss_dsl = dual_softmax_loss()
############## this has to be deactivated for clipevl ##############
if not self.clip_evl:
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None):
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
# T x 3 x H x W
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
video, video_mask, shaped=True, video_frame=video_frame)
if self.training:
loss = 0.
cdcr_loss = 0.
            if self.interaction != 'no' and self.cdcr:  # check the `interaction` attribute, not the wti_interaction method
sim_matrix, _, cdcr_loss = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
shaped=True, loose_type=self.loose_type)
else:
sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
shaped=True, loose_type=self.loose_type)
sim_loss1 = self.loss_fct(sim_matrix)
sim_loss2 = self.loss_fct(sim_matrix.T)
sim_loss = (sim_loss1 + sim_loss2) / 2
loss += sim_loss
if self.cdcr:
loss += self.cdcr_lambda * cdcr_loss
return loss
else:
return None
def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
bs_pair = input_ids.size(0)
### for naive
# sequence_hidden = self.clip.encode_text(input_ids).float()
### for wti
if self.interaction == 'wti' or self.interaction == 'ti':
if self.clip_evl:
sequence_hidden = self.clip.encode_text(input_ids, return_all_feats=True)[1].float()
else:
sequence_hidden = self.clip.encode_text(input_ids, return_hidden=True)[1].float()
else:
sequence_hidden = self.clip.encode_text(input_ids).float()
sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1))
return sequence_hidden
def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
bs_pair = video_mask.size(0)
if self.clip_evl:
if len(video.size()) == 4:
# [b, t, c, h, w]
video = video.view(bs_pair, -1, video.size(-3), video.size(-2), video.size(-1))
video = video.permute(0, 2, 1, 3, 4).contiguous()
# [N, 1, d], [L, N, T, d]
evl_output, visual_output = self.clip.encode_video(video, return_all_feats=True)
# visual_output = visual_output.float()
visual_hidden = evl_output.float()
if self.interaction == 'wti':
# [L, N, T, d2] -> [N, T, d2] -> [N, T, d]
visual_hidden = self.clip.visual_ln_post(visual_output[0]) @ self.clip.visual_proj
else:
visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float()
visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1))
return visual_hidden
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True)
visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame)
return sequence_output, visual_output
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
concat_features = torch.cat((sequence_output, visual_output), dim=1) # concatnate tokens and frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,):
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask)
video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
return text_out, video_out
def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
if sim_header == "meanP":
# Default: Parameter-free type
pass
elif sim_header == "seqLSTM":
# Sequential type: LSTM
visual_output_original = visual_output
visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(),
batch_first=True, enforce_sorted=False)
visual_output, _ = self.lstm_visual(visual_output)
if self.training: self.lstm_visual.flatten_parameters()
visual_output, _ = pad_packed_sequence(visual_output, batch_first=True)
visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1)
visual_output = visual_output + visual_output_original
elif sim_header == "seqTransf":
# Sequential type: Transformer Encoder
visual_output_original = visual_output
seq_length = visual_output.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device)
position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1)
frame_position_embeddings = self.frame_position_embeddings(position_ids)
visual_output = visual_output + frame_position_embeddings
extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0
extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1)
visual_output = visual_output.permute(1, 0, 2) # NLD -> LND
visual_output = self.transformerClip(visual_output, extended_video_mask)
visual_output = visual_output.permute(1, 0, 2) # LND -> NLD
visual_output = visual_output + visual_output_original
if self.training:
visual_output = allgather(visual_output, self.task_config)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config)
torch.distributed.barrier()
visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
sequence_output = sequence_output.squeeze(1)
sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True)
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
return retrieve_logits
def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
b_text, s_text, h_text = sequence_output.size()
b_visual, s_visual, h_visual = visual_output.size()
retrieve_logits_list = []
step_size = b_text # set smaller to reduce memory cost
split_size = [step_size] * (b_text // step_size)
release_size = b_text - sum(split_size)
if release_size > 0:
split_size += [release_size]
        # the CLIP text branch returns only the last hidden state, so use a length-1 attention mask here
attention_mask = torch.ones(sequence_output.size(0), 1)\
.to(device=attention_mask.device, dtype=attention_mask.dtype)
sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
for i in range(len(split_size)):
sequence_output_row = sequence_output_splits[i]
attention_mask_row = attention_mask_splits[i]
sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
attention_mask_l = attention_mask_l.view(-1, s_text)
step_truth = sequence_output_row.size(0)
visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
video_mask_r = video_mask_r.view(-1, s_visual)
cross_output, pooled_output, concat_mask = \
self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
retrieve_logits_list.append(retrieve_logits_row)
retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
contrastive_direction = ()
### add wti ###
if self.interaction == 'wti' or self.interaction == 'ti':
if self.cdcr == 0:
retrieve_logits, _, _ = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask)
return retrieve_logits, contrastive_direction
else:
retrieve_logits, _, cdcr_loss = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask)
return retrieve_logits, contrastive_direction, cdcr_loss
################
if loose_type:
assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"]
retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header)
else:
assert self.sim_header in ["tightTransf"]
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, )
return retrieve_logits, contrastive_direction
def wti_interaction(self, text_feat, video_feat, text_mask, video_mask):
text_feat, video_feat = text_feat.contiguous(), video_feat.contiguous()
if self.training and torch.cuda.is_available(): # batch merge here
text_feat = allgather(text_feat, self.task_config)
video_feat = allgather(video_feat, self.task_config)
text_mask = allgather(text_mask, self.task_config)
video_mask = allgather(video_mask, self.task_config)
torch.distributed.barrier() # force sync
if self.interaction == 'wti':
text_weight = self.text_weight_fc(text_feat).squeeze(2) # B x N_t x D -> B x N_t
text_weight.masked_fill_(torch.tensor((1 - text_mask), dtype=torch.bool), float("-inf"))
text_weight = torch.softmax(text_weight, dim=-1) # B x N_t
video_weight = self.video_weight_fc(video_feat).squeeze(2) # B x N_v x D -> B x N_v
video_weight.masked_fill_(torch.tensor((1 - video_mask), dtype=torch.bool), float("-inf"))
video_weight = torch.softmax(video_weight, dim=-1) # B x N_v
text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True)
video_feat = video_feat / video_feat.norm(dim=-1, keepdim=True)
retrieve_logits = torch.einsum('atd,bvd->abtv', [text_feat, video_feat])
retrieve_logits = torch.einsum('abtv,at->abtv', [retrieve_logits, text_mask])
retrieve_logits = torch.einsum('abtv,bv->abtv', [retrieve_logits, video_mask])
text_sum = text_mask.sum(-1)
video_sum = video_mask.sum(-1)
# max for video token
if self.interaction == 'ti': # token-wise interaction
t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt
v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv
t2v_logits = torch.sum(t2v_logits, dim=2) / (text_sum.unsqueeze(1))
v2t_logits = torch.sum(v2t_logits, dim=2) / (video_sum.unsqueeze(0))
retrieve_logits = (t2v_logits + v2t_logits) / 2.0
elif self.interaction == 'wti': # weighted token-wise interaction
t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt
t2v_logits = torch.einsum('abt,at->ab', [t2v_logits, text_weight])
v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv
v2t_logits = torch.einsum('abv,bv->ab', [v2t_logits, video_weight])
retrieve_logits = (t2v_logits + v2t_logits) / 2.0
if self.training:
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * retrieve_logits
if self.cdcr == 1:
# simple random
_text_feat = text_feat[torch.arange(text_feat.shape[0]),
torch.randint_like(text_sum, 0, 10000) % text_sum, :]
_video_feat = video_feat[torch.arange(video_feat.shape[0]),
torch.randint_like(video_sum, 0, 10000) % video_sum, :]
z_a_norm = (_text_feat - _text_feat.mean(0)) / _text_feat.std(0) # NxN_sxD
z_b_norm = (_video_feat - _video_feat.mean(0)) / _video_feat.std(0) # NxN_txD
# cross-correlation matrix
B, D = z_a_norm.shape
c = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / B # DxD
# loss
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum()
cdcr_loss = (on_diag * self.cdcr_alpha1 + off_diag * self.cdcr_alpha2)
return retrieve_logits, retrieve_logits.T, cdcr_loss
elif self.cdcr == 2:
                # select max
max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])]
max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])]
max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]),
max_idx2.flatten()]
max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]),
max_idx1.flatten()]
t_feat = text_feat.reshape(-1, text_feat.shape[-1])
t_mask = text_mask.flatten().type(torch.bool)
v_feat = video_feat.reshape(-1, text_feat.shape[-1])
v_mask = video_mask.flatten().type(torch.bool)
t_feat = t_feat[t_mask]
v_feat = v_feat[v_mask]
max_t_feat = max_t_feat[v_mask]
max_v_feat = max_v_feat[t_mask]
z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD
z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD
x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD
x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD
# cross-correlation matrix
N, D = z_a_norm.shape
c1 = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / N # DxD
N, D = x_a_norm.shape
c2 = torch.einsum('ac,ad->cd', x_a_norm, x_b_norm) / N # DxD
c = (c1 + c2) / 2.0
# loss
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum()
cdcr_loss = (on_diag * self.cdcr_alpha1 + off_diag * self.cdcr_alpha2)
return retrieve_logits, retrieve_logits.T, cdcr_loss
elif self.cdcr == 3:
                # select max
max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])]
max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])]
max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]),
max_idx2.flatten()].squeeze(1)
max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]),
max_idx1.flatten()].squeeze(1)
t_feat = text_feat.reshape(-1, text_feat.shape[-1])
t_mask = text_mask.flatten().type(torch.bool)
v_feat = video_feat.reshape(-1, video_feat.shape[-1])
v_mask = video_mask.flatten().type(torch.bool)
t_feat = t_feat[t_mask]
v_feat = v_feat[v_mask]
max_t_feat = max_t_feat[v_mask]
max_v_feat = max_v_feat[t_mask]
text_weight = text_weight.flatten()[t_mask]
video_weight = video_weight.flatten()[v_mask]
z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD
z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD
x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD
x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD
# cross-correlation matrix
N, D = z_a_norm.shape
B = text_feat.shape[0]
c1 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', z_a_norm, z_b_norm),
text_weight) / B # DxD
c2 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', x_a_norm, x_b_norm),
video_weight) / B # DxD
c = (c1 + c2) / 2.0
# loss
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum()
cdcr_loss = (on_diag * self.cdcr_alpha1 + off_diag * self.cdcr_alpha2)
return retrieve_logits, retrieve_logits.T, cdcr_loss
else:
return retrieve_logits, retrieve_logits.T, 0.0
else:
return retrieve_logits, retrieve_logits.T, 0.0 | InternVideo-main | Downstream/Video-Text-Retrieval/modules/modeling.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
from torch import nn
import torch.nn.functional as F
from modules.until_module import PreTrainedModel, AllGather, CrossEn
from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip
from modules.module_clip import CLIP, convert_weights
from modules import clip_evl
from modules import clip_kc
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from ipdb import set_trace
logger = logging.getLogger(__name__)
allgather = AllGather.apply
class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, cross_config, *inputs, **kwargs):
super(CLIP4ClipPreTrainedModel, self).__init__(cross_config)
self.cross_config = cross_config
self.clip = None
self.cross = None
@classmethod
def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None: state_dict = {}
pretrained_clip_name = "ViT-B/32"
if hasattr(task_config, 'pretrained_clip_name'):
pretrained_clip_name = task_config.pretrained_clip_name
clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name)
for key, val in clip_state_dict.items():
new_key = "clip." + key
if new_key not in state_dict:
state_dict[new_key] = val.clone()
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
model = cls(cross_config, clip_state_dict, *inputs, **kwargs)
## ===> Initialization trick [HARD CODE]
if model.linear_patch == "3d":
contain_conv2 = False
for key in state_dict.keys():
if key.find("visual.conv2.weight") > -1:
contain_conv2 = True
break
if contain_conv2 is False and hasattr(model.clip.visual, "conv2"):
cp_weight = state_dict["clip.visual.conv1.weight"].clone()
kernel_size = model.clip.visual.conv2.weight.size(2)
conv2_size = model.clip.visual.conv2.weight.size()
conv2_size = list(conv2_size)
left_conv2_size = conv2_size.copy()
right_conv2_size = conv2_size.copy()
left_conv2_size[2] = (kernel_size - 1) // 2
right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2]
left_zeros, right_zeros = None, None
if left_conv2_size[2] > 0:
left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
if right_conv2_size[2] > 0:
right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
cat_list = []
if left_zeros != None: cat_list.append(left_zeros)
cat_list.append(cp_weight.unsqueeze(2))
if right_zeros != None: cat_list.append(right_zeros)
cp_weight = torch.cat(cat_list, dim=2)
state_dict["clip.visual.conv2.weight"] = cp_weight
if model.sim_header == 'tightTransf':
contain_cross = False
for key in state_dict.keys():
if key.find("cross.transformer") > -1:
contain_cross = True
break
if contain_cross is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["cross.embeddings.position_embeddings.weight"] = val.clone()
continue
if key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict["cross."+key] = val.clone()
continue
if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf":
contain_frame_position = False
for key in state_dict.keys():
if key.find("frame_position_embeddings") > -1:
contain_frame_position = True
break
if contain_frame_position is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["frame_position_embeddings.weight"] = val.clone()
continue
if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict[key.replace("transformer.", "transformerClip.")] = val.clone()
continue
## <=== End of initialization trick
if state_dict is not None:
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
class CLIP4Clip(CLIP4ClipPreTrainedModel):
def __init__(self, cross_config, clip_state_dict, task_config):
super(CLIP4Clip, self).__init__(cross_config)
self.task_config = task_config
self.ignore_video_index = -1
assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
self._stage_one = True
self._stage_two = False
show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
self.loose_type = False
if self._stage_one and check_attr('loose_type', self.task_config):
self.loose_type = True
show_log(task_config, "Test retrieval by loose type.")
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in
[1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.linear_patch = '2d'
if hasattr(task_config, "linear_patch"):
self.linear_patch = task_config.linear_patch
show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch))
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer,
linear_patch=self.linear_patch
).float()
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
convert_weights(self.clip)
# <=== End of CLIP Encoders
self.sim_header = 'meanP'
if hasattr(task_config, "sim_header"):
self.sim_header = task_config.sim_header
show_log(task_config, "\t sim_header: {}".format(self.sim_header))
if self.sim_header == "tightTransf": assert self.loose_type is False
cross_config.max_position_embeddings = context_length
if self.loose_type is False:
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers")
self.cross = CrossModel(cross_config)
# <=== End of Cross Encoder
self.similarity_dense = nn.Linear(cross_config.hidden_size, 1)
if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf":
self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size)
if self.sim_header == "seqTransf":
self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers,
heads=transformer_heads, )
if self.sim_header == "seqLSTM":
self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size,
batch_first=True, bidirectional=False, num_layers=1)
self.loss_fct = CrossEn()
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None):
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
# T x 3 x H x W
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
video, video_mask, shaped=True, video_frame=video_frame)
if self.training:
loss = 0.
sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
shaped=True, loose_type=self.loose_type)
sim_loss1 = self.loss_fct(sim_matrix)
sim_loss2 = self.loss_fct(sim_matrix.T)
sim_loss = (sim_loss1 + sim_loss2) / 2
loss += sim_loss
return loss
else:
return None
def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
bs_pair = input_ids.size(0)
sequence_hidden = self.clip.encode_text(input_ids).float()
sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1))
return sequence_hidden
def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
bs_pair = video_mask.size(0)
visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float()
visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1))
return visual_hidden
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True)
visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame)
return sequence_output, visual_output
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
        concat_features = torch.cat((sequence_output, visual_output), dim=1)  # concatenate text tokens and video frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
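        # index 0 (the first text token) is zeroed out above, so it is excluded from the
        # masked average computed below.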
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,):
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask)
video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
return text_out, video_out
def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
if sim_header == "meanP":
# Default: Parameter-free type
pass
elif sim_header == "seqLSTM":
# Sequential type: LSTM
visual_output_original = visual_output
visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(),
batch_first=True, enforce_sorted=False)
visual_output, _ = self.lstm_visual(visual_output)
if self.training: self.lstm_visual.flatten_parameters()
visual_output, _ = pad_packed_sequence(visual_output, batch_first=True)
visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1)
visual_output = visual_output + visual_output_original
elif sim_header == "seqTransf":
# Sequential type: Transformer Encoder
visual_output_original = visual_output
seq_length = visual_output.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device)
position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1)
frame_position_embeddings = self.frame_position_embeddings(position_ids)
visual_output = visual_output + frame_position_embeddings
extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0
extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1)
visual_output = visual_output.permute(1, 0, 2) # NLD -> LND
visual_output = self.transformerClip(visual_output, extended_video_mask)
visual_output = visual_output.permute(1, 0, 2) # LND -> NLD
visual_output = visual_output + visual_output_original
if self.training:
visual_output = allgather(visual_output, self.task_config)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config)
torch.distributed.barrier()
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
sequence_output = sequence_output.squeeze(1)
sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True)
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
return retrieve_logits
def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
b_text, s_text, h_text = sequence_output.size()
b_visual, s_visual, h_visual = visual_output.size()
retrieve_logits_list = []
step_size = b_text # set smaller to reduce memory cost
split_size = [step_size] * (b_text // step_size)
release_size = b_text - sum(split_size)
if release_size > 0:
split_size += [release_size]
        # the CLIP text branch returns only the last hidden state, so the text mask reduces to a single position
attention_mask = torch.ones(sequence_output.size(0), 1)\
.to(device=attention_mask.device, dtype=attention_mask.dtype)
sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
for i in range(len(split_size)):
sequence_output_row = sequence_output_splits[i]
attention_mask_row = attention_mask_splits[i]
sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
attention_mask_l = attention_mask_l.view(-1, s_text)
step_truth = sequence_output_row.size(0)
visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
video_mask_r = video_mask_r.view(-1, s_visual)
cross_output, pooled_output, concat_mask = \
self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
retrieve_logits_list.append(retrieve_logits_row)
retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
contrastive_direction = ()
if loose_type:
assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"]
retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header)
else:
assert self.sim_header in ["tightTransf"]
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, )
return retrieve_logits, contrastive_direction | InternVideo-main | Downstream/Video-Text-Retrieval/modules/modeling_raw.py |
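# A minimal, self-contained sketch of the loose-type "meanP" similarity used by CLIP4Clip in
# modeling_raw.py above: masked mean pooling over per-frame features, L2 normalization, and a
# scaled dot product between text and video embeddings. All shapes and the logit scale below
# are illustrative stand-ins, not values taken from the model.
import torch

def meanp_retrieval_logits(text_feats, frame_feats, video_mask, logit_scale=100.0):
    # text_feats: [B_t, D], frame_feats: [B_v, T, D], video_mask: [B_v, T] with 1 = valid frame
    frame_feats = frame_feats / frame_feats.norm(dim=-1, keepdim=True)
    mask = video_mask.float().unsqueeze(-1)                       # [B_v, T, 1]
    video_feats = (frame_feats * mask).sum(1) / mask.sum(1).clamp(min=1.0)
    video_feats = video_feats / video_feats.norm(dim=-1, keepdim=True)
    text_feats = text_feats / text_feats.norm(dim=-1, keepdim=True)
    return logit_scale * text_feats @ video_feats.t()             # [B_t, B_v]

print(meanp_retrieval_logits(torch.randn(4, 512), torch.randn(4, 12, 512),
                             torch.ones(4, 12)).shape)            # torch.Size([4, 4])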
# Modified from https://github.com/lucidrains/CoCa-pytorch/blob/main/coca_pytorch/coca_pytorch.py
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# for controlling freezing of parameters
def set_module_requires_grad_(module, requires_grad):
for param in module.parameters():
param.requires_grad = requires_grad
def freeze_all_layers_(module):
set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
set_module_requires_grad_(module, True)
def freeze_model_and_make_eval_(model):
model.eval()
freeze_all_layers_(model)
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
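# shape note: for head dimension d, `pos` returned by RotaryEmbedding has shape (n, d), so
# applying it to q of shape (b, h, n, d) or to k of shape (b, n, d) broadcasts over the
# leading batch/head dimensions.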
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
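# e.g. an input of shape (b, n, 2*d) is chunked into x and gate of shape (b, n, d) each,
# and the output F.silu(gate) * x has shape (b, n, d): the gate halves the inner width.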
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("mask", None, persistent=False)
self.register_buffer("pos_emb", None, persistent=False)
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n]
pos_emb = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def forward(self, x, attn_mask=None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# extra attention mask - for masking out attention from text CLS token to padding
if exists(attn_mask):
attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
# cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim=None,
dim_head=64,
heads=8,
parallel_ff=False,
ff_mult=4,
norm_context=False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether to have parallel feedforward
ff_inner_dim = ff_mult * dim
self.ff = nn.Sequential(
nn.Linear(dim, ff_inner_dim * 2, bias=False),
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
) if parallel_ff else None
def forward(self, x, context):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
# pre-layernorm, for queries and context
x = self.norm(x)
context = self.context_norm(context)
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# scale
q = q * self.scale
# get key / values
k, v = self.to_kv(context).chunk(2, dim=-1)
# query / key similarity
sim = einsum('b h i d, b j d -> b h i j', q, k)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True)
attn = sim.softmax(dim=-1)
# aggregate
out = einsum('b h i j, b j d -> b h i d', attn, v)
# merge and combine heads
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
# add parallel feedforward (for multimodal layers)
if exists(self.ff):
out = out + self.ff(x)
return out
# transformer
class CoCa(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
unimodal_depth,
multimodal_depth,
image_dim = None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
if exists(self.img_encoder):
freeze_model_and_make_eval_(self.img_encoder)
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# unimodal layers
self.unimodal_layers = nn.ModuleList([])
for ind in range(unimodal_depth):
self.unimodal_layers.append(
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
)
# multimodal layers
self.multimodal_layers = nn.ModuleList([])
for ind in range(multimodal_depth):
self.multimodal_layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult))
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through unimodal layers
for attn_ff in self.unimodal_layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
self.img_encoder.eval()
with torch.no_grad():
image_tokens = self.img_encoder(images).detach()
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
def forward(
self,
text,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, text_tokens = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through multimodal layers
for attn_ff, cross_attn in self.multimodal_layers:
text_tokens = attn_ff(text_tokens)
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_embeds, image_embeds)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
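if __name__ == "__main__":
    # Minimal smoke test of the CoCa module above. Precomputed `image_tokens` are passed
    # directly so no img_encoder is needed; every size here is an illustrative placeholder,
    # not a recommended configuration.
    coca = CoCa(
        dim=512, num_tokens=20000,
        unimodal_depth=2, multimodal_depth=2,
        image_dim=1024, num_img_queries=64,
        dim_head=64, heads=8,
    )
    text = torch.randint(0, 20000, (2, 32))      # [batch, seq] of token ids
    image_tokens = torch.randn(2, 49, 1024)      # [batch, num_patches, image_dim]
    loss = coca(text, image_tokens=image_tokens, return_loss=True)
    text_emb, img_emb = coca(text, image_tokens=image_tokens, return_embeddings=True)
    print(loss.item(), text_emb.shape, img_emb.shape)  # scalar, [2, 512], [2, 512]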
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/coca.py |
from .clip import *
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from . import evl_utils
from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance
from einops import rearrange
from ipdb import set_trace
from copy import deepcopy
from .clip_decoders import CaptionDecoder
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True,
backbone='vit_2plus1d_dw_bias_b16',
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
init_zero=True,
use_capdecoder=False,
):
super().__init__()
        # All assertions are for the ad-hoc clip_kc and should be removed
# assert vision_layers == 12, vision_layers
assert image_resolution == 224, image_resolution
# assert vision_patch_size == 32, vision_patch_size
assert vision_width == n_dim, (vision_width, n_dim)
self.vision_width = n_dim
self.context_length = context_length
vision_heads = vision_width // 64
self.visual = evl_utils.__dict__[backbone](pretrained=False, init_zero=init_zero, num_frames=t_size, t_size=t_size)
self.evl = TransformerDecoder_uniformer_diff_conv_balance(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
pre_prompt=pre_prompt, balance=balance,
after_me=after_me, before_me=before_me,
me_type=me_type, me_reduction=me_reduction,
init_zero=init_zero,
)
self.visual_ln_post = nn.LayerNorm(n_dim)
scale = n_dim ** -0.5
self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim))
self.return_qk = use_image_attnmap
self.return_num = n_layers
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.embed_dim = embed_dim
        # We keep the mask embedding separate so that the pretrained weights can still be loaded
self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width))
# # To keep the num_embeddings unchanged, we add this to embedded text
# self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width))
##################### try cap decoder ###############
self.use_capdecoder = use_capdecoder
if self.use_capdecoder:
self.caption_decoder = CaptionDecoder(
n_layers=6,
transformer_width=transformer_width,
vision_width=vision_width,
transformer_heads=transformer_heads,
vocab_size=vocab_size,
)
## we do not use logits, just text feats ##
self.caption_decoder.predictor = nn.Identity()
#####################################################
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.normal_(self.text_mask_embedding, std=0.02)
# nn.init.constant_(self.eot_token_embedding, 0.0)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
nn.init.constant_(self.visual_ln_post.weight, 1.0)
nn.init.constant_(self.visual_ln_post.bias, 0.0)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_video(self, video, return_all_feats=False, masked_indices=None, mode="video"):
# if len(video.size()) == 4: #[bs * T, C, H, W]
#set_trace()
# frames = 12
# video = rearrange(video, '(b t) c h w -> b t c h w', b=int(video.size(0)/frames), t=frames)
# video = rearrange(video, 'b t c h w -> b c t h w')
#set_trace()
# video: [N, C, T, H, W]
features = self.visual(video, return_num=self.return_num, masked_indices=masked_indices, mode=mode)
x = self.visual_ln_post(self.evl(features))
if self.visual_proj is not None:
x = x @ self.visual_proj
if return_all_feats:
return x, features[-1]
# return x, features[-1], features[-1][0] # [N, T, C], [L, N, T, C], [N, T, C]
return x
def encode_text(self, text, masked_indices=None, return_all_feats=False):
# assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \
# "The last token of each sentence should be eot_token, check the input"
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
# x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding
if masked_indices is not None:
x[masked_indices] = self.text_mask_embedding
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)]
if self.text_projection is not None:
feats = feats @ self.text_projection
if return_all_feats:
return feats, x
return feats
def forward(self, video, text):
video_features = self.encode_video(video)
text_features = self.encode_text(text)
# normalized features
video_features = video_features / video_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_video= logit_scale * video_features @ text_features.t()
logits_per_text = logits_per_video.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_video, logits_per_text
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name) and not isinstance(l, TransformerDecoder_uniformer_diff_conv_balance):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def interpolate_temporal_pos_embed(pos_embed, T):
# [1, t, d] -> [1, d, t]
pos_embed = pos_embed.transpose(-2, -1)
# [1, d, t] -> [1, d, T]
pos_embed = F.interpolate(pos_embed, size=(T), mode='linear')
# [1, d, T] -> [1, T, d]
return pos_embed.transpose(-2, -1)
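# e.g. a temporal positional embedding trained with 8 frames, shape [1, 8, 768], is resized
# to [1, 16, 768] by linear interpolation along the time axis when the model uses t_size=16.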
def interploate_rpb(rpb, T):
t1 = T * 2 - 1
rpb = rpb.transpose(0, 1).unsqueeze(0)
rpb = F.interpolate(rpb, size=(t1), mode='linear')
return rpb.squeeze(0).transpose(0, 1)
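# the temporal relative position bias table is resized to 2*T - 1 rows, one row for each
# possible temporal offset between two of the T frames (from -(T-1) to T-1).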
def build_model(
state_dict: dict,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False,
init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None,
):
vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict
if "visual.proj" in state_dict:
state_dict["visual_proj"] = state_dict["visual.proj"]
state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"]
state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"]
del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"]
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# if k.startswith("backbone."):
# k = k.replace("backbone.", "visual.")
# new_state_dict[k] = v
# state_dict = new_state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
# embed_dim = 512
# context_length = 77
# vocab_size = 49408
# transformer_width = 512
# transformer_layers = 12
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
vision_width = state_dict["visual_proj"].shape[0]
n_dim = vision_width
if vision_width == 768:
backbone = "vit_2plus1d_dw_bias_b16"
n_head = 12
elif vision_width == 1024:
backbone = "vit_2plus1d_dw_bias_l14"
n_head = 16
else:
raise NotImplementedError
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone,
init_zero=init_zero, use_capdecoder=use_capdecoder,
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
###### convert_weights(model)#####
##### interpolate pos embedding ######
temporal_key = 'visual.temporal_positional_embedding'
temporal_key2 = 'evl.pemb_t'
if temporal_key in state_dict and t_size != state_dict[temporal_key].size(1):
state_dict[temporal_key] = interpolate_temporal_pos_embed(state_dict[temporal_key], t_size)
state_dict[temporal_key2] = interpolate_temporal_pos_embed(state_dict[temporal_key2], t_size)
for kk, vv in state_dict.items():
if 'rpb_t' in kk:
size_old = state_dict[kk].shape
state_dict[kk] = interploate_rpb(vv, t_size)
size_new = state_dict[kk].shape
            print('Interpolating', kk, size_old, '-->', size_new)
# set_trace()
if mergeclip:
assert 0.0 <= mergeweight <= 1.0
assert clip_state_dict is not None
clip_sd = {k: v.cpu() for k, v in clip_state_dict.items()}
new_sd = deepcopy(state_dict)
for k in new_sd:
if k not in clip_sd:
continue
if any(x in k for x in clip_sd.keys()):
print('merging: ', k)
new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
############## only merge the clip text features, this is for ActivityNet ###########
# if 'visual' in k:
# new_sd[k] = clip_sd[k]
# else:
# new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
################################################################################
############## only merge the clip visual features, this is for MSVD ###########
# if 'visual' in k:
# new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
# else:
# new_sd[k] = clip_sd[k]
################################################################################
state_dict = new_sd
# strict=False, for parameters of decoder
# assert False, (len(model.state_dict()), len(state_dict))
if not no_pretrain:
msg = model.load_state_dict(state_dict, strict=False)
print(msg)
return model.eval()
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/model.py |
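# A standalone sketch of the `mergeclip` rule in build_model above: any parameter that also
# exists in the original CLIP checkpoint is linearly interpolated with weight `mergeweight`,
# while parameters that exist only in the fine-tuned model pass through unchanged. (The extra
# `any(x in k ...)` check in the original is always true once `k` is known to be in clip_sd.)
# The tiny dictionaries below are made-up placeholders, not real checkpoints.
import torch

def blend_with_clip(state_dict, clip_sd, mergeweight=0.5):
    merged = dict(state_dict)
    for k in merged:
        if k in clip_sd:
            merged[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
    return merged

finetuned = {"text_projection": torch.ones(2, 2), "evl.pemb_t": torch.ones(1, 8, 768)}
clip_sd = {"text_projection": torch.zeros(2, 2)}
print(blend_with_clip(finetuned, clip_sd, 0.5)["text_projection"])  # every entry is 0.5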
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder_uniformer_diff_conv_balance
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
n_layers=12,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=4,
uni_type='3d',
add_ffn=True,
t_conv_type='1d',
pre_prompt=True,
balance=0.,
after_me=True,
before_me=False,
me_type='dstm',
me_reduction=4,
num_classes=400,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder_uniformer_diff_conv_balance(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
pre_prompt=pre_prompt, balance=balance,
after_me=after_me, before_me=before_me,
me_type=me_type, me_reduction=me_reduction,
num_classes=num_classes
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_2plus1d_diff_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_dw_bias_b16',
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
num_classes=400,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_2plus1d_diff_b_sparse8()
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze_diff.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes,
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_l_sparse16(pretrained=True):
# 16x224x224
# k400 1x1: 86.5
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l_sparse32(pretrained=True):
# 32x224x224
# k400 1x1: 87.0
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l336_sparse32(pretrained=True):
# 32x336x336
# k400 1x1: 87.4
model = EVL(
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_l_sparse16()
cal_flops(model, frame=16, size=224)
# model = vit_l_sparse32()
# cal_flops(model, frame=32, size=224)
# model = vit_l336_sparse32()
# cal_flops(model, frame=32, size=336) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/model_freeze.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=False,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes, add_residual=add_residual
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.4
model = EVL(
backbone='vit_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_2plus1d_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
# model = vit_b_sparse8()
# cal_flops(model, frame=8, size=224)
model = vit_2plus1d_b_sparse8()
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze.py |
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False, download_root: str = None,
# evl
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
'''
with open(model_path, 'rb') as opened_file:
try:
# loading JIT archive
model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
'''
init_state_dict = torch.load(model_path, map_location='cpu')['state_dict']
state_dict = {}
for k, v in init_state_dict.items():
k = k.replace('clip.','')
state_dict[k] = v
if not jit:
model = build_model(
state_dict or model.state_dict(),
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain,
init_zero=init_zero, mergeclip=mergeclip, mergeweight=mergeweight, use_capdecoder=use_capdecoder, clip_state_dict=clip_state_dict,
).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens):] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
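# Usage sketch (illustrative only; the texts and shapes below are examples, not part of the API):
#   tokens = tokenize(["a video of a dog running", "a person cooking"])
#   tokens.shape                                    # torch.Size([2, 77])
#   tokens, pad_mask = tokenize("a video of a dog", return_special_tokens_mask=True)
#   pad_mask.shape                                  # torch.Size([1, 77]); True marks the padded slots after <|endoftext|>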
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
    Returns a dict mapping utf-8 bytes to unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings,
    and we avoid mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
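# For example, printable ASCII bytes map to themselves (bytes_to_unicode()[ord('a')] == 'a'),
# while bytes outside the printable ranges are shifted to fresh code points
# (e.g. byte 0 maps to chr(256)), giving a 256-entry bijection free of whitespace/control characters.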
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
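# e.g. get_pairs(('l', 'o', 'w', 'er</w>')) == {('l', 'o'), ('o', 'w'), ('w', 'er</w>')}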
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
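# e.g. whitespace_clean("  a  b\t c \n") == "a b c"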
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
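if __name__ == '__main__':
    # Minimal round-trip sketch; assumes bpe_simple_vocab_16e6.txt.gz is available
    # next to this file (see default_bpe above).
    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode("A person is cooking dinner.")
    print(ids)                    # list of BPE token ids (input is lower-cased first)
    print(tokenizer.decode(ids))  # 'a person is cooking dinner . ' (spaces come from the </w> markers)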
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/simple_tokenizer.py |
import torch
import torch.nn as nn
from .evl_utils.evl_module import ResidualDecoderBlock
from .coca import Residual, ParallelTransformerBlock, CrossAttention
from einops import repeat
class CaptionDecoder(nn.Module):
def __init__(
self,
n_layers,
transformer_width,
vision_width,
transformer_heads,
vocab_size,
num_visual_queries=256,
):
super().__init__()
scale = transformer_width ** -0.5
self.visual_queries = nn.Parameter(
scale * torch.randn(num_visual_queries, transformer_width)
)
dim_head = transformer_width // transformer_heads
ff_mult = 4
self.visual_attn_pooler = CrossAttention(
dim=transformer_width,
context_dim=vision_width,
dim_head=dim_head,
heads=transformer_heads,
norm_context=True,
)
self.visual_pooler_norm = nn.LayerNorm(transformer_width)
self.text_norm = nn.LayerNorm(transformer_width)
self.multimodal_layers = nn.ModuleList([])
for ind in range(n_layers):
self.multimodal_layers.append(
nn.ModuleList(
[
Residual(
ParallelTransformerBlock(
dim=transformer_width,
dim_head=dim_head,
heads=transformer_heads,
ff_mult=ff_mult,
)
),
Residual(
CrossAttention(
dim=transformer_width,
dim_head=dim_head,
heads=transformer_heads,
parallel_ff=True,
ff_mult=ff_mult,
)
),
]
)
)
self.predictor = nn.Sequential(
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, transformer_width),
nn.GELU(),
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, vocab_size),
)
def forward(self, image_feats, text_embeds):
        # image_feats: [L, N, T, C]
        # text_embeds: embedded text features, [N, L, C]
# [L, N, T, C] -> [N, T * L, C]
image_feats = image_feats.permute(1, 0, 2, 3).flatten(1, 2)
visual_queries = repeat(
self.visual_queries, 'n d -> b n d', b=image_feats.shape[0]
)
image_feats = self.visual_pooler_norm(
self.visual_attn_pooler(visual_queries, image_feats)
)
text_embeds = self.text_norm(text_embeds)
# go through multimodal layers
for attn_ff, cross_attn in self.multimodal_layers:
text_embeds = attn_ff(text_embeds)
text_embeds = cross_attn(text_embeds, image_feats)
logits = self.predictor(text_embeds)
return logits
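# Shape sketch (illustrative values inferred from the forward pass above; not executed here):
#   dec = CaptionDecoder(n_layers=2, transformer_width=512, vision_width=768,
#                        transformer_heads=8, vocab_size=49408)
#   image_feats: [L, N, T, C_vision], e.g. [197, 2, 8, 768]
#   text_embeds: [N, L_text, C_text], e.g. [2, 32, 512]
#   dec(image_feats, text_embeds)  -> logits of shape [2, 32, 49408]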
class MaskedTextDecoder(nn.Module):
def __init__(
self,
n_layers,
transformer_width,
vision_width,
transformer_heads,
vocab_size,
drop_rate,
drop_path_rate,
):
super().__init__()
self.visual_encoder_to_decoder = nn.Sequential(
nn.LayerNorm(vision_width), nn.Linear(vision_width, transformer_width)
)
self.text_encoder_to_decoder = nn.Sequential(
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, transformer_width),
)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, n_layers)
] # stochastic depth decay rule
        # We are setting the d_model as transformer_width because we want the decoder to be small
self.text_decoder = nn.ModuleList(
[
ResidualDecoderBlock(
d_model=transformer_width,
n_head=transformer_heads,
dropout=drop_rate,
drop_path=dpr[i],
)
for i in range(n_layers)
]
)
self.text_decoder_ln = nn.LayerNorm(transformer_width)
def forward(self, text_feats, visual_feats):
visual_feats = self.visual_encoder_to_decoder(visual_feats)
text_feats = self.text_encoder_to_decoder(text_feats)
# ! Shape
# [L, N, T, C] -> [T * L, N, C]
visual_feats = visual_feats.permute(2, 0, 1, 3).flatten(0, 1)
# [N, L, C] -> [L, N, C]
text_feats = text_feats.permute(1, 0, 2)
for dec in self.text_decoder:
text_feats = dec(text_feats, visual_feats)
text_feats = self.text_decoder_ln(text_feats).permute(1, 0, 2)
return text_feats
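# Shape sketch (inferred from the permutes above; not executed here):
#   text_feats:   [N, L_text, transformer_width]
#   visual_feats: [L_vis, N, T, vision_width]
#   output:       [N, L_text, transformer_width]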
class MaskedVisualDecoder(nn.Module):
def __init__(
self,
n_layers,
transformer_width,
vision_width,
transformer_heads,
patch_size,
drop_path_rate=0.0,
):
super().__init__()
self.visual_encoder_to_decoder = nn.Sequential(
nn.LayerNorm(vision_width), nn.Linear(vision_width, transformer_width)
)
self.text_encoder_to_decoder = nn.Sequential(
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, transformer_width),
)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, n_layers)
] # stochastic depth decay rule
# We are setting the d_model as transformer_width because we want the decoder to be small
        # Maybe later I will add a specific setting for this
self.vision_decoder = nn.ModuleList(
[
ResidualDecoderBlock(
d_model=transformer_width,
n_head=transformer_heads,
drop_path=dpr[i],
)
for i in range(n_layers)
]
)
self.predictor = nn.Sequential(
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, transformer_width),
nn.GELU(),
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, 3 * patch_size * patch_size),
)
def forward(self, text_feats, visual_feats):
# Remove cls_token first
visual_feats = self.visual_encoder_to_decoder(visual_feats[1:])
text_feats = self.text_encoder_to_decoder(text_feats)
# [L, N, T, C] -> [T * L, N, C]
visual_feats = visual_feats.permute(2, 0, 1, 3).flatten(0, 1)
# [N, L, C] -> [L, N, C]
text_feats = text_feats.permute(1, 0, 2)
for dec in self.vision_decoder:
visual_feats = dec(visual_feats, text_feats)
visual_feats = self.predictor(visual_feats).permute(1, 0, 2) # [N, L, C]
return visual_feats | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/clip_decoders.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
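if __name__ == '__main__':
    # Shape sanity check for the extended return_qk path. Like clip_vit.py, this file uses a
    # relative import, so run it as a module from the package root rather than as a script.
    mha = MultiheadAttention(embed_dim=512, num_heads=8)
    x = torch.zeros(10, 2, 512)                     # (L, N, E)
    q, k, out, attn = mha(x, x, x, return_qk=True)
    print(q.shape, k.shape, out.shape)              # each torch.Size([10, 2, 512])
    out, attn = mha(x, x, x)                        # standard nn.MultiheadAttention-style call
    print(out.shape, attn.shape)                    # torch.Size([10, 2, 512]), torch.Size([2, 10, 10])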
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import trunc_normal_, DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class STM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(STM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1 feature
conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:])
__, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# motion fea = t+1_fea - t_fea
# pad the last timestamp
diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W
# pad = (0,0,0,0,0,0,0,1)
diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W
diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
y = self.recover(diff_fea_pluszero) # NT, C, H, W
# reshape
y = y.reshape(N, T, C, L).permute(3, 0, 1, 2)
y = torch.cat([cls_token, y], dim=0)
return y
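# Shape sketch: for x of shape [1 + H*W, N, T, C], e.g. torch.zeros(1 + 14 * 14, 2, 8, 768),
# STM(n_dim=768)(x) returns a tensor of the same shape [197, 2, 8, 768].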
class DSTM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(DSTM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
# DW(T+1) - T
self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_pre = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1)
# DW(T-1) - T
self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_back = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1/t-1 feature
pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W
back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:])
back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:])
__, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# pre_fea = t+1_fea - t_fea
# back_fea = t-1_fea - t_fea
pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W
back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W
# pad the last/first timestamp
pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W
pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W
back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
# recover channel
pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W
back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W
# reshape
y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2)
# cat cls_token
y = torch.cat([cls_token, y], dim=0)
return y
class TDN(nn.Module):
def __init__(self, channel, n_segment=8, index=1, reduction=4):
super(TDN, self).__init__()
self.channel = channel
self.reduction = reduction
self.n_segment = n_segment
self.stride = 2**(index-1)
self.conv1 = nn.Conv2d(in_channels=self.channel,
out_channels=self.channel//self.reduction,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,
kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False)
self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_forward = nn.Sigmoid()
        self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)  # nn.AdaptiveMaxPool2d(1)
self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_backward = nn.Sigmoid()
self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1)
self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0)
self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.channel)
self.conv3_smallscale2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
def spatial_pool(self, x):
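        # NOTE: this helper is not called from forward(); it also references self.conv_mask and
        # self.softmax, which are not defined in __init__, so it would fail if invoked as-is.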
nt, channel, height, width = x.size()
input_x = x
# [N, C, H * W]
input_x = input_x.view(nt, channel, height * width)
# [N, 1, C, H * W]
input_x = input_x.unsqueeze(1)
# [N, 1, H, W]
context_mask = self.conv_mask(x)
# [N, 1, H * W]
context_mask = context_mask.view(nt, 1, height * width)
# [N, 1, H * W]
context_mask = self.softmax(context_mask)
context_mask = context_mask.view(nt,1,height,width)
return context_mask
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.conv1(fea) # nt, c//r, h, w
bottleneck = self.bn1(bottleneck) # nt, c//r, h, w
reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w
t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w
_, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w
conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w
reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w
_, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w
tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w
diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w
diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w
diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w
diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w
y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1
y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1
y_forward_smallscale4 = diff_fea_pluszero_forward
y_backward_smallscale4 = diff_fea_pluszero_backward
y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2))
y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2))
y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4))
y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4))
y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:])
y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:])
y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1
y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1
y_forward = self.sigmoid_forward(y_forward) - 0.5
y_backward = self.sigmoid_backward(y_backward) - 0.5
y = 0.5 * y_forward + 0.5 * y_backward
attn = fea * y
x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2)
x = torch.cat([cls_token, x], dim=0)
return x
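# Shape sketch: with TDN(channel=768, n_segment=8) and x of shape [1 + 14 * 14, N, 8, 768]
# (T must equal n_segment), forward() returns a tensor of the same shape.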
class CMlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = conv_1x1x1(in_features, hidden_features)
self.act = act_layer()
self.fc2 = conv_1x1x1(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CBlock(nn.Module):
def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True):
super().__init__()
self.norm1 = bn_3d(dim)
self.conv1 = conv_1x1x1(dim, dim, 1)
self.conv2 = conv_1x1x1(dim, dim, 1)
if uni_type == '3d':
print('Use 3d conv for local MHRA')
self.attn = conv_3x3x3(dim, dim, groups=dim)
else:
print('Use 2d conv for local MHRA')
self.attn = conv_1x3x3(dim, dim, groups=dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.add_ffn = add_ffn
if add_ffn:
print('Add FFN in local MHRA')
self.norm2 = bn_3d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout)
print('Init zero')
nn.init.constant_(self.conv2.weight, 0.)
nn.init.constant_(self.conv2.bias, 0.)
if add_ffn:
nn.init.constant_(self.mlp.fc2.weight, 0.)
nn.init.constant_(self.mlp.fc2.bias, 0.)
def forward(self, x):
x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
if self.add_ffn:
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0, init_zero=True):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
if init_zero:
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.constant_(self.attn.in_proj_bias, 0.)
# nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
# nn.init.xavier_uniform_(self.mlp[-1].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
else:
nn.init.trunc_normal_(self.attn.in_proj_weight, std=.02)
nn.init.constant_(self.attn.in_proj_bias, 0.)
nn.init.trunc_normal_(self.attn.out_proj.weight, std=.02)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.trunc_normal_(self.mlp.c_fc.weight, std=.02)
nn.init.constant_(self.mlp.c_fc.bias, 0.)
nn.init.trunc_normal_(self.mlp.c_proj.weight, std=.02)
nn.init.constant_(self.mlp.c_proj.bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
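# Shape sketch (one cross-attention decoder step): for query tokens x of shape [Tx, N, C] and
# context tokens y of shape [Ty, N, C] with C == d_model, forward(x, y) returns [Tx, N, C], e.g.
#   ResidualDecoderBlock(d_model=768, n_head=12)(torch.zeros(1, 2, 768), torch.zeros(197 * 8, 2, 768))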
class TransformerDecoder_uniformer_diff_conv_balance(nn.Module):
def __init__(self, n_layers=4,
uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
balance=0.,
use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4,
use_t_pos_embed=True, num_classes=400, init_zero=True):
super().__init__()
n_layers += uni_layer
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.uni_layer = uni_layer
self.uni_dec = nn.ModuleList([
CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn)
for i in range(uni_layer)
])
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], init_zero=init_zero)
for i in range(n_layers)
])
self.pre_prompt = pre_prompt
if pre_prompt:
print('Add pre prompt')
self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
if use_t_conv:
self.t_conv_type = t_conv_type
if t_conv_type == '1d':
print('Use 1d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
if init_zero:
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
print('Use 3d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
if init_zero:
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
else:
self.tconv = None
self.before_me = before_me
self.after_me = after_me
if before_me or after_me:
assert before_me != after_me
print(f'Use {me_type} attention, Before {before_me}, After {after_me}')
if me_type == 'stm':
me_op = STM
elif me_type == 'dstm':
me_op = DSTM
elif me_type == 'tdn':
me_op = TDN
self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)])
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
        print(f'Balance weight {balance}')
self.balance = nn.Parameter(torch.ones((n_dim)) * balance)
self.sigmoid = nn.Sigmoid()
if not init_zero:
nn.init.normal_(self.temporal_cls_token, std=1e-6)
if self.pemb_t is not None:
nn.init.trunc_normal_(self.pemb_t, std=.02)
def forward(self, clip_feats_all, mode='video'):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
if self.after_me:
origin_clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(clip_feats)):
if self.before_me:
# contain residual
clip_feats[i] = self.me[i](clip_feats[i])
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
if self.t_conv_type == '1d':
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
else:
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
if self.pemb_t is not None and mode == 'video':
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
if self.after_me:
clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i])
if i < self.uni_layer:
# L, N, T, C
L, N, T, C = clip_feats[i].shape
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
if self.pre_prompt:
pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(self.dec)):
if i < self.uni_layer:
pre_x = self.dec[i](pre_x, clip_feats[i])
elif i == self.uni_layer:
clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0)
x = self.dec[i](x, clip_feats[i])
else:
x = self.dec[i](x, clip_feats[i])
else:
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
# real residual
# L, N, T, C
residual = clip_feats_all[-1][0].mean(1)
weight = self.sigmoid(self.balance)
# return self.proj((1 - weight) * x[0, :, :] + weight * residual)
return (1 - weight) * x[0, :, :] + weight * residual
if __name__ == '__main__':
model = TransformerDecoder_uniformer_diff_conv_balance()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
for i in range(8): # vit-b has 12 blocks
# every item in input_features contains features maps from a single block
# every item is a tuple containing 3 feature maps:
# (1) block output features (i.e. after mlp) with shape L, N, T, C
# (2) projected query features with shape L, N, T, C
# (3) projected key features with shape L, N, T, C
input_features.append(
tuple(torch.zeros([L, N, T, C]) for _ in range(3)))
# some small optimizations:
    # (1) We only decode from the last $n$ blocks, so it is fine as long as the last $n$ items of input_features are valid; all earlier items can be filled with None to save memory. By default $n=4$.
    # (2) Projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
print(model)
print(model(input_features).shape) # should be N, 400
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module_uniformer_diff_conv_balance.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x, return_qk=False):
if return_qk:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True)
return q, k, attn_output
else:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, return_qk=False):
if return_qk:
q, k, attn_output = self.attention(self.ln_1(x), return_qk=True)
x = x + self.drop_path(attn_output)
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x, q, k
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x)
if i >= self.layers - return_num:
L, NT, C = x.shape
N = NT // T
features.append(x.view(L, N, T, C))
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4, return_qk=True):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
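# The forward pass returns the last `return_num` intermediate feature maps, each of shape
# [L, N, T, C] with L = (input_resolution // patch_size) ** 2 + 1 (a class token plus the patch grid).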
def vit_b32(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_b16(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14_336(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit.py |
from .evl_module import TransformerDecoder
from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance
from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336
from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336
from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336 | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- return_qk: whether return Q and K.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
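

# --- hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal smoke test for the attention function above, assuming it keeps the same
# positional signature as torch.nn.functional.multi_head_attention_forward plus the
# extra `return_qk` flag (mirroring the companion attention_module_bias.py).
# Shapes follow the docstring convention: (L, N, E) queries, (S, N, E) keys/values.
if __name__ == '__main__':
    L, S, N, E, H = 4, 6, 2, 8, 2  # tgt len, src len, batch, embed dim, heads
    q_in = torch.randn(L, N, E)
    k_in = torch.randn(S, N, E)
    v_in = torch.randn(S, N, E)
    in_proj_w = torch.randn(3 * E, E) * 0.02
    in_proj_b = torch.zeros(3 * E)
    out_w = torch.randn(E, E) * 0.02
    out_b = torch.zeros(E)
    rq, rk, out, attn = multi_head_attention_forward(
        q_in, k_in, v_in, E, H, in_proj_w, in_proj_b, None, None, False, 0.0,
        out_w, out_b, training=False, need_weights=True, return_qk=True)
    # expected: out (L, N, E), attn (N, L, S) averaged over heads,
    # rq (L, N, E) and rk (S, N, E) taken before the per-head reshape
    print(out.shape, attn.shape, rq.shape, rk.shape)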
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def attention_temporal(self, x):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8):
# temporal
# x: 1+HWT, N, C
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
res_temporal = self.attention_temporal(self.ln_t(xt))
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
res_spatial = self.attention(self.ln_1(xs))
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
cls_token = torch.mean(cls_token, 0, True) # averaging for every frame
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
                tmp_x = torch.cat([cls_x, tmp_x], dim=0)  # L + 1, N, T, C
features.append(tmp_x)
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
# x = x + self.temporal_positional_embedding
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
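

# --- hedged shape-check sketch (added for illustration; not part of the original file) ---
# A deliberately tiny config to show the expected tensor layout: the input is a video
# batch of shape (N, C, T, H, W) and each returned feature map has shape
# (L + 1, N, T, width) with L = (input_resolution // patch_size) ** 2.
def _tiny_shape_check():
    vit = VisionTransformer(
        input_resolution=32, patch_size=16, width=96,
        layers=2, heads=4, output_dim=64, num_frames=4)
    video = torch.zeros(2, 3, 4, 32, 32)  # N, C, T, H, W with T == num_frames
    feats = vit(video, return_num=2)
    assert len(feats) == 2
    assert feats[0].shape == (2 * 2 + 1, 2, 4, 96)  # L + 1, N, T, width
    return feats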
def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d.py |
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module_bias import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False, rpb=None):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
          positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
          are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True, rpb=rpb)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, rpb=rpb)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, rpb=rpb)
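

# --- hedged usage sketch (added for illustration; not part of the original file) ---
# The extra `rpb` argument is a relative position bias that is added to the attention
# logits before the softmax (see attention_module_bias.py); it is expected to match or
# broadcast against the (bsz * num_heads, L, S) logit tensor.
if __name__ == '__main__':
    L, N, E, H = 5, 2, 16, 4
    mha = MultiheadAttention(E, H)
    x = torch.randn(L, N, E)
    rpb = torch.zeros(N * H, L, L)  # one (L, L) bias map per (batch, head) pair
    out, attn = mha(x, x, x, need_weights=True, rpb=rpb)
    # expected: out (L, N, E), attn (N, L, L) averaged over heads
    print(out.shape, attn.shape)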
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_bias.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False,
rpb: Tensor = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``training`` is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- return_qk: whether return Q and K.
- rpb: relative postion bias
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
if rpb is not None:
attn_output_weights = attn_output_weights + rpb
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module_bias.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
import torch.utils.checkpoint as checkpoint
from .attention_bias import MultiheadAttention
from ipdb import set_trace
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7, init_zero=True):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}')
self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model)
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head]))
idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long)
for q in range(t_size):
for k in range(t_size):
offs = q - k + t_size - 1
idx_tensor_t[q, k] = offs
self.idx_tensor_t = idx_tensor_t
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head]))
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long)
for q in range(spatial_size ** 2):
qi, qj = q // spatial_size, q % spatial_size
for k in range(spatial_size ** 2):
ki, kj = k // spatial_size, k % spatial_size
i_offs = qi - ki + spatial_size - 1
j_offs = qj - kj + spatial_size - 1
idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs
self.idx_tensor = idx_tensor
if init_zero:
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.pos_embed.weight, 0)
nn.init.constant_(self.pos_embed.bias, 0)
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
nn.init.constant_(self.ln_t.weight, 1.)
nn.init.constant_(self.ln_t.bias, 0.)
else:
nn.init.trunc_normal_(self.rpb_t, std=.02)
nn.init.trunc_normal_(self.rpb, std=.02)
nn.init.trunc_normal_(self.pos_embed.weight, std=.02)
nn.init.constant_(self.pos_embed.bias, 0)
nn.init.trunc_normal_(self.attn_t.in_proj_weight, std=.02)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.trunc_normal_(self.attn_t.out_proj.weight, std=.02)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
nn.init.constant_(self.ln_t.weight, 1.)
nn.init.constant_(self.ln_t.bias, 0.)
def attention(self, x, rpb=None):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def attention_temporal(self, x, rpb=None):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def forward(self, x, T=8, mode='video'):
# temporal
# x: 1+HWT, N, C
# pos_emb
tmp_x = x[1:, :, :]
LT, N, C = tmp_x.shape
L = LT // T
H = W = int(L ** 0.5)
tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1)
tmp_x = tmp_x + self.pos_embed(tmp_x)
tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C)
x[1:, :, :] = tmp_x
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
# no rpb_t for image
if mode == 'image':
rpb_t = None
else:
# rpb_t: T, T, H => B*H, T, T
self.idx_tensor_t = self.idx_tensor_t.to(xt.device)
rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1)
# set_trace()
res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t)
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
# rpb: L, L, H => B*H, L+1, L+1
rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype)
self.idx_tensor = self.idx_tensor.to(xs.device)
rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1)
rpb = rpb.repeat(T*N, 1, 1)
res_spatial = self.attention(self.ln_1(xs), rpb=rpb)
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
cls_token = torch.mean(cls_token, 0, True) # averaging for every frame
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
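

# --- hedged illustration (added; not part of the original file) ---
# A standalone re-derivation of the spatial relative-position index table built in
# ResidualAttentionBlock.__init__ above, shown for a small spatial_size to make the
# mapping explicit: each (query, key) patch pair indexes one of the
# (2 * spatial_size - 1) ** 2 learned bias rows in `self.rpb`.
def _spatial_rpb_index_table(spatial_size=3):
    idx = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long)
    for q in range(spatial_size ** 2):
        qi, qj = q // spatial_size, q % spatial_size
        for k in range(spatial_size ** 2):
            ki, kj = k // spatial_size, k % spatial_size
            i_offs = qi - ki + spatial_size - 1
            j_offs = qj - kj + spatial_size - 1
            idx[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs
    # gathering self.rpb with this table and permuting yields an (n_head, L, L) bias
    # that forward() pads with a zero row/column for the CLS token and repeats over
    # the batch before the spatial attention call
    return idx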
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7, init_zero=True):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask,
drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size, init_zero=init_zero) for i in range(layers)
])
def forward(self, x, return_num=4, T=8, mode='video'):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T, mode=mode)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
                tmp_x = torch.cat([cls_x, tmp_x], dim=0)  # L + 1, N, T, C
features.append(tmp_x)
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim,
num_frames=8, drop_path_rate=0., t_size=8, spatial_size=7, init_zero=True,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size, init_zero=init_zero)
self.mask_embedding = nn.Parameter(scale * torch.randn(width))
print('-' * 100)
print('tsize:', t_size, 'num frame: ', num_frames)
print('-' * 100)
def forward(self, x, return_num=4, masked_indices=None, mode='video'):
if len(x.size()) == 5:
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
if masked_indices is not None:
masked_indices = masked_indices.view(N * T, -1)
x[masked_indices] = self.mask_embedding.type(x.dtype)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
# add temporal position embedding for video
if mode == 'video':
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
# x = x + self.temporal_positional_embedding
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
else:
pass
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T, mode=mode
)
return features
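

# --- hedged usage sketch (added for illustration; not part of the original file) ---
# `masked_indices` is treated as a boolean mask over patch tokens, one row per frame
# after the (N, T) flatten; masked tokens are replaced by the learned `mask_embedding`
# before the positional embeddings are added. This sketch assumes T equals the model's
# num_frames and that the mask lives on the same device as the input video.
def _masked_forward_sketch(model, video):
    N, C, T, H, W = video.shape
    num_patches = (model.input_resolution // model.conv1.kernel_size[0]) ** 2
    masked = torch.rand(N, T, num_patches, device=video.device) < 0.5  # mask ~50% of patch tokens
    return model(video, return_num=4, masked_indices=masked, mode='video')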
def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
init_zero=init_zero,
t_size=t_size,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_b16(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
spatial_size=14,
init_zero=init_zero,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
        t_size=t_size,
        spatial_size=16,  # 224 // 14 patch grid; forward() builds the rpb table from this
        init_zero=init_zero,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
        t_size=t_size,
        spatial_size=24,  # 336 // 14 patch grid; forward() builds the rpb table from this
        init_zero=init_zero,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_dw_bias_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d_dw_bias.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.xavier_uniform_(self.mlp[-1].weight)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class TransformerDecoder(nn.Module):
def __init__(self, n_layers=4,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8,
use_t_conv=True, use_t_pos_embed=True, num_classes=400,
add_residual=False,
):
super().__init__()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i])
for i in range(n_layers)
])
self.proj = nn.Sequential(
nn.LayerNorm(n_dim),
nn.Dropout(cls_dropout),
nn.Linear(n_dim, num_classes),
)
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.add_residual = add_residual
print(f'Add residual {add_residual}')
if use_t_conv:
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
self.tconv = None
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
self.t_size = t_size
def forward(self, clip_feats_all):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(clip_feats)):
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
if self.pemb_t is not None:
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
if self.add_residual:
residual = clip_feats_all[-1][0].mean(1)
return self.proj(x[0, :, :] + residual)
else:
return self.proj(x[0, :, :])
if __name__ == '__main__':
model = TransformerDecoder()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
for i in range(4): # vit-b has 12 blocks
        # every item in input_features contains feature maps from a single block
        # in the full interface every item is a tuple of 3 feature maps
        # (this decoder variant only consumes the first one, so the demo appends plain tensors):
        # (1) block output features (i.e. after mlp) with shape L, N, T, C
        # (2) projected query features with shape L, N, T, C
        # (3) projected key features with shape L, N, T, C
input_features.append(
torch.zeros([L, N, T, C]))
# some small optimizations:
    # (1) We only decode from the last $n$ blocks, so it is fine as long as the last $n$ items of input_features are valid; all earlier items can be filled with None to save memory. By default $n=4$.
    # (2) projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
print(model(input_features).shape) # should be N, 400
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
from modules.clip_evl import evl_utils
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
t_size=16,
backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=174,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](
pretrained=False,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
def forward(self, x, mode='video'):
output = self.backbone(x, mode=mode)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_only_global_b_sparse8_k400(pretrained=True):
model = EVL(
backbone='vit_only_global_b16',
t_size=8,
backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
# print(f'lodel model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
def vit_only_global_l_sparse8_k400(pretrained=True):
model = EVL(
backbone='vit_only_global_l14',
t_size=8,
backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
# print(f'lodel model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_only_global_l_sparse8_k400()
# cal_flops(model, frame=1, size=224)
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_only_global.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
from . import evl_utils
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
t_size=16,
dw_reduction=1.5,
backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=174,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](
pretrained=False,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
def forward(self, x, mode='video'):
output = self.backbone(x, mode=mode)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_fusion_b_sparse16_k400(pretrained=True):
model = EVL(
backbone='vit_fusion_b16',
t_size=16,
dw_reduction=1.5,
backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
# print(f'lodel model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
def vit_fusion_b_sparse16_sthsth(pretrained=True):
model = EVL(
backbone='vit_fusion_b16',
t_size=16,
dw_reduction=1.5,
backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=174,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
# print(f'lodel model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_fusion_b_sparse16_k400()
# cal_flops(model, frame=1, size=224)
cal_flops(model, frame=16, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_uniformer.py |
from .clip import *
from .evl_utils import * | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from . import evl_utils
from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance
from einops import rearrange
from ipdb import set_trace
from copy import deepcopy
from torch.utils.checkpoint import checkpoint_sequential
# from .clip_decoders import CaptionDecoder
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_checkpoint=False, checkpoint_num=[0, 0]):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
def forward(self, x: torch.Tensor):
if self.use_checkpoint and self.checkpoint_num[1] > 0:
segments = min(len(self.resblocks), self.checkpoint_num[1])
return checkpoint_sequential(self.resblocks, segments, x)
else:
return self.resblocks(x)
# return self.resblocks(x)
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True,
backbone='vit_2plus1d_dw_bias_b16',
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
init_zero=True,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
use_capdecoder=False,
):
super().__init__()
        # All assertions are for the ad-hoc clip_kc and should be removed
# assert vision_layers == 12, vision_layers
assert image_resolution == 224, image_resolution
# assert vision_patch_size == 32, vision_patch_size
assert vision_width == n_dim, (vision_width, n_dim)
self.vision_width = n_dim
self.context_length = context_length
vision_heads = vision_width // 64
self.visual = evl_utils.__dict__[backbone](
pretrained=False, t_size=t_size, mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, n_dim=n_dim,
n_head=n_head, return_list=return_list, drop_path_rate=drop_path_rate, backbone_drop_path_rate=drop_path_rate,
use_checkpoint=True, checkpoint_num=[24,100],
)
# self.evl = TransformerDecoder_uniformer_diff_conv_balance(
# n_layers=n_layers, n_dim=n_dim, n_head=n_head,
# mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
# mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
# use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
# uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
# pre_prompt=pre_prompt, balance=balance,
# after_me=after_me, before_me=before_me,
# me_type=me_type, me_reduction=me_reduction,
# init_zero=init_zero,
# )
self.visual_ln_post = nn.LayerNorm(n_dim)
scale = n_dim ** -0.5
self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim))
self.return_qk = use_image_attnmap
self.return_num = n_layers
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
use_checkpoint=False, checkpoint_num=[24,100],
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.embed_dim = embed_dim
        # We separate the mask embedding to load the pretrained model
self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width))
# # To keep the num_embeddings unchanged, we add this to embedded text
# self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.normal_(self.text_mask_embedding, std=0.02)
# nn.init.constant_(self.eot_token_embedding, 0.0)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
nn.init.constant_(self.visual_ln_post.weight, 1.0)
nn.init.constant_(self.visual_ln_post.bias, 0.0)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_video(self, video, return_all_feats=False, masked_indices=None, mode="video"):
# video: [N, C, T, H, W]
feats = self.visual(video, return_all_feats=return_all_feats, mode=mode)
if return_all_feats:
x, feats = feats
x = self.visual_ln_post(x)
if self.visual_proj is not None:
x = x @ self.visual_proj
if return_all_feats:
return x, feats # [N, C], [L, N, T, C]
return x
def encode_text(self, text, masked_indices=None, return_all_feats=False):
# assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \
# "The last token of each sentence should be eot_token, check the input"
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
# x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding
if masked_indices is not None:
x[masked_indices] = self.text_mask_embedding
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)]
if self.text_projection is not None:
feats = feats @ self.text_projection
if return_all_feats:
return feats, x
return feats
def forward(self, video, text):
video_features = self.encode_video(video)
text_features = self.encode_text(text)
# normalized features
video_features = video_features / video_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_video = logit_scale * video_features @ text_features.t()
logits_per_text = logits_per_video.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_video, logits_per_text
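# Hedged usage sketch (comment only; the tensor shapes below are assumptions inferred
# from encode_video/encode_text above, not verified against the full repo):
#   video = torch.randn(2, 3, 8, 224, 224)      # [N, C, T, H, W]
#   text = torch.randint(0, 49408, (2, 77))     # tokenised captions, eot = largest id
#   logits_v, logits_t = model(video, text)     # both [N, N] similarity matrices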
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name) and not isinstance(l, TransformerDecoder_uniformer_diff_conv_balance):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def interpolate_temporal_pos_embed(pos_embed, T):
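# Resizes a temporal positional embedding of shape [1, t, d] to T frames via 1-D linear
# interpolation, so a checkpoint trained with one clip length can be reused with another.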
# [1, t, d] -> [1, d, t]
pos_embed = pos_embed.transpose(-2, -1)
# [1, d, t] -> [1, d, T]
pos_embed = F.interpolate(pos_embed, size=(T), mode='linear')
# [1, d, T] -> [1, T, d]
return pos_embed.transpose(-2, -1)
def interploate_rpb(rpb, T):
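# A temporal relative position bias table covers 2*T - 1 offsets; this linearly
# interpolates an existing table to match a new temporal size T.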
t1 = T * 2 - 1
rpb = rpb.transpose(0, 1).unsqueeze(0)
rpb = F.interpolate(rpb, size=(t1), mode='linear')
return rpb.squeeze(0).transpose(0, 1)
def build_model(
state_dict: dict,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False,
init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None,
):
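# Architecture hyper-parameters (widths, layer counts, patch size, resolution, vocab
# size, context length) are inferred from tensor shapes in the checkpoint, mirroring
# the original CLIP build_model.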
vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict
if "visual.proj" in state_dict:
state_dict["visual_proj"] = state_dict["visual.proj"]
state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"]
state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"]
del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"]
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# if k.startswith("backbone."):
# k = k.replace("backbone.", "visual.")
# new_state_dict[k] = v
# state_dict = new_state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
# embed_dim = 512
# context_length = 77
# vocab_size = 49408
# transformer_width = 512
# transformer_layers = 12
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
########### add this ############
# for k, v in state_dict.items():
# print(k, v.shape)
################################################
vision_width = state_dict["visual_proj"].shape[0]
n_dim = vision_width
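# Choose the video backbone and which intermediate layers to return based on the
# projection width found in the checkpoint (768 -> ViT-B/16, 1024 -> ViT-L/14).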
if vision_width == 768:
backbone = "vit_only_global_b16"
n_head = 12
return_list = [8, 9, 10, 11]
# check the more specific 336px variant first; a generic `vision_width == 1024`
# branch before it would make that case unreachable
elif vision_width == 1024 and "input_resolution" in state_dict and state_dict["input_resolution"] == 336:
backbone = "vit_only_global_l14_336"
n_head = 16
return_list = [20, 21, 22, 23]
elif vision_width == 1024:
backbone = "vit_only_global_l14"
n_head = 16
return_list = [20, 21, 22, 23]
else:
raise NotImplementedError
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone,
init_zero=init_zero, return_list=return_list,
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
# convert_weights(model)
temporal_key = 'visual.temporal_positional_embedding'
temporal_key2 = 'evl.pemb_t'
if temporal_key in state_dict and t_size != state_dict[temporal_key].size(1):
state_dict[temporal_key] = interpolate_temporal_pos_embed(state_dict[temporal_key], t_size)
if temporal_key2 in state_dict:
state_dict[temporal_key2] = interpolate_temporal_pos_embed(state_dict[temporal_key2], t_size)
for kk, vv in state_dict.items():
if 'rpb_t' in kk:
size_old = state_dict[kk].shape
state_dict[kk] = interploate_rpb(vv, t_size)
size_new = state_dict[kk].shape
print('Interpolating', kk, size_old, '-->', size_new)
# set_trace()
# print('$' * 100)
# for k, v in model.state_dict().items():
# print(k, v.shape)
if mergeclip:
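# Weight merging: for every parameter present in both checkpoints, take the convex
# combination mergeweight * CLIP + (1 - mergeweight) * fine-tuned weight.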
assert 0.0 <= mergeweight <= 1.0
assert clip_state_dict is not None
clip_sd = {k: v.cpu() for k, v in clip_state_dict.items()}
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_sd:
del clip_sd[key]
## trick: use this func to transfer the 2d clip weights into the 3d model, then read the converted weights back from it below
evl_utils.clip_vit_fusion.clip_load_state_dict(model, clip_sd)
loaded_dict = model.state_dict()
clip_sd_new = {k: v.cpu() for k, v in loaded_dict.items() if k in clip_sd}
# set_trace()
new_sd = deepcopy(state_dict)
for k in new_sd:
if k not in clip_sd_new:
continue
if any(x in k for x in clip_sd_new.keys()):
print('merging: ', k, '\t', clip_sd_new[k].shape, state_dict[k].shape)
new_sd[k] = clip_sd_new[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
############## only merge the clip text features, this is for ActivityNet ###########
# if 'visual' in k:
# new_sd[k] = clip_sd[k]
# else:
# new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
################################################################################
############## only merge the clip visual features, this is for MSVD ###########
# if 'visual' in k:
# new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
# else:
# new_sd[k] = clip_sd[k]
################################################################################
state_dict = new_sd
# print('$' * 100)
# for k, v in state_dict.items():
# print(k, v.shape)
if not no_pretrain:
# msg = evl_utils.clip_vit_fusion.clip_load_state_dict(model, state_dict)
msg = model.load_state_dict(state_dict, strict=False)
print(msg)
return model.eval()
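# Hedged usage sketch (comment only; the checkpoint path is a placeholder, not a file
# shipped with the repo):
#   sd = torch.load("/path/to/retrieval_checkpoint.pt", map_location="cpu")
#   model = build_model(sd, t_size=8, mergeclip=False)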
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/model.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder_uniformer_diff_conv_balance
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
n_layers=12,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_frames=8,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=4,
uni_type='3d',
add_ffn=True,
t_conv_type='1d',
pre_prompt=True,
balance=0.,
after_me=True,
before_me=False,
me_type='dstm',
me_reduction=4,
num_classes=400,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False, num_frames=num_frames, t_size=t_size)
self.evl = TransformerDecoder_uniformer_diff_conv_balance(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
pre_prompt=pre_prompt, balance=balance,
after_me=after_me, before_me=before_me,
me_type=me_type, me_reduction=me_reduction,
num_classes=num_classes
)
self.return_num = n_layers
def forward(self, x, mode='image'):
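# The CLIP-initialised backbone returns its last `return_num` intermediate feature
# maps, which the EVL transformer decoder aggregates into a clip-level prediction.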
features = self.backbone(x, return_num=self.return_num, mode=mode)
output = self.evl(features, mode=mode)
return output
def cal_flops(model, frame=8, size=224):
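# FLOP estimate on a single dummy clip of shape [1, 3, frame, size, size] using
# fvcore's FlopCountAnalysis; flop_count_table prints a per-module breakdown.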
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_2plus1d_diff_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_dw_bias_b16',
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_frames=8,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth')
# print(f'load model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_2plus1d_diff_b_sparse8()
cal_flops(model, frame=1, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_diff.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes,
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_l_sparse16(pretrained=True):
# 16x224x224
# k400 1x1: 86.5
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth')
print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l_sparse32(pretrained=True):
# 32x224x224
# k400 1x1: 87.0
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth')
print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l336_sparse32(pretrained=True):
# 32x336x336
# k400 1x1: 87.4
model = EVL(
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth')
print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_l_sparse16()
cal_flops(model, frame=16, size=224)
# model = vit_l_sparse32()
# cal_flops(model, frame=32, size=224)
# model = vit_l336_sparse32()
# cal_flops(model, frame=32, size=336) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/model_freeze.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=False,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes, add_residual=add_residual
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.4
model = EVL(
backbone='vit_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth')
print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_2plus1d_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth')
print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
# model = vit_b_sparse8()
# cal_flops(model, frame=8, size=224)
model = vit_2plus1d_b_sparse8()
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze.py |