python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a launcher script for launching mjrl training using hydra
"""
import os
import hydra
import multiprocessing
from omegaconf import OmegaConf
os.environ["MUJOCO_GL"] = "egl"
cwd = os.getcwd()
# ===============================================================================
# Process Inputs and configure job
# ===============================================================================
@hydra.main(config_path="config", config_name="DMC_BC_config", version_base="1.1")
def configure_jobs(config: dict) -> None:
print("========================================")
print("Job Configuration")
print("========================================")
config = OmegaConf.structured(OmegaConf.to_yaml(config))
from train_loop import bc_pvr_train_loop
config["cwd"] = cwd
with open("job_config.json", "w") as fp:
OmegaConf.save(config=config, f=fp.name)
print(OmegaConf.to_yaml(config))
bc_pvr_train_loop(config)
if __name__ == "__main__":
multiprocessing.set_start_method("spawn")
configure_jobs()
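# Illustrative launch (a sketch, not a verified command; the config name is wired above via
# @hydra.main, and the overrides shown are example keys read later by train_loop.py):
#   python hydra_launcher.py env_kwargs.env_name=dmc_walker_stand-v1 seed=123
# The "spawn" start method is used because workers may be created after CUDA/EGL have been
# initialized in the parent process.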
| eai-vc-main | cortexbench/mujoco_vc/visual_imitation/hydra_launcher.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
from mjrl.policies.gaussian_mlp import MLP, BatchNormMLP
from mjrl.algos.behavior_cloning import BC
from mujoco_vc.gym_wrapper import env_constructor
from mujoco_vc.rollout_utils import rollout_from_init_states
from mujoco_vc.model_loading import (
load_pretrained_model,
fuse_embeddings_concat,
fuse_embeddings_flare,
)
from tabulate import tabulate
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from vc_models.utils.wandb import setup_wandb
import mj_envs, gym, mjrl.envs, dmc2gym
import numpy as np, time as timer, multiprocessing, pickle, os, torch, gc
import torch.nn as nn
import torchvision.transforms as T
def set_seed(seed=None):
"""
Set all seeds to make results reproducible
:param seed: an integer of your choosing (default: None)
"""
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
def configure_cluster_GPUs(gpu_logical_id: int) -> int:
"""
Maps the GPU logical ID to the physical ID. This is required for MuJoCo to
correctly use the GPUs, since it relies on the physical ID, unlike PyTorch.
"""
# get the correct GPU ID
if "SLURM_STEP_GPUS" in os.environ.keys():
physical_gpu_ids = os.environ.get("SLURM_STEP_GPUS")
gpu_id = int(physical_gpu_ids.split(",")[gpu_logical_id])
print("Found slurm-GPUS: <Physical_id:{}>".format(physical_gpu_ids))
print(
"Using GPU <Physical_id:{}, Logical_id:{}>".format(gpu_id, gpu_logical_id)
)
else:
gpu_id = 0 # base case when no GPUs detected in SLURM
print("No GPUs detected. Defaulting to 0 as the device ID")
return gpu_id
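# Illustrative mapping (hypothetical values): if SLURM exposes physical GPUs "2,3" for this
# step, logical ID 1 resolves to physical ID 3; outside SLURM the function falls back to 0.
#   >>> os.environ["SLURM_STEP_GPUS"] = "2,3"
#   >>> configure_cluster_GPUs(1)
#   3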
def bc_pvr_train_loop(config: dict) -> None:
# configure GPUs
# os.environ['GPUS'] = os.environ.get('SLURM_STEP_GPUS', '0')
physical_gpu_id = configure_cluster_GPUs(config["env_kwargs"]["render_gpu_id"])
config["env_kwargs"]["render_gpu_id"] = physical_gpu_id
# set the seed
set_seed(config["seed"])
# infer the demo location
demo_paths_loc = os.path.join(
config["data_dir"], config["env_kwargs"]["env_name"] + ".pickle"
)
try:
demo_paths = pickle.load(open(demo_paths_loc, "rb"))
except Exception:
print("Unable to load the data. Check the data path.")
print(demo_paths_loc)
quit()
demo_paths = demo_paths[: config["num_demos"]]
demo_score = np.mean([np.sum(p["rewards"]) for p in demo_paths])
print("Number of demonstrations used : %i" % len(demo_paths))
print("Demonstration score : %.2f " % demo_score)
# store init_states for evaluation on training trajectories
if config["env_kwargs"]["suite"] == "dmc":
init_states = [
p["env_infos"]["internal_state"][0].astype(np.float64) for p in demo_paths
]
elif config["env_kwargs"]["suite"] == "adroit":
init_states = [p["init_state_dict"] for p in demo_paths]
elif config["env_kwargs"]["suite"] == "metaworld":
init_states = []
else:
print("\n\n Unsupported environment suite.")
quit()
# construct the environment and policy
env_kwargs = config["env_kwargs"]
e = env_constructor(**env_kwargs, fuse_embeddings=fuse_embeddings_flare)
policy = BatchNormMLP(
env_spec=e.spec,
hidden_sizes=eval(config["bc_kwargs"]["hidden_sizes"]),
seed=config["seed"],
nonlinearity=config["bc_kwargs"]["nonlinearity"],
dropout=config["bc_kwargs"]["dropout"],
)
# compute embeddings and create dataset
print("===================================================================")
print(">>>>>>>>> Precomputing frozen embedding dataset >>>>>>>>>>>>>>>>>>>")
demo_paths = compute_embeddings(
demo_paths,
device=config["device"],
embedding_name=config["env_kwargs"]["embedding_name"],
)
demo_paths = precompute_features(
demo_paths,
history_window=config["env_kwargs"]["history_window"],
fuse_embeddings=fuse_embeddings_flare,
proprio_key=config["env_kwargs"]["proprio_key"],
)
gc.collect() # garbage collection to free up RAM
dataset = FrozenEmbeddingDataset(
demo_paths,
history_window=config["env_kwargs"]["history_window"],
fuse_embeddings=fuse_embeddings_flare,
)
# The dataset here is pre-loaded into RAM (CPU memory) rather than read from disk
dataloader = DataLoader(
dataset,
batch_size=config["bc_kwargs"]["batch_size"],
shuffle=True,
num_workers=0,
pin_memory=True,
)
optimizer = torch.optim.Adam(
list(policy.model.parameters()), lr=config["bc_kwargs"]["lr"]
)
loss_func = torch.nn.MSELoss()
# Update logging to match CortexBench conventions
# Make log dir
wandb_run = setup_wandb(config)
if os.path.isdir(config["job_name"]) == False:
os.mkdir(config["job_name"])
previous_dir = os.getcwd()
os.chdir(config["job_name"]) # important! we are now in the directory to save data
if os.path.isdir("iterations") == False:
os.mkdir("iterations")
if os.path.isdir("logs") == False:
os.mkdir("logs")
highest_tr_score, highest_score = -np.inf, -np.inf
highest_tr_success, highest_success = 0.0, 0.0
for epoch in tqdm(range(config["epochs"])):
# move the policy to correct device
policy.model.to(config["device"])
policy.model.train()
# update policy for one BC epoch
running_loss = 0.0
for mb_idx, batch in enumerate(dataloader):
optimizer.zero_grad()
feat = batch["features"].float().to(config["device"])
tar = batch["actions"].float().to(config["device"])
pred = policy.model(feat)
loss = loss_func(pred, tar.detach())
loss.backward()
optimizer.step()
running_loss = running_loss + loss.to("cpu").data.numpy().ravel()[0]
# log average loss for the epoch
wandb_run.log({"epoch_loss": running_loss / (mb_idx + 1)}, step=epoch + 1)
# move the policy to CPU for saving and evaluation
policy.model.to("cpu")
policy.model.eval()
# ensure environment embedding is in eval mode before rollouts
e.env.embedding.eval()
# perform evaluation rollouts every few epochs
if (epoch % config["eval_frequency"] == 0 and epoch > 0) or (
epoch == config["epochs"] - 1
):
paths = sample_paths(
num_traj=config["eval_num_traj"],
env=e,
policy=policy,
eval_mode=True,
horizon=e.horizon,
base_seed=config["seed"],
num_cpu=config["num_cpu"],
)
(
mean_score,
success_percentage,
highest_score,
highest_success,
) = compute_metrics_from_paths(
env=e,
suite=config["env_kwargs"]["suite"],
paths=paths,
highest_score=highest_score,
highest_success=highest_success,
)
epoch_log = {}
epoch_log["eval/epoch"] = epoch
epoch_log["eval/score_mean"] = mean_score
epoch_log["eval/success"] = success_percentage
epoch_log["eval/highest_success"] = highest_success
epoch_log["eval/highest_score"] = highest_score
# log statistics on training paths
if len(init_states) > 0:
paths = rollout_from_init_states(
init_states[: config["eval_num_traj"]],
e,
policy,
eval_mode=True,
horizon=e.horizon,
)
else:
# use same seed as used for collecting the training paths
paths = sample_paths(
num_traj=config["eval_num_traj"],
env=e,
policy=policy,
eval_mode=True,
horizon=e.horizon,
base_seed=54321,
num_cpu=config["num_cpu"],
)
(
tr_score,
tr_success,
highest_tr_score,
highest_tr_success,
) = compute_metrics_from_paths(
env=e,
suite=config["env_kwargs"]["suite"],
paths=paths,
highest_score=highest_tr_score,
highest_success=highest_tr_success,
)
epoch_log["train/epoch"] = epoch
epoch_log["train/score"] = tr_score
epoch_log["train/success"] = tr_success
epoch_log["train/highest_score"] = highest_tr_score
epoch_log["train/highest_success"] = highest_tr_success
# Log with wandb
wandb_run.log(data=epoch_log)
print(
"Epoch = %i | BC performance (eval mode) = %.3f " % (epoch, mean_score)
)
print(tabulate(sorted(epoch_log.items())))
# save policy and logging
if (epoch % config["save_frequency"] == 0 and epoch > 0) or (
epoch == config["epochs"] - 1
):
# pickle.dump(agent.policy, open('./iterations/policy_%i.pickle' % epoch, 'wb'))
if highest_score == mean_score:
pickle.dump(policy, open("./iterations/best_policy.pickle", "wb"))
def compute_metrics_from_paths(
env: GymEnv,
suite: str,
paths: list,
highest_score: float = -1.0,
highest_success: float = -1.0,
):
mean_score = np.mean([np.sum(p["rewards"]) for p in paths])
if suite == "dmc":
# we evaluate dmc based on returns, not success
success_percentage = -1.0
if suite == "adroit":
success_percentage = env.env.unwrapped.evaluate_success(paths)
if suite == "metaworld":
sc = []
for i, path in enumerate(paths):
sc.append(path["env_infos"]["success"][-1])
success_percentage = np.mean(sc) * 100
highest_score = mean_score if mean_score >= highest_score else highest_score
highest_success = (
success_percentage if success_percentage >= highest_success else highest_success
)
return mean_score, success_percentage, highest_score, highest_success
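# Note on the metrics above: mean_score is always the mean undiscounted return. For the "dmc"
# suite, success_percentage is a -1.0 sentinel (DMC is evaluated on returns only); for "adroit"
# it comes from the env's evaluate_success(); for "metaworld" it is the percentage of rollouts
# whose final-step env_info reports success.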
class FrozenEmbeddingDataset(Dataset):
def __init__(
self,
paths: list,
history_window: int = 1,
fuse_embeddings: callable = None,
device: str = "cuda",
):
self.paths = paths
assert "embeddings" in self.paths[0].keys()
# assume equal length trajectories
# code will work even otherwise but may have some edge cases
self.path_length = max([p["actions"].shape[0] for p in paths])
self.num_paths = len(self.paths)
self.history_window = history_window
self.fuse_embeddings = fuse_embeddings
self.device = device
def __len__(self):
return self.path_length * self.num_paths
def __getitem__(self, index):
traj_idx = int(index // self.path_length)
timestep = int(index - traj_idx * self.path_length)
timestep = min(timestep, self.paths[traj_idx]["actions"].shape[0] - 1)  # clamp to the last valid index for shorter trajectories
if "features" in self.paths[traj_idx].keys():
features = self.paths[traj_idx]["features"][timestep]
action = self.paths[traj_idx]["actions"][timestep]
else:
embeddings = [
self.paths[traj_idx]["embeddings"][max(timestep - k, 0)]
for k in range(self.history_window)
]
embeddings = embeddings[
::-1
] # embeddings[-1] should be most recent embedding
features = self.fuse_embeddings(embeddings)
# features = torch.from_numpy(features).float().to(self.device)
action = self.paths[traj_idx]["actions"][timestep]
# action = torch.from_numpy(action).float().to(self.device)
return {"features": features, "actions": action}
def compute_embeddings(
paths: list, embedding_name: str, device: str = "cpu", chunk_size: int = 20
):
model, embedding_dim, transforms, metadata = load_pretrained_model(
embedding_name=embedding_name
)
model.to(device)
for path in tqdm(paths):
inp = path["images"] # shape (B, H, W, 3)
path["embeddings"] = np.zeros((inp.shape[0], embedding_dim))
path_len = inp.shape[0]
preprocessed_inp = torch.cat(
[transforms(frame) for frame in inp]
) # shape (B, 3, H, W)
for chunk in range(path_len // chunk_size + 1):
if chunk_size * chunk < path_len:
with torch.no_grad():
inp_chunk = preprocessed_inp[
chunk_size * chunk : min(chunk_size * (chunk + 1), path_len)
]
emb = model(inp_chunk.to(device))
# save embedding in RAM and free up GPU memory
emb = emb.to("cpu").data.numpy()
path["embeddings"][
chunk_size * chunk : min(chunk_size * (chunk + 1), path_len)
] = emb
del path["images"] # no longer need the images, free up RAM
return paths
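# Note: chunk_size only bounds GPU memory use; each chunk of frames is embedded under
# torch.no_grad() and immediately moved back to CPU, so at most one chunk of activations is
# resident on the device at a time. The raw images are deleted afterwards to free RAM.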
def precompute_features(
paths: list,
history_window: int = 1,
fuse_embeddings: callable = None,
proprio_key: str = None,
):
assert "embeddings" in paths[0].keys()
for path in paths:
features = []
for t in range(path["embeddings"].shape[0]):
emb_hist_t = [
path["embeddings"][max(t - k, 0)] for k in range(history_window)
]
emb_hist_t = emb_hist_t[
::-1
] # emb_hist_t[-1] should correspond to time t embedding
feat_t = fuse_embeddings(emb_hist_t)
if proprio_key not in [None, "None"]:
assert proprio_key in path["env_infos"].keys()
feat_t = np.concatenate([feat_t, path["env_infos"][proprio_key][t]])
features.append(feat_t.copy())
path["features"] = np.array(features)
return paths
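# The resulting path["features"] array has shape (T, history_window * embedding_dim), with the
# proprioceptive vector appended per timestep when proprio_key is set; this matches the
# observation produced online by FrozenEmbeddingWrapper in gym_wrapper.py.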
| eai-vc-main | cortexbench/mujoco_vc/visual_imitation/train_loop.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
core_model_set = [
"vc1_vitb",
"vc1_vitl",
]
print(",".join(core_model_set))
| eai-vc-main | cortexbench/mujoco_vc/visual_imitation/core_model_set.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import gym
ENV_TO_SUITE = {
"dmc_walker_stand-v1": "dmc",
"dmc_walker_walk-v1": "dmc",
"dmc_reacher_easy-v1": "dmc",
"dmc_cheetah_run-v1": "dmc",
"dmc_finger_spin-v1": "dmc",
"pen-v0": "adroit",
"relocate-v0": "adroit",
"assembly-v2-goal-observable": "metaworld",
"bin-picking-v2-goal-observable": "metaworld",
"button-press-topdown-v2-goal-observable": "metaworld",
"drawer-open-v2-goal-observable": "metaworld",
"hammer-v2-goal-observable": "metaworld",
}
if __name__ == "__main__":
# import the suites
import mj_envs, dmc2gym
from metaworld.envs import ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE
from collections import namedtuple
for id in ENV_TO_SUITE.keys():
print("Creating env : %s" % id)
if ENV_TO_SUITE[id] == "metaworld":
e = ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE[id]()
e._freeze_rand_vec = False
e.spec = namedtuple("spec", ["id", "max_episode_steps"])
e.spec.id = id
e.spec.max_episode_steps = 500
else:
e = gym.make(id)
| eai-vc-main | cortexbench/mujoco_vc/src/mujoco_vc/supported_envs.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import gym, dmc2gym, mjrl, mj_envs
| eai-vc-main | cortexbench/mujoco_vc/src/mujoco_vc/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from vc_models import vc_models_dir_path
from omegaconf import OmegaConf
from PIL import Image
import os
import hydra
import torch, torchvision.transforms as T
import numpy as np
# ===================================
# Model Loading
# ===================================
def load_pretrained_model(embedding_name, input_type=np.ndarray, *args, **kwargs):
"""
Load the pretrained model based on the config corresponding to the embedding_name
"""
config_path = os.path.join(
vc_models_dir_path, "conf/model", embedding_name + ".yaml"
)
print("Loading config path: %s" % config_path)
config = OmegaConf.load(config_path)
model, embedding_dim, transforms, metadata = hydra.utils.call(config)
model = model.eval() # model loading API is unreliable, call eval to be double sure
def final_transforms(transforms):
if input_type == np.ndarray:
return lambda input: transforms(Image.fromarray(input)).unsqueeze(0)
else:
return transforms
return model, embedding_dim, final_transforms(transforms), metadata
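# Illustrative usage (a minimal sketch; "vc1_vitb" is taken from core_model_set.py and assumes
# the corresponding config exists under vc_models/conf/model):
#   >>> model, dim, transform, meta = load_pretrained_model("vc1_vitb")
#   >>> frame = np.zeros((224, 224, 3), dtype=np.uint8)
#   >>> transform(frame).shape   # (1, 3, H, W) after PIL conversion and unsqueeze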
# ===================================
# Temporal Embedding Fusion
# ===================================
def fuse_embeddings_concat(embeddings: list):
assert type(embeddings[0]) == np.ndarray
return np.array(embeddings).ravel()
def fuse_embeddings_flare(embeddings: list):
if type(embeddings[0]) == np.ndarray:
history_window = len(embeddings)
delta = [embeddings[i + 1] - embeddings[i] for i in range(history_window - 1)]
delta.append(embeddings[-1].copy())
return np.array(delta).ravel()
elif type(embeddings[0]) == torch.Tensor:
history_window = len(embeddings)
# each embedding will be (Batch, Dim)
delta = [embeddings[i + 1] - embeddings[i] for i in range(history_window - 1)]
delta.append(embeddings[-1])
return torch.cat(delta, dim=1)
else:
print("Unsupported embedding format in fuse_embeddings_flare.")
print("Provide either numpy.ndarray or torch.Tensor.")
quit()
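# Shape sketch (assumed sizes): given a history window of k embeddings, each of dimension d,
# fuse_embeddings_flare returns a flat vector of k * d values: the (k - 1) temporal differences
# followed by the most recent embedding. E.g. k=3, d=768 -> a 2304-dim feature.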
| eai-vc-main | cortexbench/mujoco_vc/src/mujoco_vc/model_loading.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import gym
from mjrl.utils.gym_env import GymEnv
from gym.spaces.box import Box
from mujoco_vc.model_loading import load_pretrained_model
from mujoco_vc.supported_envs import ENV_TO_SUITE
from typing import Union, Tuple
class MuJoCoPixelObsWrapper(gym.ObservationWrapper):
def __init__(
self,
env,
width,
height,
camera_name,
device_id=-1,
depth=False,
*args,
**kwargs
):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = Box(low=0.0, high=255.0, shape=(3, width, height))
self.width = width
self.height = height
self.camera_name = camera_name
self.depth = depth
self.device_id = device_id
def get_image(self):
if self.spec.id.startswith("dmc"):
# dmc backend
# dmc expects camera_id as an integer and not name
if self.camera_name == None or self.camera_name == "None":
self.camera_name = 0
img = self.env.unwrapped.render(
mode="rgb_array",
width=self.width,
height=self.height,
camera_id=int(self.camera_name),
)
else:
# mujoco-py backend
img = self.sim.render(
width=self.width,
height=self.height,
depth=self.depth,
camera_name=self.camera_name,
device_id=self.device_id,
)
img = img[::-1, :, :]
return img
def observation(self, observation):
# This function creates observations based on the current state of the environment.
# Argument `observation` is ignored, but `gym.ObservationWrapper` requires it.
# Output format is (H, W, 3)
return self.get_image()
class FrozenEmbeddingWrapper(gym.ObservationWrapper):
"""
This wrapper places a frozen vision model over the image observation.
Args:
env (Gym environment): the original environment
suite (str): category of environment ["dmc", "adroit", "metaworld"]
embedding_name (str): name of the embedding to use (name of config)
history_window (int, 1) : timesteps of observation embedding to incorporate into observation (state)
fuse_embeddings (callable, 'None'): function for fusing the embeddings into a state.
Defaults to concatenation if not specified.
obs_dim (int, 'None') : dimensionality of the observation space. Inferred if not
specified; defaults to history_window * embedding_dim (+ proprio_dim).
add_proprio (bool, 'False') : flag to specify if proprioception should be appended to observation
device (str, 'cuda'): where to allocate the model.
"""
def __init__(
self,
env,
embedding_name: str,
suite: str,
history_window: int = 1,
fuse_embeddings: callable = None,
obs_dim: int = None,
device: str = "cuda",
seed: int = None,
add_proprio: bool = False,
*args,
**kwargs
):
gym.ObservationWrapper.__init__(self, env)
self.embedding_buffer = (
[]
) # buffer to store raw embeddings of the image observation
self.obs_buffer = [] # temp variable, delete this line later
self.history_window = history_window
self.fuse_embeddings = fuse_embeddings
if device == "cuda" and torch.cuda.is_available():
print("Using CUDA.")
device = torch.device("cuda")
else:
print("Not using CUDA.")
device = torch.device("cpu")
self.device = device
# get the embedding model
embedding, embedding_dim, transforms, metadata = load_pretrained_model(
embedding_name=embedding_name, seed=seed
)
embedding.to(device=self.device)
# freeze the PVR
for p in embedding.parameters():
p.requires_grad = False
self.embedding, self.embedding_dim, self.transforms = (
embedding,
embedding_dim,
transforms,
)
# proprioception
if add_proprio:
self.get_proprio = lambda: get_proprioception(self.unwrapped, suite)
proprio = self.get_proprio()
self.proprio_dim = 0 if proprio is None else proprio.shape[0]
else:
self.proprio_dim = 0
self.get_proprio = None
# final observation space
obs_dim = (
obs_dim
if obs_dim != None
else int(self.history_window * self.embedding_dim + self.proprio_dim)
)
self.observation_space = Box(low=-np.inf, high=np.inf, shape=(obs_dim,))
def observation(self, observation):
# observation shape : (H, W, 3)
inp = self.transforms(
observation
) # numpy to PIL to torch.Tensor. Final dimension: (1, 3, H, W)
inp = inp.to(self.device)
with torch.no_grad():
emb = (
self.embedding(inp)
.view(-1, self.embedding_dim)
.to("cpu")
.numpy()
.squeeze()
)
# update observation buffer
if len(self.embedding_buffer) < self.history_window:
# initialization
self.embedding_buffer = [emb.copy()] * self.history_window
else:
# fixed size buffer, replace oldest entry
for i in range(self.history_window - 1):
self.embedding_buffer[i] = self.embedding_buffer[i + 1].copy()
self.embedding_buffer[-1] = emb.copy()
# fuse embeddings to obtain observation
if self.fuse_embeddings != None:
obs = self.fuse_embeddings(self.embedding_buffer)
else:
# print("Fuse embedding function not given. Defaulting to concat.")
obs = np.array(self.embedding_buffer).ravel()
# add proprioception if necessary
if self.proprio_dim > 0:
proprio = self.get_proprio()
obs = np.concatenate([obs, proprio])
return obs
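# Note: the observation returned above is a flat vector of size
# history_window * embedding_dim (+ proprio_dim); FLARE fusion and plain concatenation both
# preserve this dimensionality.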
def get_obs(self):
return self.observation(self.env.observation(None))
def get_image(self):
return self.env.get_image()
def reset(self):
self.embedding_buffer = [] # reset to empty buffer
return super().reset()
def env_constructor(
env_name: str,
pixel_based: bool = True,
device: str = "cuda",
image_width: int = 256,
image_height: int = 256,
camera_name: str = None,
embedding_name: str = "resnet50",
history_window: int = 1,
fuse_embeddings: callable = None,
render_gpu_id: int = -1,
seed: int = 123,
add_proprio=False,
*args,
**kwargs
) -> GymEnv:
# construct basic gym environment
assert env_name in ENV_TO_SUITE.keys()
suite = ENV_TO_SUITE[env_name]
if suite == "metaworld":
# Metaworld natively lacks several spec fields. We explicitly add them here.
from metaworld.envs import ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE
from collections import namedtuple
e = ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE[env_name]()
e._freeze_rand_vec = False
e.spec = namedtuple("spec", ["id", "max_episode_steps"])
e.spec.id = env_name
e.spec.max_episode_steps = 500
else:
e = gym.make(env_name)
# seed the environment for reproducibility
e.seed(seed)
# get correct camera name
camera_name = (
None if (camera_name == "None" or camera_name == "default") else camera_name
)
# Use appropriate observation wrapper
if pixel_based:
e = MuJoCoPixelObsWrapper(
env=e,
width=image_width,
height=image_height,
camera_name=camera_name,
device_id=0,
)
e = FrozenEmbeddingWrapper(
env=e,
embedding_name=embedding_name,
suite=suite,
history_window=history_window,
fuse_embeddings=fuse_embeddings,
device=device,
seed=seed,
add_proprio=add_proprio,
)
e = GymEnv(e)
else:
e = GymEnv(e)
# Output wrapped env
e.set_seed(seed)
return e
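# Illustrative construction (a sketch; the embedding name and fusion function are examples,
# following the __main__ block in rollout_utils.py):
#   >>> from mujoco_vc.model_loading import fuse_embeddings_flare
#   >>> e = env_constructor(env_name="dmc_reacher_easy-v1", embedding_name="vc1_vitb",
#   ...                     history_window=3, fuse_embeddings=fuse_embeddings_flare, seed=123)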
def get_proprioception(env: gym.Env, suite: str) -> Union[np.ndarray, None]:
assert isinstance(env, gym.Env)
if suite == "metaworld":
return env.unwrapped._get_obs()[:4]
elif suite == "adroit":
# In adroit, in-hand tasks like pen lock the base of the hand
# while other tasks like relocate allow for movement of hand base
# as if attached to an arm
if env.unwrapped.spec.id == "pen-v0":
return env.unwrapped.get_obs()[:24]
elif env.unwrapped.spec.id == "relocate-v0":
return env.unwrapped.get_obs()[:30]
else:
print("Unsupported environment. Proprioception is defaulting to None.")
return None
elif suite == "dmc":
# no proprioception used for dm-control
return None
else:
print("Unsupported environment. Proprioception is defaulting to None.")
return None
| eai-vc-main | cortexbench/mujoco_vc/src/mujoco_vc/gym_wrapper.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
from mjrl.utils.gym_env import GymEnv
from mjrl.utils import tensor_utils
from tqdm import tqdm
logging.disable(logging.CRITICAL)
import multiprocessing as mp
import time as timer
import gc
def toggle_tqdm(rng, debug):
if debug:
return tqdm(rng)
else:
return rng
def rollout_from_init_states(
init_states,
env,
policy,
eval_mode=False,
horizon=1e6,
debug=False,
) -> list:
assert isinstance(env, GymEnv)
assert isinstance(init_states, list)
num_traj = len(init_states)
horizon = min(horizon, env.horizon)
paths = []
for ep in toggle_tqdm(range(num_traj), debug):
# set initial state
env.reset()
init_state = init_states[ep]
if env.env_id.startswith("dmc"):
# dm-control physics backend
env.env.unwrapped._env.physics.set_state(init_state.astype(np.float64))
env.env.unwrapped._env.physics.forward()
else:
# mujoco_py backend
env.env.unwrapped.set_env_state(init_state)
observations = []
actions = []
rewards = []
agent_infos = []
env_infos = []
done = False
t = 0
o = env.get_obs()
while t < horizon and done != True:
a, agent_info = policy.get_action(o)
if eval_mode:
a = agent_info["evaluation"]
env_info_base = env.get_env_infos()
next_o, r, done, env_info_step = env.step(a)
# below is important to ensure correct env_infos for the timestep
env_info = env_info_step if env_info_base == {} else env_info_base
observations.append(o)
actions.append(a)
rewards.append(r)
agent_infos.append(agent_info)
env_infos.append(env_info)
o = next_o.copy()
t += 1
path = dict(
observations=np.array(observations),
actions=np.array(actions),
rewards=np.array(rewards),
agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),
env_infos=tensor_utils.stack_tensor_dict_list(env_infos),
terminated=done,
)
paths.append(path)
del env
gc.collect()
return paths
if __name__ == "__main__":
import pickle
from mjrl.policies.gaussian_mlp import MLP, BatchNormMLP
from gym_wrapper import env_constructor
# DMC test
data_paths = pickle.load(
open(
"/checkpoint/maksymets/vc/datasets/dmc-expert-v0.1/dmc_reacher_easy-v1.pickle",
"rb",
)
)
e = env_constructor(
env_name="dmc_reacher_easy-v1",
camera=0,
embedding_name="r3m_resnet50_ego4d",
history_window=3,
seed=12345,
)
policy = BatchNormMLP(e.spec, dropout=0.0)
init_states = [
p["env_infos"]["internal_state"][0].astype(np.float64) for p in data_paths
]
del data_paths
gc.collect()
paths = rollout_from_init_states(
init_states=init_states,
env=e,
policy=policy,
eval_mode=True,
horizon=10, # short horizon for debugging
debug=True, # will toggle tqdm
)
# Adroit test
data_paths = pickle.load(
open(
"/checkpoint/maksymets/vc/datasets/adroit-expert-v0.1/pen-v0.pickle", "rb"
)
)
e = env_constructor(
env_name="pen-v0",
camera=0,
embedding_name="r3m_resnet50_ego4d",
history_window=3,
seed=12345,
)
policy = BatchNormMLP(e.spec, dropout=0.0)
init_states = [p["init_state_dict"] for p in data_paths]
del data_paths
gc.collect()
paths = rollout_from_init_states(
init_states=init_states,
env=e,
policy=policy,
eval_mode=True,
horizon=10, # short horizon for debugging
debug=True, # will toggle tqdm
)
# Metaworld
# Current dataset did not store the full state information.
# So the exact scene configuration cannot be recreated.
# Fixing this requires recollecting the dataset or using the same seed as collection (123)
| eai-vc-main | cortexbench/mujoco_vc/src/mujoco_vc/rollout_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from datetime import datetime
import os
import subprocess
import hydra
from omegaconf import DictConfig
import numpy as np
import torch
from habitat.config.default_structured_configs import register_hydra_plugin
from habitat_baselines.config.default_structured_configs import (
HabitatBaselinesConfigPlugin,
)
from habitat.config.default_structured_configs import HabitatConfigPlugin
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.ddppo.ddp_utils import rank0_only
from habitat2_vc.policy import EAIPolicy # noqa: F401
def get_random_seed():
seed = (
os.getpid()
+ int(datetime.now().strftime("%S%f"))
+ int.from_bytes(os.urandom(2), "big")
)
print("Using a generated random seed {}".format(seed))
return seed
def setup_experiment(config: DictConfig):
"""
Sets up the random seed and the wandb logger.
"""
# Set random seed
seed = get_random_seed()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Add the seed to the config
config.habitat.seed = seed
# Single-agent setup
config.habitat.simulator.agents_order = list(config.habitat.simulator.agents.keys())
# Add the wandb information to the habitat config
config.habitat_baselines.wb.project_name = config.WANDB.project
config.habitat_baselines.wb.run_name = config.WANDB.name
config.habitat_baselines.wb.group = config.WANDB.group
config.habitat_baselines.wb.entity = config.WANDB.entity
# Set torch to single threaded
if (
config.habitat_baselines.force_torch_single_threaded
and torch.cuda.is_available()
):
torch.set_num_threads(1)
# Create the checkpoint and video directories
if rank0_only():
os.makedirs(config.habitat_baselines.checkpoint_folder, exist_ok=True)
os.makedirs(config.habitat_baselines.video_dir, exist_ok=True)
# Create the symlink to the data folder
data_path = hydra.utils.to_absolute_path(config.habitat.dataset.data_path)
base_data_path = data_path.split("data/")[0] + "data/"
subprocess.call(
[
"ln",
"-s",
base_data_path,
"data",
]
)
# Set the log levels
os.environ["GLOG_minloglevel"] = "3"
os.environ["MAGNUM_LOG"] = "quiet"
os.environ["HABITAT_SIM_LOG"] = "quiet"
@hydra.main(config_path="configs", config_name="config", version_base="1.1")
def main(config: DictConfig) -> None:
r"""Main function for habitat_vc
Args:
config: DictConfig object containing the configs for the experiment.
"""
# Setup the experiment
setup_experiment(config)
# Get the trainer
trainer_init = baseline_registry.get_trainer(config.habitat_baselines.trainer_name)
assert (
trainer_init is not None
), f"{config.habitat_baselines.trainer_name} is not supported"
trainer = trainer_init(config)
# Train or eval
if config.RUN_TYPE == "train":
trainer.train()
elif config.RUN_TYPE == "eval":
trainer.eval()
if __name__ == "__main__":
# Register habitat hydra plugins
register_hydra_plugin(HabitatBaselinesConfigPlugin)
register_hydra_plugin(HabitatConfigPlugin)
# Call hydra main
main()
| eai-vc-main | cortexbench/habitat2_vc/run_rearrangement_vc.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import wandb
from tqdm import tqdm
import pandas as pd
pd.set_option("display.max_colwidth", None)
class WandbMetrics:
def __init__(self, project, entity="cortexbench"):
api = wandb.Api()
self.runs = api.runs(entity + "/" + project)
def extract_metrics(self, metric, filter_tags=[]):
print(f"Extracting {metric} metrics")
dfs = []
for run in tqdm(self.runs):
if any([tag in run.tags for tag in filter_tags]):
continue
steps, metrics = [], []
for i, row in run.history(keys=[metric]).iterrows():
steps.append(row["_step"])
metrics.append(row[metric])
dfs.append(
pd.DataFrame(
data={"model_name": run.name, "step": steps, "metric": metrics}
)
)
return pd.concat(dfs)
def extract_data(self, train_metric, eval_metric, tags=[]):
self.eval_metrics = self.extract_metrics(eval_metric, tags)
self.train_metrics = self.extract_metrics(train_metric, tags)
def clean_model_name(self, model_name):
return " ".join(model_name.split("_")[0:-6])
def get_metrics(self, max_step):
# Compare all runs for a fixed step
eval_metrics = self.eval_metrics.query(f"step < {max_step}")
train_metrics = self.train_metrics.query(f"step < {max_step}")
# Extract the max evaluation metric
final_metrics = eval_metrics.groupby("model_name").max().reset_index()
final_metrics["train_metric"] = 0
final_metrics["max_train_step"] = 0
final_metrics["max_test_step"] = 0
# Get the closest train metric to the max eval metric
for k, row in final_metrics.iterrows():
run = row["model_name"]
run_train_metrics = train_metrics[train_metrics["model_name"] == run]
# Get closest train metric to max eval
# train_metric = run_train_metrics.iloc[abs(run_train_metrics['step'] - row['step']).idxmin()]['metric']
# Get max metric from train
train_metric = run_train_metrics["metric"].max()
final_metrics.loc[k, "train_metric"] = train_metric
final_metrics.loc[k, "max_train_step"] = run_train_metrics["step"].max()
final_metrics.loc[k, "max_test_step"] = eval_metrics[
eval_metrics["model_name"] == run
]["step"].max()
# Clean model name
# final_metrics['model_name'] = final_metrics['model_name'].apply(self.clean_model_name)
final_metrics["eval_metric"] = final_metrics["metric"]
return (
final_metrics[
[
"model_name",
"train_metric",
"eval_metric",
"max_train_step",
"max_test_step",
]
]
.sort_values("eval_metric", ascending=False)
.reset_index(drop=True)
)
if __name__ == "__main__":
# Project
project = "habitat2.0"
# Train and eval metrics
train_metric, eval_metric = "metrics/pick_success", "eval_metrics/pick_success"
# All runs with this task are old, so they shouldn't be reported
filter_tags = ["Old"]
# Extract data
WB = WandbMetrics(project)
WB.extract_data(train_metric, eval_metric, filter_tags)
# I'm filtering all results after a fixed step horizon to do a fair comparison
train_horizon = 500_000_000
results = WB.get_metrics(train_horizon)
results.to_csv(f"./results_{project}.csv", index=False)
| eai-vc-main | cortexbench/habitat2_vc/analysis/retrieve_results.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import hydra
import torch
from torch import nn as nn
from omegaconf import open_dict
from habitat_baselines.rl.ddppo.policy.running_mean_and_var import RunningMeanAndVar
from vc_models.models.compression_layer import create_compression_layer
class VisualEncoder(nn.Module):
def __init__(
self,
backbone_config: str,
input_channels: int = 3,
image_size: int = 128,
normalize_visual_inputs: bool = True,
global_pool: bool = False,
use_cls: bool = False,
use_augmentations: bool = False,
loaded_backbone_data=None,
freeze_backbone: bool = True,
):
super().__init__()
self.freeze_backbone = freeze_backbone
self.is_resnet = "resnet" in backbone_config.metadata.model
if normalize_visual_inputs:
self.running_mean_and_var = RunningMeanAndVar(input_channels)
else:
self.running_mean_and_var = nn.Sequential()
backbone_config.transform.resize_size = image_size
backbone_config.transform.output_size = image_size
if use_augmentations is False:
backbone_config.transform.jitter = False
backbone_config.transform.shift = False
if "resnet" in backbone_config.metadata.model:
with open_dict(backbone_config):
# For VIP, the fc layer is part of the model, so we use the
# fc layer + avgpool instead of the compression layer
if "vip" in backbone_config.metadata.algo:
backbone_config.model.use_avgpool_and_flatten = True
else:
backbone_config.model.use_avgpool_and_flatten = False
if loaded_backbone_data is None:
(
self.backbone,
self.embed_dim,
self.visual_transform,
_,
) = hydra.utils.call(backbone_config)
else:
(
self.backbone,
self.embed_dim,
self.visual_transform,
) = loaded_backbone_data
if not backbone_config.model.use_avgpool_and_flatten:
final_spatial_compress = 1.0 / (2**5)
final_spatial = int(image_size * final_spatial_compress)
self.compression, _, self.output_size = create_compression_layer(
self.embed_dim, final_spatial
)
else:
self.output_size = self.embed_dim
self.compression = nn.Sequential()
elif (
"vit" in backbone_config.metadata.model
or "beit" in backbone_config.metadata.model
):
assert (
global_pool and use_cls
) is False, "Both global_pool and use_cls config param cant be 'True'"
if "model" in backbone_config.model:
model = backbone_config.model.model
else:
model = backbone_config.model
with open_dict(model):
if (
backbone_config.metadata.algo == "omnimae"
or backbone_config.metadata.algo == "tmae"
):
model.img_size = [3, image_size, image_size]
else:
model.img_size = image_size
model.global_pool = global_pool
model.use_cls = use_cls
if loaded_backbone_data is None:
(
self.backbone,
self.embed_dim,
self.visual_transform,
_,
) = hydra.utils.call(backbone_config)
else:
(
self.backbone,
self.embed_dim,
self.visual_transform,
) = loaded_backbone_data
if model.global_pool or model.use_cls:
self.compression = nn.Identity()
self.output_size = self.embed_dim
else:
self.compression, _, self.output_size = create_compression_layer(
self.embed_dim, self.backbone.final_spatial
)
else:
raise ValueError(f"unknown backbone {backbone_config.metadata.model}")
def get_loaded_backbone_data(self):
return self.backbone, self.embed_dim, self.visual_transform
def forward(
self, x: torch.Tensor, number_of_envs: int
) -> torch.Tensor: # type: ignore
# convert channels-last to channels-first
x = x.permute(0, 3, 1, 2).float() / 255
# Apply visual transforms
x = self.visual_transform(x)
# If the transformations have normalization, do not apply running mean and var
if "Normalize" not in str(self.visual_transform):
x = self.running_mean_and_var(x)
# For resnets, make sure that the model is in eval mode and
# that the gradients are not computed
if self.is_resnet and self.freeze_backbone:
self.backbone.eval()
with torch.no_grad():
x = self.backbone(x)
else:
x = self.backbone(x)
# Apply compression layer
x = self.compression(x)
return x
| eai-vc-main | cortexbench/habitat2_vc/habitat2_vc/visual_encoder.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple, List
import torch
from gym import spaces
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.models.rnn_state_encoder import build_rnn_state_encoder
from habitat_baselines.rl.ppo import Net, NetPolicy
from torch import nn as nn
from omegaconf import DictConfig
from habitat_baselines.utils.common import get_num_actions
from habitat2_vc.visual_encoder import VisualEncoder
class EAINet(Net):
def __init__(
self,
observation_space: spaces.Dict,
action_space,
input_image_size,
backbone_config,
hidden_size: int,
rnn_type: str,
num_recurrent_layers: int,
use_augmentations: bool,
use_augmentations_test_time: bool,
run_type: str,
freeze_backbone: bool,
freeze_batchnorm: bool,
global_pool: bool,
use_cls: bool,
):
super().__init__()
rnn_input_size = 0
# visual encoder
if (use_augmentations and run_type == "train") or (
use_augmentations_test_time and run_type == "eval"
):
use_augmentations = True
if "robot_head_rgb" in observation_space.spaces.keys():
self.visual_encoder = VisualEncoder(
backbone_config=backbone_config,
image_size=input_image_size,
global_pool=global_pool,
use_cls=use_cls,
use_augmentations=use_augmentations,
freeze_backbone=freeze_backbone,
)
self.visual_fc = nn.Sequential(
nn.Flatten(),
nn.Linear(self.visual_encoder.output_size, hidden_size),
nn.ReLU(True),
)
# freeze backbone
if freeze_backbone:
# Freeze all backbone
for p in self.visual_encoder.backbone.parameters():
p.requires_grad = False
if freeze_batchnorm:
self.visual_encoder.backbone = convert_frozen_batchnorm(
self.visual_encoder.backbone
)
rnn_input_size += hidden_size
# previous action embeddings
# NOTE(Sergio): Actions are continuous, for discrete use action_space.n
# and an embedding layer with action_space.n + 1
num_actions = get_num_actions(action_space)
self.prev_action_embedding = nn.Linear(num_actions, 32)
rnn_input_size += 32
# State information
fuse_keys = sorted(observation_space.spaces.keys())
self._fuse_keys_1d: List[str] = [
k for k in fuse_keys if len(observation_space.spaces[k].shape) == 1
]
if len(self._fuse_keys_1d) != 0:
rnn_input_size += sum(
observation_space.spaces[k].shape[0] for k in self._fuse_keys_1d
)
# state encoder
self.state_encoder = build_rnn_state_encoder(
input_size=rnn_input_size,
hidden_size=hidden_size,
rnn_type=rnn_type,
num_layers=num_recurrent_layers,
)
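# At this point rnn_input_size = hidden_size (visual features, when an RGB sensor is present)
# + 32 (previous-action embedding) + the summed dimensionality of all 1-D observation keys.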
# save configuration
self._hidden_size = hidden_size
self.train()
@property
def output_size(self):
return self._hidden_size
@property
def perception_embedding_size(self):
return self._hidden_size
@property
def is_blind(self):
return False
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
def forward(
self,
observations: Dict[str, torch.Tensor],
rnn_hidden_states,
prev_actions,
masks,
rnn_build_seq_info: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
x = []
aux_loss_state = {}
# number of environments
N = rnn_hidden_states.size(0)
# visual encoder
if "robot_head_rgb" in observations:
rgb = observations["robot_head_rgb"]
rgb = self.visual_encoder(rgb, N)
rgb = self.visual_fc(rgb)
aux_loss_state["perception_embed"] = rgb
x.append(rgb)
# Fusing of other state information (joint, is_holding, ...)
if len(self._fuse_keys_1d) != 0:
fuse_states = torch.cat(
[observations[k] for k in self._fuse_keys_1d], dim=-1
)
x.append(fuse_states.float())
# previous action embedding
# NOTE(Sergio): Actions are continuous, for discrete use
# torch.where(masks.view(-1), prev_actions + 1, start_token)
prev_actions = self.prev_action_embedding(masks.float() * prev_actions.float())
x.append(prev_actions)
# state encoder
out = torch.cat(x, dim=1)
out, rnn_hidden_states = self.state_encoder(
out, rnn_hidden_states, masks, rnn_build_seq_info
)
aux_loss_state["rnn_output"] = out
return out, rnn_hidden_states, aux_loss_state
@baseline_registry.register_policy
class EAIPolicy(NetPolicy):
def __init__(
self,
observation_space: spaces.Dict,
action_space,
input_image_size,
backbone_config,
hidden_size: int = 512,
rnn_type: str = "GRU",
num_recurrent_layers: int = 1,
use_augmentations: bool = False,
use_augmentations_test_time: bool = False,
run_type: str = "train",
freeze_backbone: bool = False,
freeze_batchnorm: bool = False,
global_pool: bool = False,
use_cls: bool = False,
policy_config: DictConfig = None,
aux_loss_config: Optional[DictConfig] = None,
**kwargs
):
super().__init__(
EAINet(
observation_space=observation_space,
action_space=action_space, # for previous action
input_image_size=input_image_size,
backbone_config=backbone_config,
hidden_size=hidden_size,
rnn_type=rnn_type,
num_recurrent_layers=num_recurrent_layers,
use_augmentations=use_augmentations,
use_augmentations_test_time=use_augmentations_test_time,
run_type=run_type,
freeze_backbone=freeze_backbone,
freeze_batchnorm=freeze_batchnorm,
global_pool=global_pool,
use_cls=use_cls,
),
action_space=action_space,
policy_config=policy_config,
aux_loss_config=aux_loss_config,
)
@classmethod
def from_config(
cls, config: DictConfig, observation_space: spaces.Dict, action_space, **kwargs
):
return cls(
# Spaces
observation_space=observation_space,
action_space=action_space,
# RNN
hidden_size=config.habitat_baselines.rl.policy.hidden_size,
rnn_type=config.habitat_baselines.rl.policy.rnn_type,
num_recurrent_layers=config.habitat_baselines.rl.policy.num_recurrent_layers,
# Backbone
backbone_config=config.model,
freeze_backbone=config.habitat_baselines.rl.policy.freeze_backbone,
freeze_batchnorm=config.habitat_baselines.rl.policy.freeze_backbone,
# Loss
aux_loss_config=config.habitat_baselines.rl.auxiliary_losses,
# Image
input_image_size=config.habitat_baselines.rl.policy.input_image_size,
use_augmentations=config.habitat_baselines.rl.policy.use_augmentations,
use_augmentations_test_time=config.habitat_baselines.rl.policy.use_augmentations_test_time,
run_type=config.RUN_TYPE,
# Policy
policy_config=config.habitat_baselines.rl.policy,
# Pooling
global_pool=config.habitat_baselines.rl.policy.global_pool,
use_cls=config.habitat_baselines.rl.policy.use_cls,
)
def convert_frozen_batchnorm(module):
r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
:class:`torch.nn.FrozenBatchNorm` layers.
Args:
module (nn.Module): module containing one or more :attr:`BatchNorm*D` layers
process_group (optional): process group to scope synchronization,
default is the whole world
Returns:
The original :attr:`module` with the converted :class:`torch.nn.FrozenBatchNorm`
layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
a new :class:`torch.nn.FrozenBatchNorm` layer object will be returned
instead.
Example::
>>> # Network with nn.BatchNorm layer
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
>>> module = torch.nn.Sequential(
>>> torch.nn.Linear(20, 100),
>>> torch.nn.BatchNorm1d(100),
>>> ).cuda()
>>> # creating process group (optional)
>>> # ranks is a list of int identifying rank ids.
>>> ranks = list(range(8))
>>> r1, r2 = ranks[:4], ranks[4:]
>>> # Note: every rank calls into new_group for every
>>> # process group created, even if that rank is not
>>> # part of the group.
>>> # xdoctest: +SKIP("distributed")
>>> frozen_bn_module = convert_frozen_batchnorm(module)
"""
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module_output = _FrozenBatchNorm(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, convert_frozen_batchnorm(child))
del module
return module_output
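# Note: unlike SyncBatchNorm conversion, this helper freezes normalization statistics: the
# _FrozenBatchNorm defined below always normalizes with the copied running_mean/running_var
# (bn_training is hard-coded to False in its forward), so converted layers behave as in eval
# mode even while the rest of the policy trains.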
class _FrozenBatchNorm(torch.nn.modules.batchnorm._NormBase):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
self._check_input_dim(input)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None: # type: ignore[has-type]
self.num_batches_tracked.add_(1) # type: ignore[has-type]
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
# if self.training:
# bn_training = True
# else:
# bn_training = (self.running_mean is None) and (self.running_var is None)
bn_training = False
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
return torch.nn.functional.batch_norm(
input,
# If buffers are not to be tracked, ensure that they won't be updated
self.running_mean
if not self.training or self.track_running_stats
else None,
self.running_var if not self.training or self.track_running_stats else None,
self.weight,
self.bias,
bn_training,
exponential_average_factor,
self.eps,
)
def _check_input_dim(self, input):
return
| eai-vc-main | cortexbench/habitat2_vc/habitat2_vc/policy.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | cortexbench/habitat2_vc/habitat2_vc/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from setuptools import find_packages
install_requires = [
"hydra-core",
"wandb",
"gym",
]
setup(
name="trifinger",
version="0.1",
install_requires=install_requires,
packages=find_packages(where="src"),
package_dir={"": "src"},
)
| eai-vc-main | cortexbench/trifinger_vc/setup.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import sys
sys.path.append("../")
from trifinger_vc.utils import tf_test_util
import numpy as np
from trifinger_vc.algos.bc_finetune import BCFinetune
import random
import torch
from omegaconf import OmegaConf
import json
import os
import trifinger_vc.utils.train_utils as t_utils
from hydra import compose, initialize
from trifinger_vc.utils.sim_nn import Task
import shutil
import hydra
import unittest
class TestTrifingerBC(unittest.TestCase):
def test_step_reach_env(self):
env = tf_test_util.init_reach_env()
env.reset()
for i in range(9):
pred_action = np.zeros(3)
observation, reward, episode_done, info = env.step(pred_action)
assert reward == 0, "expect 0 reward"
def test_task(self):
cfg = tf_test_util.setup_bc_tests()
cfg["task"]["n_outer_iter"] = 1
bc = tf_test_util.init_bc_algo(cfg)
task = tf_test_util.init_reach_task(cfg,bc)
traj_list = bc.traj_info["test_demos"]
demo = traj_list[0]
sim_traj_dict = task.execute_policy(
bc.policy,
demo,
save_dir=tf_test_util.EXP_DIR,
encoder=bc.encoder,
epoch=10,
)
# assert that the type and shape of the values return match
assert type(sim_traj_dict) is dict, "Expected dictionary to be returned from execute_policy"
traj_dict_keys = sim_traj_dict.keys()
assert "t" in traj_dict_keys, "Expected dictionary to have timestamp"
assert "o_pos_cur" in traj_dict_keys, "Expect cube pos to be returned"
assert "robot_pos" in traj_dict_keys, "Expect robot pos to be returned"
print(sim_traj_dict["robot_pos"])
print(type(sim_traj_dict["robot_pos"]))
expected_robot_pos = np.array([[-0.0805528, 1.14, -1.50, -0.08, 1.15, -1.5, -0.08, 1.15, -1.5 ] \
, [-0.07, 1.14, -1.50, -0.07, 1.15, -1.50, -0.08, 1.15, -1.50] \
, [-0.07, 1.14, -1.50, -0.07, 1.15, -1.50, -0.08, 1.15, -1.50] \
, [-0.06, 1.14, -1.51, -0.07, 1.15, -1.50 , -0.08, 1.150, -1.50] \
, [-0.06, 1.14, -1.51, -0.07, 1.15, -1.50 , -0.08, 1.15, -1.50] \
, [-0.06, 1.145, -1.51, -0.079, 1.15, -1.50, -0.08, 1.15, -1.50]])
assert np.all(sim_traj_dict["robot_pos"]- expected_robot_pos < 0.02), "Robot pos not as expected"
tf_test_util.cleanup_bc_tests()
def test_bc_algo(self):
cfg = tf_test_util.setup_bc_tests()
cfg["task"]["n_outer_iter"] = 1
bc = tf_test_util.init_bc_algo(cfg)
bc.train(tf_test_util.EXP_DIR,no_wandb=True)
assert os.path.isdir(os.path.join(tf_test_util.EXP_DIR,"ckpts")), "Expect checkpoints dir to be created."
assert os.path.isdir(os.path.join(tf_test_util.EXP_DIR,"sim")), "Expect checkpoints dir to be created."
assert os.path.isfile(os.path.join(tf_test_util.EXP_DIR,"ckpts","epoch_1_ckpt.pth")), "Expected checkpoint file to be saved."
tf_test_util.cleanup_bc_tests()
def test_bc_train(self):
cfg = tf_test_util.setup_bc_tests()
cfg["task"]["n_outer_iter"] = 10
random.seed(cfg.seed)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(cfg.seed)
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
# Load train and test trajectories
with open(cfg.task.demo_path, "r") as f:
traj_info = json.load(f)
train_traj_stats = traj_info["train_demo_stats"]
test_traj_stats = traj_info["test_demo_stats"]
exp_dir = "./test_output"
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# Get traj lists (read from demo files) and add to traj_info
traj_info["train_demos"] = t_utils.get_traj_list('./assets/',train_traj_stats, "pos")
traj_info["test_demos"] = t_utils.get_traj_list('./assets/',test_traj_stats, "pos")
bc = BCFinetune(cfg, traj_info, device)
bc.policy.train()
bc.encoder.train()
for i in range(10):
train_loss = bc.train_one_epoch()
assert abs(train_loss - 9.340) < 2, "Training loss not as expected"
for sim_env_name, sim_params in bc.sim_dict.items():
sim = Task(
bc.conf.task.state_type,
bc.algo_conf.pretrained_rep, # obj_state_type
downsample_time_step=bc.traj_info["downsample_time_step"],
traj_scale=bc.traj_info["scale"],
goal_type=bc.conf.task.goal_type,
object_type=bc.traj_info["object_type"],
finger_type=bc.traj_info["finger_type"],
enable_shadows=sim_params["enable_shadows"],
camera_view=sim_params["camera_view"],
arena_color=sim_params["arena_color"],
task=bc.task,
n_fingers_to_move=bc.n_fingers_to_move,
)
traj_list = bc.traj_info["test_demos"]
demo = traj_list[0]
sim_traj_dict = sim.execute_policy(
bc.policy,
demo,
save_dir=exp_dir,
encoder=bc.encoder,
epoch=10,
)
# one for each time step
assert (
abs(sim_traj_dict["scaled_success"].mean() - 0.5) < 0.05
), "Mean success of sim rollout not as expected."
assert (
abs(sim_traj_dict["scaled_success"][-1] - [0.7222]) < 0.02
), "Sim rollout performance not as expected."
tf_test_util.cleanup_bc_tests()
def test_bc_train_long(self):
cfg = tf_test_util.setup_bc_tests()
cfg["task"]["n_outer_iter"] = 10
random.seed(cfg.seed)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(cfg.seed)
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
# Load train and test trajectories
with open(cfg.task.demo_path, "r") as f:
traj_info = json.load(f)
train_traj_stats = traj_info["train_demo_stats"]
test_traj_stats = traj_info["test_demo_stats"]
exp_dir = "./test_output"
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# Get traj lists (read from demo files) and add to traj_info
traj_info["train_demos"] = t_utils.get_traj_list('./assets/',train_traj_stats, "pos")
traj_info["test_demos"] = t_utils.get_traj_list('./assets/',test_traj_stats, "pos")
bc = BCFinetune(cfg, traj_info, device)
bc.policy.train()
bc.encoder.train()
for i in range(50):
train_loss = bc.train_one_epoch()
assert abs(train_loss - 2.5) < 1, "Training loss not as expected"
for sim_env_name, sim_params in bc.sim_dict.items():
sim = Task(
bc.conf.task.state_type,
bc.algo_conf.pretrained_rep, # obj_state_type
downsample_time_step=bc.traj_info["downsample_time_step"],
traj_scale=bc.traj_info["scale"],
goal_type=bc.conf.task.goal_type,
object_type=bc.traj_info["object_type"],
finger_type=bc.traj_info["finger_type"],
enable_shadows=sim_params["enable_shadows"],
camera_view=sim_params["camera_view"],
arena_color=sim_params["arena_color"],
task=bc.task,
n_fingers_to_move=bc.n_fingers_to_move,
)
traj_list = bc.traj_info["test_demos"]
demo = traj_list[0]
sim_traj_dict = sim.execute_policy(
bc.policy,
demo,
save_dir=exp_dir,
encoder=bc.encoder,
epoch=10,
)
assert (
abs(sim_traj_dict["scaled_success"].mean() - 0.5) < 0.05
), "Mean success of sim rollout not as expected."
assert (
abs(sim_traj_dict["scaled_success"][-1] - [0.8080]) < 0.02
), "Sim rollout performance not as expected."
tf_test_util.cleanup_bc_tests()
if __name__ == "__main__":
unittest.main()
| eai-vc-main | cortexbench/trifinger_vc/tests/test_tf.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | cortexbench/trifinger_vc/tests/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import random
import os
import torch
import numpy as np
import hydra
from omegaconf import OmegaConf
import logging
import json
import trifinger_vc.utils.train_utils as t_utils
from trifinger_vc.algos.bc_finetune import BCFinetune
# A logger for this file
log = logging.getLogger(__name__)
"""
Main script to launch imitation learning training
(mbirl, policy_opt, bc, bc_finetune)
"""
@hydra.main(version_base=None, config_path="../src/trifinger_vc/config", config_name="bc_default")
def main(conf):
random.seed(conf.seed)
np.random.seed(conf.seed)
torch.manual_seed(conf.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(conf.seed)
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
# Name experiment and make exp directory
exp_dir, exp_str, exp_id = t_utils.get_exp_dir(conf)
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
exp_id = f"{conf.task.name}_{conf.algo.pretrained_rep}_freeze_{conf.algo.freeze_pretrained_rep}_r2p_{conf.rep_to_policy}_seed_{conf.seed}"
log.info(f"Running experiment with config:\n{OmegaConf.to_yaml(conf)}\n")
log.info(f"Saving experiment logs in {exp_dir}")
# Save conf
torch.save(conf, f=f"{exp_dir}/conf.pth")
# Load train and test trajectories
with open(conf.task.demo_path, "r") as f:
traj_info = json.load(f)
train_traj_stats = traj_info["train_demo_stats"]
test_traj_stats = traj_info["test_demo_stats"]
pretrained_rep = conf.algo.pretrained_rep
demo_root_dir = os.path.join(os.path.dirname(conf.task.demo_path), os.pardir)
# Get traj lists (read from demo files) and add to traj_info
traj_info["train_demos"] = t_utils.get_traj_list(demo_root_dir, train_traj_stats, "pos")
traj_info["test_demos"] = t_utils.get_traj_list(demo_root_dir, test_traj_stats, "pos")
if not conf.no_wandb:
wandb_info = t_utils.configure_wandb(exp_id, exp_dir, conf)
log.info(f"Start wandb logging with info\n{wandb_info}")
bc = BCFinetune(conf, traj_info, device)
bc.train(model_data_dir=exp_dir, no_wandb=conf.no_wandb)
if __name__ == "__main__":
main()
| eai-vc-main | cortexbench/trifinger_vc/bc_experiments/train_bc.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import enum
import gym
import numpy as np
import pybullet
import torch
from dataclasses import dataclass
from scipy.spatial.transform import Rotation
import trifinger_simulation
import trifinger_simulation.visual_objects
from trifinger_simulation import trifingerpro_limits
import trifinger_simulation.tasks.move_cube as task
from trifinger_simulation.trifinger_platform import ObjectType
from trifinger_simulation import camera
from trifinger_vc.trifinger_envs.action import ActionType
from trifinger_vc.control.impedance_controller import ImpedanceController
from trifinger_vc.control.custom_pinocchio_utils import CustomPinocchioUtils
import trifinger_vc.control.cube_utils as c_utils
MOVE_CUBE_EPISODE = 1000
class BaseCubeEnv(gym.Env):
"""Gym environment for moving cubes with TriFingerPro."""
def __init__(
self,
goal_pose: dict,
action_type: ActionType = ActionType.POSITION,
step_size: int = 100,
difficulty: int = 1,
visual_observation: bool = False,
seq_eval: bool = True,
):
"""Initialize.
Args:
goal_pose: Goal trajectory for the cube. If ``None`` a new
random trajectory is sampled upon reset.
action_type: Specify which type of actions to use.
See :class:`ActionType` for details.
step_size: Number of actual control steps to be performed in one
call of step().
"""
# Basic initialization
# ====================
# if goal_pose is None:
# self.goal = task.sample_goal(difficulty).to_dict()
# else:
# self.goal = goal_pose
self.action_type = action_type
self.info = {"difficulty": difficulty}
self.difficulty = difficulty
self.max_episode_len = MOVE_CUBE_EPISODE
if step_size < 1:
raise ValueError("step_size cannot be less than 1.")
self.step_size = step_size
self.visual_observation = visual_observation
self.camera_id = 0
# will be initialized in reset()
self.platform = None
# Create the action and observation spaces
# ========================================
robot_torque_space = gym.spaces.Box(
low=trifingerpro_limits.robot_torque.low,
high=trifingerpro_limits.robot_torque.high,
)
robot_position_space = gym.spaces.Box(
low=trifingerpro_limits.robot_position.low,
high=trifingerpro_limits.robot_position.high,
)
robot_velocity_space = gym.spaces.Box(
low=trifingerpro_limits.robot_velocity.low,
high=trifingerpro_limits.robot_velocity.high,
)
object_state_space = gym.spaces.Dict(
{
"position": gym.spaces.Box(
low=trifingerpro_limits.object_position.low,
high=trifingerpro_limits.object_position.high,
),
"orientation": gym.spaces.Box(
low=trifingerpro_limits.object_orientation.low,
high=trifingerpro_limits.object_orientation.high,
),
}
)
observation_state_space = gym.spaces.Box(
low=np.append(
trifingerpro_limits.robot_position.low,
trifingerpro_limits.object_position.low,
),
high=np.append(
trifingerpro_limits.robot_position.high,
trifingerpro_limits.object_position.high,
),
)
position_error_state_space = gym.spaces.Box(
low=np.full(3, -999999, dtype=np.float32),
high=np.full(3, 999999, dtype=np.float32),
)
orientation_error_state_space = gym.spaces.Box(
low=np.full(4, -999999, dtype=np.float32),
high=np.full(4, 999999, dtype=np.float32),
)
goal_state_space = gym.spaces.Box(
low=np.append(
trifingerpro_limits.object_position.low,
trifingerpro_limits.object_orientation.low,
),
high=np.append(
trifingerpro_limits.object_position.high,
trifingerpro_limits.object_orientation.high,
),
)
observation_state_space = gym.spaces.Box(
low=np.concatenate(
(
trifingerpro_limits.robot_position.low,
trifingerpro_limits.object_position.low,
trifingerpro_limits.object_orientation.low,
goal_state_space.low,
),
axis=0,
),
high=np.concatenate(
(
trifingerpro_limits.robot_position.high,
trifingerpro_limits.object_position.high,
trifingerpro_limits.object_orientation.high,
goal_state_space.high,
),
axis=0,
),
)
observation_state_space = gym.spaces.Box(
low=np.ones(23) * -10,
high=np.ones(23) * 10,
)
self.robot_torque_space = gym.spaces.Box(
low=trifingerpro_limits.robot_torque.low,
high=trifingerpro_limits.robot_torque.high,
)
if self.action_type == ActionType.TORQUE:
# self.action_space = robot_torque_space
self._initial_action = trifingerpro_limits.robot_torque.default
elif self.action_type == ActionType.POSITION:
# self.action_space = robot_position_space
self._initial_action = trifingerpro_limits.robot_position.default
elif self.action_type == ActionType.TORQUE_AND_POSITION:
# self.action_space = gym.spaces.Dict(
# {
# "torque": robot_torque_space,
# "position": robot_position_space,
# }
# )
self._initial_action = {
"torque": trifingerpro_limits.robot_torque.default,
"position": trifingerpro_limits.robot_position.default,
}
else:
raise ValueError("Invalid action_type")
self.action_space = gym.spaces.Box(
low=-2 * np.ones(9),
high=2 * np.ones(9),
)
self.observation_space = gym.spaces.Dict(
{
"t": gym.spaces.Discrete(MOVE_CUBE_EPISODE + 1),
"state_obs": observation_state_space,
}
)
if self.visual_observation:
self.img_size = (270, 270, 3)
self.image_observation_space = gym.spaces.Box(
low=np.zeros(self.img_size), high=np.ones(self.img_size) * 255
)
self.success_space = gym.spaces.Box(low=-1 * np.ones(1), high=np.ones(1))
self.observation_space = gym.spaces.Dict(
{
"t": gym.spaces.Discrete(MOVE_CUBE_EPISODE + 1),
"state_obs": observation_state_space,
"scaled_success": self.success_space,
"scaled_success_reach": self.success_space,
"pixels": self.image_observation_space,
}
)
self.train_start_list = self.get_start_list()
self.train_goal_list = self.get_goal_list()
self.eval_start_list = self.get_eval_start_list()
self.eval_goal_list = self.get_eval_goal_list()
self.eval_start_count = 0
self.eval_count = 0
# goes through hardcoded eval goal values in order rather than randomly choosing
self.sequential_eval = seq_eval
def get_start_list(self):
return [
np.array([2.04097663, -1.65100083, 3.25]),
np.array([4.10744807, 7.23903795, 3.25]),
np.array([2.21298789, -1.77792618, 3.25]),
np.array([2.64859868, -2.20080816, 3.25]),
np.array([-4.34777044, 6.33049147, 3.25]),
np.array([-4.44752978, -1.9291824, 3.25]),
np.array([-4.77374649, -2.92058681, 3.25]),
np.array([4.13839802, -7.93056262, 3.25]),
np.array([-3.6339912, -3.78330752, 3.25]),
np.array([1.67778578, -1.07522108, 3.25]),
np.array([0.72958577, 5.62491041, 3.25]),
np.array([5.50109756, 1.60540866, 3.25]),
np.array([-4.84623381, -1.91969908, 3.25]),
np.array([-2.64804082, 0.25612383, 3.25]),
np.array([-6.75746318, -0.83377183, 3.25]),
np.array([-3.81740024, -3.40561904, 3.25]),
np.array([-5.17478337, -6.27176169, 3.25]),
np.array([-3.94487777, 1.97520862, 3.25]),
np.array([-2.18946437, 3.83887034, 3.25]),
np.array([-8.45837181, 1.69275326, 3.25]),
np.array([-6.47569175, -2.13925613, 3.25]),
np.array([-3.10575608, -5.55079685, 3.25]),
np.array([3.46376521, -1.65422878, 3.25]),
np.array([-0.0720884, 6.85211944, 3.25]),
np.array([0.23277159, 6.48953965, 3.25]),
np.array([0.35250774, 7.69888375, 3.25]),
np.array([-1.53017535, -3.94902122, 3.25]),
np.array([5.46574845, -4.00952579, 3.25]),
np.array([-6.32064986, -2.72127592, 3.25]),
np.array([1.09125718, -4.08004056, 3.25]),
np.array([-3.6541273, 4.97720398, 3.25]),
np.array([6.11267395, 6.43009359, 3.25]),
np.array([0.69486026, -8.91990217, 3.25]),
np.array([2.60528523, 4.81703968, 3.25]),
np.array([-1.92844214, -2.97537717, 3.25]),
np.array([-5.35549988, -4.30591255, 3.25]),
np.array([-5.57041867, 6.64359229, 3.25]),
np.array([-5.87918698, 5.4926953, 3.25]),
np.array([-0.64131894, 6.00955903, 3.25]),
np.array([-2.48863439, -0.31338188, 3.25]),
np.array([-0.02733371, -3.19647573, 3.25]),
np.array([-4.4459109, 7.33152599, 3.25]),
np.array([-2.58218984, -0.85153104, 3.25]),
np.array([-0.53642423, -2.85615106, 3.25]),
np.array([-1.94631083, 3.88030117, 3.25]),
np.array([4.53668622, -5.11221288, 3.25]),
np.array([-2.77463316, 0.71408483, 3.25]),
np.array([-2.8336516, -3.67925051, 3.25]),
np.array([-0.45671894, 4.32993726, 3.25]),
np.array([2.79136047, 7.29243927, 3.25]),
np.array([-0.6892756, 3.96817383, 3.25]),
np.array([4.99552183, 3.56101594, 3.25]),
np.array([5.16958045, -7.02891967, 3.25]),
np.array([1.23990442, -1.38083498, 3.25]),
np.array([5.92869115, 6.2522862, 3.25]),
np.array([-3.14521847, -8.13946438, 3.25]),
np.array([2.9719716, -6.96319138, 3.25]),
np.array([5.07185006, -1.16377918, 3.25]),
np.array([1.66742066, 4.02562049, 3.25]),
np.array([1.77176953, 3.41187981, 3.25]),
np.array([-0.13260779, -2.68537634, 3.25]),
np.array([4.33229546, -0.03551759, 3.25]),
np.array([-1.43365107, -1.84130095, 3.25]),
np.array([-2.92969646, 5.75421449, 3.25]),
np.array([1.11222653, 3.18992928, 3.25]),
np.array([5.25777992, -3.84619755, 3.25]),
np.array([-5.07620368, -5.58340159, 3.25]),
np.array([-3.05283113, -7.62402811, 3.25]),
np.array([1.23449075, 0.44386378, 3.25]),
np.array([-2.03197261, 5.92553343, 3.25]),
np.array([-1.00614565, 1.65717695, 3.25]),
np.array([6.94632315, 3.60978841, 3.25]),
np.array([-3.53368917, 8.10776891, 3.25]),
np.array([0.2204234, 5.20549202, 3.25]),
np.array([-5.29871847, -2.50313875, 3.25]),
np.array([-1.18429566, -3.25836533, 3.25]),
np.array([7.021721, -1.37745048, 3.25]),
np.array([-4.61213103, -3.81696923, 3.25]),
np.array([-1.80475419, -2.29072473, 3.25]),
np.array([-7.17524205, -0.65156247, 3.25]),
np.array([-4.55399435, -3.30533432, 3.25]),
np.array([-0.05460599, -5.58954694, 3.25]),
np.array([4.19168691, -7.49274173, 3.25]),
np.array([4.84372648, 4.82713899, 3.25]),
np.array([6.63102781, 5.70623944, 3.25]),
np.array([7.59700729, -0.83047598, 3.25]),
np.array([4.46110769, 4.83956357, 3.25]),
np.array([-4.6037906, 0.19172261, 3.25]),
np.array([-7.18088318, -1.33220808, 3.25]),
np.array([1.06310965, 2.41328782, 3.25]),
np.array([-0.49105523, -1.11458754, 3.25]),
np.array([0.01794725, 3.06635785, 3.25]),
np.array([-5.38248375, 1.22571585, 3.25]),
np.array([-4.5219725, -5.00797691, 3.25]),
np.array([1.64514413, 4.37356647, 3.25]),
np.array([-2.13024822, 0.58961604, 3.25]),
np.array([-1.91045255, 2.92433814, 3.25]),
np.array([5.69786521, -3.72389571, 3.25]),
np.array([-4.26038794, -0.25427055, 3.25]),
np.array([-3.73057202, -7.6881122, 3.25]),
]
def get_eval_start_list(self):
return [
np.array([6.67169073, -2.96553179, 3.25]),
np.array([4.53332389, 2.98308279, 3.25]),
np.array([-2.91775021, 2.57252752, 3.25]),
np.array([8.93065598, -0.15437427, 3.25]),
np.array([-8.19208537, -1.94309468, 3.25]),
np.array([1.8349047, -4.78840247, 3.25]),
np.array([-0.29920792, 5.39048065, 3.25]),
np.array([8.02817476, -2.77101145, 3.25]),
np.array([6.75243009, -5.60007531, 3.25]),
np.array([-1.2305441, -1.93330211, 3.25]),
np.array([-4.16567822, 4.60573848, 3.25]),
np.array([1.68092937, -0.61479163, 3.25]),
np.array([-1.93641802, -2.23759902, 3.25]),
np.array([3.75552483, 4.99247274, 3.25]),
np.array([-2.63227948, 1.02710679, 3.25]),
np.array([-0.73785682, 6.72614777, 3.25]),
np.array([-5.98990161, 1.40376386, 3.25]),
np.array([-4.14701302, -7.64395404, 3.25]),
np.array([-2.68738883, -0.86268445, 3.25]),
np.array([3.56820047, -4.01970462, 3.25]),
np.array([4.73531203, -7.38510796, 3.25]),
np.array([4.54052887, -1.01960825, 3.25]),
np.array([-8.56401189, 0.82893131, 3.25]),
np.array([-3.23477287, -5.12156484, 3.25]),
np.array([-3.8107995, 2.98017638, 3.25]),
]
def get_goal_list(self):
return [
np.array([-6.85620415, 5.69309662, 3.4868318]),
np.array([6.89543661, 0.20638839, 3.60589458]),
np.array([-4.76185274, -3.57138597, 3.48521864]),
np.array([4.48172165, 2.30776027, 3.40143134]),
np.array([-10.097758, -2.05704158, 3.50874507]),
np.array([-6.21063435, 5.96678709, 3.49914875]),
np.array([-0.85843888, 0.26477303, 3.51648274]),
np.array([-1.53639816, -1.34207088, 3.35050419]),
np.array([2.4713391, -8.3362068, 3.40881575]),
np.array([1.76395876e-03, 1.59974155e00, 3.34845197e00]),
np.array([-2.44383359, 7.52655064, 3.34270859]),
np.array([1.09045117, -1.26148746, 3.45028295]),
np.array([4.2388288, 8.1671043, 3.42516367]),
np.array([1.88647559, -7.03245503, 3.4258199]),
np.array([0.11318267, 2.57698791, 3.44239848]),
np.array([4.10511002, 2.40155972, 3.55802448]),
np.array([-0.23120615, -1.45758424, 3.47215934]),
np.array([-3.05966982, 10.02575994, 3.34350474]),
np.array([1.73366214, 10.70642224, 3.43047809]),
np.array([1.68763431, -0.56803548, 3.39711601]),
np.array([-9.77245964, -1.42591748, 3.34540121]),
np.array([-3.71715436, 0.15941034, 3.33814527]),
np.array([0.89186381, -10.34613863, 3.544193]),
np.array([-0.57973103, 10.59727006, 3.38286818]),
np.array([-10.70692197, 0.85174816, 3.48813104]),
np.array([3.74088445, -4.07057836, 3.58707664]),
np.array([-6.51509437, 3.33729785, 3.41168711]),
np.array([9.92651822, -5.09583286, 3.3516998]),
np.array([-9.71215617, 0.43383868, 3.3529111]),
np.array([-7.48044561, -7.8204012, 3.35138153]),
np.array([-6.77449691, -2.21448351, 3.4748631]),
np.array([5.24973063, 7.75546124, 3.39087428]),
np.array([5.7441052, -9.48213409, 3.44377653]),
np.array([-1.65363983, 6.93396322, 3.34352824]),
np.array([1.72672181, -2.20423246, 3.34493667]),
np.array([-6.32620696, -6.15006496, 3.34785745]),
np.array([-7.25481784, -2.84468915, 3.40973936]),
np.array([3.48910405, 0.27649298, 3.33779743]),
np.array([-7.29880413, -1.67084031, 3.47002878]),
np.array([-5.39445235, 5.24321575, 3.34222376]),
np.array([3.27466144, 0.63430133, 3.39329086]),
np.array([1.84325319, 6.99002939, 3.36439045]),
np.array([-6.83167302, -5.41291579, 3.36950817]),
np.array([-0.91039109, -0.63790262, 3.34861123]),
np.array([6.51689054, 1.39720148, 3.44225852]),
np.array([-4.96093917, -6.83616067, 3.46017926]),
np.array([1.84286209, 2.71032173, 3.33851569]),
np.array([-9.25094037, -2.60808305, 3.34171691]),
np.array([-4.39315839, 5.4206937, 3.34240775]),
np.array([7.79844963, 1.21241137, 3.54043111]),
np.array([7.9784517, -1.04042639, 3.35562883]),
np.array([9.74992113, -0.05703117, 3.34283087]),
np.array([-1.80699541, 0.62056611, 3.52324641]),
np.array([-3.33449495, -3.27455263, 3.35692825]),
np.array([1.91787857, -1.55797992, 3.49959849]),
np.array([-8.18887959, -6.95140586, 3.34517562]),
np.array([-6.55092508, -5.36105026, 3.51953136]),
np.array([-4.65692181, 5.00578746, 3.57180856]),
np.array([-10.01640723, 0.09701515, 3.47691971]),
np.array([6.08581384, -6.55555138, 3.51073652]),
np.array([6.37559629, -1.39824096, 3.38839112]),
np.array([4.22314207, 10.60955302, 3.40776734]),
np.array([10.49006752, -0.25291699, 3.6091191]),
np.array([-5.5563258, -0.45557905, 3.56926722]),
np.array([-0.59690022, 0.23712072, 3.34728676]),
np.array([1.54201962, 0.53821618, 3.41215915]),
np.array([3.91624165, -3.5208636, 3.65523469]),
np.array([-9.66192239, -1.57394663, 3.35618386]),
np.array([-10.28422427, 3.20408299, 3.52148926]),
np.array([-5.86194317, 7.78183548, 3.34852961]),
np.array([-3.41343808, 10.86818437, 3.35983464]),
np.array([10.88775929, 1.83811875, 3.36764426]),
np.array([1.64951292, 7.73225581, 3.35893576]),
np.array([-3.87361636, 10.68113917, 3.38532573]),
np.array([-10.40482953, -2.83170933, 3.36578927]),
np.array([1.61077724, 4.92156534, 3.33918436]),
np.array([0.17828444, -5.5765294, 3.34321059]),
np.array([5.167725, -1.28080891, 3.64031652]),
np.array([-8.55232423, 1.28828846, 3.37625187]),
np.array([-9.78914147, -4.66853043, 3.40276421]),
np.array([-5.83961344, -0.53358555, 3.34591576]),
np.array([7.90392253, 4.11711935, 3.54403815]),
np.array([0.77248579, -5.16369315, 3.34268256]),
np.array([1.58398011, 0.66349796, 3.34651256]),
np.array([2.96027527, -3.30153252, 3.52695208]),
np.array([-3.32688568, -5.9893656, 3.38640985]),
np.array([-2.38823957, 1.22484347, 3.51193319]),
np.array([-0.67132962, 9.86015055, 3.41217951]),
np.array([-0.67080763, -6.43749339, 3.35517908]),
np.array([-5.63190129, -6.7793298, 3.5780783]),
np.array([-3.90313746, 9.41344458, 3.4665348]),
np.array([-4.88213205, 6.32855783, 3.35855582]),
np.array([-8.22583522, 4.5383908, 3.34817245]),
np.array([-0.62195955, 0.33415983, 3.33682747]),
np.array([10.65720498, 3.41036641, 3.50191099]),
np.array([-3.30091672, 9.51880107, 3.47082805]),
np.array([-10.51216611, 3.15678105, 3.42093078]),
np.array([6.69407137, -0.58780311, 3.35043057]),
np.array([-6.7290203, -8.85983436, 3.54240275]),
np.array([6.44124682, -4.54900372, 3.50910745]),
]
def get_eval_goal_list(self):
return [
np.array([5.14456575, 9.53934744, 3.4736776]),
np.array([9.47314765, -1.05800597, 3.38940632]),
np.array([-7.89212926, 5.73950083, 3.34253909]),
np.array([5.25182976, -0.06633719, 3.34521151]),
np.array([-11.45348978, -1.08593413, 3.34631526]),
np.array([-2.49468065, -10.71326428, 3.42221313]),
np.array([-7.46557298, -5.45556846, 3.6661241]),
np.array([8.33472767, -7.27369026, 3.34793479]),
np.array([-6.54476041, -6.11756091, 3.61223536]),
np.array([-10.06022672, -2.42743655, 3.36778525]),
np.array([-2.85501714, -2.09537331, 3.55102278]),
np.array([-2.34413951, -6.80405336, 3.38061399]),
np.array([-6.53886547, -2.29299191, 3.37285069]),
np.array([-1.87206664, 1.74855269, 3.34257076]),
np.array([6.5523002, -6.84960049, 3.45466889]),
np.array([7.64386918, 5.86611545, 3.42190653]),
np.array([-1.29261219, 7.50578918, 3.41643612]),
np.array([-0.46343966, -3.91072315, 3.4125123]),
np.array([6.85678941, 2.0588009, 3.58958953]),
np.array([-3.10926912, -2.49296228, 3.43359971]),
np.array([-7.3301309, -5.06979915, 3.39754574]),
np.array([-7.61911634, -6.00939488, 3.57340908]),
np.array([-2.88103846, 10.77367604, 3.34477527]),
np.array([1.11187448, 4.50634239, 3.39748213]),
np.array([-5.39123021, 9.35176932, 3.3435149]),
]
def _get_fingertip_pos(self, t):
# r_obs = self.platform.get_robot_observation(t)
r_obs = self.platform.simfinger.get_observation(t)
# pass joint pos for xyz coordinates
return self.hand_kinematics.get_ft_pos(r_obs.position)
def compute_reward(
self,
fingertip_pos,
achieved_goal,
desired_goal,
info: dict,
) -> float:
"""Compute the reward for the given achieved and desired goal.
Args:
fingertip_pos: Current fingertip positions (9-vector, xyz for each finger).
achieved_goal: Current position of the object.
desired_goal: Goal pose of the object.
info: An info dictionary containing a field "time_index" which
contains the time index of the achieved_goal.
Returns:
The reward that corresponds to the provided achieved goal w.r.t.
the desired goal. Note that the following should always hold true::
ob, reward, done, info = env.step()
assert reward == env.compute_reward(
fingertip_pos,
ob['achieved_goal'],
ob['desired_goal'],
info,
)
"""
# reward wrt xy position, not taking z into account
arena_radius = 0.195
xy_dist = np.linalg.norm(desired_goal[:2] - achieved_goal[:2])
scaled_dist = xy_dist / (2 * arena_radius)
start_scaled_dist = xy_dist / self.start_dist
reward = 0
# additional reward the closer the fingertips are to the cube
ftip_dist_to_cube = 0
for i in range(3):
ftip_dist_to_cube += np.linalg.norm(
fingertip_pos[(3 * i) : (3 * i) + 3] - achieved_goal[0:3]
)
if ftip_dist_to_cube < 0.15:
reward += 50.0
for i in range(3):
ftip_dist_to_target = np.linalg.norm(
fingertip_pos[(3 * i) : (3 * i) + 3] - desired_goal[0:3]
)
cube_dist_to_target = np.linalg.norm(achieved_goal[0:3] - desired_goal[0:3])
reward += -200 * cube_dist_to_target + -10 * ftip_dist_to_target
reward += -100 * (ftip_dist_to_cube) + 50
self.prev_finger_dist = ftip_dist_to_cube
if xy_dist < 0.07:
reward += 10
if xy_dist < 0.04:
reward += 20
return reward
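# Summary (added comment): the shaping above combines a +50 bonus when the summed
# fingertip-to-cube distance is below 0.15 m, penalties proportional to the
# cube-to-goal, fingertip-to-goal, and fingertip-to-cube distances (plus constant
# offsets), and sparse bonuses of +10 / +20 once the cube's xy distance to the goal
# drops below 7 cm / 4 cm.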
def step(self, action):
"""Run one timestep of the environment's dynamics.
When end of episode is reached, you are responsible for calling
``reset()`` to reset this environment's state.
Args:
action: An action provided by the agent (depends on the selected
:class:`ActionType`).
Returns:
tuple:
- observation (dict): agent's observation of the current
environment.
- reward (float): amount of reward returned after previous action.
- done (bool): whether the episode has ended, in which case further
step() calls will return undefined results.
- info (dict): info dictionary containing the current time index.
"""
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
def seed(self, seed=None):
"""Sets the seed for this env’s random number generator.
.. note::
Spaces need to be seeded separately. E.g. if you want to sample
actions directly from the action space using
``env.action_space.sample()`` you can set a seed there using
``env.action_space.seed()``.
Returns:
List of seeds used by this environment. This environment only uses
a single seed, so the list contains only one element.
"""
self.np_random, seed = gym.utils.seeding.np_random(seed)
task.seed(seed)
return [seed]
def _goal_orientation(self):
return self.goal[3:]
def _goal_pos(self):
return self.goal[:3]
def render(self, mode="human"):
camera_observation = None
cam_imgs = self.platform.get_camera_observation(self.step_count)
if mode == "rgb_array":
# 0:camera 60, 1:camera180, 2:camera300
if self.camera_id == -1: # return ALL images
camera_observation = np.concatenate(
(cam_imgs[0].image, cam_imgs[1].image, cam_imgs[2].image), axis=2
)
else:
camera_observation = torch.tensor(
cam_imgs.cameras[self.camera_id].image
)
elif mode == "eval":
camera_observation = cam_imgs.cameras[0].image
else:
raise NotImplementedError
return camera_observation
def get_success(self, curr, goal):
success = 1 - (np.linalg.norm(curr - goal) / self.start_dist)
if success < 0:
success = 0
return success
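# Worked example (added comment): if the cube started 0.20 m from the goal and is
# currently 0.05 m away, get_success returns 1 - 0.05 / 0.20 = 0.75; the value is
# clipped at 0 once the cube is farther from the goal than where it started.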
def get_success_reach(self, curr, goal):
dist = 0
for i in range(3):
dist += np.linalg.norm(curr[(3 * i) : (3 * i) + 3] - goal[0:3])
success = 1 - (dist / self.start_finger_dist)
if success < 0:
success = 0
return success
def _create_observation(self, t):
robot_observation = self.platform.get_robot_observation(t)
camera_observation = self.platform.get_camera_observation(t)
object_observation = camera_observation.filtered_object_pose
if self.visual_observation:
observation = {
"t": t,
"pixels": self.render("rgb_array"),
"state_obs": np.concatenate(
(
robot_observation.position,
object_observation.position,
object_observation.orientation,
self.goal,
),
axis=0,
),
"scaled_success": self.get_success(
object_observation.position, self.goal[0:3]
),
"scaled_success_reach": self.get_success_reach(
self.hand_kinematics.get_ft_pos(robot_observation.position),
object_observation.position,
),
}
else:
observation = {
"t": t,
"state_obs": np.concatenate(
(
robot_observation.position,
object_observation.position,
object_observation.orientation,
self.goal,
),
axis=0,
),
}
if not self.run_rl_policy:
position_error = np.linalg.norm(
object_observation.position - self._goal_pos()
)
# Get cube vertices
obj_pose = {
"position": object_observation.position,
"orientation": object_observation.orientation,
}
# From trifinger_simulation tasks/move_cube/__init__.py evaluate_state()
goal_rot = Rotation.from_quat(self._goal_orientation())
actual_rot = Rotation.from_quat(object_observation.orientation)
error_rot = goal_rot.inv() * actual_rot
orientation_error = error_rot.magnitude()
# Add new observation fields
ft_pos_cur = self.hand_kinematics.get_ft_pos(robot_observation.position)
v_wf_dict = c_utils.get_vertices_wf(obj_pose)
observation["robot_position"] = robot_observation.position
observation["object_position"] = object_observation.position
observation["object_orientation"] = object_observation.orientation
observation["object_vertices"] = v_wf_dict
observation["desired_goal"] = self.goal
observation["achieved_goal_position_error"] = position_error
observation["achieved_goal_orientation_error"] = orientation_error
observation["ft_pos_cur"] = ft_pos_cur
# Save camera observation images
if self.visual_observation:
camera_observation_dict = {
"camera60": {
"image": camera_observation.cameras[0].image,
"timestamp": camera_observation.cameras[0].timestamp,
},
"camera180": {
"image": camera_observation.cameras[1].image,
"timestamp": camera_observation.cameras[1].timestamp,
},
"camera300": {
"image": camera_observation.cameras[2].image,
"timestamp": camera_observation.cameras[2].timestamp,
},
}
observation["camera_observation"] = camera_observation_dict
observation["policy"] = {
"controller": self.hand_kinematics.get_observation()
}
return observation
def _gym_action_to_robot_action(self, gym_action):
# construct robot action depending on action type
if self.action_type == ActionType.TORQUE:
robot_action = self.platform.Action(torque=gym_action)
elif self.action_type == ActionType.POSITION:
robot_action = self.platform.Action(position=gym_action)
elif self.action_type == ActionType.TORQUE_AND_POSITION:
robot_action = self.platform.Action(
torque=gym_action["torque"], position=gym_action["position"]
)
else:
raise ValueError("Invalid action_type")
return robot_action
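# Shape of gym_action expected above for each ActionType (summary comment, based on
# the initial-action defaults set in __init__):
#   ActionType.TORQUE              -> 9-vector of joint torques
#   ActionType.POSITION            -> 9-vector of joint positions
#   ActionType.TORQUE_AND_POSITION -> {"torque": <9-vector>, "position": <9-vector>}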
def close(self):
pybullet.disconnect()
super().close()
class MoveCubeEnv(BaseCubeEnv):
"""Gym environment for moving cubes with simulated TriFingerPro."""
def __init__(
self,
goal_pose: dict = None,
action_type: ActionType = ActionType.TORQUE,
step_size: int = 100,
difficulty: int = 1,
visualization: bool = False,
goal_visualization: bool = False,
no_collisions: bool = False,
enable_cameras: bool = False,
finger_type: str = "trifinger_meta",
camera_delay_steps: int = 90,
time_step: float = 0.004,
object_type: ObjectType = ObjectType.COLORED_CUBE,
enable_shadows: bool = False,
camera_view: str = "default",
arena_color: str = "default",
random_q_init: bool = False,
visual_observation: bool = False,
run_rl_policy: bool = True,
):
"""Initialize.
Args:
goal_pose: Goal trajectory for the cube. If ``None`` a new
random trajectory is sampled upon reset.
action_type (ActionType): Specify which type of actions to use.
See :class:`ActionType` for details.
step_size (int): Number of actual control steps to be performed in
one call of step().
visualization (bool): If true, the pyBullet GUI is run for
visualization.
no_collisions (bool): If true, turn off collisions between platform and object.
enable_cameras (bool): If true, enable cameras that capture RGB image
observations.
finger_type (str): Finger type ("trifingerpro", "trifingeredu")
camera_delay_steps (int): Number of time steps by which camera
observations are held back after they are generated. This is
used to simulate the delay of the camera observation that is
happening on the real system due to processing (mostly the
object detection).
time_step (float): Simulation timestep
random_q_init (bool): If true, use random initial joint positions
run_rl_policy (bool): If true, don't add extra observation fields used for bc policy
"""
super().__init__(
goal_pose=goal_pose,
action_type=action_type,
step_size=step_size,
difficulty=difficulty,
visual_observation=visual_observation,
)
self.visualization = visualization
self.goal_visualization = goal_visualization
self.no_collisions = no_collisions
self.enable_cameras = enable_cameras
self.finger_type = finger_type
self.time_step = time_step
self.enable_shadows = enable_shadows
self.camera_view = camera_view
self.random_q_init = random_q_init
self.visual_observation = visual_observation
self.run_rl_policy = run_rl_policy
if self.visual_observation:
self.enable_cameras = True
# initialize simulation
# initial_robot_position = trifingerpro_limits.robot_position.default
self.q_nominal = np.array([-0.08, 1.15, -1.5] * 3)
if self.random_q_init:
self.initial_robot_position = self.sample_init_robot_position()
else:
self.initial_robot_position = np.array(
[
-0.0809731,
1.1499023,
-1.50172085,
-0.08046894,
1.14986721,
-1.50067745,
-0.07987084,
1.14964149,
-1.50124104,
]
)
self.platform = trifinger_simulation.TriFingerPlatform(
visualization=self.visualization,
enable_cameras=self.enable_cameras,
finger_type=self.finger_type,
time_step_s=self.time_step,
initial_robot_position=self.initial_robot_position,
camera_delay_steps=camera_delay_steps,
object_type=object_type,
enable_shadows=self.enable_shadows,
camera_view=self.camera_view,
arena_color=arena_color,
fix_cube_base=False,
)
self.hand_kinematics = HandKinematics(self.platform.simfinger)
# Make camera for RL training
if self.run_rl_policy:
target_positions = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
camera_up_vectors = [[0, 0, 1], [0, 0, 1], [0, 0, 1]]
field_of_view = 33
self.tricamera = camera.TriFingerCameras(
pybullet_client_id=self.platform.simfinger._pybullet_client_id,
target_positions=target_positions,
camera_up_vectors=camera_up_vectors,
field_of_view=field_of_view,
)
self.goal_visualization = True
else:
self.tricamera = None
# visualize the cube vertices
if self.visualization and not self.enable_cameras:
self.draw_verts = True
else:
self.draw_verts = False
self.vert_markers = None
self.start_dist = 1000
def sample_init_robot_position(self):
q0_range = [-0.15, 0.15]
q1_range = [0.8, 1.15]
q2_range = [-1.35, -1.65]
q_new = np.zeros(9)
for i in range(3):
q0 = np.random.uniform(q0_range[0], q0_range[1])
q1 = np.random.uniform(q1_range[0], q1_range[1])
q2 = np.random.uniform(q2_range[0], q2_range[1])
q_new[3 * i] = q0
q_new[3 * i + 1] = q1
q_new[3 * i + 2] = q2
return q_new
def step(self, action):
"""
Run one timestep of the environment's dynamics, which is
self.step_size number of simulation steps
When end of episode is reached, you are responsible for calling
``reset()`` to reset this environment's state.
Args:
action: An action provided by the agent (depends on the selected
:class:`ActionType`).
Returns:
tuple:
- observation (dict): agent's observation of the current
environment.
- reward (float): amount of reward returned after previous action.
- done (bool): whether the episode has ended, in which case further
step() calls will return undefined results.
- info (dict): info dictionary containing the current time index.
"""
if self.run_rl_policy:
action = torch.tensor(action) / 50.0
action = np.clip(action, -0.02, 0.02)
if not self.action_space.contains(np.array(action, dtype=np.float32)):
raise ValueError("Given action is not contained in the action space.")
num_steps = self.step_size
# ensure episode length is not exceeded due to step_size
step_count_after = self.step_count + num_steps
if step_count_after > MOVE_CUBE_EPISODE:
excess = step_count_after - MOVE_CUBE_EPISODE
num_steps = max(1, num_steps - excess)
reward = 0.0
for _i in range(num_steps):
# Get current robot state
robot_obs = self.platform.get_robot_observation(self.step_count)
joint_position = robot_obs.position
joint_velocity = robot_obs.velocity
self.step_count += 1
if self.step_count > MOVE_CUBE_EPISODE:
raise RuntimeError("Exceeded number of steps for one episode.")
# Update desired position and velocity
x_des_i = self.x_des_plan + (_i + 1) * (action / num_steps)
dx_des_i = action / (self.step_size * self.time_step)
# Compute torque with impedance controller
torque = self.hand_kinematics.get_torque(
x_des_i,
dx_des_i,
joint_position,
joint_velocity,
)
torque = np.clip(
torque, self.robot_torque_space.low, self.robot_torque_space.high
)
# Send action to robot
robot_action = self._gym_action_to_robot_action(torque)
t = self.platform.append_desired_action(robot_action)
# Use observations of step t + 1 to follow what would be expected
# in a typical gym environment. Note that on the real robot, this
# will not be possible
self.info["time_index"] = t + 1
# Alternatively use the observation of step t. This is the
# observation from the moment before action_t is applied, i.e. the
# result of that action is not yet visible in this observation.
#
# When using this observation, the resulting cumulative reward
# should match exactly the one computed during replay (with the
# above it will differ slightly).
# self.info["time_index"] = t
# visualize the goal
if self.goal_visualization:
self.goal_marker = trifinger_simulation.visual_objects.CubeMarker(
width=task._CUBE_WIDTH,
position=self._goal_pos(),
orientation=self._goal_orientation(),
pybullet_client_id=self.platform.simfinger._pybullet_client_id,
)
observation = self._create_observation(self.info["time_index"])
# Update plan with action
self.x_des_plan += action
# Compute reward
reward = 0
reward += self.compute_reward(
self.hand_kinematics.get_ft_pos(observation["state_obs"][:9]),
observation["state_obs"][9:16],
observation["state_obs"][16:],
self.info,
)
# Draw cube vertices from observation
if self.draw_verts:
v_wf_dict = observation["object_observation"]["vertices"]
positions = [v_wf for k, v_wf in v_wf_dict.items()]
self.vert_markers.set_state(positions)
is_done = self.step_count >= MOVE_CUBE_EPISODE
return observation, reward, is_done, self.info
def choose_start_from_demos(self, eval=False):
start_pos_list = self.train_start_list
if eval:
self.eval_start_count += 1
if self.eval_start_count == len(self.eval_start_list):
self.eval_start_count = 0
start_pos_list = self.eval_start_list
else:
start_pos_list = self.train_start_list
if self.sequential_eval and eval:
idx = self.eval_start_count
else:
idx = np.random.randint(0, len(start_pos_list))
return start_pos_list[idx] / 100.0
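# Note (added comment): the hardcoded start/goal lists above are in centimeters, so
# dividing by 100 yields meters; z = 3.25 cm corresponds to roughly half the cube
# width, i.e. the cube resting on the arena floor.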
def choose_goal_from_demos(self, eval=False):
if eval:
self.eval_count += 1
if self.eval_count == len(self.eval_goal_list):
self.eval_count = 0
goal_pos_list = self.eval_goal_list
else:
goal_pos_list = self.train_goal_list
if self.sequential_eval and eval:
idx = self.eval_count
else:
idx = np.random.randint(0, len(goal_pos_list))
return goal_pos_list[idx] / 100.0
def reset(
self,
goal_pose_dict=None,
init_pose_dict=None,
init_robot_position=None,
random_init_cube_pos=False,
eval_mode=False,
):
"""Reset the environment."""
##hard-reset simulation
# del self.platform
# initialize cube at the centre
if init_pose_dict is None:
initial_object_pose = task.sample_goal(difficulty=-1)
if not random_init_cube_pos:
# Hardcode init pose to arena center
initial_object_pose.position = [
0,
0,
task._CUBE_WIDTH / 2,
]
if self.run_rl_policy:
initial_object_pose.position = self.choose_start_from_demos(
eval=eval_mode
)
else:
initial_object_pose = task.Pose.from_dict(init_pose_dict)
if init_robot_position is None:
if self.random_q_init:
init_robot_position = self.sample_init_robot_position()
else:
init_robot_position = self.initial_robot_position
self.platform.reset(
initial_object_pose=initial_object_pose,
initial_robot_position=init_robot_position,
)
# Set pybullet GUI params
self._set_sim_params()
if self.no_collisions:
self.disable_collisions()
# if no goal is given, sample one randomly
if goal_pose_dict is None:
if self.difficulty == 0 or self.difficulty not in [1, 2, 3]:
self.goal = np.append(
initial_object_pose.position, initial_object_pose.orientation
)
else:
pose = task.sample_goal(self.difficulty)
self.goal = np.append(pose.position, pose.orientation)
if self.run_rl_policy:
self.goal[0:3] = self.choose_goal_from_demos(eval=eval_mode)
else:
pose = goal_pose_dict
self.goal = np.append(pose["position"], pose["orientation"])
# visualize the goal
if self.goal_visualization:
self.goal_marker = trifinger_simulation.visual_objects.CubeMarker(
width=task._CUBE_WIDTH,
position=self._goal_pos(),
orientation=self._goal_orientation(),
pybullet_client_id=self.platform.simfinger._pybullet_client_id,
)
if self.draw_verts:
v_wf_dict = c_utils.get_vertices_wf(initial_object_pose.to_dict())
if self.vert_markers is None:
self.vert_markers = trifinger_simulation.visual_objects.Marker(
8,
goal_size=0.005,
initial_position=[v_wf for k, v_wf in v_wf_dict.items()],
)
else:
positions = [v_wf for k, v_wf in v_wf_dict.items()]
self.vert_markers.set_state(positions)
# Reset state for policy execution
self.x_des_plan = torch.FloatTensor(
self.hand_kinematics.get_ft_pos(init_robot_position).copy()
)
self.info = {"time_index": -1, "goal": self.goal, "difficulty": self.difficulty}
self.step_count = 0
self.start_dist = np.linalg.norm(initial_object_pose.position - self.goal[0:3])
self.start_finger_dist = 0
for i in range(3):
self.start_finger_dist += np.linalg.norm(
self.x_des_plan[(3 * i) : (3 * i) + 3]
- initial_object_pose.position[0:3]
)
self.prev_finger_dist = self.start_finger_dist
self.platform._camera_update()
new_obs = self._create_observation(0)
return new_obs
def _set_sim_params(self):
"""Set pybullet GUI params"""
pybullet.configureDebugVisualizer(
pybullet.COV_ENABLE_GUI, 0
) # Turn off debug camera visuals
pybullet.configureDebugVisualizer(
pybullet.COV_ENABLE_SHADOWS, self.enable_shadows
) # Enable/disable shadow rendering according to self.enable_shadows
def disable_collisions(self):
"""Disable collisions between finger and object, for debugging finger controllers"""
obj_id = self.platform.cube._object_id
robot_id = self.platform.simfinger.finger_id
obj_link_id = -1
finger_link_ids = (
self.platform.simfinger.pybullet_link_indices
+ self.platform.simfinger.pybullet_tip_link_indices
)
for link_id in finger_link_ids:
pybullet.setCollisionFilterPair(
robot_id, obj_id, link_id, obj_link_id, enableCollision=0
)
# Make object invisible
# pybullet.changeVisualShape(obj_id, obj_link_id, rgbaColor=[0,0,0,0])
class HandKinematics:
def __init__(self, simfinger):
self.Nf = 3 # Number of fingers
self.Nq = self.Nf * 3 # Number of joints in hand
# class with kinematics functions
self.kinematics = CustomPinocchioUtils(
simfinger.finger_urdf_path,
simfinger.tip_link_names,
simfinger.link_names,
)
self.controller = ImpedanceController(self.kinematics)
def get_ft_pos(self, q):
"""Get fingertip positions given current joint configuration q"""
ft_pos = np.array(self.kinematics.forward_kinematics(q)).reshape(self.Nq)
return ft_pos
def get_torque(self, x_des, dx_des, q_cur, dq_cur):
return self.controller.get_command_torque(x_des, dx_des, q_cur, dq_cur)
def get_observation(self):
return self.controller.get_observation()
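# Usage sketch (not part of the original module), mirroring how MoveCubeEnv drives
# HandKinematics above; the variable names are illustrative only:
#
#   hk = HandKinematics(platform.simfinger)
#   ft_pos = hk.get_ft_pos(q)                      # 9-vector of fingertip xyz positions
#   torque = hk.get_torque(x_des, dx_des, q, dq)   # joint torques from the impedance controller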
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/trifinger_envs/gym_cube_env.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from .cube_reach import CubeReachEnv
from .gym_cube_env import MoveCubeEnv
from gym.envs.registration import (
registry,
register,
make,
spec,
load_env_plugins as _load_env_plugins,
)
register(
id="MoveCube-v0",
entry_point="trifinger_envs:MoveCubeEnv",
max_episode_steps=1000,
)
register(
id="CubeReach-v0",
entry_point="trifinger_envs.cube_reach:CubeReachEnv",
max_episode_steps=1000,
)
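# Example (sketch, not in the original file): once this package has been imported so
# the register() calls above have run, environments can be created by id, with keyword
# arguments forwarded to the constructors. The import path and override below are
# assumptions based on the source tree layout.
#
#   import gym
#   import trifinger_vc.trifinger_envs  # noqa: F401  (triggers registration)
#   env = gym.make("CubeReach-v0", visual_observation=True)
#   obs = env.reset()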
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/trifinger_envs/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import enum
class ActionType(enum.Enum):
"""Different action types that can be used to control the robot."""
#: Use pure torque commands. The action is a list of torques (one per
#: joint) in this case.
TORQUE = enum.auto()
#: Use joint position commands. The action is a list of angular joint
#: positions (one per joint) in this case. Internally a PD controller is
#: executed for each action to determine the torques that are applied to
#: the robot.
POSITION = enum.auto()
#: Use both torque and position commands. In this case the action is a
#: dictionary with keys "torque" and "position" which contain the
#: corresponding lists of values (see above). The torques resulting from
#: the position controller are added to the torques in the action before
#: applying them to the robot.
TORQUE_AND_POSITION = enum.auto()
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/trifinger_envs/action.py
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import enum
import typing
import gym
import math
import numpy as np
import pybullet
import torch
import trifinger_simulation
from trifinger_simulation import camera
from trifinger_simulation import visual_objects
from trifinger_simulation import trifingerpro_limits
import trifinger_simulation.tasks.move_cube as task
from trifinger_simulation import sample
from trifinger_vc.trifinger_envs.action import ActionType
from trifinger_vc.control.impedance_controller import ImpedanceController
from trifinger_vc.control.custom_pinocchio_utils import CustomPinocchioUtils
from trifinger_simulation.trifinger_platform import ObjectType
import trifinger_vc.utils.data_utils as d_utils
try:
import robot_fingers
except ImportError:
robot_fingers = None
import trifinger_vc.control.cube_utils as c_utils
REACH_EPISODE_LENGTH = 500
class CubeReachEnv(gym.Env):
"""Gym environment for moving cubes with TriFingerPro."""
def __init__(
self,
render_mode: str = "",
fixed_goal: bool = True,
visual_observation: bool = False,
action_type: ActionType = ActionType.TORQUE,
step_size: int = 100,
visualization: bool = False,
enable_cameras: bool = True,
camera_id: int = 0,
finger_type: str = "trifingerpro",
camera_delay_steps: int = 0,
time_step: float = 0.004,
randomize_starts: bool = True,
randomize_all: bool = False,
sample_radius: float = 0.00,
max_goal_dist: float = 100,
object_type: ObjectType = ObjectType.COLORED_CUBE,
enable_shadows: bool = False,
camera_view: str = "default",
arena_color: str = "default",
random_q_init: bool = False,
run_rl_policy: bool = True,
seq_eval: bool = True,
):
"""Initialize.
Args:
fixed_goal: Default true, if false will sample random goal.
visual_observation: Default false, if true will output images as observations instead of state of robot fingertips.
action_type (ActionType): Specify which type of actions to use.
See :class:`ActionType` for details.
step_size (int): Number of actual control steps to be performed in
one call of step().
visualization (bool): If true, the pyBullet GUI is run for
visualization.
no_collisions (bool): If true, turn off collisions between platform and object.
enable_cameras (bool): If true, enable cameras that capture RGB image
observations.
finger_type (str): Finger type ("trifingerpro", "trifingeredu")
camera_delay_steps (int): Number of time steps by which camera
observations are held back after they are generated. This is
used to simulate the delay of the camera observation that is
happening on the real system due to processing (mostly the
object detection).
time_step (float): Simulation timestep
run_rl_policy (bool): If true, don't add extra observation fields used for bc policy
"""
super().__init__()
if render_mode == "human":
visualization = True
self.visualization = visualization
self.enable_cameras = enable_cameras
self.finger_type = finger_type
self.run_rl_policy = run_rl_policy
self.time_step = time_step
self.randomize_starts = randomize_starts
self.sample_radius = sample_radius
self.randomize_all = randomize_all
self.max_goal_dist = max_goal_dist
self.camera_id = camera_id
if self.camera_id > 2:
raise ValueError("Not a valid camera_id, choose value [0,1,2].")
if self.randomize_all:
self.randomize_starts = True
self.max_episode_len = REACH_EPISODE_LENGTH
if self.randomize_all:
self.max_episode_len = 1000
# initialize simulation
self.q_nominal = np.array([-0.08, 1.15, -1.5] * 3)
self.random_q_init = random_q_init
self.initial_robot_position = self.q_nominal
self.platform = trifinger_simulation.TriFingerPlatform(
visualization=self.visualization,
enable_cameras=self.enable_cameras,
finger_type=self.finger_type,
time_step_s=self.time_step,
initial_robot_position=self.q_nominal,
camera_delay_steps=camera_delay_steps,
object_type=object_type,
enable_shadows=enable_shadows,
camera_view=camera_view,
arena_color=arena_color,
fix_cube_base=True,
)
self.hand_kinematics = HandKinematics(self.platform.simfinger)
# Make camera for RL training
if self.run_rl_policy:
target_positions = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
camera_up_vectors = [[0, 0, 1], [0, 0, 1], [0, 0, 1]]
field_of_view = 33
self.tricamera = camera.TriFingerCameras(
pybullet_client_id=self.platform.simfinger._pybullet_client_id,
target_positions=target_positions,
camera_up_vectors=camera_up_vectors,
field_of_view=field_of_view,
)
else:
self.tricamera = None
# Basic initialization
# ====================
self.visual_observation = visual_observation
self.action_type = action_type
self.dense_reward_weights = np.zeros(2)
self.dense_reward_weights[0] = 100000
if step_size < 1:
raise ValueError("step_size cannot be less than 1.")
self.step_size = step_size
# will be initialized in reset()
# self.platform = None
# Create the action and observation spaces
# ========================================
self.robot_torque_space = gym.spaces.Box(
low=trifingerpro_limits.robot_torque.low,
high=trifingerpro_limits.robot_torque.high,
)
robot_velocity_space = gym.spaces.Box(
low=trifingerpro_limits.robot_velocity.low,
high=trifingerpro_limits.robot_velocity.high,
)
goal_state_space = gym.spaces.Box(
low=np.ones(9) * -0.7,
high=np.ones(9) * 0.7,
)
self.observation_state_space = gym.spaces.Box(
low=np.ones(18) * -0.7,
high=np.ones(18) * 0.7,
)
self.action_space = gym.spaces.Box(
low=np.ones(3) * -2,
high=np.ones(3) * 2,
)
# used for initializing random start positions
self.action_bounds = {
"low": trifingerpro_limits.robot_position.low,
"high": trifingerpro_limits.robot_position.high,
}
# actions are dealt with as displacement of fingertips regardless of type
if self.action_type == ActionType.TORQUE:
self._initial_action = trifingerpro_limits.robot_torque.default
elif self.action_type == ActionType.POSITION:
self._initial_action = trifingerpro_limits.robot_position.default
elif self.action_type == ActionType.TORQUE_AND_POSITION:
self._initial_action = {
"torque": trifingerpro_limits.robot_torque.default,
"position": trifingerpro_limits.robot_position.default,
}
else:
raise ValueError("Invalid action_type")
self.ftip_dist_space = gym.spaces.Box(
low=np.ones(1) * -100,
high=np.ones(1) * 100,
)
self.total_success_space = gym.spaces.Box(
low=np.ones(1) * -100,
high=np.ones(1) * 100,
)
self.img_size = (270, 270, 3)
self.image_observation_space = gym.spaces.Box(
low=np.zeros(self.img_size), high=np.ones(self.img_size) * 255
)
self.observation_space = gym.spaces.Dict(
{
"observation": self.observation_state_space,
}
)
if self.visual_observation:
if self.camera_id == -1:
self.observation_space = gym.spaces.Dict(
{
"pixels": gym.spaces.Box(
low=np.zeros((256, 256, 9)),
high=np.ones((256, 256, 9)) * 255,
),
"ftip_dist": self.observation_state_space,
}
)
else:
self.observation_space = gym.spaces.Dict(
{
"pixels": self.image_observation_space,
"ftip_dist": self.observation_state_space,
"scaled_success": self.total_success_space,
}
)
self.start_pos = None
# self.goal_marker = trifinger_simulation.visual_objects.CubeMarker(
# width=task._CUBE_WIDTH,
# position=[0.05, 0.07, 0.0319],
# orientation=0,
# pybullet_client_id=self.platform.simfinger._pybullet_client_id,
# )
self.eval_goal_list = self.get_eval_goal_list()
self.eval_count = 0
# goes through hardcoded eval goal values in order rather than randomly choosing
self.sequential_eval = seq_eval
self.train_goal_list = self.get_train_goal_list()
# Finger ids to move
self.finger_to_move_list = [0] # TODO hardcoded
"""
mode=rgb_array returns numpy.ndarray with shape (x, y, 3) of current observation
"""
def get_train_goal_list(self):
return [
np.array([3.05603813, -7.55214019, 3.25]),
np.array([-3.15326713, 0.35681094, 3.25]),
np.array([-0.20568451, 7.48419172, 3.25]),
np.array([-1.80023987, -3.33667845, 3.25]),
np.array([0.63224735, -0.20621713, 3.25]),
np.array([2.49144056, -1.52591661, 3.25]),
np.array([-8.10157516, 3.60477928, 3.25]),
np.array([-4.75578621, -5.62289382, 3.25]),
np.array([0.60647659, -2.64716854, 3.25]),
np.array([-1.11332975, 5.00887828, 3.25]),
np.array([5.98420496, -4.31522391, 3.25]),
np.array([-4.18048378, 5.86477577, 3.25]),
np.array([2.63104316, -0.24772835, 3.25]),
np.array([-4.98861264, 5.96657986, 3.25]),
np.array([-2.10679078, -3.15221106, 3.25]),
np.array([-7.90809522, -4.2657171, 3.25]),
np.array([-1.3794515, 5.83348671, 3.25]),
np.array([4.48787389, -2.4191718, 3.25]),
np.array([-1.36567956, -5.11484226, 3.25]),
np.array([-2.9759321, 7.29904344, 3.25]),
np.array([-1.68308814, 0.35553572, 3.25]),
np.array([8.93032708, 0.30403264, 3.25]),
np.array([4.41736031, -6.83057901, 3.25]),
np.array([-3.28454635, 2.72672544, 3.25]),
np.array([4.51527941, 3.46186233, 3.25]),
np.array([0.02471094, 6.74989932, 3.25]),
np.array([-7.25012877, -4.12715448, 3.25]),
np.array([0.08717153, 6.12825175, 3.25]),
np.array([0.47511044, -4.20393201, 3.25]),
np.array([8.20551313, 0.42598918, 3.25]),
np.array([7.53531281, -3.53960009, 3.25]),
np.array([1.63535131, -4.59013092, 3.25]),
np.array([0.65539638, 6.58593092, 3.25]),
np.array([2.83107544, -2.68763681, 3.25]),
np.array([2.82826438, -8.44225464, 3.25]),
np.array([-1.55811306, -3.29802461, 3.25]),
np.array([8.48321033, 0.93042389, 3.25]),
np.array([-3.14584343, -4.08948458, 3.25]),
np.array([-2.80634012, -8.02044702, 3.25]),
np.array([3.14693547, 8.00778896, 3.25]),
np.array([-6.57006396, -4.22565421, 3.25]),
np.array([-2.99551142, -3.63649108, 3.25]),
np.array([-1.08590006, 6.13535156, 3.25]),
np.array([-6.13850402, -5.16321051, 3.25]),
np.array([2.82973147, 4.65223176, 3.25]),
np.array([2.87652314, -4.5091759, 3.25]),
np.array([2.89854216, -6.15023629, 3.25]),
np.array([-0.24121648, 5.12888577, 3.25]),
np.array([-5.52839414, 2.1008083, 3.25]),
np.array([6.99050079, 2.24616699, 3.25]),
np.array([-0.96494484, -3.1828791, 3.25]),
np.array([-3.10124255, 3.8221943, 3.25]),
np.array([-2.56092877, -3.03297289, 3.25]),
np.array([4.50346113, -7.31932264, 3.25]),
np.array([5.91994241, 4.94647579, 3.25]),
np.array([-0.48606156, -5.32731048, 3.25]),
np.array([-0.32667426, -8.66828972, 3.25]),
np.array([1.07453595, 7.36318464, 3.25]),
np.array([-3.25205737, 6.89068226, 3.25]),
np.array([3.26506201, 3.42383366, 3.25]),
np.array([2.07172391, 2.67414843, 3.25]),
np.array([0.48822116, -8.55367921, 3.25]),
np.array([4.83845338, -0.06968285, 3.25]),
np.array([2.81093887, 7.46827855, 3.25]),
np.array([0.16453263, 2.7395888, 3.25]),
np.array([0.72086808, 3.73863384, 3.25]),
np.array([-2.60081194, -4.16909876, 3.25]),
np.array([3.839713, -0.29123967, 3.25]),
np.array([-1.61879305, -4.78198183, 3.25]),
np.array([-7.55117813, 1.13727678, 3.25]),
np.array([3.66259269, 6.03049238, 3.25]),
np.array([-4.33543528, -4.87801221, 3.25]),
np.array([-1.29923973, -0.15892838, 3.25]),
np.array([3.68191348, -4.96217322, 3.25]),
np.array([-3.81746439, 6.50004219, 3.25]),
np.array([-3.421152, -5.53083725, 3.25]),
np.array([5.49898056, -2.90976879, 3.25]),
np.array([-0.38942852, -6.84294041, 3.25]),
np.array([3.27499388, 3.09205193, 3.25]),
np.array([1.468062, 8.53217955, 3.25]),
np.array([-4.66475019, -3.24606976, 3.25]),
np.array([-4.65764194, 3.18195181, 3.25]),
np.array([-1.57019021, -6.97081706, 3.25]),
np.array([7.57547351, 0.02846027, 3.25]),
np.array([-4.86324653, -1.69117867, 3.25]),
np.array([0.96394429, 0.18087209, 3.25]),
np.array([-3.34152739, -5.18181183, 3.25]),
np.array([-4.18771876, 3.58084266, 3.25]),
np.array([5.86468526, -5.3484374, 3.25]),
np.array([1.59870173, 8.36118042, 3.25]),
np.array([5.89203303, 2.6759065, 3.25]),
np.array([-0.79057999, 6.58881004, 3.25]),
np.array([-4.04837897, 2.31781327, 3.25]),
np.array([3.66880724, -6.76704128, 3.25]),
np.array([-6.97825733, 3.36637523, 3.25]),
np.array([5.63888276, 4.1776771, 3.25]),
np.array([-2.15349959, 5.91943316, 3.25]),
np.array([-4.85276579, 4.91514082, 3.25]),
np.array([-7.31107254, -3.19688512, 3.25]),
np.array([-7.56355014, -2.69394404, 3.25]),
]
def get_eval_goal_list(self):
return [
np.array([0.00927656, 3.03888736, 3.25]),
np.array([1.0535054, 0.54244131, 3.25]),
np.array([2.97988333, 2.19828506, 3.25]),
np.array([-0.08625725, -2.66008382, 3.25]),
np.array([-5.53817563, 1.30016464, 3.25]),
np.array([-7.34284403, -3.30897914, 3.25]),
np.array([5.34721599, -7.04574016, 3.25]),
np.array([1.5701743, 2.77699441, 3.25]),
np.array([5.51455727, 6.71779349, 3.25]),
np.array([-0.62604526, 1.95728886, 3.25]),
np.array([2.18948636, -7.21505172, 3.25]),
np.array([0.99774909, -8.47347619, 3.25]),
np.array([8.5452943, 0.08286776, 3.25]),
np.array([-7.71756237, 3.42348443, 3.25]),
np.array([3.66341366, 1.91997392, 3.25]),
np.array([4.89323018, 6.2648753, 3.25]),
np.array([4.04716893, 3.53093616, 3.25]),
np.array([8.5513687, 0.39826775, 3.25]),
np.array([-3.07441005, -3.34725609, 3.25]),
np.array([-3.42368536, -4.14163919, 3.25]),
np.array([2.61979674, 5.75253347, 3.25]),
np.array([0.54666075, -1.66785584, 3.25]),
np.array([4.90558802, 2.54940494, 3.25]),
np.array([5.24091262, 6.37654168, 3.25]),
np.array([3.30044642, 6.45136387, 3.25]),
]
def render(self, mode="human"):
# TODO implement "human" and "ansi" modes
if mode == "rgb_array":
# 0:camera 60, 1:camera180, 2:camera300
# camera_observation = self.platform.get_camera_observation(self.step_count)
if self.camera_id == -1: # return ALL images
cam_imgs = self.tricamera.get_images()
camera_observation = np.concatenate(
(cam_imgs[0], cam_imgs[1], cam_imgs[2]), axis=2
)
else:
camera_observation = self.tricamera.get_images()[self.camera_id]
camera_observation = torch.tensor(camera_observation)
return camera_observation
elif mode == "eval":
camera_observation = self.tricamera.get_images()[0]
# self.hide_marker_from_camera()
return camera_observation
else:
raise NotImplementedError
def compute_ftip_dist(self, achieved_goal, desired_goal) -> dict:
d = {}
for i in range(3):
k = f"f%s_dist" % i
x = torch.tensor(
desired_goal[(3 * i) : (3 * i) + 3]
- achieved_goal[(3 * i) : (3 * i) + 3]
)
d[k] = torch.sum(torch.sqrt(x**2))
return d
def scaled_success(self, cur_ft_pos):
"""
args:
ftpos (np.array): current fingertip positions [9,]
"""
scaled_err = d_utils.get_reach_scaled_err(
self.finger_to_move_list,
self.start_pos,
cur_ft_pos,
self.goal.clone().detach().cpu().numpy(),
task._CUBE_WIDTH / 2,
)
success = 1 - scaled_err
if success < 0:
success = 0
return success
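# Added note: the scaling is delegated to d_utils.get_reach_scaled_err, called with the
# starting fingertip positions, the current ones, the goal, and half a cube width as a
# tolerance; success here is simply 1 minus that error, clipped below at 0.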
def compute_reward(
self,
achieved_goal,
desired_goal,
info: dict,
) -> float:
"""Compute the reward for the given achieved and desired goal.
Args:
achieved_goal: Current position of the object.
desired_goal: Goal pose of the object.
info: An info dictionary containing a field "time_index" which
contains the time index of the achieved_goal.
Returns:
The reward that corresponds to the provided achieved goal w.r.t.
the desired goal. Note that the following should always hold true::
ob, reward, done, info = env.step()
assert reward == env.compute_reward(
ob['achieved_goal'],
ob['desired_goal'],
info,
)
"""
current_dist_to_goal = (achieved_goal[0:3] - self.goal).norm()
reward = -750 * (current_dist_to_goal)
return reward
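# Worked example (added comment): the reward is -750 times the distance between the
# first fingertip (the finger being moved) and the goal, so a fingertip 2 cm from the
# goal yields a per-step reward of -750 * 0.02 = -15.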
def step(self, action):
"""Run one timestep of the environment's dynamics.
When end of episode is reached, you are responsible for calling
``reset()`` to reset this environment's state.
Args:
action: An action provided by the agent (depends on the selected
:class:`ActionType`).
Returns:
tuple:
- observation (dict): agent's observation of the current
environment.
- reward (float): amount of reward returned after previous action.
- done (bool): whether the episode has ended, in which case further
step() calls will return undefined results.
- info (dict): info dictionary containing the current time index.
"""
if self.run_rl_policy:
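            # Assumed convention from RL training: raw policy outputs are scaled down by 50
            # and clipped to +/-0.02 (meters), so each macro-step commands at most ~2 cm per axis.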
action = torch.tensor(action) / 50.0
action = torch.clip(action, -0.02, 0.02)
if not self.action_space.contains(np.array(action, dtype=np.float32)):
print(action)
raise ValueError("Given action is not contained in the action space.")
# TODO add option for more than one finger?
# TODO check if tensor
if self.run_rl_policy:
three_finger_action = torch.zeros(9, dtype=torch.float32)
three_finger_action[0:3] = action.clone().detach()
else:
three_finger_action = torch.zeros(9)
three_finger_action[0:3] = torch.FloatTensor(action).detach()
num_steps = self.step_size
# ensure episode length is not exceeded due to step_size
step_count_after = self.step_count + num_steps
if step_count_after > self.max_episode_len:
excess = step_count_after - self.max_episode_len
num_steps = max(1, num_steps - excess)
reward = 0.0
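        # Split the commanded fingertip delta across num_steps low-level control steps:
        # at each step the impedance controller converts the interpolated desired fingertip
        # position/velocity into joint torques that are sent to the robot.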
for i in range(num_steps):
# Get current robot state
robot_obs = self.platform.get_robot_observation(self.step_count)
joint_position = robot_obs.position
joint_velocity = robot_obs.velocity
self.step_count += 1
if self.step_count > self.max_episode_len:
raise RuntimeError("Exceeded number of steps for one episode.")
# Update desired position and velocity
x_des_i = self.x_des_plan + (i + 1) * (three_finger_action / num_steps)
dx_des_i = three_finger_action / (self.step_size * self.time_step)
# Compute torque with impedance controller
torque = self.hand_kinematics.get_torque(
x_des_i,
dx_des_i,
joint_position,
joint_velocity,
)
torque = np.clip(
torque, self.robot_torque_space.low, self.robot_torque_space.high
)
# Send action to robot
robot_action = self._gym_action_to_robot_action(torque)
t = self.platform.append_desired_action(robot_action)
# Use observations of step t + 1 to follow what would be expected
# in a typical gym environment. Note that on the real robot, this
# will not be possible
self.info["time_index"] = t + 1
# Alternatively use the observation of step t. This is the
# observation from the moment before action_t is applied, i.e. the
# result of that action is not yet visible in this observation.
#
# When using this observation, the resulting cumulative reward
# should match exactly the one computed during replay (with the
# above it will differ slightly).
# self.info["time_index"] = t
observation = self._create_observation(self.info["time_index"])
# Update plan with action
self.x_des_plan += three_finger_action
# Compute reward
reward = 0
if self.run_rl_policy:
reward += self.compute_reward(
observation["ftip_dist"][:9],
self.goal,
self.info,
)
is_done = self.step_count >= self.max_episode_len
return observation, reward, is_done, self.info
def rand_step(self, tensordict):
action = (np.random.rand(3) * 2) - np.ones(3)
print("rand_step")
print(action)
return self.step(action)
def state_dict(self):
return {}
def fake_tensordict(self):
# TODO is this still used?
observation = self._create_observation(self.info["time_index"])
if self.visual_observation:
observation = {
"pixels": self.render("rgb_array"),
"ftip_dist": observation,
"scaled_success": self.scaled_success(
observation,
),
}
return observation
    def _is_above_table(self, coord):
        return coord[0][-1] > 0 and coord[1][-1] > 0 and coord[2][-1] > 0
def choose_start_pos(self):
while True:
initial_robot_position = (
trifingerpro_limits.robot_position.default
+ ((np.random.rand(9) * 2) - 1) * self.sample_radius
)
eepos = self.platform.simfinger.kinematics.forward_kinematics(
initial_robot_position
)
if self._is_above_table(eepos):
return initial_robot_position
def choose_goal(self):
return self.goal
def choose_goal_from_demos(self, eval=False):
if eval:
self.eval_count += 1
if self.eval_count == len(self.eval_goal_list):
self.eval_count = 0
goal_pos_list = self.eval_goal_list
else:
goal_pos_list = self.train_goal_list
if self.sequential_eval and eval:
idx = self.eval_count
else:
idx = np.random.randint(0, len(goal_pos_list))
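        # Goal positions in the train/eval lists appear to be stored in centimeters
        # (hence the division by 100 to convert to meters for the simulation).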
return torch.FloatTensor(goal_pos_list[idx] / 100.0)
def sample_init_robot_position(self):
q0_range = [-0.15, 0.15]
q1_range = [0.8, 1.15]
q2_range = [-1.35, -1.65]
        q_new = np.zeros(9)
for i in range(3):
q0 = np.random.uniform(q0_range[0], q0_range[1])
q1 = np.random.uniform(q1_range[0], q1_range[1])
q2 = np.random.uniform(q2_range[0], q2_range[1])
q_new[3 * i] = q0
q_new[3 * i + 1] = q1
q_new[3 * i + 2] = q2
return q_new
def reset(self, init_pose_dict=None, init_robot_position=None, eval_mode=False):
"""Reset the environment."""
# initialize cube at the centre
if init_pose_dict is None:
initial_object_pose = task.sample_goal(difficulty=-1)
else:
initial_object_pose = task.Pose.from_dict(init_pose_dict)
if self.run_rl_policy:
# train/test split use same pos. as those used in demonstrations
initial_object_pose.position = self.choose_goal_from_demos(eval_mode)
if init_robot_position is None:
if self.random_q_init:
init_robot_position = self.sample_init_robot_position()
else:
init_robot_position = self.initial_robot_position
self.platform.reset(
initial_object_pose=initial_object_pose,
initial_robot_position=init_robot_position,
)
# Set pybullet GUI params
self._set_sim_params()
self.start_pos = self.hand_kinematics.get_ft_pos(init_robot_position)
self.goal = torch.tensor(initial_object_pose.position) # Cube is fixed
self.info = {"time_index": -1, "goal": self.goal}
self.step_count = 0
new_obs = self._create_observation(0)
# Reset state for policy execution
self.x_des_plan = torch.FloatTensor(self.start_pos.copy())
return new_obs
def seed(self, seed=None):
"""Sets the seed for this env’s random number generator.
.. note::
Spaces need to be seeded separately. E.g. if you want to sample
actions directly from the action space using
``env.action_space.sample()`` you can set a seed there using
``env.action_space.seed()``.
Returns:
List of seeds used by this environment. This environment only uses
a single seed, so the list contains only one element.
"""
self.np_random, seed = gym.utils.seeding.np_random(seed)
task.seed(seed)
self.action_space.seed(seed)
self.observation_space.seed(seed)
return [seed]
def _create_observation(self, t):
robot_observation = self.platform.get_robot_observation(t)
ftip_pos = self.hand_kinematics.get_ft_pos(robot_observation.position)
scaled_success = self.scaled_success(ftip_pos)
if self.run_rl_policy:
goal_pos = torch.clone(torch.FloatTensor(ftip_pos))
goal_pos[0:3] = self.goal
observation_vec = torch.cat(
(torch.FloatTensor(ftip_pos), torch.FloatTensor(goal_pos))
)
if self.visual_observation:
observation = {
"pixels": self.render("rgb_array"),
"ftip_dist": observation_vec,
"scaled_success": scaled_success,
}
else:
# robot_observation = self.platform.simfinger.get_observation(t)
# camera_observation = self.platform.get_camera_observation(t)
goal_pos = torch.clone(torch.FloatTensor(ftip_pos))
goal_pos[0:3] = self.goal
observation = torch.cat(
(torch.FloatTensor(ftip_pos), torch.FloatTensor(goal_pos))
)
else:
camera_observation = self.platform.get_camera_observation(t)
object_observation = camera_observation.filtered_object_pose
# Get cube vertices
obj_pose = {
"position": object_observation.position,
"orientation": object_observation.orientation,
}
observation = {
"t": t,
}
# Compute distances of each finger to object
ftpos_dist_to_obj = d_utils.get_per_finger_ftpos_err(
np.expand_dims(ftip_pos, 0),
np.tile(object_observation.position, (1, 3)),
)
# Add new observation fields
v_wf_dict = c_utils.get_vertices_wf(obj_pose)
observation["robot_position"] = robot_observation.position
observation["object_position"] = object_observation.position
observation["object_orientation"] = object_observation.orientation
observation["object_vertices"] = v_wf_dict
observation["desired_goal"] = self.goal.clone().detach().cpu().numpy()
observation["scaled_success"] = scaled_success
observation["achieved_goal_position_error"] = ftpos_dist_to_obj
observation["ft_pos_cur"] = ftip_pos
# Save camera observation images
if self.visual_observation:
camera_observation_dict = {
"camera60": {
"image": camera_observation.cameras[0].image,
"timestamp": camera_observation.cameras[0].timestamp,
},
"camera180": {
"image": camera_observation.cameras[1].image,
"timestamp": camera_observation.cameras[1].timestamp,
},
"camera300": {
"image": camera_observation.cameras[2].image,
"timestamp": camera_observation.cameras[2].timestamp,
},
}
observation["camera_observation"] = camera_observation_dict
observation["policy"] = {
"controller": self.hand_kinematics.get_observation()
}
return observation
def _gym_action_to_robot_action(self, gym_action):
# construct robot action depending on action type
if self.action_type == ActionType.TORQUE:
robot_action = self.platform.Action(torque=gym_action)
elif self.action_type == ActionType.POSITION:
robot_action = self.platform.Action(position=gym_action)
elif self.action_type == ActionType.TORQUE_AND_POSITION:
robot_action = self.platform.Action(
torque=gym_action["torque"], position=gym_action["position"]
)
else:
raise ValueError("Invalid action_type")
return robot_action
def _set_sim_params(self):
"""Set pybullet GUI params"""
pybullet.configureDebugVisualizer(
pybullet.COV_ENABLE_GUI, 0
) # Turn off debug camera visuals
pybullet.configureDebugVisualizer(
pybullet.COV_ENABLE_SHADOWS, 0
        )  # Turn off shadow rendering
# kinematics wrapper
class HandKinematics:
def __init__(self, simfinger):
self.Nf = 3 # Number of fingers
self.Nq = self.Nf * 3 # Number of joints in hand
# class with kinematics functions
self.kinematics = CustomPinocchioUtils(
simfinger.finger_urdf_path,
simfinger.tip_link_names,
simfinger.link_names,
)
self.controller = ImpedanceController(self.kinematics)
def get_ft_pos(self, q):
"""Get fingertip positions given current joint configuration q"""
ft_pos = np.array(self.kinematics.forward_kinematics(q)).reshape(self.Nq)
return ft_pos
def get_torque(self, x_des, dx_des, q_cur, dq_cur):
return self.controller.get_command_torque(x_des, dx_des, q_cur, dq_cur)
def get_observation(self):
return self.controller.get_observation()
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/trifinger_envs/cube_reach.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import os
import omegaconf
import hydra
import vc_models
vc_models_abs_path = os.path.dirname(os.path.abspath(vc_models.__file__))
MODEL_NAMES = vc_models.vc_model_zoo
# # assumes directory contains nested directories or .yaml model files
# def find_models(root_path):
# models = {}
# for f in os.listdir(root_path):
# if os.path.isdir(os.path.join(root_path, f)):
# temp_d = find_models(os.path.join(root_path, f))
# temp_d.update(models)
# models = temp_d
# elif f.endswith(".yaml"):
# models[f.split(".")[0]] = os.path.join(root_path, f)
# return models
# VC_MODEL_NAMES = find_models(
# os.path.join(vc_models.vc_models_dir_path, "conf/model")
# )
# def get_model_and_transform(model_name, device="cpu"):
# ## Pretrained VC models
# if model_name not in MODEL_NAMES:
# raise NameError("Invalid model_name")
# return get_vc_model_and_transform(
# model_name, device=device
# )
def get_vc_model_and_transform(model_name, device="cpu", use_compression_layer=False):
if model_name not in MODEL_NAMES:
raise NameError("Invalid vc model name")
# Assumes models are in top level of vc/conf/model
cfg_path = os.path.join(vc_models_abs_path, "conf", "model", f"{model_name}.yaml")
main_model_cfg = omegaconf.OmegaConf.load(cfg_path)
if use_compression_layer:
if "model" in main_model_cfg.model:
model_cfg = main_model_cfg.model.model
else:
model_cfg = main_model_cfg.model
model_cfg.global_pool = not use_compression_layer
model_cfg.use_cls = not use_compression_layer
model, embedding_dim, transform, metadata = hydra.utils.call(main_model_cfg)
return model, transform, embedding_dim
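# Example usage (sketch; `name` is assumed to be any entry from MODEL_NAMES and `img` an
# image accepted by the returned transform):
#   model, transform, embedding_dim = get_vc_model_and_transform(name)
#   emb = model(transform(img).unsqueeze(0))
#   # emb shape depends on the model config (e.g. [1, embedding_dim] for pooled features)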
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/model_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from hydra.core.hydra_config import HydraConfig
import wandb
import torch
import numpy as np
from omegaconf import OmegaConf
import wandb
from trifinger_vc.utils import data_utils
from trifinger_vc.utils.model_utils import MODEL_NAMES
from vc_models.models.vit import model_utils
DEMO_TRAJ_IDS = [1, 2, 3, 110, 210, 310, 111, 211, 311]
_TRIFINGER_DEMOS_URL = "https://dl.fbaipublicfiles.com/eai-vc/trifinger_demos.zip"
def get_exp_dir(params_dict):
"""
Get experiment directory to save logs in, and experiment name
args:
params_dict: hydra config dict
return:
exp_dir: Path of experiment directory
exp_str: Name of experiment run - to name wandb run
exp_id: Experiment id - for conf.exp_id to label wandb run
"""
if params_dict["exp_dir_to_resume"] is None:
hydra_output_dir = HydraConfig.get().runtime.output_dir
else:
hydra_output_dir = params_dict["exp_dir_to_resume"]
if "experiment" in HydraConfig.get().runtime.choices:
exp_dir_path = HydraConfig.get().sweep.dir
exp_id = os.path.basename(os.path.normpath(exp_dir_path))
run_id = params_dict["run_id"]
exp_str = f"{exp_id}_r-{run_id}"
else:
hydra_run_dir = HydraConfig.get().run.dir
run_date_time = "_".join(hydra_run_dir.split("/")[-2:])
exp_id = f"single_run_{run_date_time}"
exp_str = exp_id
return hydra_output_dir, exp_str, exp_id
def configure_wandb(exp_name, exp_dir, conf):
# wandb init
wandb_info_pth = os.path.join(exp_dir, "wandb_info.pth")
if os.path.exists(wandb_info_pth):
# Resume logging to existing wandb run
wandb_info = torch.load(wandb_info_pth)
conf_for_wandb = wandb_info["conf_for_wandb"]
wandb_id = wandb_info["id"]
else:
# Convert conf to container, so I can add "exp_id" and conf of forward model
conf_for_wandb = OmegaConf.to_container(
conf, resolve=True, throw_on_missing=True
)
conf_for_wandb["exp_id"] = exp_name # Add experiment id to conf, for wandb
# If using a forward model, add forward_model_ckpt["conf"].algo args to wandb conf
if (
"mpc_forward_model_ckpt" in conf_for_wandb["algo"]
and conf_for_wandb["algo"]["mpc_forward_model_ckpt"]
):
ckpt_info = torch.load(conf_for_wandb["algo"]["mpc_forward_model_ckpt"])
conf_for_wandb["dyn_model"] = ckpt_info["conf"].copy()
# Then, convert conf_for_wandb container --> omegaconf --> container
# Needed to do this to get dyn_model params to log as dyn_model.param in wandb
conf_for_wandb = OmegaConf.to_container(
OmegaConf.create(conf_for_wandb), resolve=True, throw_on_missing=True
)
wandb_id = wandb.util.generate_id()
wandb_info = {
"run_name": exp_name,
"id": wandb_id,
"conf_for_wandb": conf_for_wandb,
}
torch.save(wandb_info, wandb_info_pth)
wandb.init(
project=conf.run_name,
entity=conf.wb_entity,
id=wandb_info["id"],
name=wandb_info["run_name"],
config=wandb_info["conf_for_wandb"],
settings=wandb.Settings(start_method="thread"),
resume="allow",
)
return wandb_info
def plot_loss(loss_dict, outer_i=None):
"""Log loss to wandb"""
log_dict = {f"{k}": v for k, v in loss_dict.items()}
if outer_i:
log_dict["outer_i"] = outer_i
wandb.log(log_dict)
def find_most_recent_ckpt(ckpts_dir):
start_epoch = 0
files_in_ckpts_dir = os.listdir(ckpts_dir)
if len(files_in_ckpts_dir) == 0:
ckpt_pth = None
else:
for item in files_in_ckpts_dir:
if item.endswith("ckpt.pth"):
start_epoch = max(start_epoch, int(item.split("_")[1]))
ckpt_pth = os.path.join(ckpts_dir, "epoch_%d_ckpt.pth" % start_epoch)
return ckpt_pth, start_epoch
def get_obs_dict_from_traj(traj, t, obj_state_type):
"""Get observation dict for forward models"""
obs_dict = {
"ft_state": torch.unsqueeze(torch.FloatTensor(traj["ft_pos_cur"][t]), 0),
}
if "mode" in traj:
obs_dict["mode"] = traj["mode"][t]
if obj_state_type == "na":
return obs_dict # No object state
# Add object state to obs_dict
if obj_state_type == "pos":
o_state = traj["o_pos_cur"][t]
elif obj_state_type == "vertices":
o_state = traj["vertices"][t]
elif obj_state_type in MODEL_NAMES:
o_state = traj[obj_state_type][t]
else:
raise ValueError
obs_dict["o_state"] = torch.unsqueeze(torch.FloatTensor(o_state), 0)
return obs_dict
def parse_traj_dict(traj_dict, cost_state, obj_state_type):
"""
Parse out relevant part of traj_dict to compare with pred_traj from mpc
args:
state (str): "ftpos" | "obj" | "ftpos_obj"
return
traj (nparray [T, state_dim]): where each row is a state vector of the format [ftpos, o_state]
"""
ftpos_traj = torch.Tensor(traj_dict["ft_pos_cur"])
latent_rep_traj = torch.Tensor(traj_dict[obj_state_type])
if cost_state == "obj":
# TODO assuming that "obj" state means only using latent rep
# no support for "pos" or "vertices" obj_state_type
traj = latent_rep_traj
elif cost_state == "ftpos":
traj = ftpos_traj
elif cost_state == "ftpos_obj":
traj = torch.cat([ftpos_traj, latent_rep_traj], dim=1)
else:
raise ValueError("Invalid cost_state")
return traj
def parse_pred_traj(pred_traj, state, fnum=3, mpc_use_ftpos=True):
"""
Parse out relevant part of pred_traj from mpc for cost function, based on state for cst
args:
pred_traj (nparray [T, state_dim]): where each row is a state vector of the format [ftpos, o_state]
state (str): cost state type: "ftpos" | "obj" | "ftpos_obj"
"""
ftpos_dim = 3 * fnum
if mpc_use_ftpos:
if state == "ftpos":
return pred_traj[:, :ftpos_dim] # First part of pred_traj
elif state == "obj":
assert (
pred_traj.shape[1] > ftpos_dim
), "State does not include object state. Try using mpc_type = two_phase"
return pred_traj[:, ftpos_dim:] # Last part of pred_traj
elif state == "ftpos_obj":
assert (
pred_traj.shape[1] > ftpos_dim
), "State does not include object state. Try using mpc_type = two_phase"
return pred_traj
else:
raise ValueError(f"{state} is invalid state")
else:
if state == "obj":
return pred_traj
else:
raise ValueError(
f"{state} is invalid state, pred_traj does not contain ftpos"
)
def download_demos_if_needed(data_path):
if os.path.isdir(data_path):
return
os.makedirs(data_path)
model_utils._download_url(_TRIFINGER_DEMOS_URL, data_path + ".zip")
import zipfile
with zipfile.ZipFile(data_path + ".zip","r") as zip_ref:
zip_ref.extractall(data_path)
def get_traj_list(demo_root_dir, demo_stats_list, obj_state_type):
"""Given list of demo stats demo_stats_list, load demo dicts and save in traj_list"""
try:
download_demos_if_needed(os.path.join(demo_root_dir,"data"))
except Exception as e:
print(f"Unable to download demonstrations from S3:\n {e}")
traj_list = []
for demo_stat in demo_stats_list:
demo_dir = demo_stat["path"]
downsample_data_path = os.path.join(demo_root_dir,"data","trifinger-demos", demo_dir, "downsample.pth")
if not os.path.exists(downsample_data_path):
raise ValueError(f"{downsample_data_path} not found")
demo_dict = torch.load(downsample_data_path)
if obj_state_type in MODEL_NAMES:
# Load latent state from obj_state_type.pth file
latent_data_path = os.path.join(demo_dir, f"{obj_state_type}.pth")
if not os.path.exists(latent_data_path):
raise ValueError(f"{latent_data_path} not found")
latent_data = torch.load(latent_data_path)["data"]
demo_dict[obj_state_type] = latent_data
traj_list.append(demo_dict)
return traj_list
def get_traj_range(traj_list, key, traj_stats=None):
max_val = -np.inf
min_val = np.inf
if key == "position_error":
for i, traj in enumerate(traj_list):
if traj_stats and traj_stats[i]["diff"] not in DEMO_TRAJ_IDS:
# For getting ranges, skip non-demo trajectories
continue
traj_for_key = traj[key]
if traj_for_key[-1] > 1:
print(traj_stats[i]["id"])
print(traj_for_key[-1])
print(traj["o_pos_des"][-1])
print(traj["o_pos_cur"][-1])
max_val = max(max_val, traj_for_key[-1])
min_val = min(min_val, traj_for_key[-1])
else:
for i, traj in enumerate(traj_list):
if traj_stats and traj_stats[i]["diff"] not in DEMO_TRAJ_IDS:
# For getting ranges, skip non-demo trajectories
continue
traj_for_key = traj[key]
max_val = max(max_val, np.max(traj_for_key))
min_val = min(min_val, np.min(traj_for_key))
return min_val, max_val
def get_traj_range_per_dim(traj_list, key, traj_stats=None):
"""Get per-dimension ranges"""
# Initialize max and min arrays
min_val = np.ones(traj_list[0][key].shape[1]) * np.inf
max_val = np.ones(traj_list[0][key].shape[1]) * -np.inf
for i, traj in enumerate(traj_list):
if traj_stats and traj_stats[i]["diff"] not in DEMO_TRAJ_IDS:
# For getting ranges, skip non-demo trajectories
continue
traj_for_key = traj[key]
for t in range(traj_for_key.shape[0]):
traj_t = traj_for_key[t, :]
max_val = np.where(traj_t > max_val, traj_t, max_val)
min_val = np.where(traj_t < min_val, traj_t, min_val)
return min_val, max_val
def get_bc_obs_vec_from_obs_dict(
obs_dict_in,
state_type,
goal_type,
):
"""
Return obs vector for bc policy
args:
obs_dict (dict):
obs_type (str): [
"goal_none", # no goal
"goal_cond", # goal state appended to observation
]
"""
# If obs_dict fields aren't batched (only have 1 dim), add extra dim
# so the shape is [1, D]
obs_dict = {}
for k, v in obs_dict_in.items():
if v is not None and v.dim() == 1:
obs_dict[k] = torch.unsqueeze(v, 0)
else:
obs_dict[k] = v
if state_type == "ftpos":
state = obs_dict["ft_state"]
elif state_type == "obj":
state = obs_dict["o_state"]
elif state_type == "ftpos_obj":
state = torch.cat([obs_dict["ft_state"], obs_dict["o_state"]], dim=1)
else:
raise NameError("Invalid state_type")
if goal_type == "goal_none":
obs_vec = state
elif goal_type == "goal_cond":
obs_vec = torch.cat([state, obs_dict["o_goal"]], dim=1)
elif goal_type == "goal_o_pos":
# Use object position goal state - relative to init position of object
obs_vec = torch.cat([state, obs_dict["o_goal_pos_rel"]], dim=1)
else:
raise NameError("Invalid goal_type")
return obs_vec
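# Example (sketch, hypothetical shapes): with state_type="ftpos_obj" and goal_type="goal_cond",
# ft_state [1, 9] and o_state [1, D] are concatenated to [1, 9 + D], then o_goal [1, D] is
# appended, giving an obs_vec of shape [1, 9 + 2 * D].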
def scale_to_range(x, in_range_min, in_range_max, out_range_min, out_range_max):
# Scale x from in_range to out_range
# Scale to be symmetric around 0 and then shift by offset
# From https://codereview.stackexchange.com/questions/185785/scale-numpy-array-to-certain-range
y = (x - (in_range_max + in_range_min) / 2) / (in_range_max - in_range_min)
return y * (out_range_max - out_range_min) + (out_range_max + out_range_min) / 2
def save_demo_to_file(save_dir, epoch, observation_list, expert_demo_dict, pred_actions):
save_path = os.path.join(save_dir, f"obs_epoch_{epoch+1}.npz")
np.savez_compressed(save_path, data=observation_list)
print(f"Saved sim rollout to {save_path}")
# Plot actions
expert_actions = expert_demo_dict["delta_ftpos"]
title = "Fingertip position deltas (epoch: {})".format(epoch)
save_name = f"action_epoch_{epoch+1}.png"
save_path = os.path.join(save_dir, save_name)
data_utils.plot_traj(
title,
save_path,
[
"x1",
"y1",
"z1",
"x2",
"y2",
"z2",
"x3",
"y3",
"z3",
],
{
"pred": {
"y": np.array(pred_actions),
"x": expert_demo_dict["t"][:-1],
"marker": "x",
},
"demo": {
"y": expert_actions[:-1],
"x": expert_demo_dict["t"][:-1],
"marker": ".",
},
},
)
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/train_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
from PIL import Image
import trifinger_vc.utils.data_utils as d_utils
import trifinger_vc.utils.model_utils as m_utils
class EncoderModel(torch.nn.Module):
def __init__(
self,
pretrained_rep="r3m",
freeze_pretrained_rep=False,
rep_to_policy="linear_layer",
):
super().__init__()
(
self.pretrained_rep_model,
self.transform,
pretrained_rep_dim,
) = m_utils.get_vc_model_and_transform(
pretrained_rep, use_compression_layer=False
)
self.pretrained_rep = pretrained_rep
self.pretrained_rep_dim = pretrained_rep_dim
self.rep_to_policy = rep_to_policy
if freeze_pretrained_rep:
for (
name,
param,
) in self.pretrained_rep_model.named_parameters():
param.requires_grad = False
# this only works for ViTs
output_rep_dim = 784
if self.rep_to_policy == "linear_layer":
assert (
self.pretrained_rep_model.classifier_feature == "global_pool"
or self.pretrained_rep_model.classifier_feature == "use_cls_token"
)
self.compression = nn.Sequential(
nn.Linear(self.pretrained_rep_dim, output_rep_dim), nn.ReLU(True)
)
elif self.rep_to_policy == "none":
self.compression = nn.Identity()
output_rep_dim = pretrained_rep_dim
elif self.rep_to_policy == "1D_avgpool":
assert self.pretrained_rep_model.classifier_feature == "reshape_embedding"
self.compression = nn.AdaptiveAvgPool1d(output_rep_dim)
self.pretrained_rep_dim = output_rep_dim
def encode_img(self, img):
"""
Encode img by first passing it through transform, then through model
** Only works for single, unbatched image **
"""
img_preproc = self.transform(Image.fromarray(img.astype(np.uint8))).unsqueeze(0)
device = next(self.parameters()).device
return self.forward(img_preproc.to(device))[0].detach()
def forward(self, input_tensor):
x = self.pretrained_rep_model(input_tensor)
if self.rep_to_policy == "1D_avgpool":
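            # Rearrange the [N, D, H, W] patch-embedding map to [N, H, W, D] and flatten to
            # [N, H*W*D] before adaptive average-pooling it down to output_rep_dim.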
N = x.shape[0]
x = torch.einsum("ndhw->nhwd", x)
x = x.reshape(N, -1)
x = self.compression(x)
return x
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/encoder_model.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import torch
import copy
import trifinger_vc.utils.train_utils as t_utils  # used by ScaledDeterministicPolicy.scale_to_range
def construct_policy(
    task_state_type, train_ft_state_shape, pretrained_dim, task_goal_type, out_dim, max_a, device
):
o_state_dim = pretrained_dim
if task_state_type == "obj":
state_dim = o_state_dim
elif task_state_type == "ftpos_obj":
state_dim = o_state_dim + train_ft_state_shape
elif task_state_type == "ftpos":
state_dim = train_ft_state_shape
else:
raise NameError("Invalid state_type")
if task_goal_type == "goal_none":
in_dim = state_dim
elif task_goal_type == "goal_cond":
in_dim = state_dim + o_state_dim
elif task_goal_type == "goal_o_pos":
in_dim = state_dim + 3
else:
raise NameError("Invalid goal_type")
policy = DeterministicPolicy(
in_dim=in_dim,
out_dim=out_dim,
max_a=max_a,
device=device,
)
return policy
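# Example (sketch; dimensions are illustrative, not taken from any config):
#   policy = construct_policy("ftpos_obj", 9, 768, "goal_cond", out_dim=9, max_a=0.02, device="cpu")
#   action = policy.clip_action(policy(obs_vec))  # obs_vec: [B, in_dim]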
class DeterministicPolicy(torch.nn.Module):
def __init__(
self,
in_dim,
out_dim,
max_a=None,
device="cpu",
):
super().__init__()
num_neurons = 2000
self.activation = torch.nn.ReLU
self.policy = torch.nn.Sequential(
torch.nn.Linear(in_dim, num_neurons),
self.activation(),
torch.nn.Linear(num_neurons, num_neurons),
self.activation(),
torch.nn.Linear(num_neurons, out_dim),
)
self.policy.to(device)
self.device = device
self.init_state = copy.deepcopy(self.policy.state_dict())
self.max_a = max_a
self.in_dim = in_dim
self.out_dim = out_dim
def forward(self, state):
action = self.policy(state)
return action
def reset(self):
self.policy.load_state_dict(self.init_state)
def clip_action(self, a):
if self.max_a is not None:
a = torch.where(
a > self.max_a, torch.tensor([self.max_a]).to(self.device), a
)
a = torch.where(
a < -self.max_a, -torch.tensor([self.max_a]).to(self.device), a
)
return a
def scale_to_range(self, a):
"""Does not do anything; just returns a"""
return a
class ScaledDeterministicPolicy(torch.nn.Module):
def __init__(
self,
in_dim,
out_dim,
max_a=None,
min_a_per_dim=None,
max_a_per_dim=None,
device="cpu",
):
super().__init__()
num_neurons = 2000
self.activation = torch.nn.Tanh
# self.activation = torch.nn.ReLU
self.policy = torch.nn.Sequential(
torch.nn.Linear(in_dim, num_neurons),
self.activation(),
torch.nn.Linear(num_neurons, num_neurons),
self.activation(),
torch.nn.Linear(num_neurons, out_dim),
torch.nn.Tanh(),
)
self.policy.to(device)
self.device = device
self.init_state = copy.deepcopy(self.policy.state_dict())
self.max_a = max_a
if min_a_per_dim is not None and max_a_per_dim is not None:
            assert all(
                hi >= lo for hi, lo in zip(max_a_per_dim, min_a_per_dim)
            ), "max_a_per_dim must be >= min_a_per_dim element-wise"
self.min_a_per_dim = torch.unsqueeze(torch.Tensor(min_a_per_dim), 0).to(
self.device
)
self.max_a_per_dim = torch.unsqueeze(torch.Tensor(max_a_per_dim), 0).to(
self.device
)
else:
self.min_a_per_dim = None
self.max_a_per_dim = None
self.in_dim = in_dim
self.out_dim = out_dim
def forward(self, state):
action = self.policy(state)
return action
def reset(self):
self.policy.load_state_dict(self.init_state)
def clip_action(self, a):
if self.max_a is not None:
a = torch.where(
a > self.max_a, torch.tensor([self.max_a]).to(self.device), a
)
a = torch.where(
a < -self.max_a, -torch.tensor([self.max_a]).to(self.device), a
)
if self.min_a_per_dim is not None and self.max_a_per_dim is not None:
a = torch.where(a > self.max_a_per_dim, self.max_a_per_dim, a)
a = torch.where(a < self.min_a_per_dim, self.min_a_per_dim, a)
return a
def scale_to_range(self, a):
in_range_min = torch.ones(self.min_a_per_dim.shape).to(self.device) * -1
in_range_max = torch.ones(self.min_a_per_dim.shape).to(self.device)
a_scaled = t_utils.scale_to_range(
a, in_range_min, in_range_max, self.min_a_per_dim, self.max_a_per_dim
)
return a_scaled
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/policy.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from trifinger_vc.utils.model_utils import MODEL_NAMES, get_vc_model_and_transform
import vc_models.transforms as vc_t
class BCFinetuneDataset(torch.utils.data.Dataset):
def __init__(
self,
demo_list,
state_type="ftpos_obj",
obj_state_type="pos",
device="cpu",
augment_prob=0.0,
times_to_use_demo=1,
jitter_brightness=0.5,
jitter_contrast=0.5,
jitter_saturation=0.5,
jitter_hue=0.03,
shift_pad=10,
fingers_to_move=3,
task="move_cube",
demo_root_dir="assets/data/trifinger-demos",
):
"""
args:
state_type: ["ftpos", "ftpos_obj", "obj"]
obj_state_type: ["pos", "vertices"] + MODEL_NAMES
augment_prob: probablity to augment images [0.0, 1.0]
times_to_use_demo: (int) number of times to use each demo
task: task name (str)
if "reach_cube", take subset of action corresponding to diff (number of fingers that move)
"""
self.dataset = []
self.state_type = state_type
self.obj_state_type = obj_state_type
self.device = device
self.augment_prob = augment_prob
self.times_to_use_demo = times_to_use_demo
self.n_fingers_to_move = fingers_to_move
# Get transformation for img preproc
if self.obj_state_type in MODEL_NAMES:
_, self.preproc_transform, pretrained_rep_dim = get_vc_model_and_transform(
self.obj_state_type, device=self.device
)
else:
raise NameError
# Make dataset from demo list, and save
self.dataset = []
self.n_augmented_samples = 0
# Random image shift and color jitter
self.rand_augment_transforms = vc_t.transform_augment(
# Resize/crop
resize_size=256,
output_size=224,
# Jitter
jitter=True,
jitter_prob=1.0,
jitter_brightness=jitter_brightness,
jitter_contrast=jitter_contrast,
jitter_saturation=jitter_saturation,
jitter_hue=jitter_hue,
# Shift
shift=True,
shift_pad=shift_pad,
# Randomize environments
randomize_environments=False,
)
for demo_stats in demo_list:
if demo_root_dir is not None:
demo_dir = os.path.join(demo_root_dir,demo_stats["path"])
else:
demo_dir = demo_stats["path"]
self.add_new_traj(demo_dir)
# Dimensions
self.out_dim = self.dataset[0]["output"]["action"].shape[0]
self.pretrained_rep_dim = pretrained_rep_dim
def add_new_traj(self, demo_dir):
# Read data from demo_dir
downsample_data_path = os.path.join(demo_dir, "downsample.pth")
if not os.path.exists(downsample_data_path):
print(f"{downsample_data_path} not found")
return
demo = torch.load(downsample_data_path)
num_obs = demo["o_pos_cur"].shape[0]
# Goal position (absolute)
o_goal_pos = torch.FloatTensor(demo["o_pos_cur"][-1]).to(self.device)
# Goal position (relative)
o_init_pos = torch.FloatTensor(demo["o_pos_cur"][0]).to(self.device)
o_goal_pos_rel = o_goal_pos - o_init_pos
# Goal image
orig_img_goal = (
torch.Tensor(demo["image_60"][-1]).permute((2, 0, 1)) / 255.0
) # [3, 270, 270]
for i in range(num_obs - 1):
# Current fingertip positions
ft_pos_cur = demo["ft_pos_cur"][i]
# Action (fingertip position deltas)
action = torch.FloatTensor(demo["delta_ftpos"][i])
            # Get subset of delta_ftpos that corresponds to diff (number of fingers that move)
# For the reach task this will be [:3], and for other tasks [:9]
action = action[: self.n_fingers_to_move * 3]
# transform expects images as float tensor with values in range [0.0, 1.0]
orig_img = (
torch.Tensor(demo["image_60"][i]).permute((2, 0, 1)) / 255.0
) # [3, 270, 270]
for j in range(self.times_to_use_demo):
# Augment images
if np.random.rand() < self.augment_prob:
img = self.rand_augment_transforms(orig_img)
self.n_augmented_samples += 1
else:
img = orig_img
# For testing
# plt.imsave(f"test_img_{i}_aug_{j}.png", img.permute(1,2,0).detach().numpy())
# Transform images for encoder
img_preproc = self.preproc_transform(img).to(self.device)
img_preproc_goal = self.preproc_transform(orig_img_goal).to(self.device)
# Observation dict (current state and action)
input_dict = {
"ft_state": torch.FloatTensor(ft_pos_cur).to(self.device),
"rgb_img_preproc": img_preproc,
"rgb_img_preproc_goal": img_preproc_goal,
"o_goal_pos": o_goal_pos,
"o_goal_pos_rel": o_goal_pos_rel,
}
output_dict = {
"action": torch.FloatTensor(action).to(self.device),
}
data_dict = {"input": input_dict, "output": output_dict}
self.dataset.append(data_dict)
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# TODO instead of reading all demos into memory, can read from files each time here
# and apply image augmentation
return self.dataset[idx]
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import imageio
import torchvision.transforms as T
from PIL import Image
import trifinger_vc.control.finger_utils as f_utils
NON_TRAJ_KEYS = ["ft_pos_targets_per_mode"]
def get_traj_dict_from_obs_list(data, scale=1, include_image_obs=True):
"""
Process list of observation dicts into dict of lists (trajectories for each quantity)
args:
data: list of observation dicts
scale: amount to scale all distances by. by default, all distances are in meters. convert to cm with scale=100
"""
data_keys = data[0].keys()
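    # Two observation formats are supported: an older nested format (keys under
    # "object_observation", "robot_observation", "achieved_goal", "desired_goal") and the
    # flat format produced by the current environments.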
if "object_observation" in data_keys: # prev. key in obs
position_error = np.array(
[data[i]["achieved_goal"]["position_error"] for i in range(len(data))]
)
o_cur = np.array(
[data[i]["object_observation"]["position"] for i in range(len(data))]
)
o_cur_ori = np.array(
[data[i]["object_observation"]["orientation"] for i in range(len(data))]
)
robot_pos = np.array(
[data[i]["robot_observation"]["position"] for i in range(len(data))]
)
o_des = np.array(
[data[i]["desired_goal"]["position"] for i in range(len(data))]
)
o_des_ori = np.array(
[data[i]["desired_goal"]["orientation"] for i in range(len(data))]
)
else:
position_error = np.array(
[data[i]["achieved_goal_position_error"] for i in range(len(data))]
)
o_cur = np.array([data[i]["object_position"] for i in range(len(data))])
o_cur_ori = np.array([data[i]["object_orientation"] for i in range(len(data))])
robot_pos = np.array([data[i]["robot_position"] for i in range(len(data))])
o_des = np.array([data[i]["desired_goal"][:3] for i in range(len(data))])
o_des_ori = np.array([data[i]["desired_goal"][3:] for i in range(len(data))])
ft_pos_cur = np.array(
[data[i]["policy"]["controller"]["ft_pos_cur"] for i in range(len(data))]
)
ft_pos_des = np.array(
[data[i]["policy"]["controller"]["ft_pos_des"] for i in range(len(data))]
)
delta_ftpos = np.array([data[i]["action"]["delta_ftpos"] for i in range(len(data))])
ft_vel_cur = np.array(
[data[i]["policy"]["controller"]["ft_vel_cur"] for i in range(len(data))]
)
# ft_vel_des = np.array(
# [data[i]["policy"]["controller"]["ft_vel_des"] for i in range(len(data))]
# )
t = np.expand_dims(np.array([data[i]["t"] for i in range(len(data))]), 1)
traj_dict = {
"t": t,
"o_pos_cur": scale * o_cur,
"o_pos_des": scale * o_des,
"o_ori_cur": o_cur_ori,
"o_ori_des": o_des_ori,
"ft_pos_cur": scale * ft_pos_cur,
"ft_pos_des": scale * ft_pos_des,
"ft_vel_cur": scale * ft_vel_cur,
# "ft_vel_des": scale * ft_vel_des,
"position_error": scale * position_error,
"delta_ftpos": scale * delta_ftpos,
"robot_pos": robot_pos,
}
if "scaled_success" in data_keys:
scaled_success = np.array([data[i]["scaled_success"] for i in range(len(data))])
traj_dict["scaled_success"] = scaled_success
if include_image_obs:
image60 = np.array(
[
data[i]["camera_observation"]["camera60"]["image"]
for i in range(len(data))
]
)
image180 = np.array(
[
data[i]["camera_observation"]["camera180"]["image"]
for i in range(len(data))
]
)
image300 = np.array(
[
data[i]["camera_observation"]["camera300"]["image"]
for i in range(len(data))
]
)
traj_dict["image_60"] = image60
traj_dict["image_180"] = image180
traj_dict["image_300"] = image300
# Mode information
if "ft_pos_targets_per_mode" in data[-1]["policy"]:
traj_dict["ft_pos_targets_per_mode"] = (
scale * data[-1]["policy"]["ft_pos_targets_per_mode"]
)
# Add "mode"
if "mode" not in data[0]["policy"]:
traj_dict["mode"] = np.array(
[
len(data[i]["policy"]["ft_pos_targets_per_mode"])
for i in range(len(data))
]
)
else:
traj_dict["mode"] = np.array(
[data[i]["policy"]["mode"] for i in range(len(data))]
)
# Object vertices
if (
"object_observation" in data[0] and "vertices" in data[0]["object_observation"]
) or ("object_vertices" in data[0]):
vertices = []
# Flatten vertices dict at each timestep and add to vertices list
for i in range(len(data)):
if "object_observation" in data[0]:
v_wf_dict = data[i]["object_observation"]["vertices"]
else:
v_wf_dict = data[i]["object_vertices"]
v_wf_flat = np.zeros(len(v_wf_dict) * 3)
for k, v_wf in v_wf_dict.items():
v_wf_flat[k * 3 : k * 3 + 3] = v_wf
vertices.append(v_wf_flat)
traj_dict["vertices"] = scale * np.array(vertices)
else:
# Vertices were not logged
pass
return traj_dict
def downsample_traj_dict(
traj_dict,
cur_time_step=0.004,
new_time_step=0.1,
):
"""
    Downsample each of the trajectories in traj_dict.
args:
traj_dict: dict of trajectories, generated by calling get_traj_dict_from_obs_list()
cur_time_step: time step of raw observations (simulation/control timestep)
new_time_step: time step to downsample to
"""
every_x_steps = max(1, int(new_time_step / cur_time_step))
num_waypoints = int(traj_dict["t"].shape[0] / every_x_steps)
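    # Take num_waypoints + 1 evenly spaced indices (always including the first and
    # last timestep of the original trajectory).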
indices_to_take = (
np.linspace(1, traj_dict["t"].shape[0], num_waypoints + 1, dtype=int) - 1
)
new_traj_dict = {}
for k, traj in traj_dict.items():
if "delta" in k:
continue # Need to recompute deltas for downsampled traj
if k in NON_TRAJ_KEYS:
new_traj_dict[k] = traj
continue
# new_traj = traj[::every_x_steps, :]
new_traj = traj[indices_to_take]
new_traj_dict[k] = new_traj
# Compute deltas for downsampled traj
new_delta_ftpos = np.zeros(new_traj_dict["ft_pos_cur"].shape)
ft_pos = new_traj_dict["ft_pos_des"]
for t in range(ft_pos.shape[0] - 1):
delta = ft_pos[t + 1] - ft_pos[t]
new_delta_ftpos[t, :] = delta
new_traj_dict["delta_ftpos"] = new_delta_ftpos
return new_traj_dict
def get_traj_mode(traj_dict, mode):
"""Parse out part of trajectory with corresponding mode"""
assert mode in [1, 2], "Invalid mode"
new_traj_dict = {}
indices_to_take = np.where(traj_dict["mode"] == mode)[0]
for k, traj in traj_dict.items():
if k in NON_TRAJ_KEYS:
new_traj_dict[k] = traj
continue
new_traj = traj[indices_to_take]
new_traj_dict[k] = new_traj
return new_traj_dict
def crop_traj_dict(traj_dict, crop_range):
"""crop_range: [crop_min, crop_max]"""
if crop_range[0] is None:
crop_min = 0
else:
crop_min = crop_range[0]
new_traj_dict = {}
for k, traj in traj_dict.items():
if crop_range[1] is None:
crop_max = traj.shape[0]
else:
crop_max = crop_range[1]
if k in NON_TRAJ_KEYS:
new_traj_dict[k] = traj
continue
if traj.ndim == 2:
new_traj = traj[crop_min:crop_max, :]
else:
new_traj = traj[crop_min:crop_max]
new_traj_dict[k] = new_traj
return new_traj_dict
def plot_traj(title, save_path, d_list, data_dicts, plot_timestamp=None):
"""
Plot trajectories
data_dicts = {
"label_1": {"y": y data, "x": x data (optional), "marker": marker string (optional)],},
"label_2": {"y": y data, "x"},
...
}
"""
plt.figure(figsize=(10, 10), dpi=200)
plt.subplots_adjust(hspace=1)
plt.suptitle(title)
k = 0
for i, d in enumerate(d_list):
k += 1
plt.subplot(len(d_list), 1, k)
if len(d_list) > 1:
plt.title(d)
for label, data in data_dicts.items():
num_steps = data["y"].shape[0]
if "x" in data:
x = data["x"]
else:
x = list(range(num_steps))
if "marker" in data:
marker = data["marker"]
else:
marker = None
plt.plot(x, data["y"][:, i], marker=marker, label=label)
if plot_timestamp is not None:
plt.axvline(x=plot_timestamp, ls="--", c="k", lw=1)
plt.legend()
if save_path is not None:
plt.savefig(save_path)
else:
plt.show()
plt.close()
def encode_img(model, transform, img):
"""
Encode img by first passing it through transform, then through model
** Only works for single, unbatched image **
"""
img_preproc = transform(Image.fromarray(img.astype(np.uint8))).unsqueeze(0)
return model(img_preproc)[0].detach()
def resize_img(img, new_dim=64):
resize = T.Compose(
[T.Resize(new_dim, interpolation=T.InterpolationMode.BICUBIC), T.ToTensor()]
)
resized_img = resize(Image.fromarray(img.astype(np.uint8)))
# resized_img = resized_img.detach().numpy().transpose(1,2,0) * 255.0
return resized_img
def save_gif(images, save_str, duration=None):
frames = []
for i, img in enumerate(images):
# img = resize_img(img).detach().numpy().transpose(1,2,0) * 255.
frames.append(img.astype(np.uint8))
if duration is None:
imageio.mimsave(save_str, frames)
else:
imageio.mimsave(save_str, frames, duration=duration)
def add_actions_to_obs(observation_list):
"""
Given observation list with ft_pos_cur,
add delta_ftpos and delta_q actions to observation.
"""
for t in range(len(observation_list) - 1):
ftpos_cur = observation_list[t]["policy"]["controller"]["ft_pos_cur"]
ftpos_next = observation_list[t + 1]["policy"]["controller"]["ft_pos_cur"]
delta_ftpos = ftpos_next - ftpos_cur
q_cur = observation_list[t]["robot_position"]
q_next = observation_list[t + 1]["robot_position"]
delta_q = q_next - q_cur
action_dict = {"delta_ftpos": delta_ftpos, "delta_q": delta_q}
observation_list[t]["action"] = action_dict
action_dict = {
"delta_ftpos": np.zeros(delta_ftpos.shape),
"delta_q": np.zeros(delta_q.shape),
}
observation_list[-1]["action"] = action_dict
def get_per_finger_ftpos_err(pred_ftpos, gt_ftpos, fnum=3):
"""Compute ftpos L2 distance for each finger"""
ftpos_err = np.ones((pred_ftpos.shape[0], fnum)) * np.nan
for i in range(fnum):
per_finger_err = np.linalg.norm(
(pred_ftpos[:, i * 3 : i * 3 + 3] - gt_ftpos[:, i * 3 : i * 3 + 3]),
axis=1,
)
ftpos_err[:, i] = per_finger_err
return ftpos_err
def get_reach_scaled_err(
finger_to_move_list, init_ft_pos, cur_ft_pos, cube_pos, cube_half_size
):
"""Given list of finger ids to move, compute average scaled error"""
total_scaled_err = 0
for finger_to_move in finger_to_move_list:
cur_ft_pos_i = cur_ft_pos[3 * finger_to_move : 3 * finger_to_move + 3]
cur_dist_to_obj = max(
np.linalg.norm(cur_ft_pos_i - cube_pos) - cube_half_size, 0
)
init_ft_pos_i = init_ft_pos[3 * finger_to_move : 3 * finger_to_move + 3]
init_dist_to_obj = np.linalg.norm(init_ft_pos_i - cube_pos) - cube_half_size
if init_dist_to_obj <= 0:
# To prevent divide-by-0 error
init_dist_to_obj = np.linalg.norm(init_ft_pos_i - cube_pos)
scaled_err = min(1, (cur_dist_to_obj / init_dist_to_obj))
total_scaled_err += scaled_err
avg_scaled_err = total_scaled_err / len(finger_to_move_list)
    return avg_scaled_err
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/data_utils.py
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import shutil
import hydra
from hydra import compose, initialize
import random
import numpy as np
import json
import torch
import os
from trifinger_simulation.trifinger_platform import ObjectType
import trifinger_vc.utils.train_utils as t_utils
from trifinger_vc.algos.bc_finetune import BCFinetune
from trifinger_vc.utils.sim_nn import Task
from trifinger_vc.trifinger_envs.action import ActionType
from trifinger_vc.trifinger_envs.cube_reach import CubeReachEnv
EXP_DIR = "./test_output"
def init_reach_env():
sim_time_step = 0.004
downsample_time_step = 0.2
traj_scale = 1
n_fingers_to_move = 1
a_dim = n_fingers_to_move * 3
task = "reach_cube"
state_type = "ftpos_obj"
# obj_state_type = "mae_vit_base_patch16_ego4d_210_epochs"
goal_type = "goal_none"
step_size = int(downsample_time_step / sim_time_step)
object_type = ObjectType.COLORED_CUBE
env = CubeReachEnv(
action_type=ActionType.TORQUE,
step_size=step_size,
visualization=False,
enable_cameras=True,
finger_type="trifingerpro",
camera_delay_steps=0,
time_step=sim_time_step,
object_type=object_type,
enable_shadows=False,
camera_view="default",
arena_color="default",
visual_observation=True,
run_rl_policy=False,
)
return env
def init_bc_algo(cfg):
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
# Load train and test trajectories
with open(cfg.task.demo_path, "r") as f:
traj_info = json.load(f)
train_traj_stats = traj_info["train_demo_stats"]
test_traj_stats = traj_info["test_demo_stats"]
# Get traj lists (read from demo files) and add to traj_info
traj_info["train_demos"] = t_utils.get_traj_list('./assets/',train_traj_stats, "pos")
traj_info["test_demos"] = t_utils.get_traj_list('./assets/',test_traj_stats, "pos")
bc = BCFinetune(cfg, traj_info, device)
bc.policy.train()
bc.encoder.train()
return bc
def init_reach_task(cfg, bc):
sim_params = list(bc.sim_dict.items())[0][1]
task = Task(
bc.conf.task.state_type,
bc.algo_conf.pretrained_rep, # obj_state_type
downsample_time_step=bc.traj_info["downsample_time_step"],
traj_scale=bc.traj_info["scale"],
goal_type=bc.conf.task.goal_type,
object_type=bc.traj_info["object_type"],
finger_type=bc.traj_info["finger_type"],
enable_shadows=sim_params["enable_shadows"],
camera_view=sim_params["camera_view"],
arena_color=sim_params["arena_color"],
task=bc.task,
n_fingers_to_move=bc.n_fingers_to_move,
)
return task
def setup_bc_tests():
hydra.core.global_hydra.GlobalHydra.instance().clear()
initialize(config_path="../config", job_name="tf_bc")
cfg = compose(config_name="test_bc")
cfg["task"]["n_outer_iter"] = 10
random.seed(cfg.seed)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(cfg.seed)
if not os.path.exists(EXP_DIR):
os.makedirs(EXP_DIR)
return cfg
def cleanup_bc_tests():
shutil.rmtree(EXP_DIR)
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/tf_test_util.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
"""
Run MoveCubePolicy to generate cube re-posititioning demos
"""
import sys
import os
import argparse
import numpy as np
import torch
import json
from trifinger_simulation.trifinger_platform import ObjectType
import trifinger_vc.utils.data_utils as d_utils
import trifinger_vc.utils.train_utils as t_utils
from trifinger_vc.utils.encoder_model import EncoderModel
from trifinger_vc.trifinger_envs.action import ActionType
from trifinger_vc.trifinger_envs.gym_cube_env import MoveCubeEnv
from trifinger_vc.trifinger_envs.cube_reach import CubeReachEnv
from trifinger_vc.utils.policy import DeterministicPolicy
from trifinger_vc.utils.model_utils import MODEL_NAMES
"""
Class to execute sequence of actions. Includes instance of the environment and the policy.
The main function is execute_policy which rolls out an episode using the policy and returns a dictionary containing the trajectory.
"""
class Task:
def __init__(
self,
state_type,
obj_state_type,
downsample_time_step=0.2,
traj_scale=1,
goal_type=None,
object_type="colored_cube",
finger_type="trifingerpro",
goal_visualization=False,
enable_shadows=False,
camera_view="default",
arena_color="default",
task="move_cube",
n_fingers_to_move=3,
):
if task == "reach_cube":
assert (
goal_type == "goal_none"
), f"Need to use algo.goal_type=goal_none when running {self.task} task"
if torch.cuda.is_available():
self.device = "cuda"
else:
self.device = "cpu"
self.sim_time_step = 0.004
self.downsample_time_step = downsample_time_step
self.traj_scale = traj_scale
self.n_fingers_to_move = n_fingers_to_move
self.a_dim = self.n_fingers_to_move * 3
self.task = task
self.state_type = state_type
self.obj_state_type = obj_state_type
self.goal_type = goal_type
step_size = int(self.downsample_time_step / self.sim_time_step)
if object_type == "colored_cube":
self.object_type = ObjectType.COLORED_CUBE
elif object_type == "green_cube":
self.object_type = ObjectType.GREEN_CUBE
else:
raise NameError
# Set env based on task
if self.task == "move_cube":
self.env = MoveCubeEnv(
goal_pose=None, # passing None to sample a random trajectory
action_type=ActionType.TORQUE,
step_size=step_size,
visualization=False,
goal_visualization=goal_visualization,
no_collisions=False,
enable_cameras=True,
finger_type=finger_type,
time_step=self.sim_time_step,
camera_delay_steps=0,
object_type=self.object_type,
enable_shadows=enable_shadows,
camera_view=camera_view,
arena_color=arena_color,
visual_observation=True,
run_rl_policy=False,
)
elif self.task == "reach_cube":
self.env = CubeReachEnv(
action_type=ActionType.TORQUE,
step_size=step_size,
visualization=False,
enable_cameras=True,
finger_type=finger_type,
camera_delay_steps=0,
time_step=self.sim_time_step,
object_type=self.object_type,
enable_shadows=enable_shadows,
camera_view=camera_view,
arena_color=arena_color,
visual_observation=True,
run_rl_policy=False,
)
else:
raise NameError
def close(self):
self.env.close()
def reset(self, expert_demo_dict, encoder=None):
# Reset environment with init and goal positions, scaled from cm -> m
obj_init_pos = expert_demo_dict["o_pos_cur"][0, :] / self.traj_scale
obj_init_ori = expert_demo_dict["o_ori_cur"][0, :]
# Use final object position in demo as goal
obj_goal_pos = expert_demo_dict["o_pos_cur"][-1, :] / self.traj_scale
obj_goal_ori = expert_demo_dict["o_ori_cur"][-1, :]
init_pose = {"position": obj_init_pos, "orientation": obj_init_ori}
goal_pose = {"position": obj_goal_pos, "orientation": obj_goal_ori}
qpos_init = expert_demo_dict["robot_pos"][0, :]
if self.task == "move_cube":
observation = self.env.reset(
goal_pose_dict=goal_pose,
init_pose_dict=init_pose,
init_robot_position=qpos_init,
)
elif self.task == "reach_cube":
observation = self.env.reset(
init_pose_dict=init_pose,
init_robot_position=qpos_init,
)
else:
raise NameError
# Set goals for bc state
# Image goal from demo
img_goal = expert_demo_dict["image_60"][-1] # TODO hardcoded image_60
if encoder is not None:
self.o_goal = encoder.encode_img(img_goal).to(self.device)
else:
self.o_goal = torch.flatten(torch.FloatTensor(img_goal).to(self.device))
# Object goal position, scaled to cm for policy
self.o_goal_pos = (
torch.FloatTensor(obj_goal_pos).to(self.device) * self.traj_scale
)
# Relative goal, scaled to cm for policy
self.o_goal_pos_rel = (
torch.FloatTensor(obj_goal_pos - obj_init_pos).to(self.device)
* self.traj_scale
)
return observation
def execute_policy(
self,
policy,
expert_demo_dict,
cam_name="image_60",
save_dir=None,
encoder=None,
epoch=-1,
):
# Reset env and update policy network
observation_list = []
observation = self.reset(expert_demo_dict, encoder=encoder)
observation_list.append(observation)
pred_actions = []
episode_done = False
action_counter = 0
expert_actions = expert_demo_dict["delta_ftpos"]
while not episode_done:
# Get bc input tensor from observation
# Scale observation by traj_scale, for bc policy
q_cur = observation["robot_position"]
ft_pos_cur = observation["ft_pos_cur"] * self.traj_scale
# TODO hardcoded using image_60
img = observation["camera_observation"]["camera60"]["image"]
# TODO set o_state based on self.obj_state_type
if self.obj_state_type in MODEL_NAMES:
assert encoder is not None
with torch.no_grad():
o_state = encoder.encode_img(img)
elif self.obj_state_type == "pos":
o_pos_cur = observation["object_position"] * self.traj_scale
o_state = torch.FloatTensor(o_pos_cur).to(self.device)
elif self.obj_state_type == "vertices":
# TODO
raise NotImplementedError
elif self.obj_state_type == "rgb":
o_state = torch.flatten(torch.FloatTensor(img).to(self.device))
else:
raise NameError
# Make obs for policy
ft_state = torch.FloatTensor(ft_pos_cur).to(self.device)
obs_dict = {
"ft_state": ft_state,
"o_state": o_state,
"o_goal": self.o_goal,
"o_goal_pos": self.o_goal_pos,
"o_goal_pos_rel": self.o_goal_pos_rel,
}
obs_tensor = t_utils.get_bc_obs_vec_from_obs_dict(
obs_dict, self.state_type, self.goal_type
)
# Get action from policy, convert back to meters
with torch.no_grad():
a = policy(obs_tensor)
a = policy.scale_to_range(a)
a = policy.clip_action(a)
pred_action = np.squeeze(a.cpu().detach().numpy()) / self.traj_scale
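            # Zero-pad the predicted per-finger deltas to all 9 fingertip dims (re-scaled by
            # traj_scale) for logging/plotting; the environment itself receives pred_action in meters.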
three_finger_action = np.zeros(9)
three_finger_action[: self.n_fingers_to_move * 3] = (
pred_action * self.traj_scale
)
pred_actions.append(three_finger_action)
# TODO test env w/ groundtruth actions - this works
# pred_action = expert_actions[action_counter, :] / self.traj_scale
# pred_actions.append(pred_action)
# action_counter += 1
observation, reward, episode_done, info = self.env.step(pred_action)
observation_list.append(observation)
d_utils.add_actions_to_obs(observation_list)
# Get traj_dict and downsample
traj_dict = d_utils.get_traj_dict_from_obs_list(
observation_list, scale=self.traj_scale
)
if save_dir is not None:
            t_utils.save_demo_to_file(
                save_dir, epoch, observation_list, expert_demo_dict, pred_actions
            )
return traj_dict
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/utils/sim_nn.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/algos/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import sys
import torch
import numpy as np
import logging
import trifinger_vc.utils.train_utils as t_utils
import trifinger_vc.utils.data_utils as d_utils
import trifinger_vc.control.cube_utils as c_utils
from trifinger_vc.utils.policy import construct_policy
from trifinger_vc.utils.dataset import BCFinetuneDataset
from trifinger_vc.utils.encoder_model import EncoderModel
from trifinger_vc.utils.sim_nn import Task
# A logger for this file
log = logging.getLogger(__name__)
class BCFinetune:
def __init__(self, conf, traj_info, device):
self.conf = conf
self.algo_conf = conf.algo
self.traj_info = traj_info
self.device = device
# Get task name
self.task = self.conf.task.name
        if self.task == "reach_cube":
            fingers_to_move = 1
        elif self.task == "move_cube":
            fingers_to_move = 3
        else:
            fingers_to_move = 0
# Make dataset and dataloader
train_dataset = BCFinetuneDataset(
self.traj_info["train_demo_stats"],
state_type=self.conf.task.state_type,
obj_state_type=self.algo_conf.pretrained_rep,
device=self.device,
augment_prob=self.algo_conf.image_aug_dict["augment_prob"],
times_to_use_demo=self.algo_conf.image_aug_dict["times_to_use_demo"],
jitter_brightness=self.algo_conf.image_aug_dict["jitter_brightness"],
jitter_contrast=self.algo_conf.image_aug_dict["jitter_contrast"],
jitter_saturation=self.algo_conf.image_aug_dict["jitter_saturation"],
jitter_hue=self.algo_conf.image_aug_dict["jitter_hue"],
shift_pad=self.algo_conf.image_aug_dict["shift_pad"],
fingers_to_move=fingers_to_move
)
self.train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=self.algo_conf.batch_size, shuffle=True
)
log.info(
f"Loaded training dataset with {train_dataset.n_augmented_samples} / {len(train_dataset)} augmented samples"
)
test_dataset = BCFinetuneDataset(
self.traj_info["test_demo_stats"],
state_type=self.conf.task.state_type,
obj_state_type=self.algo_conf.pretrained_rep,
device=self.device,
augment_prob=self.algo_conf.image_aug_dict["augment_prob"],
times_to_use_demo=1,
jitter_brightness=self.algo_conf.image_aug_dict["jitter_brightness"],
jitter_contrast=self.algo_conf.image_aug_dict["jitter_contrast"],
jitter_saturation=self.algo_conf.image_aug_dict["jitter_saturation"],
jitter_hue=self.algo_conf.image_aug_dict["jitter_hue"],
shift_pad=self.algo_conf.image_aug_dict["shift_pad"],
fingers_to_move=fingers_to_move
)
self.test_dataloader = torch.utils.data.DataLoader(
test_dataset, batch_size=self.algo_conf.batch_size, shuffle=True
)
self.n_fingers_to_move = train_dataset.n_fingers_to_move
log.info(
f"Loaded test dataset with {test_dataset.n_augmented_samples} / {len(test_dataset)} augmented samples"
)
# Encoder
self.encoder = EncoderModel(
pretrained_rep=self.algo_conf.pretrained_rep,
freeze_pretrained_rep=self.algo_conf.freeze_pretrained_rep,
rep_to_policy=self.conf.rep_to_policy,
).to(self.device)
log.info(f"Model:\n{self.encoder}")
if "obj" in self.conf.task.state_type:
self.state_type_key = "o_state"
elif self.conf.task.state_type == "goal_cond":
self.state_type_key = "o_goal"
# Policy
        self.policy = construct_policy(
            self.conf.task.state_type,
            train_dataset[0]["input"]["ft_state"].shape[0],
            self.encoder.pretrained_rep_dim,
            self.conf.task.goal_type,
            train_dataset.out_dim,
            self.traj_info["max_a"],
            self.device,
        )
self.policy.eval()
log.info(f"Policy:\n{self.policy}")
self.optimizer = torch.optim.AdamW(
[
{"params": self.encoder.parameters(), "lr": self.algo_conf.visual_lr},
{"params": self.policy.parameters(), "lr": self.algo_conf.lr},
],
lr=self.algo_conf.lr,
weight_decay=self.algo_conf.adam_weight_decay,
)
self.loss_fn = torch.nn.MSELoss()
# Load sim env for rollouts
self.run_sim = self.algo_conf.run_sim
all_sim_dict = {
"sim_env_demo": {
"enable_shadows": False,
"camera_view": "default",
"arena_color": "default",
},
"sim_env_real": {
"enable_shadows": True,
"camera_view": "real",
"arena_color": "real",
},
# "sim_env_shadows": {
# "enable_shadows": True,
# "camera_view": "default",
# "arena_color": "default",
# },
"sim_env_real_camera_view": {
"enable_shadows": False,
"camera_view": "real",
"arena_color": "default",
},
"sim_env_real_arena_color": {
"enable_shadows": False,
"camera_view": "default",
"arena_color": "real",
},
}
self.sim_dict = {}
for env_name in conf.eval_envs:
self.sim_dict[env_name] = all_sim_dict[env_name]
def train_one_epoch(self):
total_train_loss = 0.0
for i_batch, batch in enumerate(self.train_dataloader):
self.optimizer.zero_grad()
# First, pass img and goal img through encoder
if "obj" in self.conf.task.state_type:
latent_state = self.encoder(batch["input"]["rgb_img_preproc"])
batch["input"]["o_state"] = latent_state
if self.conf.task.goal_type == "goal_cond":
latent_goal = self.encoder(batch["input"]["rgb_img_preproc_goal"])
batch["input"]["o_goal"] = latent_goal
# Then, make observation pass through policy
obs_vec = t_utils.get_bc_obs_vec_from_obs_dict(
batch["input"], self.conf.task.state_type, self.conf.task.goal_type
)
pred_actions = self.policy(obs_vec)
loss = self.loss_fn(pred_actions, batch["output"]["action"])
loss.backward()
self.optimizer.step()
total_train_loss += loss.item()
return total_train_loss
    def get_latent_rep(self, batch_input):
        # "o_state" when the state type contains "obj"; "o_goal" for goal_cond
        if self.state_type_key == "o_state":
            latent = self.encoder(batch_input["rgb_img_preproc"])
        elif self.state_type_key == "o_goal":
            latent = self.encoder(batch_input["rgb_img_preproc_goal"])
        else:
            raise ValueError(f"Unknown state_type_key: {self.state_type_key}")
        return latent
def train(self, model_data_dir=None, no_wandb=False):
# Make logging directories
ckpts_dir = os.path.join(model_data_dir, "ckpts")
sim_dir = os.path.join(model_data_dir, "sim")
if not os.path.exists(ckpts_dir):
os.makedirs(ckpts_dir)
if not os.path.exists(sim_dir):
os.makedirs(sim_dir)
# Search for most recent ckpt in ckpts_dir
ckpt_pth_to_load, start_epoch = t_utils.find_most_recent_ckpt(ckpts_dir)
if ckpt_pth_to_load is not None:
ckpt_info = torch.load(ckpt_pth_to_load)
self.optimizer.load_state_dict(ckpt_info["optimizer_state_dict"])
self.policy.load_state_dict(ckpt_info["policy"])
self.encoder.load_state_dict(ckpt_info["encoder"])
log.info(f"Loading state from {ckpt_pth_to_load}.")
# initializing dictionary that keeps track of max values
# Todo - if we load from checkpoint this max dict needs to be loaded as well
self.max_dict = {}
for sim_env_name in self.sim_dict.keys():
self.max_dict[sim_env_name] = {"train": {}, "test": {}}
# Sim rollout
if self.run_sim:
sim_log_dict = self.sim_rollout(
sim_dir,
start_epoch,
max_demo_per_diff=self.algo_conf.max_demo_per_diff,
)
log.info(sim_log_dict)
if not no_wandb:
all_dict = {**sim_log_dict}
t_utils.plot_loss(all_dict, start_epoch + 1)
for outer_i in range(start_epoch, self.conf.task.n_outer_iter):
# Update policy network
self.policy.train()
self.encoder.train()
total_train_loss = self.train_one_epoch()
avg_train_loss = total_train_loss / len(self.train_dataloader)
# Test
self.policy.eval()
self.encoder.eval()
total_test_loss = 0.0
with torch.no_grad():
for i_batch, batch in enumerate(self.test_dataloader):
self.optimizer.zero_grad()
# First, pass img and goal img through encoder
batch["input"][self.state_type_key] = self.get_latent_rep(batch["input"])
if self.conf.task.goal_type == "goal_cond":
latent_goal = self.encoder(batch["input"]["rgb_img_preproc_goal"])
batch["input"]["o_goal"] = latent_goal
# Then, make observation pass through policy
obs_vec = t_utils.get_bc_obs_vec_from_obs_dict(
batch["input"],
self.conf.task.state_type,
self.conf.task.goal_type,
)
pred_actions = self.policy(obs_vec)
loss = self.loss_fn(pred_actions, batch["output"]["action"])
total_test_loss += loss.item()
avg_test_loss = total_test_loss / (i_batch + 1)
loss_dict = {
"train_loss": avg_train_loss,
"test_loss": avg_test_loss,
"epoch": outer_i,
}
sim_log_dict = {}
log.info(f"Epoch: {outer_i}, loss: {avg_train_loss}")
if (outer_i + 1) % self.conf.task.n_epoch_every_log == 0:
# Sim rollout
if self.run_sim:
sim_log_dict = self.sim_rollout(
sim_dir,
outer_i,
max_demo_per_diff=self.algo_conf.max_demo_per_diff,
)
log.info(sim_log_dict)
if not no_wandb:
all_dict = {**loss_dict, **sim_log_dict}
t_utils.plot_loss(all_dict, outer_i + 1)
torch.save(
{
"loss_train": avg_train_loss,
"loss_test": avg_test_loss,
"policy": self.policy.state_dict(),
"encoder": self.encoder.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
"conf": self.algo_conf,
"in_dim": self.policy.in_dim,
"out_dim": self.policy.out_dim,
},
f=f"{ckpts_dir}/epoch_{outer_i+1}_ckpt.pth",
)
def sim_rollout(self, sim_dir, outer_i, max_demo_per_diff=10):
"""Rollout policies for train and test demos"""
log_dict = {}
for sim_env_name in self.sim_dict.keys():
log_dict[sim_env_name] = {"train": {}, "test": {}}
for sim_env_name, sim_params in self.sim_dict.items():
sim = Task(
self.conf.task.state_type,
self.algo_conf.pretrained_rep, # obj_state_type
downsample_time_step=self.traj_info["downsample_time_step"],
traj_scale=self.traj_info["scale"],
goal_type=self.conf.task.goal_type,
object_type=self.traj_info["object_type"],
finger_type=self.traj_info["finger_type"],
enable_shadows=sim_params["enable_shadows"],
camera_view=sim_params["camera_view"],
arena_color=sim_params["arena_color"],
task=self.task,
n_fingers_to_move=self.n_fingers_to_move,
)
for split_name in ["train", "test"]:
traj_list = self.traj_info[f"{split_name}_demos"]
plot_count_dict = {}
totals_dict = {}
for demo_i, demo in enumerate(traj_list):
diff = self.traj_info[f"{split_name}_demo_stats"][demo_i]["diff"]
traj_i = self.traj_info[f"{split_name}_demo_stats"][demo_i]["id"]
if diff in plot_count_dict:
if plot_count_dict[diff] >= max_demo_per_diff:
continue
else:
plot_count_dict[diff] += 1
else:
plot_count_dict[diff] = 1
log.info(
f"Rolling out demo (diff {diff} | id: {traj_i}) for split {split_name} in sim env {sim_env_name}"
)
traj_label = f"diff-{diff}_traj-{traj_i}"
traj_sim_dir = os.path.join(
sim_dir, sim_env_name, split_name, traj_label
)
if not os.path.exists(traj_sim_dir):
os.makedirs(traj_sim_dir)
sim_traj_dict = sim.execute_policy(
self.policy,
demo,
self.policy.state_dict(),
save_dir=traj_sim_dir,
encoder=self.encoder,
epoch=outer_i,
)
# Save gif of sim rollout
d_utils.save_gif(
sim_traj_dict["image_60"],
os.path.join(
traj_sim_dir, f"viz_{traj_label}_epoch_{outer_i+1}.gif"
),
)
# Compute final error for ftpos of each finger
final_sim_ftpos = np.expand_dims(sim_traj_dict["ft_pos_cur"][-1], 0)
final_demo_ftpos = np.expand_dims(demo["ft_pos_cur"][-1], 0)
final_ftpos_dist = d_utils.get_per_finger_ftpos_err(
final_demo_ftpos, final_sim_ftpos, fnum=3
)
final_ftpos_dist = np.squeeze(final_ftpos_dist)
# Achieved object distance to goal
sim_obj_pos_err = sim_traj_dict["position_error"][-1]
# Compute scaled error and reward, based on task
scaled_reward = sim_traj_dict["scaled_success"][-1]
scaled_err = 1 - scaled_reward
# Per traj log
log_dict[sim_env_name][split_name][traj_label] = {
"sim_obj_pos_err": sim_obj_pos_err,
"scaled_err": scaled_err,
"scaled_reward": scaled_reward,
"final_ftpos_dist_0": final_ftpos_dist[0],
"final_ftpos_dist_1": final_ftpos_dist[1],
"final_ftpos_dist_2": final_ftpos_dist[2],
}
if diff in totals_dict:
totals_dict[diff]["sim_obj_pos_err"] += sim_obj_pos_err
totals_dict[diff]["scaled_err"] += scaled_err
totals_dict[diff]["scaled_reward"] += scaled_reward
totals_dict[diff]["final_ftpos_dist_0"] += final_ftpos_dist[0]
totals_dict[diff]["final_ftpos_dist_1"] += final_ftpos_dist[1]
totals_dict[diff]["final_ftpos_dist_2"] += final_ftpos_dist[2]
else:
totals_dict[diff] = {
"sim_obj_pos_err": sim_obj_pos_err,
"scaled_err": scaled_err,
"scaled_reward": scaled_reward,
"final_ftpos_dist_0": final_ftpos_dist[0],
"final_ftpos_dist_1": final_ftpos_dist[1],
"final_ftpos_dist_2": final_ftpos_dist[2],
}
# Log avg obj pos err for each diff
for diff, per_diff_totals_dict in totals_dict.items():
if (
f"diff-{diff}_max_avg_scaled_reward"
not in self.max_dict[sim_env_name][split_name].keys()
):
self.max_dict[sim_env_name][split_name][
f"diff-{diff}_max_avg_scaled_reward"
] = 0.0
for key, total in per_diff_totals_dict.items():
log_dict[sim_env_name][split_name][f"diff-{diff}_avg_{key}"] = (
total / plot_count_dict[diff]
)
curr_avg_scaled_reward = log_dict[sim_env_name][split_name][
f"diff-{diff}_avg_scaled_reward"
]
if (
curr_avg_scaled_reward
> self.max_dict[sim_env_name][split_name][
f"diff-{diff}_max_avg_scaled_reward"
]
):
self.max_dict[sim_env_name][split_name][
f"diff-{diff}_max_avg_scaled_reward"
] = curr_avg_scaled_reward
log_dict[sim_env_name][split_name][
f"diff-{diff}_max_avg_scaled_reward"
] = curr_avg_scaled_reward
else:
log_dict[sim_env_name][split_name][
f"diff-{diff}_max_avg_scaled_reward"
] = self.max_dict[sim_env_name][split_name][
f"diff-{diff}_max_avg_scaled_reward"
]
sim.close()
return log_dict
    def __del__(self):
        # "sim" is created and closed locally in sim_rollout(), so guard here
        if hasattr(self, "sim"):
            del self.sim
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/algos/bc_finetune.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation
from trifinger_simulation.tasks import move_cube as move_cube_task
import trifinger_vc.control.finger_utils as f_utils
CUBE_HALF_SIZE = move_cube_task._CUBE_WIDTH / 2
# Information about object faces given face_id
OBJ_FACES_INFO = {
1: {
"center_param": np.array([0.0, -1.0, 0.0]),
"face_down_default_quat": np.array([0.707, 0, 0, 0.707]),
"adjacent_faces": [6, 4, 3, 5],
"opposite_face": 2,
"up_axis": np.array([0.0, 1.0, 0.0]), # UP axis when this face is ground face
},
2: {
"center_param": np.array([0.0, 1.0, 0.0]),
"face_down_default_quat": np.array([-0.707, 0, 0, 0.707]),
"adjacent_faces": [6, 4, 3, 5],
"opposite_face": 1,
"up_axis": np.array([0.0, -1.0, 0.0]),
},
3: {
"center_param": np.array([1.0, 0.0, 0.0]),
"face_down_default_quat": np.array([0, 0.707, 0, 0.707]),
"adjacent_faces": [1, 2, 4, 6],
"opposite_face": 5,
"up_axis": np.array([-1.0, 0.0, 0.0]),
},
4: {
"center_param": np.array([0.0, 0.0, 1.0]),
"face_down_default_quat": np.array([0, 1, 0, 0]),
"adjacent_faces": [1, 2, 3, 5],
"opposite_face": 6,
"up_axis": np.array([0.0, 0.0, -1.0]),
},
5: {
"center_param": np.array([-1.0, 0.0, 0.0]),
"face_down_default_quat": np.array([0, -0.707, 0, 0.707]),
"adjacent_faces": [1, 2, 4, 6],
"opposite_face": 3,
"up_axis": np.array([1.0, 0.0, 0.0]),
},
6: {
"center_param": np.array([0.0, 0.0, -1.0]),
"face_down_default_quat": np.array([0, 0, 0, 1]),
"adjacent_faces": [1, 2, 3, 5],
"opposite_face": 4,
"up_axis": np.array([0.0, 0.0, 1.0]),
},
}
def get_cp_pos_wf_from_cp_param(
cp_param, obj_pose, cube_half_size=CUBE_HALF_SIZE, ft_radius=0
):
"""
Compute contact point position in world frame
Inputs:
cp_param: Contact point param [px, py, pz]
    obj_pose: Object pose dict with "position" and "orientation", in world frame
"""
cube_pos_wf = obj_pose["position"]
cube_quat_wf = obj_pose["orientation"]
cp = get_cp_of_from_cp_param(cp_param, cube_half_size, ft_radius=ft_radius)
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
return rotation.apply(cp["pos_of"]) + translation
def get_cp_pos_wf_from_cp_params(
cp_params, obj_pose, cube_half_size=CUBE_HALF_SIZE, ft_radius=0
):
"""
Get contact point positions in world frame from cp_params
"""
# Get contact points in wf
fingertip_goal_list = []
for i in range(len(cp_params)):
# for i in range(cp_params.shape[0]):
fingertip_goal_list.append(
get_cp_pos_wf_from_cp_param(
cp_params[i], obj_pose, cube_half_size, ft_radius=ft_radius
)
)
return fingertip_goal_list
def get_cp_of_from_cp_param(cp_param, cube_half_size=CUBE_HALF_SIZE, ft_radius=0):
"""
Compute contact point position in object frame
Inputs:
cp_param: Contact point param [px, py, pz]
"""
effective_cube_half_size = cube_half_size + ft_radius
obj_shape = (
effective_cube_half_size,
effective_cube_half_size,
effective_cube_half_size,
)
cp_of = []
# Get cp position in OF
for i in range(3):
cp_of.append(-obj_shape[i] + (cp_param[i] + 1) * obj_shape[i])
cp_of = np.asarray(cp_of)
x_param = cp_param[0]
y_param = cp_param[1]
z_param = cp_param[2]
# For now, just hard code quat
if y_param == -1:
quat = (np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2)
elif y_param == 1:
quat = (np.sqrt(2) / 2, 0, 0, -np.sqrt(2) / 2)
elif x_param == 1:
quat = (0, 0, 1, 0)
elif z_param == 1:
quat = (np.sqrt(2) / 2, 0, np.sqrt(2) / 2, 0)
elif x_param == -1:
quat = (1, 0, 0, 0)
elif z_param == -1:
quat = (np.sqrt(2) / 2, 0, -np.sqrt(2) / 2, 0)
cp = {"pos_of": cp_of, "quat_of": quat}
return cp
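# Illustrative example (assumed values, not from the original source): the center
# of the -y face, cp_param = [0, -1, 0], maps to pos_of = [0, -cube_half_size, 0]
# with quat_of = (sqrt(2)/2, 0, 0, sqrt(2)/2).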
def get_face_from_cp_param(cp_param):
"""
Get face id on cube, given cp_param
cp_param: [x,y,z]
"""
x_param = cp_param[0]
y_param = cp_param[1]
z_param = cp_param[2]
# For now, just hard code quat
if y_param == -1:
face = 1
elif y_param == 1:
face = 2
elif x_param == 1:
face = 3
elif z_param == 1:
face = 4
elif x_param == -1:
face = 5
elif z_param == -1:
face = 6
return face
def get_cp_params(obj_pose, finger_type):
"""
Get contact points on cube for each finger
Assign closest cube face to each finger
    Since we are lifting the object, don't worry about the wf z-axis, just care about the wf xy-plane
"""
# face that is touching the ground
ground_face = get_closest_ground_face(obj_pose)
# get finger base positions
finger_base_positions = f_utils.get_finger_base_positions(finger_type)
# Transform finger base positions to object frame
base_pos_list_of = []
for f_wf in finger_base_positions:
f_of = get_of_from_wf(f_wf, obj_pose)
base_pos_list_of.append(f_of)
# Find distance from x axis and y axis, and store in xy_distances
# Need some additional logic to prevent multiple fingers from being assigned to same face
x_axis = np.array([1, 0])
y_axis = np.array([0, 1])
# Object frame axis corresponding to plane parallel to ground plane
x_ind, y_ind = _get_parallel_ground_plane_xy(ground_face)
xy_distances = np.zeros(
(3, 2)
) # Row corresponds to a finger, columns are x and y axis distances
for f_i, f_of in enumerate(base_pos_list_of):
point_in_plane = np.array(
[f_of[0, x_ind], f_of[0, y_ind]]
) # Ignore dimension of point that's not in the plane
x_dist = _get_distance_from_pt_2_line(x_axis, np.array([0, 0]), point_in_plane)
y_dist = _get_distance_from_pt_2_line(y_axis, np.array([0, 0]), point_in_plane)
xy_distances[f_i, 0] = x_dist
xy_distances[f_i, 1] = y_dist
# Do the face assignment - greedy approach (assigned closest fingers first)
free_faces = OBJ_FACES_INFO[ground_face][
"adjacent_faces"
].copy() # List of face ids that haven't been assigned yet
assigned_faces = np.zeros(3)
for i in range(3):
# Find indices max element in array
max_ind = np.unravel_index(np.argmax(xy_distances), xy_distances.shape)
curr_finger_id = max_ind[0]
furthest_axis = max_ind[1]
# print("current finger {}".format(curr_finger_id))
# Do the assignment
x_dist = xy_distances[curr_finger_id, 0]
y_dist = xy_distances[curr_finger_id, 1]
if furthest_axis == 0: # distance to x axis is greater than to y axis
if base_pos_list_of[curr_finger_id][0, y_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][1] # 2
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][0] # 1
else:
if base_pos_list_of[curr_finger_id][0, x_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][2] # 3
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][3] # 5
# print("first choice face: {}".format(face))
# Handle faces that may already be assigned
if face not in free_faces:
alternate_axis = abs(furthest_axis - 1)
if alternate_axis == 0:
if base_pos_list_of[curr_finger_id][0, y_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][1] # 2
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][0] # 1
else:
if base_pos_list_of[curr_finger_id][0, x_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][2] # 3
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][3] # 5
# print("second choice face: {}".format(face))
# If backup face isn't free, assign random face from free_faces
if face not in free_faces:
# print("random")
# print(xy_distances[curr_finger_id, :])
face = free_faces[0]
assigned_faces[curr_finger_id] = face
# Replace row with -np.inf so we can assign other fingers
xy_distances[curr_finger_id, :] = -np.inf
# Remove face from free_faces
free_faces.remove(face)
# print(assigned_faces)
# Set contact point params
cp_params = []
for i in range(3):
face = assigned_faces[i]
param = OBJ_FACES_INFO[face]["center_param"].copy()
# print(i)
# print(param)
cp_params.append(param)
# print("assigning cp params for lifting")
# print(cp_params)
return cp_params
def get_closest_ground_face(obj_pose):
"""
Determine face that is closest to ground
"""
min_z = np.inf
min_face = None
for i in range(1, 7):
c = OBJ_FACES_INFO[i]["center_param"].copy()
c_wf = get_wf_from_of(c, obj_pose)
if c_wf[2] < min_z:
min_z = c_wf[2]
min_face = i
return min_face
def get_vertices_wf(obj_pose):
"""Get vertices of cube in world frame, given obj_pose in world frame"""
v_of_dict = get_vertices_of()
v_wf_dict = {}
# TODO fill this in
for k, v_of in v_of_dict.items():
v_wf = get_wf_from_of(v_of, obj_pose)
v_wf_dict[k] = v_wf
return v_wf_dict
def get_vertices_of():
"""Get vertices of cube in object frame"""
v = {
0: np.array([-1, -1, -1]) * CUBE_HALF_SIZE,
1: np.array([1, -1, -1]) * CUBE_HALF_SIZE,
2: np.array([-1, -1, 1]) * CUBE_HALF_SIZE,
3: np.array([1, -1, 1]) * CUBE_HALF_SIZE,
4: np.array([-1, 1, -1]) * CUBE_HALF_SIZE,
5: np.array([1, 1, -1]) * CUBE_HALF_SIZE,
6: np.array([-1, 1, 1]) * CUBE_HALF_SIZE,
7: np.array([1, 1, 1]) * CUBE_HALF_SIZE,
}
return v
##############################################################################
# Transformation functions
##############################################################################
def get_wf_from_of(p, obj_pose):
"""
    Transform point p from object frame to world frame, given object pose
"""
cube_pos_wf = obj_pose["position"]
cube_quat_wf = obj_pose["orientation"]
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
return rotation.apply(p) + translation
def get_of_from_wf(p, obj_pose):
"""
    Transform point p from world frame to object frame, given object pose
"""
cube_pos_wf = obj_pose["position"]
cube_quat_wf = obj_pose["orientation"]
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
rotation_inv = rotation.inv()
translation_inv = -rotation_inv.apply(translation)
return rotation_inv.apply(p) + translation_inv
##############################################################################
# Non-cube specific functions TODO move somewhere else
##############################################################################
def lin_interp_pos_two_points(x_cur, x_des, T, time_step=0.001):
"""
Linearly interpolate x_cur, x_des positions to get waypoints
No orientation
args:
x_cur: start position
x_des: end position
T: duration of trajectory, in seconds
time_step: timestep between waypoints (simulation timestep)
"""
delta_x = x_des - x_cur
dx = delta_x / T
num_points = int(T / time_step)
x_traj = np.linspace(x_cur, x_des, num=num_points)
dx_traj = np.tile(dx, (num_points, 1))
return x_traj, dx_traj
def lin_interp_pos(x, time_step_in, time_step_out=0.001):
"""
Linearly interpolate between all position waypoints in x (between each row) [T, dim]
"""
T = x.shape[0]
interp_n = int(
time_step_in / time_step_out
) # Number of interpolation points between two waypoints
# Linearly interpolate between each position waypoint (row) and force waypoint
# Initial row indices
row_ind_in = np.arange(T)
# Output row coordinates
row_coord_out = np.linspace(0, T - 1, interp_n * (T - 1) + T)
# scipy.interpolate.interp1d instance
itp_x = interp1d(row_ind_in, x, axis=0)
x_interpolated = itp_x(row_coord_out)
return x_interpolated
def lin_interp_pos_traj(x_in, time_step_in, time_step_out):
"""
Linearly interpolate between all position waypoints in x (between each row) [T, dim]
Output position and velocity trajectories
"""
x_pos_traj = lin_interp_pos(x_in, time_step_in, time_step_out)
x_vel_traj = np.zeros(x_pos_traj.shape)
for i in range(x_pos_traj.shape[0] - 1):
v = (x_pos_traj[i + 1, :] - x_pos_traj[i, :]) / time_step_out
x_vel_traj[i, :] = v
return x_pos_traj, x_vel_traj
##############################################################################
# Private functions
##############################################################################
def _get_parallel_ground_plane_xy(ground_face):
"""
Given a ground face id, get the axes that are parallel to the floor
"""
if ground_face in [1, 2]:
x_ind = 0
y_ind = 2
if ground_face in [3, 5]:
x_ind = 2
y_ind = 1
if ground_face in [4, 6]:
x_ind = 0
y_ind = 1
return x_ind, y_ind
def _get_distance_from_pt_2_line(a, b, p):
"""
Get distance from point to line (in 2D)
Inputs:
a, b: points on line
p: standalone point, for which we want to compute its distance to line
"""
a = np.squeeze(a)
b = np.squeeze(b)
p = np.squeeze(p)
ba = b - a
ap = a - p
c = ba * (np.dot(ap, ba) / np.dot(ba, ba))
d = ap - c
return np.sqrt(np.dot(d, d))
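# Minimal sanity-check sketch (illustrative, not part of the original module);
# exercises the interpolation and distance helpers above when run directly.
if __name__ == "__main__":
    # Distance from point (1, 2) to the x-axis (given by two points on it) is 2.0
    _dist = _get_distance_from_pt_2_line(
        np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([1.0, 2.0])
    )
    assert np.isclose(_dist, 2.0)
    # Interpolate a fingertip position over 1 s with 0.25 s steps -> 4 waypoints
    _x_traj, _dx_traj = lin_interp_pos_two_points(
        np.zeros(3), np.ones(3), T=1.0, time_step=0.25
    )
    assert _x_traj.shape == (4, 3) and _dx_traj.shape == (4, 3)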
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/control/cube_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
class ImpedanceController:
"""
Impedance controller for TriFinger robot
args:
kinematics: CustomPinocchioUtils() class for kinematics
max_x_err: maximum magnitude of position error (for velocity clipping)
kp, kv: joint space gains
Nf: number of fingers in hand
"""
def __init__(
self,
kinematics,
max_x_err=0.01,
kp=[300] * 9,
kv=[5] * 9,
Nf=3,
):
self.kp = kp
self.kv = kv
self.max_x_err = max_x_err
self.kinematics = kinematics
        self.Nf = Nf  # Number of fingers in hand
self.Nq = self.Nf * 3 # Number of joints in hand (3 per finger)
self._dx_mag_avg = np.ones(
self.Nf
) # Init linear velocity mag average for moving average per finger
# For observation
self.x_des = np.ones(9) * np.nan
self.dx_des = np.ones(9) * np.nan
self.x_cur = np.ones(9) * np.nan
self.dx_cur = np.ones(9) * np.nan
self.torque = np.ones(9) * np.nan
def get_command_torque(self, x_des, dx_des, q_cur, dq_cur, f_des=None):
"""
        Compute joint torques to move all fingers to x_des, dx_des
args:
x_des: Desired fingertip position, world frame (NOT CONTROLLING ORIENTATION RIGHT NOW) [9,]
dx_des: Desired fingertip lin vel, world frame (NOT CONTROLLING ORIENTATION RIGHT NOW) [9,]
q_cur: Current joint angles of all fingers [Nq,]
dq_cur: Current joint velocities [Nq,]
f_des: fingertip forces in world frame [9,]
return:
torque: joint torques [9,]
"""
Kp = np.diag(self.kp)
Kv = np.diag(self.kv)
# Linear Jacobian for hand [9, Nq] (stacked finger linear Jacobians [3, Nq])
J_lin = self.kinematics.get_hand_lin_jacobian(q_cur)
g = self.kinematics.get_hand_g(q_cur, J_lin) # Joint space gravity vector
x_des = np.expand_dims(x_des, 1)
# Compute current fingertip position
x_cur = np.array(self.kinematics.forward_kinematics(q_cur)).reshape(
(self.Nq, 1)
)
delta_x = np.array(x_des) - np.array(x_cur)
# print("Current x: {}".format(x_cur))
# print("Desired x: {}".format(x_des))
delta_x_mags = np.linalg.norm(delta_x.reshape((self.Nf, 3)), axis=1)
# print("Delta: {}".format(delta_x_mags))
# Cap delta_x magnitude to self.max_x_err
if self.max_x_err is not None:
for f_id in range(3):
delta_x_i = delta_x[f_id * 3 : f_id * 3 + 3, :]
if np.linalg.norm(delta_x_i) > self.max_x_err:
unit = delta_x_i / np.linalg.norm(delta_x_i)
delta_x[f_id * 3 : f_id * 3 + 3, :] = unit * self.max_x_err
# Get current fingertip velocity
dx_cur = J_lin @ np.expand_dims(np.array(dq_cur), 1)
delta_dx = np.expand_dims(np.array(dx_des), 1) - np.array(dx_cur)
F_star = Kv @ delta_dx + Kp @ delta_x
dx_mags = np.linalg.norm(dx_cur.reshape((self.Nf, 3)), axis=1)
# print("dx mag: ", dx_mags)
torque = np.squeeze(J_lin.T @ F_star) + g
# TODO Feed-forward term for desired forces
if f_des is not None:
torque += J_lin.T @ f_des
# For observation
self.x_des = np.squeeze(x_des)
self.dx_des = np.squeeze(dx_des)
self.x_cur = np.squeeze(x_cur)
self.dx_cur = np.squeeze(dx_cur)
self.torque = np.squeeze(torque)
return torque
def is_avg_dx_converged(self, q_cur, dq_cur, epsilon=0.001):
"""
Return: True if linear velocity magnitude moving average of each finger has converged to < epsilon
"""
J_lin = self.kinematics.get_hand_lin_jacobian(
q_cur
) # Linear Jacobian for hand [9, Nq]
# Get current fingertip velocity
dx_cur = J_lin @ np.expand_dims(np.array(dq_cur), 1)
all_fingers_converged = True
for f_id in range(self.Nf):
dx_cur_i = dx_cur[f_id * 3 : f_id * 3 + 3]
self._dx_mag_avg[f_id] = (
0.5 * np.linalg.norm(dx_cur_i) + 0.5 * self._dx_mag_avg[f_id]
)
all_fingers_converged = (
all_fingers_converged and self._dx_mag_avg[f_id] < epsilon
)
return all_fingers_converged
def get_observation(self):
"""Create and return observation"""
obs = {
"ft_pos_des": self.x_des,
"ft_vel_des": self.dx_des,
"ft_pos_cur": self.x_cur,
"ft_vel_cur": self.dx_cur,
"kp": self.kp,
"kv": self.kv,
"max_x_err": self.max_x_err,
"torque": self.torque,
}
return obs
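# Illustrative usage sketch (names such as `kinematics`, `q_cur`, and `dq_cur` are
# assumptions, not from the original source): one step of the torque control loop.
#
#   controller = ImpedanceController(kinematics, kp=[300] * 9, kv=[5] * 9)
#   torque = controller.get_command_torque(x_des, dx_des, q_cur, dq_cur)  # [9,]
#   obs = controller.get_observation()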
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/control/impedance_controller.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pinocchio
from trifinger_simulation.pinocchio_utils import Kinematics
class CustomPinocchioUtils(Kinematics):
"""
Consists of kinematic methods for the finger platform.
"""
m1 = 0.2
m2 = 0.2
m3 = 0.01
ms = [m1, m2, m3]
tip_m = 0.02
I1 = np.zeros((3, 3))
# np.fill_diagonal(I1,[3.533e-4,5.333e-5,3.533e-4])
    np.fill_diagonal(I1, [4.59e-4, 6.93e-5, 4.59e-4])
I2 = np.zeros((3, 3))
# np.fill_diagonal(I2,[3.533e-4,3.533e-4,5.333e-5])
np.fill_diagonal(I2, [4.41e-4, 4.41e-4, 6.67e-5])
I3 = np.zeros((3, 3))
# np.fill_diagonal(I3,[1.667e-5,1.667e-5,6.667e-7])
np.fill_diagonal(I3, [3.5e-5, 3.5e-5, 1.4e-6])
Is = [I1, I2, I3]
def __init__(self, finger_urdf_path, tip_link_names, link_names):
"""
Initializes the finger model on which control's to be performed.
Args:
finger (SimFinger): An instance of the SimFinger class
link_names: finger link names
"""
super().__init__(finger_urdf_path, tip_link_names)
self.link_ids = [
self.robot_model.getFrameId(link_name) for link_name in link_names
]
def get_hand_lin_jacobian(self, q):
J = np.zeros((9, 9))
for f_id in range(3):
J_i = self.get_tip_link_jacobian(f_id, q) # [6, 9]
J[f_id * 3 : f_id * 3 + 3, :] = J_i[:3, :]
return J
def get_tip_link_jacobian(self, finger_id, q):
"""
Get Jacobian for tip link of specified finger
All other columns are 0
"""
pinocchio.computeJointJacobians(
self.robot_model,
self.data,
q,
)
# pinocchio.framesKinematics(
# self.robot_model, self.data, q,
# )
pinocchio.framesForwardKinematics(
self.robot_model,
self.data,
q,
)
frame_id = self.tip_link_ids[finger_id]
Ji = pinocchio.getFrameJacobian(
self.robot_model,
self.data,
frame_id,
pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,
)
# print(self.robot_model.frames[frame_id].placement)
# print(self.data.oMf[frame_id].rotation)
return Ji
def get_any_link_jacobian(self, frame_id, q):
"""
Get Jacobian for link with frame_id
"""
pinocchio.computeJointJacobians(
self.robot_model,
self.data,
q,
)
pinocchio.framesForwardKinematics(
self.robot_model,
self.data,
q,
)
Ji = pinocchio.getFrameJacobian(
self.robot_model,
self.data,
frame_id,
pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,
)
return Ji # 6x9
def get_finger_g(self, f_id, q, Jvi):
"""Get joint space gravity vector for single finger finger_id"""
g = np.zeros(9)
grav = np.array([0, 0, -9.81])
# Get g for each link in finger
for j, l_id in enumerate(self.link_ids[f_id * 3 : f_id * 3 + 3]):
Jj = self.get_any_link_jacobian(l_id, q)
Jjv = Jj[:3, :]
g -= self.ms[j] * Jjv.T @ grav * 0.33
Jj = self.get_any_link_jacobian(self.tip_link_ids[f_id], q)
Jjv = Jj[:3, :]
g -= self.tip_m * Jjv.T @ grav * 0.33
return g
def get_hand_g(self, q_cur, J):
"""Get joint space gravity vector for 3-fingered hand"""
return self.inverse_dyn(q_cur)
# This doesn't work well, fingers droop slightly when trying to hold position. Depends on accurate model.
# g = np.zeros(9)
# for f_id in range(3):
# g_i = self.get_finger_g(f_id, q_cur, J)
# g += g_i
# return g
def inverse_dyn(self, q):
# q = pinocchio.neutral(self.robot_model)
v = pinocchio.utils.zero(self.robot_model.nv)
a = pinocchio.utils.zero(self.robot_model.nv)
tau = pinocchio.rnea(self.robot_model, self.data, q, v, a)
return tau
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/control/custom_pinocchio_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
DIST_FRAME_TO_SURFACE = 0.01
def get_ft_radius(finger_type):
# These are offsets to cube half size for computing contact points
# Tuned empirically; do not match actual fingertip dimensions
if finger_type in ["trifingeredu", "trifingernyu", "trifinger_meta"]:
ft_radius = 0.007 # EDU
elif finger_type == "trifingerpro":
ft_radius = 0.008 # PRO
else:
raise NameError("Invalid finger_type")
return ft_radius
def get_finger_base_positions(finger_type):
"""
    Initial fingertip positions, parameterized by angle on the arena (tuned empirically)
These values are critical for good contact point assignment
"""
if finger_type in ["trifingeredu", "trifingernyu", "trifinger_meta"]:
theta_0 = 90
theta_1 = 350
theta_2 = 220
elif finger_type == "trifingerpro":
theta_0 = 90
theta_1 = 310
theta_2 = 200
else:
raise NameError("Invalid finger_type")
r = 0.15
finger_base_positions = [
np.array(
[
[
np.cos(theta_0 * (np.pi / 180)) * r,
np.sin(theta_0 * (np.pi / 180)) * r,
0,
]
]
),
np.array(
[
[
np.cos(theta_1 * (np.pi / 180)) * r,
np.sin(theta_1 * (np.pi / 180)) * r,
0,
]
]
),
np.array(
[
[
np.cos(theta_2 * (np.pi / 180)) * r,
np.sin(theta_2 * (np.pi / 180)) * r,
0,
]
]
),
]
return finger_base_positions
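# Minimal sanity-check sketch (illustrative, not part of the original module).
if __name__ == "__main__":
    positions = get_finger_base_positions("trifingerpro")
    assert len(positions) == 3 and positions[0].shape == (1, 3)
    print("trifingerpro ft_radius:", get_ft_radius("trifingerpro"))  # 0.008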
| eai-vc-main | cortexbench/trifinger_vc/src/trifinger_vc/control/finger_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from setuptools import find_packages
from setuptools import find_namespace_packages
packages = find_packages(where="src") + find_namespace_packages(
include=["hydra_plugins.*"], where="src"
)
install_requires = [
"torch >= 1.10.2",
"torchvision >= 0.11.3",
"timm==0.6.11",
"hydra-core",
"wandb>=0.13",
"six"
]
setup(
name="vc_models",
version="0.1",
packages=packages,
package_dir={"": "src"},
install_requires=install_requires,
include_package_data=True,
)
| eai-vc-main | vc_models/setup.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import pytest
def pytest_addoption(parser):
parser.addoption(
"--nocluster",
action="store_true",
default=False,
help="Run outside of FAIR cluster.",
)
@pytest.fixture
def nocluster(request):
return request.config.getoption("--nocluster")
| eai-vc-main | vc_models/tests/conftest.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import os
import hydra
import omegaconf
import numpy as np
import torch
import torchvision
import PIL
import vc_models
from vc_models.utils import get_model_tag
from vc_models.models.vit import model_utils as vit_model_utils
vc_models_abs_path = os.path.dirname(os.path.abspath(vc_models.__file__))
def get_config_path(model_name):
cfg_path = os.path.join(vc_models_abs_path, "conf", "model", f"{model_name}")
if os.path.isdir(cfg_path):
pytest.skip()
cfg_path += ".yaml"
return cfg_path
@pytest.mark.parametrize("model_name", vc_models.vc_model_zoo)
def test_cfg_name(model_name):
cfg_path = get_config_path(model_name)
model_cfg = omegaconf.OmegaConf.load(cfg_path)
model_tag = get_model_tag(model_cfg.metadata)
if model_name == vit_model_utils.VC1_LARGE_NAME:
assert model_tag == 'mae_vit_large_patch16_ego_imagenet_inav_182_epochs'
elif model_name == vit_model_utils.VC1_BASE_NAME:
assert model_tag == 'mae_vit_base_patch16_ego_imagenet_inav_182_epochs'
else:
assert model_tag == model_name
@pytest.mark.parametrize("model_name", vc_models.vc_model_zoo)
def test_model_loading(model_name):
"""
Test creating the model architecture without loading the checkpoint.
"""
cfg_path = get_config_path(model_name)
model_cfg = omegaconf.OmegaConf.load(cfg_path)
if "model" in model_cfg.model:
model = hydra.utils.call(model_cfg.model.model)
else:
model = hydra.utils.call(model_cfg.model)
assert model.training
assert next(model.parameters()).device == torch.device("cpu")
with torch.no_grad():
model(torch.zeros(1, 3, 224, 224))
@pytest.mark.parametrize("model_name", vc_models.vc_model_zoo)
def test_model_loading_with_checkpoint(model_name, nocluster):
"""
Test creating the model architecture as well as loading the checkpoint.
"""
if nocluster:
pytest.skip()
cfg_path = get_config_path(model_name)
model_cfg = omegaconf.OmegaConf.load(cfg_path)
model, embedding_dim, transform, metadata = hydra.utils.call(model_cfg)
assert isinstance(model, torch.nn.Module)
assert isinstance(embedding_dim, int)
assert isinstance(
transform, (torch.nn.Module, torchvision.transforms.transforms.Compose)
)
assert isinstance(metadata, omegaconf.Container)
assert model.training
assert next(model.parameters()).device == torch.device("cpu")
with torch.no_grad():
# Test transform
imarray = np.random.rand(100, 100, 3) * 255
img = PIL.Image.fromarray(imarray.astype("uint8")).convert("RGB")
transformed_img = transform(img).unsqueeze(0)
# Test embedding dim is correct
assert torch.Size([1, embedding_dim]) == model(transformed_img).shape
| eai-vc-main | vc_models/tests/test_model_loading.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_search_path import ConfigSearchPath
from hydra.plugins.search_path_plugin import SearchPathPlugin
class VcModelsPlugin(SearchPathPlugin):
def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
search_path.prepend(
provider="vc_models-plugin",
path="pkg://vc_models/conf",
)
| eai-vc-main | vc_models/src/hydra_plugins/eaif_models_plugin/eaif_models_plugin.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | vc_models/src/hydra_plugins/eaif_models_plugin/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import os
vc_models_dir_path = os.path.dirname(os.path.abspath(__file__))
vc_models_config_files = os.listdir(vc_models_dir_path + "/conf/model")
vc_model_zoo = [
f.split(".")[0] for f in vc_models_config_files if f.endswith(".yaml")
]
| eai-vc-main | vc_models/src/vc_models/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from vc_models import vc_model_zoo
print(",".join(vc_model_zoo))
| eai-vc-main | vc_models/src/vc_models/__main__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import os
import logging
import omegaconf
log = logging.getLogger(__name__)
def setup_wandb(config):
try:
log.info(f"wandb initializing...")
import wandb
wandb_run = start_wandb(config, wandb)
log.info(f"wandb initialized")
return wandb_run
except Exception as e:
log.warning(f"Cannot initialize wandb: {e}")
return
def start_wandb(config, wandb):
resume = "allow"
wandb_id = wandb.util.generate_id()
if "dir" in config.wandb and config.wandb.dir is not None:
wandb_filename = os.path.join(config.wandb.dir, "wandb", "wandb_id.txt")
if os.path.exists(wandb_filename):
# if file exists, then we are resuming from a previous eval
with open(wandb_filename, "r") as file:
wandb_id = file.read().rstrip("\n")
resume = "must"
else:
os.makedirs(os.path.dirname(wandb_filename), exist_ok=True)
with open(wandb_filename, "w") as file:
file.write(wandb_id)
if isinstance(config, omegaconf.DictConfig):
config = omegaconf.OmegaConf.to_container(
config, resolve=True, throw_on_missing=True
)
wandb_cfg_dict = config["wandb"]
return wandb.init(id=wandb_id, config=config, resume=resume, **wandb_cfg_dict)
| eai-vc-main | vc_models/src/vc_models/utils/wandb.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import omegaconf
def get_model_tag(metadata: omegaconf.DictConfig):
if isinstance(metadata.data, omegaconf.ListConfig):
data = "_".join(sorted(metadata.data))
else:
data = metadata.data
comment = ""
if "comment" in metadata:
comment = f"_{metadata.comment}"
return f"{metadata.algo}_{metadata.model}_{data}{comment}"
| eai-vc-main | vc_models/src/vc_models/utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch.nn as nn
def create_compression_layer(
embed_dim, final_spatial, after_compression_flat_size=2048
):
num_compression_channels = int(
round(after_compression_flat_size / (final_spatial**2))
)
compression = nn.Sequential(
nn.Conv2d(
embed_dim,
num_compression_channels,
kernel_size=3,
padding=1,
bias=False,
),
nn.GroupNorm(1, num_compression_channels),
nn.ReLU(True),
nn.Flatten(),
)
output_shape = (
num_compression_channels,
final_spatial,
final_spatial,
)
output_size = np.prod(output_shape)
return compression, output_shape, output_size
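# Illustrative sketch (assumed ViT-Base dimensions: 768-d tokens on a 14x14 grid).
if __name__ == "__main__":
    import torch
    compression, out_shape, out_size = create_compression_layer(
        embed_dim=768, final_spatial=14
    )
    flat = compression(torch.zeros(2, 768, 14, 14))
    assert flat.shape == (2, out_size)
    print(out_shape, out_size)  # (10, 14, 14) 1960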
| eai-vc-main | vc_models/src/vc_models/models/compression_layer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from PIL import Image
log = logging.getLogger(__name__)
zero_img = Image.new("RGB", (100, 100))
def load_model(
model,
transform,
metadata=None,
checkpoint_dict=None,
):
if checkpoint_dict is not None:
msg = model.load_state_dict(checkpoint_dict)
log.warning(msg)
with torch.no_grad():
transformed_img = transform(zero_img).unsqueeze(0)
embedding_dim = model.eval()(transformed_img).shape[1]
model.train()
return model, embedding_dim, transform, metadata
| eai-vc-main | vc_models/src/vc_models/models/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import urllib
import vc_models
import hydra
import omegaconf
import six
VC1_BASE_NAME = "vc1_vitb"
VC1_LARGE_NAME = "vc1_vitl"
_EAI_VC1_BASE_URL = "https://dl.fbaipublicfiles.com/eai-vc/"
# progress_bar and download_url from
# https://github.com/facebookresearch/Detectron/blob/1809dd41c1ffc881c0d6b1c16ea38d08894f8b6d/detectron/utils/io.py
def _progress_bar(count, total):
"""Report download progress.
Credit:
https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113
"""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write(
' [{}] {}% of {:.1f}MB file \r'.
format(bar, percents, total / 1024 / 1024)
)
sys.stdout.flush()
if count >= total:
sys.stdout.write('\n')
def _download_url(
url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar
):
"""Download url and write it to dst_file_path.
Credit:
https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
"""
try:
response = urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
print(f"Error downloading model from {_EAI_VC1_BASE_URL}:\n{e}")
raise
if six.PY2:
total_size = response.info().getheader('Content-Length').strip()
else:
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
with open(dst_file_path, 'wb') as f:
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
if progress_hook:
progress_hook(bytes_so_far, total_size)
f.write(chunk)
return bytes_so_far
def download_model_if_needed(ckpt_file):
    model_base_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", "..", ".."
    )
    ckpt_file = os.path.join(model_base_dir, ckpt_file)
    if not os.path.isfile(ckpt_file):
        os.makedirs(os.path.dirname(ckpt_file), exist_ok=True)
        model_name = ckpt_file.split("/")[-1]
        model_url = _EAI_VC1_BASE_URL + model_name
        _download_url(model_url, ckpt_file)
def load_model(model_name):
"""
Loads a model from the vc_models package.
Args:
model_name (str): name of the model to load
Returns:
model (torch.nn.Module): the model
embedding_dim (int): the dimension of the embedding
transform (torchvision.transforms): the transform to apply to the image
metadata (dict): the metadata of the model
"""
models_filepath = os.path.dirname(os.path.abspath(vc_models.__file__))
    cfg_path = os.path.join(models_filepath, "conf", "model", f"{model_name}.yaml")
model_cfg = omegaconf.OmegaConf.load(cfg_path)
# returns tuple of model, embedding_dim, transform, metadata
return hydra.utils.call(model_cfg)
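# Illustrative usage sketch (downloads the checkpoint on first use; `img` is an
# assumed PIL image, not defined here):
#   model, embedding_dim, transform, metadata = load_model(VC1_BASE_NAME)
#   emb = model(transform(img).unsqueeze(0))  # -> shape [1, embedding_dim]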
| eai-vc-main | vc_models/src/vc_models/models/vit/model_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | vc_models/src/vc_models/models/vit/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# adapted from:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
import os
from functools import partial
import timm.models.vision_transformer
import torch
import torch.nn as nn
from vc_models.models.vit import model_utils
from timm.models.vision_transformer import resize_pos_embed
class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
"""Vision Transformer with support for global average pooling"""
def __init__(
self, global_pool=False, use_cls=True, mask_ratio=None, del_head=True, **kwargs
):
super(VisionTransformer, self).__init__(**kwargs)
if global_pool:
self.classifier_feature = "global_pool"
elif use_cls:
self.classifier_feature = "use_cls_token"
else:
self.classifier_feature = "reshape_embedding"
if del_head:
del self.head # don't use prediction head
if self.classifier_feature == "global_pool":
norm_layer = kwargs["norm_layer"]
embed_dim = kwargs["embed_dim"]
self.fc_norm = norm_layer(embed_dim)
del self.norm # remove the original norm
if self.classifier_feature == "reshape_embedding":
self.final_spatial = int(self.patch_embed.num_patches**0.5)
self.embed_dim = (
self.patch_embed.grid_size[0],
self.patch_embed.grid_size[1],
kwargs["embed_dim"],
)
self.mask_ratio = mask_ratio
def random_masking(self, x, mask_ratio):
"""
Perform per-sample random masking by per-sample shuffling.
Per-sample shuffling is done by argsort random noise.
x: [N, L, D], sequence
"""
N, L, D = x.shape # batch, length, dim
len_keep = int(L * (1 - mask_ratio))
noise = torch.rand(N, L, device=x.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = torch.argsort(
noise, dim=1
) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1)
# keep the first subset
ids_keep = ids_shuffle[:, :len_keep]
x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
# generate the binary mask: 0 is keep, 1 is remove
mask = torch.ones([N, L], device=x.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
return x_masked, mask, ids_restore
def handle_outcome(self, x):
if self.classifier_feature == "global_pool":
x = x[:, 1:, :].mean(dim=1) # global pool without cls token
outcome = self.fc_norm(x)
elif self.classifier_feature == "use_cls_token":
x = self.norm(x)
outcome = x[:, 0] # use cls token
elif self.classifier_feature == "reshape_embedding":
x = self.norm(x)
outcome = reshape_embedding(
x[:, 1:]
) # remove cls token and reshape embedding
else:
raise NotImplementedError
return outcome
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
# add pos embed w/o cls token
x = x + self.pos_embed[:, 1:, :]
# masking: length -> length * mask_ratio
if self.mask_ratio is not None:
x, _, _ = self.random_masking(x, mask_ratio=self.mask_ratio)
# append cls token
cls_token = self.cls_token + self.pos_embed[:, :1, :]
x = torch.cat((cls_token.expand(B, -1, -1), x), dim=1)
x = self.blocks(x)
return self.handle_outcome(x)
def forward(self, x):
return self.forward_features(x)
class ClipVisionTransformer(VisionTransformer):
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
x = torch.cat(
[
self.cls_token.squeeze()
+ torch.zeros(B, 1, x.shape[-1], device=x.device),
x,
],
dim=1,
) # shape = [*, grid ** 2 + 1, width]
x = x + self.pos_embed.squeeze().to(x.dtype)
x = self.norm_pre(x)
x = self.blocks(x)
return self.handle_outcome(x)
def reshape_embedding(x):
N, L, D = x.shape
H = W = int(L**0.5)
x = x.reshape(N, H, W, D)
x = torch.einsum("nhwd->ndhw", x)
return x
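# Illustrative example (assumed ViT-Base shapes): 196 patch tokens of dim 768
# become a spatial feature map.
#   reshape_embedding(torch.zeros(2, 196, 768)).shape -> torch.Size([2, 768, 14, 14])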
def vit_small_patch16(**kwargs):
"""ViT small as defined in the DeiT paper."""
model = VisionTransformer(
patch_size=16,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
return model
def vit_base_patch16(**kwargs):
model = VisionTransformer(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
return model
def clip_vit_base_patch16(**kwargs):
model = ClipVisionTransformer(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
# CLIP-specific:
pre_norm=True,
num_classes=512,
**kwargs
)
return model
def vit_large_patch16(**kwargs):
model = VisionTransformer(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
return model
def vit_huge_patch14(**kwargs):
model = VisionTransformer(
patch_size=14,
embed_dim=1280,
depth=32,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
return model
def load_mae_encoder(model, checkpoint_path=None):
if checkpoint_path is None:
return model
else:
model_utils.download_model_if_needed(checkpoint_path)
if not os.path.isabs(checkpoint_path):
        model_base_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "..", "..", ".."
        )
        checkpoint_path = os.path.join(model_base_dir, checkpoint_path)
state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
if state_dict["pos_embed"].shape != model.pos_embed.shape:
state_dict["pos_embed"] = resize_pos_embed(
state_dict["pos_embed"],
model.pos_embed,
getattr(model, "num_tokens", 1),
model.patch_embed.grid_size,
)
# filter out keys with name decoder or mask_token
state_dict = {
k: v
for k, v in state_dict.items()
if "decoder" not in k and "mask_token" not in k
}
if model.classifier_feature == "global_pool":
# remove layer that start with norm
state_dict = {k: v for k, v in state_dict.items() if not k.startswith("norm")}
# add fc_norm in the state dict from the model
state_dict["fc_norm.weight"] = model.fc_norm.weight
state_dict["fc_norm.bias"] = model.fc_norm.bias
model.load_state_dict(state_dict)
return model
def load_contrastive_vit(model, checkpoint_path=None, state_dict_key="state_dict"):
if checkpoint_path is None:
return model
old_state_dict = torch.load(checkpoint_path, map_location="cpu")[state_dict_key]
state_dict = {}
for k in list(old_state_dict.keys()):
# retain only base_encoder up to before the embedding layer
if k.startswith("module.base_encoder") and not k.startswith(
"module.base_encoder.head"
):
# remove prefix
state_dict[k[len("module.base_encoder.") :]] = old_state_dict[k]
# delete renamed or unused k
del old_state_dict[k]
if model.classifier_feature == "global_pool":
# remove layer that start with norm
state_dict = {k: v for k, v in state_dict.items() if not k.startswith("norm")}
# add fc_norm in the state dict from the model
state_dict["fc_norm.weight"] = model.fc_norm.weight
state_dict["fc_norm.bias"] = model.fc_norm.bias
if state_dict["pos_embed"].shape != model.pos_embed.shape:
state_dict["pos_embed"] = resize_pos_embed(
state_dict["pos_embed"],
model.pos_embed,
getattr(model, "num_tokens", 1),
model.patch_embed.grid_size,
)
model.load_state_dict(state_dict)
return model
| eai-vc-main | vc_models/src/vc_models/models/vit/vit.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_search_path import ConfigSearchPath
from hydra.plugins.search_path_plugin import SearchPathPlugin
class VcModelsPlugin(SearchPathPlugin):
def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
search_path.prepend(
provider="vc_models-plugin",
path="pkg://vc_models/conf",
)
| eai-vc-main | vc_models/src/vc_models/hydra_plugins/eaif_models_plugin/eaif_models_plugin.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
| eai-vc-main | vc_models/src/vc_models/hydra_plugins/eaif_models_plugin/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchvision.transforms as T
class ToTensorIfNot(T.ToTensor):
def __call__(self, pic):
if not torch.is_tensor(pic):
return super().__call__(pic)
return pic
| eai-vc-main | vc_models/src/vc_models/transforms/to_tensor_if_not.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
# Implementation borrowed from here:
# https://github.com/facebookresearch/drqv2/blob/main/drqv2.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class RandomShiftsAug(nn.Module):
def __init__(self, pad):
super().__init__()
self.pad = pad
def forward(self, x):
if len(x.shape) == 3:
single_frame = True
x = x.unsqueeze(0)
else:
single_frame = False
n, _, h, w = x.size()
assert h == w
padding = tuple([self.pad] * 4)
x = F.pad(x, padding, "replicate")
eps = 1.0 / (h + 2 * self.pad)
arange = torch.linspace(
-1.0 + eps, 1.0 - eps, h + 2 * self.pad, device=x.device, dtype=x.dtype
)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
shift = torch.randint(
0, 2 * self.pad + 1, size=(n, 1, 1, 2), device=x.device, dtype=x.dtype
)
shift *= 2.0 / (h + 2 * self.pad)
grid = base_grid + shift
out = F.grid_sample(x, grid, padding_mode="zeros", align_corners=False)
out = out.squeeze(0) if single_frame else out
return out
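# Minimal sanity-check sketch (illustrative): output shapes match the input for
# both batched and single-frame tensors.
if __name__ == "__main__":
    aug = RandomShiftsAug(pad=4)
    assert aug(torch.zeros(8, 3, 84, 84)).shape == (8, 3, 84, 84)
    assert aug(torch.zeros(3, 84, 84)).shape == (3, 84, 84)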
| eai-vc-main | vc_models/src/vc_models/transforms/random_shifts_aug.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
import torchvision.transforms as T
from vc_models.transforms.to_tensor_if_not import ToTensorIfNot
from vc_models.transforms.random_shifts_aug import RandomShiftsAug
from vc_models.transforms.randomize_env_transform import RandomizeEnvTransform
def vit_transforms(resize_size=256, output_size=224):
return T.Compose(
[
T.Resize(resize_size, interpolation=T.InterpolationMode.BICUBIC),
T.CenterCrop(output_size),
ToTensorIfNot(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
def resnet_transforms(resize_size=256, output_size=224):
return T.Compose(
[
T.Resize(resize_size),
T.CenterCrop(output_size),
ToTensorIfNot(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
def r3m_transforms(resize_size=256, output_size=224):
return T.Compose(
[
ToTensorIfNot(), # this divides by 255
T.Resize(resize_size),
T.CenterCrop(output_size),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
def clip_transforms(resize_size=256, output_size=224):
return T.Compose(
[
T.Resize(resize_size, interpolation=T.InterpolationMode.BICUBIC),
T.CenterCrop(output_size),
ToTensorIfNot(),
T.Normalize(
(0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711),
),
]
)
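# Illustrative usage sketch (assumed input): each factory above returns a
# torchvision Compose mapping a PIL image (or CHW tensor) to a normalized
# [3, 224, 224] tensor.
#   transform = vit_transforms()
#   x = transform(PIL.Image.new("RGB", (320, 240)))  # -> torch.Size([3, 224, 224])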
def transform_augment(
# Resize/crop
resize_size=256,
output_size=224,
# Jitter
jitter=True,
jitter_prob=1.0,
jitter_brightness=0.3,
jitter_contrast=0.3,
jitter_saturation=0.3,
jitter_hue=0.3,
# Shift
shift=True,
shift_pad=4,
# Randomize environments
randomize_environments=False,
normalize=False,
):
transforms = [ToTensorIfNot(), T.Resize(resize_size), T.CenterCrop(output_size)]
if jitter:
transforms.append(
T.RandomApply(
[
T.ColorJitter(
jitter_brightness,
jitter_contrast,
jitter_saturation,
jitter_hue,
)
],
p=jitter_prob,
)
)
if shift:
transforms.append(RandomShiftsAug(shift_pad))
if normalize:
transforms.append(T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
transforms = T.Compose(transforms)
return RandomizeEnvTransform(
transforms, randomize_environments=randomize_environments
) | eai-vc-main | vc_models/src/vc_models/transforms/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from vc_models.transforms.to_tensor_if_not import ToTensorIfNot
class RandomizeEnvTransform:
def __init__(self, transform, randomize_environments=False):
self.apply = transform
self.randomize_environments = randomize_environments
def __call__(
self,
x: torch.Tensor,
N: Optional[int] = None,
):
x = ToTensorIfNot()(x)
single_img = x.ndim == 3
if single_img:
x = x.unsqueeze(0)
if not self.randomize_environments or N is None:
x = self.apply(x)
else:
# shapes
TN = x.size(0)
T = TN // N
# apply the same augmentation when t == 1 for speed
# typically, t == 1 during policy rollout
if T == 1:
x = self.apply(x)
else:
# put environment (n) first
_, A, B, C = x.shape
x = torch.einsum("tnabc->ntabc", x.view(T, N, A, B, C))
# apply the same transform within each environment
x = torch.cat([self.apply(imgs) for imgs in x])
# put timestep (t) first
_, A, B, C = x.shape
x = torch.einsum("ntabc->tnabc", x.view(N, T, A, B, C)).flatten(0, 1)
if single_img:
return x[0]
return x
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.apply})"
| eai-vc-main | vc_models/src/vc_models/transforms/randomize_env_transform.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
from io import open
from setuptools import find_packages, setup
setup(
name="torchscale",
version="0.1.2",
author="TorchScale Team",
author_email="[email protected]",
description="Transformers at any scale",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="Transformers at any scale",
license="MIT",
url="https://github.com/microsoft/torchscale",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=["torch>=1.8", "fairscale==0.4.0", "timm==0.4.12"],
python_requires=">=3.8.0",
classifiers=[
"Programming Language :: Python :: 3",
],
)
| APAC-SCALE-master | setup.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | torchscale/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import numpy as np
import torch
import torch.nn as nn
def fixed_pos_embedding(x):
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = (
torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
def apply_rotary_pos_emb(x, sin, cos, scale=1):
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
# einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
return (x * cos) + (rotate_every_two(x) * sin)
class XPOS(nn.Module):
def __init__(
self, head_dim, scale_base=512
):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer(
"scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim)
)
def forward(self, x, offset=0, downscale=False):
length = x.shape[1]
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
return x
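if __name__ == "__main__":
    # Illustrative usage sketch (added annotation, not part of the original file):
    # rotate per-head queries and keys with the extrapolatable (xPos) position encoding.
    xpos = XPOS(head_dim=64)
    q = torch.randn(2, 10, 64)  # (batch * heads, seq_len, head_dim)
    k = torch.randn(2, 10, 64)
    q_rot = xpos(q, offset=0, downscale=False)
    k_rot = xpos(k, offset=0, downscale=True)
    print(q_rot.shape, k_rot.shape)  # both torch.Size([2, 10, 64])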
| APAC-SCALE-master | torchscale/component/xpos_relative_position.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import copy
import torch
import torch.nn as nn
def MultiwayWrapper(args, module, dim=1):
if args.multiway:
return MultiwayNetwork(module, dim=dim)
return module
def set_split_position(position):
def apply_fn(module):
if hasattr(module, "split_position"):
module.split_position = position
return apply_fn
class MultiwayNetwork(nn.Module):
def __init__(self, module, dim=1):
super().__init__()
self.dim = dim
self.A = module
self.B = copy.deepcopy(module)
self.B.reset_parameters()
self.split_position = -1
def forward(self, x, **kwargs):
if self.split_position == -1:
return self.A(x, **kwargs)
if self.split_position == 0:
return self.B(x, **kwargs)
x1, x2 = torch.split(
x,
[self.split_position, x.size(self.dim) - self.split_position],
dim=self.dim,
)
# x1, x2 = x[:self.split_position], x[self.split_position:]
y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs)
return torch.cat([y1, y2], dim=self.dim)
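# Illustrative usage (added annotation, not part of the original file):
#   net = MultiwayNetwork(nn.Linear(8, 8))
#   net.apply(set_split_position(3))
# routes the first 3 positions along `dim` through branch A and the rest through branch B.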
class MutliwayEmbedding(MultiwayNetwork):
def __init__(self, modules, dim=1):
super(MultiwayNetwork, self).__init__()
self.dim = dim
assert len(modules) == 2
self.A = modules[0]
self.B = modules[1]
self.split_position = -1 | APAC-SCALE-master | torchscale/component/multiway_network.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import torch
import torch.nn.functional as F
from torch import nn
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from .multiway_network import MultiwayWrapper
from .xpos_relative_position import XPOS
class MultiheadAttention(nn.Module):
def __init__(
self,
args,
embed_dim,
num_heads,
dropout=0.0,
self_attention=False,
encoder_decoder_attention=False,
subln=False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
):
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
        assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
if incremental_state is not None:
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
return attn, attn_weights
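if __name__ == "__main__":
    # Illustrative usage sketch (added annotation, not part of the original file).
    # The minimal `demo_args` namespace below is an assumption for demonstration only;
    # real runs build these flags from torchscale.architecture.config.
    from types import SimpleNamespace
    demo_args = SimpleNamespace(
        multiway=False, xpos_rel_pos=False, xpos_scale_base=512, layernorm_eps=1e-5
    )
    attn = MultiheadAttention(demo_args, embed_dim=64, num_heads=4, self_attention=True)
    x = torch.randn(2, 10, 64)
    out, weights = attn(query=x, key=x, value=x)
    print(out.shape, weights.shape)  # torch.Size([2, 10, 64]) torch.Size([4, 2, 10, 10])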
| APAC-SCALE-master | torchscale/component/multihead_attention.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import torch
import torch.nn as nn
class RelativePositionBias(nn.Module):
def __init__(
self, bidirectional=True, num_buckets=32, max_distance=128, n_heads=12
):
super().__init__()
self.bidirectional = bidirectional
self.num_buckets = num_buckets
self.max_distance = max_distance
self.n_heads = n_heads
self.relative_attention_bias = nn.Embedding(self.num_buckets, self.n_heads)
@staticmethod
def _relative_position_bucket(
relative_position, bidirectional=True, num_buckets=32, max_distance=128
):
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).to(torch.long) * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1)
)
ret += torch.where(is_small, n, val_if_large)
return ret
def compute_bias(self, qlen, klen, step=None):
step = 0 if step is None else step
context_position = torch.arange(
step,
step + qlen,
dtype=torch.long,
device=self.relative_attention_bias.weight.device,
)[:, None]
memory_position = torch.arange(
klen, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, # shape (qlen, klen)
bidirectional=self.bidirectional,
num_buckets=self.num_buckets,
max_distance=self.max_distance,
)
rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(
rp_bucket
) # shape (qlen, klen, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(
0
) # shape (1, num_heads, qlen, klen)
return values
def forward(self, batch_size, qlen, klen, step=None):
# shape (batch * num_heads, qlen, klen)
return (
self.compute_bias(qlen, klen, step)
.repeat(batch_size, 1, 1, 1)
.view(-1, qlen, klen)
)
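if __name__ == "__main__":
    # Illustrative usage sketch (added annotation, not part of the original file):
    # produces a (batch * num_heads, qlen, klen) additive bias for attention logits.
    bias = RelativePositionBias(bidirectional=True, num_buckets=32, max_distance=128, n_heads=4)
    print(bias(batch_size=2, qlen=10, klen=10).shape)  # torch.Size([8, 10, 10])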
| APAC-SCALE-master | torchscale/component/relative_position_bias.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
import torch.nn.functional as F
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
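if __name__ == "__main__":
    # Illustrative usage sketch (added annotation, not part of the original file):
    # patchify a 224x224 image into 14*14 tokens and prepend a CLS token.
    vis = VisionEmbedding(img_size=224, patch_size=16, embed_dim=768, prepend_cls_token=True)
    print(vis(torch.randn(2, 3, 224, 224)).shape)  # torch.Size([2, 197, 768])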
| APAC-SCALE-master | torchscale/component/embedding.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from timm.models.layers import drop_path
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self):
return "p={}".format(self.drop_prob)
| APAC-SCALE-master | torchscale/component/droppath.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | torchscale/component/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
    from flash_attn.flash_attention import FlashMHA
except ModuleNotFoundError:
    # flash-attn is optional; FlashMHA is only referenced by the
    # "flashattention" branch of get_activation_fn below.
    FlashMHA = None
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = self.get_rng_state()
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def get_rng_state(self):
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(self, state):
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
def __enter__(self):
return self
def __exit__(self, *exc):
self.set_rng_state(self.rng_state)
def make_experts(args, embed_dim, expert_ffn_dim):
world_size = (
1
if not torch.distributed.is_initialized()
else torch.distributed.get_world_size()
)
expert_list = []
ddp_rank = args.ddp_rank
start_seed = torch.randint(1000000, (1,)).item()
# at least as many experts than gpus
if args.moe_expert_count >= world_size:
assert (
args.moe_expert_count % world_size == 0
), f"{args.moe_expert_count}, {world_size}"
local_moe_expert_count = args.moe_expert_count // world_size
for i in range(local_moe_expert_count):
with set_torch_seed(start_seed + ddp_rank * local_moe_expert_count + i):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
)
else:
assert (
world_size % args.moe_expert_count == 0
), f"{world_size}, {args.moe_expert_count}"
with set_torch_seed(start_seed + ddp_rank % args.moe_expert_count):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
)
experts = nn.ModuleList(expert_list)
return experts
def get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
elif activation == "flashattention":
return FlashMHA
else:
raise NotImplementedError
class FeedForwardNetwork(nn.Module):
def __init__(
self,
embed_dim,
ffn_dim,
activation_fn,
dropout,
activation_dropout,
layernorm_eps,
subln=False,
):
super().__init__()
self.embed_dim = embed_dim
self.activation_fn = get_activation_fn(activation=str(activation_fn))
self.activation_dropout_module = torch.nn.Dropout(activation_dropout)
self.dropout_module = torch.nn.Dropout(dropout)
self.fc1 = nn.Linear(self.embed_dim, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, self.embed_dim)
self.ffn_layernorm = LayerNorm(ffn_dim, eps=layernorm_eps) if subln else None
def reset_parameters(self):
self.fc1.reset_parameters()
self.fc2.reset_parameters()
if self.ffn_layernorm is not None:
self.ffn_layernorm.reset_parameters()
def forward(self, x):
x_shape = x.shape
x = x.reshape(-1, x.size(-1))
x = self.fc1(x)
x = self.activation_fn(x.float()).type_as(x)
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = x.view(x_shape)
x = self.dropout_module(x)
return x
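if __name__ == "__main__":
    # Illustrative usage sketch (added annotation, not part of the original file):
    # a sub-LayerNorm FFN block; the output shape matches the input shape.
    ffn = FeedForwardNetwork(
        embed_dim=64,
        ffn_dim=256,
        activation_fn="gelu",
        dropout=0.0,
        activation_dropout=0.0,
        layernorm_eps=1e-5,
        subln=True,
    )
    print(ffn(torch.randn(2, 10, 64)).shape)  # torch.Size([2, 10, 64])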
| APAC-SCALE-master | torchscale/component/feedforward_network.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | torchscale/component/xmoe/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import logging
import time
from typing import Any, Tuple, cast
import torch
import torch.distributed as dist
from torch import Tensor
from torch.nn import Module, ModuleList
try:
from fairseq.modules.moe import MOELayer
has_fairseq = True
Base = MOELayer
except ModuleNotFoundError:
Base = Module
has_fairseq = False
try:
# To enable Tutel MoE optimizations:
# python3 -m pip install --user --upgrade git+https://github.com/microsoft/[email protected]
from tutel import moe as tutel_moe
has_tutel, fused_cumsum_sub_one = True, tutel_moe.fast_cumsum_sub_one
except ModuleNotFoundError:
has_tutel, fused_cumsum_sub_one = False, lambda mask: torch.cumsum(mask, dim=0) - 1
logger = logging.getLogger(__name__)
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
if torch.distributed.is_initialized():
dist.all_to_all_single(output, input, group=group)
else:
assert group is None
output = input
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
def _find_my_group_index(grouped_ranks):
my_rank = dist.get_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def get_moe_group(moe_expert_count):
if dist.is_initialized():
if not hasattr(get_moe_group, "_moe_groups"):
world_size = dist.get_world_size()
if world_size <= moe_expert_count:
assert moe_expert_count % world_size == 0
moe_groups = [[i] for i in range(world_size)]
else:
assert world_size % moe_expert_count == 0
ranks_per_group = world_size // moe_expert_count
moe_groups = [
[i + j * moe_expert_count for j in range(ranks_per_group)]
for i in range(moe_expert_count)
]
get_moe_group._moe_group_idx = moe_groups
get_moe_group._moe_groups = [dist.new_group(g) for g in moe_groups]
my_group_idx = _find_my_group_index(get_moe_group._moe_group_idx)
return get_moe_group._moe_groups[my_group_idx]
def get_all2all_group(moe_expert_count):
if dist.is_initialized():
if not hasattr(get_all2all_group, "_all2all_groups"):
world_size = dist.get_world_size()
# more experts than world size
if world_size <= moe_expert_count:
assert moe_expert_count % world_size == 0
all2all_groups = [[i for i in range(world_size)]]
# larger world than num experts
else:
assert world_size % moe_expert_count == 0
ranks_per_group = world_size // moe_expert_count
all2all_groups = [
[i * moe_expert_count + j for j in range(moe_expert_count)]
for i in range(ranks_per_group)
]
get_all2all_group._all2all_group_idx = all2all_groups
get_all2all_group._all2all_groups = [
dist.new_group(g) for g in all2all_groups
]
my_group_idx = _find_my_group_index(get_all2all_group._all2all_group_idx)
return get_all2all_group._all2all_groups[my_group_idx]
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
        moe = MOELayer(gate, experts, args)
        output, l_aux = moe(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate (torch.nn.Module):
gate network
        experts (torch.nn.Module or torch.nn.ModuleList):
            expert network(s)
"""
def __init__(self, gate, experts, args):
if has_fairseq:
super(Base, self).__init__()
else:
super().__init__()
self.gate = gate
if type(experts) == ModuleList:
self.experts = cast(ModuleList, experts)
else:
self.experts = ModuleList([experts])
self.expert_group = get_moe_group(args.moe_expert_count)
self.all2all_group = get_all2all_group(args.moe_expert_count)
self.world_size = dist.get_world_size(group=self.expert_group)
self.all2all_size = dist.get_world_size(group=self.all2all_group)
for p in experts.parameters():
p.expert = True # type: ignore
self.num_local_experts = len(self.experts)
self.args = args
self.in_generation = False
self.a2a_cuda_event_intervals = []
self.a2a_cpu_time_ms = 0.0
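    # Note (added annotation): forward() expects a single (batch, seq, model_dim)
    # tensor, needs torch.distributed to be initialized so that the expert and
    # all-to-all process groups can be built, and returns (combined_output, l_aux).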
def forward(self, *input: Tensor, input_padding_mask=None, **kwargs: Any) -> Tensor:
assert len(input) == 1, "only single input Tensor supported"
input = input[0]
assert (
len(input.shape) == 3
), "input Tensor must have dimensions: (s)equence, (t)oken, (m)odel"
if input_padding_mask is not None:
assert (
len(input_padding_mask.shape) == 2
), "input Tensor must have dimensions: (s)equence, (t)oken"
assert input_padding_mask.shape[0] == input.shape[0]
assert input_padding_mask.shape[1] == input.shape[1]
# assert input.shape[0] % len(self.experts) == 0, "num tokens must be order of number of local experts"
# Implement Algorithm 2 from GShard paper.
d_model = input.shape[2]
# Pad to expected batch size
input_shape = list(input.shape)
expected_bsz = (
getattr(self.args, "batch_size", 0)
if self.training
else getattr(self.args, "batch_size_valid", 0)
)
# This indicates that --batch-size or --max-sentences is not specified
if expected_bsz is None:
expected_bsz = 0
# Note: Padding is not necessary at generation time at present
# because all DDP workers process the same batch. Also, batch size at generation time
# can be different from that present in the checkpoint state
if (
not self.in_generation
and expected_bsz != 0
and input_shape[0] != expected_bsz
):
logger.warning(
f"padding batch with unexpected size {input_shape[0]} (expected: {expected_bsz})"
)
assert input_shape[0] < expected_bsz, f"{input_shape[0]} < {expected_bsz}"
padded_input = torch.zeros(
(expected_bsz, input_shape[1], input_shape[2]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: input_shape[0], :, :] = input
input = padded_input
padded_input_padding_mask = torch.ones(
(
expected_bsz,
input_shape[1],
),
dtype=torch.bool,
device=input.device,
)
if input_padding_mask is not None:
padded_input_padding_mask[: input_shape[0], :] = input_padding_mask
else:
padded_input_padding_mask[: input_shape[0], :] = False
input_padding_mask = padded_input_padding_mask
# Reshape into S tokens by dropping sequence dimension.
reshaped_input = input.reshape(-1, d_model)
reshaped_input_shape = reshaped_input.shape
reshaped_input_padding_mask = (
input_padding_mask.reshape(-1) if input_padding_mask is not None else None
)
# Doing padding here when --max-tokens is specified and not --batch-size or --max-sentences
# Pro of --max-tokens: more flexible for MT variable sequence lengths
# Con of --max-tokens: extra all-reduce needed to figure out optimal padding without running OOM
if expected_bsz == 0:
expected_dim = reshaped_input_shape[0] * torch.ones(
(1,), dtype=torch.long, device=input.device
)
dist.all_reduce(expected_dim, group=dist.group.WORLD, op=dist.ReduceOp.MAX)
expected_dim = int(expected_dim.item())
padded_input = torch.zeros(
(expected_dim, reshaped_input_shape[1]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: reshaped_input_shape[0], :] = reshaped_input
reshaped_input = padded_input
padded_input_padding_mask = torch.ones(
(expected_dim,), dtype=torch.bool, device=padded_input.device
)
if reshaped_input_padding_mask is not None:
padded_input_padding_mask[
: reshaped_input_shape[0]
] = reshaped_input_padding_mask
else:
padded_input_padding_mask[: reshaped_input_shape[0]] = False
reshaped_input_padding_mask = padded_input_padding_mask
if has_tutel:
l_aux, self.metadata, C, E, indices_, locations_, gates_ = self.gate(
reshaped_input, reshaped_input_padding_mask
)
S, M = reshaped_input.size(0), reshaped_input.size(1)
if not hasattr(self, "_tutel_dispatcher"):
self._tutel_dispatcher = tutel_moe.fast_dispatcher(
E, C, M, dispatch_dtype=reshaped_input.dtype
)
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
else:
l_aux, combine_weights, dispatch_mask, self.metadata = self.gate(
reshaped_input, reshaped_input_padding_mask
)
dispatch_mask = dispatch_mask.to(input.dtype).permute(
1, 2, 0
) # S,E,C -> E,C,S
E, C, S = dispatch_mask.size()
M = reshaped_input.size(1)
assert reshaped_input.size() == (S, M)
# einsum("sec,sm->ecm")
dispatched_input = torch.mm(
dispatch_mask.view(E * C, S), reshaped_input
) # -> (E*C),M
if self.all2all_size > 1:
dispatched_input = self.all_to_all_wrapper(dispatched_input)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(
self.all2all_size, self.num_local_experts, -1, d_model
)
chunks = dispatched_input.chunk(self.num_local_experts, dim=1)
expert_outputs = []
for chunk, expert in zip(chunks, self.experts):
expert_outputs += [expert(chunk)]
expert_output = torch.cat(expert_outputs, dim=1)
if self.all2all_size > 1:
expert_output = self.all_to_all_wrapper(expert_output)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(
self.all2all_size * self.num_local_experts, -1, d_model
)
if has_tutel:
combined_output = self._tutel_dispatcher.decode(
expert_output.view(E * C, M)
)
else:
# einsum("sec,ecm->sm")
combined_output = combine_weights.view(S, E * C).mm(
expert_output.view(E * C, M)
)
# Remove padding here when --max-tokens is specified and not --batch-size or --max-sentences
combined_output = combined_output[: reshaped_input_shape[0], :]
combined_output = combined_output.reshape(input.shape)
combined_output = combined_output[: input_shape[0], :, :]
self.record_all_to_all_stats()
return combined_output, l_aux
def prepare_for_inference_(self):
self.in_generation = True
def all_to_all_wrapper(self, input: Tensor):
dummy_a2a = getattr(self.args, "dummy_a2a", False)
if dummy_a2a:
input = input.contiguous()
output = input.detach().clone()
return input
# always record times, since it is not a lot of overhead
# if we do not log it we simply clear it off in record_all_to_all_stats
cuda_start = torch.cuda.Event(enable_timing=True)
cuda_end = torch.cuda.Event(enable_timing=True)
cpu_start = time.time() * 1000
cuda_start.record()
output = _AllToAll.apply(self.all2all_group, input)
cuda_end.record()
cpu_end = time.time() * 1000
self.a2a_cpu_time_ms += cpu_end - cpu_start
self.a2a_cuda_event_intervals.append((cuda_start, cuda_end))
return output
def record_all_to_all_stats(self):
# controlled via an argument as we want to minimize any impact from torch.cuda.synchronize()
record_a2a_perf_stats = getattr(self.args, "record_a2a_perf_stats", False)
if record_a2a_perf_stats:
torch.cuda.synchronize()
self.metadata["all_to_all_cpu_time_ms"] = self.a2a_cpu_time_ms
a2a_cuda_time_ms = 0.0
for ev_start, ev_end in self.a2a_cuda_event_intervals:
a2a_cuda_time_ms += ev_start.elapsed_time(ev_end)
self.metadata["all_to_all_cuda_time_ms"] = a2a_cuda_time_ms
# reset stats
self.a2a_cpu_time_ms = 0.0
self.a2a_cuda_event_intervals = []
| APAC-SCALE-master | torchscale/component/xmoe/moe_layer.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf
# Code is inspired by Top2GatingOnLogits from lingvo:
# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import math
from typing import Callable, Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from .moe_layer import fused_cumsum_sub_one, has_tutel
# use a fixed temperature to compute balance loss
TEMPERATURE_FOR_L_UAX = 0.07
# maximum capacity of 1 expert as a fraction of number of tokens in the batch
# Note: setting this to 1.0 causes inference to significantly slow down
EVAL_CAPACITY_TOKEN_FRACTION = 0.25
# logging
SAMPLE_FRACTION = 0.2
def top1gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
capacity_factor=1.0,
eval_mode=False,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
gate_obj=None,
) -> Tuple[Tensor, Tensor, Tensor, Dict]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = capacity_factor * S/E
capacity = int(capacity_factor * math.ceil(num_tokens / num_experts))
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True)
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
gates1_s = (gates * mask1).sum(dim=1)
# Compute locations in capacity buffer
locations1 = fused_cumsum_sub_one(mask1)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
if has_tutel:
locations1_s = torch.sum(locations1 * mask1, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[
indices1_s,
],
[
locations1_s,
],
[
gates1_s,
],
)
# Remove locations outside capacity from mask
mask1 = mask1 * torch.lt(locations1, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
# locations1_sc = num_tokens * capacity
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
dispatch_mask = combine1_sec.bool()
if use_fp32:
return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine1_sec, dispatch_mask, metadata
class Top1Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
num_experts (ints):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
input_noise_type=None,
capacity_factor=1.0,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
) -> None:
# TODO: merge this to top2gate.py
#
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_xmoe = use_xmoe
self.use_fp32 = use_fp32
self.input_noise_type = input_noise_type
self.capacity_factor = capacity_factor
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top1gating(
logits,
mask,
use_fp32=self.use_fp32,
capacity_factor=self.capacity_factor,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
use_xmoe=self.use_xmoe,
gate_obj=self,
)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return scores
def _get_gating_temperature(self, eps=1e-4):
if self.gating_t.data.item() < eps:
return eps
return self.gating_t
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
def one_hot(indices: torch.Tensor, num_classes: int, unsqueeze_indices=False) -> Tensor:
if unsqueeze_indices:
indices = indices.unsqueeze(-1)
assert indices.shape[-1] == 1, "last dimension of indices must be have size 1"
output = torch.zeros(
indices.shape[:-1] + (num_classes,), device=indices.device, dtype=indices.dtype
)
output.scatter_(len(output.shape) - 1, indices, 1)
return output
def entropy(probs):
logits = torch.distributions.utils.probs_to_logits(probs)
p_log_p = probs * logits
return -p_log_p.sum(-1)
def top2gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
eval_mode=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = 2S/E
capacity = 2 * math.ceil(num_tokens / num_experts)
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1, keepdim=True)
mask1 = one_hot(indices1_s, num_experts)
if second_expert_policy == "sampling":
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
else:
logits_w_noise = logits
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1, keepdim=True)
mask2 = one_hot(indices2_s, num_experts)
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
if normalize_gate_prob_before_dropping:
# Normalize gate probabilities
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s = gates1_s / denom_s
gates2_s = gates2_s / denom_s
if second_expert_policy == "random":
sampled = (2 * gates2_s) > torch.rand_like(gates2_s)
mask2 = mask2 * sampled.repeat(num_experts, 1).transpose(1, 0)
# Compute locations in capacity buffer
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
mask2 = mask2 * nonpadding.unsqueeze(-1).to(mask1.dtype)
if batch_prioritized_routing:
importance_scores = -1 * gates.max(dim=1)[0]
sorted_mask1 = mask1[importance_scores.argsort(dim=0)]
sorted_cumsum1 = fused_cumsum_sub_one(sorted_mask1) * sorted_mask1
importance_sorted_locations1 = sorted_cumsum1[
importance_scores.argsort(dim=0).argsort(dim=0)
]
sorted_mask2 = mask2[importance_scores.argsort(dim=0)]
sorted_cumsum2 = fused_cumsum_sub_one(sorted_mask2) * sorted_mask2
importance_sorted_locations2 = sorted_cumsum2[
importance_scores.argsort(dim=0).argsort(dim=0)
]
importance_sorted_locations2 += torch.sum(mask1, dim=0, keepdim=True)
locations1, locations2 = (
importance_sorted_locations1,
importance_sorted_locations2,
)
else:
locations1 = fused_cumsum_sub_one(mask1)
locations2 = fused_cumsum_sub_one(mask2)
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
# for logging purposes
metadata["overflow_expert1"] = (
100 * torch.sum(mask1 * torch.ge(locations1, capacity)) / torch.sum(mask1)
)
metadata["overflow_expert2"] = (
100 * torch.sum(mask2 * torch.ge(locations2, capacity)) / torch.sum(mask2)
)
# Remove locations outside capacity from mask
mask1_, mask2_ = mask1, mask2
mask1 = mask1 * torch.lt(locations1, capacity)
mask2 = mask2 * torch.lt(locations2, capacity)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
expert2_hist = (
100
* torch.histc(
(indices2_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert2_count"] = (expert2_hist == 0).sum()
expert2_hist = (
torch.sort(expert2_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
metadata["expert2_balance_top"] = expert2_hist[:sample_count].sum()
metadata["expert2_balance_bottom"] = expert2_hist[-sample_count:].sum()
if not normalize_gate_prob_before_dropping:
# Normalize gate probabilities
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
if has_tutel:
locations1_s = torch.sum(locations1 * mask1_, dim=1)
locations2_s = torch.sum(locations2 * mask2_, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[indices1_s, indices2_s],
[locations1_s, locations2_s],
[gates1_s, gates2_s],
)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
gates2 = gates2_s.unsqueeze(-1) * mask2.to(gates2_s.dtype) # einsum("s,se->se")
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
locations2_sc = one_hot(locations2_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
combine2_sec = torch.bmm(
# einsum("se,sc->sec")
gates2.unsqueeze(-1),
locations2_sc.to(gates2.dtype).unsqueeze(1),
)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
if use_fp32:
return l_aux, combine_weights.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine_weights, dispatch_mask, metadata
class Top2Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
num_experts (ints):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
use_xmoe=False,
) -> None:
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_fp32 = use_fp32
self.second_expert_policy = second_expert_policy
self.normalize_gate_prob_before_dropping = normalize_gate_prob_before_dropping
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
self.batch_prioritized_routing = batch_prioritized_routing
self.use_xmoe = use_xmoe
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top2gating(
logits,
mask,
use_fp32=self.use_fp32,
second_expert_policy=self.second_expert_policy,
normalize_gate_prob_before_dropping=self.normalize_gate_prob_before_dropping,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
batch_prioritized_routing=self.batch_prioritized_routing,
)
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return scores
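# Illustrative usage (added annotation, not part of the original file):
#   gate = Top2Gate(model_dim=512, num_experts=8)
#   l_aux, combine_weights, dispatch_mask, metadata = gate(token_features)
# where token_features has shape (num_tokens, model_dim). When Tutel is installed,
# top1gating/top2gating instead return the unpacked routing tensors that
# MOELayer feeds to tutel's fast dispatcher.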
| APAC-SCALE-master | torchscale/component/xmoe/routing.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap
from torchscale.architecture.utils import init_bert_params
from torchscale.component.droppath import DropPath
from torchscale.component.feedforward_network import FeedForwardNetwork, make_experts
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.relative_position_bias import RelativePositionBias
from torchscale.component.xmoe.moe_layer import MOELayer
from torchscale.component.xmoe.routing import Top1Gate, Top2Gate
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
class DecoderLayer(nn.Module):
def __init__(
self,
args,
depth,
is_moe_layer=False,
is_encoder_decoder=False,
):
super().__init__()
self.args = args
self.embed_dim = args.decoder_embed_dim
self.dropout_module = torch.nn.Dropout(args.dropout)
if args.drop_path_rate > 0:
drop_path_prob = np.linspace(0, args.drop_path_rate, args.decoder_layers)[
depth
]
self.drop_path = DropPath(drop_path_prob)
else:
self.drop_path = None
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.normalize_before = args.decoder_normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
if not is_encoder_decoder:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
self.is_moe_layer = is_moe_layer
self.ffn_dim = args.decoder_ffn_embed_dim
if not self.is_moe_layer:
self.ffn = self.build_ffn(
self.embed_dim,
self.args,
)
else:
if args.moe_top1_expert:
gate = Top1Gate(
self.embed_dim,
args.moe_expert_count,
use_fp32=args.moe_gating_use_fp32,
moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
else:
gate = Top2Gate(
self.embed_dim,
args.moe_expert_count,
args.moe_gating_use_fp32,
args.moe_second_expert_policy,
args.moe_normalize_gate_prob_before_dropping,
args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
experts = make_experts(args, self.embed_dim, self.ffn_dim)
self.moe_layer = MOELayer(gate, experts, args)
self.final_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
if args.deepnorm:
if is_encoder_decoder:
self.alpha = math.pow(3.0 * args.decoder_layers, 0.25)
else:
self.alpha = math.pow(2.0 * args.decoder_layers, 0.25)
else:
self.alpha = 1.0
def build_ffn(self, embed_dim, args):
return FeedForwardNetwork(
embed_dim,
self.ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
encoder_decoder_attention=False,
subln=args.subln,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=False,
encoder_decoder_attention=True,
subln=args.subln,
)
def residual_connection(self, x, residual):
return residual * self.alpha + x
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
incremental_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
self_attn_rel_pos=None,
cross_attn_rel_pos=None,
):
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask,
rel_pos=self_attn_rel_pos,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=None,
rel_pos=cross_attn_rel_pos,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if not self.is_moe_layer:
x = self.ffn(x)
l_aux = None
else:
x, l_aux = self.moe_layer(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn, None, l_aux
class Decoder(nn.Module):
def __init__(
self,
args,
embed_tokens=None,
embed_positions=None,
output_projection=None,
is_encoder_decoder=False,
**kwargs
):
super().__init__(**kwargs)
self.args = args
self.dropout_module = torch.nn.Dropout(args.dropout)
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
if (
output_projection is None
and not args.no_output_layer
and args.vocab_size > 0
):
self.output_projection = self.build_output_projection(args)
else:
self.output_projection = output_projection
if args.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([])
moe_freq = args.moe_freq
for i in range(args.decoder_layers):
is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
self.layers.append(
self.build_decoder_layer(
args,
depth=i,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layer_norm = None
self.self_attn_relative_position = None
self.cross_attn_relative_position = None
if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
self.self_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if is_encoder_decoder:
self.cross_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if args.bert_init:
self.apply(init_bert_params)
if args.deepnorm:
if is_encoder_decoder:
init_scale = math.pow(12.0 * args.decoder_layers, 0.25)
else:
init_scale = math.pow(8.0 * args.decoder_layers, 0.25)
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.div_(init_scale)
if args.subln:
if is_encoder_decoder:
init_scale = math.sqrt(math.log(args.decoder_layers * 3))
else:
init_scale = math.sqrt(math.log(args.decoder_layers * 2))
for name, p in self.named_parameters():
if "encoder_attn" in name:
continue
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.mul_(init_scale)
def build_output_projection(
self,
args,
):
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = self.embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, args.vocab_size, bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
return output_projection
def build_decoder_layer(
self, args, depth, is_moe_layer=False, is_encoder_decoder=False
):
layer = DecoderLayer(
args,
depth,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
if args.fsdp:
layer = wrap(layer)
return layer
def forward_embedding(
self,
tokens,
token_embedding=None,
incremental_state=None,
):
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
tokens, incremental_state=incremental_state
)
if incremental_state is not None:
tokens = tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
if token_embedding is None:
token_embedding = self.embed_tokens(tokens)
x = embed = self.embed_scale * token_embedding
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def forward(
self,
prev_output_tokens,
self_attn_padding_mask=None,
encoder_out=None,
incremental_state=None,
features_only=False,
return_all_hiddens=False,
token_embeddings=None,
**kwargs
):
# embed tokens and positions
x, _ = self.forward_embedding(
prev_output_tokens, token_embeddings, incremental_state
)
# relative position
self_attn_rel_pos_bias = None
slen = prev_output_tokens.size(1)
if self.self_attn_relative_position is not None:
self_attn_rel_pos_bias = self.self_attn_relative_position(
batch_size=x.size(0), qlen=slen, klen=slen
)
if incremental_state is not None:
self_attn_rel_pos_bias = self_attn_rel_pos_bias[-1:, :, :]
cross_attn_rel_pos_bias = None
if self.cross_attn_relative_position is not None:
cross_attn_rel_pos_bias = self.cross_attn_relative_position(
batch_size=x.size(0),
qlen=slen,
klen=encoder_out["encoder_out"].size(1),
)
if incremental_state is not None:
cross_attn_rel_pos_bias = cross_attn_rel_pos_bias[-1:, :, :]
# decoder layers
inner_states = [x]
if encoder_out is None:
l_aux = []
else:
l_aux = encoder_out["l_aux"] if "l_aux" in encoder_out else []
for idx, layer in enumerate(self.layers):
if incremental_state is None:
self_attn_mask = torch.triu(
torch.zeros([x.size(1), x.size(1)])
.float()
.fill_(float("-inf"))
.type_as(x),
1,
)
else:
self_attn_mask = None
if idx not in incremental_state:
incremental_state[idx] = {}
x, layer_attn, _, l_aux_i = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state[idx] if incremental_state is not None else None,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
self_attn_rel_pos=self_attn_rel_pos_bias,
cross_attn_rel_pos=cross_attn_rel_pos_bias,
)
l_aux.append(l_aux_i)
inner_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
if not features_only:
x = self.output_layer(x)
return x, {
"inner_states": inner_states,
"l_aux": l_aux,
"attn": None,
}
def output_layer(self, features):
return self.output_projection(features)
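# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# Runs the decoder-only Decoder defined above on pre-computed token
# embeddings, mirroring tests/test_decoder.py; features_only=True skips the
# output projection and returns hidden states. Toy sizes are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    from torchscale.architecture.config import DecoderConfig

    config = DecoderConfig(decoder_layers=2)  # small model for a quick smoke test
    decoder = Decoder(config)

    prev_output_tokens = torch.ones(2, 10, dtype=torch.long)
    token_embeddings = torch.rand(2, 10, config.decoder_embed_dim)
    features, _ = decoder(
        prev_output_tokens=prev_output_tokens,
        token_embeddings=token_embeddings,
        features_only=True,
    )
    print(features.shape)  # expected torch.Size([2, 10, 768])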
| APAC-SCALE-master | torchscale/architecture/decoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
class EncoderConfig(object):
def __init__(self, **kwargs):
self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
self.encoder_layers = kwargs.pop("encoder_layers", 12)
self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
self.normalize_output = kwargs.pop("normalize_output", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_encoder_input_output_embed = kwargs.pop(
"share_encoder_input_output_embed", False
)
self.max_source_positions = kwargs.pop("max_source_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Vision
self.img_size = kwargs.pop("img_size", 224)
self.patch_size = kwargs.pop("patch_size", 16)
self.in_chans = kwargs.pop("in_chans", 3)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.encoder_normalize_before = False
self.subln = False
if self.subln:
self.encoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
class DecoderConfig(object):
def __init__(self, **kwargs):
self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
self.decoder_layers = kwargs.pop("decoder_layers", 12)
self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_decoder_input_output_embed = kwargs.pop(
"share_decoder_input_output_embed", False
)
self.max_target_positions = kwargs.pop("max_target_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.decoder_normalize_before = False
self.subln = False
if self.subln:
self.decoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
class EncoderDecoderConfig(object):
def __init__(self, **kwargs):
self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
self.encoder_layers = kwargs.pop("encoder_layers", 12)
self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
self.decoder_layers = kwargs.pop("decoder_layers", 12)
self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_all_embeddings = kwargs.pop("share_all_embeddings", False)
self.share_decoder_input_output_embed = kwargs.pop(
"share_decoder_input_output_embed", False
)
self.max_source_positions = kwargs.pop("max_source_positions", 1024)
self.max_target_positions = kwargs.pop("max_target_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.encoder_normalize_before = False
self.decoder_normalize_before = False
self.subln = False
if self.subln:
self.encoder_normalize_before = True
self.decoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
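# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file): shows
# how the mutually exclusive normalization flags are resolved at construction
# time, and how override() copies values from an argparse-style namespace.
# Only the classes defined above are used.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace

    default_cfg = EncoderConfig()
    # the default subln=True forces pre-normalization and disables deepnorm
    assert default_cfg.subln and default_cfg.encoder_normalize_before

    deepnorm_cfg = EncoderConfig(deepnorm=True, subln=False)
    # deepnorm switches the encoder to post-normalization
    assert deepnorm_cfg.deepnorm and not deepnorm_cfg.encoder_normalize_before

    # override() copies every attribute of the namespace that is not None
    cfg = DecoderConfig()
    cfg.override(Namespace(decoder_layers=24, dropout=0.1))
    assert cfg.decoder_layers == 24 and cfg.dropout == 0.1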
| APAC-SCALE-master | torchscale/architecture/config.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from torchscale.architecture.decoder import Decoder
from torchscale.architecture.encoder import Encoder
class EncoderDecoder(nn.Module):
def __init__(
self,
args,
encoder_embed_tokens=None,
encoder_embed_positions=None,
decoder_embed_tokens=None,
decoder_embed_positions=None,
output_projection=None,
**kwargs
):
super().__init__()
self.args = args
if args.share_all_embeddings:
args.share_decoder_input_output_embed = True
self.encoder = Encoder(
args,
encoder_embed_tokens,
encoder_embed_positions,
is_encoder_decoder=True,
**kwargs
)
if args.share_all_embeddings and decoder_embed_tokens is None:
decoder_embed_tokens = self.encoder.embed_tokens
self.decoder = Decoder(
args,
decoder_embed_tokens,
decoder_embed_positions,
output_projection,
is_encoder_decoder=True,
**kwargs
)
def forward(
self,
src_tokens,
prev_output_tokens,
return_all_hiddens=False,
features_only=False,
**kwargs
):
encoder_out = self.encoder(src_tokens, return_all_hiddens=return_all_hiddens)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
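# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# A toy sequence-to-sequence forward pass mirroring
# tests/test_encoder_decoder.py; the embedding helpers come from
# torchscale.component.embedding and the vocabulary size (64000) is arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    from torchscale.architecture.config import EncoderDecoderConfig
    from torchscale.component.embedding import PositionalEmbedding, TextEmbedding

    config = EncoderDecoderConfig(encoder_layers=2, decoder_layers=2)
    model = EncoderDecoder(
        config,
        encoder_embed_tokens=TextEmbedding(64000, config.encoder_embed_dim),
        decoder_embed_tokens=TextEmbedding(64000, config.decoder_embed_dim),
        encoder_embed_positions=PositionalEmbedding(
            config.max_source_positions, config.encoder_embed_dim
        ),
        decoder_embed_positions=PositionalEmbedding(
            config.max_target_positions, config.decoder_embed_dim
        ),
    )
    src_tokens = torch.ones(2, 20).long()
    prev_output_tokens = torch.ones(2, 10).long()
    features, _ = model(
        src_tokens=src_tokens,
        prev_output_tokens=prev_output_tokens,
        features_only=True,
    )
    print(features.shape)  # expected torch.Size([2, 10, 768])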
| APAC-SCALE-master | torchscale/architecture/encoder_decoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | torchscale/architecture/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from torchscale.architecture.utils import init_bert_params
from torchscale.component.droppath import DropPath
from torchscale.component.feedforward_network import FeedForwardNetwork, make_experts
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.multiway_network import MultiwayWrapper, set_split_position
from torchscale.component.relative_position_bias import RelativePositionBias
from torchscale.component.xmoe.moe_layer import MOELayer
from torchscale.component.xmoe.routing import Top1Gate, Top2Gate
class EncoderLayer(nn.Module):
def __init__(self, args, depth, is_moe_layer=False, is_encoder_decoder=False):
super().__init__()
self.args = args
self.embed_dim = args.encoder_embed_dim
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
self.dropout_module = torch.nn.Dropout(args.dropout)
if args.drop_path_rate > 0:
drop_path_prob = np.linspace(0, args.drop_path_rate, args.encoder_layers)[
depth
]
self.drop_path = DropPath(drop_path_prob)
else:
self.drop_path = None
self.normalize_before = args.encoder_normalize_before
self.is_moe_layer = is_moe_layer
self.ffn_dim = args.encoder_ffn_embed_dim
if not self.is_moe_layer:
self.ffn = MultiwayWrapper(
args,
self.build_ffn(
self.embed_dim,
self.args,
),
)
else:
assert not self.args.multiway
if args.moe_top1_expert:
gate = Top1Gate(
self.embed_dim,
args.moe_expert_count,
use_fp32=args.moe_gating_use_fp32,
moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
else:
gate = Top2Gate(
self.embed_dim,
args.moe_expert_count,
args.moe_gating_use_fp32,
args.moe_second_expert_policy,
args.moe_normalize_gate_prob_before_dropping,
args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
experts = make_experts(args, self.embed_dim, self.ffn_dim)
self.moe_layer = MOELayer(gate, experts, args)
self.final_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if args.deepnorm:
if is_encoder_decoder:
self.alpha = (
math.pow(
math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
)
* 0.81
)
else:
self.alpha = math.pow(2.0 * args.encoder_layers, 0.25)
else:
self.alpha = 1.0
def build_ffn(self, embed_dim, args):
return FeedForwardNetwork(
embed_dim,
self.ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
encoder_decoder_attention=False,
subln=args.subln,
)
def residual_connection(self, x, residual):
return residual * self.alpha + x
    def forward(
        self,
        x,
        encoder_padding_mask,
        attn_mask=None,
        rel_pos=None,
        multiway_split_position=None,
        incremental_state=None,
    ):
if multiway_split_position is not None:
assert self.args.multiway
self.apply(set_split_position(multiway_split_position))
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
rel_pos=rel_pos,
incremental_state=incremental_state,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if not self.is_moe_layer:
x = self.ffn(x)
l_aux = None
else:
x = x.transpose(0, 1)
x, l_aux = self.moe_layer(x)
x = x.transpose(0, 1)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, l_aux
class Encoder(nn.Module):
def __init__(
self,
args,
embed_tokens=None,
embed_positions=None,
output_projection=None,
is_encoder_decoder=False,
**kwargs
):
self.args = args
super().__init__(**kwargs)
self.dropout_module = torch.nn.Dropout(args.dropout)
embed_dim = args.encoder_embed_dim
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
if (
output_projection is None
and not is_encoder_decoder
and not args.no_output_layer
and args.vocab_size > 0
):
self.output_projection = self.build_output_projection(args)
else:
self.output_projection = output_projection
if args.layernorm_embedding:
self.layernorm_embedding = MultiwayWrapper(
args, LayerNorm(embed_dim, eps=args.layernorm_eps), dim=1
)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([])
moe_freq = args.moe_freq
for i in range(args.encoder_layers):
is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
self.layers.append(
self.build_encoder_layer(
args,
depth=i,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before and args.normalize_output:
self.layer_norm = MultiwayWrapper(args, LayerNorm(embed_dim, eps=args.layernorm_eps))
else:
self.layer_norm = None
if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
self.relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.encoder_attention_heads,
)
else:
self.relative_position = None
if args.bert_init:
self.apply(init_bert_params)
if args.deepnorm:
if is_encoder_decoder:
init_scale = (
math.pow(
math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
)
/ 1.15
)
else:
init_scale = math.pow(8.0 * args.encoder_layers, 0.25)
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.div_(init_scale)
if args.subln:
if is_encoder_decoder:
init_scale = math.sqrt(
math.log(3 * args.decoder_layers)
* math.log(2 * args.encoder_layers)
/ 3
)
else:
init_scale = math.sqrt(math.log(args.encoder_layers * 2))
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.mul_(init_scale)
def build_output_projection(
self,
args,
):
if args.share_encoder_input_output_embed:
            assert getattr(args, "encoder_embedding_type", "language") == "language"
output_projection = torch.nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = self.embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.encoder_embed_dim, args.vocab_size, bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.encoder_embed_dim**-0.5
)
return output_projection
def build_encoder_layer(
self, args, depth, is_moe_layer=False, is_encoder_decoder=False
):
layer = EncoderLayer(
args,
depth,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
if args.fsdp:
layer = wrap(layer)
return layer
def forward_embedding(
self,
src_tokens,
token_embedding=None,
positions=None,
):
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
if src_tokens is not None:
x = embed + self.embed_positions(src_tokens, positions=positions)
else:
x = embed + self.embed_positions(x, positions=positions)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def forward(
self,
src_tokens,
encoder_padding_mask=None,
attn_mask=None,
return_all_hiddens=False,
token_embeddings=None,
multiway_split_position=None,
features_only=False,
incremental_state=None,
positions=None,
**kwargs
):
assert src_tokens is not None or token_embeddings is not None
if encoder_padding_mask is None:
if src_tokens is not None:
encoder_padding_mask = torch.zeros_like(
src_tokens, device=src_tokens.device
).bool()
else:
encoder_padding_mask = torch.zeros(
[token_embeddings.size(0), token_embeddings.size(1)],
device=token_embeddings.device,
).bool()
if multiway_split_position is not None:
assert self.args.multiway
self.apply(set_split_position(multiway_split_position))
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings, positions)
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
rel_pos_bias = None
if self.relative_position is not None:
rel_pos_bias = self.relative_position(
batch_size=x.size(0), qlen=x.size(1), klen=x.size(1)
)
# incremental_state is not None during inference if we use the bidirectional encoder as a generator as in s2s-ft (https://arxiv.org/abs/2110.13640)
l_aux = []
for idx, layer in enumerate(self.layers):
x, l_aux_i = layer(
x,
encoder_padding_mask=encoder_padding_mask if incremental_state is None else None,
attn_mask=attn_mask,
rel_pos=rel_pos_bias,
multiway_split_position=multiway_split_position,
incremental_state=incremental_state[idx] if incremental_state is not None else None,
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
l_aux.append(l_aux_i)
if self.layer_norm is not None:
x = self.layer_norm(x)
if not features_only and self.output_projection is not None:
x = self.output_projection(x)
return {
"encoder_out": x,
"encoder_embedding": encoder_embedding,
"encoder_padding_mask": encoder_padding_mask,
"encoder_states": encoder_states,
"l_aux": l_aux,
}
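# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# Runs the standalone Encoder defined above on pre-computed token embeddings,
# mirroring tests/test_encoder.py; no vocabulary or positional embedding is
# needed for this path.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torchscale.architecture.config import EncoderConfig

    config = EncoderConfig(encoder_layers=2)
    encoder = Encoder(config)

    token_embeddings = torch.rand(2, 10, config.encoder_embed_dim)
    out = encoder(src_tokens=None, token_embeddings=token_embeddings)
    print(out["encoder_out"].shape)  # expected torch.Size([2, 10, 768])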
| APAC-SCALE-master | torchscale/architecture/encoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.multiway_network import MultiwayNetwork
def init_bert_params(module):
def normal_(data):
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
if isinstance(module.q_proj, MultiwayNetwork):
normal_(module.q_proj.A.weight.data)
normal_(module.q_proj.B.weight.data)
normal_(module.k_proj.A.weight.data)
normal_(module.k_proj.B.weight.data)
normal_(module.v_proj.A.weight.data)
normal_(module.v_proj.B.weight.data)
else:
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
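# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# init_bert_params is meant to be applied recursively via nn.Module.apply();
# the Encoder/Decoder classes do exactly that when args.bert_init is set.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    toy = nn.Sequential(nn.Embedding(100, 16, padding_idx=0), nn.Linear(16, 16))
    toy.apply(init_bert_params)  # weights ~ N(0, 0.02), biases and pad row zeroed
    assert torch.allclose(toy[1].bias, torch.zeros_like(toy[1].bias))
    assert torch.count_nonzero(toy[0].weight[0]) == 0  # padding_idx row stays zero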
| APAC-SCALE-master | torchscale/architecture/utils.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
from torchscale.architecture.encoder import Encoder
from torchscale.component.embedding import (
PositionalEmbedding,
TextEmbedding,
VisionEmbedding,
)
from torchscale.component.multiway_network import MutliwayEmbedding
class BEiT3(nn.Module):
def __init__(self, args, **kwargs):
super().__init__()
self.args = args
assert args.multiway
assert args.vocab_size > 0
assert not args.share_encoder_input_output_embed
self.text_embed = TextEmbedding(args.vocab_size, args.encoder_embed_dim)
self.vision_embed = VisionEmbedding(
args.img_size,
args.patch_size,
args.in_chans,
args.encoder_embed_dim,
contain_mask_token=True,
prepend_cls_token=True,
)
# being consistent with Fairseq, which starts from 2 for position embedding
embed_positions = MutliwayEmbedding(
modules=[
PositionalEmbedding(self.vision_embed.num_position_embeddings() + 2, args.encoder_embed_dim),
PositionalEmbedding(args.max_source_positions, args.encoder_embed_dim),
],
dim=1,
)
self.encoder = Encoder(
args,
embed_tokens=None,
embed_positions=embed_positions,
output_projection=None,
is_encoder_decoder=False,
)
def forward(
self,
textual_tokens=None,
visual_tokens=None,
text_padding_position=None,
attn_mask=None,
vision_masked_position=None,
incremental_state=None,
positions=None,
):
assert textual_tokens is not None or visual_tokens is not None
if textual_tokens is None:
x = self.vision_embed(visual_tokens, vision_masked_position)
encoder_padding_mask = None
multiway_split_position = -1
elif visual_tokens is None:
x = self.text_embed(textual_tokens)
encoder_padding_mask = text_padding_position
multiway_split_position = 0
else:
x1 = self.vision_embed(visual_tokens, vision_masked_position)
multiway_split_position = x1.size(1)
x2 = self.text_embed(textual_tokens)
x = torch.cat([x1, x2], dim=1)
if text_padding_position is not None:
encoder_padding_mask = torch.cat(
[
torch.zeros(x1.shape[:-1]).to(x1.device).bool(),
text_padding_position,
],
dim=1,
)
else:
encoder_padding_mask = None
encoder_out = self.encoder(
src_tokens=None,
encoder_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
token_embeddings=x,
multiway_split_position=multiway_split_position,
incremental_state=incremental_state,
positions=positions,
)
encoder_out["multiway_split_position"] = multiway_split_position
return encoder_out
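# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# Builds a small BEiT3 backbone and runs a joint vision-language forward pass.
# The toy sizes below (vocab 1000, 32x32 images, 2 layers) are arbitrary and
# chosen only to keep the example fast; multiway=True and vocab_size > 0 are
# required by the assertions in __init__, and no_output_layer=True keeps the
# output at the embedding dimension.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torchscale.architecture.config import EncoderConfig

    args = EncoderConfig(
        multiway=True,
        vocab_size=1000,
        encoder_layers=2,
        img_size=32,
        patch_size=16,
        no_output_layer=True,
    )
    model = BEiT3(args)

    visual_tokens = torch.randn(2, 3, 32, 32)        # (batch, channels, H, W)
    textual_tokens = torch.randint(0, 1000, (2, 7))  # (batch, text_len)
    out = model(textual_tokens=textual_tokens, visual_tokens=visual_tokens)
    # 4 image patches + 1 CLS token, followed by 7 text tokens
    print(out["encoder_out"].shape)  # expected torch.Size([2, 12, 768])
    print(out["multiway_split_position"])  # 5: boundary between the two modalities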
| APAC-SCALE-master | torchscale/model/BEiT3.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | torchscale/model/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import pytest
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
testcases = [
{},
{"vocab_size": 64000},
{"activation_fn": "relu"},
{"drop_path_rate": 0.1},
{"decoder_normalize_before": False},
{"no_scale_embedding": False},
{"layernorm_embedding": True},
{"rel_pos_buckets": 32, "max_rel_pos": 256},
{"deepnorm": True, "subln": False, "decoder_normalize_before": False},
{"bert_init": True},
{"multiway": True},
{"share_decoder_input_output_embed": True},
{"checkpoint_activations": True},
{"fsdp": True},
]
@pytest.mark.parametrize("args", testcases)
def test_decoder(args):
config = DecoderConfig(**args)
model = Decoder(config)
prev_output_tokens = torch.ones(2, 10)
token_embeddings = torch.rand(2, 10, config.decoder_embed_dim)
model(
prev_output_tokens=prev_output_tokens,
token_embeddings=token_embeddings,
features_only=True,
)
| APAC-SCALE-master | tests/test_decoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import pytest
import torch
from torchscale.architecture.config import EncoderConfig
from torchscale.architecture.encoder import Encoder
testcases = [
{},
{"vocab_size": 64000},
{"activation_fn": "relu"},
{"drop_path_rate": 0.1},
{"encoder_normalize_before": False},
{"no_scale_embedding": False},
{"layernorm_embedding": True},
{"rel_pos_buckets": 32, "max_rel_pos": 256},
{"deepnorm": True, "subln": False, "encoder_normalize_before": False},
{"bert_init": True},
{"multiway": True},
{"share_encoder_input_output_embed": True},
{"checkpoint_activations": True},
{"fsdp": True},
]
@pytest.mark.parametrize("args", testcases)
def test_encoder(args):
config = EncoderConfig(**args)
model = Encoder(config)
token_embeddings = torch.rand(2, 10, config.encoder_embed_dim)
model(src_tokens=None, token_embeddings=token_embeddings)
| APAC-SCALE-master | tests/test_encoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | tests/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import pytest
import torch
from torchscale.architecture.config import EncoderDecoderConfig
from torchscale.architecture.encoder_decoder import EncoderDecoder
from torchscale.component.embedding import PositionalEmbedding, TextEmbedding
testcases = [
{},
{"vocab_size": 64000},
{"activation_fn": "relu"},
{"drop_path_rate": 0.1},
{"encoder_normalize_before": False, "decoder_normalize_before": False},
{"no_scale_embedding": False},
{"layernorm_embedding": True},
{"rel_pos_buckets": 32, "max_rel_pos": 256},
{
"deepnorm": True,
"subln": False,
"encoder_normalize_before": False,
"decoder_normalize_before": False,
},
{"bert_init": True},
{"multiway": True},
{"share_decoder_input_output_embed": True},
{"share_all_embeddings": True},
{"checkpoint_activations": True},
{"fsdp": True},
]
@pytest.mark.parametrize("args", testcases)
def test_decoder(args):
config = EncoderDecoderConfig(**args)
model = EncoderDecoder(
config,
encoder_embed_tokens=TextEmbedding(64000, config.encoder_embed_dim),
decoder_embed_tokens=TextEmbedding(64000, config.decoder_embed_dim),
encoder_embed_positions=PositionalEmbedding(
config.max_source_positions, config.encoder_embed_dim
),
decoder_embed_positions=PositionalEmbedding(
config.max_target_positions, config.decoder_embed_dim
),
)
src_tokens = torch.ones(2, 20).long()
prev_output_tokens = torch.ones(2, 10).long()
model(
src_tokens=src_tokens,
prev_output_tokens=prev_output_tokens,
features_only=True,
)
| APAC-SCALE-master | tests/test_encoder_decoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | examples/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.generate import cli_main
if __name__ == "__main__":
cli_main()
| APAC-SCALE-master | examples/fairseq/generate.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | examples/fairseq/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.interactive import cli_main
if __name__ == "__main__":
cli_main()
| APAC-SCALE-master | examples/fairseq/interactive.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
| APAC-SCALE-master | examples/fairseq/train.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import json
import logging
import os
from argparse import Namespace
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import sentencepiece as spm
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II, MISSING
from .data.mlm_loader import MLMLoader
logger = logging.getLogger(__name__)
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
@dataclass
class PretrainingConfig(FairseqDataclass):
data: str = field(
default=MISSING,
metadata={
"help": "colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner"
},
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="complete",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
mask_prob: float = field(
default=0.15,
metadata={"help": "probability of replacing a token with mask"},
)
leave_unmasked_prob: float = field(
default=0.1,
metadata={"help": "probability that a masked token is unmasked"},
)
random_token_prob: float = field(
default=0.1,
metadata={"help": "probability of replacing a token with a random token"},
)
freq_weighted_replacement: bool = field(
default=False,
metadata={"help": "sample random replacement words based on word frequencies"},
)
mask_whole_words: bool = field(
default=False,
metadata={"help": "mask whole words; you may also want to set --bpe"},
)
mask_multiple_length: int = field(
default=1,
metadata={"help": "repeat the mask indices multiple times"},
)
mask_stdev: float = field(
default=0.0,
metadata={"help": "stdev of the mask length"},
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
seed: int = II("common.seed")
span_length: float = field(
default=3.0,
metadata={"help": "average span length for masking"},
)
remove_source_sentinel: bool = field(
default=False,
metadata={"help": "remove the source sentinel for the span corruption task"},
)
remove_target_sentinel: bool = field(
default=False,
metadata={"help": "remove the target sentinel for the span corruption task"},
)
batch_read_ahead: int = field(
default=100000,
metadata={"help": "batch read ahead size for infinibatch"},
)
required_batch_size_multiple: int = II("dataset.required_batch_size_multiple")
spm_model: str = field(
default="",
metadata={"help": "sentencepice model to tokenize the data"},
)
dict_file: str = field(
default="",
metadata={"help": ""},
)
pad_to_max_length: bool = field(
default=False,
)
@register_task("pretraining", dataclass=PretrainingConfig)
class PLMTask(FairseqTask):
def __init__(self, cfg, dictionary, tokenizer):
super().__init__(cfg)
self.cfg = cfg
self.dictionary = dictionary
self.tokenizer = tokenizer
self.seed = cfg.seed
self.mask_idx = dictionary.index("<mask>")
@classmethod
def setup_task(cls, cfg, **kwargs):
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
if cfg.dict_file != "":
dictionary = Dictionary.load(cfg.dict_file)
else:
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
# add mask token
dictionary.add_symbol("<mask>")
for i in range(100):
dictionary.add_symbol(f"<mask_{i}>")
dictionary.pad_to_multiple_(cfg.required_batch_size_multiple)
logger.info("dictionary: {} types".format(len(dictionary)))
# tokenizer = SentencepieceBPE(Namespace(sentencepiece_model=cfg.spm_model))
tokenizer = spm.SentencePieceProcessor()
tokenizer.Load(cfg.spm_model)
return cls(cfg, dictionary, tokenizer)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
self.datasets[split] = {
"data": json.load(open(f"{self.cfg.data}/json/{split}.json")),
"data_dir": self.cfg.data,
"shuffle": True if split == "train" else False,
}
self.datasets[split] = Namespace(**self.datasets[split])
def dataset(self, split):
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
**kwargs,
):
return MLMLoader(
self.cfg,
dataset,
self.dictionary,
self.tokenizer,
max_tokens=max_tokens,
max_sentences=max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
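# ---------------------------------------------------------------------------
# Data-layout sketch (illustrative addition, not part of the original file).
# load_dataset() above reads <data>/json/<split>.json, whose entries follow
# the format documented in MLMLoader._tokenize. The directory name, shard
# paths and language tag below are made-up placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    data_dir = "pretraining_data"  # placeholder; point this at your corpus root
    manifest = [
        {
            "source": ["shards/en/part-000.txt"],  # paths relative to data_dir
            "source_lang": "en",
            "weight": 1.0,
            "count": 1,
            "name": "en_corpus",
        }
    ]
    os.makedirs(os.path.join(data_dir, "json"), exist_ok=True)
    with open(os.path.join(data_dir, "json", "train.json"), "w") as f:
        json.dump(manifest, f, indent=2)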
| APAC-SCALE-master | examples/fairseq/tasks/pretraining.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import argparse
import importlib
import os
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("tasks." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group("Additional command-line arguments")
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
| APAC-SCALE-master | examples/fairseq/tasks/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
from infinibatch.iterators import CheckpointableIterator
from . import utils
class BaseBatchGen(CheckpointableIterator):
"""
This is a base class for batch generators that use infinibatch
"""
def __init__(self):
self._iter = None
self.epoch = 1
self.next_epoch_idx = 1
self.sharded_checkpoint = True
self.should_close_after_finished = True
def _build_iter(self):
"""
Build infinibatch iterator and assign to self._iter
"""
raise NotImplementedError()
def _move_to_tensor(self, batch):
def to_tensor(x):
return torch.tensor(x)
return utils.apply_to_sample(to_tensor, batch)
@property
def iterator(self):
if self._iter is None:
            raise NotImplementedError("_build_iter() must be called first")
return self._iter
def __iter__(self):
if self._iter is None:
            raise NotImplementedError("_build_iter() must be called first")
return self._iter
def __next__(self):
return next(self._iter)
def setstate(self, value):
self._iter.setstate(value)
def getstate(self):
return self._iter.getstate()
def close(self):
self._iter.close()
def __len__(self) -> int:
return 819200000
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
return self
def end_of_epoch(self) -> bool:
return False
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
return self.getstate()
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.setstate(state_dict)
@property
def first_batch(self):
return "DUMMY"
| APAC-SCALE-master | examples/fairseq/tasks/data/basic_loader.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | examples/fairseq/tasks/data/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import collections
from random import Random
from typing import Dict, Iterable, Optional
import numpy as np
from infinibatch import iterators
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if isinstance(x, np.ndarray):
return f(x)
elif isinstance(x, collections.OrderedDict):
# OrderedDict has attributes that needs to be preserved
od = collections.OrderedDict(
(key, _apply(value)) for key, value in x.items()
)
od.__dict__ = x.__dict__
return od
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
class NativeCheckpointableIterator(iterators.CheckpointableIterator):
def __init__(self, iterable: Iterable):
self._input_iterable = iterable
self.setstate(None)
def getstate(self) -> Dict:
return {"num_items_yielded": self._num_items_yielded}
def setstate(self, checkpoint: Optional[Dict]):
self._iterator = iter(self._input_iterable)
self._num_items_yielded = (
iterators._advance_iterator(self._iterator, checkpoint["num_items_yielded"])
if checkpoint is not None
else 0
)
def __next__(self):
item = next(self._iterator)
self._num_items_yielded += 1
return item
def close(self):
pass
class WeightIterator(object):
def __init__(self, weights, seed):
self.weights = weights
self.seed = seed
self.control_index = list(range(len(weights)))
self.setstate(None)
def __iter__(self):
return self
def getstate(self):
return {"random_state": self._random_state}
def setstate(self, checkpoint):
self._random_state = checkpoint["random_state"] if checkpoint else None
self._random = (
None # this will trigger the lazy initialization in self.__next__
)
def __next__(self):
if self._random is None:
self._random = Random(self.seed)
if self._random_state is not None:
self._random.setstate(self._random_state)
idx = self._random.choices(self.control_index, self.weights)[0]
self._random_state = self._random.getstate()
return idx
def close(self):
pass
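# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# WeightIterator endlessly yields dataset indices in proportion to the given
# weights; like other infinibatch iterators, its RNG state can be saved with
# getstate() and restored with setstate().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    it = WeightIterator(weights=[0.7, 0.3], seed=0)
    print([next(it) for _ in range(5)])  # e.g. [0, 0, 1, 0, 0]

    checkpoint = it.getstate()           # capture the RNG state
    resumed = WeightIterator(weights=[0.7, 0.3], seed=0)
    resumed.setstate(checkpoint)
    assert next(resumed) == next(it)     # both continue from the same state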
| APAC-SCALE-master | examples/fairseq/tasks/data/utils.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import copy
import itertools
import os
import numpy as np
from infinibatch import iterators
from .basic_loader import BaseBatchGen
from .utils import NativeCheckpointableIterator, WeightIterator
class MLMLoader(BaseBatchGen):
def __init__(
self,
args,
dataset,
dictionary,
tokenizer,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
):
super().__init__()
self.args = args
self.data = dataset.data
self.data_dir = dataset.data_dir
self.shuffle = dataset.shuffle
self.dictionary = dictionary
self.tokenizer = tokenizer
self.max_tokens = max_tokens
self.max_sentences = max_sentences
self.max_positions = max_positions
self.tokens_per_sample = args.tokens_per_sample
self.sample_break_mode = args.sample_break_mode
self.ignore_invalid_inputs = ignore_invalid_inputs
self.required_batch_size_multiple = required_batch_size_multiple
self.seed = str(seed)
self.num_shards = num_shards
self.shard_id = shard_id
self.batch_read_ahead = args.batch_read_ahead
self._build_iter()
def _build_iter(self):
tokenized_lines = self._multilingual_tokenize()
self.padded_batches = self._batchify(tokenized_lines)
prefetch_batches = iterators.PrefetchIterator(
self.padded_batches,
buffer_size=10000,
buffer_in_main_process=True,
log_empty_buffer_warning=True and self.shard_id == 0,
)
prefetch_batches = iterators.MapIterator(prefetch_batches, self._move_to_tensor)
self._iter = prefetch_batches
def _multilingual_tokenize(self):
multilingual_iters = []
weights = []
for data in self.data:
multilingual_iters.append(self._tokenize(data))
if "weight" in data:
weights.append(float(data["weight"]))
else:
weights.append(int(data["count"]))
if len(multilingual_iters) == 1:
return multilingual_iters[0]
        sampling_iterator = WeightIterator(weights, self.seed)
control_iterator = NativeCheckpointableIterator(sampling_iterator)
tokenized_lines = iterators.MultiplexIterator(
control_iterator, multilingual_iters
)
return tokenized_lines
def _tokenize(self, data):
"""
data:
{
'source': list[Path],
'source_lang': str,
'count': int,
'weight': float,
'name': str,
}
"""
dataset = list(
zip(
data["source"],
itertools.repeat(data["source_lang"]),
)
)
if self.shuffle:
chunk_files = iterators.InfinitePermutationSourceIterator(
dataset,
seed=self.seed,
shuffle=self.shuffle,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
else:
chunk_files = iterators.ChunkedSourceIterator(
dataset,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
tokenized_lines = iterators.SelectManyIterator(
chunk_files, lambda files: self._read_from_files(*files)
)
tokenized_lines = iterators.SamplingRandomMapIterator(
tokenized_lines, self._prepare, self.seed
)
return tokenized_lines
def _batchify(self, lines):
if self.max_sentences is not None:
if self.batch_read_ahead > 0:
lines = iterators.BlockwiseShuffleIterator(
lines, self.batch_read_ahead, self.seed
)
batches = iterators.FixedBatchIterator(lines, self.max_sentences)
else:
def dynamic_batch_size(sample):
lengths = [len(x) for x in sample]
batch_size = self.max_tokens // max(lengths)
batch_size = (
batch_size
// self.required_batch_size_multiple
* self.required_batch_size_multiple
)
return max(1, batch_size)
batches = iterators.BucketedReadaheadBatchIterator(
lines,
read_ahead=self.batch_read_ahead,
key=(lambda x: max(len(x[0]), len(x[1]))) if self.shuffle else None,
batch_size=dynamic_batch_size,
shuffle=self.shuffle,
seed=self.seed,
)
def collate(batch):
batch_size = len(batch)
mlm_source_max_length = max([len(x[0]) for x in batch])
mlm_target_max_length = max([len(x[1]) for x in batch])
s2s_source_max_length = max([len(x[2]) for x in batch])
s2s_target_max_length = max([len(x[3]) for x in batch])
if self.args.pad_to_max_length:
mlm_source_max_length = self.args.tokens_per_sample
mlm_target_max_length = self.args.tokens_per_sample
mlm_source_ids = np.full(
shape=(batch_size, mlm_source_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
mlm_target_ids = np.full(
shape=(batch_size, mlm_target_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_source_ids = np.full(
shape=(batch_size, s2s_source_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_target_ids = np.full(
shape=(batch_size, s2s_target_max_length - 1),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_prev_input_ids = np.full(
shape=(batch_size, s2s_target_max_length - 1),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
for i, (
mlm_input_ids,
mlm_label_ids,
s2s_input_ids,
s2s_label_ids,
) in enumerate(batch):
mlm_source_ids[i, : len(mlm_input_ids)] = mlm_input_ids
mlm_target_ids[i, : len(mlm_label_ids)] = mlm_label_ids
s2s_source_ids[i, : len(s2s_input_ids)] = s2s_input_ids
s2s_target_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[1:]
s2s_prev_input_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[:-1]
ret_batch = {
"net_input": {
"src_tokens": mlm_source_ids.astype(np.int64),
},
"target": mlm_target_ids.astype(np.int64),
"nsentences": batch_size,
"ntokens": sum([len(x[0]) for x in batch]),
}
return ret_batch
padded_batches = iterators.MapIterator(batches, collate)
return padded_batches
def _prepare(self, _random, doc):
nonmasked_tokens, masked_tokens = self._mask_lm(_random, doc)
nonnoise_spans, noise_spans = self._span_corruption(_random, doc)
return nonmasked_tokens, masked_tokens, nonnoise_spans, noise_spans
def _mask_lm(self, _random, doc):
def mask_tokens():
return "<mask>"
length = len(doc)
mask_tokens_num = int(length * self.args.mask_prob)
mask_tokens_num = min(max(mask_tokens_num, 1), length - 1)
possible_mask_positions = _random.sample(range(length), k=mask_tokens_num)
possible_mask_positions = sorted(possible_mask_positions)
nonmasked_tokens = copy.deepcopy(doc)
masked_tokens = [self.dictionary.pad() for _ in range(len(doc))]
for position in possible_mask_positions:
# masked_tokens.append(nonmasked_tokens[position])
masked_tokens[position] = nonmasked_tokens[position]
nonmasked_tokens[position] = self.dictionary.indices[mask_tokens()]
return nonmasked_tokens, masked_tokens
def _span_corruption(self, _random, doc):
def mask_tokens(i):
return f"<mask_{i}>"
length = len(doc)
noise_tokens_num = int(length * self.args.mask_prob)
noise_tokens_num = min(max(noise_tokens_num, 1), length - 1)
noise_spans_num = int(noise_tokens_num / self.args.span_length)
noise_spans_num = max(noise_spans_num, 1)
nonnoise_tokens_num = length - noise_tokens_num
if noise_spans_num == 1:
noise_split_positions = [0, noise_tokens_num]
else:
possible_split_positions = list(range(1, noise_tokens_num))
_random.shuffle(possible_split_positions)
noise_split_positions = sorted(
possible_split_positions[: noise_spans_num - 1]
)
noise_split_positions = [0] + noise_split_positions + [noise_tokens_num]
possible_insert_positions = list(range(nonnoise_tokens_num))
_random.shuffle(possible_insert_positions)
noise_insert_positions = sorted(possible_insert_positions[:noise_spans_num])
nonnoise_spans, noise_spans = [], []
last_end = 0
for i in range(noise_spans_num):
start_pos = noise_insert_positions[i] + noise_split_positions[i]
end_pos = noise_insert_positions[i] + noise_split_positions[i + 1]
mask_id = self.dictionary.indices[mask_tokens(i)]
if getattr(self.args, "remove_target_sentinel", False):
noise_spans.append(doc[start_pos:end_pos])
else:
noise_spans.append([mask_id] + doc[start_pos:end_pos])
if getattr(self.args, "remove_source_sentinel", False):
nonnoise_spans.extend(doc[last_end:start_pos])
else:
nonnoise_spans.extend(doc[last_end:start_pos] + [mask_id])
last_end = end_pos
nonnoise_spans.extend(doc[last_end:])
noise_spans = sum(noise_spans, [])
return nonnoise_spans, noise_spans
def _read_from_files(self, source_file, source_lang):
# data = []
file_path = os.path.join(self.data_dir, source_file)
if not os.path.exists(file_path):
print("| file {} not exists".format(file_path), flush=True)
return iter([]) # skip bad file
with open(file_path, "r", encoding="utf8") as f:
lines = f.read().strip().split("\n")
doc = [self.dictionary.bos()]
for line in lines:
if line == "":
if self.sample_break_mode == "complete_doc":
# data.append(doc)
yield doc
doc = [self.dictionary.bos()]
continue
tokenized_line = self.tokenizer.EncodeAsPieces(line)
tokenized_id = [
self.dictionary.index(token) for token in tokenized_line
] + [self.dictionary.eos_index]
if len(tokenized_id) > self.tokens_per_sample:
continue
if len(doc) + len(tokenized_id) > self.tokens_per_sample:
# data.append(doc)
yield doc
doc = [self.dictionary.bos()]
doc.extend(tokenized_id)
if len(doc) > 1 and len(doc) <= self.tokens_per_sample:
# data.append(doc)
yield doc
# return data
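# ---------------------------------------------------------------------------
# Notes added for illustration (not part of the original file).
# MLMLoader is normally constructed by PLMTask.get_batch_iterator(), which
# forwards the PretrainingConfig, the Namespace built by load_dataset()
# (fields: data, data_dir, shuffle), the fairseq Dictionary containing the
# <mask>/<mask_i> symbols, and the sentencepiece tokenizer.
# Each collated batch has the layout produced by collate() above:
#     batch["net_input"]["src_tokens"]  int64 (batch, src_len)  masked inputs
#     batch["target"]                   int64 (batch, src_len)  MLM labels
#     batch["nsentences"], batch["ntokens"]  scalar bookkeeping fields
# ---------------------------------------------------------------------------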
| APAC-SCALE-master | examples/fairseq/tasks/data/mlm_loader.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import warnings
import torch
import torch.distributed as dist
from fairseq.utils import multi_tensor_l2norm_available, multi_tensor_total_norm
@torch.no_grad()
def clip_grad_norm_(
params, max_norm, moe_expert_count, aggregate_norm_fn=None
) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
params = list(filter(grad_exists, params))
grads, expert_grads, base_expert_grads, sharded_grads = [], [], [], []
    # torch.distributed exposes get_world_size(), not get_global_world_size();
    # fall back to 1 when the default process group is not initialized.
    world_size = dist.get_world_size() if dist.is_initialized() else 1
    denom = math.sqrt(max(world_size, moe_expert_count))
for p in params:
if hasattr(p, "expert"):
expert_grads.append(p.grad.detach() / denom)
elif hasattr(p, "base_expert"):
base_expert_grads.append(p.grad.detach())
elif hasattr(p, "_is_sharded"):
sharded_grads.append(p.grad.detach())
else:
grads.append(p.grad.detach())
if len(grads) == 0:
if len(params) > 0:
total_norm = params[0].new_tensor(0.0)
else:
total_norm = torch.tensor(0.0)
elif len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
# calculate split_norm and all_reduce with other workers
norms = [total_norm]
for split_grads in [expert_grads, sharded_grads]:
if len(split_grads) == 0:
continue
split_norm = torch.norm(
torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in split_grads])
)
if dist.is_initialized():
split_norm.pow_(2)
dist.all_reduce(split_norm)
split_norm.sqrt_()
norms.append(split_norm)
if len(norms) > 1:
total_norm = torch.norm(torch.stack(norms))
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + expert_grads + sharded_grads + base_expert_grads:
g.mul_(clip_coef)
return total_norm
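# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file).
# With a plain (non-MoE, non-sharded) model and moe_expert_count=0 this
# reduces to ordinary global-norm gradient clipping over all parameters.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    loss = model(torch.randn(2, 4)).sum()
    loss.backward()
    total_norm = clip_grad_norm_(
        model.parameters(), max_norm=0.1, moe_expert_count=0
    )
    print(float(total_norm))  # gradient norm before clipping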
| APAC-SCALE-master | examples/fairseq/utils/sparse_clip.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| APAC-SCALE-master | examples/fairseq/utils/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
from fairseq import distributed_utils, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding
from fairseq.modules import PositionalEmbedding
from omegaconf import II
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
DEFAULT_MAX_TARGET_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class LanguageConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
relu_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
decoder_embed_dim: int = field(
default=512, metadata={"help": "decoder embedding dimension"}
)
decoder_output_dim: int = field(
default=512, metadata={"help": "decoder output dimension"}
)
decoder_input_dim: int = field(
default=512, metadata={"help": "decoder input dimension"}
)
decoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"})
decoder_attention_heads: int = field(
default=8, metadata={"help": "num decoder attention heads"}
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
moe_freq: int = field(
default=0,
metadata={"help": "Frequency at which we insert MoE Transformer layers"},
)
moe_expert_count: int = field(
default=0, metadata={"help": "Number of experts in each MoE Layer"}
)
moe_gating_use_fp32: bool = field(
default=False,
metadata={"help": "Use FP32 computations in MoE top2 gating function"},
)
moe_second_expert_policy: str = field(
default="sampling",
metadata={"help": "policy for second expert, options: all/sampling/random"},
)
moe_normalize_gate_prob_before_dropping: bool = field(
default=False,
metadata={
"help": "whether to normalize gate probs before or after dropping experts for capacity and randomization"
},
)
moe_expert_ffn_dim: Optional[int] = field(
default=None, metadata={"help": "MoE expert FFN dimension"}
)
moe_top1_expert: Optional[bool] = field(
default=False, metadata={"help": "Use top1 gate instead of top2"}
)
moe_eval_capacity_token_fraction: Optional[float] = field(
default=0.25,
metadata={
"help": (
"Default: 0.25, Fraction of tokens as capacity during validation, "
"if set to negative, use same as training. range: (0.0, 1.0]."
)
},
)
moe_normalize_expert_grad: Optional[str] = field(
default="world_size",
metadata={
"help": "Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'"
},
)
record_a2a_perf_stats: Optional[bool] = field(
default=False,
metadata={"help": "records all to all perf stats during distributed training"},
)
dummy_a2a: Optional[bool] = field(
default=False,
metadata={
"help": "By passes all to all during distributed training by returning the input buffer as output"
},
)
moe_batch_prioritized_routing: Optional[bool] = field(
default=False,
metadata={
"help": "if true orders token by the gate prob before capacity dropping."
},
)
use_xmoe: Optional[bool] = field(
default=False,
)
# options from other parts of the config
add_bos_token: bool = II("task.add_bos_token")
tokens_per_sample: int = II("task.tokens_per_sample")
max_target_positions: Optional[int] = II("task.max_target_positions")
tpu: bool = II("common.tpu")
memory_efficient_fp16: bool = II("common.memory_efficient_fp16")
fp16: bool = II("common.fp16")
fp16_no_flatten_grads: bool = II("common.fp16_no_flatten_grads")
ddp_backend: str = II("distributed_training.ddp_backend")
world_size: int = II("distributed_training.distributed_world_size")
distributed_rank: int = II("distributed_training.distributed_rank")
ddp_rank: int = II("distributed_training.distributed_rank")
deepnorm: Optional[bool] = field(
default=False,
)
subln: Optional[bool] = field(
default=False,
)
rel_pos_buckets: Optional[int] = field(
default=0,
)
max_rel_pos: Optional[int] = field(
default=0,
)
xpos_rel_pos: Optional[bool] = field(
default=False,
)
xpos_scale_base: Optional[int] = field(
default=512,
)
@register_model("lm", dataclass=LanguageConfig)
class LanguageModel(FairseqLanguageModel):
def __init__(self, args, decoder):
self.args = args
super().__init__(decoder)
@classmethod
def build_model(cls, args, task):
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_embed_dim
)
embed_positions = (
PositionalEmbedding(
args.max_target_positions,
args.decoder_embed_dim,
task.dictionary.pad(),
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
embed_tokens.weight.shape[1],
embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, len(task.dictionary), bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
if getattr(args, "moe_freq", 0) > 0 and (
getattr(args, "fp16", False)
and not getattr(args, "memory_efficient_fp16", False)
and getattr(args, "ddp_backend", None) != "fully_sharded"
):
assert (
args.fp16_no_flatten_grads
), "If training moe models, set --fp16-no-flatten-grads to calculate correct gradnorm"
args.ddp_rank = distributed_utils.get_data_parallel_rank()
config = DecoderConfig()
config.override(args)
decoder = LMDecoder(
config,
embed_tokens,
embed_positions,
output_projection,
is_encoder_decoder=False,
dictionary=task.dictionary,
)
return cls(args, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
return Embedding(len(dictionary), embed_dim, dictionary.pad())
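# LMDecoder adapts the torchscale Decoder to fairseq's incremental decoding API:
# it derives the self-attention padding mask from pad tokens, exposes the
# positional-embedding limit through max_positions(), and reorders cached
# incremental state when beam search permutes the batch of hypotheses.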
class LMDecoder(Decoder, FairseqIncrementalDecoder):
def forward(self, src_tokens, **kwargs):
self_attn_padding_mask = src_tokens.eq(self.dictionary.pad())
return super().forward(src_tokens, self_attn_padding_mask, **kwargs)
def max_positions(self):
return self.embed_positions.max_positions
def reorder_incremental_state_scripting(
self,
incremental_state,
new_order,
):
for module in incremental_state:
for key in incremental_state[module]:
result = incremental_state[module][key].index_select(0, new_order)
incremental_state[module][key] = result
@register_model_architecture("lm", "lm_base")
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.base_layers = getattr(args, "base_layers", 0)
args.base_sublayers = getattr(args, "base_sublayers", 1)
args.base_shuffle = getattr(args, "base_shuffle", False)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
| APAC-SCALE-master | examples/fairseq/models/language_modeling.py |
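A quick aside on the tied output projection used in LanguageModel.build_model above: when share_decoder_input_output_embed is set, the output layer reuses the token-embedding matrix rather than allocating a separate vocab-sized weight. A minimal, self-contained sketch of that pattern (shapes and names here are illustrative, not part of the repo):
import torch

vocab_size, embed_dim = 1000, 512
embed_tokens = torch.nn.Embedding(vocab_size, embed_dim, padding_idx=1)
output_projection = torch.nn.Linear(embed_dim, vocab_size, bias=False)
output_projection.weight = embed_tokens.weight  # tie: a single shared parameter tensor

tokens = torch.randint(0, vocab_size, (2, 8))
hidden = embed_tokens(tokens)              # (batch, seq, embed_dim)
logits = output_projection(hidden)         # (batch, seq, vocab_size)
assert logits.shape == (2, 8, vocab_size)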
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import argparse
import importlib
import os
MODEL_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
ARCH_MODEL_INV_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("models." + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group("Additional command-line arguments")
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
| APAC-SCALE-master | examples/fairseq/models/__init__.py |
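The registry snippet above imports every model file at package load time so that its @register_model / @register_model_architecture decorators run as a side effect. A minimal sketch of that discovery pattern, using a hypothetical package name my_models (not part of the repo):
import importlib
import os

def import_all_models(package_dir: str, package_name: str = "my_models") -> None:
    # Import every non-private module or subpackage so that any registration
    # decorators defined inside it execute as an import side effect.
    for entry in os.listdir(package_dir):
        path = os.path.join(package_dir, entry)
        if entry.startswith(("_", ".")):
            continue
        if entry.endswith(".py") or os.path.isdir(path):
            name = entry[:-3] if entry.endswith(".py") else entry
            importlib.import_module(f"{package_name}.{name}")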
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Optional, Tuple
import torch
from fairseq import distributed_utils, utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding
from fairseq.modules import PositionalEmbedding
from torch import Tensor
from torchscale.architecture.config import DecoderConfig, EncoderConfig
from torchscale.architecture.encoder import Encoder
from .language_modeling import LMDecoder as MTDecoder
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
@register_model("mt")
class TranslationModel(FairseqEncoderDecoderModel):
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension (extra linear layer '
                                 'if different from decoder embed dim)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
                            help="if True, don't scale embeddings")
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
parser.add_argument('--offload-activations', action='store_true',
                            help='checkpoint activations at each layer, then offload them to CPU. Sets --checkpoint-activations.')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
'--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=(
'minimum number of params for a layer to be wrapped with FSDP() when '
'training with --ddp-backend=fully_sharded. Smaller values will '
'improve memory efficiency, but may make torch.distributed '
'communication less efficient due to smaller input sizes. This option '
'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
'--offload-activations are passed.'
)
)
# args for mixture-of-expert layers
parser.add_argument('--moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer layers')
parser.add_argument('--encoder-moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer encoder layers')
parser.add_argument('--decoder-moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer decoder layers')
parser.add_argument('--moe-expert-count', type=int, metavar='D', default=0,
help='Number of experts in each MoE Layer')
parser.add_argument('--moe-gating-use-fp32', default=False, action='store_true',
help="Use FP32 computations in MoE top2 gating function")
parser.add_argument('--moe-second-expert-policy', type=str, default='sampling',
help="policy for second expert, options: all/sampling/random")
parser.add_argument(
'--moe-normalize-gate-prob-before-dropping', default=False, action='store_true',
help=(
"whether to normalize gate probs before or after dropping experts "
"for capacity and randomization"
)
)
parser.add_argument('--moe-expert-ffn-dim', type=int, default=0,
help="MoE Expert FFN dimension")
parser.add_argument('--moe-top1-expert', default=False, action='store_true',
help="Use top1 gate instead of top2")
parser.add_argument(
'--moe-eval-capacity-token-fraction', type=float, default=0.25,
help=(
"Fraction of tokens as capacity during validation"
"if set to negative, use same as training. range: (0.0, 1.0]."
)
)
parser.add_argument('--moe-normalize-expert-grad', type=str, default='world_size',
help="Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'")
parser.add_argument('--use-moe-pad-mask', default=False, action='store_true',
help="Don't route padding tokens to any expert")
parser.add_argument('--use-xmoe', default=False, action='store_true',
help="Enable X-Moe")
parser.add_argument('--freeze-moe', default=False, action='store_true',
help="Freeze MoE Params")
parser.add_argument('--deepnorm', default=False, action='store_true',
help="Enable DeepNorm")
parser.add_argument('--subln', default=False, action='store_true',
help="Enable SubLN")
parser.add_argument('--pretrained-dense-mt-model-path', type=str, default='')
# args for pseudo-MoE layers
parser.add_argument('--alternate-ffn-embed-dim', type=int, default=0,
help="FFN embed dim of alternate pseudo-MoE blocks")
parser.add_argument('--rel-pos-buckets', type=int, default=0,
help='')
parser.add_argument('--max-rel-pos', type=int, default=0,
help='')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
args.ddp_rank = distributed_utils.get_data_parallel_rank()
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
encoder_embed_positions = (
PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
src_dict.pad(),
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
decoder_embed_positions = (
PositionalEmbedding(
args.max_target_positions,
args.decoder_embed_dim,
tgt_dict.pad(),
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
decoder_embed_tokens.weight.shape[1],
decoder_embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = decoder_embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, len(tgt_dict), bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
encoder = cls.build_encoder(
args,
encoder_embed_tokens,
encoder_embed_positions,
src_dict,
)
decoder = cls.build_decoder(
args,
decoder_embed_tokens,
decoder_embed_positions,
output_projection,
tgt_dict,
)
if not args.share_all_embeddings:
min_params_to_wrap = getattr(
args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
)
# fsdp_wrap is a no-op when --ddp-backend != fully_sharded
encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, embed_tokens, embed_positions, dictionary):
config = EncoderConfig()
config.override(args)
return MTEncoder(
config,
embed_tokens,
embed_positions,
is_encoder_decoder=True,
dictionary=dictionary,
)
@classmethod
def build_decoder(
cls, args, embed_tokens, embed_positions, output_projection, dictionary
):
config = DecoderConfig()
config.override(args)
return MTDecoder(
config,
embed_tokens,
embed_positions,
output_projection,
is_encoder_decoder=True,
dictionary=dictionary,
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = False,
features_only: bool = False,
**kwargs
):
encoder_out = self.encoder(src_tokens, return_all_hiddens=return_all_hiddens)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
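# MTEncoder wraps the torchscale Encoder with fairseq's FairseqEncoder interface:
# it builds the encoder padding mask from pad tokens and implements
# reorder_encoder_out() so cached encoder outputs can follow beam-search
# reordering of the batch.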
class MTEncoder(Encoder, FairseqEncoder):
def forward(self, src_tokens, **kwargs):
self_attn_padding_mask = src_tokens.eq(self.dictionary.pad())
return super().forward(
src_tokens=src_tokens, encoder_padding_mask=self_attn_padding_mask, **kwargs
)
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = encoder_out["encoder_out"].index_select(0, new_order)
new_encoder_embedding = encoder_out["encoder_embedding"].index_select(
0, new_order
)
new_encoder_padding_mask = encoder_out["encoder_padding_mask"].index_select(
0, new_order
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(0, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask,
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
}
def max_positions(self):
return self.embed_positions.max_positions
@register_model_architecture("mt", "mt_base")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.is_moe = getattr(args, "is_moe", False)
args.selected_expert_count = getattr(args, "selected_expert_count", 2)
| APAC-SCALE-master | examples/fairseq/models/machine_translation.py |
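For context on reorder_encoder_out above: during beam search, fairseq periodically permutes cached encoder tensors with index_select so that each surviving hypothesis keeps pointing at its own source sentence. A minimal sketch of that pattern with batch-first toy tensors (shapes are illustrative only):
import torch

batch, src_len, dim = 4, 7, 16
encoder_out = torch.randn(batch, src_len, dim)
padding_mask = torch.zeros(batch, src_len, dtype=torch.bool)

# new_order[i] = index of the old batch element hypothesis i now corresponds to.
new_order = torch.tensor([2, 2, 0, 3])

reordered_out = encoder_out.index_select(0, new_order)
reordered_mask = padding_mask.index_select(0, new_order)
assert torch.equal(reordered_out[1], encoder_out[2])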
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model, register_model_architecture
from fairseq.models.squad import SQuADHead
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding
from fairseq.modules import PositionalEmbedding
from omegaconf import II
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from torchscale.architecture.config import EncoderConfig
from .machine_translation import MTEncoder as Encoder
DEFAULT_MAX_SOURCE_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class BertConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
encoder_embed_dim: int = field(
default=512, metadata={"help": "encoder embedding dimension"}
)
encoder_output_dim: int = field(
default=512, metadata={"help": "encoder output dimension"}
)
encoder_input_dim: int = field(
default=512, metadata={"help": "encoder input dimension"}
)
encoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_layers: int = field(default=6, metadata={"help": "num encoder layers"})
encoder_attention_heads: int = field(
default=8, metadata={"help": "num encoder attention heads"}
)
encoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each encoder block"}
)
no_encoder_final_norm: bool = field(
default=False,
metadata={"help": "don't add an extra layernorm after the last encoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_encoder_input_output_embed: bool = field(
default=False, metadata={"help": "share encoder input and output embeddings"}
)
encoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the encoder"},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
encoder_layerdrop: float = field(
default=0.0, metadata={"help": "LayerDrop probability for encoder"}
)
encoder_layers_to_keep: Optional[str] = field(
default=None,
metadata={
"help": "which layers to *keep* when pruning as a comma-separated list"
},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
max_source_positions: int = field(
default=1024, metadata={"help": "max source positions"}
)
pooler_activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use for pooler layer"}
)
pooler_dropout: float = field(
default=0.0,
metadata={"help": "dropout probability in the masked_lm pooler layers"},
)
# options from other parts of the config
# add_bos_token: bool = II("task.add_bos_token")
# tokens_per_sample: int = II("task.tokens_per_sample")
tpu: bool = II("common.tpu")
rel_pos_buckets: int = field(default=0, metadata={"help": ""})
max_rel_pos: int = field(default=0, metadata={"help": ""})
use_xmoe: Optional[bool] = field(
default=False,
)
moe_freq: int = field(
default=0,
metadata={"help": "Frequency at which we insert MoE Transformer layers"},
)
moe_expert_count: int = field(
default=0, metadata={"help": "Number of experts in each MoE Layer"}
)
moe_gating_use_fp32: bool = field(
default=False,
metadata={"help": "Use FP32 computations in MoE top2 gating function"},
)
moe_second_expert_policy: str = field(
default="sampling",
metadata={"help": "policy for second expert, options: all/sampling/random"},
)
moe_normalize_gate_prob_before_dropping: bool = field(
default=False,
metadata={
"help": "whether to normalize gate probs before or after dropping experts for capacity and randomization"
},
)
moe_expert_ffn_dim: Optional[int] = field(
default=None, metadata={"help": "MoE expert FFN dimension"}
)
moe_top1_expert: Optional[bool] = field(
default=False, metadata={"help": "Use top1 gate instead of top2"}
)
moe_eval_capacity_token_fraction: Optional[float] = field(
default=0.25,
metadata={
"help": (
"Default: 0.25, Fraction of tokens as capacity during validation, "
"if set to negative, use same as training. range: (0.0, 1.0]."
)
},
)
moe_normalize_expert_grad: Optional[str] = field(
default="world_size",
metadata={
"help": "Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'"
},
)
record_a2a_perf_stats: Optional[bool] = field(
default=False,
metadata={"help": "records all to all perf stats during distributed training"},
)
dummy_a2a: Optional[bool] = field(
default=False,
metadata={
"help": "By passes all to all during distributed training by returning the input buffer as output"
},
)
moe_batch_prioritized_routing: Optional[bool] = field(
default=False,
metadata={
"help": "if true orders token by the gate prob before capacity dropping."
},
)
ddp_rank: int = II("distributed_training.distributed_rank")
deepnorm: Optional[bool] = field(
default=False,
)
subln: Optional[bool] = field(
default=False,
)
@register_model("mlm", dataclass=BertConfig)
class BertModel(BaseFairseqModel):
def __init__(self, args, encoder):
super().__init__()
self.args = args
self.encoder = encoder
self.padding_idx = self.encoder.embed_tokens.padding_idx
self.classification_heads = nn.ModuleDict()
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
args.max_source_positions = getattr(
args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.dictionary, args.encoder_embed_dim
)
embed_positions = (
PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
task.dictionary.pad(),
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
lm_head = cls.build_lm_head(
args,
args.encoder_embed_dim,
len(task.dictionary),
args.activation_fn,
weight=embed_tokens.weight,
)
config = EncoderConfig()
config.override(args)
encoder = Encoder(
config,
embed_tokens=embed_tokens,
embed_positions=embed_positions,
output_projection=lm_head,
is_encoder_decoder=False,
dictionary=task.dictionary,
)
return cls(args, encoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad())
return embed_tokens
@classmethod
def build_lm_head(cls, args, embed_dim, output_dim, activation_fn, weight):
return LMHead(embed_dim, output_dim, activation_fn, weight)
def output_layer(self, features, masked_tokens=None):
return self.encoder.output_projection(features, masked_tokens=masked_tokens)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = ClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
def register_question_answering_head(self, name, num_classes=None):
self.classification_heads[name] = SQuADHead(
self.args.encoder_embed_dim,
)
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
# upgrade children modules
super().upgrade_state_dict_named(state_dict, name)
# Handle new classification heads present in the state dict.
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0] # noqa: E203
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
def get_normalized_probs_scriptable(
self,
net_output,
log_probs,
        sample=None,
):
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1)
else:
return utils.softmax(logits, dim=-1)
def forward(
self,
src_tokens=None,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
masked_tokens=None,
**kwargs
):
encoder_out = self.encoder(
src_tokens, features_only=True, return_all_hiddens=return_all_hiddens
)
x, extra = encoder_out["encoder_out"], encoder_out
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
elif not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
return x, extra
class ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x.float()).type_as(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class LMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the masked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x.float()).type_as(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
@register_model_architecture("mlm", "mlm_base")
def base_unilm_architecture(args):
if hasattr(args, "encoder_final_norm"):
args.no_encoder_final_norm = not args.encoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
# args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", True
)
args.encoder_output_dim = getattr(
args, "encoder_output_dim", args.encoder_embed_dim
)
args.encoder_input_dim = getattr(args, "encoder_input_dim", args.encoder_embed_dim)
# Model training is not stable without this
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.no_encoder_final_norm = getattr(args, "no_encoder_final_norm", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
| APAC-SCALE-master | examples/fairseq/models/bert.py |
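A note on LMHead above: during masked-LM training only the masked positions are projected back to the vocabulary, which avoids computing vocab-sized logits for every token. A minimal sketch of that select-then-project step (all shapes are toy values, not from the repo):
import torch
import torch.nn.functional as F

batch, seq, embed_dim, vocab = 2, 6, 32, 100
features = torch.randn(batch, seq, embed_dim)
weight = torch.randn(vocab, embed_dim)       # tied or standalone output embedding
bias = torch.zeros(vocab)

masked_tokens = torch.zeros(batch, seq, dtype=torch.bool)
masked_tokens[0, 1] = masked_tokens[1, 4] = True

selected = features[masked_tokens, :]         # (num_masked, embed_dim)
logits = F.linear(selected, weight) + bias    # (num_masked, vocab)
assert logits.shape == (int(masked_tokens.sum()), vocab)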
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import MoECriterion, register_criterion, MoECriterionConfig
@register_criterion("masked_lm_moe_cross_entropy", dataclass=MoECriterionConfig)
class MaskedLMMoECrossEntropyCriterion(MoECriterion):
def compute_inner_loss(self, model, sample, reduce=True):
masked_tokens = sample["target"].ne(self.padding_idx)
sample_size = masked_tokens.int().sum()
masked_tokens = torch.where(
masked_tokens.any(),
masked_tokens,
masked_tokens.new([True]),
)
net_output = model(**sample["net_input"], masked_tokens=masked_tokens)
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output)
if masked_tokens is not None:
target = target[masked_tokens]
nll_loss = F.nll_loss(
lprobs,
target.view(-1),
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
logging_output = {
"inner_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return net_output, nll_loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
MaskedLMMoECrossEntropyCriterion.reduce_moe_metrics(logging_outputs)
loss_sum = sum(log.get("inner_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"inner_loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["inner_loss"].avg)
            )
| APAC-SCALE-master | examples/fairseq/criterions/masked_lm_moe.py |
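The criterion above computes the masked-LM loss only over the masked positions and sums the NLL so it can later be normalized by the number of masked tokens. A minimal sketch of that computation with toy tensors in place of real model output (padding index 1 is just an example):
import torch
import torch.nn.functional as F

padding_idx = 1
target = torch.tensor([[5, 1, 7], [1, 9, 1]])             # 1 marks padding
lprobs = torch.log_softmax(torch.randn(2, 3, 16), dim=-1)

masked_tokens = target.ne(padding_idx)                     # positions that count
sample_size = masked_tokens.int().sum()

nll_loss = F.nll_loss(
    lprobs[masked_tokens],        # (num_masked, vocab)
    target[masked_tokens],        # (num_masked,)
    ignore_index=padding_idx,
    reduction="sum",
)
mean_nll = nll_loss / sample_size  # per-masked-token loss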