python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from torch import optim
import torch.nn.functional as F
from . import networks, criterion
from torch.autograd import Variable
class AudioVisualModel(torch.nn.Module):
def name(self):
return 'AudioVisualModel'
def __init__(self, nets, opt):
super(AudioVisualModel, self).__init__()
self.opt = opt
#initialize model
self.net_visual, self.net_audio = nets
def forward(self, input, volatile=False):
visual_input = input['frame']
audio_diff = input['audio_diff_spec']
audio_mix = input['audio_mix_spec']
audio_gt = Variable(audio_diff[:,:,:-1,:], requires_grad=False)
input_spectrogram = Variable(audio_mix, requires_grad=False, volatile=volatile)
visual_feature = self.net_visual(Variable(visual_input, requires_grad=False, volatile=volatile))
mask_prediction = self.net_audio(input_spectrogram, visual_feature)
#complex masking to obtain the predicted spectrogram
spectrogram_diff_real = input_spectrogram[:,0,:-1,:] * mask_prediction[:,0,:,:] - input_spectrogram[:,1,:-1,:] * mask_prediction[:,1,:,:]
spectrogram_diff_img = input_spectrogram[:,0,:-1,:] * mask_prediction[:,1,:,:] + input_spectrogram[:,1,:-1,:] * mask_prediction[:,0,:,:]
binaural_spectrogram = torch.cat((spectrogram_diff_real.unsqueeze(1), spectrogram_diff_img.unsqueeze(1)), 1)
output = {'mask_prediction': mask_prediction, 'binaural_spectrogram': binaural_spectrogram, 'audio_gt': audio_gt}
return output
| 2.5D-Visual-Sound-main | models/audioVisual_model.py |
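A minimal sketch of the complex-mask multiplication performed in `AudioVisualModel.forward` above, on stand-in tensors. The batch size is illustrative; 257 frequency bins and 64 frames are what an STFT with n_fft=512 and hop_length=160 produces on the short clips used elsewhere in this repo, and the mask has one fewer frequency bin, matching the `[:-1]` crop in `forward`.

```python
import torch

mix = torch.randn(2, 2, 257, 64)    # stands in for input_spectrogram (real/imag channels)
mask = torch.randn(2, 2, 256, 64)   # stands in for mask_prediction

# Complex multiplication (a + bi)(c + di): real part ac - bd, imaginary part ad + bc.
real = mix[:, 0, :-1, :] * mask[:, 0] - mix[:, 1, :-1, :] * mask[:, 1]
imag = mix[:, 0, :-1, :] * mask[:, 1] + mix[:, 1, :-1, :] * mask[:, 0]
binaural = torch.cat((real.unsqueeze(1), imag.unsqueeze(1)), dim=1)
print(binaural.shape)  # torch.Size([2, 2, 256, 64])
```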
2.5D-Visual-Sound-main | models/__init__.py |
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
def unet_conv(input_nc, output_nc, norm_layer=nn.BatchNorm2d):
downconv = nn.Conv2d(input_nc, output_nc, kernel_size=4, stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(output_nc)
return nn.Sequential(*[downconv, downnorm, downrelu])
def unet_upconv(input_nc, output_nc, outermost=False, norm_layer=nn.BatchNorm2d):
upconv = nn.ConvTranspose2d(input_nc, output_nc, kernel_size=4, stride=2, padding=1)
uprelu = nn.ReLU(True)
upnorm = norm_layer(output_nc)
if not outermost:
return nn.Sequential(*[upconv, upnorm, uprelu])
else:
return nn.Sequential(*[upconv, nn.Sigmoid()])
def create_conv(input_channels, output_channels, kernel, paddings, batch_norm=True, Relu=True, stride=1):
model = [nn.Conv2d(input_channels, output_channels, kernel, stride = stride, padding = paddings)]
if(batch_norm):
model.append(nn.BatchNorm2d(output_channels))
if(Relu):
model.append(nn.ReLU())
return nn.Sequential(*model)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
class VisualNet(nn.Module):
def __init__(self, original_resnet):
super(VisualNet, self).__init__()
layers = list(original_resnet.children())[0:-2]
self.feature_extraction = nn.Sequential(*layers) #features before conv1x1
def forward(self, x):
x = self.feature_extraction(x)
return x
class AudioNet(nn.Module):
def __init__(self, ngf=64, input_nc=2, output_nc=2):
super(AudioNet, self).__init__()
#initialize layers
self.audionet_convlayer1 = unet_conv(input_nc, ngf)
self.audionet_convlayer2 = unet_conv(ngf, ngf * 2)
self.audionet_convlayer3 = unet_conv(ngf * 2, ngf * 4)
self.audionet_convlayer4 = unet_conv(ngf * 4, ngf * 8)
self.audionet_convlayer5 = unet_conv(ngf * 8, ngf * 8)
self.audionet_upconvlayer1 = unet_upconv(1296, ngf * 8) #1296 (audio-visual feature) = 784 (visual feature) + 512 (audio feature)
self.audionet_upconvlayer2 = unet_upconv(ngf * 16, ngf *4)
self.audionet_upconvlayer3 = unet_upconv(ngf * 8, ngf * 2)
self.audionet_upconvlayer4 = unet_upconv(ngf * 4, ngf)
self.audionet_upconvlayer5 = unet_upconv(ngf * 2, output_nc, True) #outermost layer uses a sigmoid to bound the mask
self.conv1x1 = create_conv(512, 8, 1, 0) #reduce dimension of extracted visual features
def forward(self, x, visual_feat):
audio_conv1feature = self.audionet_convlayer1(x)
audio_conv2feature = self.audionet_convlayer2(audio_conv1feature)
audio_conv3feature = self.audionet_convlayer3(audio_conv2feature)
audio_conv4feature = self.audionet_convlayer4(audio_conv3feature)
audio_conv5feature = self.audionet_convlayer5(audio_conv4feature)
visual_feat = self.conv1x1(visual_feat)
visual_feat = visual_feat.view(visual_feat.shape[0], -1, 1, 1) #flatten visual feature
visual_feat = visual_feat.repeat(1, 1, audio_conv5feature.shape[-2], audio_conv5feature.shape[-1]) #tile visual feature
audioVisual_feature = torch.cat((visual_feat, audio_conv5feature), dim=1)
audio_upconv1feature = self.audionet_upconvlayer1(audioVisual_feature)
audio_upconv2feature = self.audionet_upconvlayer2(torch.cat((audio_upconv1feature, audio_conv4feature), dim=1))
audio_upconv3feature = self.audionet_upconvlayer3(torch.cat((audio_upconv2feature, audio_conv3feature), dim=1))
audio_upconv4feature = self.audionet_upconvlayer4(torch.cat((audio_upconv3feature, audio_conv2feature), dim=1))
mask_prediction = self.audionet_upconvlayer5(torch.cat((audio_upconv4feature, audio_conv1feature), dim=1)) * 2 - 1
return mask_prediction
| 2.5D-Visual-Sound-main | models/networks.py |
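A hedged usage sketch of the two networks above, checking the 1296 = 784 + 512 audio-visual bottleneck noted in the comment on `audionet_upconvlayer1`. It assumes the repo's `models` package is importable and uses torchvision's resnet18 as the visual backbone; the input sizes are illustrative.

```python
import torch
import torchvision
from models.networks import VisualNet, AudioNet  # assumes the repo root is on sys.path

visual_net = VisualNet(torchvision.models.resnet18())
audio_net = AudioNet(ngf=64, input_nc=2, output_nc=2)

frames = torch.randn(1, 3, 224, 448)   # one RGB frame at the 224x448 crop used by the dataset
mix_spec = torch.randn(1, 2, 257, 64)  # mixture spectrogram (real/imag channels)

visual_feat = visual_net(frames)         # (1, 512, 7, 14)
mask = audio_net(mix_spec, visual_feat)  # (1, 2, 256, 64), values in [-1, 1]
print(visual_feat.shape, mask.shape)
```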
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class BaseLoss(nn.Module):
def __init__(self):
super(BaseLoss, self).__init__()
def forward(self, preds, targets, weight=None):
if isinstance(preds, list):
N = len(preds)
if weight is None:
weight = preds[0].new_ones(N)  # one weight per prediction, so weight[n] below indexes safely
errs = [self._forward(preds[n], targets[n], weight[n])
for n in range(N)]
err = torch.mean(torch.stack(errs))
elif isinstance(preds, torch.Tensor):
if weight is None:
weight = preds.new_ones(1)
err = self._forward(preds, targets, weight)
return err
class L1Loss(BaseLoss):
def __init__(self):
super(L1Loss, self).__init__()
def _forward(self, pred, target, weight):
return torch.mean(weight * torch.abs(pred - target))
class L2Loss(BaseLoss):
def __init__(self):
super(L2Loss, self).__init__()
def _forward(self, pred, target, weight):
return torch.mean(weight * torch.pow(pred - target, 2))
class MSELoss(BaseLoss):
def __init__(self):
super(MSELoss, self).__init__()
def _forward(self, pred, target, weight=None):
return F.mse_loss(pred, target)
class BCELoss(BaseLoss):
def __init__(self):
super(BCELoss, self).__init__()
def _forward(self, pred, target, weight):
return F.binary_cross_entropy(pred, target, weight=weight)
class BCEWithLogitsLoss(BaseLoss):
def __init__(self):
super(BCEWithLogitsLoss, self).__init__()
def _forward(self, pred, target, weight):
return F.binary_cross_entropy_with_logits(pred, target, weight=weight)
| 2.5D-Visual-Sound-main | models/criterion.py |
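A short usage sketch of the loss wrappers above (assuming `models.criterion` is importable); `BaseLoss.forward` falls back to a weight of 1 when none is given.

```python
import torch
from models.criterion import L2Loss  # assumes the repo root is on sys.path

criterion = L2Loss()
pred = torch.randn(4, 2, 256, 64)
target = torch.randn(4, 2, 256, 64)

loss = criterion(pred, target)                                # unweighted mean squared error
weighted = criterion(pred, target, weight=torch.tensor(0.5))  # a scalar weight simply rescales it
print(loss.item(), weighted.item())
```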
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
| 2.5D-Visual-Sound-main | data/base_dataset.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def CreateDataLoader(opt):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
print(data_loader.name())
data_loader.initialize(opt)
return data_loader
| 2.5D-Visual-Sound-main | data/data_loader.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class BaseDataLoader():
def __init__(self):
pass
def initialize(self, opt):
self.opt = opt
pass
def load_data(self):
return None
| 2.5D-Visual-Sound-main | data/base_data_loader.py |
2.5D-Visual-Sound-main | data/__init__.py |
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
dataset = None
if opt.model == 'audioVisual':
from data.audioVisual_dataset import AudioVisualDataset
dataset = AudioVisualDataset()
else:
raise ValueError("Dataset [%s] not recognized." % opt.model)
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.nThreads))
def load_data(self):
return self
def __len__(self):
return len(self.dataset)
def __iter__(self):
for i, data in enumerate(self.dataloader):
yield data
| 2.5D-Visual-Sound-main | data/custom_dataset_data_loader.py |
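A hypothetical training-side sketch of how these pieces fit together. The option values below are illustrative stand-ins (the attribute names are the ones the dataset and loader code above actually read), and it assumes a prepared train.h5 file listing the audio clips.

```python
from types import SimpleNamespace
from data.data_loader import CreateDataLoader  # assumes the repo root is on sys.path

opt = SimpleNamespace(model='audioVisual', batchSize=16, nThreads=4,
                      hdf5FolderPath='/path/to/hdf5', mode='train',
                      audio_sampling_rate=16000, audio_length=0.63,
                      enable_data_augmentation=True)

data_loader = CreateDataLoader(opt)   # prints 'CustomDatasetDataLoader'
dataset = data_loader.load_data()     # the loader object itself is iterable
print('#training clips =', len(data_loader))
for data in dataset:
    print(data['frame'].shape, data['audio_mix_spec'].shape)
    break
```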
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os.path
import time
import librosa
import h5py
import random
import math
import numpy as np
import glob
import torch
from PIL import Image, ImageEnhance
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset
def normalize(samples, desired_rms = 0.1, eps = 1e-4):
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return samples
def generate_spectrogram(audio):
spectro = librosa.core.stft(audio, n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(spectro), axis=0)
imag = np.expand_dims(np.imag(spectro), axis=0)
spectro_two_channel = np.concatenate((real, imag), axis=0)
return spectro_two_channel
def process_image(image, augment):
image = image.resize((480,240))
w,h = image.size
w_offset = w - 448
h_offset = h - 224
left = random.randrange(0, w_offset + 1)
upper = random.randrange(0, h_offset + 1)
image = image.crop((left, upper, left+448, upper+224))
if augment:
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
return image
class AudioVisualDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.audios = []
#load hdf5 file here
h5f_path = os.path.join(opt.hdf5FolderPath, opt.mode+".h5")
h5f = h5py.File(h5f_path, 'r')
self.audios = h5f['audio'][:]
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
vision_transform_list = [transforms.ToTensor(), normalize]
self.vision_transform = transforms.Compose(vision_transform_list)
def __getitem__(self, index):
#load audio
audio, audio_rate = librosa.load(self.audios[index], sr=self.opt.audio_sampling_rate, mono=False)
#randomly get a start time for the audio segment from the 10s clip
audio_start_time = random.uniform(0, 9.9 - self.opt.audio_length)
audio_end_time = audio_start_time + self.opt.audio_length
audio_start = int(audio_start_time * self.opt.audio_sampling_rate)
audio_end = audio_start + int(self.opt.audio_length * self.opt.audio_sampling_rate)
audio = audio[:, audio_start:audio_end]
audio = normalize(audio)
audio_channel1 = audio[0,:]
audio_channel2 = audio[1,:]
#get the frame dir path based on audio path
path_parts = self.audios[index].strip().split('/')
path_parts[-1] = path_parts[-1][:-4] + '.mp4'
path_parts[-2] = 'frames'
frame_path = '/'.join(path_parts)
# get the closest frame to the audio segment
#frame_index = int(round((audio_start_time + audio_end_time) / 2.0 + 0.5)) #1 frame extracted per second
frame_index = int(round(((audio_start_time + audio_end_time) / 2.0 + 0.05) * 10)) #10 frames extracted per second
frame = process_image(Image.open(os.path.join(frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB'), self.opt.enable_data_augmentation)
frame = self.vision_transform(frame)
#passing the spectrogram of the difference
audio_diff_spec = torch.FloatTensor(generate_spectrogram(audio_channel1 - audio_channel2))
audio_mix_spec = torch.FloatTensor(generate_spectrogram(audio_channel1 + audio_channel2))
return {'frame': frame, 'audio_diff_spec':audio_diff_spec, 'audio_mix_spec':audio_mix_spec}
def __len__(self):
return len(self.audios)
def name(self):
return 'AudioVisualDataset'
| 2.5D-Visual-Sound-main | data/audioVisual_dataset.py |
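A small sketch of the audio preprocessing above on synthetic stereo audio (0.63 s at 16 kHz is an assumed clip length; the output shapes follow from n_fft=512 and hop_length=160).

```python
import numpy as np
from data.audioVisual_dataset import normalize, generate_spectrogram  # repo root on sys.path

sr = 16000
audio = np.random.randn(2, int(0.63 * sr)).astype(np.float32)  # fake stereo clip

audio = normalize(audio)                        # rescale to the desired RMS of 0.1
left, right = audio[0], audio[1]
diff_spec = generate_spectrogram(left - right)  # (2, 257, 64): real/imag STFT channels
mix_spec = generate_spectrogram(left + right)
print(diff_spec.shape, mix_spec.shape)
```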
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gym
import torch
from collections import deque, defaultdict
from gym import spaces
import numpy as np
from gym_minigrid.minigrid import OBJECT_TO_IDX, COLOR_TO_IDX
# Helper functions and wrappers
def _format_observation(obs):
obs = torch.tensor(obs)
return obs.view((1, 1) + obs.shape) # (...) -> (T,B,...).
class Minigrid2Image(gym.ObservationWrapper):
"""Get MiniGrid observation to ignore language instruction."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = env.observation_space.spaces['image']
def observation(self, observation):
return observation['image']
class Observation_WrapperSetup:
"""Environment wrapper to format observation items into torch."""
def __init__(self, gym_env, fix_seed=False, env_seed=1):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
self.episode_win = None
self.fix_seed = fix_seed
self.env_seed = env_seed
def initial(self):
initial_reward = torch.zeros(1, 1)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
self.episode_win = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.uint8)
if self.fix_seed:
self.gym_env.seed(seed=self.env_seed)
initial_frame = _format_observation(self.gym_env.reset())
if self.gym_env.carrying:
carried_col, carried_obj = torch.LongTensor([[COLOR_TO_IDX[self.gym_env.carrying.color]]]), torch.LongTensor([[OBJECT_TO_IDX[self.gym_env.carrying.type]]])
else:
carried_col, carried_obj = torch.LongTensor([[5]]), torch.LongTensor([[1]])
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
episode_win=self.episode_win,
carried_col = carried_col,
carried_obj = carried_obj)
def step(self, action):
frame, reward, done, _ = self.gym_env.step(action.item())
self.episode_step += 1
episode_step = self.episode_step
self.episode_return += reward
episode_return = self.episode_return
if done and reward > 0:
self.episode_win[0][0] = 1
else:
self.episode_win[0][0] = 0
episode_win = self.episode_win
if done:
if self.fix_seed:
self.gym_env.seed(seed=self.env_seed)
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
self.episode_win = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_observation(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
if self.gym_env.carrying:
carried_col, carried_obj = torch.LongTensor([[COLOR_TO_IDX[self.gym_env.carrying.color]]]), torch.LongTensor([[OBJECT_TO_IDX[self.gym_env.carrying.type]]])
else:
carried_col, carried_obj = torch.LongTensor([[5]]), torch.LongTensor([[1]])
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step = episode_step,
episode_win = episode_win,
carried_col = carried_col,
carried_obj = carried_obj
)
def get_full_obs(self):
env = self.gym_env.unwrapped
full_grid = env.grid.encode()
full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array([
OBJECT_TO_IDX['agent'],
COLOR_TO_IDX['red'],
env.agent_dir
])
return full_grid
def close(self):
self.gym_env.close()
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[:-1] + (shp[-1] * k,)),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
| adversarially-motivated-intrinsic-goals-main | env_utils.py |
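A usage sketch of the wrappers above on a MiniGrid task (the environment id and action are illustrative, and the pre-0.26 Gym step/reset API that gym_minigrid targets is assumed).

```python
import gym
import torch
import gym_minigrid  # noqa: F401  (registers the MiniGrid-* environment ids)
from env_utils import Minigrid2Image, Observation_WrapperSetup  # the file above

env = Minigrid2Image(gym.make('MiniGrid-Empty-8x8-v0'))
env = Observation_WrapperSetup(env, fix_seed=True, env_seed=1)

out = env.initial()                 # dict of (T=1, B=1, ...) tensors
print(out['frame'].shape)           # torch.Size([1, 1, 7, 7, 3]) for the agent's partial view
out = env.step(torch.tensor([2]))   # action 2 = move forward in MiniGrid
print(out['reward'], out['done'], out['carried_obj'])
```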
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Naive profiling using timeit."""
import collections
import timeit
class Timings:
"""Not thread-safe."""
def __init__(self):
self._means = collections.defaultdict(int)
self._vars = collections.defaultdict(int)
self._counts = collections.defaultdict(int)
self.reset()
def reset(self):
self.last_time = timeit.default_timer()
def time(self, name):
"""Save an update for event `name`.
Nerd alarm: We could just store a
collections.defaultdict(list)
and compute means and standard deviations at the end. But thanks to the
clever math in Sutton-Barto
(http://www.incompleteideas.net/book/first/ebook/node19.html) and
https://math.stackexchange.com/a/103025/5051 we can update both the
means and the stds online. O(1) FTW!
"""
now = timeit.default_timer()
x = now - self.last_time
self.last_time = now
n = self._counts[name]
mean = self._means[name] + (x - self._means[name]) / (n + 1)
var = (
n * self._vars[name] + n * (self._means[name] - mean) ** 2 + (x - mean) ** 2
) / (n + 1)
self._means[name] = mean
self._vars[name] = var
self._counts[name] += 1
def means(self):
return self._means
def vars(self):
return self._vars
def stds(self):
return {k: v ** 0.5 for k, v in self._vars.items()}
def summary(self, prefix=""):
means = self.means()
stds = self.stds()
total = sum(means.values())
result = prefix
for k in sorted(means, key=means.get, reverse=True):
result += "\n %s: %.6fms +- %.6fms (%.2f%%) " % (
k,
1000 * means[k],
1000 * stds[k],
100 * means[k] / total,
)
result += "\nTotal: %.6fms" % (1000 * total)
return result
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/prof.py |
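A short usage sketch of the profiler above: time two phases of a loop and print the running summary (the sleeps stand in for real work).

```python
import time
from torchbeast.core.prof import Timings  # assumes the repo root is on sys.path

timings = Timings()
for _ in range(5):
    timings.reset()
    time.sleep(0.01)       # pretend this is acting
    timings.time("act")
    time.sleep(0.02)       # pretend this is learning
    timings.time("learn")
print(timings.summary("Profile after 5 iterations:"))
```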
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import datetime
import csv
import json
import logging
import os
import time
from typing import Dict
import git
def gather_metadata() -> Dict:
date_start = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
# gathering git metadata
try:
repo = git.Repo(search_parent_directories=True)
git_sha = repo.commit().hexsha
git_data = dict(
commit=git_sha,
branch=None if repo.head.is_detached else repo.active_branch.name,
is_dirty=repo.is_dirty(),
path=repo.git_dir,
)
except git.InvalidGitRepositoryError:
git_data = None
# gathering slurm metadata
if "SLURM_JOB_ID" in os.environ:
slurm_env_keys = [k for k in os.environ if k.startswith("SLURM")]
slurm_data = {}
for k in slurm_env_keys:
d_key = k.replace("SLURM_", "").replace("SLURMD_", "").lower()
slurm_data[d_key] = os.environ[k]
else:
slurm_data = None
return dict(
date_start=date_start,
date_end=None,
successful=False,
git=git_data,
slurm=slurm_data,
env=os.environ.copy(),
)
class FileWriter:
def __init__(
self,
xpid: str = None,
xp_args: dict = None,
rootdir: str = "~/palaas",
symlink_to_latest: bool = True,
):
if not xpid:
# make unique id
xpid = "{proc}_{unixtime}".format(
proc=os.getpid(), unixtime=int(time.time())
)
self.xpid = xpid
self._tick = 0
# metadata gathering
if xp_args is None:
xp_args = {}
self.metadata = gather_metadata()
# we need to copy the args, otherwise when we close the file writer
# (and rewrite the args) we might have non-serializable objects (or
# other nasty stuff).
self.metadata["args"] = copy.deepcopy(xp_args)
self.metadata["xpid"] = self.xpid
formatter = logging.Formatter("%(message)s")
self._logger = logging.getLogger("palaas/out")
# to stdout handler
shandle = logging.StreamHandler()
shandle.setFormatter(formatter)
self._logger.addHandler(shandle)
self._logger.setLevel(logging.INFO)
rootdir = os.path.expandvars(os.path.expanduser(rootdir))
# to file handler
self.basepath = os.path.join(rootdir, self.xpid)
if not os.path.exists(self.basepath):
self._logger.info("Creating log directory: %s", self.basepath)
os.makedirs(self.basepath, exist_ok=True)
else:
self._logger.info("Found log directory: %s", self.basepath)
if symlink_to_latest:
# Add 'latest' as symlink unless it exists and is no symlink.
symlink = os.path.join(rootdir, "latest")
try:
if os.path.islink(symlink):
os.remove(symlink)
if not os.path.exists(symlink):
os.symlink(self.basepath, symlink)
self._logger.info("Symlinked log directory: %s", symlink)
except OSError:
# os.remove() or os.symlink() raced. Don't do anything.
pass
self.paths = dict(
msg="{base}/out.log".format(base=self.basepath),
logs="{base}/logs.csv".format(base=self.basepath),
fields="{base}/fields.csv".format(base=self.basepath),
meta="{base}/meta.json".format(base=self.basepath),
)
self._logger.info("Saving arguments to %s", self.paths["meta"])
if os.path.exists(self.paths["meta"]):
self._logger.warning(
"Path to meta file already exists. " "Not overriding meta."
)
else:
self._save_metadata()
self._logger.info("Saving messages to %s", self.paths["msg"])
if os.path.exists(self.paths["msg"]):
self._logger.warning(
"Path to message file already exists. " "New data will be appended."
)
fhandle = logging.FileHandler(self.paths["msg"])
fhandle.setFormatter(formatter)
self._logger.addHandler(fhandle)
self._logger.info("Saving logs data to %s", self.paths["logs"])
self._logger.info("Saving logs' fields to %s", self.paths["fields"])
if os.path.exists(self.paths["logs"]):
self._logger.warning(
"Path to log file already exists. " "New data will be appended."
)
with open(self.paths["fields"], "r") as csvfile:
reader = csv.reader(csvfile)
self.fieldnames = list(reader)[0]
else:
self.fieldnames = ["_tick", "_time"]
self._fieldfile = open(self.paths["fields"], "w")
self._fieldwriter = csv.writer(self._fieldfile)
self._logfile = open(self.paths["logs"], "a")
self._logwriter = csv.DictWriter(self._logfile, fieldnames=self.fieldnames)
def log(self, to_log: Dict, tick: int = None, verbose: bool = False) -> None:
if tick is not None:
raise NotImplementedError
else:
to_log["_tick"] = self._tick
self._tick += 1
to_log["_time"] = time.time()
old_len = len(self.fieldnames)
for k in to_log:
if k not in self.fieldnames:
self.fieldnames.append(k)
if old_len != len(self.fieldnames):
self._fieldwriter.writerow(self.fieldnames)
self._logger.info("Updated log fields: %s", self.fieldnames)
if to_log["_tick"] == 0:
self._logfile.write("# %s\n" % ",".join(self.fieldnames))
if verbose:
self._logger.info(
"LOG | %s",
", ".join(["{}: {}".format(k, to_log[k]) for k in sorted(to_log)]),
)
self._logwriter.writerow(to_log)
self._logfile.flush()
def close(self, successful: bool = True) -> None:
self.metadata["date_end"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S.%f"
)
self.metadata["successful"] = successful
self._save_metadata()
for f in [self._logfile, self._fieldfile]:
f.close()
def _save_metadata(self) -> None:
with open(self.paths["meta"], "w") as jsonfile:
json.dump(self.metadata, jsonfile, indent=4, sort_keys=True)
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/file_writer.py |
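An illustrative sketch of FileWriter usage (the xpid, rootdir and logged values are made up); it creates out.log, logs.csv, fields.csv and meta.json under rootdir/xpid.

```python
from torchbeast.core.file_writer import FileWriter  # assumes the repo root is on sys.path

plogger = FileWriter(xpid="demo-run",
                     xp_args={"learning_rate": 1e-3, "batch_size": 8},
                     rootdir="/tmp/palaas")
for step in range(3):
    plogger.log({"frames": step * 100, "mean_episode_return": 0.0}, verbose=True)
plogger.close(successful=True)
```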
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This file taken from
# https://github.com/deepmind/scalable_agent/blob/
# cd66d00914d56c8ba2f0615d9cdeefcb169a8d70/vtrace.py
# and modified.
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
"""
import collections
import torch
import torch.nn.functional as F
VTraceFromLogitsReturns = collections.namedtuple(
"VTraceFromLogitsReturns",
[
"vs",
"pg_advantages",
"log_rhos",
"behavior_action_log_probs",
"target_action_log_probs",
],
)
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def action_log_probs(policy_logits, actions):
return -F.nll_loss(
F.log_softmax(torch.flatten(policy_logits, 0, -2), dim=-1),
torch.flatten(actions),
reduction="none",
).view_as(actions)
def from_logits(
behavior_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace for softmax policies."""
target_action_log_probs = action_log_probs(target_policy_logits, actions)
behavior_action_log_probs = action_log_probs(behavior_policy_logits, actions)
log_rhos = target_action_log_probs - behavior_action_log_probs
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behavior_action_log_probs=behavior_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict(),
)
@torch.no_grad()
def from_importance_weights(
log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace from log importance weights."""
with torch.no_grad():
rhos = torch.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = torch.clamp(rhos, max=clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp(rhos, max=1.0)
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = torch.cat(
[values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0
)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
acc = torch.zeros_like(bootstrap_value)
result = []
for t in range(discounts.shape[0] - 1, -1, -1):
acc = deltas[t] + discounts[t] * cs[t] * acc
result.append(acc)
result.reverse()
vs_minus_v_xs = torch.stack(result)
# Add V(x_s) to get v_s.
vs = torch.add(vs_minus_v_xs, values)
# Advantage for policy gradient.
broadcasted_bootstrap_values = torch.ones_like(vs[0]) * bootstrap_value
vs_t_plus_1 = torch.cat(
[vs[1:], broadcasted_bootstrap_values.unsqueeze(0)], dim=0
)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = torch.clamp(rhos, max=clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values)
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=vs, pg_advantages=pg_advantages)
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/vtrace.py |
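A shape-level sketch of calling the V-trace code above with dummy tensors, using the (T, B) layout the rest of the codebase passes in (T unroll steps, B batch entries, A actions; all values here are random).

```python
import torch
from torchbeast.core import vtrace  # assumes the repo root is on sys.path

T, B, A = 5, 2, 4
returns = vtrace.from_logits(
    behavior_policy_logits=torch.randn(T, B, A),
    target_policy_logits=torch.randn(T, B, A),
    actions=torch.randint(A, (T, B)),
    discounts=torch.full((T, B), 0.99),
    rewards=torch.randn(T, B),
    values=torch.randn(T, B),
    bootstrap_value=torch.randn(B),
)
print(returns.vs.shape, returns.pg_advantages.shape)  # both torch.Size([5, 2])
```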
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""The environment class."""
import torch
def _format_frame(frame):
frame = torch.from_numpy(frame)
return frame.view((1, 1) + frame.shape) # (...) -> (T,B,...).
class Environment:
def __init__(self, gym_env):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
def initial(self):
initial_reward = torch.zeros(1, 1)
# This supports only single-tensor actions ATM.
initial_last_action = torch.zeros(1, 1, dtype=torch.int64)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.bool)
initial_frame = _format_frame(self.gym_env.reset())
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
last_action=initial_last_action,
)
def step(self, action):
frame, reward, done, unused_info = self.gym_env.step(action.item())
self.episode_step += 1
self.episode_return += reward
episode_step = self.episode_step
episode_return = self.episode_return
if done:
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_frame(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step=episode_step,
last_action=action,
)
def close(self):
self.gym_env.close()
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/environment.py |
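A usage sketch of the Environment wrapper above with a standard Gym control task (assuming the pre-0.26 Gym step/reset API this codebase targets; the environment id is illustrative).

```python
import gym
import torch
from torchbeast.core.environment import Environment  # assumes the repo root is on sys.path

env = Environment(gym.make("CartPole-v1"))
out = env.initial()
print(out["frame"].shape)   # torch.Size([1, 1, 4]) for CartPole observations
for _ in range(3):
    action = torch.tensor([env.gym_env.action_space.sample()])
    out = env.step(action)
    print(out["reward"].item(), out["done"].item())
env.close()
```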
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| adversarially-motivated-intrinsic-goals-main | monobeast/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Must be run with OMP_NUM_THREADS=1
import random
import argparse
import logging
import os
import threading
import time
import timeit
import traceback
import pprint
import typing
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
import gym
import gym_minigrid.wrappers as wrappers
from torch.distributions.normal import Normal
from torchbeast.core import environment
from torchbeast.core import file_writer
from torchbeast.core import prof
from torchbeast.core import vtrace
from env_utils import Observation_WrapperSetup, FrameStack
# Some Global Variables
# We start t* at 7 steps.
generator_batch = dict()
generator_batch_aux = dict()
generator_current_target = 7.0
generator_count = 0
# yapf: disable
parser = argparse.ArgumentParser(description='PyTorch Scalable Agent')
parser.add_argument('--env', type=str, default='MiniGrid-Empty-8x8-v0',
help='Gym environment.')
parser.add_argument('--mode', default='train',
choices=['train', 'test', 'test_render'],
help='Training or test mode.')
parser.add_argument('--xpid', default=None,
help='Experiment id (default: None).')
# Training settings.
parser.add_argument('--disable_checkpoint', action='store_true',
help='Disable saving checkpoint.')
parser.add_argument('--savedir', default='./experimentsMinigrid',
help='Root dir where experiment data will be saved.')
parser.add_argument('--total_frames', default=600000000, type=int, metavar='T',
help='Total environment frames to train for.')
parser.add_argument('--num_actors', default=4, type=int, metavar='N',
help='Number of actors (default: 4).')
parser.add_argument('--num_buffers', default=None, type=int,
metavar='N', help='Number of shared-memory buffers.')
parser.add_argument('--num_threads', default=4, type=int,
metavar='N', help='Number of learner threads.')
parser.add_argument('--disable_cuda', action='store_true',
help='Disable CUDA.')
# Loss settings.
parser.add_argument('--entropy_cost', default=0.0005, type=float,
help='Entropy cost/multiplier.')
parser.add_argument('--generator_entropy_cost', default=0.05, type=float,
help='Generator entropy cost/multiplier.')
parser.add_argument('--baseline_cost', default=0.5, type=float,
help='Baseline cost/multiplier.')
parser.add_argument('--discounting', default=0.99, type=float,
help='Discounting factor.')
parser.add_argument('--reward_clipping', default='abs_one',
choices=['abs_one', 'soft_asymmetric', 'none'],
help='Reward clipping.')
# Optimizer settings.
parser.add_argument('--learning_rate', default=0.001, type=float,
metavar='LR', help='Learning rate.')
parser.add_argument('--generator_learning_rate', default=0.002, type=float,
metavar='LR', help='Generator learning rate.')
parser.add_argument('--alpha', default=0.99, type=float,
help='RMSProp smoothing constant.')
parser.add_argument('--momentum', default=0, type=float,
help='RMSProp momentum.')
parser.add_argument('--epsilon', default=0.01, type=float,
help='RMSProp epsilon.')
# Other Hyperparameters
parser.add_argument('--batch_size', default=8, type=int, metavar='B',
help='Learner batch size (default: 8).')
parser.add_argument('--generator_batch_size', default=32, type=int, metavar='BB',
help='Generator batch size (default: 32).')
parser.add_argument('--unroll_length', default=100, type=int, metavar='T',
help='The unroll length (time dimension; default: 100).')
parser.add_argument('--goal_dim', default=10, type=int,
help='Size of Goal Embedding')
parser.add_argument('--state_embedding_dim', default=256, type=int,
help='Dimension of the state embedding representation used in the student')
parser.add_argument('--generator_reward_negative', default=-0.1, type=float,
help='Base negative reward for the generator when the goal is not reached after at least t* steps.')
parser.add_argument('--generator_threshold', default=-0.5, type=float,
help='Mean generator reward threshold above which the scheduler increases difficulty.')
parser.add_argument('--generator_counts', default=10, type=int,
help='Number of time before generator increases difficulty')
parser.add_argument('--generator_maximum', default=100, type=float,
help='Maximum difficulty')
parser.add_argument('--generator_reward_coef', default=1.0, type=float,
help='Coefficient for the generator reward')
# Map Layout
parser.add_argument('--fix_seed', action='store_true',
help='Fix the environment seed so that it is \
no longer procedurally generated but rather the same layout every time.')
parser.add_argument('--env_seed', default=1, type=int,
help='The seed to set for the env if we are using a single fixed seed.')
parser.add_argument('--inner', action='store_true',
help='Exclude the outer wall.')
parser.add_argument('--num_input_frames', default=1, type=int,
help='Number of input frames to the model and state embedding including the current frame \
When num_input_frames > 1, it will also take the previous num_input_frames - 1 frames as input.')
# Ablations and other settings
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
parser.add_argument('--num_lstm_layers', default=1, type=int,
help='Lstm layers.')
parser.add_argument('--disable_use_embedding', action='store_true',
help='Disable embeddings.')
parser.add_argument('--no_extrinsic_rewards', action='store_true',
help='Only intrinsic rewards.')
parser.add_argument('--no_generator', action='store_true',
help='Use the vanilla policy without a generator (deprecated).')
parser.add_argument('--intrinsic_reward_coef', default=1.0, type=float,
help='Coefficient for the intrinsic reward')
parser.add_argument('--random_agent', action='store_true',
help='Use a random agent to test the env.')
parser.add_argument('--novelty', action='store_true',
help='Discount rewards based on times goal has been proposed.')
parser.add_argument('--novelty_bonus', default=0.1, type=float,
help='Bonus the generator receives for proposing novel objects when --novelty is set.')
parser.add_argument('--novelty_coef', default=0.3, type=float,
help='Modulates the novelty bonus when --novelty is set.')
parser.add_argument('--restart_episode', action='store_true',
help='Restart Episode when reaching intrinsic goal.')
parser.add_argument('--modify', action='store_true',
help='Modify Goal instead of having to reach the goal')
parser.add_argument('--no_boundary_awareness', action='store_true',
help='Remove Episode Boundary Awareness')
parser.add_argument('--generator_loss_form', type=str, default='threshold',
help='Generator loss form: [threshold, dummy, gaussian, linear].')
parser.add_argument('--generator_target', default=5.0, type=float,
help='Mean target for the Gaussian and linear rewards.')
parser.add_argument('--target_variance', default=15.0, type=float,
help='Variance for the Gaussian Reward')
# yapf: enable
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
# Take the mean over batch, sum over time.
return 0.5 * torch.sum(torch.mean(advantages ** 2, dim=1))
def compute_entropy_loss(logits):
# Regularizing Entropy Loss
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
entropy_per_timestep = torch.sum(-policy * log_policy, dim=-1)
return -torch.sum(torch.mean(entropy_per_timestep, dim=1))
def compute_policy_gradient_loss(logits, actions, advantages):
# Main Policy Loss
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
advantages.requires_grad = False
policy_gradient_loss_per_timestep = cross_entropy * advantages
return torch.sum(torch.mean(policy_gradient_loss_per_timestep, dim=1))
def act(
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
generator_model,
buffers: Buffers,
initial_agent_state_buffers, flags):
"""Defines and generates IMPALA actors in multiples threads."""
try:
logging.info("Actor %i started.", actor_index)
timings = prof.Timings() # Keep track of how fast things are.
gym_env = create_env(flags)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
#gym_env = wrappers.FullyObsWrapper(gym_env)
if flags.num_input_frames > 1:
gym_env = FrameStack(gym_env, flags.num_input_frames)
env = Observation_WrapperSetup(gym_env, fix_seed=flags.fix_seed, env_seed=flags.env_seed)
env_output = env.initial()
initial_frame = env_output['frame']
agent_state = model.initial_state(batch_size=1)
generator_output = generator_model(env_output)
goal = generator_output["goal"]
agent_output, unused_state = model(env_output, agent_state, goal)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for key in generator_output:
buffers[key][index][0, ...] = generator_output[key]
buffers["initial_frame"][index][0, ...] = initial_frame
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout
for t in range(flags.unroll_length):
aux_steps = 0
timings.reset()
if flags.modify:
new_frame = torch.flatten(env_output['frame'], 2, 3)
old_frame = torch.flatten(initial_frame, 2, 3)
ans = new_frame == old_frame
ans = torch.sum(ans, 3) != 3 # Reached if the three elements of the frame are not the same.
reached_condition = torch.squeeze(torch.gather(ans, 2, torch.unsqueeze(goal.long(),2)))
else:
agent_location = torch.flatten(env_output['frame'], 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(agent_output["action"].shape)
reached_condition = goal == agent_location
if reached_condition: # Generate new goal when reached intrinsic goal
if flags.restart_episode:
env_output = env.initial()
else:
env.episode_step = 0
initial_frame = env_output['frame']
with torch.no_grad():
generator_output = generator_model(env_output)
goal = generator_output["goal"]
if env_output['done'][0] == 1: # Generate a New Goal when episode finished
initial_frame = env_output['frame']
with torch.no_grad():
generator_output = generator_model(env_output)
goal = generator_output["goal"]
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state, goal)
timings.time("model")
env_output = env.step(agent_output["action"])
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
for key in generator_output:
buffers[key][index][t + 1, ...] = generator_output[key]
buffers["initial_frame"][index][t + 1, ...] = initial_frame
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
def get_batch(
flags,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock()):
"""Returns a Batch with the history."""
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(flags.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(t.to(device=flags.device, non_blocking=True)
for t in initial_agent_state)
timings.time("device")
return batch, initial_agent_state
def reached_goal_func(frames, goals, initial_frames = None, done_aux = None):
"""Auxiliary function which evaluates whether agent has reached the goal."""
if flags.modify:
new_frame = torch.flatten(frames, 2, 3)
old_frame = torch.flatten(initial_frames, 2, 3)
ans = new_frame == old_frame
ans = torch.sum(ans, 3) != 3 # reached if the three elements are not the same
reached = torch.squeeze(torch.gather(ans, 2, torch.unsqueeze(goals.long(),2)))
if flags.no_boundary_awareness:
reached = reached.float() * (1 - done_aux.float())
return reached
else:
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(goals.shape)
return (goals == agent_location).float()
def learn(
actor_model, model, actor_generator_model, generator_model, batch, initial_agent_state, optimizer, generator_model_optimizer, scheduler, generator_scheduler, flags, max_steps=100.0, lock=threading.Lock()
):
"""Performs a learning (optimization) step for the policy, and for the generator whenever the generator batch is full."""
with lock:
# Loading Batch
next_frame = batch['frame'][1:].float().to(device=flags.device)
initial_frames = batch['initial_frame'][1:].float().to(device=flags.device)
done_aux = batch['done'][1:].float().to(device=flags.device)
reached_goal = reached_goal_func(next_frame, batch['goal'][1:].to(device=flags.device), initial_frames = initial_frames, done_aux = done_aux)
intrinsic_rewards = flags.intrinsic_reward_coef * reached_goal
reached = reached_goal.type(torch.bool)
intrinsic_rewards = intrinsic_rewards*(intrinsic_rewards - 0.9 * (batch["episode_step"][1:].float()/max_steps))
learner_outputs, unused_state = model(batch, initial_agent_state, batch['goal'])
bootstrap_value = learner_outputs["baseline"][-1]
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
# Student Rewards
if flags.no_generator:
total_rewards = rewards
elif flags.no_extrinsic_rewards:
total_rewards = intrinsic_rewards
else:
total_rewards = rewards + intrinsic_rewards
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(total_rewards, -1, 1)
elif flags.reward_clipping == "soft_asymmetric":
squeezed = torch.tanh(total_rewards / 5.0)
# Negative rewards are given less weight than positive rewards.
clipped_rewards = torch.where(total_rewards < 0, 0.3 * squeezed, squeezed) * 5.0
elif flags.reward_clipping == "none":
clipped_rewards = total_rewards
discounts = (~batch["done"]).float() * flags.discounting
clipped_rewards += 1.0 * (rewards>0.0).float()
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
# Student Loss
# Compute loss as a weighted sum of the baseline loss, the policy
# gradient loss and an entropy regularization term.
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
if torch.isnan(torch.mean(episode_returns)):
aux_mean_episode = 0.0
else:
aux_mean_episode = torch.mean(episode_returns).item()
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": aux_mean_episode,
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
"gen_rewards": None,
"gg_loss": None,
"generator_baseline_loss": None,
"generator_entropy_loss": None,
"mean_intrinsic_rewards": None,
"mean_episode_steps": None,
"ex_reward": None,
"generator_current_target": None,
}
if flags.no_generator:
stats["gen_rewards"] = 0.0,
stats["gg_loss"] = 0.0,
stats["generator_baseline_loss"] = 0.0,
stats["generator_entropy_loss"] = 0.0,
stats["mean_intrinsic_rewards"] = 0.0,
stats["mean_episode_steps"] = 0.0,
stats["ex_reward"] = 0.0,
stats["generator_current_target"] = 0.0,
scheduler.step()
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 40.0)
optimizer.step()
actor_model.load_state_dict(model.state_dict())
# Generator:
if not flags.no_generator:
global generator_batch
global generator_batch_aux
global generator_current_target
global generator_count
global goal_count_dict
# Loading Batch
is_done = batch['done']==1
reached = reached_goal.type(torch.bool)
if 'frame' in generator_batch.keys():
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][is_done].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][is_done].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][is_done].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][is_done].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.zeros(batch['goal'].shape)[is_done].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][reached].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][reached].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][reached].float().to(device=flags.device)), 0)
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][reached].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][reached].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][reached].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][reached].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.ones(batch['goal'].shape)[reached].float().to(device=flags.device)), 0)
else:
generator_batch['frame'] = (batch['initial_frame'][is_done]).float().to(device=flags.device) # Notice we use initial_frame from batch
generator_batch['goal'] = (batch['goal'][is_done]).to(device=flags.device)
generator_batch['episode_step'] = (batch['episode_step'][is_done]).float().to(device=flags.device)
generator_batch['generator_logits'] = (batch['generator_logits'][is_done]).float().to(device=flags.device)
generator_batch['reached'] = (torch.zeros(batch['goal'].shape)[is_done]).float().to(device=flags.device)
generator_batch['ex_reward'] = (batch['reward'][is_done]).float().to(device=flags.device)
generator_batch['carried_obj'] = (batch['carried_obj'][is_done]).float().to(device=flags.device)
generator_batch['carried_col'] = (batch['carried_col'][is_done]).float().to(device=flags.device)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][reached].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][reached].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][reached].float().to(device=flags.device)), 0)
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][reached].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][reached].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][reached].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][reached].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.ones(batch['goal'].shape)[reached].float().to(device=flags.device)), 0)
if generator_batch['frame'].shape[0] >= flags.generator_batch_size: # Run Gradient step, keep batch residual in batch_aux
for key in generator_batch:
generator_batch_aux[key] = generator_batch[key][flags.generator_batch_size:]
generator_batch[key] = generator_batch[key][:flags.generator_batch_size].unsqueeze(0)
generator_outputs = generator_model(generator_batch)
generator_bootstrap_value = generator_outputs["generator_baseline"][-1]
# Generator Reward
def distance2(episode_step, reached, targ=flags.generator_target):
aux = flags.generator_reward_negative * torch.ones(episode_step.shape).to(device=flags.device)
aux += (episode_step >= targ).float() * reached
return aux
if flags.generator_loss_form == 'gaussian':
generator_target = flags.generator_target * torch.ones(generator_batch['episode_step'].shape).to(device=flags.device)
gen_reward = Normal(generator_target, flags.target_variance*torch.ones(generator_target.shape).to(device=flags.device))
generator_rewards = flags.generator_reward_coef * (2 + gen_reward.log_prob(generator_batch['episode_step']) - gen_reward.log_prob(generator_target)) * generator_batch['reached'] -1
elif flags.generator_loss_form == 'linear':
generator_rewards = (generator_batch['episode_step']/flags.generator_target * (generator_batch['episode_step'] <= flags.generator_target).float() + \
torch.exp ((-generator_batch['episode_step'] + flags.generator_target)/20.0) * (generator_batch['episode_step'] > flags.generator_target).float()) * \
2*generator_batch['reached'] - 1
elif flags.generator_loss_form == 'dummy':
generator_rewards = torch.tensor(distance2(generator_batch['episode_step'], generator_batch['reached'])).to(device=flags.device)
elif flags.generator_loss_form == 'threshold':
generator_rewards = torch.tensor(distance2(generator_batch['episode_step'], generator_batch['reached'], targ=generator_current_target)).to(device=flags.device)
if torch.mean(generator_rewards).item() >= flags.generator_threshold:
generator_count += 1
else:
generator_count = 0
if generator_count >= flags.generator_counts and generator_current_target<=flags.generator_maximum:
generator_current_target += 1.0
generator_count = 0
goal_count_dict *= 0.0
if flags.novelty:
frames_aux = torch.flatten(generator_batch['frame'], 2, 3)
frames_aux = frames_aux[:,:,:,0]
object_ids =torch.zeros(generator_batch['goal'].shape).long()
for i in range(object_ids.shape[1]):
object_ids[0,i] = frames_aux[0,i,generator_batch['goal'][0,i]]
goal_count_dict[object_ids[0,i]] += 1
bonus = (object_ids>2).float().to(device=flags.device) * flags.novelty_bonus
generator_rewards += bonus
if flags.reward_clipping == "abs_one":
generator_clipped_rewards = torch.clamp(generator_rewards, -1, 1)
if not flags.no_extrinsic_rewards:
generator_clipped_rewards = 1.0 * (generator_batch['ex_reward'] > 0).float() + generator_clipped_rewards * (generator_batch['ex_reward'] <= 0).float()
generator_discounts = torch.zeros(generator_batch['episode_step'].shape).float().to(device=flags.device)
goals_aux = generator_batch["goal"]
if flags.inner:
goals_aux = goals_aux.float()
goals_aux -= 2 * (torch.floor(goals_aux/generator_model.height))
goals_aux -= generator_model.height -1
goals_aux = goals_aux.long()
generator_vtrace_returns = vtrace.from_logits(
behavior_policy_logits=generator_batch["generator_logits"],
target_policy_logits=generator_outputs["generator_logits"],
actions=goals_aux,
discounts=generator_discounts,
rewards=generator_clipped_rewards,
values=generator_outputs["generator_baseline"],
bootstrap_value=generator_bootstrap_value,
)
# Generator Loss
gg_loss = compute_policy_gradient_loss(
generator_outputs["generator_logits"],
goals_aux,
generator_vtrace_returns.pg_advantages,
)
generator_baseline_loss = flags.baseline_cost * compute_baseline_loss(
generator_vtrace_returns.vs - generator_outputs["generator_baseline"]
)
generator_entropy_loss = flags.generator_entropy_cost * compute_entropy_loss(
generator_outputs["generator_logits"]
)
generator_total_loss = gg_loss + generator_entropy_loss + generator_baseline_loss
intrinsic_rewards_gen = generator_batch['reached']*(1- 0.9 * (generator_batch["episode_step"].float()/max_steps))
stats["gen_rewards"] = torch.mean(generator_clipped_rewards).item()
stats["gg_loss"] = gg_loss.item()
stats["generator_baseline_loss"] = generator_baseline_loss.item()
stats["generator_entropy_loss"] = generator_entropy_loss.item()
stats["mean_intrinsic_rewards"] = torch.mean(intrinsic_rewards_gen).item()
stats["mean_episode_steps"] = torch.mean(generator_batch["episode_step"]).item()
stats["ex_reward"] = torch.mean(generator_batch['ex_reward']).item()
stats["generator_current_target"] = generator_current_target
generator_scheduler.step()
generator_model_optimizer.zero_grad()
generator_total_loss.backward()
nn.utils.clip_grad_norm_(generator_model.parameters(), 40.0)
generator_model_optimizer.step()
actor_generator_model.load_state_dict(generator_model.state_dict())
if generator_batch_aux['frame'].shape[0]>0:
generator_batch = {key: tensor[:] for key, tensor in generator_batch_aux.items()}
else:
generator_batch = dict()
return stats
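# A minimal, self-contained sketch (added for illustration; not part of the original training code)
# of the threshold-style teacher reward used above: the teacher is rewarded only when the student
# reaches the proposed goal after at least `targ` environment steps, and penalized otherwise.
# The helper name `threshold_teacher_reward` and the exact +1/-1 convention are assumptions;
# the actual `distance2` helper is defined elsewhere in this file.
def threshold_teacher_reward(episode_step, reached, targ=10.0):
    """Return +1 where the goal was reached in >= targ steps, -1 everywhere else."""
    reached = reached.float()
    hard_enough = (episode_step.float() >= targ).float()
    return 2.0 * reached * hard_enough - 1.0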
def create_buffers(obs_shape, num_actions, flags, width, height, logits_size) -> Buffers:
T = flags.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
generator_baseline=dict(size=(T + 1,), dtype=torch.float32),
action=dict(size=(T + 1,), dtype=torch.int64),
episode_win=dict(size=(T + 1,), dtype=torch.int32),
generator_logits=dict(size=(T + 1, logits_size), dtype=torch.float32),
goal=dict(size=(T + 1,), dtype=torch.int64),
initial_frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
carried_col =dict(size=(T + 1,), dtype=torch.int64),
carried_obj =dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
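# A small usage sketch (an illustration with assumed placeholder values, not part of the original
# pipeline) of the shared rollout buffers returned by create_buffers: each key maps to a list of
# num_buffers shared tensors, one slot per actor hand-off, each holding one unroll of length T+1.
from types import SimpleNamespace as _SimpleNamespace

def _buffers_usage_sketch():
    _flags = _SimpleNamespace(unroll_length=8, num_buffers=4)  # placeholder values
    _buffers = create_buffers((7, 7, 3), 7, _flags, width=9, height=9, logits_size=81)
    slot, t = 0, 0  # buffer slot claimed by an actor, timestep within the unroll
    _buffers["reward"][slot][t] = 0.0
    _buffers["done"][slot][t] = True
    # the learner later stacks several slots along a new batch dimension: [T+1, B, ...]
    batch = {key: torch.stack([_buffers[key][m] for m in (0, 1)], dim=1) for key in _buffers}
    return batch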
def train(flags):
"""Full training loop."""
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
#env = wrappers.FullyObsWrapper(env)
if flags.num_input_frames > 1:
env = FrameStack(env, flags.num_input_frames)
generator_model = Generator(env.observation_space.shape, env.width, env.height, num_input_frames=flags.num_input_frames)
model = Net(env.observation_space.shape, env.action_space.n, state_embedding_dim=flags.state_embedding_dim, num_input_frames=flags.num_input_frames, use_lstm=flags.use_lstm, num_lstm_layers=flags.num_lstm_layers)
global goal_count_dict
goal_count_dict = torch.zeros(11).float().to(device=flags.device)
if flags.inner:
logits_size = (env.width-2)*(env.height-2)
else:
logits_size = env.width * env.height
buffers = create_buffers(env.observation_space.shape, model.num_actions, flags, env.width, env.height, logits_size)
model.share_memory()
generator_model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_()
initial_agent_state_buffers.append(state)
actor_processes = []
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(i, free_queue, full_queue, model, generator_model, buffers,
initial_agent_state_buffers, flags))
actor.start()
actor_processes.append(actor)
learner_model = Net(env.observation_space.shape, env.action_space.n, state_embedding_dim=flags.state_embedding_dim, num_input_frames=flags.num_input_frames, use_lstm=flags.use_lstm, num_lstm_layers=flags.num_lstm_layers).to(
device=flags.device
)
learner_generator_model = Generator(env.observation_space.shape, env.width, env.height, num_input_frames=flags.num_input_frames).to(device=flags.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
generator_model_optimizer = torch.optim.RMSprop(
learner_generator_model.parameters(),
lr=flags.generator_learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_frames) / flags.total_frames
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
generator_scheduler = torch.optim.lr_scheduler.LambdaLR(generator_model_optimizer, lr_lambda)
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
"gen_rewards",
"gg_loss",
"generator_entropy_loss",
"generator_baseline_loss",
"mean_intrinsic_rewards",
"mean_episode_steps",
"ex_reward",
"generator_current_target",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
frames, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal frames, stats
timings = prof.Timings()
while frames < flags.total_frames:
timings.reset()
batch, agent_state = get_batch(flags, free_queue, full_queue, buffers,
initial_agent_state_buffers, timings)
stats = learn(model, learner_model, generator_model, learner_generator_model, batch, agent_state, optimizer, generator_model_optimizer, scheduler, generator_scheduler, flags, env.max_steps)
timings.time("learn")
with lock:
to_log = dict(frames=frames)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
frames += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"generator_model_state_dict": generator_model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"generator_model_optimizer_state_dict": generator_model_optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"generator_scheduler_state_dict": generator_scheduler.state_dict(),
"flags": vars(flags),
},
checkpointpath,
)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while frames < flags.total_frames:
start_frames = frames
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > 10 * 60: # Save every 10 min.
checkpoint()
last_checkpoint_time = timer()
fps = (frames - start_frames) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"After %i frames: loss %f @ %.1f fps. %sStats:\n%s",
frames,
total_loss,
fps,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
return # Try joining actors then quit.
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d frames.", frames)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
checkpoint()
plogger.close()
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
class Generator(nn.Module):
"""Constructs the Teacher Policy which takes an initial observation and produces a goal."""
def __init__(self, observation_shape, width, height, num_input_frames, hidden_dim=256):
super(Generator, self).__init__()
self.observation_shape = observation_shape
self.height = height
self.width = width
self.env_dim = self.width * self.height
self.state_embedding_dim = 256
self.use_index_select = True
self.obj_dim = 5
self.col_dim = 3
self.con_dim = 2
self.num_channels = (self.obj_dim + self.col_dim + self.con_dim) * num_input_frames
if flags.disable_use_embedding:
print("not_using_embedding")
self.num_channels = 3*num_input_frames
self.embed_object = nn.Embedding(11, self.obj_dim)
self.embed_color = nn.Embedding(6, self.col_dim)
self.embed_contains = nn.Embedding(4, self.con_dim)
K = self.num_channels # number of input filters
F = 3 # filter dimensions
S = 1 # stride
P = 1 # padding
M = 16 # number of intermediate filters
Y = 8 # number of output filters
L = 4 # number of convnet layers
E = 1 # output of last layer
in_channels = [K] + [M] * 4
out_channels = [M] * 3 + [E]
conv_extract = [
nn.Conv2d(
in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=(F, F),
stride=S,
padding=P,
)
for i in range(L)
]
def interleave(xs, ys):
return [val for pair in zip(xs, ys) for val in pair]
self.extract_representation = nn.Sequential(
*interleave(conv_extract, [nn.ELU()] * len(conv_extract))
)
self.out_dim = self.env_dim * 16 + self.obj_dim + self.col_dim
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
if flags.inner:
self.aux_env_dim = (self.height-2) * (self.width-2)
else:
self.aux_env_dim = self.env_dim
self.baseline_teacher = init_(nn.Linear(self.aux_env_dim, 1))
def _select(self, embed, x):
"""Efficient function to get embedding from an index."""
if self.use_index_select:
out = embed.weight.index_select(0, x.reshape(-1))
# handle reshaping x to 1-d and output back to N-d
return out.reshape(x.shape +(-1,))
else:
return embed(x)
def create_embeddings(self, x, id):
"""Generates compositional embeddings."""
if id == 0:
objects_emb = self._select(self.embed_object, x[:,:,:,id::3])
elif id == 1:
objects_emb = self._select(self.embed_color, x[:,:,:,id::3])
elif id == 2:
objects_emb = self._select(self.embed_contains, x[:,:,:,id::3])
embeddings = torch.flatten(objects_emb, 3, 4)
return embeddings
def convert_inner(self, goals):
"""Transform environment if using inner flag."""
goals = goals.float()
goals += 2*(1+torch.floor(goals/(self.height-2)))
goals += self.height - 1
goals = goals.long()
return goals
def agent_loc(self, frames):
"""Returns the location of an agent from an observation."""
T, B, height, width, *_ = frames.shape
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(T,B,1)
return agent_location
def forward(self, inputs):
"""Main Function, takes an observation and returns a goal."""
x = inputs["frame"]
T, B, *_ = x.shape
carried_col = inputs["carried_col"]
carried_obj = inputs["carried_obj"]
x = torch.flatten(x, 0, 1) # Merge time and batch.
if flags.disable_use_embedding:
x = x.float()
carried_obj = carried_obj.float()
carried_col = carried_col.float()
else:
x = x.long()
carried_obj = carried_obj.long()
carried_col = carried_col.long()
x = torch.cat([self.create_embeddings(x, 0), self.create_embeddings(x, 1), self.create_embeddings(x, 2)], dim = 3)
carried_obj_emb = self._select(self.embed_object, carried_obj)
carried_col_emb = self._select(self.embed_color, carried_col)
x = x.transpose(1, 3)
carried_obj_emb = carried_obj_emb.view(T * B, -1)
carried_col_emb = carried_col_emb.view(T * B, -1)
x = self.extract_representation(x)
x = x.view(T * B, -1)
generator_logits = x.view(T*B, -1)
generator_baseline = self.baseline_teacher(generator_logits)
goal = torch.multinomial(F.softmax(generator_logits, dim=1), num_samples=1)
generator_logits = generator_logits.view(T, B, -1)
generator_baseline = generator_baseline.view(T, B)
goal = goal.view(T, B)
if flags.inner:
goal = self.convert_inner(goal)
return dict(goal=goal, generator_logits=generator_logits, generator_baseline=generator_baseline)
class MinigridNet(nn.Module):
"""Constructs the Student Policy which takes an observation and a goal and produces an action."""
def __init__(self, observation_shape, num_actions, state_embedding_dim=256, num_input_frames=1, use_lstm=False, num_lstm_layers=1):
super(MinigridNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
self.state_embedding_dim = state_embedding_dim
self.use_lstm = use_lstm
self.num_lstm_layers = num_lstm_layers
self.use_index_select = True
self.obj_dim = 5
self.col_dim = 3
self.con_dim = 2
self.goal_dim = flags.goal_dim
self.agent_loc_dim = 10
self.num_channels = (self.obj_dim + self.col_dim + self.con_dim + 1) * num_input_frames
if flags.disable_use_embedding:
print("not_using_embedding")
self.num_channels = (3+1+1+1+1)*num_input_frames
self.embed_object = nn.Embedding(11, self.obj_dim)
self.embed_color = nn.Embedding(6, self.col_dim)
self.embed_contains = nn.Embedding(4, self.con_dim)
self.embed_goal = nn.Embedding(self.observation_shape[0]*self.observation_shape[1] + 1, self.goal_dim)
self.embed_agent_loc = nn.Embedding(self.observation_shape[0]*self.observation_shape[1] + 1, self.agent_loc_dim)
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
self.feat_extract = nn.Sequential(
init_(nn.Conv2d(in_channels=self.num_channels, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
)
self.fc = nn.Sequential(
init_(nn.Linear(32 + self.obj_dim + self.col_dim, self.state_embedding_dim)),
nn.ReLU(),
init_(nn.Linear(self.state_embedding_dim, self.state_embedding_dim)),
nn.ReLU(),
)
if use_lstm:
self.core = nn.LSTM(self.state_embedding_dim, self.state_embedding_dim, self.num_lstm_layers)
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.policy = init_(nn.Linear(self.state_embedding_dim, self.num_actions))
self.baseline = init_(nn.Linear(self.state_embedding_dim, 1))
def initial_state(self, batch_size):
"""Initializes LSTM."""
if not self.use_lstm:
return tuple()
return tuple(torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) for _ in range(2))
def create_embeddings(self, x, id):
"""Generates compositional embeddings."""
if id == 0:
objects_emb = self._select(self.embed_object, x[:,:,:,id::3])
elif id == 1:
objects_emb = self._select(self.embed_color, x[:,:,:,id::3])
elif id == 2:
objects_emb = self._select(self.embed_contains, x[:,:,:,id::3])
embeddings = torch.flatten(objects_emb, 3, 4)
return embeddings
def _select(self, embed, x):
"""Efficient function to get embedding from an index."""
if self.use_index_select:
out = embed.weight.index_select(0, x.reshape(-1))
# handle reshaping x to 1-d and output back to N-d
return out.reshape(x.shape +(-1,))
else:
return embed(x)
def agent_loc(self, frames):
"""Returns the location of an agent from an observation."""
T, B, *_ = frames.shape
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(T,B,1)
return agent_location
def forward(self, inputs, core_state=(), goal=[]):
"""Main Function, takes an observation and a goal and returns and action."""
# -- [unroll_length x batch_size x height x width x channels]
x = inputs["frame"]
T, B, h, w, *_ = x.shape
# -- [unroll_length*batch_size x height x width x channels]
x = torch.flatten(x, 0, 1) # Merge time and batch.
goal = torch.flatten(goal, 0, 1)
# Creating goal_channel
goal_channel = torch.zeros_like(x, requires_grad=False)
goal_channel = torch.flatten(goal_channel, 1,2)[:,:,0]
for i in range(goal.shape[0]):
goal_channel[i,goal[i]] = 1.0
goal_channel = goal_channel.view(T*B, h, w, 1)
carried_col = inputs["carried_col"]
carried_obj = inputs["carried_obj"]
if flags.disable_use_embedding:
x = x.float()
goal = goal.float()
carried_obj = carried_obj.float()
carried_col = carried_col.float()
else:
x = x.long()
goal = goal.long()
carried_obj = carried_obj.long()
carried_col = carried_col.long()
# -- [B x H x W x K]
x = torch.cat([self.create_embeddings(x, 0), self.create_embeddings(x, 1), self.create_embeddings(x, 2), goal_channel.float()], dim = 3)
carried_obj_emb = self._select(self.embed_object, carried_obj)
carried_col_emb = self._select(self.embed_color, carried_col)
if flags.no_generator:
# Note: goal_emb is not defined in this forward pass (the goal is injected through goal_channel
# above), so this legacy branch would raise a NameError if flags.no_generator were set.
goal_emb = torch.zeros(goal_emb.shape, dtype=goal_emb.dtype, device=goal_emb.device, requires_grad = False)
x = x.transpose(1, 3)
x = self.feat_extract(x)
x = x.view(T * B, -1)
carried_obj_emb = carried_obj_emb.view(T * B, -1)
carried_col_emb = carried_col_emb.view(T * B, -1)
union = torch.cat([x, carried_obj_emb, carried_col_emb], dim=1)
core_input = self.fc(union)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return dict(policy_logits=policy_logits, baseline=baseline, action=action), core_state
Net = MinigridNet
GeneratorNet = Generator
class Minigrid2Image(gym.ObservationWrapper):
"""Get MiniGrid observation to ignore language instruction."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = env.observation_space.spaces["image"]
def observation(self, observation):
return observation["image"]
def create_env(flags):
return Minigrid2Image(wrappers.FullyObsWrapper(gym.make(flags.env)))
def main(flags):
if flags.mode == "train":
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
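# Example invocation (a sketch: the flag names mirror the flags.* attributes used above and are
# defined by the argparse parser earlier in this file; the environment id is only a placeholder):
#   python monobeast_amigo.py --mode train --env MiniGrid-KeyCorridorS3R3-v0 \
#       --num_actors 40 --total_frames 600000000 --savedir ./logs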
| adversarially-motivated-intrinsic-goals-main | monobeast/minigrid/monobeast_amigo.py |
import numpy as np
import pandas as pd
import statsmodels.api as sm
import gc
import operator
import networkx as nx
from tqdm import tqdm
G = nx.watts_strogatz_graph(2000000, 10, 0.5)
assignments = np.concatenate([[k]*10 for k in list(np.random.randint(0, 2, 2000000//10))])
sample = np.random.choice(2000000, 100000)
print('generating motif features for the sampled nodes')
data = []
for i in tqdm(sample):
neighbor = len(G[i])
bb_1 = np.sum([assignments[j] for j in G[i]])
bb_0 = neighbor - bb_1
bbb_0 = 0
bbb_1 = 0
bbb_2 = 0
bbn_0 = 0
bbn_1 = 0
bbn_2 = 0
open_square_0 = 0
open_square_1 = 0
open_square_2 = 0
open_square_3 = 0
for j in G[i]:
for k in G[i]:
if k > j:
if np.abs(j-k) <= 5: # simple ring-distance heuristic to decide whether j and k are themselves connected (to speed up the computation)
if assignments[j] + assignments[k] == 0:
bbb_0 += 1
elif assignments[j] + assignments[k] == 1:
bbb_1 += 1
else:
bbb_2 += 1
else:
if assignments[j] + assignments[k] == 0:
bbn_0 += 1
elif assignments[j] + assignments[k] == 1:
bbn_1 += 1
else:
bbn_2 += 1
for l in G[i]:
if l > k and np.abs(l-k) > 5 and np.abs(l-j) > 5:
if assignments[j] + assignments[k] + assignments[l] == 0:
open_square_0 += 1
elif assignments[j] + assignments[k] + assignments[l]== 1:
open_square_1 += 1
elif assignments[j] + assignments[k] + assignments[l]== 2:
open_square_2 += 1
else:
open_square_3 += 1
data.append([i, assignments[i], neighbor, bb_0, bb_1, bbb_0, bbb_1, bbb_2, bbn_0, bbn_1, bbn_2,
open_square_0, open_square_1, open_square_2, open_square_3])
data = pd.DataFrame.from_records(data)
data.columns = ['id', 'assignment', 'neighbor', 'bb_0', 'bb_1', 'bbb_0', 'bbb_1', 'bbb_2', 'bbn_0', 'bbn_1', 'bbn_2',
'open_square_0', 'open_square_1', 'open_square_2', 'open_square_3'
]
data['open_square_3_normalized'] = 1.0 * data['open_square_3']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['open_square_2_normalized'] = 1.0 * data['open_square_2']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['open_square_1_normalized'] = 1.0 * data['open_square_1']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['open_square_0_normalized'] = 1.0 * data['open_square_0']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['bbb_2_normalized'] = 1.0 * data['bbb_2']/(data['bbb_2']+data['bbb_1']+data['bbb_0'])
data['bbb_1_normalized'] = 1.0 * data['bbb_1']/(data['bbb_2']+data['bbb_1']+data['bbb_0'])
data['bbb_0_normalized'] = 1.0 * data['bbb_0']/(data['bbb_2']+data['bbb_1']+data['bbb_0'])
data['bbn_2_normalized'] = 1.0 * data['bbn_2']/(data['bbn_2']+data['bbn_1']+data['bbn_0'])
data['bbn_1_normalized'] = 1.0 * data['bbn_1']/(data['bbn_2']+data['bbn_1']+data['bbn_0'])
data['bbn_0_normalized'] = 1.0 * data['bbn_0']/(data['bbn_2']+data['bbn_1']+data['bbn_0'])
data['bb_0_normalized'] = 1.0 * data['bb_0']/(data['bb_0']+data['bb_1'])
data['bb_1_normalized'] = 1.0 * data['bb_1']/(data['bb_0']+data['bb_1'])
# compute structural diversity of the treated neighborhood and structural diversity of the full neighborhood
print('computing structural diversity')
structural_diversity = []
c = 0
for uid in list(data['id']):
structural_diversity.append(
nx.number_connected_components(nx.subgraph(G, [j for j in nx.neighbors(G, uid) if assignments[j] == 1]))
)
c += 1
data['structural_diversity'] = structural_diversity
structural_diversity_1 = []
c = 0
for uid in list(data['id']):
structural_diversity_1.append(
nx.number_connected_components(nx.subgraph(G, [j for j in nx.neighbors(G, uid)]))
)
c += 1
data['structural_diversity_1'] = structural_diversity_1
data['gender'] = np.random.randint(0, 2, len(data))
# pure cutoff
data['y1'] = data['neighbor'] * 0.1 + data['gender'] * 1 + \
data['assignment'] * (data['bbb_2_normalized'] > 0.7).astype(float) * 2 + \
np.random.normal(0, 1, len(data))
# structural diversity is causal
data['y2'] = \
data['neighbor'] * 0.1 + data['gender'] * 1 + \
data['structural_diversity'] + \
data['assignment'] * data['structural_diversity'] * 1 + \
np.random.normal(0, 1, len(data))
# structural diversity is correlational
data['y3'] = \
data['neighbor'] * 0.1 + data['gender'] * 1 + \
data['structural_diversity_1'] + \
data['assignment'] * data['structural_diversity_1'] * 1 + \
np.random.normal(0, 1, len(data))
# irrelevant covariates
data['y4'] = data['neighbor'] + np.random.normal(0, 1, len(data))
data.to_csv('data_ws.csv')
# bootstrapping
print('bootstrapping')
probabilities = []
for replicate in tqdm(range(100)):
probabilities_mc = []
assignments = np.concatenate([[k]*10 for k in list(np.random.randint(0, 2, 2000000//10))])
r = np.random.randint(10)
assignments = np.concatenate([assignments[r:], assignments[:r]])
for i in sample:
neighbor = len(G[i])
bb_1 = np.sum([assignments[j] for j in G[i]])
bb_0 = neighbor - bb_1
bbb_0 = 0
bbb_1 = 0
bbb_2 = 0
bbn_0 = 0
bbn_1 = 0
bbn_2 = 0
open_square_0 = 0
open_square_1 = 0
open_square_2 = 0
open_square_3 = 0
for j in G[i]:
for k in G[i]:
if k > j:
if np.abs(j-k) <= 5:
if assignments[j] + assignments[k] == 0:
bbb_0 += 1
elif assignments[j] + assignments[k] == 1:
bbb_1 += 1
else:
bbb_2 += 1
else:
if assignments[j] + assignments[k] == 0:
bbn_0 += 1
elif assignments[j] + assignments[k] == 1:
bbn_1 += 1
else:
bbn_2 += 1
for l in G[i]:
if l > k and np.abs(l-k) > 5 and np.abs(l-j) > 5:
if assignments[j] + assignments[k] + assignments[l] == 0:
open_square_0 += 1
elif assignments[j] + assignments[k] + assignments[l]== 1:
open_square_1 += 1
elif assignments[j] + assignments[k] + assignments[l]== 2:
open_square_2 += 1
else:
open_square_3 += 1
probabilities_mc.append([bb_0, bb_1, bbb_0, bbb_1, bbb_2, bbn_0, bbn_1, bbn_2,
open_square_0, open_square_1, open_square_2, open_square_3, assignments[i]
])
probabilities.append(probabilities_mc)
probabilities = np.array(probabilities).T
np.save('probabilities_ws.npy', probabilities)
| CausalMotifs-master | generate_WS.py |
from causalPartition import causalPartition
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# load and process data
data = pd.read_csv('data_ws.csv')
probabilities = np.load('probabilities_ws.npy')
new_probabilities = {}
new_probabilities['bbb_2_normalized'] = 1.0 * probabilities[4]/(probabilities[4]+probabilities[3]+probabilities[2])
new_probabilities['bbb_1_normalized'] = 1.0 * probabilities[3]/(probabilities[4]+probabilities[3]+probabilities[2])
new_probabilities['bbb_0_normalized'] = 1.0 * probabilities[2]/(probabilities[4]+probabilities[3]+probabilities[2])
new_probabilities['bbn_2_normalized'] = 1.0 * probabilities[7]/(probabilities[7]+probabilities[6]+probabilities[5])
new_probabilities['bbn_1_normalized'] = 1.0 * probabilities[6]/(probabilities[7]+probabilities[6]+probabilities[5])
new_probabilities['bbn_0_normalized'] = 1.0 * probabilities[5]/(probabilities[7]+probabilities[6]+probabilities[5])
new_probabilities['bb_1_normalized'] = 1.0 * probabilities[1]/(probabilities[1]+probabilities[0])
new_probabilities['bb_0_normalized'] = 1.0 * probabilities[0]/(probabilities[1]+probabilities[0])
new_probabilities['open_square_0_normalized'] = 1.0 * probabilities[8]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['open_square_1_normalized'] = 1.0 * probabilities[9]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['open_square_2_normalized'] = 1.0 * probabilities[10]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['open_square_3_normalized'] = 1.0 * probabilities[11]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['assignment'] = probabilities[-1]
# to satisfy positivity
idx = np.logical_and(np.logical_and(data['bbb_0'] + data['bbb_1'] + data['bbb_2'] > 0,
data['bbn_0'] + data['bbn_1'] + data['bbn_2'] > 0),
data['open_square_0']+data['open_square_1']+data['open_square_2']+data['open_square_3'] > 0)
data_ = data[idx]
probabilities_ = {}
input_features = ['assignment',
'bb_1_normalized', 'bb_0_normalized',
'bbb_0_normalized', 'bbb_1_normalized', 'bbb_2_normalized',
'bbn_0_normalized', 'bbn_1_normalized', 'bbn_2_normalized',
'open_square_0_normalized', 'open_square_1_normalized', 'open_square_2_normalized',
'open_square_3_normalized'
]
for key in ['assignment']+input_features:
probabilities_[key] = new_probabilities[key][idx]
# train the tree (separate=True means the ego's assignment variable is treated as a splitting dimension); adjust the parameters as needed
outcome = 'y2'
partition = causalPartition(data_, probabilities_, 'assignment')
train_result_separate = partition.split_exposure_hajek(True, outcome, input_features,
max_attempt=10, eps=0.001,
delta=0.01,
criteria={'non_trivial_reduction': 0,
'min_leaf_size': 4000})
partition.plot_tree(train_result_separate)
est_result_separate = partition.estimate_exposure_hajek(train_result_separate,
input_features, outcome, eps=0.001, separate=True)
partition.plot_tree(est_result_separate)
# train the tree (separate=False means we examine heterogeneous indirect effects); adjust the parameters as needed
outcome = 'y2'
input_features = [
# 'assignment',
'bb_1_normalized', 'bb_0_normalized',
'bbb_0_normalized', 'bbb_1_normalized', 'bbb_2_normalized',
'bbn_0_normalized', 'bbn_1_normalized', 'bbn_2_normalized',
'open_square_0_normalized', 'open_square_1_normalized', 'open_square_2_normalized', 'open_square_3_normalized'
]
partition = causalPartition(data_, probabilities_, 'assignment')
train_result_nonseparate = partition.split_exposure_hajek(False, outcome, input_features,
max_attempt=10, eps=0.001,
delta=0.01,
criteria={'non_trivial_reduction': 0,
'min_leaf_size': 4000})
partition.plot_tree(train_result_nonseparate)
est_result_separate = partition.estimate_exposure_hajek(train_result_nonseparate,
input_features, outcome, eps=0.01, separate=False)
partition.plot_tree(est_result_separate)
| CausalMotifs-master | example.py |
import numpy as np
import pandas as pd
import statsmodels.api as sm
import gc
import operator
import networkx as nx
class causalPartition:
df = None # the whole dataset
probabilities = None # the Monte Carlo probabilities, a dict, each element represents a dimension of the intervention vector
# each element is a matrix [num_nodes * num_bootstrap]
result_separate = None
result_eht = None
treatment = None # the name of treatment feature (should belong to the dict probabilities)
df_train = None
df_est = None
def __init__(self, df, probabilities, treatment, ratio_train=0.5):
"""
ratio_train is the fraction of observations assigned to the training set; the sample is split up front to support honest estimation.
By default it is 50% vs 50% so that the training and estimation sets yield confidence intervals of roughly the same width.
"""
self.df = df
self.probabilities = probabilities
self.treatment = treatment
self.idx_tr = np.random.random(len(df)) < ratio_train # sample the training set
self.idx_est = np.logical_not(self.idx_tr) # sample the estimation set
self.df_train = df[self.idx_tr]
self.df_est = df[np.logical_not(self.idx_tr)]
self.result_separate = None
self.est_result_separate_eht = None
# for each observation, if there is small probability of belong to the partition defined by rules
def _contain_zero(self, probabilities, rules, eps, delta, treated=None):
"""
For each observation (indicated by an element in the vector),
whether it has <= eps probability to belong to the partition implied by [rules]
Treated: == 1/0 if we want to append the rule for the treatment variable
== none otherwise
"""
if treated is None:
return np.mean(np.product([probabilities[key] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key] > th for key, sign, th in rules if sign == 1],
axis=0) > 0, axis=1
) <= eps
else:
# Also consider the treated conditions for egos.
# In separate trees, the treatment conditions for egos should also be considered
return np.mean(np.product([probabilities[key] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key] > th for key, sign, th in rules if sign == 1] + \
[probabilities[self.treatment] == treated],
axis=0) > 0, axis=1
) <= eps
def _hajek_se(self, d, p, outcome):
"""
- Taylor linearization for the standard error of the Hajek estimator
- For non-separate trees WLS is used directly instead, but the WLS S.E. is usually overestimated
"""
average_hajek_var_up = np.sum( ((d[outcome]/p) ** 2) * (1 - p) ) # numerator
average_hajek_var_down = np.sum( ((1.0/p) ** 2) * (1 - p) ) # denominator
average_hajek_cov = np.sum( ((1.0/p) ** 2) * d[outcome] * (1 - p) )
average_hajek_sum_up = np.sum(d[outcome]/p) # numerator
average_hajek_sum_down = np.sum(1.0/p) # denominator
se = np.sqrt(1.0 / (average_hajek_sum_down**2) * average_hajek_var_up + \
(average_hajek_sum_up**2) / (average_hajek_sum_down**4) * average_hajek_var_down + \
- 2.0 * average_hajek_sum_up / (average_hajek_sum_down**3) * average_hajek_cov)
# Taylor linearization (Sarndal, Swensson and Wretman, 1992, pp. 172-174)
return se
def _plot_tree(self, est_result_separate, node_id, prefix):
if node_id > 1 and node_id % 2 == 0:
sign = '<='
elif node_id > 1 and node_id % 2 == 1:
sign = '> '
else:
sign = ''
if 'left_result' in est_result_separate:
print('%s%s(%d) split %s at %f, n=%d, avg=%f, se=%f' % (prefix, sign, node_id, est_result_separate['feature'],
est_result_separate['threshold'], est_result_separate['N'], est_result_separate['hajek'], est_result_separate['hajek_se']))
self._plot_tree(est_result_separate['left_result'], node_id*2, prefix+'\t')
self._plot_tree(est_result_separate['right_result'], node_id*2+1, prefix+'\t')
else:
print('%s%s(%d) terminate, n=%d, avg=%f, se=%f' % (prefix, sign, node_id,
est_result_separate['N'], est_result_separate['hajek'], est_result_separate['hajek_se']))
def plot_tree(self, result=None):
if 1 in result:
print('treated')
self._plot_tree(result[1], 1, '')
print('non-treated')
self._plot_tree(result[0], 1, '')
else:
self._plot_tree(result, 1, '')
def _split_exposure_hajek(self, node_id, df, probabilities, feature_set, max_attempt, eps, delta,
outcome, rules, N, current_mse, criteria={'non_trivial_reduction': 0},
first_split_treatment=True):
"""
the actual splitting implementation for separate tree;
by recursion
"""
b_feature = ''
b_threshold = 0
b_left = None
b_right = None
b_average_left_hajek = 0
b_average_right_hajek = 0
b_mse = 10000000000.0 # a very large mse
ranges = {}
# enumerate each feature
for feature in feature_set:
gc.collect()
# find a more compact region
upper = 1.
lower = 0.
for rule in rules:
# rules: list of tuples to describe the decision rules
# tuples(feature, 0/1: lower or upper bound, value)
if rule[0] == feature:
if rule[1] == 0:
lower = np.maximum(rule[2], lower)
else:
upper = np.minimum(rule[2], upper)
if lower >= upper:
continue
for k in range(max_attempt):
if first_split_treatment and node_id == 1:
if feature != self.treatment or k != 0:
continue
threshold = np.random.uniform(lower, upper) # randomly select a threshold: left branch is <= threshold, right branch is > threshold
# make sure it is a valid split --- each observation should have a non-trivial (> eps) probability of belonging to each partition
cz_l = self._contain_zero(probabilities, rules+[(feature, 0, threshold)], eps, delta)
cz_r = self._contain_zero(probabilities, rules+[(feature, 1, threshold)], eps, delta)
if np.mean(cz_l) > delta or np.mean(cz_r) > delta:
continue
# if (almost) positivity can't be satisfied
idxs_left = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] <= threshold],
axis=0) > 0
idxs_right = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] > threshold],
axis=0) > 0
left = df[idxs_left]
right = df[idxs_right]
# generalized propensity score (probability of belonging in an exposure condition)
propensities_left = np.mean(np.product([probabilities[key][idxs_left] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_left] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_left] <= threshold],
axis=0) > 0, axis=1)
# generalized propensity score (probability of belonging in an exposure condition)
propensities_right = np.mean(np.product([probabilities[key][idxs_right] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_right] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_right] > threshold],
axis=0) > 0, axis=1)
# again, filter out data points with near-zero propensities (usually none or very few are dropped)
if len(left) == 0 or len(right) == 0:
continue
filter_left = propensities_left > 0
left = left[filter_left]
propensities_left = propensities_left[filter_left]
filter_right = propensities_right > 0
right = right[filter_right]
propensities_right = propensities_right[filter_right]
mod_left = sm.WLS(left[outcome], np.ones(len(left)), weights=1.0 / propensities_left)
mod_right = sm.WLS(right[outcome], np.ones(len(right)), weights=1.0 / propensities_right)
res_left = mod_left.fit()
res_right = mod_right.fit()
average_left_hajek = res_left.params[0]
average_right_hajek = res_right.params[0]
average_left_hajek_se = self._hajek_se(left, propensities_left, outcome)
average_right_hajek_se = self._hajek_se(right, propensities_right, outcome)
mse_left = np.sum((1.0 / propensities_left) * ((res_left.resid) ** 2))
mse_right = np.sum((1.0 / propensities_right) * ((res_right.resid) ** 2))
mse = mse_left * len(left)/(len(left)+len(right)) + mse_right * len(right)/(len(left)+len(right))
if mse < b_mse:
flag = True
assert len(criteria) > 0
if 'non_trivial_reduction' in criteria:
if not (mse < current_mse - criteria['non_trivial_reduction']):
flag = False
if 'reasonable_propensity' in criteria:
if not (np.abs(np.sum(1.0 / propensities_left)/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_right)/len(df) - 1.0) <= criteria['reasonable_propensity'] \
):
flag = False
if 'separate_reduction' in criteria:
if not (mse_left < current_mse and mse_right < current_mse):
flag = False
if 'min_leaf_size' in criteria:
if not (len(left) >= criteria['min_leaf_size'] and len(right) >= criteria['min_leaf_size']):
flag = False
if flag:
b_feature = feature
b_mse = mse
b_mse_left = mse_left
b_mse_right = mse_right
b_threshold = threshold
b_average_left_hajek = average_left_hajek
b_average_right_hajek = average_right_hajek
b_average_left_hajek_se = average_left_hajek_se
b_average_right_hajek_se = average_right_hajek_se
b_left_den = np.sum(1.0 / propensities_left)
b_right_den = np.sum(1.0 / propensities_right)
b_left = left
b_right = right
b_left_rules = rules + [(feature, 0, threshold)]
b_right_rules = rules + [(feature, 1, threshold)]
result = {}
if b_feature != '':
# if find a valid partition
result_left = self._split_exposure_hajek(node_id*2, df, probabilities, feature_set, max_attempt, eps, delta,
outcome, b_left_rules, len(b_left), b_mse_left, criteria)
result_right = self._split_exposure_hajek(node_id*2+1, df, probabilities, feature_set, max_attempt, eps, delta,
outcome, b_right_rules, len(b_right), b_mse_right, criteria)
result['mse'] = result_left['mse'] * 1.0 * len(b_left)/(len(b_left)+len(b_right)) + \
result_right['mse'] * 1.0 * len(b_right)/(len(b_left)+len(b_right))
result['feature'] = b_feature
result['threshold'] = b_threshold
result_left['hajek'] = b_average_left_hajek
result_right['hajek'] = b_average_right_hajek
result_left['hajek_se'] = b_average_left_hajek_se
result_right['hajek_se'] = b_average_right_hajek_se
result_left['N'] = len(b_left)
result_right['N'] = len(b_right)
result_left['den'] = b_left_den
result_right['den'] = b_right_den
result['left_result'] = result_left
result['right_result'] = result_right
return result
else:
result['mse'] = current_mse
return result
def _split_exposure_validate_eht(self, node_id, df_est, result, probabilities_est, rules, outcome, eps=0.005):
"""
Re-estimate the trained non-separate tree on the held-out estimation set (honest estimation).
"""
est_result = {}
if 'left_result' in result:
est_result['feature'] = result['feature']
est_result['threshold'] = result['threshold']
est_result['left_result'] = self._split_exposure_validate_eht(node_id*2, df_est, result['left_result'], probabilities_est,
rules+[(result['feature'], 0, result['threshold'])], outcome, eps)
est_result['right_result'] = self._split_exposure_validate_eht(node_id*2+1, df_est, result['right_result'], probabilities_est,
rules+[(result['feature'], 1, result['threshold'])], outcome, eps)
if rules:
# if this is not the root
idxs = np.product([df_est[key] <= th for key, sign, th in rules if sign == 0] + \
[df_est[key] > th for key, sign, th in rules if sign == 1],
axis=0) > 0
dff = df_est[idxs]
else:
idxs = np.ones(len(df_est)).astype(bool)
dff = df_est
propensities_1 = np.mean(np.product([probabilities_est[key][idxs] <= th for key, sign, th in rules if sign == 0] + \
[probabilities_est[key][idxs] > th for key, sign, th in rules if sign == 1]+\
[probabilities_est[self.treatment][idxs] == 1],
axis=0), axis=1)
propensities_0 = np.mean(np.product([probabilities_est[key][idxs] <= th for key, sign, th in rules if sign == 0] + \
[probabilities_est[key][idxs] > th for key, sign, th in rules if sign == 1]+\
[probabilities_est[self.treatment][idxs] == 0],
axis=0), axis=1)
idxs_filter = np.logical_and(propensities_1 > 0, propensities_0 > 0)
dff = dff[idxs_filter]
propensities_1 = propensities_1[idxs_filter]
propensities_0 = propensities_0[idxs_filter]
mod = sm.WLS(dff[outcome], sm.add_constant(dff[self.treatment]),
weights=1.0 / propensities_1 * dff[self.treatment] + 1.0 / propensities_0 * (1-dff[self.treatment]))
res = mod.fit()
mse = np.sum((res.resid ** 2) * (1.0 / propensities_1 * dff[self.treatment] + 1.0 / propensities_0 * (1-dff[self.treatment])))
average_hajek = res.params[1]
average_hajek_se = res.bse[1] # dff[outcome].std() / np.sqrt(len(dff)-1)
est_result['hajek'] = average_hajek
est_result['hajek_se'] = average_hajek_se
est_result['mse'] = mse
est_result['N'] = len(dff)
return est_result
def _split_exposure_validate(self, node_id, df_est, result,
probabilities_est, rules, outcome, eps=0.005):
est_result = {}
if 'left_result' in result:
est_result['feature'] = result['feature']
est_result['threshold'] = result['threshold']
est_result['left_result'] = self._split_exposure_validate(node_id*2, df_est, result['left_result'], probabilities_est,
rules+[(result['feature'], 0, result['threshold'])], outcome, eps)
est_result['right_result'] = self._split_exposure_validate(node_id*2+1, df_est, result['right_result'], probabilities_est,
rules+[(result['feature'], 1, result['threshold'])], outcome, eps)
if rules:
idxs = np.product([df_est[key] <= th for key, sign, th in rules if sign == 0] + \
[df_est[key] > th for key, sign, th in rules if sign == 1], axis=0) > 0
dff = df_est[idxs]
propensities = np.mean(np.product([probabilities_est[key][idxs] <= th for key, sign, th in rules if sign == 0] + \
[probabilities_est[key][idxs] > th for key, sign, th in rules if sign == 1],
axis=0), axis=1)
idxs_filter = propensities > eps
dff = dff[idxs_filter]
propensities = propensities[idxs_filter]
else:
dff = df_est
propensities = np.ones(len(dff))
mod = sm.OLS(dff[outcome], np.ones(len(dff)))
res = mod.fit()
mse = np.sum((res.resid ** 2) * 1.0 / propensities)
average_hajek = res.params[0]
if node_id == 1:
average_hajek_se = dff[outcome].std() / np.sqrt(len(dff)-1)
else:
average_hajek_se = self._hajek_se(dff, propensities, outcome)
est_result['hajek'] = average_hajek
est_result['hajek_se'] = average_hajek_se
est_result['mse'] = mse
est_result['N'] = len(dff)
return est_result
def _split_exposure_hajek_eht(self, node_id, df, probabilities, feature_set, max_attempt, eps, delta, outcome, rules, N, current_mse, criteria):
"""
the actual splitting implementation for non-separate tree;
recursion
"""
b_feature = ''
b_threshold = 0
b_left = None
b_right = None
b_average_left_hajek = 0
b_average_right_hajek = 0
b_mse = 10000000000.0
ranges = {}
for feature in feature_set:
gc.collect()
# find the more compact valid region
upper = 1.
lower = 0.
for rule in rules:
if rule[0] == feature:
if rule[1] == 0:
lower = np.maximum(rule[2], lower)
else:
upper = np.minimum(rule[2], upper)
if lower > upper:
continue
for k in range(max_attempt):
threshold = np.random.uniform(lower, upper)
cz_l_1 = self._contain_zero(probabilities, rules+[(feature, 0, threshold)], eps, delta, treated=1)
cz_r_1 = self._contain_zero(probabilities, rules+[(feature, 1, threshold)], eps, delta, treated=1)
cz_l_0 = self._contain_zero(probabilities, rules+[(feature, 0, threshold)], eps, delta, treated=0)
cz_r_0 = self._contain_zero(probabilities, rules+[(feature, 1, threshold)], eps, delta, treated=0)
if np.mean(cz_l_1) > delta or np.mean(cz_r_1) > delta or np.mean(cz_l_0) > delta or np.mean(cz_r_0) > delta:
continue
idxs_left = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] <= threshold],
axis=0) > 0
idxs_right = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] > threshold],
axis=0) > 0
left = df[idxs_left]
right = df[idxs_right]
# propensity score for left partition + ego treated
propensities_left_1 = np.mean(np.product([probabilities[key][idxs_left] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_left] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_left] <= threshold] + \
[probabilities[self.treatment][idxs_left] == 1],
axis=0), axis=1)
# propensity score for left partition + ego non treated
propensities_left_0 = np.mean(np.product([probabilities[key][idxs_left] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_left] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_left] <= threshold] + \
[probabilities[self.treatment][idxs_left] == 0],
axis=0), axis=1)
propensities_right_1 = np.mean(np.product([probabilities[key][idxs_right] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_right] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_right] > threshold] + \
[probabilities[self.treatment][idxs_right] == 1],
axis=0), axis=1)
propensities_right_0 = np.mean(np.product([probabilities[key][idxs_right] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_right] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_right] > threshold] + \
[probabilities[self.treatment][idxs_right] == 0],
axis=0), axis=1)
# filter out observations whose propensity scores are very small (this may drop some observations)
idxs_left_filter = np.logical_and(propensities_left_1 > eps, propensities_left_0 > eps)
left = left[idxs_left_filter]
propensities_left_1 = propensities_left_1[idxs_left_filter]
propensities_left_0 = propensities_left_0[idxs_left_filter]
# filter out observations whose propensity scores are very small (this may drop some observations)
idxs_right_filter = np.logical_and(propensities_right_1 > eps, propensities_right_0 > eps)
right = right[idxs_right_filter]
propensities_right_1 = propensities_right_1[idxs_right_filter]
propensities_right_0 = propensities_right_0[idxs_right_filter]
if np.mean(left[self.treatment]) == 0 or np.mean(left[self.treatment]) == 1 or \
np.mean(right[self.treatment]) == 0 or np.mean(right[self.treatment]) == 1:
continue
if len(left) == 0 or len(right) == 0:
continue
# The covariate implementation does not work as expected; should always be None
mod_left = sm.WLS(left[outcome], sm.add_constant(left[[self.treatment]]), \
weights=1.0 / propensities_left_1 * left[self.treatment] + 1.0 / propensities_left_0 * (1-left[self.treatment]))
res_left = mod_left.fit()
mod_right = sm.WLS(right[outcome], sm.add_constant(right[self.treatment]), \
weights=1.0 / propensities_right_1 * right[self.treatment] + 1.0 / propensities_right_0 * (1-right[self.treatment]))
res_right = mod_right.fit()
average_left_hajek = res_left.params[1]
average_right_hajek = res_right.params[1]
average_left_hajek_se = res_left.bse[1]
average_right_hajek_se = res_right.bse[1]
# need further improvement
mse_left = np.sum((1.0 / propensities_left_1 * left[self.treatment] + 1.0 / propensities_left_0 * (1-left[self.treatment])) *
((res_left.resid) ** 2))
mse_right = np.sum((1.0 / propensities_right_1 * right[self.treatment] + 1.0 / propensities_right_0 * (1-right[self.treatment])) *
((res_right.resid) ** 2))
mse = mse_left * 1.0 * len(left)/(len(left)+len(right)) + mse_right * 1.0 * len(right)/(len(left)+len(right))
if mse < b_mse:
flag = True
assert len(criteria) > 0
if 'non_trivial_reduction' in criteria:
if not (mse < current_mse - criteria['non_trivial_reduction']):
flag = False
if 'reasonable_propensity' in criteria:
if not (np.abs(np.sum(1.0 / propensities_left_1 * left[self.treatment])/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_right_1 * right[self.treatment])/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_left_0 * (1 - left[self.treatment]))/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_right_0 * (1 - right[self.treatment]))/len(df) - 1.0) <= criteria['reasonable_propensity']
):
flag = False
if 'separate_reduction' in criteria:
if not (mse_left < current_mse and mse_right < current_mse):
flag = False
if 'min_leaf_size' in criteria:
if not (len(left) >= criteria['min_leaf_size'] and len(right) >= criteria['min_leaf_size']):
flag = False
if flag:
b_feature = feature
b_mse = mse
b_mse_left = mse_left
b_mse_right = mse_right
b_threshold = threshold
b_average_left_hajek = average_left_hajek
b_average_right_hajek = average_right_hajek
b_average_left_hajek_se = average_left_hajek_se
b_average_right_hajek_se = average_right_hajek_se
b_left = left
b_right = right
b_left_rules = rules + [(feature, 0, threshold)]
b_right_rules = rules + [(feature, 1, threshold)]
result = {}
if b_feature != '':
# if find a valid partition
result_left = self._split_exposure_hajek_eht(node_id*2, df, probabilities, feature_set, max_attempt, eps, delta, outcome, b_left_rules, N, b_mse_left, criteria)
result_right = self._split_exposure_hajek_eht(node_id*2+1, df, probabilities, feature_set, max_attempt, eps, delta, outcome, b_right_rules, N, b_mse_right, criteria)
result['mse'] = result_left['mse'] * 1.0 * len(b_left)/(len(b_left)+len(b_right)) + \
result_right['mse'] * 1.0 * len(b_right)/(len(b_left)+len(b_right))
result['feature'] = b_feature
result['threshold'] = b_threshold
result_left['hajek'] = b_average_left_hajek
result_right['hajek'] = b_average_right_hajek
result_left['hajek_se'] = b_average_left_hajek_se
result_right['hajek_se'] = b_average_right_hajek_se
result_left['N'] = len(b_left)
result_right['N'] = len(b_right)
result['left_result'] = result_left
result['right_result'] = result_right
return result
else:
result['mse'] = current_mse
return result
def estimate_exposure_hajek(self, train_result_separate, indirect_space, outcome, eps=0.005, separate=True):
"""
train_result_separate: tree structure returned by split_exposure_hajek
indirect_space: feature space (consistent with the training input)
outcome: outcome variable (consistent with the training input)
eps: propensity filtering threshold (consistent with the training input)
separate: True = the ego's assignment is one of the splitting dimensions; False = estimate the ego treatment effect within each leaf
"""
if separate:
df_est = self.df_est
probabilities = self.probabilities
probabilities_est = {}
for key in [self.treatment]+indirect_space:
probabilities_est[key] = self.probabilities[key][self.idx_est]
est_result_separate = {}
est_result_separate = self._split_exposure_validate(1, df_est, train_result_separate, probabilities_est, [], outcome, eps)
self.est_result_separate = est_result_separate
return est_result_separate
else:
# non-separate tree: estimate the within-leaf treatment effect on the estimation set
df_est = self.df_est
probabilities_est = {}
for key in indirect_space+[self.treatment]:
probabilities_est[key] = self.probabilities[key][self.idx_est.astype(bool)]
est_result_separate_eht = {}
est_result_separate_eht = self._split_exposure_validate_eht(1, df_est, train_result_separate, probabilities_est, [], outcome, eps)
self.est_result_separate_eht = est_result_separate_eht
return est_result_separate_eht
def split_exposure_hajek(self, separate, outcome, feature_set, max_attempt=30, eps=0.0, delta=0.0,
df_train=None, probabilities=None, criteria={'non_trivial_reduction': 0}):
"""
The API for splitting
separate: True = separate trees (the ego's treatment is a splitting dimension); False = heterogeneous-treatment-effect tree
outcome: outcome variable
feature_set: a list of features used to partition (may include ``assignment'')
criteria: dict of stopping/validity rules (e.g. non_trivial_reduction, min_leaf_size); a split is kept only if they are satisfied
max_attempt: number of candidate thresholds sampled per feature -- a larger value tends to overfit more
eps: minimum per-observation probability of falling into a candidate partition
delta: maximum fraction of observations allowed to violate the eps positivity check
df_train: leave it as None
probabilities: leave it as None
"""
if separate == True:
df_train = self.df_train # training set
probabilities = self.probabilities # probability tensor
probabilities_train = {}
for key in [self.treatment]+feature_set:
probabilities_train[key] = probabilities[key][self.idx_tr]
mod = sm.WLS(df_train[outcome], np.ones(len(df_train)))
res = mod.fit()
total_sse = np.sum(res.resid ** 2) # total sse
train_result = {}
train_result = self._split_exposure_hajek(1, df_train, probabilities_train, feature_set, max_attempt,
eps, delta, outcome, [],
len(df_train), total_sse, criteria)
train_result['N'] = len(df_train)
train_result['hajek'] = df_train[outcome].mean()
train_result['hajek_se'] = df_train[outcome].std() / np.sqrt(len(df_train[outcome])-1)
self.result_separate = train_result
return train_result
else:
df_train = self.df_train
probabilities = self.probabilities
probabilities_train = {}
for key in [self.treatment]+feature_set:
probabilities_train[key] = probabilities[key][self.idx_tr]
mod = sm.WLS(df_train[outcome], sm.add_constant(df_train[self.treatment]))
res = mod.fit()
total_sse = np.sum(res.resid ** 2) * 2
train_result_eht = {}
train_result_eht = self._split_exposure_hajek_eht(1, df_train, probabilities_train, feature_set, max_attempt,
eps, delta, outcome, [], len(df_train), total_sse, criteria)
train_result_eht['N'] = len(df_train)
train_result_eht['hajek'] = res.params[1]
train_result_eht['hajek_se'] = res.bse[1]
return train_result_eht
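# A standalone numeric sketch (toy numbers; added for illustration) of what causalPartition._hajek_se
# computes: the Hajek (self-normalized IPW) mean sum(y_i/p_i)/sum(1/p_i) and its Taylor-linearized
# (delta-method) standard error for the ratio of the two inverse-propensity-weighted sums.
def _hajek_demo():
    y = np.array([1.0, 0.0, 2.0, 1.5])   # toy outcomes
    p = np.array([0.8, 0.5, 0.6, 0.9])   # toy generalized propensity scores
    num, den = np.sum(y / p), np.sum(1.0 / p)
    hajek = num / den
    var_num = np.sum(((y / p) ** 2) * (1 - p))
    var_den = np.sum(((1.0 / p) ** 2) * (1 - p))
    cov = np.sum(((1.0 / p) ** 2) * y * (1 - p))
    se = np.sqrt(var_num / den ** 2 + (num ** 2) * var_den / den ** 4 - 2.0 * num * cov / den ** 3)
    return hajek, se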
| CausalMotifs-master | causalPartition.py |
import os
import sys
header = """Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
def prefixed_header(prefix):
return [ prefix + l + "\n" for l in header.split("\n")]
filename = sys.argv[1]
with open(filename, "r") as f:
lines = f.readlines()
ext = os.path.splitext(filename)[1]
if ext in (".c", ".cc", ".h", ".cpp"):
lines = prefixed_header("// ") + lines
elif ext in (".py"):
lines = prefixed_header("# ") + lines
else:
print(f"File {filename} is not python or C/C++. Skip..")
sys.exit(0)
with open(filename, "w") as f:
f.writelines(lines) | CollaQ-main | header.py |
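# Example usage (a sketch; the shell loop is an assumption about how one might drive this script,
# which by itself only processes the single file passed as sys.argv[1]):
#   python header.py path/to/file.py
#   git ls-files '*.py' '*.cc' '*.h' | xargs -n1 python header.py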
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.vdn import VDNMixer
from modules.mixers.qmix import QMixer
import torch as th
from torch.optim import RMSprop, Adam
from collections import defaultdict
import numpy as np
from operator import itemgetter
import operator
class RNDModel(th.nn.Module):
def __init__(self, args):
super(RNDModel, self).__init__()
self.state_dim = int(np.prod(args.state_shape))
self.body_target = th.nn.Sequential(th.nn.Linear(self.state_dim, 128), th.nn.ReLU(),
th.nn.Linear(128, 128), th.nn.ReLU(), th.nn.Linear(128, 128), th.nn.ReLU(), th.nn.Linear(128, 128),)
self.body_model = th.nn.Sequential(th.nn.Linear(self.state_dim, 128), th.nn.ReLU(),
th.nn.Linear(128, 128),)
self.optim = Adam(self.body_model.parameters(), lr=0.00001)
self.to(args.device)
def get_reward(self, x):
bs = x.shape[0]
x = x.reshape(-1, self.state_dim)
y_target = self.body_target(x)
y_model = self.body_model(x)
reward = (y_target - y_model).pow(2).sum(dim=-1)
reward = reward.reshape(bs, -1, 1)
return reward
def update(self, x):
x = x.reshape(-1, self.state_dim)
y_target = self.body_target(x)
y_model = self.body_model(x)
loss = (y_target - y_model).pow(2).sum(dim=-1).mean(dim=0)
self.optim.zero_grad()
loss.backward()
self.optim.step()
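# A minimal usage sketch of the RND intrinsic-reward module above (the args namespace is an assumed
# stand-in for the real config; RNDModel only needs state_shape and device from it).
from types import SimpleNamespace

def _rnd_usage_sketch():
    args = SimpleNamespace(state_shape=(48,), device="cpu")  # placeholder config
    rnd = RNDModel(args)
    states = th.randn(4, 10, 48)           # [batch, time, state_dim]
    r_intrinsic = rnd.get_reward(states)    # [batch, time, 1]; large where the predictor is surprised
    rnd.update(states)                      # train the predictor so familiar states earn less bonus
    return r_intrinsic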
class QExploreLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = logger
self.params = list(mac.parameters())
self.last_target_update_episode = 0
self.mixer = None
if args.mixer is not None:
if args.mixer == "vdn":
self.mixer = VDNMixer()
elif args.mixer == "qmix":
self.mixer = QMixer(args)
else:
raise ValueError("Mixer {} not recognised.".format(args.mixer))
self.params += list(self.mixer.parameters())
self.target_mixer = copy.deepcopy(self.mixer)
self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
# a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
self.target_mac = copy.deepcopy(mac)
self.log_stats_t = -self.args.learner_log_interval - 1
# Setup intrinsic module
self.e_type = args.e_type
if args.e_type == "count":
self.count_dict = defaultdict(int)
self.bin_coef = args.bin_coef
elif args.e_type == "rnd":
self.rnd_model = RNDModel(args)
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
# Calculate estimated Q-Values
mac_out = []
self.mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
agent_outs = self.mac.forward(batch, t=t)
mac_out.append(agent_outs)
mac_out = th.stack(mac_out, dim=1) # Concat over time
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
        # We don't need the first timestep's Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[avail_actions[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = mac_out.clone().detach()
mac_out_detach[avail_actions == 0] = -9999999
cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
else:
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if self.mixer is not None:
chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])
# Calculate intrinsic reward
if self.e_type == "count":
            #TODO: need to check dimension
char_state = np.core.defchararray.add(((batch["state"][:, 1:]+1).cpu().numpy()//self.bin_coef).astype(str), "_")
shape = char_state.shape[0:2]
char_state = char_state.reshape((-1, char_state.shape[-1]))
idx = [''.join(row) for row in char_state]
rewards_i = []
for _idx in idx:
self.count_dict[_idx] += 1
rewards_i.append(1/self.count_dict[_idx])
rewards_i = th.tensor(rewards_i, device=self.args.device).reshape((shape+(1,)))
elif self.e_type == "rnd":
rewards_i = self.rnd_model.get_reward(batch["state"][:, 1:]).detach()
self.rnd_model.update(batch["state"][:, 1:])
# Calculate 1-step Q-Learning targets
targets = rewards + rewards_i * self.args.intrinsic_coef + self.args.gamma * (1 - terminated) * target_max_qvals
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
loss = (masked_td_error ** 2).sum() / mask.sum()
# Optimise
self.optimiser.zero_grad()
loss.backward()
grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
self.optimiser.step()
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
self._update_targets()
self.last_target_update_episode = episode_num
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("loss", loss.item(), t_env)
self.logger.log_stat("rewards_i", rewards_i.mean().item(), t_env)
self.logger.log_stat("rewards", rewards.mean().item(), t_env)
self.logger.log_stat("grad_norm", grad_norm, t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.log_stats_t = t_env
def _update_targets(self):
self.target_mac.load_state(self.mac)
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
self.logger.console_logger.info("Updated target network")
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
if self.mixer is not None:
self.mixer.cuda()
self.target_mixer.cuda()
def save_models(self, path):
self.mac.save_models(path)
if self.mixer is not None:
th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
if self.mixer is not None:
self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
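# Hedged sketch (added for illustration, not in the original file): how the
# count-based bonus above turns a continuous state into a hashable key by binning
# each feature and joining the binned values into one string. bin_coef here is a
# hypothetical value; the real one comes from the config (self.bin_coef).
def _count_key_sketch():
    bin_coef = 0.5
    state = np.array([[0.12, -0.3, 0.75]])                   # one flattened state
    binned = ((state + 1) // bin_coef).astype(str)           # coarse discretisation
    key = ''.join(np.core.defchararray.add(binned, "_")[0])  # e.g. "2.0_1.0_3.0_"
    return key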
| CollaQ-main | src_code/learners/q_explore_learner.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.vdn import VDNMixer
from modules.mixers.qmix import QMixer
import torch as th
from torch.optim import RMSprop
class QInfluenceLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = logger
self.params = list(mac.parameters())
self.last_target_update_episode = 0
self.mixer = None
if args.mixer is not None:
if args.mixer == "vdn":
self.mixer = VDNMixer()
elif args.mixer == "qmix":
self.mixer = QMixer(args)
else:
raise ValueError("Mixer {} not recognised.".format(args.mixer))
self.params += list(self.mixer.parameters())
self.target_mixer = copy.deepcopy(self.mixer)
self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
# a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
self.target_mac = copy.deepcopy(mac)
self.log_stats_t = -self.args.learner_log_interval - 1
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
# Calculate estimated Q-Values
mac_out = []
self.mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
agent_outs = self.mac.forward(batch, t=t)
mac_out.append(agent_outs)
mac_out = th.stack(mac_out, dim=1) # Concat over time
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
        # We don't need the first timestep's Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[avail_actions[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = mac_out.clone().detach()
mac_out_detach[avail_actions == 0] = -9999999
cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
else:
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if self.mixer is not None:
chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])
# Calculate 1-step Q-Learning targets
targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
loss = (masked_td_error ** 2).sum() / mask.sum()
# Optimise
self.optimiser.zero_grad()
loss.backward()
'''
total_norm_alone = 0
for p in self.mac.agent.agent_alone.parameters():
param_norm = p.grad.data.norm(2)
total_norm_alone += param_norm.item() ** 2
total_norm_alone = total_norm_alone ** (1. / 2)
total_norm_interactive = 0
for p in self.mac.agent.agent_interactive.parameters():
param_norm = p.grad.data.norm(2)
total_norm_interactive += param_norm.item() ** 2
total_norm_interactive = total_norm_interactive ** (1. / 2)
if th.rand(1) < 0.02:
print(total_norm_alone, total_norm_interactive)
'''
grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
self.optimiser.step()
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
self._update_targets()
self.last_target_update_episode = episode_num
self.mac._update_targets()
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("loss", loss.item(), t_env)
self.logger.log_stat("grad_norm", grad_norm, t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.log_stats_t = t_env
def _update_targets(self):
self.target_mac.load_state(self.mac)
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
self.logger.console_logger.info("Updated target network")
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
if self.mixer is not None:
self.mixer.cuda()
self.target_mixer.cuda()
def save_models(self, path):
self.mac.save_models(path)
if self.mixer is not None:
th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
if self.mixer is not None:
self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
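# Hedged sketch (added for illustration, not in the original file): the masked
# one-step TD target used in train() above, on toy tensors (batch=2, T=3; shapes
# are hypothetical). Double Q-learning picks the argmax action with the live
# network and evaluates it with the target network before this step.
def _td_target_sketch(gamma=0.99):
    rewards = th.rand(2, 3, 1)
    terminated = th.zeros(2, 3, 1)
    mask = th.ones(2, 3, 1)                 # 1 for real timesteps, 0 for padding
    chosen_q = th.rand(2, 3, 1)             # mixed Q of the actions actually taken
    target_max_q = th.rand(2, 3, 1)         # mixed max Q from the target network
    targets = rewards + gamma * (1 - terminated) * target_max_q
    masked_td = (chosen_q - targets.detach()) * mask
    loss = (masked_td ** 2).sum() / mask.sum()
    return loss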
| CollaQ-main | src_code/learners/q_influence_learner.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.vdn import VDNMixer
from modules.mixers.qmix import QMixer, QAttnMixerV1, QAttnMixerV2
import torch as th
from torch.optim import RMSprop
class QInteractiveLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = logger
self.params = list(mac.parameters())
self.last_target_update_episode = 0
self.self_last_target_update_episode = 0
self.mixer = None
if args.mixer is not None:
if args.mixer == "vdn":
self.mixer = VDNMixer()
elif args.mixer == "qmix":
self.mixer = QMixer(args)
elif args.mixer == "qmix_attnv1":
self.mixer = QAttnMixerV1(args, self.mac.input_shape, self.mac.input_alone_shape)
elif args.mixer == "qmix_attnv2":
self.mixer = QAttnMixerV2(args, self.mac.input_shape, self.mac.input_alone_shape)
else:
raise ValueError("Mixer {} not recognised.".format(args.mixer))
self.params += list(self.mixer.parameters())
self.target_mixer = copy.deepcopy(self.mixer)
self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
# a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
self.target_mac = copy.deepcopy(mac)
self.log_stats_t = -self.args.learner_log_interval - 1
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
# Calculate estimated Q-Values
mac_out = []
mac_out_interactive = []
mac_out_interactive_ = []
mac_out_alone = []
self.mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
agent_outs, agent_outs_interactive, agent_outs_interactive_, agent_outs_alone = self.mac.get_individual_q(batch, t=t)
mac_out.append(agent_outs)
mac_out_interactive.append(agent_outs_interactive)
mac_out_interactive_.append(agent_outs_interactive_)
mac_out_alone.append(agent_outs_alone)
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
mac_out = th.stack(mac_out, dim=1) # Concat over time
mac_out_interactive = th.stack(mac_out_interactive, dim=1) # Concat over time
mac_out_interactive_ = th.stack(mac_out_interactive_, dim=1) # Concat over time
mac_out_alone = th.stack(mac_out_alone, dim=1) # Concat over time
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
        # We don't need the first timestep's Q-Value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[avail_actions[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = mac_out.clone().detach()
mac_out_detach[avail_actions == 0] = -9999999
cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
else:
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if self.mixer is not None:
chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])
# Calculate 1-step Q-Learning targets
targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
loss = (masked_td_error ** 2).sum() / mask.sum()
if self.args.regulization == "all":
#Optimize for 0 interactive
min_q_interactive = mac_out_interactive_[:, :-1] * avail_actions[:, :-1] * mask.unsqueeze(-1)
reg_loss = (min_q_interactive ** 2).sum() / mask.unsqueeze(-1).sum()
loss += reg_loss
elif self.args.regulization == "chosen_":
#Optimize for 0 interactive
chosen_action_qvals_interactive = th.gather(mac_out_interactive_[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
reg_loss = ((chosen_action_qvals_interactive * mask) ** 2).sum() / mask.sum()
loss += reg_loss
elif self.args.regulization == "all_":
#Optimize for 0 interactive
min_q_interactive = mac_out_interactive_[:, :-1] * avail_actions[:, :-1] * mask.unsqueeze(-1)
reg_loss = (min_q_interactive ** 2).sum() / (mask.unsqueeze(-1) * avail_actions[:, :-1]).sum()
loss += reg_loss
else:
reg_loss = th.zeros(1).sum()
# Optimise
self.optimiser.zero_grad()
loss.backward()
grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
self.optimiser.step()
if (episode_num - self.self_last_target_update_episode) / (self.args.minus_target_update_interval) >= 1.0:
self.mac.update_targets()
self.self_last_target_update_episode = episode_num
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
self._update_targets()
self.last_target_update_episode = episode_num
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("loss", loss.item(), t_env)
self.logger.log_stat("reg_loss", reg_loss.item(), t_env)
self.logger.log_stat("grad_norm", grad_norm, t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.log_stats_t = t_env
def _update_targets(self):
self.target_mac.load_state(self.mac)
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
self.logger.console_logger.info("Updated target network")
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
if self.mixer is not None:
self.mixer.cuda()
self.target_mixer.cuda()
def save_models(self, path):
self.mac.save_models(path)
if self.mixer is not None:
th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
if self.mixer is not None:
self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
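# Hedged sketch (added for illustration, not in the original file): the
# args.regulization == "all" branch above pushes the ally-masked interactive
# Q-values toward zero over available actions and filled timesteps. The toy shapes
# (batch=2, T=3, n_agents=2, n_actions=4) are hypothetical.
def _interactive_reg_sketch():
    q_interactive_masked_allies = th.randn(2, 3, 2, 4)  # Q from obs with ally features zeroed
    avail_actions = th.ones(2, 3, 2, 4)
    mask = th.ones(2, 3, 2)                              # filled-timestep mask, expanded per agent
    min_q = q_interactive_masked_allies * avail_actions * mask.unsqueeze(-1)
    reg_loss = (min_q ** 2).sum() / mask.unsqueeze(-1).sum()
    return reg_loss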
| CollaQ-main | src_code/learners/q_interactive_learner.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
import torch as th
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.0):
super(ScaledDotProductAttention, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = th.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1e9)
attn = self.dropout(F.softmax(attn, dim=-1))
output = th.matmul(attn, v)
return output, attn
class Multi_Head_Attention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dout, dropout=0., bias=True):
super(Multi_Head_Attention, self).__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=bias)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=bias)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=bias)
self.fc = nn.Sequential(nn.Linear(n_head * d_v, n_head * d_v, bias=bias), nn.ReLU(), nn.Linear(n_head * d_v, dout, bias=bias))
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.layer_norm_q = nn.LayerNorm(n_head * d_k, eps=1e-6)
self.layer_norm_k = nn.LayerNorm(n_head * d_k, eps=1e-6)
self.layer_norm_v = nn.LayerNorm(n_head * d_v, eps=1e-6)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
residual = q
# Transpose for attention dot product: b x n x lq x dv
q, k, v = self.layer_norm_q(q).transpose(1, 2), self.layer_norm_k(k).transpose(1, 2), self.layer_norm_v(v).transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q, attn = self.attention(q, k, v, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.fc(q)
return q, residual, attn.squeeze()
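# Hedged usage sketch (added for illustration, not in the original file): how the
# single-head variant used in this repo attends one query (e.g. the self features)
# over a set of keys/values (e.g. ally features). All dimensions are hypothetical.
def _attention_usage_sketch():
    attn = Multi_Head_Attention(n_head=1, d_model=8, d_k=16, d_v=16, dout=16)
    self_feats = th.rand(4, 1, 8)       # (batch, 1 query, feat_dim)
    ally_feats = th.rand(4, 3, 8)       # (batch, 3 allies, feat_dim)
    out, residual, weights = attn(self_feats, ally_feats, ally_feats)
    # out: (4, 1, 16) attended summary; residual: (4, 1, 1, 16) projected query;
    # weights: (4, 3) attention over the three allies after squeeze()
    return out, residual, weights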
class Multi_Head_Attention_2layer(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dout, dropout=0., bias=True):
super(Multi_Head_Attention_2layer, self).__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs_1 = nn.Linear(d_model, n_head * d_k, bias=bias)
self.w_ks_1 = nn.Linear(d_model, n_head * d_k, bias=bias)
self.w_vs_1 = nn.Linear(d_model, n_head * d_v, bias=bias)
self.fc_1 = nn.Linear(n_head * d_v, dout, bias=bias)
self.attention_1 = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.layer_norm_q_1 = nn.LayerNorm(n_head * d_k, eps=1e-6)
self.layer_norm_k_1 = nn.LayerNorm(n_head * d_k, eps=1e-6)
self.layer_norm_v_1 = nn.LayerNorm(n_head * d_v, eps=1e-6)
# 2nd layer of attention
self.w_qs_2 = nn.Linear(n_head * d_k, n_head * d_k, bias=bias)
self.w_ks_2 = nn.Linear(d_model, n_head * d_k, bias=bias)
self.w_vs_2 = nn.Linear(d_model, n_head * d_v, bias=bias)
self.fc_2 = nn.Linear(n_head * d_v, dout, bias=bias)
self.attention_2 = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.layer_norm_q_2 = nn.LayerNorm(n_head * d_k, eps=1e-6)
self.layer_norm_k_2 = nn.LayerNorm(n_head * d_k, eps=1e-6)
self.layer_norm_v_2 = nn.LayerNorm(n_head * d_v, eps=1e-6)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        # In this layer, we perform self-attention
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q_ = self.w_qs_1(q).view(sz_b, len_q, n_head, d_k)
k_ = self.w_ks_1(k).view(sz_b, len_k, n_head, d_k)
v_ = self.w_vs_1(v).view(sz_b, len_v, n_head, d_v)
residual1 = q_
# Transpose for attention dot product: b x n x lq x dv
q_, k_, v_ = self.layer_norm_q_1(q_).transpose(1, 2), self.layer_norm_k_1(k_).transpose(1, 2), self.layer_norm_v_1(v_).transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q_, attn1 = self.attention_1(q_, k_, v_, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q_ = q_.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q_ = self.fc_1(q_)
        # In the second layer we attend again with the refined query
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q_ = self.w_qs_2(q_).view(sz_b, len_q, n_head, d_k)
k_ = self.w_ks_2(k).view(sz_b, len_k, n_head, d_k)
v_ = self.w_vs_2(v).view(sz_b, len_v, n_head, d_v)
residual2 = q_
# Transpose for attention dot product: b x n x lq x dv
q_, k_, v_ = self.layer_norm_q_2(q_).transpose(1, 2), self.layer_norm_k_2(k_).transpose(1, 2), self.layer_norm_v_2(v_).transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q_, attn2 = self.attention_2(q_, k_, v_, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q_ = q_.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q_ = self.fc_2(q_)
return q_, th.cat((residual1, residual2), dim=-1), attn2.squeeze()
class SelfAttnInteractive(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(SelfAttnInteractive, self).__init__()
self.args = args
if args.obs_agent_id:
self.individual_feats_size = (input_shape-args.n_agents-input_shape_alone+1) // (args.n_agents - 1) - 4
else:
self.individual_feats_size = (input_shape-input_shape_alone) // (args.n_agents - 1) - 4
self.all_feats_size = self.individual_feats_size + 4
self.n_enemies = (input_shape-self.individual_feats_size-self.args.n_agents-self.args.n_actions-self.all_feats_size*(self.args.n_agents-1)) // self.all_feats_size
self.self_relative = th.tensor([1, 0, 0, 0], device=self.args.device).float().reshape(1, 1, -1)
if args.attn_layers == 1:
self.a_self_attn = Multi_Head_Attention(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
self.e_self_attn = Multi_Head_Attention(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
elif args.attn_layers == 2:
self.a_self_attn = Multi_Head_Attention_2layer(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
self.e_self_attn = Multi_Head_Attention_2layer(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
self.n_agents = args.n_agents
def forward(self, inputs):
if self.args.obs_agent_id:
bs = inputs.shape[0]
# World features
world_feats = inputs[:, :-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies)]
action_id_feats = inputs[:, -self.n_agents-self.args.n_actions:]
self_feats = inputs[:, -self.n_agents-self.args.n_actions-self.individual_feats_size:-self.n_agents-self.args.n_actions].reshape(bs, 1, -1)
self_feats = th.cat((self.self_relative.expand((bs, 1, 4)), self_feats), dim=-1)
#Ally features
ally_feats = inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
ally_feats, self_feats_a, _ = self.a_self_attn(self_feats, ally_feats, ally_feats)
ally_self_feats = th.cat((ally_feats.reshape(bs, -1), self_feats_a.reshape(bs, -1)), dim=-1)
#Enemy features
enemy_feats = inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies):-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1)].reshape(bs, self.n_enemies, -1)
enemy_feats, self_feats_e, _ = self.e_self_attn(self_feats, enemy_feats, enemy_feats)
enemy_self_feats = th.cat((enemy_feats.reshape(bs, -1), self_feats_e.reshape(bs, -1)), dim=-1)
#Concat everything
inputs = th.cat((world_feats, enemy_self_feats, ally_self_feats, action_id_feats), dim=-1)
else:
bs = inputs.shape[0]
# World features
world_feats = inputs[:, :-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies)]
action_id_feats = inputs[:, -self.args.n_actions:]
self_feats = inputs[:, -self.args.n_actions-self.individual_feats_size:-self.args.n_actions].reshape(bs, 1, -1)
self_feats = th.cat((self.self_relative.expand((bs, 1, 4)), self_feats), dim=-1)
#Ally features
ally_feats = inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
ally_feats, self_feats_a, _ = self.a_self_attn(self_feats, ally_feats, ally_feats)
ally_self_feats = th.cat((ally_feats.reshape(bs, -1), self_feats_a.reshape(bs, -1)), dim=-1)
#Enemy features
enemy_feats = inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies):-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1)].reshape(bs, self.n_enemies, -1)
enemy_feats, self_feats_e, _ = self.e_self_attn(self_feats, enemy_feats, enemy_feats)
enemy_self_feats = th.cat((enemy_feats.reshape(bs, -1), self_feats_e.reshape(bs, -1)), dim=-1)
#Concat everything
inputs = th.cat((world_feats, enemy_self_feats, ally_self_feats, action_id_feats), dim=-1)
return inputs
class SelfAttn(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(SelfAttn, self).__init__()
self.args = args
if args.obs_agent_id:
self.individual_feats_size = (input_shape-args.n_agents-input_shape_alone+1) // (args.n_agents - 1) - 4
else:
self.individual_feats_size = (input_shape-input_shape_alone) // (args.n_agents - 1) - 4
self.all_feats_size = self.individual_feats_size + 4
self.n_enemies = (input_shape-self.individual_feats_size-self.args.n_agents-self.args.n_actions-self.all_feats_size*(self.args.n_agents-1)) // self.all_feats_size
self.self_relative = th.tensor([1, 0, 0, 0], device=self.args.device).float().reshape(1, 1, -1)
if args.attn_layers == 1:
self.a_self_attn = Multi_Head_Attention(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
elif args.attn_layers == 2:
self.a_self_attn = Multi_Head_Attention_2layer(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
self.n_agents = args.n_agents
def forward(self, inputs):
if self.args.obs_agent_id:
bs = inputs.shape[0]
# World features
world_feats = inputs[:, :-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies)]
action_id_feats = inputs[:, -self.n_agents-self.args.n_actions:]
self_feats = inputs[:, -self.n_agents-self.args.n_actions-self.individual_feats_size:-self.n_agents-self.args.n_actions].reshape(bs, 1, -1)
self_feats = th.cat((self.self_relative.expand((bs, 1, 4)), self_feats), dim=-1)
#Ally features
ally_feats = inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
ally_feats, self_feats_a, _ = self.a_self_attn(self_feats, ally_feats, ally_feats)
ally_self_feats = th.cat((ally_feats.reshape(bs, -1), self_feats_a.reshape(bs, -1)), dim=-1)
#Enemy features
enemy_feats = inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies):-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1)].reshape(bs, self.n_enemies, -1)
enemy_self_feats = enemy_feats.reshape(bs, -1)
#Concat everything
inputs = th.cat((world_feats, enemy_self_feats, ally_self_feats, action_id_feats), dim=-1)
else:
bs = inputs.shape[0]
# World features
world_feats = inputs[:, :-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies)]
action_id_feats = inputs[:, -self.args.n_actions:]
self_feats = inputs[:, -self.args.n_actions-self.individual_feats_size:-self.args.n_actions].reshape(bs, 1, -1)
self_feats = th.cat((self.self_relative.expand((bs, 1, 4)), self_feats), dim=-1)
#Ally features
ally_feats = inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
ally_feats, self_feats_a, _ = self.a_self_attn(self_feats, ally_feats, ally_feats)
ally_self_feats = th.cat((ally_feats.reshape(bs, -1), self_feats_a.reshape(bs, -1)), dim=-1)
#Enemy features
enemy_feats = inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1+self.n_enemies):-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1)].reshape(bs, self.n_enemies, -1)
enemy_self_feats = enemy_feats.reshape(bs, -1)
#Concat everything
inputs = th.cat((world_feats, enemy_self_feats, ally_self_feats, action_id_feats), dim=-1)
return inputs
class SelfAttnAlone(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(SelfAttnAlone, self).__init__()
self.args = args
if args.obs_agent_id:
self.individual_feats_size = (input_shape-args.n_agents-input_shape_alone+1) // (args.n_agents - 1) - 4
else:
self.individual_feats_size = (input_shape-input_shape_alone) // (args.n_agents - 1) - 4
self.all_feats_size = self.individual_feats_size + 4
self.n_enemies = (input_shape-self.individual_feats_size-self.args.n_agents-self.args.n_actions-self.all_feats_size*(self.args.n_agents-1)) // self.all_feats_size
self.self_relative = th.tensor([1, 0, 0, 0], device=self.args.device).float().reshape(1, 1, -1)
if args.attn_layers == 1:
self.e_self_attn = Multi_Head_Attention(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
elif args.attn_layers == 2:
self.e_self_attn = Multi_Head_Attention_2layer(1, self.all_feats_size, args.attn_embed_dim, args.attn_embed_dim, args.attn_embed_dim)
self.n_agents = args.n_agents
def forward(self, inputs):
bs = inputs.shape[0]
# World features
world_feats = inputs[:, :-self.individual_feats_size-1-self.args.n_actions-self.all_feats_size*self.n_enemies]
action_id_feats = inputs[:, -1-self.args.n_actions:]
self_feats = inputs[:, -1-self.args.n_actions-self.individual_feats_size:-1-self.args.n_actions].reshape(bs, 1, -1)
self_feats = th.cat((self.self_relative.expand((bs, 1, 4)), self_feats), dim=-1)
#Enemy features
enemy_feats = inputs[:, -self.individual_feats_size-1-self.args.n_actions-self.all_feats_size*self.n_enemies:-self.individual_feats_size-1-self.args.n_actions].reshape(bs, self.n_enemies, -1)
enemy_feats, self_feats_e, _ = self.e_self_attn(self_feats, enemy_feats, enemy_feats)
enemy_self_feats = th.cat((enemy_feats.reshape(bs, -1), self_feats_e.reshape(bs, -1)), dim=-1)
#Concat everything
inputs = th.cat((world_feats, enemy_self_feats, action_id_feats), dim=-1)
return inputs
class RNNRegAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNRegAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
def init_hidden(self):
# make hidden states on same device as model
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state):
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
q = self.fc2(h)
return h, q
class RNNInteractiveRegAgent(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(RNNInteractiveRegAgent, self).__init__()
self.agent_alone = RNNRegAgent(input_shape_alone, args)
self.agent_interactive = RNNRegAgent(input_shape, args)
self.individual_feats_size = (input_shape-args.n_agents-input_shape_alone+1) // (args.n_agents - 1) - 4
self.all_feats_size = self.individual_feats_size + 4
self.args = args
def init_hidden(self):
# make hidden states on same device as model
hidden_alone = self.agent_alone.init_hidden()
hidden_interactive = self.agent_interactive.init_hidden()
return hidden_interactive, hidden_alone, hidden_interactive
def forward(self, inputs, inputs_alone, hidden_state, hidden_state_alone, hidden_state_):
_inputs = inputs.clone()
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.args.n_agents-self.args.n_actions-self.all_feats_size*(self.args.n_agents-1)]
action_id_feats = _inputs[:, -self.args.n_agents-self.args.n_actions:]
self_feats = _inputs[:, -self.args.n_agents-self.args.n_actions-self.individual_feats_size:-self.args.n_agents-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.args.n_agents-self.args.n_actions-self.all_feats_size*(self.args.n_agents-1):-self.args.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.args.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
h_alone, q_alone = self.agent_alone(inputs_alone, hidden_state_alone)
h_interactive_, q_interactive_ = self.agent_interactive(_inputs, hidden_state_)
h_interactive, q_interactive = self.agent_interactive(inputs, hidden_state)
q = q_alone + q_interactive
return q, h_interactive, h_alone, h_interactive_, q_interactive_
def get_individual_q(self, inputs, inputs_alone, hidden_state, hidden_state_alone, hidden_state_):
_inputs = inputs.clone()
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.args.n_agents-self.args.n_actions-self.all_feats_size*(self.args.n_agents-1)]
action_id_feats = _inputs[:, -self.args.n_agents-self.args.n_actions:]
self_feats = _inputs[:, -self.args.n_agents-self.args.n_actions-self.individual_feats_size:-self.args.n_agents-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.args.n_agents-self.args.n_actions-self.all_feats_size*(self.args.n_agents-1):-self.args.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.args.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
h_alone, q_alone = self.agent_alone(inputs_alone, hidden_state_alone)
h_interactive_, q_interactive_ = self.agent_interactive(_inputs, hidden_state_)
h_interactive, q_interactive = self.agent_interactive(inputs, hidden_state)
q = q_alone + q_interactive
return q, q_interactive, q_interactive_, q_alone, h_interactive, h_alone, h_interactive_
def get_parameters(self):
return self.parameters()
def update_n_agents(self, n_agents):
pass
class RNNInteractiveAttnAgentV1(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(RNNInteractiveAttnAgentV1, self).__init__()
if args.obs_agent_id:
self.individual_feats_size = (input_shape-args.n_agents-input_shape_alone+1) // (args.n_agents - 1) - 4
else:
self.individual_feats_size = (input_shape-input_shape_alone) // (args.n_agents - 1) - 4
self.all_feats_size = self.individual_feats_size + 4
self.self_attn_i = SelfAttn(input_shape, input_shape_alone, args)
self.agent_alone = RNNRegAgent(input_shape_alone, args)
if args.attn_layers == 1:
self.agent_interactive = RNNRegAgent(input_shape+args.attn_embed_dim*2-self.individual_feats_size-(args.n_agents-1)*self.all_feats_size, args)
elif args.attn_layers == 2:
self.agent_interactive = RNNRegAgent(input_shape+args.attn_embed_dim*3-self.individual_feats_size-(args.n_agents-1)*self.all_feats_size, args)
self.args = args
self.n_agents = args.n_agents
def init_hidden(self):
# make hidden states on same device as model
hidden_alone = self.agent_alone.init_hidden()
hidden_interactive = self.agent_interactive.init_hidden()
return hidden_interactive, hidden_alone, hidden_interactive
def forward(self, inputs, inputs_alone, hidden_state, hidden_state_alone, hidden_state_):
_inputs = inputs.clone()
if self.args.obs_agent_id:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.n_agents-self.args.n_actions:]
self_feats = _inputs[:, -self.n_agents-self.args.n_actions-self.individual_feats_size:-self.n_agents-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
else:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.args.n_actions:]
self_feats = _inputs[:, -self.args.n_actions-self.individual_feats_size:-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
inputs = self.self_attn_i(inputs)
_inputs = self.self_attn_i(_inputs)
h_alone, q_alone = self.agent_alone(inputs_alone, hidden_state_alone)
h_interactive_, q_interactive_ = self.agent_interactive(_inputs, hidden_state_)
h_interactive, q_interactive = self.agent_interactive(inputs, hidden_state)
q = q_alone + q_interactive
return q, h_interactive, h_alone, h_interactive_, q_interactive_
def get_individual_q(self, inputs, inputs_alone, hidden_state, hidden_state_alone, hidden_state_):
_inputs = inputs.clone()
if self.args.obs_agent_id:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.n_agents-self.args.n_actions:]
self_feats = _inputs[:, -self.n_agents-self.args.n_actions-self.individual_feats_size:-self.n_agents-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
else:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.args.n_actions:]
self_feats = _inputs[:, -self.args.n_actions-self.individual_feats_size:-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
inputs = self.self_attn_i(inputs)
_inputs = self.self_attn_i(_inputs)
h_alone, q_alone = self.agent_alone(inputs_alone, hidden_state_alone)
h_interactive_, q_interactive_ = self.agent_interactive(_inputs, hidden_state_)
h_interactive, q_interactive = self.agent_interactive(inputs, hidden_state)
q = q_alone + q_interactive
return q, q_interactive, q_interactive_, q_alone, h_interactive, h_alone, h_interactive_
def get_parameters(self):
return self.parameters()
def update_n_agents(self, n_agents):
self.n_agents = n_agents
self.self_attn_i.n_agents = n_agents
class RNNInteractiveAttnAgentV2(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(RNNInteractiveAttnAgentV2, self).__init__()
if args.obs_agent_id:
self.individual_feats_size = (input_shape-args.n_agents-input_shape_alone+1) // (args.n_agents - 1) - 4
else:
self.individual_feats_size = (input_shape-input_shape_alone) // (args.n_agents - 1) - 4
self.all_feats_size = self.individual_feats_size + 4
self.self_attn_i = SelfAttnInteractive(input_shape, input_shape_alone, args)
if args.attn_layers == 1:
self.agent_interactive = RNNRegAgent(input_shape+args.attn_embed_dim*4-self.individual_feats_size-(args.n_agents-1+args.n_actions - 6)*self.all_feats_size, args)
elif args.attn_layers == 2:
self.agent_interactive = RNNRegAgent(input_shape+args.attn_embed_dim*6-self.individual_feats_size-(args.n_agents-1+args.n_actions - 6)*self.all_feats_size, args)
# self.self_attn_a = SelfAttnAlone(input_shape, input_shape_alone, args)
# self.agent_alone = RNNRegAgent(input_shape_alone+args.attn_embed_dim*2-self.individual_feats_size-(args.n_actions - 6)*self.all_feats_size, args)
self.agent_alone = RNNRegAgent(input_shape_alone, args)
self.args = args
self.n_agents = args.n_agents
def init_hidden(self):
# make hidden states on same device as model
hidden_alone = self.agent_alone.init_hidden()
hidden_interactive = self.agent_interactive.init_hidden()
return hidden_interactive, hidden_alone, hidden_interactive
def forward(self, inputs, inputs_alone, hidden_state, hidden_state_alone, hidden_state_):
_inputs = inputs.clone()
if self.args.obs_agent_id:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.n_agents-self.args.n_actions:]
self_feats = _inputs[:, -self.n_agents-self.args.n_actions-self.individual_feats_size:-self.n_agents-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
else:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.args.n_actions:]
self_feats = _inputs[:, -self.args.n_actions-self.individual_feats_size:-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
inputs = self.self_attn_i(inputs)
_inputs = self.self_attn_i(_inputs)
# inputs_alone = self.self_attn_a(inputs_alone)
h_alone, q_alone = self.agent_alone(inputs_alone, hidden_state_alone)
h_interactive_, q_interactive_ = self.agent_interactive(_inputs, hidden_state_)
h_interactive, q_interactive = self.agent_interactive(inputs, hidden_state)
q = q_alone + q_interactive
return q, h_interactive, h_alone, h_interactive_, q_interactive_
def get_individual_q(self, inputs, inputs_alone, hidden_state, hidden_state_alone, hidden_state_):
_inputs = inputs.clone()
if self.args.obs_agent_id:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.n_agents-self.args.n_actions:]
self_feats = _inputs[:, -self.n_agents-self.args.n_actions-self.individual_feats_size:-self.n_agents-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.n_agents-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.n_agents-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
else:
bs = _inputs.shape[0]
# World features
world_feats = _inputs[:, :-self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1)]
action_id_feats = _inputs[:, -self.args.n_actions:]
self_feats = _inputs[:, -self.args.n_actions-self.individual_feats_size:-self.args.n_actions]
ally_feats = _inputs[:, -self.individual_feats_size-self.args.n_actions-self.all_feats_size*(self.n_agents-1):-self.args.n_actions-self.individual_feats_size].reshape(bs, self.n_agents-1, -1)
_inputs = th.cat((world_feats, th.zeros(ally_feats.reshape(bs, -1).shape, device=self.args.device), self_feats, action_id_feats), dim=-1)
inputs = self.self_attn_i(inputs)
_inputs = self.self_attn_i(_inputs)
# inputs_alone = self.self_attn_a(inputs_alone)
h_alone, q_alone = self.agent_alone(inputs_alone, hidden_state_alone)
h_interactive_, q_interactive_ = self.agent_interactive(_inputs, hidden_state_)
h_interactive, q_interactive = self.agent_interactive(inputs, hidden_state)
q = q_alone + q_interactive
return q, q_interactive, q_interactive_, q_alone, h_interactive, h_alone, h_interactive_
def get_parameters(self):
return self.parameters()
def update_n_agents(self, n_agents):
self.n_agents = n_agents
self.self_attn_i.n_agents = n_agents
# self.self_attn_a.n_agents = n_agents
class RNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
def init_hidden(self):
# make hidden states on same device as model
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state):
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
q = self.fc2(h)
return q, h
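# Hedged usage sketch (added for illustration, not in the original file): stepping
# the plain recurrent agent above. demo_args is a hypothetical stand-in for the
# experiment config (only rnn_hidden_dim and n_actions are needed here).
def _rnn_agent_usage_sketch():
    from types import SimpleNamespace
    demo_args = SimpleNamespace(rnn_hidden_dim=64, n_actions=10)
    agent = RNNAgent(input_shape=30, args=demo_args)
    h = agent.init_hidden().expand(5, -1)   # 5 agent slots share the initial state
    obs = th.rand(5, 30)
    q, h = agent(obs, h)                    # q: (5, 10) action values, h: (5, 64)
    return q, h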
class RNNInteractiveAgent(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(RNNInteractiveAgent, self).__init__()
self.agent_alone = RNNAgent(input_shape_alone, args)
self.agent_interactive = RNNAgent(input_shape, args)
def init_hidden(self):
# make hidden states on same device as model
hidden_alone = self.agent_alone.init_hidden()
hidden_interactive = self.agent_interactive.init_hidden()
return hidden_interactive, hidden_alone
def forward(self, inputs, inputs_alone, hidden_state, hidden_state_alone):
q_alone, h_alone = self.agent_alone(inputs_alone, hidden_state_alone)
q_interactive, h_interactive = self.agent_interactive(inputs, hidden_state)
return q_interactive+q_alone-self.agent_interactive.fc2.bias, h_interactive, h_alone
def get_parameters(self):
return self.parameters()
def update_n_agents(self, n_agents):
pass
class RNNInteractiveAttnAgent(nn.Module):
def __init__(self, input_shape, input_shape_alone, args):
super(RNNInteractiveAttnAgent, self).__init__()
if args.obs_agent_id:
self.individual_feats_size = (input_shape-args.n_agents-input_shape_alone+1) // (args.n_agents - 1) - 4
else:
self.individual_feats_size = (input_shape-input_shape_alone) // (args.n_agents - 1) - 4
self.all_feats_size = self.individual_feats_size + 4
self.self_attn_i = SelfAttn(input_shape, input_shape_alone, args)
self.agent_alone = RNNAgent(input_shape_alone, args)
if args.attn_layers == 1:
self.agent_interactive = RNNAgent(input_shape+args.attn_embed_dim*2-self.individual_feats_size-(args.n_agents-1)*self.all_feats_size, args)
elif args.attn_layers == 2:
self.agent_interactive = RNNAgent(input_shape+args.attn_embed_dim*3-self.individual_feats_size-(args.n_agents-1)*self.all_feats_size, args)
self.args = args
self.n_agents = args.n_agents
def init_hidden(self):
# make hidden states on same device as model
hidden_alone = self.agent_alone.init_hidden()
hidden_interactive = self.agent_interactive.init_hidden()
return hidden_interactive, hidden_alone
def forward(self, inputs, inputs_alone, hidden_state, hidden_state_alone):
inputs = self.self_attn_i(inputs)
q_alone, h_alone = self.agent_alone(inputs_alone, hidden_state_alone)
q_interactive, h_interactive = self.agent_interactive(inputs, hidden_state)
return q_interactive+q_alone-self.agent_interactive.fc2.bias, h_interactive, h_alone
def get_parameters(self):
return self.parameters()
def update_n_agents(self, n_agents):
self.n_agents = n_agents
self.self_attn_i.n_agents = n_agents
# #TODO: CHANGED THIS
# class RNNAgent(nn.Module):
# def __init__(self, input_shape, args):
# super(RNNAgent, self).__init__()
# self.args = args
# self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
# self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
# # self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
# def init_hidden(self):
# # make hidden states on same device as model
# return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
# def forward(self, inputs, hidden_state):
# x = F.relu(self.fc1(inputs))
# h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
# h = self.rnn(x, h_in)
# return h
# class RNNInteractiveAgent(nn.Module):
# def __init__(self, input_shape, input_shape_alone, args):
# super(RNNInteractiveAgent, self).__init__()
# self.agent_alone = RNNAgent(input_shape_alone, args)
# self.agent_interactive = RNNAgent(input_shape, args)
# self.fc = nn.Linear(2 * args.rnn_hidden_dim, args.n_actions, bias=False)
# self.alone_bias = nn.Parameter(th.zeros(args.n_actions))
# self.interactive_bias = nn.Parameter(th.zeros(args.n_actions))
# self.args = args
# def init_hidden(self):
# # make hidden states on same device as model
# hidden_alone = self.agent_alone.init_hidden()
# hidden_interactive = self.agent_interactive.init_hidden()
# return hidden_interactive, hidden_alone
# def forward(self, inputs, inputs_alone, hidden_state, hidden_state_alone):
# h_alone = self.agent_alone(inputs_alone, hidden_state_alone)
# h_interactive = self.agent_interactive(inputs, hidden_state)
# q = self.fc(th.cat((h_interactive, h_alone), dim=-1))+self.interactive_bias+self.alone_bias
# return q, h_interactive, h_alone
# def get_interactive_q(self, inputs, hidden_state):
# h_interactive = self.agent_interactive(inputs, hidden_state)
# q_interactive = self.fc(th.cat((th.zeros_like(h_interactive, device=self.args.device), h_interactive), dim=-1))
# return q_interactive, h_interactive
# def get_alone_q(self, inputs_alone, hidden_state_alone):
# h_alone = self.agent_alone(inputs_alone, hidden_state_alone)
# q_alone = self.fc(th.cat((h_alone, th.zeros_like(h_alone, device=self.args.device)), dim=-1))
# return q_alone, h_alone
# def get_individual_q(self, inputs, inputs_alone, hidden_state, hidden_state_alone):
# h_alone = self.agent_alone(inputs_alone, hidden_state_alone)
# h_interactive = self.agent_interactive(inputs, hidden_state)
# q = self.fc(th.cat((h_interactive, h_alone), dim=-1))+self.interactive_bias+self.alone_bias
# q_interactive = self.fc(th.cat((th.zeros_like(h_interactive, device=self.args.device), h_interactive), dim=-1))
# q_alone = self.fc(th.cat((h_alone, th.zeros_like(h_alone, device=self.args.device)), dim=-1))
# return q, q_interactive, q_alone, h_interactive, h_alone, th.zeros(1, device=self.args.device).sum()
# def get_parameters(self):
# return self.parameters()
# def update_n_agents(self, n_agents):
# pass
| CollaQ-main | src_code/modules/rnn_interactive_agent.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from modules.agents import REGISTRY as agent_REGISTRY
from components.action_selectors import REGISTRY as action_REGISTRY
import torch as th
import copy
# This multi-agent controller shares parameters between agents
class BasicMACInteractive:
def __init__(self, scheme, groups, args):
self.n_agents = args.n_agents
self.args = args
self.input_shape = self._get_input_shape(scheme)
self.input_alone_shape = self._get_input_alone_shape(scheme)
self._build_agents(self.input_shape, self.input_alone_shape)
self.agent_output_type = args.agent_output_type
self.action_selector = action_REGISTRY[args.action_selector](args)
self.hidden_states = None
self.hidden_states_alone = None
self.test_total = 0
self.avg_num_agents_attack = th.zeros(self.n_agents + 1)
self.avg_ally_distance = 0
def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False, env=None):
# Only select actions for the selected batch elements in bs
avail_actions = ep_batch["avail_actions"][:, t_ep]
agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
# if test_mode:
# self.focus_fire_rate(chosen_actions, ep_batch, t_ep)
return chosen_actions
def forward(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, self.hidden_states, self.hidden_states_alone = self.agent(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone)
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
if getattr(self.args, "mask_before_softmax", True):
                # Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
def init_hidden(self, batch_size):
hidden_states, hidden_states_alone = self.agent.init_hidden()
self.hidden_states = hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
self.hidden_states_alone = hidden_states_alone.unsqueeze(0).expand(batch_size, self.n_agents, -1)
def parameters(self):
return self.agent.get_parameters()
def load_state(self, other_mac):
self.agent.load_state_dict(other_mac.agent.state_dict())
def cuda(self):
self.agent.cuda()
def save_models(self, path):
th.save(self.agent.state_dict(), "{}/agent.th".format(path))
def load_models(self, path):
self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
def _build_agents(self, input_shape, input_alone_shape):
self.agent = agent_REGISTRY[self.args.agent](input_shape, input_alone_shape, self.args)
def _build_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs = []
inputs.append(batch["obs"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
return inputs
def _build_alone_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs_alone = []
inputs_alone.append(batch["obs_alone"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs_alone.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs_alone.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs_alone.append(th.eye(1, device=batch.device).expand(self.n_agents, -1).unsqueeze(0).expand(bs, -1, -1))
inputs_alone = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs_alone], dim=1)
return inputs_alone
def _get_input_shape(self, scheme):
input_shape = scheme["obs"]["vshape"]
if self.args.obs_last_action:
input_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_shape += self.n_agents
return input_shape
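# Illustrative example (assumed values, not tied to any particular map): with an observation
# vshape of 30, 14 one-hot actions, 5 agents, and both obs_last_action and obs_agent_id
# enabled, the per-agent input_shape is 30 + 14 + 5 = 49.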
def _get_input_alone_shape(self, scheme):
input_alone_shape = scheme["obs_alone"]["vshape"]
if self.args.obs_last_action:
input_alone_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_alone_shape += 1
return input_alone_shape
def focus_fire_rate(self, chosen_actions, batch, t):
self.test_total += 1
n_actions_no_attack = 6
#Compute focus fire rate
target_id = th.clamp(chosen_actions - n_actions_no_attack, min=-1)
max_id = self.args.n_actions - n_actions_no_attack
num_agents_attack = []
for i in range(max_id):
num_agents_attack.append(th.sum(target_id == i).item())
#Compute average distance
inputs = batch["obs"][:, t]
bs = batch.batch_size
individual_feats_size = (self.input_shape-self.n_agents-self.input_alone_shape+1) // (self.n_agents - 1) - 4
all_feats_size = individual_feats_size + 4
n_enemies = (self.input_shape-individual_feats_size-self.n_agents-self.args.n_actions-all_feats_size*(self.n_agents-1)) // all_feats_size
enemy_ally_feats = inputs[:, :, -individual_feats_size-all_feats_size*(self.n_agents-1+n_enemies):-individual_feats_size]\
.reshape(inputs.shape[0], inputs.shape[1], self.n_agents-1+n_enemies, -1)
#Compute enemy
e_shootable = (enemy_ally_feats[:, :, :n_enemies, 0] > 0).long()
e_visible = (enemy_ally_feats[:, :, :n_enemies, 1] > 0).long()
e_distance = enemy_ally_feats[:, :, :n_enemies, 1]
e_average_distance = th.sum(e_distance, dim=1)/(th.sum(e_visible, dim=1) + 1e-6)
#Compute ally
a_visible = (enemy_ally_feats[:, :, :n_enemies, 0] > 0).long()
a_distance = enemy_ally_feats[:, :, :n_enemies, 1] * a_visible
a_average_distance = th.sum(a_distance, dim=1)/(th.sum(a_visible, dim=1) + 1e-6)
for num_attack in num_agents_attack:
self.avg_num_agents_attack[num_attack] += 1
self.avg_ally_distance += a_average_distance.mean().item()
th.set_printoptions(precision=2)
print("focus fire rate: ", self.avg_num_agents_attack/self.test_total)
print("focus fire rate mean: ", self.avg_num_agents_attack[2:].sum()/self.test_total)
print("average distance between agents: ", "%.2f" % (self.avg_ally_distance/self.test_total))
# This multi-agent controller shares parameters between agents
class BasicMACInteractiveRegV1:
def __init__(self, scheme, groups, args):
self.n_agents = args.n_agents
self.args = args
self.input_shape = self._get_input_shape(scheme)
self.input_alone_shape = self._get_input_alone_shape(scheme)
self._build_agents(self.input_shape, self.input_alone_shape)
self.agent_output_type = args.agent_output_type
self.action_selector = action_REGISTRY[args.action_selector](args)
self.hidden_states = None
self.hidden_states_ = None
self.hidden_states_alone = None
self.test_total = 0
self.avg_num_agents_attack = th.zeros(self.n_agents + 1)
self.avg_ally_distance = 0
def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False, env=None):
# Only select actions for the selected batch elements in bs
avail_actions = ep_batch["avail_actions"][:, t_ep]
agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
# if test_mode:
# self.focus_fire_rate(chosen_actions, ep_batch, t_ep)
return chosen_actions
def forward(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, self.hidden_states, self.hidden_states_alone, self.hidden_states_, agent_outs_interactive_ = self.agent(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone, self.hidden_states_)
agent_outs = agent_outs - agent_outs_interactive_
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
assert(False)
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
def get_individual_q(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, agent_outs_interactive, agent_outs_interactive_, agent_outs_alone, self.hidden_states, self.hidden_states_alone, self.hidden_states_ = self.agent.get_individual_q(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone, self.hidden_states_)
agent_outs = agent_outs - agent_outs_interactive_
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
assert(False)
#TODO: NOT IMPLEMENTED YET
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs_interactive[reshaped_avail_actions == 0] = -1e10
agent_outs_interactive_[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
agent_outs_interactive = th.nn.functional.softmax(agent_outs_interactive, dim=-1)
agent_outs_interactive_ = th.nn.functional.softmax(agent_outs_interactive_, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
agent_outs_interactive = ((1 - self.action_selector.epsilon) * agent_outs_interactive
+ th.ones_like(agent_outs_interactive) * self.action_selector.epsilon/epsilon_action_num)
agent_outs_interactive_ = ((1 - self.action_selector.epsilon) * agent_outs_interactive_
+ th.ones_like(agent_outs_interactive_) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
agent_outs_interactive[reshaped_avail_actions == 0] = 0.0
agent_outs_interactive_[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1), agent_outs_interactive.view(ep_batch.batch_size, self.n_agents, -1), agent_outs_interactive_.view(ep_batch.batch_size, self.n_agents, -1), \
agent_outs_alone.view(ep_batch.batch_size, self.n_agents, -1)
def init_hidden(self, batch_size):
hidden_states, hidden_states_alone, hidden_states_ = self.agent.init_hidden()
self.hidden_states = hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
self.hidden_states_alone = hidden_states_alone.unsqueeze(0).expand(batch_size, self.n_agents, -1)
self.hidden_states_ = hidden_states_.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
def parameters(self):
return self.agent.get_parameters()
def load_state(self, other_mac):
self.agent.load_state_dict(other_mac.agent.state_dict())
def cuda(self):
self.agent.cuda()
def save_models(self, path):
th.save(self.agent.state_dict(), "{}/agent.th".format(path))
def load_models(self, path):
self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
def _build_agents(self, input_shape, input_alone_shape):
self.agent = agent_REGISTRY[self.args.agent](input_shape, input_alone_shape, self.args)
if self.args.pretrained:
print('Loading pretrained model')
model_dict = self.agent.agent_alone.state_dict()
checkpoint = th.load(self.args.single_model_name+"/"+"-".join(self.args.env_args['map_name'])+"/agent.th")
# 1. filter out unnecessary keys
state_dict = {}
for k, v in checkpoint.items():
if 'agent_alone' in k:
state_dict[k[12:]] = v
pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
self.agent.agent_alone.load_state_dict(model_dict)
def update_targets(self):
pass
def _build_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs = []
inputs.append(batch["obs"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
return inputs
def _build_alone_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs_alone = []
inputs_alone.append(batch["obs_alone"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs_alone.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs_alone.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs_alone.append(th.eye(1, device=batch.device).expand(self.n_agents, -1).unsqueeze(0).expand(bs, -1, -1))
inputs_alone = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs_alone], dim=1)
return inputs_alone
def _get_input_shape(self, scheme):
input_shape = scheme["obs"]["vshape"]
if self.args.obs_last_action:
input_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_shape += self.n_agents
return input_shape
def _get_input_alone_shape(self, scheme):
input_alone_shape = scheme["obs_alone"]["vshape"]
if self.args.obs_last_action:
input_alone_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_alone_shape += 1
return input_alone_shape
def focus_fire_rate(self, chosen_actions, batch, t):
self.test_total += 1
n_actions_no_attack = 6
#Compute focus fire rate
target_id = th.clamp(chosen_actions - n_actions_no_attack, min=-1)
max_id = self.args.n_actions - n_actions_no_attack
num_agents_attack = []
for i in range(max_id):
num_agents_attack.append(th.sum(target_id == i).item())
#Compute average distance
inputs = batch["obs"][:, t]
bs = batch.batch_size
individual_feats_size = (self.input_shape-self.n_agents-self.input_alone_shape+1) // (self.n_agents - 1) - 4
all_feats_size = individual_feats_size + 4
n_enemies = (self.input_shape-individual_feats_size-self.n_agents-self.args.n_actions-all_feats_size*(self.n_agents-1)) // all_feats_size
enemy_ally_feats = inputs[:, :, -individual_feats_size-all_feats_size*(self.n_agents-1+n_enemies):-individual_feats_size]\
.reshape(inputs.shape[0], inputs.shape[1], self.n_agents-1+n_enemies, -1)
#Compute enemy
e_shootable = (enemy_ally_feats[:, :, :n_enemies, 0] > 0).long()
e_visible = (enemy_ally_feats[:, :, :n_enemies, 1] > 0).long()
e_distance = enemy_ally_feats[:, :, :n_enemies, 1]
e_average_distance = th.sum(e_distance, dim=1)/(th.sum(e_visible, dim=1) + 1e-6)
#Compute ally
a_visible = (enemy_ally_feats[:, :, :n_enemies, 0] > 0).long()
a_distance = enemy_ally_feats[:, :, :n_enemies, 1] * a_visible
a_average_distance = th.sum(a_distance, dim=1)/(th.sum(a_visible, dim=1) + 1e-6)
for num_attack in num_agents_attack:
self.avg_num_agents_attack[num_attack] += 1
self.avg_ally_distance += a_average_distance.mean().item()
th.set_printoptions(precision=2)
print("focus fire rate: ", self.avg_num_agents_attack/self.test_total)
print("focus fire rate mean: ", self.avg_num_agents_attack[2:].sum()/self.test_total)
print("average distance between agents: ", "%.2f" % (self.avg_ally_distance/self.test_total))
# This multi-agent controller shares parameters between agents
class BasicMACInteractiveRegV2:
def __init__(self, scheme, groups, args):
self.n_agents = args.n_agents
self.args = args
self.input_shape = self._get_input_shape(scheme)
self.input_alone_shape = self._get_input_alone_shape(scheme)
self._build_agents(self.input_shape, self.input_alone_shape)
self.agent_output_type = args.agent_output_type
self.action_selector = action_REGISTRY[args.action_selector](args)
self.hidden_states = None
self.hidden_states_ = None
self.hidden_states_alone = None
self.target_hidden_states = None
self.target_hidden_states_ = None
self.target_hidden_states_alone = None
def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False, env=None):
# Only select actions for the selected batch elements in bs
avail_actions = ep_batch["avail_actions"][:, t_ep]
agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
# if test_mode:
# self.focus_fire_rate(chosen_actions, ep_batch, t_ep)
return chosen_actions
def forward(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, self.hidden_states, self.hidden_states_alone, self.hidden_states_, _ = self.agent(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone, self.hidden_states_)
_, self.target_hidden_states, self.target_hidden_states_alone, self.target_hidden_states_, agent_outs_interactive_ = self.target_agent(agent_inputs, agent_alone_inputs, self.target_hidden_states, self.target_hidden_states_alone, self.target_hidden_states_)
agent_outs = agent_outs - agent_outs_interactive_
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
assert(False)
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
def get_individual_q(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, agent_outs_interactive, agent_outs_interactive_, agent_outs_alone, self.hidden_states, self.hidden_states_alone, self.hidden_states_ = self.agent.get_individual_q(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone, self.hidden_states_)
_, _, target_agent_outs_interactive_, _, self.target_hidden_states, self.target_hidden_states_alone, self.target_hidden_states_ = self.target_agent.get_individual_q(agent_inputs, agent_alone_inputs, self.target_hidden_states, self.target_hidden_states_alone, self.target_hidden_states_)
agent_outs = agent_outs - target_agent_outs_interactive_
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
assert(False)
#TODO: NOT IMPLEMENTED YET
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs_interactive[reshaped_avail_actions == 0] = -1e10
agent_outs_interactive_[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
agent_outs_interactive = th.nn.functional.softmax(agent_outs_interactive, dim=-1)
agent_outs_interactive_ = th.nn.functional.softmax(agent_outs_interactive_, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
agent_outs_interactive = ((1 - self.action_selector.epsilon) * agent_outs_interactive
+ th.ones_like(agent_outs_interactive) * self.action_selector.epsilon/epsilon_action_num)
agent_outs_interactive_ = ((1 - self.action_selector.epsilon) * agent_outs_interactive_
+ th.ones_like(agent_outs_interactive_) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
agent_outs_interactive[reshaped_avail_actions == 0] = 0.0
agent_outs_interactive_[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1), agent_outs_interactive.view(ep_batch.batch_size, self.n_agents, -1), agent_outs_interactive_.view(ep_batch.batch_size, self.n_agents, -1), \
agent_outs_alone.view(ep_batch.batch_size, self.n_agents, -1)
def init_hidden(self, batch_size):
hidden_states, hidden_states_alone, hidden_states_ = self.agent.init_hidden()
self.hidden_states = hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
self.hidden_states_alone = hidden_states_alone.unsqueeze(0).expand(batch_size, self.n_agents, -1)
self.hidden_states_ = hidden_states_.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
target_hidden_states, target_hidden_states_alone, target_hidden_states_ = self.target_agent.init_hidden()
self.target_hidden_states = target_hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
self.target_hidden_states_alone = target_hidden_states_alone.unsqueeze(0).expand(batch_size, self.n_agents, -1)
self.target_hidden_states_ = target_hidden_states_.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
def parameters(self):
return self.agent.get_parameters()
def load_state(self, other_mac):
self.agent.load_state_dict(other_mac.agent.state_dict())
self.target_agent.load_state_dict(other_mac.target_agent.state_dict())
def cuda(self):
self.agent.cuda()
self.target_agent.cuda()
def save_models(self, path):
th.save(self.agent.state_dict(), "{}/agent.th".format(path))
def load_models(self, path):
self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
self.target_agent.load_state_dict(self.agent.state_dict())
def _build_agents(self, input_shape, input_alone_shape):
self.agent = agent_REGISTRY[self.args.agent](input_shape, input_alone_shape, self.args)
if self.args.pretrained:
model_dict = self.agent.agent_alone.state_dict()
checkpoint = th.load(self.args.single_model_name+"/"+"-".join(self.args.env_args['map_name'])+"/agent.th")
# 1. filter out unnecessary keys
state_dict = {}
for k, v in checkpoint.items():
if 'agent_alone' in k:
state_dict[k[12:]] = v
pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
self.agent.agent_alone.load_state_dict(model_dict)
self.target_agent = copy.deepcopy(self.agent)
def update_targets(self):
self.target_agent.load_state_dict(self.agent.state_dict())
def _build_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs = []
inputs.append(batch["obs"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
return inputs
def _build_alone_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs_alone = []
inputs_alone.append(batch["obs_alone"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs_alone.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs_alone.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs_alone.append(th.eye(1, device=batch.device).expand(self.n_agents, -1).unsqueeze(0).expand(bs, -1, -1))
inputs_alone = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs_alone], dim=1)
return inputs_alone
def _get_input_shape(self, scheme):
input_shape = scheme["obs"]["vshape"]
if self.args.obs_last_action:
input_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_shape += self.n_agents
return input_shape
def _get_input_alone_shape(self, scheme):
input_alone_shape = scheme["obs_alone"]["vshape"]
if self.args.obs_last_action:
input_alone_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_alone_shape += 1
return input_alone_shape
def focus_fire_rate(self, chosen_actions, batch, t):
n_actions_no_attack = 6
#Compute focus fire rate
target_id = th.clamp(chosen_actions - n_actions_no_attack, min=-1)
max_id = self.args.n_actions - n_actions_no_attack
num_agents_attack = []
for i in range(max_id):
num_agents_attack.append(th.sum(target_id == i).item())
#Compute average distance
inputs = batch["obs"][:, t]
bs = batch.batch_size
individual_feats_size = (self.input_shape-self.n_agents-self.input_alone_shape+1) // (self.n_agents - 1) - 4
all_feats_size = individual_feats_size + 4
n_enemies = (self.input_shape-individual_feats_size-self.n_agents-self.args.n_actions-all_feats_size*(self.n_agents-1)) // all_feats_size
enemy_ally_feats = inputs[:, :, -individual_feats_size-all_feats_size*(self.n_agents-1+n_enemies):-individual_feats_size]\
.reshape(inputs.shape[0], inputs.shape[1], self.n_agents-1+n_enemies, -1)
#Compute enemy
e_shootable = (enemy_ally_feats[:, :, :n_enemies, 0] > 0).long()
e_visible = (enemy_ally_feats[:, :, :n_enemies, 1] > 0).long()
e_distance = enemy_ally_feats[:, :, :n_enemies, 1]
e_average_distance = th.sum(e_distance, dim=1)/(th.sum(e_visible, dim=1) + 1e-6)
#Compute ally
a_visible = (enemy_ally_feats[:, :, :n_enemies, 0] > 0).long()
a_distance = enemy_ally_feats[:, :, :n_enemies, 1] * a_visible
a_average_distance = th.sum(a_distance, dim=1)/(th.sum(a_visible, dim=1) + 1e-6)
if t == 0:
self.avg_num_agents_attack = th.zeros(self.n_agents + 1)
self.avg_ally_distance = 0
for num_attack in num_agents_attack:
self.avg_num_agents_attack[num_attack] += 1
self.avg_ally_distance += a_average_distance.mean().item()
th.set_printoptions(precision=2)
print("focus fire rate: ", self.avg_num_agents_attack/(t+1))
print("average distance between agents: ", "%.2f" % (self.avg_ally_distance/(t+1)))
| CollaQ-main | src_code/controllers/basic_controller_interactive.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# from modules.agents import REGISTRY as agent_REGISTRY
# from components.action_selectors import REGISTRY as action_REGISTRY
# import torch as th
# import copy
# # This multi-agent controller shares parameters between agents
# class BasicMACInfluence:
# def __init__(self, scheme, groups, args):
# self.n_agents = args.n_agents
# self.args = args
# input_shape = self._get_input_shape(scheme)
# input_alone_shape = self._get_input_alone_shape(scheme)
# self._build_agents(input_shape, input_alone_shape)
# self.agent_output_type = args.agent_output_type
# self.action_selector = action_REGISTRY[args.action_selector](args)
# self.hidden_states = None
# self.hidden_states_alone = None
# self.target_hidden_states = None
# def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False, env=None):
# # Only select actions for the selected batch elements in bs
# avail_actions = ep_batch["avail_actions"][:, t_ep]
# agent_outputs_interactive, agent_outputs = self.get_interactive_q(ep_batch, t_ep, test_mode=test_mode, env=env)
# chosen_actions = self.action_selector.select_action(agent_outputs_interactive[bs], agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
# return chosen_actions
# def forward(self, ep_batch, t, test_mode=False):
# agent_inputs = self._build_inputs(ep_batch, t)
# agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
# avail_actions = ep_batch["avail_actions"][:, t]
# agent_outs, self.hidden_states, self.hidden_states_alone = self.agent(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone)
# # Softmax the agent outputs if they're policy logits
# if self.agent_output_type == "pi_logits":
# if getattr(self.args, "mask_before_softmax", True):
# # Make the logits for unavailable actions very negative to minimise their effect on the softmax
# reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
# agent_outs[reshaped_avail_actions == 0] = -1e10
# agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
# if not test_mode:
# # Epsilon floor
# epsilon_action_num = agent_outs.size(-1)
# if getattr(self.args, "mask_before_softmax", True):
# # With probability epsilon, we will pick an available action uniformly
# epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
# agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
# + th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
# if getattr(self.args, "mask_before_softmax", True):
# # Zero out the unavailable actions
# agent_outs[reshaped_avail_actions == 0] = 0.0
# return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
# def get_interactive_q(self, ep_batch, t, test_mode=False, env=None):
# agent_inputs = self._build_inputs(ep_batch, t)
# agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
# avail_actions = ep_batch["avail_actions"][:, t]
# agent_outs, self.hidden_states, self.hidden_states_alone = self.agent.forward(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone)
# #TODO: need support from sc2
# # _, self.target_hidden_states = self.target_agent.get_interactive_q(agent_inputs, self.target_hidden_states)
# # agent_inputs_simulated, num_actions = self._build_simulated_states_inputs(ep_batch, t, env)
# # agent_outs_interactive, _ = self.target_agent.get_interactive_q(agent_inputs_simulated, self.target_hidden_states.repeat_interleave((self.n_agents-1)*num_actions, 0))
# # agent_outs_interactive = th.sum(agent_outs_interactive.reshape(self.n_agents, num_actions, -1), dim=-1)
# agent_outs_interactive, self.target_hidden_states = self.target_agent.get_interactive_q(agent_inputs, self.target_hidden_states)
# # Softmax the agent outputs if they're policy logits
# if self.agent_output_type == "pi_logits":
# if getattr(self.args, "mask_before_softmax", True):
# # Make the logits for unavailable actions very negative to minimise their effect on the softmax
# reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
# agent_outs_interactive[reshaped_avail_actions == 0] = -1e10
# agent_outs[reshaped_avail_actions == 0] = -1e10
# agent_outs_interactive = th.nn.functional.softmax(agent_outs_interactive, dim=-1)
# agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
# if not test_mode:
# # Epsilon floor
# epsilon_action_num = agent_outs.size(-1)
# if getattr(self.args, "mask_before_softmax", True):
# # With probability epsilon, we will pick an available action uniformly
# epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
# agent_outs_interactive = ((1 - self.action_selector.epsilon) * agent_outs_interactive
# + th.ones_like(agent_outs_interactive) * self.action_selector.epsilon/epsilon_action_num)
# agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
# + th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
# if getattr(self.args, "mask_before_softmax", True):
# # Zero out the unavailable actions
# agent_outs_interactive[reshaped_avail_actions == 0] = 0.0
# agent_outs[reshaped_avail_actions == 0] = 0.0
# return agent_outs_interactive.view(ep_batch.batch_size, self.n_agents, -1), agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
# def init_hidden(self, batch_size):
# hidden_states, hidden_states_alone = self.agent.init_hidden()
# self.hidden_states = hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
# self.hidden_states_alone = hidden_states_alone.unsqueeze(0).expand(batch_size, self.n_agents, -1)
# target_hidden_states, _ = self.target_agent.init_hidden()
# self.target_hidden_states = target_hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
# def parameters(self):
# return self.agent.get_parameters()
# def load_state(self, other_mac):
# self.agent.load_state_dict(other_mac.agent.state_dict())
# def cuda(self):
# self.agent.cuda()
# self.target_agent.cuda()
# def save_models(self, path):
# th.save(self.agent.state_dict(), "{}/agent.th".format(path))
# def load_models(self, path):
# self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
# def _build_agents(self, input_shape, input_alone_shape):
# self.agent = agent_REGISTRY[self.args.agent](input_shape, input_alone_shape, self.args)
# # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
# self.target_agent = copy.deepcopy(self.agent)
# def _update_targets(self):
# self.target_agent.load_state_dict(self.agent.state_dict())
# def _build_inputs(self, batch, t):
# # Assumes homogenous agents with flat observations.
# # Other MACs might want to e.g. delegate building inputs to each agent
# bs = batch.batch_size
# inputs = []
# inputs.append(batch["obs"][:, t]) # b1av
# if self.args.obs_last_action:
# if t == 0:
# inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
# else:
# inputs.append(batch["actions_onehot"][:, t-1])
# if self.args.obs_agent_id:
# inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
# inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
# return inputs
# def _build_simulated_states_inputs(self, batch, t, env=None):
# # Assumes homogenous agents with flat observations.
# # Other MACs might want to e.g. delegate building inputs to each agent
# assert(batch.batch_size == 1)
# inputs, num_actions = env.simulate_next_state(self.args.obs_last_action, self.args.obs_agent_id)
# inputs = th.tensor(inputs, device=batch.device) # b1av
# inputs = inputs.reshape(-1, inputs.shape[-1]).float()
# return inputs, num_actions
# def _build_alone_inputs(self, batch, t):
# # Assumes homogenous agents with flat observations.
# # Other MACs might want to e.g. delegate building inputs to each agent
# bs = batch.batch_size
# inputs_alone = []
# inputs_alone.append(batch["obs_alone"][:, t]) # b1av
# if self.args.obs_last_action:
# if t == 0:
# inputs_alone.append(th.zeros_like(batch["actions_onehot"][:, t]))
# else:
# inputs_alone.append(batch["actions_onehot"][:, t-1])
# if self.args.obs_agent_id:
# inputs_alone.append(th.eye(1, device=batch.device).expand(self.n_agents, -1).unsqueeze(0).expand(bs, -1, -1))
# inputs_alone = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs_alone], dim=1)
# return inputs_alone
# def _get_input_shape(self, scheme):
# input_shape = scheme["obs"]["vshape"]
# if self.args.obs_last_action:
# input_shape += scheme["actions_onehot"]["vshape"][0]
# if self.args.obs_agent_id:
# input_shape += self.n_agents
# return input_shape
# def _get_input_alone_shape(self, scheme):
# input_alone_shape = scheme["obs_alone"]["vshape"]
# if self.args.obs_last_action:
# input_alone_shape += scheme["actions_onehot"]["vshape"][0]
# if self.args.obs_agent_id:
# input_alone_shape += 1
# return input_alone_shape
from modules.agents import REGISTRY as agent_REGISTRY
from components.action_selectors import REGISTRY as action_REGISTRY
import torch as th
import copy
# This multi-agent controller shares parameters between agents
class BasicMACInfluence:
def __init__(self, scheme, groups, args):
self.n_agents = args.n_agents
self.args = args
input_shape = self._get_input_shape(scheme)
input_alone_shape = self._get_input_alone_shape(scheme)
self._build_agents(input_shape, input_alone_shape)
self.agent_output_type = args.agent_output_type
self.action_selector = action_REGISTRY[args.action_selector](args)
self.hidden_states = None
self.hidden_states_alone = None
self.target_hidden_states_alone = None
def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False, env=None):
# Only select actions for the selected batch elements in bs
avail_actions = ep_batch["avail_actions"][:, t_ep]
agent_outputs_alone, agent_outputs = self.get_alone_q(ep_batch, t_ep, test_mode=test_mode, env=env)
chosen_actions = self.action_selector.select_action(agent_outputs_alone[bs], agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
return chosen_actions
def forward(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, self.hidden_states, self.hidden_states_alone = self.agent(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone)
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
def get_individual_q(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, agent_outs_interactive, agent_outs_alone, self.hidden_states, self.hidden_states_alone = self.agent.get_individual_q(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone)
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs_interactive[reshaped_avail_actions == 0] = -1e10
agent_outs_alone[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
agent_outs_interactive = th.nn.functional.softmax(agent_outs_interactive, dim=-1)
agent_outs_alone = th.nn.functional.softmax(agent_outs_alone, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
agent_outs_interactive = ((1 - self.action_selector.epsilon) * agent_outs_interactive
+ th.ones_like(agent_outs_interactive) * self.action_selector.epsilon/epsilon_action_num)
agent_outs_alone = ((1 - self.action_selector.epsilon) * agent_outs_alone
+ th.ones_like(agent_outs_alone) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs[reshaped_avail_actions == 0] = 0.0
agent_outs_interactive[reshaped_avail_actions == 0] = 0.0
agent_outs_alone[reshaped_avail_actions == 0] = 0.0
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1), agent_outs_interactive.view(ep_batch.batch_size, self.n_agents, -1), agent_outs_alone.view(ep_batch.batch_size, self.n_agents, -1)
def get_alone_q(self, ep_batch, t, test_mode=False, env=None):
agent_inputs = self._build_inputs(ep_batch, t)
agent_alone_inputs = self._build_alone_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, self.hidden_states, self.hidden_states_alone = self.agent.forward(agent_inputs, agent_alone_inputs, self.hidden_states, self.hidden_states_alone)
#TODO: need support from sc2
# _, self.target_hidden_states = self.target_agent.get_interactive_q(agent_inputs, self.target_hidden_states)
# agent_inputs_simulated, num_actions = self._build_simulated_states_inputs(ep_batch, t, env)
# agent_outs_interactive, _ = self.target_agent.get_interactive_q(agent_inputs_simulated, self.target_hidden_states.repeat_interleave((self.n_agents-1)*num_actions, 0))
# agent_outs_interactive = th.sum(agent_outs_interactive.reshape(self.n_agents, num_actions, -1), dim=-1)
agent_outs_alone, self.target_hidden_states_alone = self.target_agent.get_alone_q(agent_alone_inputs, self.target_hidden_states_alone)
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs_alone[reshaped_avail_actions == 0] = -1e10
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs_alone = th.nn.functional.softmax(agent_outs_alone, dim=-1)
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
if not test_mode:
# Epsilon floor
epsilon_action_num = agent_outs.size(-1)
if getattr(self.args, "mask_before_softmax", True):
# With probability epsilon, we will pick an available action uniformly
epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
agent_outs_alone = ((1 - self.action_selector.epsilon) * agent_outs_alone
+ th.ones_like(agent_outs_alone) * self.action_selector.epsilon/epsilon_action_num)
agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
+ th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
if getattr(self.args, "mask_before_softmax", True):
# Zero out the unavailable actions
agent_outs_alone[reshaped_avail_actions == 0] = 0.0
agent_outs[reshaped_avail_actions == 0] = 0.0
return agent_outs_alone.view(ep_batch.batch_size, self.n_agents, -1), agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
def init_hidden(self, batch_size):
hidden_states, hidden_states_alone = self.agent.init_hidden()
self.hidden_states = hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
self.hidden_states_alone = hidden_states_alone.unsqueeze(0).expand(batch_size, self.n_agents, -1)
_, target_hidden_states_alone = self.target_agent.init_hidden()
self.target_hidden_states_alone = target_hidden_states_alone.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
def parameters(self):
return self.agent.get_parameters()
def load_state(self, other_mac):
self.agent.load_state_dict(other_mac.agent.state_dict())
def cuda(self):
self.agent.cuda()
self.target_agent.cuda()
def save_models(self, path):
th.save(self.agent.state_dict(), "{}/agent.th".format(path))
def load_models(self, path):
self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
def _build_agents(self, input_shape, input_alone_shape):
self.agent = agent_REGISTRY[self.args.agent](input_shape, input_alone_shape, self.args)
# a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
self.target_agent = copy.deepcopy(self.agent)
def _update_targets(self):
self.target_agent.load_state_dict(self.agent.state_dict())
def _build_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs = []
inputs.append(batch["obs"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
return inputs
def _build_simulated_states_inputs(self, batch, t, env=None):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
assert(batch.batch_size == 1)
inputs, num_actions = env.simulate_next_state(self.args.obs_last_action, self.args.obs_agent_id)
inputs = th.tensor(inputs, device=batch.device) # b1av
inputs = inputs.reshape(-1, inputs.shape[-1]).float()
return inputs, num_actions
def _build_alone_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs_alone = []
inputs_alone.append(batch["obs_alone"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs_alone.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs_alone.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs_alone.append(th.eye(1, device=batch.device).expand(self.n_agents, -1).unsqueeze(0).expand(bs, -1, -1))
inputs_alone = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs_alone], dim=1)
return inputs_alone
def _get_input_shape(self, scheme):
input_shape = scheme["obs"]["vshape"]
if self.args.obs_last_action:
input_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_shape += self.n_agents
return input_shape
def _get_input_alone_shape(self, scheme):
input_alone_shape = scheme["obs_alone"]["vshape"]
if self.args.obs_last_action:
input_alone_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_alone_shape += 1
return input_alone_shape
| CollaQ-main | src_code/controllers/basic_controller_influence.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import find_packages, setup
setup(
name='animated_drawings',
description="Companion code for `A Method For Automatically Animating Children's Drawings of the Human Form.`",
author='FAIR',
author_email='[email protected]',
python_requires='>=3.8.13',
install_requires=[
'numpy==1.23.3',
'scipy==1.10.0',
'scikit-image==0.19.3',
'scikit-learn==1.1.2',
'shapely==1.8.5.post1',
'opencv-python==4.6.0.66',
'Pillow==9.3.0',
'glfw==2.5.5',
'PyOpenGL==3.1.6',
'PyYAML==6.0',
'requests==2.31.0',
'torchserve==0.7.0',
'tqdm==4.64.1',
'Flask==2.3.2'
],
packages=find_packages(),
)
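# Typical workflow (an assumption, not documented in this file): from the repository root,
# `pip install -e .` installs the pinned dependencies above and the animated_drawings package
# in editable mode.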
| AnimatedDrawings-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.vectors import Vectors
import numpy as np
def test_initialize_with_tuple_or_list1():
v1 = Vectors((0, 1, 2))
assert np.array_equal(v1.vs, np.array([[0, 1, 2]]))
v2 = Vectors([3, 4, 5])
assert np.array_equal(v2.vs, np.array([[3, 4, 5]]))
def test_initialize2():
try:
Vectors('f') # type: ignore
except AssertionError:
return
assert False
def test_initialize_with_single_dimensional_array():
v1 = Vectors(np.array([0, 1, 2]))
assert np.array_equal(v1.vs, np.array([[0, 1, 2]]))
def test_div():
v1 = Vectors(np.array([0, 1, 2]))
assert np.array_equal((v1/2).vs, np.array([[0.0, 0.5, 1.0]]))
def test_norm():
v1 = Vectors(np.array([10, 10, 10]))
v1.norm()
v2 = Vectors(np.array([10, 10, 10]) / 300**0.5)
assert np.array_equal(v1.vs, v2.vs)
def test_norm_zero():
v1 = Vectors(np.array([0, 0, 0]))
v1.norm()
v2 = Vectors(np.array([0, 0, 0]) / 1**0.5)
assert np.array_equal(v1.vs, v2.vs)
| AnimatedDrawings-main | tests/test_vectors.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.bvh import BVH
from pkg_resources import resource_filename
def test_bvh_from_file():
bvh_fn = resource_filename(__name__, 'test_bvh_files/zombie.bvh')
b = BVH.from_file(bvh_fn)
# was the skeleton built correctly?
assert b.root_joint.joint_count() == 34
# did frame_time parse correctly?
assert b.frame_time == 0.0333333
# did frame num parse correctly?
assert b.frame_max_num == 779
# there should be root position data for each frame
assert b.frame_max_num == b.pos_data.shape[0]
# and it should be an xzy coordinate
assert b.pos_data.shape[-1] == 3
# there should be rotation data for each frame
assert b.frame_max_num == b.rot_data.shape[0]
# there should be a rotation for every joint within that frame
assert b.rot_data.shape[1] == b.root_joint.joint_count()
# and the rotation is a quaternion with dimensionality of 4
assert b.rot_data.shape[-1] == 4
| AnimatedDrawings-main | tests/test_bvh.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from animated_drawings.model.arap import ARAP, plot_mesh
def test_single_triangle_mesh():
show_plots = False # Make true if you'd like to see mesh viz during test run
vertices = np.array([
[2.0, 2.0],
[3.0, 3.0],
[4.0, 2.0]
])
triangles = np.array([
[0, 1, 2]
], np.int32)
pins_xy = np.array([[2.0, 2.0], [4.0, 2.0]])
if show_plots:
plot_mesh(vertices, triangles, pins_xy)
arap = ARAP(pins_xy, triangles=triangles, vertices=vertices)
pins_xy = np.array([[-5.0, 0.0], [5.0, 0.0]])
v = arap.solve(pins_xy)
if show_plots:
plot_mesh(v, triangles, pins_xy)
assert np.isclose(v, np.array([
[-5.0, 0.0],
[0.0, 1.0],
[5.0, 0.0]
])).all()
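# Usage pattern suggested by these tests (an inference from the calls above, not an
# authoritative API description): ARAP is constructed once from the rest-pose pin positions,
# triangles, and vertices; ARAP.solve(new_pins_xy) then returns the deformed vertex positions
# for the moved pins.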
def test_two_triangle_mesh():
show_plots = False # Make true if you'd like to see mesh viz during test run
vertices = np.array([
[1.0, 0.0],
[1.0, 1.0],
[2.0, 1.0],
[2.0, 0.0],
])
triangles = np.array([
[0, 1, 2],
[0, 2, 3],
], np.int32)
pins_xy = np.array([[1.0, 0.0], [2.0, 0.0]])
new = ARAP(pins_xy, triangles=triangles, vertices=vertices)
if show_plots:
plot_mesh(vertices, triangles, pins_xy)
pins_xy = np.array([[1.0, 0.0], [1.7, 0.7]])
v = new.solve(pins_xy)
if show_plots:
plot_mesh(v, triangles, pins_xy)
assert np.isclose(v, np.array([
[9.99999989e-01, -1.13708135e-08],
[2.91471856e-01, 7.05685418e-01],
[9.97157285e-01, 1.41137085e+00],
[1.70000001e+00, 7.00000011e-01]
])).all()
def test_four_triangle_mesh():
show_plots = False # Make true if you'd like to see mesh viz during test run
vertices = np.array([
[0.0, 0.0],
[0.0, 1.0],
[1.0, 1.0],
[1.0, 0.0],
[2.0, 1.0],
[2.0, 0.0],
[0.0, 2.0],
[1.0, 2.0],
[2.0, 2.0],
])
triangles = np.array([
[0, 1, 2],
[0, 2, 3],
[3, 2, 4],
[3, 4, 5],
[1, 6, 7],
[1, 7, 2],
[2, 7, 8],
[2, 8, 4]
], np.int32)
pins_xy = np.array([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0]])
if show_plots:
plot_mesh(vertices, triangles, pins_xy)
new = ARAP(pins_xy, triangles=triangles, vertices=vertices)
new_pins_xy = np.array([[0.0, 0.0], [0.0, 3.0], [6.0, 0.0]])
v = new.solve(new_pins_xy)
if show_plots:
plot_mesh(v, triangles, new_pins_xy)
assert np.isclose(v, np.array([
[3.19325865e-06, 1.08194488e-06],
[6.78428061e-01, 1.37166545e+00],
[2.14606263e+00, 1.19790398e+00],
[2.81917351e+00, 1.12790606e-02],
[3.95163838e+00, 1.34725960e+00],
[5.99999596e+00, 5.51801260e-07],
[8.44193478e-07, 2.99999837e+00],
[1.46633111e+00, 2.60720416e+00],
[2.82413859e+00, 2.62209072e+00]
])).all()
| AnimatedDrawings-main | tests/test_arap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.quaternions import Quaternions
from animated_drawings.model.vectors import Vectors
import numpy as np
def test_not_four_elements():
try:
Quaternions(np.array([0, 1, 2])) # should fail
except AssertionError:
return
assert False
def test_initialize_with_ndarray():
q = Quaternions(np.array([1, 0, 0, 0])) # should succeed
assert np.array_equal(q.qs, np.array([[1, 0, 0, 0]]))
def test_from_angle_axis():
angle = np.array([1.0])
axis = Vectors(np.array([1.0, 1.0, 1.0]))
q6 = Quaternions.from_angle_axis(angle, axis)
assert np.allclose(q6.qs, np.array(
[[0.87758256, 0.27679646, 0.27679646, 0.27679646]]))
def test_multiple_from_angle_axis():
angles = np.array([[1.0], [1.0]])
axis = Vectors(
np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=np.float32))
q1 = Quaternions.from_angle_axis(angles, axis)
assert np.allclose(q1.qs, np.array([
[0.87758256, 0.27679646, 0.27679646, 0.27679646],
[0.87758256, 0.27679646, 0.27679646, 0.27679646]]))
def test_to_rotation_matrix():
angles = np.array([[np.pi / 2]])
axis = Vectors(np.array([1.0, 0.0, 0.0], dtype=np.float32))
q1 = Quaternions.from_angle_axis(angles, axis)
assert np.allclose(q1.to_rotation_matrix(), np.array([
[1.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00],
[0.000000e+00, 0.000000e+00, -1.000000e+00, 0.000000e+00],
[0.000000e+00, 1.000000e+00, 0.000000e+00, 0.000000e+00],
[0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]]))
def test_from_rotation_matrix():
angles = np.array([[np.pi / 2]])
axis = np.array([1.0, 1.0, 0.0], dtype=np.float32)
q1 = Quaternions.from_angle_axis(angles, Vectors(axis))
q2 = Quaternions.from_rotation_matrix(q1.to_rotation_matrix())
assert np.allclose(q1.qs, q2.qs)
def test_to_euler_angles():
# TODO add test coverage for from_euler_angles
pass
def test_multiply():
# TODO add test coverage for quaternion multiplication
pass
| AnimatedDrawings-main | tests/test_quaternions.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.animated_drawing import AnimatedDrawing
from animated_drawings.config import Config
from pkg_resources import resource_filename
def test_init():
import OpenGL.GL as GL
import glfw
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
win = glfw.create_window(100, 100, 'Viewer', None, None)
glfw.make_context_current(win)
mvc_cfg_fn = resource_filename(__name__, 'test_animated_drawing_files/test_mvc.yaml')
mvc_config = Config(mvc_cfg_fn)
char_cfg, retarget_cfg, motion_cfg = mvc_config.scene.animated_characters[0]
AnimatedDrawing(char_cfg, retarget_cfg, motion_cfg)
assert True
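# Note: the test above creates a real GLFW window and an OpenGL 3.3 core context before
# constructing the AnimatedDrawing; on a headless machine this presumably requires a virtual
# display such as Xvfb (an assumption about the environment, not something set up here).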
| AnimatedDrawings-main | tests/test_animated_drawing.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings import render
from pkg_resources import resource_filename
import os
import pytest
@pytest.mark.skipif(os.environ.get('IS_CI_RUNNER') == 'True', reason='skipping video rendering for CI/CD')
def test_render_gif():
render_gif_cfg_fn = resource_filename(__name__, 'test_render_files/mvc_render_gif.yaml')
render.start(render_gif_cfg_fn)
assert os.path.exists('.tests/test_render_files/video.gif')
assert os.path.getsize('.tests/test_render_files/video.gif') > 100
os.remove('.tests/test_render_files/video.gif')
@pytest.mark.skipif(os.environ.get('IS_CI_RUNNER') == 'True', reason='skipping video rendering for CI/CD')
def test_render_mp4():
render_mp4_cfg_fn = resource_filename(__name__, 'test_render_files/mvc_render_mp4.yaml')
render.start(render_mp4_cfg_fn)
assert os.path.exists('.tests/test_render_files/video.mp4')
assert os.path.getsize('.tests/test_render_files/video.mp4') > 100
os.remove('.tests/test_render_files/video.mp4')
| AnimatedDrawings-main | tests/test_render.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.transform import Transform
from animated_drawings.model.quaternions import Quaternions
import numpy as np
def test_init():
t = Transform()
for m in [t._rotate_m, t._translate_m, t._scale_m, t._local_transform, t._world_transform]:
assert np.array_equal(m, np.identity(4))
def test_set_position():
t = Transform()
t.set_position(np.array([1.0, 1.0, 1.0]))
t.set_position(np.array([2.0, 2.0, 2.0]))
t.update_transforms()
assert np.array_equal(
t._local_transform[:-1, -1], np.array([2.0, 2.0, 2.0]))
def test_offset():
t = Transform()
t.offset(np.array([1.0, 1.0, 1.0]))
t.offset(np.array([2.0, 2.0, 2.0]))
t.update_transforms()
assert np.array_equal(
t._local_transform[:-1, -1], np.array([3.0, 3.0, 3.0]))
def test_update_transforms():
t1 = Transform()
t2 = Transform()
t1.add_child(t2)
t1.set_position(np.array([3.0, 0.0, 0.0]))
t1.update_transforms()
assert np.array_equal(
t2._world_transform[:-1, -1], np.array([3.0, 0.0, 0.0]))
def test_rotate():
t = Transform()
q = Quaternions.from_euler_angles('y', np.array([-90]))
t.set_rotation(q)
t.update_transforms()
m = np.identity(4)
m[0, 0] = 0.0
m[2, 0] = 1.0
m[2, 2] = 0.0
m[0, 2] = -1.0
assert np.isclose(t._local_transform, m).all()
def test_look_at():
t = Transform()
fwd = np.array([0, 0, -1])
t.look_at(fwd)
t.update_transforms()
m = np.identity(4)
m[0, 0] = -1.0
m[2, 2] = -1.0
assert np.isclose(t._local_transform, m).all()
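# A hedged sketch combining the pieces above: a child translated along +x under a
# parent rotated -90 degrees about y should land on the +z axis in world space,
# assuming world transforms compose parent-then-child as in test_update_transforms.
def test_child_under_rotated_parent():
    parent, child = Transform(), Transform()
    parent.add_child(child)
    parent.set_rotation(Quaternions.from_euler_angles('y', np.array([-90])))
    child.set_position(np.array([1.0, 0.0, 0.0]))
    parent.update_transforms()
    assert np.isclose(child._world_transform[:-1, -1], np.array([0.0, 0.0, 1.0])).all()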
| AnimatedDrawings-main | tests/test_transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from image_to_annotations import image_to_annotations
from annotations_to_animation import annotations_to_animation
from pathlib import Path
import logging
import sys
from pkg_resources import resource_filename
def image_to_animation(img_fn: str, char_anno_dir: str, motion_cfg_fn: str, retarget_cfg_fn: str):
"""
Given the image located at img_fn, create annotation files needed for animation.
    Then create an animation from those annotations, using the motion cfg and retarget cfg.
"""
# create the annotations
image_to_annotations(img_fn, char_anno_dir)
# create the animation
annotations_to_animation(char_anno_dir, motion_cfg_fn, retarget_cfg_fn)
if __name__ == '__main__':
log_dir = Path('./logs')
log_dir.mkdir(exist_ok=True, parents=True)
logging.basicConfig(filename=f'{log_dir}/log.txt', level=logging.DEBUG)
img_fn = sys.argv[1]
char_anno_dir = sys.argv[2]
if len(sys.argv) > 3:
motion_cfg_fn = sys.argv[3]
else:
motion_cfg_fn = resource_filename(__name__, 'config/motion/dab.yaml')
if len(sys.argv) > 4:
retarget_cfg_fn = sys.argv[4]
else:
retarget_cfg_fn = resource_filename(__name__, 'config/retarget/fair1_ppf.yaml')
image_to_animation(img_fn, char_anno_dir, motion_cfg_fn, retarget_cfg_fn)
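# Illustrative command-line invocation (a sketch; the file paths below are
# hypothetical and must point at an existing drawing and a writable output dir):
#
#   python image_to_animation.py drawings/garlic.png garlic_out \
#       config/motion/dab.yaml config/retarget/fair1_ppf.yaml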
| AnimatedDrawings-main | examples/image_to_animation.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import animated_drawings.render
import logging
from pathlib import Path
import sys
import yaml
from pkg_resources import resource_filename
def annotations_to_animation(char_anno_dir: str, motion_cfg_fn: str, retarget_cfg_fn: str):
"""
Given a path to a directory with character annotations, a motion configuration file, and a retarget configuration file,
    creates an animation and saves it to {char_anno_dir}/video.gif
"""
# package character_cfg_fn, motion_cfg_fn, and retarget_cfg_fn
animated_drawing_dict = {
'character_cfg': str(Path(char_anno_dir, 'char_cfg.yaml').resolve()),
'motion_cfg': str(Path(motion_cfg_fn).resolve()),
'retarget_cfg': str(Path(retarget_cfg_fn).resolve())
}
# create mvc config
mvc_cfg = {
'scene': {'ANIMATED_CHARACTERS': [animated_drawing_dict]}, # add the character to the scene
'controller': {
'MODE': 'video_render', # 'video_render' or 'interactive'
'OUTPUT_VIDEO_PATH': str(Path(char_anno_dir, 'video.gif').resolve())} # set the output location
}
# write the new mvc config file out
output_mvc_cfn_fn = str(Path(char_anno_dir, 'mvc_cfg.yaml'))
with open(output_mvc_cfn_fn, 'w') as f:
yaml.dump(dict(mvc_cfg), f)
# render the video
animated_drawings.render.start(output_mvc_cfn_fn)
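# For reference, the mvc_cfg.yaml written above ends up with roughly this shape
# (the absolute paths shown are hypothetical, produced by resolve() at runtime):
#
#   controller:
#     MODE: video_render
#     OUTPUT_VIDEO_PATH: /abs/path/to/char_anno_dir/video.gif
#   scene:
#     ANIMATED_CHARACTERS:
#     - character_cfg: /abs/path/to/char_anno_dir/char_cfg.yaml
#       motion_cfg: /abs/path/to/motion_cfg.yaml
#       retarget_cfg: /abs/path/to/retarget_cfg.yaml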
if __name__ == '__main__':
log_dir = Path('./logs')
log_dir.mkdir(exist_ok=True, parents=True)
logging.basicConfig(filename=f'{log_dir}/log.txt', level=logging.DEBUG)
char_anno_dir = sys.argv[1]
if len(sys.argv) > 2:
motion_cfg_fn = sys.argv[2]
else:
motion_cfg_fn = resource_filename(__name__, 'config/motion/dab.yaml')
if len(sys.argv) > 3:
retarget_cfg_fn = sys.argv[3]
else:
retarget_cfg_fn = resource_filename(__name__, 'config/retarget/fair1_ppf.yaml')
annotations_to_animation(char_anno_dir, motion_cfg_fn, retarget_cfg_fn)
| AnimatedDrawings-main | examples/annotations_to_animation.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import base64
from flask import Flask, render_template, request
import json
import os
import sys
import yaml
global cfg_path
global char_folder
app = Flask(__name__, template_folder=os.path.abspath("./fixer_app/"))
def load_cfg(path):
with open(path, "r") as f:
cfg_text = f.read()
cfg_yaml = yaml.load(cfg_text, Loader=yaml.Loader)
return cfg_yaml
def write_cfg(path, cfg):
with open(path, "w") as f:
yaml.dump(cfg, f)
@app.route("/")
def index():
global cfg_path
global char_folder
cfg = load_cfg(cfg_path)
base64_img = {"data": ""}
with open(os.path.join(char_folder, "texture.png"), "rb") as image_file:
base64_img['data'] = str(base64.b64encode(image_file.read()), "utf-8")
return render_template('dist/index.html', cfg=cfg, image=base64_img)
@app.route("/annotations/submit", methods=["POST"])
def post_cfg():
output, message = process(request)
if output:
print(output)
return render_template('submit.html', code=output, message=message)
def process(request):
try:
formdata = request.form.get('data')
except Exception as e:
return None, f"Error parsing data from request. No JSON data was found: {e}"
try:
jsondata = json.loads(formdata)
except Exception as e:
return None, f"Error parsing submission data into JSON. Invalid format?: {e}"
# convert joint locations from floats to ints
for joint in jsondata['skeleton']:
joint['loc'][0] = round(joint['loc'][0])
joint['loc'][1] = round(joint['loc'][1])
try:
new_cfg = yaml.dump(jsondata)
except Exception as e:
return None, f"Error converting submission to YAML data. Invalid format?: {e}"
try:
        write_cfg(cfg_path, jsondata)
except Exception as e:
return None, f"Error saving down file to `{cfg_path}: {e}`"
return new_cfg, f"Successfully saved config to `{cfg_path}`"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('char_folder', type=str, help="the location of the character bundle")
parser.add_argument('--port', type=int, default=5050, help="the port the tool launches on")
args = parser.parse_args()
char_folder = args.char_folder
cfg_path = os.path.join(char_folder, "char_cfg.yaml")
if not os.path.isfile(cfg_path):
print(f"[Error] File not found. Expected config file at: {cfg_path}")
sys.exit(1)
app.run(port=args.port, debug=False)
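# Illustrative launch of this annotation-fixing tool (a sketch; the character
# folder is hypothetical and must contain char_cfg.yaml and texture.png, e.g. as
# produced by image_to_annotations.py):
#
#   python fix_annotations.py garlic_out --port 5050
#
# then open http://localhost:5050 to adjust joint locations and re-save the config.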
| AnimatedDrawings-main | examples/fix_annotations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import requests
import cv2
import json
import numpy as np
from skimage import measure
from scipy import ndimage
from pathlib import Path
import yaml
import logging
def image_to_annotations(img_fn: str, out_dir: str) -> None:
"""
Given the RGB image located at img_fn, runs detection, segmentation, and pose estimation for drawn character within it.
Crops the image and saves texture, mask, and character config files necessary for animation. Writes to out_dir.
Params:
img_fn: path to RGB image
out_dir: directory where outputs will be saved
"""
# create output directory
outdir = Path(out_dir)
outdir.mkdir(exist_ok=True)
# read image
img = cv2.imread(img_fn)
# copy the original image into the output_dir
cv2.imwrite(str(outdir/'image.png'), img)
# ensure it's rgb
if len(img.shape) != 3:
msg = f'image must have 3 channels (rgb). Found {len(img.shape)}'
logging.critical(msg)
assert False, msg
# resize if needed
if np.max(img.shape) > 1000:
scale = 1000 / np.max(img.shape)
img = cv2.resize(img, (round(scale * img.shape[1]), round(scale * img.shape[0])))
# convert to bytes and send to torchserve
img_b = cv2.imencode('.png', img)[1].tobytes()
request_data = {'data': img_b}
resp = requests.post("http://localhost:8080/predictions/drawn_humanoid_detector", files=request_data, verify=False)
if resp is None or resp.status_code >= 300:
raise Exception(f"Failed to get bounding box, please check if the 'docker_torchserve' is running and healthy, resp: {resp}")
detection_results = json.loads(resp.content)
# error check detection_results
if isinstance(detection_results, dict) and 'code' in detection_results.keys() and detection_results['code'] == 404:
assert False, f'Error performing detection. Check that drawn_humanoid_detector.mar was properly downloaded. Response: {detection_results}'
# order results by score, descending
detection_results.sort(key=lambda x: x['score'], reverse=True)
# if no drawn humanoids detected, abort
if len(detection_results) == 0:
msg = 'Could not detect any drawn humanoids in the image. Aborting'
logging.critical(msg)
assert False, msg
# otherwise, report # detected and score of highest.
msg = f'Detected {len(detection_results)} humanoids in image. Using detection with highest score {detection_results[0]["score"]}.'
logging.info(msg)
# calculate the coordinates of the character bounding box
bbox = np.array(detection_results[0]['bbox'])
l, t, r, b = [round(x) for x in bbox]
# dump the bounding box results to file
with open(str(outdir/'bounding_box.yaml'), 'w') as f:
yaml.dump({
'left': l,
'top': t,
'right': r,
'bottom': b
}, f)
# crop the image
cropped = img[t:b, l:r]
# get segmentation mask
mask = segment(cropped)
# send cropped image to pose estimator
data_file = {'data': cv2.imencode('.png', cropped)[1].tobytes()}
resp = requests.post("http://localhost:8080/predictions/drawn_humanoid_pose_estimator", files=data_file, verify=False)
if resp is None or resp.status_code >= 300:
raise Exception(f"Failed to get skeletons, please check if the 'docker_torchserve' is running and healthy, resp: {resp}")
pose_results = json.loads(resp.content)
# error check pose_results
if isinstance(pose_results, dict) and 'code' in pose_results.keys() and pose_results['code'] == 404:
assert False, f'Error performing pose estimation. Check that drawn_humanoid_pose_estimator.mar was properly downloaded. Response: {pose_results}'
    # if no skeletons detected, abort
if len(pose_results) == 0:
msg = 'Could not detect any skeletons within the character bounding box. Expected exactly 1. Aborting.'
logging.critical(msg)
assert False, msg
# if more than one skeleton detected,
if 1 < len(pose_results):
        msg = f'Detected {len(pose_results)} skeletons within the character bounding box. Expected exactly 1. Aborting.'
logging.critical(msg)
assert False, msg
# get x y coordinates of detection joint keypoints
kpts = np.array(pose_results[0]['keypoints'])[:, :2]
# use them to build character skeleton rig
skeleton = []
skeleton.append({'loc' : [round(x) for x in (kpts[11]+kpts[12])/2], 'name': 'root' , 'parent': None})
skeleton.append({'loc' : [round(x) for x in (kpts[11]+kpts[12])/2], 'name': 'hip' , 'parent': 'root'})
skeleton.append({'loc' : [round(x) for x in (kpts[5]+kpts[6])/2 ], 'name': 'torso' , 'parent': 'hip'})
skeleton.append({'loc' : [round(x) for x in kpts[0] ], 'name': 'neck' , 'parent': 'torso'})
skeleton.append({'loc' : [round(x) for x in kpts[6] ], 'name': 'right_shoulder', 'parent': 'torso'})
skeleton.append({'loc' : [round(x) for x in kpts[8] ], 'name': 'right_elbow' , 'parent': 'right_shoulder'})
skeleton.append({'loc' : [round(x) for x in kpts[10] ], 'name': 'right_hand' , 'parent': 'right_elbow'})
skeleton.append({'loc' : [round(x) for x in kpts[5] ], 'name': 'left_shoulder' , 'parent': 'torso'})
skeleton.append({'loc' : [round(x) for x in kpts[7] ], 'name': 'left_elbow' , 'parent': 'left_shoulder'})
skeleton.append({'loc' : [round(x) for x in kpts[9] ], 'name': 'left_hand' , 'parent': 'left_elbow'})
skeleton.append({'loc' : [round(x) for x in kpts[12] ], 'name': 'right_hip' , 'parent': 'root'})
skeleton.append({'loc' : [round(x) for x in kpts[14] ], 'name': 'right_knee' , 'parent': 'right_hip'})
skeleton.append({'loc' : [round(x) for x in kpts[16] ], 'name': 'right_foot' , 'parent': 'right_knee'})
skeleton.append({'loc' : [round(x) for x in kpts[11] ], 'name': 'left_hip' , 'parent': 'root'})
skeleton.append({'loc' : [round(x) for x in kpts[13] ], 'name': 'left_knee' , 'parent': 'left_hip'})
skeleton.append({'loc' : [round(x) for x in kpts[15] ], 'name': 'left_foot' , 'parent': 'left_knee'})
# create the character config dictionary
char_cfg = {'skeleton': skeleton, 'height': cropped.shape[0], 'width': cropped.shape[1]}
    # add an alpha channel to the texture (BGR -> BGRA) and save
cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2BGRA)
cv2.imwrite(str(outdir/'texture.png'), cropped)
# save mask
cv2.imwrite(str(outdir/'mask.png'), mask)
# dump character config to yaml
with open(str(outdir/'char_cfg.yaml'), 'w') as f:
yaml.dump(char_cfg, f)
# create joint viz overlay for inspection purposes
joint_overlay = cropped.copy()
for joint in skeleton:
x, y = joint['loc']
name = joint['name']
cv2.circle(joint_overlay, (int(x), int(y)), 5, (0, 0, 0), 5)
cv2.putText(joint_overlay, name, (int(x), int(y+15)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, 2)
cv2.imwrite(str(outdir/'joint_overlay.png'), joint_overlay)
def segment(img: np.ndarray):
""" threshold """
img = np.min(img, axis=2)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 8)
img = cv2.bitwise_not(img)
""" morphops """
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=2)
img = cv2.morphologyEx(img, cv2.MORPH_DILATE, kernel, iterations=2)
""" floodfill """
mask = np.zeros([img.shape[0]+2, img.shape[1]+2], np.uint8)
mask[1:-1, 1:-1] = img.copy()
    # im_floodfill holds the result of floodfill. Starts off all white
im_floodfill = np.full(img.shape, 255, np.uint8)
    # seed floodfill from points every 10 pixels along each image side.
h, w = img.shape[:2]
for x in range(0, w-1, 10):
cv2.floodFill(im_floodfill, mask, (x, 0), 0)
cv2.floodFill(im_floodfill, mask, (x, h-1), 0)
for y in range(0, h-1, 10):
cv2.floodFill(im_floodfill, mask, (0, y), 0)
cv2.floodFill(im_floodfill, mask, (w-1, y), 0)
# make sure edges aren't character. necessary for contour finding
im_floodfill[0, :] = 0
im_floodfill[-1, :] = 0
im_floodfill[:, 0] = 0
im_floodfill[:, -1] = 0
""" retain largest contour """
mask2 = cv2.bitwise_not(im_floodfill)
mask = None
biggest = 0
contours = measure.find_contours(mask2, 0.0)
for c in contours:
x = np.zeros(mask2.T.shape, np.uint8)
cv2.fillPoly(x, [np.int32(c)], 1)
size = len(np.where(x == 1)[0])
if size > biggest:
mask = x
biggest = size
if mask is None:
msg = 'Found no contours within image'
logging.critical(msg)
assert False, msg
mask = ndimage.binary_fill_holes(mask).astype(int)
mask = 255 * mask.astype(np.uint8)
return mask.T
if __name__ == '__main__':
log_dir = Path('./logs')
log_dir.mkdir(exist_ok=True, parents=True)
logging.basicConfig(filename=f'{log_dir}/log.txt', level=logging.DEBUG)
img_fn = sys.argv[1]
out_dir = sys.argv[2]
image_to_annotations(img_fn, out_dir)
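# For reference, the char_cfg.yaml written above has roughly this shape (pixel
# values are hypothetical; loc coordinates are relative to the cropped texture):
#
#   height: 350
#   width: 300
#   skeleton:
#   - {loc: [150, 200], name: root, parent: null}
#   - {loc: [150, 200], name: hip, parent: root}
#   - {loc: [150, 100], name: torso, parent: hip}
#   - ...the remaining joints follow the parent/child structure built above.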
| AnimatedDrawings-main | examples/image_to_annotations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
from collections import defaultdict
from pathlib import Path
from typing import Union, List, Tuple, Dict, TypedDict, Optional
import yaml
from pkg_resources import resource_filename
from animated_drawings.utils import resolve_ad_filepath
class Config():
def __init__(self, user_mvc_cfg_fn: str) -> None:
# get the base mvc config
with open(resource_filename(__name__, "mvc_base_cfg.yaml"), 'r') as f:
            base_cfg = defaultdict(dict, yaml.load(f, Loader=yaml.FullLoader) or {})  # pyright: ignore[reportUnknownMemberType]
# search for the user-specified mvc config
user_mvc_cfg_p: Path = resolve_ad_filepath(user_mvc_cfg_fn, 'user mvc config')
logging.info(f'Using user-specified mvc config file located at {user_mvc_cfg_p.resolve()}')
with open(str(user_mvc_cfg_p), 'r') as f:
user_cfg = defaultdict(dict, yaml.load(f, Loader=yaml.FullLoader) or {}) # pyright: ignore[reportUnknownMemberType]
# overlay user specified mvc options onto base mvc, use to generate subconfig classes
self.view: ViewConfig = ViewConfig({**base_cfg['view'], **user_cfg['view']})
self.scene: SceneConfig = SceneConfig({**base_cfg['scene'], **user_cfg['scene']})
self.controller: ControllerConfig = ControllerConfig({**base_cfg['controller'], **user_cfg['controller']})
# cannot use an interactive controller with a headless mesa viewer
        if self.controller.mode == 'interactive':
try:
assert self.view.use_mesa is False, 'cannot use interactive controller when USE_MESA is True'
except AssertionError as e:
msg = f'Config error: {e}'
logging.critical(msg)
assert False, msg
# output video path must be set for render controller
if self.controller.mode == 'video_render':
try:
assert self.controller.output_video_path is not None, 'output_video_path must be set when using video_render controller'
except AssertionError as e:
msg = f'Config error: {e}'
logging.critical(msg)
assert False, msg
# output video codec must be set for render controller with .mp4 output filetype
if self.controller.mode == 'video_render' and self.controller.output_video_path is not None and self.controller.output_video_path.endswith('.mp4'):
try:
assert self.controller.output_video_codec is not None, 'output_video_codec must be set when using video_render controller'
except AssertionError as e:
msg = f'Config error: {e}'
logging.critical(msg)
assert False, msg
class SceneConfig():
def __init__(self, scene_cfg: dict) -> None:
# show or hide the floor
try:
self.add_floor: bool = scene_cfg['ADD_FLOOR']
assert isinstance(self.add_floor, bool), 'is not bool'
except (AssertionError, ValueError) as e:
msg = f'Error in ADD_FLOOR config parameter: {e}'
logging.critical(msg)
assert False, msg
# show or hide visualization of BVH motion driving characters
try:
self.add_ad_retarget_bvh: bool = scene_cfg['ADD_AD_RETARGET_BVH']
assert isinstance(self.add_ad_retarget_bvh, bool), 'is not bool'
except (AssertionError, ValueError) as e:
msg = f'Error in ADD_AD_RETARGET_BVH config parameter: {e}'
logging.critical(msg)
assert False, msg
# config files for characters, driving motions, and retargeting
self.animated_characters: List[Tuple[CharacterConfig, RetargetConfig, MotionConfig]] = []
each: Dict[str, str]
for each in scene_cfg['ANIMATED_CHARACTERS']:
char_cfg_fn: str = each['character_cfg']
motion_cfg_fn: str = each['motion_cfg']
retarget_cfg_fn: str = each['retarget_cfg']
self.animated_characters.append((
CharacterConfig(char_cfg_fn),
RetargetConfig(retarget_cfg_fn),
MotionConfig(motion_cfg_fn)
))
class ViewConfig():
def __init__(self, view_cfg: dict) -> None: # noqa: C901
# set color used to clear render buffer
try:
self.clear_color: list[Union[float, int]] = view_cfg["CLEAR_COLOR"]
assert len(self.clear_color) == 4, 'length not four'
for val in self.clear_color:
assert isinstance(val, (float, int)), f'{val} not float or int'
assert val <= 1.0, 'values must be <= 1.0'
assert val >= 0.0, 'values must be >= 0.0'
except (AssertionError, ValueError) as e:
msg = f'Error in CLEAR_COLOR config parameter: {e}'
logging.critical(msg)
assert False, msg
# set an image to use for the background, if desired
try:
self.background_image: Union[None, str] = view_cfg["BACKGROUND_IMAGE"]
assert isinstance(self.background_image, (NoneType, str)), 'type not NoneType or str'
except (AssertionError, ValueError) as e:
msg = f'Error in BACKGROUND_IMAGE config parameter: {e}'
logging.critical(msg)
assert False, msg
# set the dimensions of the window or output video
try:
self.window_dimensions: tuple[int, int] = view_cfg["WINDOW_DIMENSIONS"]
assert len(self.window_dimensions) == 2, 'length is not 2'
for val in self.window_dimensions:
assert val > 0, f'{val} must be > 0'
assert isinstance(val, int), 'type not int'
except (AssertionError, ValueError) as e:
msg = f'Error in WINDOW_DIMENSIONS config parameter: {e}'
logging.critical(msg)
assert False, msg
# set whether we want the character rigs to be visible
try:
self.draw_ad_rig: bool = view_cfg['DRAW_AD_RIG']
assert isinstance(self.draw_ad_rig, bool), 'value is not bool type'
except (AssertionError, ValueError) as e:
msg = f'Error in DRAW_AD_RIG config parameter: {e}'
logging.critical(msg)
assert False, msg
# set whether we want the character textures to be visible
try:
self.draw_ad_txtr: bool = view_cfg['DRAW_AD_TXTR']
assert isinstance(self.draw_ad_txtr, bool), 'value is not bool type'
except (AssertionError, ValueError) as e:
msg = f'Error in DRAW_AD_TXTR config parameter: {e}'
logging.critical(msg)
assert False, msg
# set whether we want the character triangle->bone assignment colors to be visible
try:
self.draw_ad_color: bool = view_cfg['DRAW_AD_COLOR']
assert isinstance(self.draw_ad_color, bool), 'value is not bool type'
except (AssertionError, ValueError) as e:
msg = f'Error in DRAW_AD_COLOR config parameter: {e}'
logging.critical(msg)
assert False, msg
# set whether we want the character mesh lines to be visible
try:
self.draw_ad_mesh_lines: bool = view_cfg['DRAW_AD_MESH_LINES']
assert isinstance(self.draw_ad_mesh_lines, bool), 'value is not bool type'
except (AssertionError, ValueError) as e:
msg = f'Error in DRAW_AD_MESH_LINES config parameter: {e}'
logging.critical(msg)
assert False, msg
# set whether we want to use mesa on the back end (necessary for headless rendering)
try:
self.use_mesa: bool = view_cfg['USE_MESA']
assert isinstance(self.use_mesa, bool), 'value is not bool type'
except (AssertionError, ValueError) as e:
msg = f'Error in USE_MESA config parameter: {e}'
logging.critical(msg)
assert False, msg
# set the position of the view camera
try:
self.camera_pos: list[Union[float, int]] = view_cfg['CAMERA_POS']
assert len(self.camera_pos) == 3, 'length != 3'
for val in self.camera_pos:
assert isinstance(val, (float, int)), f' {val} is not float or int'
except (AssertionError, ValueError) as e:
msg = f'Error in CAMERA_POS config parameter: {e}'
logging.critical(msg)
assert False, msg
        # set the forward vector of the view camera (but it renders out of its rear)
try:
self.camera_fwd: list[Union[float, int]] = view_cfg['CAMERA_FWD']
assert len(self.camera_fwd) == 3, 'length != 3'
for val in self.camera_fwd:
assert isinstance(val, (float, int)), f' {val} is not float or int'
except (AssertionError, ValueError) as e:
msg = f'Error in CAMERA_FWD config parameter: {e}'
logging.critical(msg)
assert False, msg
class ControllerConfig():
def __init__(self, controller_cfg: dict) -> None:
# set controller mode
try:
self.mode: str = controller_cfg["MODE"]
assert isinstance(self.mode, str), 'is not str'
assert self.mode in ('interactive', 'video_render'), 'mode not interactive or video_render'
except (AssertionError, ValueError) as e:
msg = f'Error in MODE config parameter: {e}'
logging.critical(msg)
assert False, msg
# set timestep for user interactions in interactive mode
try:
self.keyboard_timestep: Union[float, int] = controller_cfg["KEYBOARD_TIMESTEP"]
            assert isinstance(self.keyboard_timestep, (float, int)), 'is not float or int'
assert self.keyboard_timestep > 0, 'timestep val must be > 0'
except (AssertionError, ValueError) as e:
msg = f'Error in KEYBOARD_TIMESTEP config parameter: {e}'
logging.critical(msg)
assert False, msg
# set output video path (only use in video_render mode)
try:
self.output_video_path: Union[None, str] = controller_cfg['OUTPUT_VIDEO_PATH']
assert isinstance(self.output_video_path, (NoneType, str)), 'type is not None or str'
if isinstance(self.output_video_path, str):
assert Path(self.output_video_path).suffix in ('.gif', '.mp4'), 'output video extension not .gif or .mp4 '
except (AssertionError, ValueError) as e:
msg = f'Error in OUTPUT_VIDEO_PATH config parameter: {e}'
logging.critical(msg)
assert False, msg
# set output video codec (only use in video_render mode with .mp4)
try:
self.output_video_codec: Union[None, str] = controller_cfg['OUTPUT_VIDEO_CODEC']
assert isinstance(self.output_video_codec, (NoneType, str)), 'type is not None or str'
except (AssertionError, ValueError) as e:
msg = f'Error in OUTPUT_VIDEO_CODEC config parameter: {e}'
logging.critical(msg)
assert False, msg
class CharacterConfig():
class JointDict(TypedDict):
loc: List[float]
name: str
parent: Union[None, str]
def __init__(self, char_cfg_fn: str) -> None: # noqa: C901
character_cfg_p = resolve_ad_filepath(char_cfg_fn, 'character cfg')
with open(str(character_cfg_p), 'r') as f:
char_cfg = yaml.load(f, Loader=yaml.FullLoader)
# validate image height
try:
self.img_height: int = char_cfg['height']
assert isinstance(self.img_height, int), 'type not int'
assert self.img_height > 0, 'must be > 0'
except (AssertionError, ValueError) as e:
msg = f'Error in character height config parameter: {e}'
logging.critical(msg)
assert False, msg
# validate image width
try:
self.img_width: int = char_cfg['width']
assert isinstance(self.img_width, int), 'type not int'
assert self.img_width > 0, 'must be > 0'
except (AssertionError, ValueError) as e:
msg = f'Error in character width config parameter: {e}'
logging.critical(msg)
assert False, msg
# based on height and width, determine what final img dimension will be (post padding)
self.img_dim: int = max(self.img_height, self.img_width)
# validate skeleton
try:
self.skeleton: List[CharacterConfig.JointDict] = []
for joint in char_cfg['skeleton']:
# ensure loc input is valid...
loc: List[int] = joint['loc']
assert len(loc) == 2, 'joint loc must be of length 2'
assert loc[0] >= 0, 'x val must be >= 0'
assert loc[0] < self.img_width, 'x val must be < image width'
assert loc[1] >= 0, 'y val must be >= 0'
assert loc[1] < self.img_height, 'y val must be < image height'
# ... then scale to between 0-1 based on img dim
loc_x: float = loc[0] / self.img_dim # width
loc_y: float = loc[1] / self.img_dim + (1 - self.img_height / self.img_dim) # height
# validate joint name
name: str = joint['name']
assert isinstance(name, str), 'name must be str'
# validate joint parent
parent: Union[None, str] = joint['parent']
assert isinstance(parent, (NoneType, str)), 'parent must be str or NoneType'
self.skeleton.append({'loc': [loc_x, loc_y], 'name': name, 'parent': parent})
except AssertionError as e:
msg = f'Error in character skeleton: {e}'
logging.critical(msg)
assert False, msg
# validate skeleton joint parents
try:
names: List[str] = [joint['name'] for joint in self.skeleton]
for joint in self.skeleton:
assert isinstance(joint['parent'], NoneType) or joint['parent'] in names, f'joint.parent not None and not valid joint name: {joint}'
except AssertionError as e:
msg = f'Error in character skeleton: {e}'
logging.critical(msg)
assert False, msg
# validate mask and texture files
try:
self.mask_p: Path = character_cfg_p.parent / 'mask.png'
self.txtr_p: Path = character_cfg_p.parent / 'texture.png'
assert self.mask_p.exists(), f'cannot find character mask: {self.mask_p}'
assert self.txtr_p.exists(), f'cannot find character texture: {self.txtr_p}'
except AssertionError as e:
msg = f'Error validating character files: {e}'
logging.critical(msg)
assert False, msg
class MotionConfig():
def __init__(self, motion_cfg_fn: str) -> None: # noqa: C901
motion_cfg_p = resolve_ad_filepath(motion_cfg_fn, 'motion cfg')
with open(str(motion_cfg_p), 'r') as f:
motion_cfg = yaml.load(f, Loader=yaml.FullLoader)
# validate start_frame_idx
try:
self.start_frame_idx: int = motion_cfg.get('start_frame_idx', 0)
assert isinstance(self.start_frame_idx, int), 'type not int'
            assert self.start_frame_idx >= 0, 'start_frame_idx must be >= 0'
except (AssertionError, ValueError) as e:
msg = f'Error validating start_frame_idx: {e}'
logging.critical(msg)
assert False, msg
# validate end_frame_idx
try:
self.end_frame_idx: Optional[int] = motion_cfg.get('end_frame_idx', None)
assert isinstance(self.end_frame_idx, (NoneType, int)), 'type not NoneType or int'
if isinstance(self.end_frame_idx, int):
                assert self.end_frame_idx >= self.start_frame_idx, 'end_frame_idx must be >= start_frame_idx'
except (AssertionError, ValueError) as e:
msg = f'Error validating end_frame_idx: {e}'
logging.critical(msg)
assert False, msg
# validate frame time override
try:
self.frame_time: Optional[float] = motion_cfg.get('frame_time', None)
assert isinstance(self.frame_time, (NoneType, float)), 'is not None or float'
except (AssertionError, ValueError) as e:
msg = f'Error in frame_time config parameter: {e}'
logging.critical(msg)
assert False, msg
# validate groundplane joint
try:
self.groundplane_joint: str = motion_cfg['groundplane_joint']
assert isinstance(self.groundplane_joint, str), 'groundplane joint must be str'
except (AssertionError, ValueError) as e:
msg = f'Error validating groundplane joint: {e}'
logging.critical(msg)
assert False, msg
# validate forward_perp_joint_vectors
try:
self.forward_perp_joint_vectors: List[Tuple[str, str]] = motion_cfg['forward_perp_joint_vectors']
assert len(self.forward_perp_joint_vectors) > 0, 'forward_perp_joint_vectors len must be > 0'
for each in self.forward_perp_joint_vectors:
                assert len(each) == 2, 'each list in forward_perp_joint_vectors must have len = 2'
except (AssertionError, ValueError) as e:
msg = f'Error validating forward_perp_joint_vectors: {e}'
logging.critical(msg)
assert False, msg
# validate scale
try:
self.scale: float = motion_cfg['scale']
assert isinstance(self.scale, (int, float)), 'scale must be float or int'
assert self.scale > 0, 'scale must be > 0'
except (AssertionError, ValueError) as e:
msg = f'Error validating scale: {e}'
logging.critical(msg)
assert False, msg
# validate up
try:
self.up: str = motion_cfg['up']
            assert self.up in ['+y', '+z'], 'up must be "+y" or "+z"'
except (AssertionError, ValueError) as e:
msg = f'Error validating up: {e}'
logging.critical(msg)
assert False, msg
# validate bvh_p
try:
self.bvh_p: Path = resolve_ad_filepath(motion_cfg['filepath'], 'bvh filepath')
except (AssertionError, ValueError) as e:
msg = f'Error validating bvh_p: {e}'
logging.critical(msg)
assert False, msg
def validate_bvh(self, bvh_joint_names: List[str]) -> None:
""" Performs all the validation steps that depend upon knowing the BVH joint names. This should be called once the BVH had been loaded."""
try:
for prox_joint_name, dist_joint_name in self.forward_perp_joint_vectors:
assert prox_joint_name in bvh_joint_names, f'invalid prox_joint name in motion_cfg.forward_perp_joint_vectors: {prox_joint_name}'
assert dist_joint_name in bvh_joint_names, f'invalid dist_joint name in motion_cfg.forward_perp_joint_vectors: {dist_joint_name}'
except (AssertionError, ValueError) as e:
msg = f'Error validating forward_perp_joint_vector joints: {e}'
logging.critical(msg)
assert False, msg
class RetargetConfig():
class BvhProjectionBodypartGroup(TypedDict):
bvh_joint_names: List[str]
method: str
name: str
class CharBodypartGroup(TypedDict):
bvh_depth_drivers: List[str]
char_joints: List[str]
class CharBvhRootOffset(TypedDict):
bvh_projection_bodypart_group_for_offset: str
bvh_joints: List[List[str]]
char_joints: List[List[str]]
def __init__(self, retarget_cfg_fn: str) -> None: # noqa: C901
retarget_cfg_p = resolve_ad_filepath(retarget_cfg_fn, 'retarget cfg')
with open(str(retarget_cfg_p), 'r') as f:
retarget_cfg = yaml.load(f, Loader=yaml.FullLoader)
# validate character starting location
try:
self.char_start_loc = retarget_cfg['char_starting_location']
assert len(self.char_start_loc) == 3, 'char start loc must be of len 3'
for val in self.char_start_loc:
assert isinstance(val, (float, int)), 'type must be float or int'
except (AssertionError, ValueError) as e:
msg = f'Error validating char start location: {e}'
logging.critical(msg)
assert False, msg
# validate bvh project bodypart groups
self.bvh_projection_bodypart_groups: List[RetargetConfig.BvhProjectionBodypartGroup]
try:
self.bvh_projection_bodypart_groups = retarget_cfg['bvh_projection_bodypart_groups']
for group in self.bvh_projection_bodypart_groups:
assert group['method'] in ['pca', 'sagittal', 'frontal'], 'group method must be "pca", "sagittal", or "frontal"'
except (AssertionError, ValueError) as e:
msg = f'Error validating bvh_projection_bodypart_groups: {e}'
logging.critical(msg)
assert False, msg
# Check that group names are unique
try:
group_names = [group['name'] for group in self.bvh_projection_bodypart_groups]
assert len(group_names) == len(set(group_names)), 'group names are not unique'
except AssertionError as e:
msg = f'Error validating bvh_projection_bodypart_groups: {e}'
logging.critical(msg)
assert False, msg
# validate char bodypart groups
self.char_bodypart_groups: List[RetargetConfig.CharBodypartGroup]
try:
self.char_bodypart_groups = retarget_cfg['char_bodypart_groups']
for group in self.char_bodypart_groups:
assert len(group['bvh_depth_drivers']) > 0, 'bvh_depth_drivers must have at least one joint specified'
except (AssertionError, ValueError) as e:
msg = f'Error validating char_bodypart_groups: {e}'
logging.critical(msg)
assert False, msg
# validate char bvh root offset
self.char_bvh_root_offset: RetargetConfig.CharBvhRootOffset
try:
self.char_bvh_root_offset = retarget_cfg['char_bvh_root_offset']
assert len(self.char_bvh_root_offset['bvh_joints']) > 0, 'bvh_joints list must be greater than zero'
for each in self.char_bvh_root_offset['bvh_joints']:
assert len(each) > 0, 'each list in bvh_joints must have len > 0'
assert len(self.char_bvh_root_offset['char_joints']) > 0, 'char_joints list must be greater than zero'
for each in self.char_bvh_root_offset['char_joints']:
assert len(each) > 0, 'each list in char_joints must have len > 0'
assert isinstance(self.char_bvh_root_offset['bvh_projection_bodypart_group_for_offset'], str), 'bvh_projection_bodypart_group_for_offset must be str'
except (AssertionError, ValueError) as e:
msg = f'Error validating char_bvh_root_offset: {e}'
logging.critical(msg)
assert False, msg
# validate char joint bvh joints mapping
self.char_joint_bvh_joints_mapping: Dict[str, Tuple[str, str]]
try:
self.char_joint_bvh_joints_mapping = retarget_cfg['char_joint_bvh_joints_mapping']
for key, val in self.char_joint_bvh_joints_mapping.items():
assert isinstance(key, str), 'key must be str'
assert isinstance(val, tuple), 'val must be tuple'
assert len(val) == 2, 'val must be of len 2'
assert isinstance(val[0], str) and isinstance(val[1], str), 'values must be str'
except (AssertionError, ValueError) as e:
            msg = f'Error validating char_joint_bvh_joints_mapping: {e}'
logging.critical(msg)
assert False, msg
# validate char runtime checks
self.char_runtime_checks: List[str]
try:
self.char_runtime_checks = retarget_cfg['char_runtime_checks']
for check in self.char_runtime_checks:
assert check[0] in ['above'], 'currently only above check is supported'
if check[0] == 'above':
assert len(check) == 4, 'above check needs 3 additional parameters'
except (AssertionError, ValueError) as e:
msg = f'Error validating char_runtime_checks: {e}'
logging.critical(msg)
assert False, msg
def validate_char_and_bvh_joint_names(self, char_joint_names: List[str], bvh_joint_names: List[str]) -> None: # noqa: C901
# validate bvh_projection_bodypart_groups
try:
for group in self.bvh_projection_bodypart_groups:
for bvh_joint_name in group['bvh_joint_names']:
assert bvh_joint_name in bvh_joint_names, f'bvh_joint_name not valid: {bvh_joint_name}'
except AssertionError as e:
msg = f'Error validating bvh_projection_bodypart_groups: {e}'
logging.critical(msg)
assert False, msg
# validate char_bodypart_groups
try:
for group in self.char_bodypart_groups:
# check that bvh joint drivers are valid bvh joints
for bvh_joint_name in group['bvh_depth_drivers']:
assert bvh_joint_name in bvh_joint_names, f'bvh_depth_driver joint name invalid: {bvh_joint_name}'
# check that all char_joints are valid character joints
for char_joint_name in group['char_joints']:
assert char_joint_name in char_joint_names, f'char_joints joint name invalid: {char_joint_name}'
except AssertionError as e:
msg = f'Error validating char_bodypart_groups: {e}'
logging.critical(msg)
assert False, msg
# validate char_bvh_root_offset
try:
# check that bvh_projection_bodypart_group_for_offset matches a bvh_projection_bodypart_group name
group_names = [group['name'] for group in self.bvh_projection_bodypart_groups]
assert self.char_bvh_root_offset['bvh_projection_bodypart_group_for_offset'] in group_names, 'invalid bvh_projection_bodypart_group_for_offset'
# check bvh_joints contains valid joints
for bvh_joint_name_group in self.char_bvh_root_offset['bvh_joints']:
for joint_name in bvh_joint_name_group:
assert joint_name in bvh_joint_names, f'invalid joint name in bvh_joints: {joint_name}'
# check char_joints are valid joints
for char_joint_name_group in self.char_bvh_root_offset['char_joints']:
for joint_name in char_joint_name_group:
assert joint_name in char_joint_names, f'invalid joint name in char_joints: {joint_name}'
except AssertionError as e:
msg = f'Error validating char_bvh_root_offset: {e}'
logging.critical(msg)
assert False, msg
# validate char_joint_bvh_joints_mapping
try:
# check that dict keys correspond to valid character joints
for char_joint_name in self.char_joint_bvh_joints_mapping.keys():
assert char_joint_name in char_joint_names, f'invalid char_joint_name: {char_joint_name}'
# check that dict values correspond to valid bvh joints
for bvh_prox_joint_name, bvh_dist_joint_name in self.char_joint_bvh_joints_mapping.values():
assert bvh_prox_joint_name in bvh_joint_names, f'invalid bvh_prox_joint_name: {bvh_prox_joint_name}'
assert bvh_dist_joint_name in bvh_joint_names, f'invalid bvh_dist_joint_name: {bvh_dist_joint_name}'
except AssertionError as e:
msg = f'Error validating char_joint_bvh_joints_mapping: {e}'
logging.critical(msg)
assert False, msg
# validate char runtime checks
try:
for check in self.char_runtime_checks:
if check[0] == 'above':
# check that, if above test, following 3 params are valid character joint names
_, target_joint_name, joint1_name, joint2_name = check
assert target_joint_name in char_joint_names, f'above test target_joint_name invalid {target_joint_name}'
assert joint1_name in char_joint_names, f'above test joint1_name invalid {joint1_name}'
assert joint2_name in char_joint_names, f'above test joint2_name invalid {joint2_name}'
except AssertionError as e:
msg = f'Error validating char_runtime_checks: {e}'
logging.critical(msg)
assert False, msg
NoneType = type(None) # needed for type checking
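# A minimal user-level mvc config that passes the validation above, shown as a
# sketch (file paths are hypothetical); any section keys the user omits are filled
# in from mvc_base_cfg.yaml during the merge in Config.__init__:
#
#   scene:
#     ANIMATED_CHARACTERS:
#       - character_cfg: garlic_out/char_cfg.yaml
#         motion_cfg: examples/config/motion/dab.yaml
#         retarget_cfg: examples/config/retarget/fair1_ppf.yaml
#   controller:
#     MODE: video_render
#     OUTPUT_VIDEO_PATH: garlic_out/video.gif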
| AnimatedDrawings-main | animated_drawings/config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
def start(user_mvc_cfg_fn: str):
# build cfg
from animated_drawings.config import Config
cfg: Config = Config(user_mvc_cfg_fn)
# create view
from animated_drawings.view.view import View
view = View.create_view(cfg.view)
# create scene
from animated_drawings.model.scene import Scene
scene = Scene(cfg.scene)
# create controller
from animated_drawings.controller.controller import Controller
controller = Controller.create_controller(cfg.controller, scene, view)
# start the run loop
controller.run()
if __name__ == '__main__':
logging.basicConfig(filename='log.txt', level=logging.DEBUG)
# user-specified mvc configuration filepath. Can be absolute, relative to cwd, or relative to ${AD_ROOT_DIR}
user_mvc_cfg_fn = sys.argv[1]
start(user_mvc_cfg_fn)
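# Illustrative programmatic use (a sketch; the config path is hypothetical and
# should point at a user mvc config such as the one written by
# examples/annotations_to_animation.py):
#
#   from animated_drawings import render
#   render.start('path/to/your_mvc_cfg.yaml')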
| AnimatedDrawings-main | animated_drawings/render.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| AnimatedDrawings-main | animated_drawings/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from PIL import Image, ImageOps
import numpy as np
import numpy.typing as npt
import cv2
from pathlib import Path
import logging
from pkg_resources import resource_filename
TOLERANCE = 10**-5
def resolve_ad_filepath(file_name: str, file_type: str) -> Path:
"""
    Given an input filename, attempts to find the file: first as an absolute path or a path
    relative to the cwd, then relative to the animated_drawings package and project root directory.
    If not found, logs a critical error indicating which file_type it is and aborts.
"""
if Path(file_name).exists():
return Path(file_name)
elif Path.joinpath(Path.cwd(), file_name).exists():
return Path.joinpath(Path.cwd(), file_name)
elif Path(resource_filename(__name__, file_name)).exists():
return Path(resource_filename(__name__, file_name))
    elif Path(resource_filename(__name__, str(Path('..', file_name)))).exists():
return Path(resource_filename(__name__, str(Path('..', file_name))))
msg = f'Could not find the {file_type} specified: {file_name}'
logging.critical(msg)
assert False, msg
def read_background_image(file_name: str) -> npt.NDArray[np.uint8]:
"""
Given path to input image file, opens it, flips it based on EXIF tags, if present, and returns image with proper orientation.
"""
# Check the file path
file_path = resolve_ad_filepath(file_name, 'background_image')
# Open the image and rotate as needed depending upon exif tag
image = Image.open(str(file_path))
image = ImageOps.exif_transpose(image)
# Convert to numpy array and flip rightside up
image_np = np.asarray(image)
image_np = cv2.flip(image_np, 0)
# Ensure we have RGBA
if len(image_np.shape) == 3 and image_np.shape[-1] == 3: # if RGB
image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2RGBA)
if len(image_np.shape) == 2: # if grayscale
image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGBA)
return image_np.astype(np.uint8)
| AnimatedDrawings-main | animated_drawings/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Controller Abstract Base Class Module """
from __future__ import annotations
from typing import Optional
from abc import abstractmethod
import logging
from animated_drawings.model.scene import Scene
from animated_drawings.view.view import View
from animated_drawings.config import ControllerConfig
class Controller():
"""
Base Controller class from which all other Controllers will be derived.
Controllers are responsible for:
- running the game loop.
- handling user input and forwarding it to the view or scene.
- triggering the scene's update method
    - triggering the view's render method
"""
def __init__(self, cfg: ControllerConfig, scene: Scene) -> None:
self.cfg: ControllerConfig = cfg
self.scene: Scene = scene
self.view: Optional[View] = None
def set_scene(self, scene: Scene) -> None:
""" Sets the scene attached to this controller."""
self.scene = scene
def set_view(self, view: View) -> None:
""" Sets the view attached to this controller."""
self.view = view
@abstractmethod
def _tick(self) -> None:
"""Subclass and add logic is necessary to progress time"""
@abstractmethod
def _update(self) -> None:
"""Subclass and add logic is necessary to update scene after progressing time"""
@abstractmethod
def _is_run_over(self) -> bool:
"""Subclass and add logic is necessary to end the scene"""
@abstractmethod
def _start_run_loop_iteration(self) -> None:
"""Subclass and add code to start run loop iteration"""
@abstractmethod
def _handle_user_input(self) -> None:
"""Subclass and add code to handle user input"""
@abstractmethod
def _render(self) -> None:
"""Subclass and add logic needed to have viewer render the scene"""
@abstractmethod
def _finish_run_loop_iteration(self) -> None:
"""Subclass and add steps necessary before starting next iteration of run loop. """
@abstractmethod
def _prep_for_run_loop(self) -> None:
"""Subclass and add anything necessary to do immediately prior to run loop. """
@abstractmethod
def _cleanup_after_run_loop(self) -> None:
"""Subclass and add anything necessary to do after run loop has finished. """
def run(self) -> None:
""" The run loop. Subclassed controllers should overload and define functionality for each step in this function."""
self._prep_for_run_loop()
while not self._is_run_over():
self._start_run_loop_iteration()
self._update()
self._render()
self._tick()
self._handle_user_input()
self._finish_run_loop_iteration()
self._cleanup_after_run_loop()
@staticmethod
def create_controller(controller_cfg: ControllerConfig, scene: Scene, view: View) -> Controller:
""" Takes in a controller dictionary from mvc config file, scene, and view. Constructs and return appropriate controller."""
if controller_cfg.mode == 'video_render':
from animated_drawings.controller.video_render_controller import VideoRenderController
return VideoRenderController(controller_cfg, scene, view,)
elif controller_cfg.mode == 'interactive':
from animated_drawings.controller.interactive_controller import InteractiveController
from animated_drawings.view.window_view import WindowView
assert isinstance(view, WindowView) # for static analysis. checks elsewhere ensure this always passes
return InteractiveController(controller_cfg, scene, view)
else:
msg = f'Unknown controller mode specified: {controller_cfg.mode}'
logging.critical(msg)
assert False, msg
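# A minimal sketch of a concrete Controller (illustrative only, not part of the
# library): it fills in every abstract hook above so run() advances the scene a
# fixed number of steps at a fixed timestep, with no rendering and no user input.
class FixedStepController(Controller):
    """ Steps the scene forward `steps` times by `delta_t` seconds, then exits. """

    def __init__(self, cfg: ControllerConfig, scene: Scene, steps: int = 100, delta_t: float = 1 / 30) -> None:
        super().__init__(cfg, scene)
        self.steps_left = steps
        self.delta_t = delta_t

    def _prep_for_run_loop(self) -> None:
        logging.info(f'FixedStepController starting: {self.steps_left} steps')

    def _is_run_over(self) -> bool:
        return self.steps_left <= 0

    def _start_run_loop_iteration(self) -> None:
        pass  # nothing to clear; a rendering controller would clear its window here

    def _update(self) -> None:
        self.scene.update_transforms()

    def _render(self) -> None:
        pass  # no view attached in this sketch

    def _tick(self) -> None:
        self.scene.progress_time(self.delta_t)

    def _handle_user_input(self) -> None:
        pass  # headless: no input to handle

    def _finish_run_loop_iteration(self) -> None:
        self.steps_left -= 1

    def _cleanup_after_run_loop(self) -> None:
        logging.info('FixedStepController finished')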
| AnimatedDrawings-main | animated_drawings/controller/controller.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Interactive Controller Class Module """
import time
from typing import Optional
import glfw
from animated_drawings.controller.controller import Controller
from animated_drawings.model.scene import Scene
from animated_drawings.view.window_view import WindowView
from animated_drawings.config import ControllerConfig
class InteractiveController(Controller):
""" Interactive Controller Class """
def __init__(self, cfg: ControllerConfig, scene: Scene, view: WindowView) -> None:
super().__init__(cfg, scene)
self.view: WindowView = view
self.prev_time: float = 0.0 # tracks real-world time passing between run loops
self.pause: bool = False # tracks whether time progresses in the scene
glfw.init()
glfw.set_key_callback(self.view.win, self._on_key)
def _on_key(self, _win, key: int, _scancode, action, _mods) -> None: # noqa: C901
if action not in (glfw.PRESS, glfw.REPEAT):
return
# close window
if key in (glfw.KEY_ESCAPE, glfw.KEY_Q):
glfw.set_window_should_close(self.view.win, True)
# move camera forward
elif key == glfw.KEY_W:
_, _, fwd = self.view.camera.get_right_up_fwd_vectors()
self.view.camera.offset(-0.1 * fwd)
# move camera back
elif key == glfw.KEY_S:
_, _, fwd = self.view.camera.get_right_up_fwd_vectors()
self.view.camera.offset(0.1 * fwd)
# move camera right
elif key == glfw.KEY_A:
right, _, _ = self.view.camera.get_right_up_fwd_vectors()
self.view.camera.offset(-0.1 * right)
# move camera left
elif key == glfw.KEY_D:
right, _, _ = self.view.camera.get_right_up_fwd_vectors()
self.view.camera.offset(0.1 * right)
# move camera up
elif key == glfw.KEY_E:
_, up, _ = self.view.camera.get_right_up_fwd_vectors()
self.view.camera.offset(0.1 * up)
# move camera down
elif key == glfw.KEY_R:
_, up, _ = self.view.camera.get_right_up_fwd_vectors()
self.view.camera.offset(-0.1 * up)
# toggle start/stop time
elif key == glfw.KEY_SPACE:
self.pause = not self.pause
self.prev_time = time.time()
# step forward in time
elif key == glfw.KEY_RIGHT:
self._tick(self.cfg.keyboard_timestep)
# step backward in time
elif key == glfw.KEY_LEFT:
self._tick(-self.cfg.keyboard_timestep)
    def _is_run_over(self) -> bool:
return glfw.window_should_close(self.view.win)
def _prep_for_run_loop(self) -> None:
self.prev_time = time.time()
def _start_run_loop_iteration(self) -> None:
self.view.clear_window()
def _tick(self, delta_t: Optional[float] = None) -> None:
# if passed a specific value to progress time by, do so
if delta_t:
self.scene.progress_time(delta_t)
# otherwise, if scene is paused, do nothing
elif self.pause:
pass
# otherwise, calculate real time passed since last call and progress scene by that amount
else:
cur_time = time.time()
self.scene.progress_time(cur_time - self.prev_time)
self.prev_time = cur_time
def _update(self) -> None:
self.scene.update_transforms()
def _handle_user_input(self) -> None:
glfw.poll_events()
def _render(self) -> None:
self.view.render(self.scene)
def _finish_run_loop_iteration(self) -> None:
self.view.swap_buffers()
def _cleanup_after_run_loop(self) -> None:
self.view.cleanup()
| AnimatedDrawings-main | animated_drawings/controller/interactive_controller.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Video Render Controller Class Module """
from __future__ import annotations
import time
import logging
from typing import List
from pathlib import Path
from abc import abstractmethod
import numpy as np
import numpy.typing as npt
import cv2
from OpenGL import GL
from tqdm import tqdm
from animated_drawings.controller.controller import Controller
from animated_drawings.model.scene import Scene
from animated_drawings.model.animated_drawing import AnimatedDrawing
from animated_drawings.view.view import View
from animated_drawings.config import ControllerConfig
NoneType = type(None) # for type checking below
class VideoRenderController(Controller):
""" Video Render Controller is used to non-interactively generate a video file """
def __init__(self, cfg: ControllerConfig, scene: Scene, view: View) -> None:
super().__init__(cfg, scene)
self.view: View = view
self.scene: Scene = scene
self.frames_left_to_render: int # when this becomes zero, stop rendering
self.delta_t: float # amount of time to progress scene between renders
self._set_frames_left_to_render_and_delta_t()
self.render_start_time: float # track when we started to render frames (for performance stats)
self.frames_rendered: int = 0 # track how many frames we've rendered
self.video_width: int
self.video_height: int
self.video_width, self.video_height = self.view.get_framebuffer_size()
self.video_writer: VideoWriter = VideoWriter.create_video_writer(self)
self.frame_data = np.empty([self.video_height, self.video_width, 4], dtype='uint8') # 4 for RGBA
self.progress_bar = tqdm(total=self.frames_left_to_render)
def _set_frames_left_to_render_and_delta_t(self) -> None:
"""
Based upon the animated drawings within the scene, computes maximum number of frames in a BVH.
Checks that all frame times within BVHs are equal, logs a warning if not.
Uses results to determine number of frames and frame time for output video.
"""
max_frames = 0
frame_time: List[float] = []
for child in self.scene.get_children():
if not isinstance(child, AnimatedDrawing):
continue
max_frames = max(max_frames, child.retargeter.bvh.frame_max_num)
frame_time.append(child.retargeter.bvh.frame_time)
if not all(x == frame_time[0] for x in frame_time):
            msg = f'frame times of BVH files don\'t match. Using first value: {frame_time[0]}'
logging.warning(msg)
self.frames_left_to_render = max_frames
self.delta_t = frame_time[0]
def _prep_for_run_loop(self) -> None:
self.run_loop_start_time = time.time()
def _is_run_over(self) -> bool:
return self.frames_left_to_render == 0
def _start_run_loop_iteration(self) -> None:
self.view.clear_window()
def _update(self) -> None:
self.scene.update_transforms()
def _render(self) -> None:
self.view.render(self.scene)
def _tick(self) -> None:
self.scene.progress_time(self.delta_t)
def _handle_user_input(self) -> None:
""" ignore all user input when rendering video file """
def _finish_run_loop_iteration(self) -> None:
# get pixel values from the frame buffer, send them to the video writer
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, 0)
GL.glReadPixels(0, 0, self.video_width, self.video_height, GL.GL_BGRA, GL.GL_UNSIGNED_BYTE, self.frame_data)
self.video_writer.process_frame(self.frame_data[::-1, :, :].copy())
# update our counts and progress_bar
self.frames_left_to_render -= 1
self.frames_rendered += 1
self.progress_bar.update(1)
def _cleanup_after_run_loop(self) -> None:
logging.info(f'Rendered {self.frames_rendered} frames in {time.time()-self.run_loop_start_time} seconds.')
self.view.cleanup()
_time = time.time()
self.video_writer.cleanup()
        logging.info(f'Wrote video to file in {time.time()-_time} seconds.')
class VideoWriter():
""" Wrapper to abstract the different backends necessary for writing different video filetypes """
def __init__(self) -> None:
pass
@abstractmethod
def process_frame(self, frame: npt.NDArray[np.uint8]) -> None:
""" Subclass must specify how to handle each frame of data received. """
pass
@abstractmethod
def cleanup(self) -> None:
""" Subclass must specify how to finish up after all frames have been received. """
pass
@staticmethod
def create_video_writer(controller: VideoRenderController) -> VideoWriter:
assert isinstance(controller.cfg.output_video_path, str) # for static analysis
output_p = Path(controller.cfg.output_video_path)
output_p.parent.mkdir(exist_ok=True, parents=True)
msg = f' Writing video to: {output_p.resolve()}'
logging.info(msg)
print(msg)
if output_p.suffix == '.gif':
return GIFWriter(controller)
elif output_p.suffix == '.mp4':
return MP4Writer(controller)
else:
msg = f'Unsupported output video file extension ({output_p.suffix}). Only .gif and .mp4 are supported.'
logging.critical(msg)
assert False, msg
class GIFWriter(VideoWriter):
""" Video writer for creating transparent, animated GIFs with Pillow """
def __init__(self, controller: VideoRenderController) -> None:
assert isinstance(controller.cfg.output_video_path, str) # for static analysis
self.output_p = Path(controller.cfg.output_video_path)
self.duration = int(controller.delta_t*1000)
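        # delta_t is seconds per frame; Pillow's GIF `duration` argument is in
        # milliseconds per frame, hence the factor of 1000 (e.g. 1/30 s -> 33 ms).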
if self.duration < 20:
msg = f'Specified duration of .gif is too low, replacing with 20: {self.duration}'
            logging.warning(msg)
self.duration = 20
self.frames: List[npt.NDArray[np.uint8]] = []
def process_frame(self, frame: npt.NDArray[np.uint8]) -> None:
""" Reorder channels and save frames as they arrive"""
self.frames.append(cv2.cvtColor(frame, cv2.COLOR_BGRA2RGBA).astype(np.uint8))
def cleanup(self) -> None:
""" Write all frames to output path specified."""
from PIL import Image
self.output_p.parent.mkdir(exist_ok=True, parents=True)
logging.info(f'VideoWriter will write to {self.output_p.resolve()}')
ims = [Image.fromarray(a_frame) for a_frame in self.frames]
ims[0].save(self.output_p, save_all=True, append_images=ims[1:], duration=self.duration, disposal=2, loop=0)
class MP4Writer(VideoWriter):
""" Video writer for creating mp4 videos with cv2.VideoWriter """
def __init__(self, controller: VideoRenderController) -> None:
# validate and prep output path
if isinstance(controller.cfg.output_video_path, NoneType):
msg = 'output video path not specified for mp4 video writer'
logging.critical(msg)
assert False, msg
output_p = Path(controller.cfg.output_video_path)
output_p.parent.mkdir(exist_ok=True, parents=True)
logging.info(f'VideoWriter will write to {output_p.resolve()}')
# validate and prep codec
if isinstance(controller.cfg.output_video_codec, NoneType):
msg = 'output video codec not specified for mp4 video writer'
logging.critical(msg)
assert False, msg
fourcc = cv2.VideoWriter_fourcc(*controller.cfg.output_video_codec)
logging.info(f'Using codec {controller.cfg.output_video_codec}')
# calculate video writer framerate
frame_rate = round(1/controller.delta_t)
# initialize the video writer
self.video_writer = cv2.VideoWriter(str(output_p), fourcc, frame_rate, (controller.video_width, controller.video_height))
def process_frame(self, frame: npt.NDArray[np.uint8]) -> None:
""" Remove the alpha channel and send to the video writer as it arrives. """
self.video_writer.write(frame[:, :, :3])
def cleanup(self) -> None:
self.video_writer.release()
| AnimatedDrawings-main | animated_drawings/controller/video_render_controller.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations # so we can refer to class Type inside class
import numpy as np
import numpy.typing as npt
import logging
from typing import Union, Iterable, Tuple
from numbers import Number
from copy import copy
from animated_drawings.utils import TOLERANCE
class Vectors():
"""
Wrapper class around ndarray interpreted as one or more vectors of equal dimensionality
When passing in existing Vectors, new Vectors object will share the underlying nparray, so be careful.
"""
def __init__(self, vs_: Union[Iterable[Union[float, int, Vectors, npt.NDArray[np.float32]]], Vectors]) -> None: # noqa: C901
self.vs: npt.NDArray[np.float32]
# initialize from single ndarray
if isinstance(vs_, np.ndarray):
if len(vs_.shape) == 1:
vs_ = np.expand_dims(vs_, axis=0)
self.vs = vs_
# initialize from tuple or list of numbers
elif isinstance(vs_, (tuple, list)) and isinstance(vs_[0], Number):
try:
vs_ = np.array(vs_)
if len(vs_.shape) == 1:
vs_ = np.expand_dims(vs_, axis=0)
except Exception as e:
msg = f'Error initializing Vectors: {str(e)}'
logging.critical(msg)
assert False, msg
self.vs = vs_
# initialize from tuple or list of ndarrays
elif isinstance(vs_, (tuple, list)) and isinstance(vs_[0], np.ndarray):
try:
vs_ = np.stack(vs_) # pyright: ignore[reportGeneralTypeIssues]
except Exception as e:
msg = f'Error initializing Vectors: {str(e)}'
logging.critical(msg)
assert False, msg
self.vs = vs_ # pyright: ignore[reportGeneralTypeIssues]
# initialize from tuple or list of Vectors
elif isinstance(vs_, (tuple, list)) and isinstance(vs_[0], Vectors):
try:
vs_ = np.stack([v.vs.squeeze() for v in vs_]) # pyright: ignore[reportGeneralTypeIssues]
except Exception as e:
msg = f'Error initializing Vectors: {str(e)}'
logging.critical(msg)
assert False, msg
self.vs = vs_
# initialize from single Vectors
elif isinstance(vs_, Vectors):
self.vs = vs_.vs
else:
msg = 'Vectors must be constructed from Vectors, ndarray, or Tuples/List of floats/ints or Vectors'
logging.critical(msg)
assert False, msg
def norm(self) -> None:
ns: npt.NDArray[np.float64] = np.linalg.norm(self.vs, axis=-1)
if np.min(ns) < TOLERANCE:
logging.info(f"Encountered values close to zero in vector norm. Replacing with {TOLERANCE}")
ns[ns < TOLERANCE] = TOLERANCE
self.vs = self.vs / np.expand_dims(ns, axis=-1)
def cross(self, v2: Vectors) -> Vectors:
""" Cross product of a series of 2 or 3 dimensional vectors. All dimensions of vs must match."""
if self.vs.shape != v2.vs.shape:
msg = f'Cannot cross product different sized vectors: {self.vs.shape} {v2.vs.shape}.'
logging.critical(msg)
assert False, msg
        if self.vs.shape[-1] not in [2, 3]:
msg = f'Cannot cross product vectors of size: {self.vs.shape[-1]}. Must be 2 or 3.'
logging.critical(msg)
assert False, msg
return Vectors(np.cross(self.vs, v2.vs))
def perpendicular(self, ccw: bool = True) -> Vectors:
"""
Returns ndarray of vectors perpendicular to the original ones.
Only 2D and 3D vectors are supported.
By default returns the counter clockwise vector, but passing ccw=False returns clockwise
"""
        if self.vs.shape[-1] not in [2, 3]:
msg = f'Cannot get perpendicular of vectors of size: {self.vs.shape[-1]}. Must be 2 or 3.'
logging.critical(msg)
assert False, msg
v_up: Vectors = Vectors(np.tile([0.0, 1.0, 0.0], [*self.shape[:-1], 1]))
v_perp = v_up.cross(self)
v_perp.norm()
if not ccw:
v_perp *= -1
return v_perp
def average(self) -> Vectors:
""" Return the average of a collection of vectors, along the first axis"""
return Vectors(np.mean(self.vs, axis=0))
def copy(self) -> Vectors:
return copy(self)
@property
def shape(self) -> Tuple[int, ...]:
return self.vs.shape
@property
def length(self) -> npt.NDArray[np.float32]:
return np.linalg.norm(self.vs, axis=-1).astype(np.float32)
def __mul__(self, val: float) -> Vectors:
return Vectors(self.vs * val)
def __truediv__(self, scale: Union[int, float]) -> Vectors:
return Vectors(self.vs / scale)
def __sub__(self, other: Vectors) -> Vectors:
if self.vs.shape != other.vs.shape:
msg = 'Attempted to subtract Vectors with different dimensions'
logging.critical(msg)
assert False, msg
return Vectors(np.subtract(self.vs, other.vs))
def __add__(self, other: Vectors) -> Vectors:
if self.vs.shape != other.vs.shape:
msg = 'Attempted to add Vectors with different dimensions'
logging.critical(msg)
assert False, msg
return Vectors(np.add(self.vs, other.vs))
def __copy__(self) -> Vectors:
return Vectors(self)
def __str__(self) -> str:
return f"Vectors({str(self.vs)})"
def __repr__(self) -> str:
return f"Vectors({str(self.vs)})"
| AnimatedDrawings-main | animated_drawings/model/vectors.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import OpenGL.GL as GL
import ctypes
from animated_drawings.model.transform import Transform
class Box(Transform):
def __init__(self, shader_name: str = 'color_shader') -> None:
super().__init__()
self.points = np.array([
[ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
[-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
[-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
[-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
[-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
[-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
[-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
[-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
[ 0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
[ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
[ 0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
[-0.5, -0.5, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
[-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
[ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[ 0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[-0.5, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[-0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[ 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
], np.float32)
self.indices = np.array([2, 1, 0,
5, 4, 3,
6, 7, 8,
9, 10, 11,
14, 13, 12,
17, 16, 15,
18, 19, 20,
21, 22, 23,
24, 25, 26,
27, 28, 29,
32, 31, 30,
35, 34, 33
], np.uint32)
self.material = {
'ambient': np.array([0.4, 0.4, 0.4], np.float32),
'diffuse': np.array([0.4, 0.4, 0.4], np.float32),
'specular': np.array([0.3, 0.0, 0.0], np.float32),
'shininess': 32
}
self.shader_name: str = shader_name
self._is_opengl_initialized: bool = False # keep track of whether self._initialize_opengl_resources was called.
def _initialize_opengl_resources(self) -> None:
""" Method to initialize the OpenGL arrays and buffers necessary to draw the object.
It's better to not initialize these things unless we're definitely going to be drawing the object,
as calling GL functions without calling glfw.init() first can cause a mysterious segfault.
This way, unit tests and other non-rendering operations can proceed without requiring a Controller.
"""
self.vao = GL.glGenVertexArrays(1)
self.vbo = GL.glGenBuffers(1)
self.ebo = GL.glGenBuffers(1)
GL.glBindVertexArray(self.vao)
# buffer vertex data
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
# buffer element index data
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, self.indices, GL.GL_STATIC_DRAW)
vert_bytes = 4 * self.points.shape[1] # 4 is byte size of np.float32
pos_offset = 4 * 0
color_offset = 4 * 3
normals_offset = 4 * 6
# position attributes
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(pos_offset))
GL.glEnableVertexAttribArray(0)
# color attributes
GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(color_offset))
GL.glEnableVertexAttribArray(1)
# normals attributes
GL.glVertexAttribPointer(2, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(normals_offset))
GL.glEnableVertexAttribArray(2)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
self._is_opengl_initialized = True
def rebuffer_vertex_data(self) -> None:
GL.glBindVertexArray(self.vao)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
def _draw(self, **kwargs) -> None:
if not self._is_opengl_initialized:
self._initialize_opengl_resources()
GL.glUseProgram(kwargs['shader_ids'][self.shader_name])
model_loc = GL.glGetUniformLocation(kwargs['shader_ids'][self.shader_name], "model")
GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
GL.glBindVertexArray(self.vao)
GL.glDrawArrays(GL.GL_TRIANGLES, 0, 36)
| AnimatedDrawings-main | animated_drawings/model/box.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations # so we can refer to class Type inside class
import numpy as np
import numpy.typing as npt
import logging
from typing import Union, Iterable, List, Tuple
from animated_drawings.model.vectors import Vectors
import math
from animated_drawings.utils import TOLERANCE
from functools import reduce
class Quaternions:
"""
Wrapper class around ndarray interpreted as one or more quaternions. Quaternion order is [w, x, y, z]
    When passing in existing Quaternions, the new Quaternions object will share the underlying ndarray, so be careful.
Strongly influenced by Daniel Holden's excellent Quaternions class.
"""
def __init__(self, qs: Union[Iterable[Union[int, float]], npt.NDArray[np.float32], Quaternions]) -> None:
self.qs: npt.NDArray[np.float32]
if isinstance(qs, np.ndarray):
if not qs.shape[-1] == 4:
msg = f'Final dimension passed to Quaternions must be 4. Found {qs.shape[-1]}'
logging.critical(msg)
assert False, msg
if len(qs.shape) == 1:
qs = np.expand_dims(qs, axis=0)
self.qs = qs
        elif isinstance(qs, (tuple, list)):
try:
qs = np.array(qs)
assert qs.shape[-1] == 4
except Exception:
msg = 'Could not convert quaternion data to ndarray with shape[-1] == 4'
logging.critical(msg)
assert False, msg
if len(qs.shape) == 1:
qs = np.expand_dims(qs, axis=0)
self.qs = qs
elif isinstance(qs, Quaternions):
self.qs = qs.qs
else:
msg = 'Quaternions must be constructed from Quaternions or numpy array'
logging.critical(msg)
assert False, msg
self.normalize()
def normalize(self) -> None:
self.qs = self.qs / np.expand_dims(np.sum(self.qs ** 2.0, axis=-1) ** 0.5, axis=-1)
def to_rotation_matrix(self) -> npt.NDArray[np.float32]:
"""
From Ken Shoemake
https://www.ljll.math.upmc.fr/~frey/papers/scientific%20visualisation/Shoemake%20K.,%20Quaternions.pdf
:return: 4x4 rotation matrix representation of quaternions
"""
w = self.qs[..., 0].squeeze()
x = self.qs[..., 1].squeeze()
y = self.qs[..., 2].squeeze()
z = self.qs[..., 3].squeeze()
xx, yy, zz = x**2, y**2, z**2
wx, wy, wz = w*x, w*y, w*z
        xy, xz = x*y, x*z
yz = y*z
# Row 1
r00 = 1 - 2 * (yy + zz)
r01 = 2 * (xy - wz)
r02 = 2 * (xz + wy)
# Row 2
r10 = 2 * (xy + wz)
r11 = 1 - 2 * (xx + zz)
r12 = 2 * (yz - wx)
# Row 3
r20 = 2 * (xz - wy)
r21 = 2 * (yz + wx)
r22 = 1 - 2 * (xx + yy)
return np.array([[r00, r01, r02, 0.0],
[r10, r11, r12, 0.0],
[r20, r21, r22, 0.0],
[0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
@classmethod
def rotate_between_vectors(cls, v1: Vectors, v2: Vectors) -> Quaternions:
""" Computes quaternion rotating from v1 to v2. """
xyz: List[float] = v1.cross(v2).vs.squeeze().tolist()
w: float = math.sqrt((v1.length**2) * (v2.length**2)) + np.dot(v1.vs.squeeze(), v2.vs.squeeze())
ret_q = Quaternions([w, *xyz])
ret_q.normalize()
return ret_q
@classmethod
def from_angle_axis(cls, angles: npt.NDArray[np.float32], axes: Vectors) -> Quaternions:
axes.norm()
if len(angles.shape) == 1:
angles = np.expand_dims(angles, axis=0)
ss = np.sin(angles / 2.0)
cs = np.cos(angles / 2.0)
return Quaternions(np.concatenate([cs, axes.vs * ss], axis=-1))
@classmethod
    def identity(cls, ret_shape: Tuple[int, ...]) -> Quaternions:
qs = np.broadcast_to(np.array([1.0, 0.0, 0.0, 0.0]), [*ret_shape, 4])
return Quaternions(qs)
@classmethod
def from_euler_angles(cls, order: str, angles: npt.NDArray[np.float32]) -> Quaternions:
"""
Applies a series of euler angle rotations. Angles applied from right to left
:param order: string comprised of x, y, and/or z
:param angles: angles in degrees
"""
if len(angles.shape) == 1:
angles = np.expand_dims(angles, axis=0)
if len(order) != angles.shape[-1]:
msg = 'length of orders and angles does not match'
logging.critical(msg)
assert False, msg
_quats = [Quaternions.identity(angles.shape[:-1])]
        for pos, axis_char in enumerate(order):
angle = angles[..., pos] * np.pi / 180
angle = np.expand_dims(angle, axis=1)
axis_char = axis_char.lower()
if axis_char not in 'xyz':
msg = f'order contained unsupported char:{axis_char}'
logging.critical(msg)
assert False, msg
axis = np.zeros([*angles.shape[:-1], 3])
axis[..., ord(axis_char) - ord('x')] = 1.0
_quats.insert(0, Quaternions.from_angle_axis(angle, Vectors(axis)))
ret_q = reduce(lambda a, b: b * a, _quats)
return ret_q
@classmethod
def from_rotation_matrix(cls, M: npt.NDArray[np.float32]) -> Quaternions:
"""
As described here: https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
"""
is_orthogonal = np.isclose(M @ M.T, np.identity(4), atol=TOLERANCE)
if not is_orthogonal.all():
msg = "attempted to create quaternion from non-orthogonal rotation matrix"
logging.critical(msg)
assert False, msg
if not np.isclose(np.linalg.det(M), 1.0):
msg = "attempted to create quaternion from rotation matrix with det != 1"
logging.critical(msg)
assert False, msg
        # Note: Mike Day's article uses row vectors, whereas we use column vectors, so we use the transpose of the matrix here
MT = M.T
m00, m01, m02 = MT[0, 0], MT[0, 1], MT[0, 2]
m10, m11, m12 = MT[1, 0], MT[1, 1], MT[1, 2]
m20, m21, m22 = MT[2, 0], MT[2, 1], MT[2, 2]
if m22 < 0:
if m00 > m11:
t = 1 + m00 - m11 - m22
q = np.array([m12-m21, t, m01+m10, m20+m02])
else:
t = 1 - m00 + m11 - m22
q = np.array([m20-m02, m01+m10, t, m12+m21])
else:
if m00 < -m11:
t = 1 - m00 - m11 + m22
q = np.array([m01-m10, m20+m02, m12+m21, t])
else:
t = 1 + m00 + m11 + m22
q = np.array([ t, m12-m21, m20-m02, m01-m10])
q *= (0.5 / math.sqrt(t))
ret_q = Quaternions(q)
ret_q.normalize()
return ret_q
def __mul__(self, other: Quaternions):
"""
From https://danceswithcode.net/engineeringnotes/quaternions/quaternions.html
"""
s0 = self.qs[..., 0]
s1 = self.qs[..., 1]
s2 = self.qs[..., 2]
s3 = self.qs[..., 3]
r0 = other.qs[..., 0]
r1 = other.qs[..., 1]
r2 = other.qs[..., 2]
r3 = other.qs[..., 3]
t = np.empty(self.qs.shape)
t[..., 0] = r0*s0 - r1*s1 - r2*s2 - r3*s3
t[..., 1] = r0*s1 + r1*s0 - r2*s3 + r3*s2
t[..., 2] = r0*s2 + r1*s3 + r2*s0 - r3*s1
t[..., 3] = r0*s3 - r1*s2 + r2*s1 + r3*s0
return Quaternions(t)
def __neg__(self):
return Quaternions(self.qs * np.array([1, -1, -1, -1]))
def __str__(self):
return f"Quaternions({str(self.qs)})"
def __repr__(self):
return f"Quaternions({str(self.qs)})"
| AnimatedDrawings-main | animated_drawings/model/quaternions.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import ctypes
import heapq
import math
import time
from typing import Dict, List, Tuple, Optional, TypedDict, DefaultDict
from collections import defaultdict
from pathlib import Path
import cv2
import numpy as np
import numpy.typing as npt
from skimage import measure
from shapely import geometry
from OpenGL import GL
from scipy.spatial import Delaunay
from animated_drawings.model.transform import Transform
from animated_drawings.model.time_manager import TimeManager
from animated_drawings.model.retargeter import Retargeter
from animated_drawings.model.arap import ARAP
from animated_drawings.model.joint import Joint
from animated_drawings.model.quaternions import Quaternions
from animated_drawings.model.vectors import Vectors
from animated_drawings.config import CharacterConfig, MotionConfig, RetargetConfig
class AnimatedDrawingMesh(TypedDict):
vertices: npt.NDArray[np.float32]
triangles: List[npt.NDArray[np.int32]]
class AnimatedDrawingsJoint(Joint):
""" Joints within Animated Drawings Rig."""
def __init__(self, name: str, x: float, y: float):
super().__init__(name=name, offset=np.array([x, 1 - y, 0]))
self.starting_theta: float
self.current_theta: float
class AnimatedDrawingRig(Transform):
""" The skeletal rig used to deform the character """
def __init__(self, char_cfg: CharacterConfig):
""" Initializes character rig. """
super().__init__()
# create dictionary populated with joints
joints_d: Dict[str, AnimatedDrawingsJoint]
joints_d = {joint['name']: AnimatedDrawingsJoint(joint['name'], *joint['loc']) for joint in char_cfg.skeleton}
        # assign joints within dictionary as children of their parents
for joint_d in char_cfg.skeleton:
if joint_d['parent'] is None:
continue
joints_d[joint_d['parent']].add_child(joints_d[joint_d['name']])
# updates joint positions to reflect local offsets from their parent joints
def _update_positions(t: Transform):
""" Now that kinematic parent-> child chain is formed, subtract parent world positions to get actual child offsets"""
parent: Optional[Transform] = t.get_parent()
if parent is not None:
offset = np.subtract(t.get_local_position(), parent.get_world_position())
t.set_position(offset)
for c in t.get_children():
_update_positions(c)
_update_positions(joints_d['root'])
# compute the starting rotation (CCW from +Y axis) of each joint
for _, joint in joints_d.items():
parent = joint.get_parent()
if parent is None:
joint.starting_theta = 0
continue
v1_xy = np.array([0.0, 1.0])
v2 = Vectors([np.subtract(joint.get_world_position(), parent.get_world_position())])
v2.norm()
v2_xy: npt.NDArray[np.float32] = v2.vs[0, :2]
theta = np.arctan2(v2_xy[1], v2_xy[0]) - np.arctan2(v1_xy[1], v1_xy[0])
theta = np.degrees(theta)
theta = theta % 360.0
theta = np.where(theta < 0.0, theta + 360, theta)
joint.starting_theta = float(theta)
# attach root joint
self.root_joint = joints_d['root']
self.add_child(self.root_joint)
# cache for later
self.joint_count = joints_d['root'].joint_count()
# set up buffer for visualizing vertices
self.vertices = np.zeros([2 * (self.joint_count - 1), 6], np.float32)
self._is_opengl_initialized: bool = False
self._vertex_buffer_dirty_bit: bool = True
def set_global_orientations(self, bvh_frame_orientations: Dict[str, float]) -> None:
""" Applies orientation from bvh_frame_orientation to the rig. """
self._set_global_orientations(self.root_joint, bvh_frame_orientations)
self._vertex_buffer_dirty_bit = True
def get_joints_2D_positions(self) -> npt.NDArray[np.float32]:
""" Returns array of 2D joints positions for rig. """
return np.array(self.root_joint.get_chain_worldspace_positions()).reshape([-1, 3])[:, :2]
def _compute_buffer_vertices(self, parent: Optional[Transform], pointer: List[int]) -> None:
""" Recomputes values to pass to vertex buffer. Called recursively, pointer is List[int] to emulate pass-by-reference """
if parent is None:
parent = self.root_joint
for c in parent.get_children():
p1 = c.get_world_position()
p2 = parent.get_world_position()
self.vertices[pointer[0], 0:3] = p1
self.vertices[pointer[0] + 1, 0:3] = p2
pointer[0] += 2
self._compute_buffer_vertices(c, pointer)
def _initialize_opengl_resources(self):
self.vao = GL.glGenVertexArrays(1)
self.vbo = GL.glGenBuffers(1)
GL.glBindVertexArray(self.vao)
# buffer vertex data
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vertices, GL.GL_STATIC_DRAW)
vert_bytes: int = 4 * self.vertices.shape[1] # 4 is byte size of np.float32
# position attributes
pos_offset = 4 * 0
GL.glVertexAttribPointer( 0, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(pos_offset))
GL.glEnableVertexAttribArray(0)
# color attributes
color_offset = 4 * 3
GL.glVertexAttribPointer( 1, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(color_offset))
GL.glEnableVertexAttribArray(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
self._is_opengl_initialized = True
def _compute_and_buffer_vertex_data(self):
self._compute_buffer_vertices(parent=self.root_joint, pointer=[0])
GL.glBindVertexArray(self.vao)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vertices, GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
self._vertex_buffer_dirty_bit = False
def _set_global_orientations(self, joint: AnimatedDrawingsJoint, bvh_orientations: Dict[str, float]) -> None:
if joint.name in bvh_orientations.keys():
theta: float = bvh_orientations[str(joint.name)] - joint.starting_theta
theta = np.radians(theta)
joint.current_theta = theta
parent = joint.get_parent()
assert isinstance(parent, AnimatedDrawingsJoint)
if hasattr(parent, 'current_theta'):
theta = theta - parent.current_theta
rotation_q = Quaternions.from_angle_axis(np.array([theta]), axes=Vectors([0.0, 0.0, 1.0]))
parent.set_rotation(rotation_q)
parent.update_transforms()
for c in joint.get_children():
if isinstance(c, AnimatedDrawingsJoint):
self._set_global_orientations(c, bvh_orientations)
def _draw(self, **kwargs):
if not kwargs['viewer_cfg'].draw_ad_rig:
return
if not self._is_opengl_initialized:
self._initialize_opengl_resources()
if self._vertex_buffer_dirty_bit:
self._compute_and_buffer_vertex_data()
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glUseProgram(kwargs['shader_ids']['color_shader'])
model_loc = GL.glGetUniformLocation(kwargs['shader_ids']['color_shader'], "model")
GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
GL.glBindVertexArray(self.vao)
GL.glDrawArrays(GL.GL_LINES, 0, len(self.vertices))
GL.glEnable(GL.GL_DEPTH_TEST)
class AnimatedDrawing(Transform, TimeManager):
"""
The drawn character to be animated.
An AnimatedDrawings object consists of four main parts:
1. A 2D mesh textured with the original drawing, the 'visual' representation of the character
2. A 2D skeletal rig
3. An ARAP module which uses rig joint positions to deform the mesh
4. A retargeting module which reposes the rig.
    After initializing the object, the retargeter must be initialized by calling _initialize_retargeter_bvh().
    Afterwards, only the update() method needs to be called.
"""
def __init__(self, char_cfg: CharacterConfig, retarget_cfg: RetargetConfig, motion_cfg: MotionConfig):
super().__init__()
self.char_cfg: CharacterConfig = char_cfg
self.retarget_cfg: RetargetConfig = retarget_cfg
self.img_dim: int = self.char_cfg.img_dim
# load mask and pad to square
self.mask: npt.NDArray[np.uint8] = self._load_mask()
# load texture and pad to square
self.txtr: npt.NDArray[np.uint8] = self._load_txtr()
# generate the mesh
self.mesh: AnimatedDrawingMesh
self._generate_mesh()
self.rig = AnimatedDrawingRig(self.char_cfg)
self.add_child(self.rig)
# perform runtime checks for character pose, modify retarget config accordingly
self._modify_retargeting_cfg_for_character()
self.joint_to_tri_v_idx: Dict[str, npt.NDArray[np.int32]]
self._initialize_joint_to_triangles_dict()
self.indices: npt.NDArray[np.int32] = np.stack(self.mesh['triangles']).flatten() # order in which to render triangles
self.retargeter: Retargeter
self._initialize_retargeter_bvh(motion_cfg, retarget_cfg)
# initialize arap solver with original joint positions
self.arap = ARAP(self.rig.get_joints_2D_positions(), self.mesh['triangles'], self.mesh['vertices'])
self.vertices: npt.NDArray[np.float32]
self._initialize_vertices()
self._is_opengl_initialized: bool = False
self._vertex_buffer_dirty_bit: bool = True
# pose the animated drawing using the first frame of the bvh
self.update()
def _modify_retargeting_cfg_for_character(self):
"""
        If the character is drawn in particular poses, the orientation-matching retargeting framework can produce poor results.
Therefore, the retargeter config can specify a number of runtime checks and retargeting modifications to make if those checks fail.
"""
for position_test, target_joint_name, joint1_name, joint2_name in self.retarget_cfg.char_runtime_checks:
if position_test == 'above':
""" Checks whether target_joint is 'above' the vector from joint1 to joint2. If it's below, removes it.
This was added to account for head flipping when nose was below shoulders. """
# get joints 1, 2 and target joint
joint1 = self.rig.root_joint.get_transform_by_name(joint1_name)
if joint1 is None:
msg = f'Could not find joint1 in runtime check: {joint1_name}'
logging.critical(msg)
assert False, msg
joint2 = self.rig.root_joint.get_transform_by_name(joint2_name)
if joint2 is None:
msg = f'Could not find joint2 in runtime check: {joint2_name}'
logging.critical(msg)
assert False, msg
target_joint = self.rig.root_joint.get_transform_by_name(target_joint_name)
if target_joint is None:
msg = f'Could not find target_joint in runtime check: {target_joint_name}'
logging.critical(msg)
assert False, msg
# get world positions
joint1_xyz = joint1.get_world_position()
joint2_xyz = joint2.get_world_position()
target_joint_xyz = target_joint.get_world_position()
# rotate target vector by inverse of test_vector angle. If then below x axis discard it.
test_vector = np.subtract(joint2_xyz, joint1_xyz)
target_vector = np.subtract(target_joint_xyz, joint1_xyz)
angle = math.atan2(test_vector[1], test_vector[0])
if (math.sin(-angle) * target_vector[0] + math.cos(-angle) * target_vector[1]) < 0:
logging.info(f'char_runtime_check failed, removing {target_joint_name} from retargeter :{target_joint_name, position_test, joint1_name, joint2_name}')
del self.retarget_cfg.char_joint_bvh_joints_mapping[target_joint_name]
else:
msg = f'Unrecognized char_runtime_checks position_test: {position_test}'
logging.critical(msg)
assert False, msg
def _initialize_retargeter_bvh(self, motion_cfg: MotionConfig, retarget_cfg: RetargetConfig):
""" Initializes the retargeter used to drive the animated character. """
# initialize retargeter
self.retargeter = Retargeter(motion_cfg, retarget_cfg)
# validate the motion and retarget config files, now that we know char/bvh joint names
char_joint_names: List[str] = self.rig.root_joint.get_chain_joint_names()
bvh_joint_names = self.retargeter.bvh_joint_names
motion_cfg.validate_bvh(bvh_joint_names)
retarget_cfg.validate_char_and_bvh_joint_names(char_joint_names, bvh_joint_names)
# a shorter alias
char_bvh_root_offset: RetargetConfig.CharBvhRootOffset = self.retarget_cfg.char_bvh_root_offset
# compute ratio of character's leg length to bvh skel leg length
c_limb_length = 0
c_joint_groups: List[List[str]] = char_bvh_root_offset['char_joints']
        for c_joint_group in c_joint_groups:
            while len(c_joint_group) >= 2:
                c_dist_joint = self.rig.root_joint.get_transform_by_name(c_joint_group[1])
                c_prox_joint = self.rig.root_joint.get_transform_by_name(c_joint_group[0])
                assert isinstance(c_dist_joint, AnimatedDrawingsJoint)
                assert isinstance(c_prox_joint, AnimatedDrawingsJoint)
                c_dist_joint_pos = c_dist_joint.get_world_position()
                c_prox_joint_pos = c_prox_joint.get_world_position()
                c_limb_length += np.linalg.norm(np.subtract(c_dist_joint_pos, c_prox_joint_pos))
                c_joint_group.pop(0)
b_limb_length = 0
b_joint_groups: List[List[str]] = char_bvh_root_offset['bvh_joints']
for b_joint_group in b_joint_groups:
while len(b_joint_group) >= 2:
b_dist_joint = self.retargeter.bvh.root_joint.get_transform_by_name(b_joint_group[1])
b_prox_joint = self.retargeter.bvh.root_joint.get_transform_by_name(b_joint_group[0])
assert isinstance(b_dist_joint, Joint)
assert isinstance(b_prox_joint, Joint)
b_dist_joint_pos = b_dist_joint.get_world_position()
b_prox_joint_pos = b_prox_joint.get_world_position()
b_limb_length += np.linalg.norm(np.subtract(b_dist_joint_pos, b_prox_joint_pos))
b_joint_group.pop(0)
# compute character-bvh scale factor and send to retargeter
scale_factor = float(c_limb_length / b_limb_length)
projection_bodypart_group_for_offset = char_bvh_root_offset['bvh_projection_bodypart_group_for_offset']
self.retargeter.scale_root_positions_for_character(scale_factor, projection_bodypart_group_for_offset)
        # compute the necessary orientations
for char_joint_name, (bvh_prox_joint_name, bvh_dist_joint_name) in self.retarget_cfg.char_joint_bvh_joints_mapping.items():
self.retargeter.compute_orientations(bvh_prox_joint_name, bvh_dist_joint_name, char_joint_name)
def update(self):
"""
        Uses the character's current internal time (advanced externally via tick()) to drive the animation.
        The time is passed to the retargeter, which returns bone orientations.
Orientations are passed to rig to calculate new joint positions.
The updated joint positions are passed into the ARAP module, which computes the new vertex locations.
The new vertex locations are stored and the dirty bit is set.
"""
# get retargeted motion data
frame_orientations: Dict[str, float]
joint_depths: Dict[str, float]
root_position: npt.NDArray[np.float32]
frame_orientations, joint_depths, root_position = self.retargeter.get_retargeted_frame_data(self.get_time())
# update the rig's root position and reorient all of its joints
self.rig.root_joint.set_position(root_position)
self.rig.set_global_orientations(frame_orientations)
# using new joint positions, calculate new mesh vertex xy positions
control_points: npt.NDArray[np.float32] = self.rig.get_joints_2D_positions() - root_position[:2]
self.vertices[:, :2] = self.arap.solve(control_points) + root_position[:2]
# use the z position of the rig's root joint for all mesh vertices
self.vertices[:, 2] = self.rig.root_joint.get_world_position()[2]
self._vertex_buffer_dirty_bit = True
# using joint depths, determine the correct order in which to render the character
self._set_draw_indices(joint_depths)
def _set_draw_indices(self, joint_depths: Dict[str, float]):
# sort segmentation groups by decreasing depth_driver's distance to camera
_bodypart_render_order: List[Tuple[int, np.float32]] = []
for idx, bodypart_group_dict in enumerate(self.retarget_cfg.char_bodypart_groups):
bodypart_depth: np.float32 = np.mean([joint_depths[joint_name] for joint_name in bodypart_group_dict['bvh_depth_drivers']])
_bodypart_render_order.append((idx, bodypart_depth))
_bodypart_render_order.sort(key=lambda x: float(x[1]))
# Add vertices belonging to joints in each segment group in the order they will be rendered
indices: List[npt.NDArray[np.int32]] = []
for idx, dist in _bodypart_render_order:
intra_bodypart_render_order = 1 if dist > 0 else -1 # if depth driver is behind plane, render bodyparts in reverse order
for joint_name in self.retarget_cfg.char_bodypart_groups[idx]['char_joints'][::intra_bodypart_render_order]:
indices.append(self.joint_to_tri_v_idx.get(joint_name, np.array([], dtype=np.int32)))
self.indices = np.hstack(indices)
def _initialize_joint_to_triangles_dict(self) -> None: # noqa: C901
"""
        Uses BFS to find the closest joint bone (line segment between joint and parent) to each triangle centroid, and stores the resulting mapping.
"""
shortest_distance = np.full(self.mask.shape, 1 << 12, dtype=np.int32) # to nearest joint
closest_joint_idx = np.full(self.mask.shape, -1, dtype=np.int8) # track joint idx nearest each point
# temp dictionary to help with seed generation
joints_d: Dict[str, CharacterConfig.JointDict] = {}
for joint in self.char_cfg.skeleton:
joints_d[joint['name']] = joint
joints_d[joint['name']]['loc'][1] = 1 - joints_d[joint['name']]['loc'][1]
# store joint names and later reference by element location
joint_name_to_idx: List[str] = [joint['name'] for joint in self.char_cfg.skeleton]
# seed generation
heap: List[Tuple[float, Tuple[int, Tuple[int, int]]]] = [] # [(dist, (joint_idx, (x, y))]
for _, joint in joints_d.items():
if joint['parent'] is None: # skip root joint
continue
joint_idx = joint_name_to_idx.index(joint['name'])
dist_joint_xy: List[float] = joint['loc']
prox_joint_xy: List[float] = joints_d[joint['parent']]['loc']
seeds_xy = (self.img_dim * np.linspace(dist_joint_xy, prox_joint_xy, num=20, endpoint=False)).round()
heap.extend([(0, (joint_idx, tuple(seed_xy.astype(np.int32)))) for seed_xy in seeds_xy])
# BFS search
start_time: float = time.time()
logging.info('Starting joint -> mask pixel BFS')
while heap:
distance, (joint_idx, (x, y)) = heapq.heappop(heap)
neighbors = [(x-1, y-1), (x, y-1), (x+1, y-1), (x-1, y), (x+1, y), (x-1, y+1), (x, y+1), (x+1, y+1)]
            n_dists = [1.414, 1.0, 1.414, 1.0, 1.0, 1.414, 1.0, 1.414]
            for (n_x, n_y), n_dist in zip(neighbors, n_dists):
n_distance = distance + n_dist
if not 0 <= n_x < self.img_dim or not 0 <= n_y < self.img_dim:
continue # neighbor is outside image bounds- ignore
if not self.mask[n_x, n_y]:
continue # outside character mask
if shortest_distance[n_x, n_y] <= n_distance:
continue # a closer joint exists
closest_joint_idx[n_x, n_y] = joint_idx
shortest_distance[n_x, n_y] = n_distance
heapq.heappush(heap, (n_distance, (joint_idx, (n_x, n_y))))
logging.info(f'Finished joint -> mask pixel BFS in {time.time() - start_time} seconds')
# create map between joint name and triangle centroids it is closest to
joint_to_tri_v_idx_and_dist: DefaultDict[str, List[Tuple[npt.NDArray[np.int32], np.int32]]] = defaultdict(list)
for tri_v_idx in self.mesh['triangles']:
tri_verts = np.array([self.mesh['vertices'][v_idx] for v_idx in tri_v_idx])
centroid_x, centroid_y = list((tri_verts.mean(axis=0) * self.img_dim).round().astype(np.int32))
tri_centroid_closest_joint_idx: np.int8 = closest_joint_idx[centroid_x, centroid_y]
dist_from_tri_centroid_to_bone: np.int32 = shortest_distance[centroid_x, centroid_y]
joint_to_tri_v_idx_and_dist[joint_name_to_idx[tri_centroid_closest_joint_idx]].append((tri_v_idx, dist_from_tri_centroid_to_bone))
joint_to_tri_v_idx: Dict[str, npt.NDArray[np.int32]] = {}
for key, val in joint_to_tri_v_idx_and_dist.items():
# sort by distance, descending
val.sort(key=lambda x: float(x[1]), reverse=True)
# retain vertex indices, remove distance info
val = [v[0] for v in val]
# convert to np array and save in dictionary
joint_to_tri_v_idx[key] = np.array(val).flatten() # type: ignore
self.joint_to_tri_v_idx = joint_to_tri_v_idx
def _load_mask(self) -> npt.NDArray[np.uint8]:
""" Load and perform preprocessing upon the mask """
mask_p: Path = self.char_cfg.mask_p
try:
_mask: npt.NDArray[np.uint8] = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE).astype(np.uint8)
if _mask.shape[0] != self.char_cfg.img_height:
raise AssertionError('height in character config and mask height do not match')
if _mask.shape[1] != self.char_cfg.img_width:
                raise AssertionError('width in character config and mask width do not match')
except Exception as e:
msg = f'Error loading mask {mask_p}: {str(e)}'
logging.critical(msg)
assert False, msg
        _mask = np.rot90(_mask, 3)  # rotate to upright
# pad to square
mask = np.zeros([self.img_dim, self.img_dim], _mask.dtype)
mask[0:_mask.shape[0], 0:_mask.shape[1]] = _mask
return mask
def _load_txtr(self) -> npt.NDArray[np.uint8]:
""" Load and perform preprocessing upon the drawing image """
txtr_p: Path = self.char_cfg.txtr_p
try:
_txtr: npt.NDArray[np.uint8] = cv2.imread(str(txtr_p), cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_UNCHANGED).astype(np.uint8)
_txtr = cv2.cvtColor(_txtr, cv2.COLOR_BGRA2RGBA).astype(np.uint8)
if _txtr.shape[-1] != 4:
raise AssertionError('texture must be RGBA')
if _txtr.shape[0] != self.char_cfg.img_height:
raise AssertionError('height in character config and txtr height do not match')
if _txtr.shape[1] != self.char_cfg.img_width:
                raise AssertionError('width in character config and txtr width do not match')
except Exception as e:
msg = f'Error loading texture {txtr_p}: {str(e)}'
logging.critical(msg)
assert False, msg
        _txtr = np.rot90(_txtr, 3)  # rotate to upright
# pad to square
txtr = np.zeros([self.img_dim, self.img_dim, _txtr.shape[-1]], _txtr.dtype)
txtr[0:_txtr.shape[0], 0:_txtr.shape[1], :] = _txtr
        txtr[self.mask == 0, 3] = 0  # make pixels outside mask transparent (boolean indexing assigns in place)
return txtr
def _generate_mesh(self) -> None:
try:
contours: List[npt.NDArray[np.float64]] = measure.find_contours(self.mask, 128)
except Exception as e:
msg = f'Error finding contours for character mesh: {str(e)}'
logging.critical(msg)
assert False, msg
# if multiple distinct polygons are in the mask, use largest and discard the rest
if len(contours) > 1:
msg = f'{len(contours)} separate polygons found in mask. Using largest.'
logging.info(msg)
contours.sort(key=len, reverse=True)
outside_vertices: npt.NDArray[np.float64] = measure.approximate_polygon(contours[0], tolerance=0.25)
character_outline = geometry.Polygon(contours[0])
# add some internal vertices to ensure a good mesh is created
inside_vertices_xy: List[Tuple[np.float32, np.float32]] = []
_x = np.linspace(0, self.img_dim, 40)
_y = np.linspace(0, self.img_dim, 40)
xv, yv = np.meshgrid(_x, _y)
for x, y in zip(xv.flatten(), yv.flatten()):
if character_outline.contains(geometry.Point(x, y)):
inside_vertices_xy.append((x, y))
inside_vertices: npt.NDArray[np.float64] = np.array(inside_vertices_xy)
vertices: npt.NDArray[np.float32] = np.concatenate([outside_vertices, inside_vertices]).astype(np.float32)
"""
        Delaunay-triangulate the vertices (which covers their convex hull),
        then discard triangles whose centroid falls outside the character's outline.
"""
convex_hull_triangles = Delaunay(vertices)
triangles: List[npt.NDArray[np.int32]] = []
for _triangle in convex_hull_triangles.simplices:
tri_vertices = np.array(
[vertices[_triangle[0]], vertices[_triangle[1]], vertices[_triangle[2]]])
tri_centroid = geometry.Point(np.mean(tri_vertices, 0))
if character_outline.contains(tri_centroid):
triangles.append(_triangle)
vertices /= self.img_dim # scale vertices so they lie between 0-1
self.mesh = {'vertices': vertices, 'triangles': triangles}
def _initialize_vertices(self) -> None:
"""
Prepare the ndarray that will be sent to rendering pipeline.
Later, x and y vertex positions will change, but z pos, u v texture, and rgb color won't.
"""
self.vertices = np.zeros((self.mesh['vertices'].shape[0], 8), np.float32)
# initialize xy positions of mesh vertices
self.vertices[:, :2] = self.arap.solve(self.rig.get_joints_2D_positions()).reshape([-1, 2])
# initialize texture coordinates
self.vertices[:, 6] = self.mesh['vertices'][:, 1] # u tex
self.vertices[:, 7] = self.mesh['vertices'][:, 0] # v tex
# set per-joint triangle colors
color_set: set[Tuple[np.float32, np.float32, np.float32]] = set()
r = g = b = np.linspace(0, 1, 4, dtype=np.float32)
while len(color_set) < len(self.joint_to_tri_v_idx):
color = (np.random.choice(r), np.random.choice(g), np.random.choice(b))
color_set.add(color)
colors: npt.NDArray[np.float32] = np.array(list(color_set), np.float32)
for c_idx, v_idxs in enumerate(self.joint_to_tri_v_idx.values()):
self.vertices[v_idxs, 3:6] = colors[c_idx] # rgb colors
def _initialize_opengl_resources(self) -> None:
h, w, _ = self.txtr.shape
        # initialize the texture
self.txtr_id = GL.glGenTextures(1)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 4)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.txtr_id)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, w, h,
0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, self.txtr)
self.vao = GL.glGenVertexArrays(1)
self.vbo = GL.glGenBuffers(1)
self.ebo = GL.glGenBuffers(1)
GL.glBindVertexArray(self.vao)
# buffer vertex data
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vertices, GL.GL_DYNAMIC_DRAW)
# buffer element index data
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER,
self.indices, GL.GL_STATIC_DRAW)
# position attributes
GL.glVertexAttribPointer(
0, 3, GL.GL_FLOAT, False, 4 * self.vertices.shape[1], None)
GL.glEnableVertexAttribArray(0)
# color attributes
GL.glVertexAttribPointer(
1, 3, GL.GL_FLOAT, False, 4 * self.vertices.shape[1], ctypes.c_void_p(4 * 3))
GL.glEnableVertexAttribArray(1)
# texture attributes
GL.glVertexAttribPointer(
2, 2, GL.GL_FLOAT, False, 4 * self.vertices.shape[1], ctypes.c_void_p(4 * 6))
GL.glEnableVertexAttribArray(2)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
self._is_opengl_initialized = True
def _rebuffer_vertex_data(self):
GL.glBindVertexArray(self.vao)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vertices, GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
# buffer element index data
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER,
self.indices, GL.GL_STATIC_DRAW)
GL.glBindVertexArray(0)
self._vertex_buffer_dirty_bit = False
def _draw(self, **kwargs):
if not self._is_opengl_initialized:
self._initialize_opengl_resources()
if self._vertex_buffer_dirty_bit:
self._rebuffer_vertex_data()
GL.glBindVertexArray(self.vao)
if kwargs['viewer_cfg'].draw_ad_txtr:
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.txtr_id)
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glUseProgram(kwargs['shader_ids']['texture_shader'])
model_loc = GL.glGetUniformLocation(kwargs['shader_ids']['texture_shader'], "model")
GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
GL.glDrawElements(GL.GL_TRIANGLES, self.indices.shape[0], GL.GL_UNSIGNED_INT, None)
GL.glEnable(GL.GL_DEPTH_TEST)
if kwargs['viewer_cfg'].draw_ad_color:
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glUseProgram(kwargs['shader_ids']['color_shader'])
model_loc = GL.glGetUniformLocation(kwargs['shader_ids']['color_shader'], "model")
GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
GL.glDrawElements(GL.GL_TRIANGLES, self.indices.shape[0], GL.GL_UNSIGNED_INT, None)
GL.glEnable(GL.GL_DEPTH_TEST)
if kwargs['viewer_cfg'].draw_ad_mesh_lines:
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glUseProgram(kwargs['shader_ids']['color_shader'])
model_loc = GL.glGetUniformLocation(kwargs['shader_ids']['color_shader'], "model")
GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
color_black_loc = GL.glGetUniformLocation(kwargs['shader_ids']['color_shader'], "color_black")
GL.glUniform1i(color_black_loc, 1)
GL.glDrawElements(GL.GL_TRIANGLES, self.indices.shape[0], GL.GL_UNSIGNED_INT, None)
GL.glUniform1i(color_black_loc, 0)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glBindVertexArray(0)
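# Typical call pattern, as a sketch (the three config objects must be built from real config files,
# which is handled elsewhere in this project; `delta_t` is whatever timestep the caller uses):
#
#     drawing = AnimatedDrawing(char_cfg, retarget_cfg, motion_cfg)
#     drawing.tick(delta_t)   # advance the character's internal clock (TimeManager)
#     drawing.update()        # retargeter -> rig orientations -> ARAP solve -> refreshed mesh vertices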
| AnimatedDrawings-main | animated_drawings/model/animated_drawing.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations # so we can refer to class Type inside class
import logging
from pathlib import Path
from typing import List, Tuple, Optional
import numpy as np
import numpy.typing as npt
from animated_drawings.model.transform import Transform
from animated_drawings.model.box import Box
from animated_drawings.model.quaternions import Quaternions
from animated_drawings.model.vectors import Vectors
from animated_drawings.model.joint import Joint
from animated_drawings.model.time_manager import TimeManager
from animated_drawings.utils import resolve_ad_filepath
class BVH_Joint(Joint):
"""
Joint class with channel order attribute and specialized vis widget
"""
def __init__(self, channel_order: List[str] = [], widget: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.channel_order = channel_order
self.widget: Optional[Transform] = None
if widget:
self.widget = Box()
self.add_child(self.widget)
def _draw(self, **kwargs):
if self.widget:
self.widget.draw(**kwargs)
class BVH(Transform, TimeManager):
"""
Class to encapsulate BVH (Biovision Hierarchy) animation data.
Include a single skeletal hierarchy defined in the BVH, frame count and speed,
and skeletal pos/rot data for each frame
"""
def __init__(self,
name: str,
root_joint: BVH_Joint,
frame_max_num: int,
frame_time: float,
pos_data: npt.NDArray[np.float32],
rot_data: npt.NDArray[np.float32]
) -> None:
"""
        Calling this constructor directly is not recommended. Instead, use BVH.from_file().
"""
super().__init__()
self.name: str = name
self.frame_max_num: int = frame_max_num
self.frame_time: float = frame_time
self.pos_data: npt.NDArray[np.float32] = pos_data
self.rot_data: npt.NDArray[np.float32] = rot_data
self.root_joint = root_joint
self.add_child(self.root_joint)
self.joint_num = self.root_joint.joint_count()
self.cur_frame = 0 # initialize skeleton pose to first frame
self.apply_frame(self.cur_frame)
def get_joint_names(self) -> List[str]:
""" Get names of joints in skeleton in the order in which BVH rotation data is stored. """
return self.root_joint.get_chain_joint_names()
def update(self) -> None:
"""Based upon internal time, determine which frame should be displayed and apply it"""
cur_time: float = self.get_time()
cur_frame = round(cur_time / self.frame_time) % self.frame_max_num
self.apply_frame(cur_frame)
def apply_frame(self, frame_num: int) -> None:
""" Apply root position and joint rotation data for specified frame_num """
self.root_joint.set_position(self.pos_data[frame_num])
self._apply_frame_rotations(self.root_joint, frame_num, ptr=np.array(0))
def _apply_frame_rotations(self, joint: BVH_Joint, frame_num: int, ptr: npt.NDArray[np.int32]) -> None:
q = Quaternions(self.rot_data[frame_num, ptr])
joint.set_rotation(q)
ptr += 1
for c in joint.get_children():
if not isinstance(c, BVH_Joint):
continue
self._apply_frame_rotations(c, frame_num, ptr)
def get_skeleton_fwd(self, forward_perp_vector_joint_names: List[Tuple[str, str]], update: bool = True) -> Vectors:
"""
Get current forward vector of skeleton in world coords. If update=True, ensure skeleton transforms are current.
        Input forward_perp_vector_joint_names, a list of pairs of joint names (e.g. [[leftshoulder, rightshoulder], [lefthip, righthip]])
Finds average of vectors between joint pairs, then returns vector perpendicular to their average.
"""
if update:
self.root_joint.update_transforms(update_ancestors=True)
vectors_cw_perpendicular_to_fwd: List[Vectors] = []
for (start_joint_name, end_joint_name) in forward_perp_vector_joint_names:
start_joint = self.root_joint.get_transform_by_name(start_joint_name)
if not start_joint:
msg = f'Could not find BVH joint with name: {start_joint_name}'
logging.critical(msg)
assert False, msg
end_joint = self.root_joint.get_transform_by_name(end_joint_name)
if not end_joint:
msg = f'Could not find BVH joint with name: {end_joint_name}'
logging.critical(msg)
assert False, msg
bone_vector: Vectors = Vectors(end_joint.get_world_position()) - Vectors(start_joint.get_world_position())
bone_vector.norm()
vectors_cw_perpendicular_to_fwd.append(bone_vector)
return Vectors(vectors_cw_perpendicular_to_fwd).average().perpendicular()
@classmethod
def from_file(cls, bvh_fn: str, start_frame_idx: int = 0, end_frame_idx: Optional[int] = None) -> BVH:
""" Given a path to a .bvh, constructs and returns BVH object"""
# search for the BVH file specified
bvh_p: Path = resolve_ad_filepath(bvh_fn, 'bvh file')
logging.info(f'Using BVH file located at {bvh_p.resolve()}')
with open(str(bvh_p), 'r') as f:
lines = f.read().splitlines()
if lines.pop(0) != 'HIERARCHY':
msg = f'Malformed BVH in line preceding {lines}'
logging.critical(msg)
assert False, msg
# Parse the skeleton
root_joint: BVH_Joint = BVH._parse_skeleton(lines)
if lines.pop(0) != 'MOTION':
msg = f'Malformed BVH in line preceding {lines}'
logging.critical(msg)
assert False, msg
# Parse motion metadata
frame_max_num = int(lines.pop(0).split(':')[-1])
frame_time = float(lines.pop(0).split(':')[-1])
# Parse motion data
frames = [list(map(float, line.strip().split(' '))) for line in lines]
if len(frames) != frame_max_num:
msg = f'framenum specified ({frame_max_num}) and found ({len(frames)}) do not match'
logging.critical(msg)
assert False, msg
# Split logically distinct root position data from joint euler angle rotation data
pos_data: npt.NDArray[np.float32]
rot_data: npt.NDArray[np.float32]
pos_data, rot_data = BVH._process_frame_data(root_joint, frames)
# Set end_frame if not passed in
if not end_frame_idx:
end_frame_idx = frame_max_num
# Ensure end_frame_idx <= frame_max_num
if frame_max_num < end_frame_idx:
msg = f'config specified end_frame_idx > bvh frame_max_num ({end_frame_idx} > {frame_max_num}). Replacing with frame_max_num.'
logging.warning(msg)
end_frame_idx = frame_max_num
# slice position and rotation data using start and end frame indices
pos_data = pos_data[start_frame_idx:end_frame_idx, :]
rot_data = rot_data[start_frame_idx:end_frame_idx, :]
        # new frame_max_num is end_frame_idx minus start_frame_idx
frame_max_num = end_frame_idx - start_frame_idx
return BVH(bvh_p.name, root_joint, frame_max_num, frame_time, pos_data, rot_data)
@classmethod
def _parse_skeleton(cls, lines: List[str]) -> BVH_Joint:
"""
Called recursively to parse and construct skeleton from BVH
:param lines: partially-processed contents of BVH file. Is modified in-place.
:return: Joint
"""
# Get the joint name
if lines[0].strip().startswith('ROOT'):
_, joint_name = lines.pop(0).strip().split(' ')
elif lines[0].strip().startswith('JOINT'):
_, joint_name = lines.pop(0).strip().split(' ')
elif lines[0].strip().startswith('End Site'):
joint_name = lines.pop(0).strip()
else:
msg = f'Malformed BVH. Line: {lines[0]}'
logging.critical(msg)
assert False, msg
if lines.pop(0).strip() != '{':
msg = f'Malformed BVH in line preceding {lines}'
logging.critical(msg)
assert False, msg
# Get offset
if not lines[0].strip().startswith('OFFSET'):
msg = f'Malformed BVH in line preceding {lines}'
logging.critical(msg)
assert False, msg
_, *xyz = lines.pop(0).strip().split(' ')
offset = Vectors(list(map(float, xyz)))
# Get channels
if lines[0].strip().startswith('CHANNELS'):
channel_order = lines.pop(0).strip().split(' ')
_, channel_num, *channel_order = channel_order
else:
channel_num, channel_order = 0, []
if int(channel_num) != len(channel_order):
msg = f'Malformed BVH in line preceding {lines}'
logging.critical(msg)
assert False, msg
# Recurse for children
children: List[BVH_Joint] = []
while lines[0].strip() != '}':
children.append(BVH._parse_skeleton(lines))
lines.pop(0) # }
return BVH_Joint(name=joint_name, offset=offset, channel_order=channel_order, children=children)
@classmethod
def _process_frame_data(cls, skeleton: BVH_Joint, frames: List[List[float]]) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]:
""" Given skeleton and frame data, return root position data and joint quaternion data, separately"""
def _get_frame_channel_order(joint: BVH_Joint, channels=[]):
channels.extend(joint.channel_order)
for child in [child for child in joint.get_children() if isinstance(child, BVH_Joint)]:
_get_frame_channel_order(child, channels)
return channels
channels = _get_frame_channel_order(skeleton)
# create a mask so we retain only joint rotations and root position
        mask = np.array(['rotation' in channel for channel in channels])
mask[:3] = True # hack to make sure we keep root position
frames = np.array(frames, dtype=np.float32)[:, mask]
# split root pose data and joint euler angle data
pos_data, ea_rots = np.split(np.array(frames, dtype=np.float32), [3], axis=1)
# quaternion rot data will go here
rot_data = np.empty([len(frames), skeleton.joint_count(), 4], dtype=np.float32)
BVH._pose_ea_to_q(skeleton, ea_rots, rot_data)
return pos_data, rot_data
@classmethod
def _pose_ea_to_q(cls, joint: BVH_Joint, ea_rots: npt.NDArray[np.float32], q_rots: npt.NDArray[np.float32], p1: int = 0, p2: int = 0) -> Tuple[int, int]:
"""
Given joint and array of euler angle rotation data, converts to quaternions and stores in q_rots.
Only called by _process_frame_data(). Modifies q_rots inplace.
:param p1: pointer to find where in ea_rots to read euler angles from
:param p2: pointer to determine where in q_rots to input quaternion
"""
axis_chars = "".join([c[0].lower() for c in joint.channel_order if c.endswith('rotation')]) # e.g. 'xyz'
q_rots[:, p2] = Quaternions.from_euler_angles(axis_chars, ea_rots[:, p1:p1+len(axis_chars)]).qs
p1 += len(axis_chars)
p2 += 1
for child in joint.get_children():
if isinstance(child, BVH_Joint):
p1, p2 = BVH._pose_ea_to_q(child, ea_rots, q_rots, p1, p2)
return p1, p2
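# Illustrative usage, as a sketch (the file path and joint names below are hypothetical):
#
#     bvh = BVH.from_file('path/to/motion.bvh')   # parse skeleton, frame metadata, and pose data
#     bvh.tick(delta_t)                           # advance the internal clock (TimeManager)
#     bvh.update()                                # apply the frame closest to the current time
#     fwd = bvh.get_skeleton_fwd([('LeftShoulder', 'RightShoulder'), ('LeftHip', 'RightHip')])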
| AnimatedDrawings-main | animated_drawings/model/bvh.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.transform import Transform
from animated_drawings.model.time_manager import TimeManager
from animated_drawings.config import SceneConfig
from animated_drawings.model.floor import Floor
from animated_drawings.model.animated_drawing import AnimatedDrawing
class Scene(Transform, TimeManager):
"""
The scene is the singular 'world' object.
It contains all objects that need to be drawn.
It keeps track of global time.
"""
def __init__(self, cfg: SceneConfig) -> None:
""" Takes in the scene dictionary from an mvc config file and prepares the scene. """
super().__init__()
# add floor if required
if cfg.add_floor:
self.add_child(Floor())
# Add the Animated Drawings
for each in cfg.animated_characters:
ad = AnimatedDrawing(*each)
self.add_child(ad)
# add bvh to the scene if we're going to visualize it
if cfg.add_ad_retarget_bvh:
self.add_child(ad.retargeter.bvh)
def progress_time(self, delta_t: float) -> None:
"""
Entry point called to update time in the scene by delta_t seconds.
        Because animatable objects within the scene may have their own individual timelines,
        we recursively go through objects in the scene and call tick() on each TimeManager.
"""
self._progress_time(self, delta_t)
def _progress_time(self, t: Transform, delta_t: float) -> None:
""" Recursively calls tick() on all TimeManager objects. """
if isinstance(t, TimeManager):
t.tick(delta_t)
for c in t.get_children():
self._progress_time(c, delta_t)
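# Per-frame usage, as a sketch (`scene_cfg` and `delta_t` come from the caller, e.g. the view/controller layer):
#
#     scene = Scene(scene_cfg)
#     scene.progress_time(delta_t)   # recursively ticks every TimeManager in the scene graph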
| AnimatedDrawings-main | animated_drawings/model/scene.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.rectangle import Rectangle
from animated_drawings.model.transform import Transform
import numpy as np
class Floor(Transform):
def __init__(self):
super().__init__()
for idx in range(-5, 5):
for jdx in range(-5, 5):
color = 'white' if (idx + jdx) % 2 else 'black'
tile = Rectangle(color=color)
tile.offset(np.array([float(idx), 0, float(jdx)]))
self.add_child(tile)
| AnimatedDrawings-main | animated_drawings/model/floor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.transform import Transform
import numpy as np
import numpy.typing as npt
import OpenGL.GL as GL
import ctypes
class TransformWidget(Transform):
def __init__(self, shader_name: str = 'color_shader'):
super().__init__()
self.points: npt.NDArray[np.float32] = np.array([
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
], np.float32)
self.shader_name: str = shader_name
self._is_opengl_initialized: bool = False
def _initialize_opengl_resources(self):
self.vao = GL.glGenVertexArrays(1)
self.vbo = GL.glGenBuffers(1)
GL.glBindVertexArray(self.vao)
# buffer vertex data
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
vert_bytes = 4 * self.points.shape[1] # 4 is byte size of np.float32
pos_offset = 4 * 0
color_offset = 4 * 3
# position attributes
GL.glVertexAttribPointer(
0, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(pos_offset))
GL.glEnableVertexAttribArray(0)
# color attributes
GL.glVertexAttribPointer(
1, 3, GL.GL_FLOAT, False, vert_bytes, ctypes.c_void_p(color_offset))
GL.glEnableVertexAttribArray(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
self._is_opengl_initialized = True
def _draw(self, **kwargs):
if not self._is_opengl_initialized:
self._initialize_opengl_resources()
GL.glUseProgram(kwargs['shader_ids'][self.shader_name])
model_loc = GL.glGetUniformLocation(
kwargs['shader_ids'][self.shader_name], "model")
GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE,
self._world_transform.T)
GL.glBindVertexArray(self.vao)
GL.glDrawArrays(GL.GL_LINES, 0, len(self.points))
GL.glBindVertexArray(0)
| AnimatedDrawings-main | animated_drawings/model/transform_widget.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import numpy.typing as npt
from collections import defaultdict
import logging
from typing import List, Dict, Set, Tuple
import scipy.sparse.linalg as spla
import scipy.sparse as sp
csr_matrix = sp._csr.csr_matrix # for typing # pyright: ignore[reportPrivateUsage]
class ARAP():
"""
Implementation of:
Takeo Igarashi and Yuki Igarashi.
"Implementing As-Rigid-As-Possible Shape Manipulation and Surface Flattening."
Journal of Graphics, GPU, and Game Tools, A.K.Peters, Volume 14, Number 1, pp.17-30, ISSN:2151-237X, June, 2009.
https://www-ui.is.s.u-tokyo.ac.jp/~takeo/papers/takeo_jgt09_arapFlattening.pdf
General idea is this:
Start with an input mesh, comprised of vertices (v in V) and edges (e in E),
and an initial set of pin (or control handle) locations.
Then, given new positions for the pins, find new vertex locations (v' in V')
such that the edges (e' in E') are as similar as possible, in a least squares sense, to the original edges (e in E).
Translation and rotation aren't penalized, but edge scaling is.
Because edges are directed vectors, a naive least-squares comparison would also penalize rotation, which makes this tricky.
Solution involves finding vertex locations twice. First, you do so while allowing both rotation and scaling to be free.
Then you collect the per-edge rotation transforms found by this solution.
During the second solve, you rotate the original edges (e in E) by the rotation matrix prior to computing the difference
between (e' in E') and (e in E). This way, rotation is essentially free, while scaling is not.
"""
def __init__(self, pins_xy: npt.NDArray[np.float32], triangles: List[npt.NDArray[np.int32]], vertices: npt.NDArray[np.float32], w: int = 1000): # noqa: C901
"""
Sets up the matrices needed for later solves.
pins_xy: ndarray [N, 2] specifying initial xy positions of N control points
vertices: ndarray [N, 2] containing xy positions of N vertices. A vertex's order within the array is its vertex ID
triangles: ndarray [N, 3] triplets of vertex IDs that make up triangles comprising the mesh
w: int the weight to use for control points in the solve. Default value should work.
"""
self.w = w
self.vertices = np.copy(vertices)
# build a deduplicated list of edge->vertex IDs...
self.e_v_idxs: List[Tuple[np.int32, np.int32]] = []
for v0, v1, v2 in triangles:
self.e_v_idxs.append(tuple(sorted((v0, v1))))
self.e_v_idxs.append(tuple(sorted((v1, v2))))
self.e_v_idxs.append(tuple(sorted((v2, v0))))
self.e_v_idxs = list(set(self.e_v_idxs)) # ...and deduplicate it
# build list of edge vectors
_edge_vectors: List[npt.NDArray[np.float32]] = []
for vi_idx, vj_idx in self.e_v_idxs:
vi = self.vertices[vi_idx]
vj = self.vertices[vj_idx]
_edge_vectors.append(vj - vi)
self.edge_vectors: npt.NDArray[np.float32] = np.array(_edge_vectors)
# get barycentric coordinates of pins, and mask denoting which pins were initially outside the mesh
pins_bc: List[Tuple[Tuple[np.int32, np.float32], Tuple[np.int32, np.float32], Tuple[np.int32, np.float32]]]
self.pin_mask: npt.NDArray[np.bool8]
pins_bc, self.pin_mask = self._xy_to_barycentric_coords(pins_xy, vertices, triangles)
v_vnbr_idxs: Dict[np.int32, Set[np.int32]] = defaultdict(set) # build a dict mapping vertex ID -> neighbor vertex IDs
for v0, v1, v2 in triangles:
v_vnbr_idxs[v0] |= {v1, v2}
v_vnbr_idxs[v1] |= {v2, v0}
v_vnbr_idxs[v2] |= {v0, v1}
self.edge_num = len(self.e_v_idxs)
self.vert_num = len(self.vertices)
self.pin_num = len(pins_xy[self.pin_mask])
self.A1: npt.NDArray[np.float32] = np.zeros([2 * (self.edge_num + self.pin_num), 2 * self.vert_num], dtype=np.float32)
G: npt.NDArray[np.float32] = np.zeros([2 * self.edge_num, 2 * self.vert_num], dtype=np.float32) # holds edge rotation calculations
# populate top half of A1, one row per edge
for k, (vi_idx, vj_idx) in enumerate(self.e_v_idxs):
# initialize self.A1 with 1, -1 denoting beginning and end of x and y dims of vector
self.A1[2*k:2*(k+1), 2*vi_idx:2*(vi_idx+1)] = -np.identity(2)
self.A1[2*k:2*(k+1), 2*vj_idx:2*(vj_idx+1)] = np.identity(2)
# Find the 'neighbor' vertices for this edge: {v_i, v_j,v_r, v_l}
vi_vnbr_idxs: Set[np.int32] = v_vnbr_idxs[vi_idx]
vj_vnbr_idxs: Set[np.int32] = v_vnbr_idxs[vj_idx]
e_vnbr_idxs: List[np.int32] = list(vi_vnbr_idxs.intersection(vj_vnbr_idxs))
e_vnbr_idxs.insert(0, vi_idx)
e_vnbr_idxs.insert(1, vj_idx)
e_vnbr_xys: Tuple[np.float32, np.float32] = tuple([self.vertices[v_idx] for v_idx in e_vnbr_idxs])
_: List[Tuple[float, float]] = []
for v in e_vnbr_xys[1:]:
vx: float = v[0] - e_vnbr_xys[0][0]
vy: float = v[1] - e_vnbr_xys[0][1]
_.extend(((vx, vy), (vy, -vx)))
G_k: npt.NDArray[np.float32] = np.array(_)
G_k_star: npt.NDArray[np.float32] = np.linalg.inv(G_k.T @ G_k) @ G_k.T
e_kx, e_ky = self.edge_vectors[k]
e = np.array([
[e_kx, e_ky],
[e_ky, -e_kx]
], np.float32)
edge_matrix = np.hstack([np.tile(-np.identity(2), (len(e_vnbr_idxs)-1, 1)), np.identity(2*(len(e_vnbr_idxs)-1))])
g = np.dot(G_k_star, edge_matrix)
h = np.dot(e, g)
for h_offset, v_idx in enumerate(e_vnbr_idxs):
self.A1[2*k:2*(k+1), 2*v_idx:2*(v_idx+1)] -= h[:, 2*h_offset:2*(h_offset+1)]
G[2*k:2*(k+1), 2*v_idx:2*(v_idx+1)] = g[:, 2*h_offset:2*(h_offset+1)]
# populate bottom row of A1, one row per constraint-dimension
for pin_idx, pin_bc in enumerate(pins_bc):
for v_idx, v_w in pin_bc:
self.A1[2*self.edge_num + 2*pin_idx , 2*v_idx] = self.w * v_w # x component
self.A1[2*self.edge_num + 2*pin_idx+1, 2*v_idx + 1] = self.w * v_w # y component
A2_top: npt.NDArray[np.float32] = np.zeros([self.edge_num, self.vert_num], dtype=np.float32)
for k, (vi_idx, vj_idx) in enumerate(self.e_v_idxs):
A2_top[k, vi_idx] = -1
A2_top[k, vj_idx] = 1
A2_bot: npt.NDArray[np.float32] = np.zeros([self.pin_num, self.vert_num], dtype=np.float32)
for pin_idx, pin_bc in enumerate(pins_bc):
for v_idx, v_w in pin_bc:
A2_bot[pin_idx, v_idx] = self.w * v_w
self.A2: npt.NDArray[np.float32] = np.vstack([A2_top, A2_bot])
# for speed, convert to sparse matrices and cache for later
self.tA1: csr_matrix = sp.csr_matrix(self.A1.transpose())
self.tA2: csr_matrix = sp.csr_matrix(self.A2.transpose())
self.G: csr_matrix = sp.csr_matrix(G)
# perturbing singular matrix and calling det can trigger overflow warning- ignore it
old_settings = np.seterr(over='ignore')
# ensure tA1xA1 matrix isn't singular and cache sparse representation
tA1xA1_dense: npt.NDArray[np.float32] = self.tA1 @ self.A1
while np.linalg.det(tA1xA1_dense) == 0.0:
logging.info('tA1xA1 is singular. perturbing...')
tA1xA1_dense += 0.00000001 * np.identity(tA1xA1_dense.shape[0])
self.tA1xA1: csr_matrix = sp.csr_matrix(tA1xA1_dense)
# ensure tA2xA2 matrix isn't singular and cache sparse representation
tA2xA2_dense: npt.NDArray[np.float32] = self.tA2 @ self.A2
while np.linalg.det(tA2xA2_dense) == 0.0:
logging.info('tA2xA2 is singular. perturbing...')
tA2xA2_dense += 0.00000001 * np.identity(tA2xA2_dense.shape[0])
self.tA2xA2: csr_matrix = sp.csr_matrix(tA2xA2_dense)
# revert np overflow warnings behavior
np.seterr(**old_settings)
def solve(self, pins_xy_: npt.NDArray[np.float32]) -> npt.NDArray[np.float64]:
"""
After ARAP has been initialized, pass in new pin xy positions and receive back the new mesh vertex positions
pins *must* be in the same order they were passed in during initialization
pins_xy: ndarray [N, 2] with new pin xy positions
return: ndarray [N, 2], the updated xy locations of each vertex in the mesh
"""
# remove any pins that were originally outside the mesh
pins_xy: npt.NDArray[np.float32] = pins_xy_[self.pin_mask] # pyright: ignore[reportGeneralTypeIssues]
assert len(pins_xy) == self.pin_num
self.b1: npt.NDArray[np.float64] = np.hstack([np.zeros([2 * self.edge_num], dtype=np.float64), self.w * pins_xy.reshape([-1, ])])
v1: npt.NDArray[np.float64] = spla.spsolve(self.tA1xA1, self.tA1 @ self.b1.T)
T1: npt.NDArray[np.float64] = self.G @ v1
b2_top = np.empty([self.edge_num, 2], dtype=np.float64)
for idx, e0 in enumerate(self.edge_vectors):
c: np.float64 = T1[2*idx]
s: np.float64 = T1[2*idx + 1]
scale = 1.0 / np.sqrt(c * c + s * s)
c *= scale
s *= scale
T2 = np.asarray(((c, s), (-s, c))) # create rotation matrix
e1 = np.dot(T2, e0) # and rotate old vector to get new
b2_top[idx] = e1
b2 = np.vstack([b2_top, self.w * pins_xy])
b2x = b2[:, 0]
b2y = b2[:, 1]
v2x: npt.NDArray[np.float64] = spla.spsolve(self.tA2xA2, self.tA2 @ b2x)
v2y: npt.NDArray[np.float64] = spla.spsolve(self.tA2xA2, self.tA2 @ b2y)
return np.vstack((v2x, v2y)).T
def _xy_to_barycentric_coords(self,
points: npt.NDArray[np.float32],
vertices: npt.NDArray[np.float32],
triangles: List[npt.NDArray[np.int32]]
) -> Tuple[List[Tuple[Tuple[np.int32, np.float32], Tuple[np.int32, np.float32], Tuple[np.int32, np.float32]]],
npt.NDArray[np.bool8]]:
"""
Given an array containing xy locations and the vertices & triangles making up a mesh,
find the triangle that each point lies within and return its representation using barycentric coordinates.
points: ndarray [N,2] of point xy coords
vertices: ndarray of vertex locations, row position is index id
triangles: ndarray with ordered vertex ids of vertices that make up each mesh triangle
Is point inside triangle? : https://mathworld.wolfram.com/TriangleInterior.html
Returns a list of barycentric coords for points inside the mesh,
and a list of True/False values indicating whether a given pin was inside the mesh or not.
Needed for removing pins during subsequent solve steps.
"""
def det(u: npt.NDArray[np.float32], v: npt.NDArray[np.float32]) -> npt.NDArray[np.float32]:
""" helper function returns determinents of two [N,2] arrays"""
ux, uy = u[:, 0], u[:, 1]
vx, vy = v[:, 0], v[:, 1]
return ux*vy - uy*vx
tv_locs: npt.NDArray[np.float32] = np.asarray([vertices[t].flatten() for t in triangles]) # triangle->vertex locations, [T x 6] array
v0 = tv_locs[:, :2]
v1 = np.subtract(tv_locs[:, 2:4], v0)
v2 = np.subtract(tv_locs[:, 4: ], v0)
b_coords: List[Tuple[Tuple[np.int32, np.float32], Tuple[np.int32, np.float32], Tuple[np.int32, np.float32]]] = []
pin_mask: List[bool] = []
for p_xy in points:
p_xy = np.expand_dims(p_xy, axis=0)
a = (det(p_xy, v2) - det(v0, v2)) / det(v1, v2)
b = -(det(p_xy, v1) - det(v0, v1)) / det(v1, v2)
# find the indices of triangles containing the point
in_triangle = np.bitwise_and(np.bitwise_and(a > 0, b > 0), a + b < 1)
containing_t_idxs = np.argwhere(in_triangle)
# if length is zero, check if on triangle(s) perimeters
if not len(containing_t_idxs):
on_triangle_perimeter = np.bitwise_and(np.bitwise_and(a >= 0, b >= 0), a + b <= 1)
containing_t_idxs = np.argwhere(on_triangle_perimeter)
# point is outside mesh. Log a warning and continue
if not len(containing_t_idxs):
msg = f'point {p_xy} not inside or on edge of any triangle in mesh. Skipping it'
print(msg)
logging.warning(msg)
pin_mask.append(False)
continue
# grab the id of first triangle the point is in or on
t_idx = int(containing_t_idxs[0])
vertex_ids = triangles[t_idx] # get ids of verts in triangle
a_xy, b_xy, c_xy = vertices[vertex_ids] # get xy coords of verts
uvw = self._get_barycentric_coords(p_xy, a_xy, b_xy, c_xy) # get barycentric coords
b_coords.append(list(zip(vertex_ids, uvw))) # append to our list # pyright: ignore[reportGeneralTypeIssues]
pin_mask.append(True)
return (b_coords, np.array(pin_mask, dtype=np.bool8))
def _get_barycentric_coords(self,
p: npt.NDArray[np.float32],
a: npt.NDArray[np.float32],
b: npt.NDArray[np.float32],
c: npt.NDArray[np.float32]
) -> npt.NDArray[np.float32]:
"""
As described in Christer Ericson's Real-Time Collision Detection.
p: the input point
a, b, c: the vertices of the triangle
Returns ndarray [u, v, w], the barycentric coordinates of p wrt vertices a, b, c
"""
v0: npt.NDArray[np.float32] = np.subtract(b, a)
v1: npt.NDArray[np.float32] = np.subtract(c, a)
v2: npt.NDArray[np.float32] = np.subtract(p, a)
d00: np.float32 = np.dot(v0, v0)
d01: np.float32 = np.dot(v0, v1)
d11: np.float32 = np.dot(v1, v1)
d20: np.float32 = np.dot(v2, v0)
d21: np.float32 = np.dot(v2, v1)
denom = d00 * d11 - d01 * d01
v: npt.NDArray[np.float32] = (d11 * d20 - d01 * d21) / denom # pyright: ignore[reportGeneralTypeIssues]
w: npt.NDArray[np.float32] = (d00 * d21 - d01 * d20) / denom # pyright: ignore[reportGeneralTypeIssues]
u: npt.NDArray[np.float32] = 1.0 - v - w
return np.array([u, v, w]).squeeze()
def plot_mesh(vertices, triangles, pins_xy):
""" Helper function to visualize mesh deformation outputs """
import matplotlib.pyplot as plt
for tri in triangles:
x_points = []
y_points = []
v0, v1, v2 = tri.tolist()
x_points.append(vertices[v0][0])
y_points.append(vertices[v0][1])
x_points.append(vertices[v1][0])
y_points.append(vertices[v1][1])
x_points.append(vertices[v2][0])
y_points.append(vertices[v2][1])
x_points.append(vertices[v0][0])
y_points.append(vertices[v0][1])
plt.plot(x_points, y_points)
plt.ylim((-15, 15))
plt.xlim((-15, 15))
for pin in pins_xy:
plt.plot(pin[0], pin[1], color='red', marker='o')
plt.show()
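# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of driving the ARAP solver above: build a tiny
# two-triangle square mesh, pin two opposite corners, drag one pin, and solve for
# the deformed vertex positions. The mesh, pin coordinates, and the __main__ guard
# are illustrative assumptions, not code from the repository.
if __name__ == '__main__':
    vertices = np.array([
        [0.0, 0.0],
        [1.0, 0.0],
        [1.0, 1.0],
        [0.0, 1.0],
    ], dtype=np.float32)
    triangles = [np.array([0, 1, 2], dtype=np.int32), np.array([0, 2, 3], dtype=np.int32)]
    pins_xy = np.array([[0.0, 0.0], [1.0, 1.0]], dtype=np.float32)  # pin two opposite corners
    arap = ARAP(pins_xy, triangles, vertices)
    new_pins_xy = np.array([[0.0, 0.0], [1.5, 1.2]], dtype=np.float32)  # drag one pin
    new_vertices = arap.solve(new_pins_xy)  # [4, 2] array of deformed vertex positions
    print(new_vertices)
    # plot_mesh(new_vertices, triangles, new_pins_xy)  # uncomment to visualize (requires matplotlib)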
| AnimatedDrawings-main | animated_drawings/model/arap.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from abc import abstractmethod
class TimeManager():
""" Mixin class designed to be used by objects that must keep track of their own time (e.g. time-varying animations) """
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._time: float = 0.0 # object's internal time, in seconds
self._is_paused: bool = False
def tick(self, delta_t: float) -> None:
""" Progress objects interval time by delta_t seconds if not paused """
if not self._is_paused:
self._time += delta_t
self.update()
@abstractmethod
def update(self):
""" Contains logic needed to update subclass after tick() """
pass
def set_pause(self, pause: Optional[bool]) -> None:
if pause is None:
self._is_paused = not self._is_paused
else:
self._is_paused = pause
def set_time(self, time: float) -> None:
self._time = time
def get_time(self) -> float:
return self._time
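# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical subclass showing how the TimeManager mixin is meant to be
# used: tick() advances the internal clock and then calls update(), where the subclass
# reads get_time() to refresh its own state. The Blinker class below is an illustrative
# assumption, not code from the repository.
class Blinker(TimeManager):
    """ Toggles visibility once per second based on its internal time. """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.visible: bool = True
    def update(self):
        # visible during even seconds, hidden during odd seconds
        self.visible = int(self.get_time()) % 2 == 0
if __name__ == '__main__':
    blinker = Blinker()
    for _ in range(4):
        blinker.tick(0.5)  # advance time by half a second
        print(f'{blinker.get_time():.1f}s -> visible={blinker.visible}')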
| AnimatedDrawings-main | animated_drawings/model/time_manager.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.model.transform import Transform
from animated_drawings.model.vectors import Vectors
from typing import Union, List
class Camera(Transform):
def __init__(
self,
pos: Union[Vectors, List[Union[float, int]]] = Vectors([0.0, 0.0, 0.0]),
fwd: Union[Vectors, List[Union[float, int]]] = Vectors([0.0, 0.0, 1.0])
):
super().__init__()
if not isinstance(pos, Vectors):
pos = Vectors(pos)
self.set_position(pos)
if not isinstance(fwd, Vectors):
fwd = Vectors(fwd)
self.look_at(fwd)
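# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example: place a camera at (0, 1, 5) looking down the +Z axis.
# Plain lists are accepted and converted to Vectors internally. The position and forward
# values are illustrative assumptions, not code from the repository.
if __name__ == '__main__':
    camera = Camera(pos=[0.0, 1.0, 5.0], fwd=[0.0, 0.0, 1.0])
    print(camera.get_world_position())  # expected: [0. 1. 5.]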
| AnimatedDrawings-main | animated_drawings/model/camera.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations # so we can refer to class Type inside class
import numpy as np
import numpy.typing as npt
from animated_drawings.model.vectors import Vectors
from animated_drawings.model.quaternions import Quaternions
import logging
from typing import Union, Optional, List, Tuple
class Transform():
"""Base class from which all other scene objects descend"""
def __init__(self,
parent: Optional[Transform] = None,
name: Optional[str] = None,
children: List[Transform] = [],
offset: Union[npt.NDArray[np.float32], Vectors, None] = None,
**kwargs
) -> None:
super().__init__(**kwargs)
self._parent: Optional[Transform] = parent
self._children: List[Transform] = []
for child in children:
self.add_child(child)
self.name: Optional[str] = name
self._translate_m: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
self._rotate_m: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
self._scale_m: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
if offset is not None:
self.offset(offset)
self._local_transform: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
self._world_transform: npt.NDArray[np.float32] = np.identity(4, dtype=np.float32)
self.dirty_bit: bool = True # are world/local transforms stale?
def update_transforms(self, parent_dirty_bit: bool = False, recurse_on_children: bool = True, update_ancestors: bool = False) -> None:
"""
Updates transforms if stale.
If own dirty bit is set, recompute local matrix
If own or parent's dirty bit is set, recompute world matrix
If own or parent's dirty bit is set, recurses on children, unless param recurse_on_children is false.
If update_ancestors is true, first find the root ancestor, then call update_transforms upon it.
Set dirty bit back to false.
"""
if update_ancestors:
ancestor, ancestor_parent = self, self.get_parent()
while ancestor_parent is not None:
ancestor, ancestor_parent = ancestor_parent, ancestor_parent.get_parent()
ancestor.update_transforms()
if self.dirty_bit:
self.compute_local_transform()
if self.dirty_bit | parent_dirty_bit:
self.compute_world_transform()
if recurse_on_children:
for c in self.get_children():
c.update_transforms(self.dirty_bit | parent_dirty_bit)
self.dirty_bit = False
def compute_local_transform(self) -> None:
self._local_transform = self._translate_m @ self._rotate_m @ self._scale_m
def compute_world_transform(self) -> None:
self._world_transform = self._local_transform
if self._parent:
self._world_transform = self._parent._world_transform @ self._world_transform
def get_world_transform(self, update_ancestors: bool = True) -> npt.NDArray[np.float32]:
"""
Get the transform's world matrix.
If update is true, check to ensure the world_transform is current
"""
if update_ancestors:
self.update_transforms(update_ancestors=True)
return np.copy(self._world_transform)
def set_scale(self, scale: float) -> None:
self._scale_m[:-1, :-1] = scale * np.identity(3, dtype=np.float32)
self.dirty_bit = True
def set_position(self, pos: Union[npt.NDArray[np.float32], Vectors]) -> None:
""" Set the absolute values of the translational elements of transform """
if isinstance(pos, Vectors):
pos = pos.vs
if pos.shape == (1, 3):
pos = np.squeeze(pos)
elif pos.shape == (3,):
pass
else:
msg = f'bad vector dim passed to set_position. Found: {pos.shape}'
logging.critical(msg)
assert False, msg
self._translate_m[:-1, -1] = pos
self.dirty_bit = True
def get_local_position(self) -> npt.NDArray[np.float32]:
""" Ensure local transform is up-to-date and return local xyz coordinates """
if self.dirty_bit:
self.compute_local_transform()
return np.copy(self._local_transform[:-1, -1])
def get_world_position(self, update_ancestors: bool = True) -> npt.NDArray[np.float32]:
"""
Ensure all parent transforms are update and return world xyz coordinates
If update_ancestors is true, update ancestor transforms to ensure
up-to-date world_transform before returning
"""
if update_ancestors:
self.update_transforms(update_ancestors=True)
return np.copy(self._world_transform[:-1, -1])
def offset(self, pos: Union[npt.NDArray[np.float32], Vectors]) -> None:
""" Translational offset by the specified amount """
if isinstance(pos, Vectors):
pos = pos.vs[0]
assert isinstance(pos, np.ndarray)
self.set_position(self._translate_m[:-1, -1] + pos)
def look_at(self, fwd_: Union[npt.NDArray[np.float32], Vectors, None]) -> None:
"""Given a forward vector, rotate the transform to face that position"""
if fwd_ is None:
fwd_ = Vectors(self.get_world_position())
elif isinstance(fwd_, np.ndarray):
fwd_ = Vectors(fwd_)
fwd: Vectors = fwd_.copy() # norming will change the vector
if fwd.vs.shape != (1, 3):
msg = f'look_at fwd_ vector must have shape [1,3]. Found: {fwd.vs.shape}'
logging.critical(msg)
assert False, msg
tmp: Vectors = Vectors([0.0, 1.0, 0.0])
# if fwd and tmp are same vector, modify tmp to avoid collapse
if np.isclose(fwd.vs, tmp.vs).all() or np.isclose(fwd.vs, -tmp.vs).all():
tmp.vs[0] += 0.001
right: Vectors = tmp.cross(fwd)
up: Vectors = fwd.cross(right)
fwd.norm()
right.norm()
up.norm()
rotate_m = np.identity(4, dtype=np.float32)
rotate_m[:-1, 0] = np.squeeze(right.vs)
rotate_m[:-1, 1] = np.squeeze(up.vs)
rotate_m[:-1, 2] = np.squeeze(fwd.vs)
self._rotate_m = rotate_m
self.dirty_bit = True
def get_right_up_fwd_vectors(self) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32], npt.NDArray[np.float32]]:
inverted: npt.NDArray[np.float32] = np.linalg.inv(self.get_world_transform())
right: npt.NDArray[np.float32] = inverted[:-1, 0]
up: npt.NDArray[np.float32] = inverted[:-1, 1]
fwd: npt.NDArray[np.float32] = inverted[:-1, 2]
return right, up, fwd
def set_rotation(self, q: Quaternions) -> None:
if q.qs.shape != (1, 4):
msg = f'set_rotate q must have dimension (1, 4). Found: {q.qs.shape}'
logging.critical(msg)
assert False, msg
self._rotate_m = q.to_rotation_matrix()
self.dirty_bit = True
def rotation_offset(self, q: Quaternions) -> None:
if q.qs.shape != (1, 4):
msg = f'rotation_offset q must have dimension (1, 4). Found: {q.qs.shape}'
logging.critical(msg)
assert False, msg
self._rotate_m = (q * Quaternions.from_rotation_matrix(self._rotate_m)).to_rotation_matrix()
self.dirty_bit = True
def add_child(self, child: Transform) -> None:
self._children.append(child)
child.set_parent(self)
def get_children(self) -> List[Transform]:
return self._children
def set_parent(self, parent: Transform) -> None:
self._parent = parent
self.dirty_bit = True
def get_parent(self) -> Optional[Transform]:
return self._parent
def get_transform_by_name(self, name: str) -> Optional[Transform]:
""" Search self and children for transform with matching name. Return it if found, None otherwise. """
# are we a match?
if self.name == name:
return self
# recurse to check if a child is match
for c in self.get_children():
transform_or_none = c.get_transform_by_name(name)
if transform_or_none: # if we found it
return transform_or_none
# no match
return None
def draw(self, recurse: bool = True, **kwargs) -> None:
""" Draw this transform and recurse on children """
self._draw(**kwargs)
if recurse:
for child in self.get_children():
child.draw(**kwargs)
def _draw(self, **kwargs) -> None:
"""Transforms default to not being drawn. Subclasses must implement how they appear"""
| AnimatedDrawings-main | animated_drawings/model/transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import OpenGL.GL as GL
from animated_drawings.model.transform import Transform
import ctypes
class Rectangle(Transform):
def __init__(self, color: str = 'white') -> None:
super().__init__()
if color == 'white':
c = np.array([1.0, 1.0, 1.0], np.float32)
elif color == 'black':
c = np.array([0.3, 0.3, 0.3], np.float32)
elif color == 'blue':
c = np.array([0.00, 0.0, 1.0], np.float32)
else:
assert len(color) == 3
c = np.array([*color], np.float32)
self.points = np.array([
[0.5, 0.0, 0.5, *c], # top right
[-0.5, 0.0, -0.5, *c], # bottom left
[-0.5, 0.0, 0.5, *c], # top left
[0.5, 0.0, -0.5, *c], # bottom right
[-0.5, 0.0, -0.5, *c], # bottom left
[0.5, 0.0, 0.5, *c], # top right
], np.float32)
self.vao = GL.glGenVertexArrays(1)
self.vbo = GL.glGenBuffers(1)
GL.glBindVertexArray(self.vao)
# buffer vertex data
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.points, GL.GL_STATIC_DRAW)
# position attributes
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 4 * self.points.shape[1], None)
GL.glEnableVertexAttribArray(0)
# color attributes
GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, False, 4 * self.points.shape[1], ctypes.c_void_p(4 * 3))
GL.glEnableVertexAttribArray(1)
# texture attributes
GL.glVertexAttribPointer(2, 2, GL.GL_FLOAT, False, 4 * self.points.shape[1], ctypes.c_void_p(4 * 6))
GL.glEnableVertexAttribArray(2)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
def _draw(self, **kwargs) -> None:
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glUseProgram(kwargs['shader_ids']['color_shader'])
model_loc = GL.glGetUniformLocation(kwargs['shader_ids']['color_shader'], "model")
GL.glUniformMatrix4fv(model_loc, 1, GL.GL_FALSE, self._world_transform.T)
GL.glBindVertexArray(self.vao)
GL.glDrawArrays(GL.GL_TRIANGLES, 0, 6)
| AnimatedDrawings-main | animated_drawings/model/rectangle.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from animated_drawings.model.transform import Transform
from typing import List
class Joint(Transform):
"""
Skeletal joint used to represent character poses.
"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def joint_count(self) -> int:
""" Returns 1 + the number of Joint children in this joint's kinematic chain (recursive) """
count: int = 1
for c in self.get_children():
if isinstance(c, Joint):
count += c.joint_count()
return count
def get_chain_worldspace_positions(self) -> List[float]:
""" Get xzy worldspace coordinates of all joints within the chain. """
self.update_transforms(update_ancestors=True)
return self._get_chain_worldspace_positions(self, [])
def _get_chain_worldspace_positions(self, joint: Joint, position_list: List[float]) -> List[float]:
position_list.extend(joint.get_world_position(update_ancestors=False))
for c in joint.get_children():
if not isinstance(c, Joint):
continue
self._get_chain_worldspace_positions(c, position_list)
return position_list
def get_chain_joint_names(self):
""" Traverse through joint in depth-first order and return names of joints in the order they are visited. """
joint_names: List[str] = []
return self._get_chain_joint_names(self, joint_names)
def _get_chain_joint_names(self, joint: Joint, joint_name_list: List[str]) -> List[str]:
joint_name_list.append(str(joint.name))
for c in joint.get_children():
if not isinstance(c, Joint):
continue
self._get_chain_joint_names(c, joint_name_list)
return joint_name_list
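# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of building a three-joint kinematic chain and querying it.
# The joint names are illustrative assumptions, not from any real skeleton definition.
if __name__ == '__main__':
    root = Joint(name='root')
    spine = Joint(name='spine')
    head = Joint(name='head')
    root.add_child(spine)
    spine.add_child(head)
    print(root.joint_count())            # 3
    print(root.get_chain_joint_names())  # ['root', 'spine', 'head']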
| AnimatedDrawings-main | animated_drawings/model/joint.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from animated_drawings.model.bvh import BVH
import numpy as np
import numpy.typing as npt
import math
from animated_drawings.model.joint import Joint
from sklearn.decomposition import PCA
from typing import Tuple, List, Dict
from animated_drawings.model.vectors import Vectors
from animated_drawings.model.quaternions import Quaternions
from animated_drawings.config import MotionConfig, RetargetConfig
x_axis = np.array([1.0, 0.0, 0.0], dtype=np.float32)
z_axis = np.array([0.0, 0.0, 1.0], dtype=np.float32)
class Retargeter():
"""
Retargeter class takes in a motion_cfg file and retarget_cfg file.
Using the specifications listed within retarget_cfg, it converts the motion
specified in motion_cfg into a form that can be applied to an animated drawing.
It is responsible for projecting 3D joint locations onto 2D planes, determining the resulting
bone orientations, joint 'depths', and root offsets for each frame.
"""
def __init__(self, motion_cfg: MotionConfig, retarget_cfg: RetargetConfig) -> None:
# instantiate the bvh
try:
self.bvh = BVH.from_file(str(motion_cfg.bvh_p), motion_cfg.start_frame_idx, motion_cfg.end_frame_idx)
except Exception as e:
msg = f'Error loading BVH: {e}'
logging.critical(msg)
assert False, msg
# get and cache bvh joint names for later
self.bvh_joint_names = self.bvh.get_joint_names()
# bvh joints defining a set of vectors that skeleton's fwd is perpendicular to
self.forward_perp_vector_joint_names: List[Tuple[str, str]] = motion_cfg.forward_perp_joint_vectors
# override the frame_time, if one was specified within motion_cfg
if motion_cfg.frame_time:
self.bvh.frame_time = motion_cfg.frame_time
# rotate BVH skeleton so up is +Y
if motion_cfg.up == '+y':
pass # no rotation needed
elif motion_cfg.up == '+z':
self.bvh.set_rotation(Quaternions.from_euler_angles('yx', np.array([-90.0, -90.0])))
else:
msg = f'up value not implemented: {motion_cfg.up}'
logging.critical(msg)
assert False, msg
# rotate BVH skeleton so forward is +X
skeleton_fwd: Vectors = self.bvh.get_skeleton_fwd(self.forward_perp_vector_joint_names)
q: Quaternions = Quaternions.rotate_between_vectors(skeleton_fwd, Vectors([1.0, 0.0, 0.0]))
self.bvh.rotation_offset(q)
# scale BVH
self.bvh.set_scale(motion_cfg.scale)
# position above origin
self.bvh.offset(-self.bvh.root_joint.get_world_position())
# adjust bvh skeleton y pos by getting groundplane joint...
try:
groundplane_joint = self.bvh.root_joint.get_transform_by_name(motion_cfg.groundplane_joint)
assert isinstance(groundplane_joint, Joint), f'could not find joint by name: {motion_cfg.groundplane_joint}'
except Exception as e:
msg = f'Error getting groundplane joint: {e}'
logging.warning(msg)
assert False
# ... and moving the bvh so it is on the y=0 plane
bvh_groundplane_y = groundplane_joint.get_world_position()[1]
self.bvh.offset(np.array([0, -bvh_groundplane_y, 0]))
self.joint_positions: npt.NDArray[np.float32]
self.fwd_vectors: npt.NDArray[np.float32]
self.bvh_root_positions: npt.NDArray[np.float32]
self._compute_normalized_joint_positions_and_fwd_vectors()
# cache the starting worldspace location of character's root joint
self.character_start_loc: npt.NDArray[np.float32] = np.array(retarget_cfg.char_start_loc, dtype=np.float32)
# holds world coordinates of character root joint after retargeting
self.char_root_positions: npt.NDArray[np.float32]
# get & save projection planes
self.joint_group_name_to_projection_plane: Dict[ str, npt.NDArray[np.float32]] = {}
self.joint_to_projection_plane: Dict[ str, npt.NDArray[np.float32]] = {}
for joint_projection_group in retarget_cfg.bvh_projection_bodypart_groups:
group_name = joint_projection_group['name']
joint_names = joint_projection_group['bvh_joint_names']
projection_method = joint_projection_group['method']
projection_plane = self._determine_projection_plane_normal(group_name, joint_names, projection_method)
self.joint_group_name_to_projection_plane[joint_projection_group['name']] = projection_plane
for joint_name in joint_projection_group['bvh_joint_names']:
self.joint_to_projection_plane[joint_name] = projection_plane
# map character joint names to its orientations
self.char_joint_to_orientation: Dict[str, npt.NDArray[np.float32]] = {}
# map bvh joint names to its distance to project plane (useful for rendering order)
self.bvh_joint_to_projection_depth: Dict[str, npt.NDArray[np.float32]] = self._compute_depths()
def _compute_normalized_joint_positions_and_fwd_vectors(self) -> None:
"""
Called during initialization.
Computes fwd vector for bvh skeleton at each frame.
Extracts all bvh skeleton joint locations for all frames.
Repositions them so root is above the origin.
Rotates them so skeleton faces along the +X axis.
"""
# get joint positions and forward vectors
self.joint_positions = np.empty([self.bvh.frame_max_num, 3 * self.bvh.joint_num], dtype=np.float32)
self.fwd_vectors = np.empty([self.bvh.frame_max_num, 3], dtype=np.float32)
for frame_idx in range(self.bvh.frame_max_num):
self.bvh.apply_frame(frame_idx)
self.joint_positions[frame_idx] = self.bvh.root_joint.get_chain_worldspace_positions()
self.fwd_vectors[frame_idx] = self.bvh.get_skeleton_fwd(self.forward_perp_vector_joint_names).vs[0]
# reposition over origin
self.bvh_root_positions = self.joint_positions[:, :3]
self.joint_positions = self.joint_positions - np.tile(self.bvh_root_positions, [1, len(self.bvh_joint_names)])
# compute angle between skeleton's forward vector and x axis
v1 = np.tile(np.array([1.0, 0.0], dtype=np.float32), reps=(self.joint_positions.shape[0], 1))
v2 = self.fwd_vectors
dot: npt.NDArray[np.float32] = v1[:, 0]*v2[:, 0] + v1[:, 1]*v2[:, 2]
det: npt.NDArray[np.float32] = v1[:, 0]*v2[:, 2] - v2[:, 0]*v1[:, 1]
angle: npt.NDArray[np.float32] = np.arctan2(det, dot).astype(np.float32)
angle %= 2*np.pi
angle = np.where(angle < 0.0, angle + 2*np.pi, angle)
# rotate the skeleton's joint so it faces +X axis
for idx in range(self.joint_positions.shape[0]):
rot_mat = np.identity(3).astype(np.float32)
rot_mat[0, 0] = math.cos(angle[idx])
rot_mat[0, 2] = math.sin(angle[idx])
rot_mat[2, 0] = -math.sin(angle[idx])
rot_mat[2, 2] = math.cos(angle[idx])
rotated_joints: npt.NDArray[np.float32] = rot_mat @ self.joint_positions[idx].reshape([-1, 3]).T
self.joint_positions[idx] = rotated_joints.T.reshape(self.joint_positions[idx].shape)
def _determine_projection_plane_normal(self, group_name: str, joint_names: List[str], projection_method: str) -> npt.NDArray[np.float32]:
"""
Given a joint_projection_group dictionary object, computes the projection plane normal used for the group.
Called during initialization.
"""
if projection_method == 'frontal':
logging.info(f'{group_name} projection_method is {projection_method}. Using {x_axis}')
return x_axis
elif projection_method == 'sagittal':
logging.info(f'{group_name} projection_method is {projection_method}. Using {z_axis}')
return z_axis
elif projection_method == 'pca':
logging.info(f'{group_name} projection_method is {projection_method}. Running PCA on {joint_names}')
pass # pca code is below
else:
msg = f'bad project method for {group_name}: {projection_method}'
logging.critical(msg)
assert False, msg
# get the xyz locations of joints within this joint_projection_group
joints_idxs = [self.bvh_joint_names.index(joint_name) for joint_name in joint_names]
joints_mask = np.full(self.joint_positions.shape[1], False, dtype=np.bool8)
for idx in joints_idxs:
joints_mask[3*idx:3*(idx+1)] = True
joints_points = self.joint_positions[:, joints_mask]
joints_points = joints_points.reshape([-1, 3])
# do PCA and get 3rd component
pca = PCA()
pca.fit(joints_points)
pc3: npt.NDArray[np.float32] = pca.components_[2] # pyright: ignore[reportGeneralTypeIssues]
# see if it is closer to the x axis or z axis
x_cos_sim: float = np.dot(x_axis, pc3) / (np.linalg.norm(x_axis) * np.linalg.norm(pc3))
z_cos_sim: float = np.dot(z_axis, pc3) / (np.linalg.norm(z_axis) * np.linalg.norm(pc3))
# return the closer of the two
if abs(x_cos_sim) > abs(z_cos_sim):
logging.info(f'PCA complete. {group_name} using {x_axis}')
return x_axis
else:
logging.info(f'PCA complete. {group_name} using {z_axis}')
return z_axis
def _compute_depths(self) -> Dict[str, npt.NDArray[np.float32]]:
"""
For each BVH joint within bvh_projection_mapping_groups, compute distance to projection plane.
This distance is used if the joint is a char_body_segmentation_groups depth_driver.
"""
bvh_joint_to_projection_depth: Dict[str, npt.NDArray[np.float32]] = {}
for joint_name in self.bvh_joint_names:
joint_idx = self.bvh_joint_names.index(joint_name)
joint_xyz = self.joint_positions[:, 3*joint_idx:3*(joint_idx+1)]
try:
projection_plane_normal = self.joint_to_projection_plane[joint_name]
except Exception:
msg = f' error finding projection plane for joint_name: {joint_name}'
logging.info(msg)
continue
# project bone onto 2D plane
if np.array_equal(projection_plane_normal, x_axis):
joint_depths = joint_xyz[:, 0]
elif np.array_equal(projection_plane_normal, z_axis):
joint_depths = joint_xyz[:, 2]
else:
msg = 'error projection_plane_normal'
logging.critical(msg)
assert False, msg
bvh_joint_to_projection_depth[joint_name] = joint_depths
return bvh_joint_to_projection_depth
def scale_root_positions_for_character(self, char_to_bvh_scale: float, projection_bodypart_group_for_offset: str) -> None:
"""
Uses projection plane of projection_bodypart_group_for_offset to determine bvh skeleton's projected root offset.
Scales that offset to account for differences in lengths of character and bvh skeleton limbs.
"""
try:
projection_plane = self.joint_group_name_to_projection_plane[projection_bodypart_group_for_offset]
except Exception as e:
msg = f'Error getting projection plane: {str(e)}'
logging.critical(msg)
assert False, msg
self.char_root_positions = np.empty([self.bvh_root_positions.shape[0], 2], dtype=np.float32)
self.char_root_positions[0] = [0, 0]
for idx in range(1, self.bvh_root_positions.shape[0]):
if np.array_equal(projection_plane, np.array([0.0, 0.0, 1.0])): # if sagittal projection
v1 = self.fwd_vectors[idx] # we're interested in forward motion
else: # if frontal projection
v1 = self.fwd_vectors[idx][::-1]*np.array([-1, 1, -1]) # we're interested in lateral motion
delta = self.bvh_root_positions[idx] - self.bvh_root_positions[idx-1]
# scale root delta for both x and y offsets. Project onto v1 for x offset
self.char_root_positions[idx, 0] = self.char_root_positions[idx-1, 0] + char_to_bvh_scale * np.dot(v1, delta) # x
self.char_root_positions[idx, 1] = self.char_root_positions[idx-1, 1] + char_to_bvh_scale * delta[1] # y
def compute_orientations(self, bvh_prox_joint_name: str, bvh_dist_joint_name: str, char_joint_name: str) -> None:
"""
Calculates the orientation (degrees CCW of +Y axis) of the vector from bvh_prox_joint->bvh_dist_joint using the
projection plane of bvh_dist_joint. Results are saved into a dictionary using char_joint_name as the key.
"""
# get distal end joint
dist_joint = self.bvh.root_joint.get_transform_by_name(bvh_dist_joint_name)
if dist_joint is None or not isinstance(dist_joint, Joint) or dist_joint.name is None:
msg = f'error finding joint {bvh_dist_joint_name}'
logging.critical(msg)
assert False, msg
# get prox joint
prox_joint = self.bvh.root_joint.get_transform_by_name(bvh_prox_joint_name)
if prox_joint is None or not isinstance(prox_joint, Joint) or prox_joint.name is None:
msg = f'joint {bvh_prox_joint_name} has no parent joint, therefore no bone orientation. Returning zero'
logging.info(msg)
self.char_joint_to_orientation[char_joint_name] = np.zeros(self.joint_positions.shape[0], dtype=np.float32)
return
# get joint xyz locations
dist_joint_idx = self.bvh_joint_names.index(dist_joint.name)
dist_joint_xyz = self.joint_positions[:, 3*dist_joint_idx:3*(dist_joint_idx+1)]
prox_joint_idx = self.bvh_joint_names.index(prox_joint.name)
prox_joint_xyz = self.joint_positions[:, 3*prox_joint_idx:3*(prox_joint_idx+1)]
# compute the bone vector
bone_vector = dist_joint_xyz - prox_joint_xyz # type: ignore
# get distal joint's projection plane
try:
projection_plane_normal = self.joint_to_projection_plane[bvh_dist_joint_name]
except Exception:
msg = f' error finding projection plane for bvh_end_joint_name: {bvh_dist_joint_name}'
logging.critical(msg)
assert False, msg
# project bone onto 2D plane
if np.array_equal(projection_plane_normal, x_axis):
projected_bone_xy = np.stack((-bone_vector[:, 2], bone_vector[:, 1]), axis=1)
elif np.array_equal(projection_plane_normal, z_axis):
projected_bone_xy = np.stack((bone_vector[:, 0], bone_vector[:, 1]), axis=1)
else:
msg = 'error projection_plane_normal'
logging.critical(msg)
assert False, msg
# get angles between y axis and bone
projected_bone_xy /= np.expand_dims(np.linalg.norm(projected_bone_xy, axis=1), axis=-1) # norm vector
y_axis = np.tile(np.array([0.0, 1.0]), reps=(projected_bone_xy.shape[0], 1))
at1 = np.arctan2(projected_bone_xy[:, 1], projected_bone_xy[:, 0], dtype=np.float32)
at2 = np.arctan2(y_axis[:, 1], y_axis[:, 0], dtype=np.float32)
theta: npt.NDArray[np.float32] = at1 - at2 # type: ignore
theta = np.degrees(theta) % 360.0
theta = np.where(theta < 0.0, theta + 360, theta)
# save it
self.char_joint_to_orientation[char_joint_name] = np.array(theta)
def get_retargeted_frame_data(self, time: float) -> Tuple[Dict[str, float], Dict[str, float], npt.NDArray[np.float32]]:
"""
Input: time, in seconds, used to select the correct BVH frame.
Calculates the proper frame and, for it, returns:
- orientations, dictionary mapping from character joint names to world orientations (degrees CCW from +Y axis)
- joint_depths, dictionary mapping from BVH skeleton's joint names to distance from joint to projection plane
- root_positions, the position of the character's root at this frame.
"""
frame_idx = int(round(time / self.bvh.frame_time, 0))
if frame_idx < 0:
logging.info(f'invalid frame_idx ({frame_idx}), replacing with 0')
frame_idx = 0
if self.bvh.frame_max_num <= frame_idx:
logging.info(f'invalid frame_idx ({frame_idx}), replacing with last frame {self.bvh.frame_max_num-1}')
frame_idx = self.bvh.frame_max_num-1
orientations = {key: val[frame_idx] for (key, val) in self.char_joint_to_orientation.items()}
joint_depths = {key: val[frame_idx] for (key, val) in self.bvh_joint_to_projection_depth.items()}
root_position = np.array([self.char_root_positions[frame_idx, 0], self.char_root_positions[frame_idx, 1], 0.0], dtype=np.float32)
root_position += self.character_start_loc # offset by character's starting location
return orientations, joint_depths, root_position
| AnimatedDrawings-main | animated_drawings/model/retargeter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from animated_drawings.view.view import View
from animated_drawings.view.shaders.shader import Shader
from animated_drawings.view.utils import get_projection_matrix
from animated_drawings.utils import read_background_image
from animated_drawings.model.scene import Scene
from animated_drawings.model.camera import Camera
from animated_drawings.model.transform import Transform
from animated_drawings.config import ViewConfig
import glfw
import OpenGL.GL as GL
import logging
from typing import Tuple, Dict
import numpy as np
import numpy.typing as npt
from pathlib import Path
from pkg_resources import resource_filename
class WindowView(View):
"""Window View for rendering into a visible window"""
def __init__(self, cfg: ViewConfig) -> None:
super().__init__(cfg)
glfw.init()
self.camera: Camera = Camera(cfg.camera_pos, cfg.camera_fwd)
self.win: glfw._GLFWwindow
self._create_window(*cfg.window_dimensions) # pyright: ignore[reportGeneralTypeIssues]
self.shaders: Dict[str, Shader] = {}
self.shader_ids: Dict[str, int] = {}
self._prep_shaders()
self.fboId: GL.GLint
self._prep_background_image()
self._set_shader_projections(get_projection_matrix(*self.get_framebuffer_size()))
def _prep_background_image(self) -> None:
""" Initialize framebuffer object for background image, if specified. """
# if nothing specified, return
if not self.cfg.background_image:
return
# load background image
_txtr: npt.NDArray[np.uint8] = read_background_image(self.cfg.background_image)
# create the opengl texture and send it data
self.txtr_h, self.txtr_w, _ = _txtr.shape
self.txtr_id = GL.glGenTextures(1)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 4)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.txtr_id)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, self.txtr_w, self.txtr_h, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, _txtr)
# make framebuffer object
self.fboId: GL.GLint = GL.glGenFramebuffers(1)
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D, self.txtr_id, 0)
def _prep_shaders(self) -> None:
BVH_VERT = Path(resource_filename(__name__, "shaders/bvh.vert"))
BVH_FRAG = Path(resource_filename(__name__, "shaders/bvh.frag"))
self._initiatize_shader('bvh_shader', str(BVH_VERT), str(BVH_FRAG))
COLOR_VERT = Path(resource_filename(__name__, "shaders/color.vert"))
COLOR_FRAG = Path(resource_filename(__name__, "shaders/color.frag"))
self._initiatize_shader('color_shader', str(COLOR_VERT), str(COLOR_FRAG))
TEXTURE_VERT = Path(resource_filename(__name__, "shaders/texture.vert"))
TEXTURE_FRAG = Path(resource_filename(__name__, "shaders/texture.frag"))
self._initiatize_shader('texture_shader', str(TEXTURE_VERT), str(TEXTURE_FRAG), texture=True)
def _update_shaders_view_transform(self, camera: Camera) -> None:
try:
view_transform: npt.NDArray[np.float32] = np.linalg.inv(camera.get_world_transform())
except Exception as e:
msg = f'Error inverting camera world transform: {e}'
logging.critical(msg)
assert False, msg
for shader_name in self.shaders:
GL.glUseProgram(self.shader_ids[shader_name])
view_loc = GL.glGetUniformLocation(self.shader_ids[shader_name], "view")
GL.glUniformMatrix4fv(view_loc, 1, GL.GL_FALSE, view_transform.T)
def _set_shader_projections(self, proj_m: npt.NDArray[np.float32]) -> None:
for shader_id in self.shader_ids.values():
GL.glUseProgram(shader_id)
proj_loc = GL.glGetUniformLocation(shader_id, "proj")
GL.glUniformMatrix4fv(proj_loc, 1, GL.GL_FALSE, proj_m.T)
def _initiatize_shader(self, shader_name: str, vert_path: str, frag_path: str, **kwargs) -> None:
self.shaders[shader_name] = Shader(vert_path, frag_path)
self.shader_ids[shader_name] = self.shaders[shader_name].glid # pyright: ignore[reportGeneralTypeIssues]
if 'texture' in kwargs and kwargs['texture'] is True:
GL.glUseProgram(self.shader_ids[shader_name])
GL.glUniform1i(GL.glGetUniformLocation(
self.shader_ids[shader_name], 'texture0'), 0)
def _create_window(self, width: int, height: int) -> None:
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.RESIZABLE, False)
self.win = glfw.create_window(width, height, 'Viewer', None, None)
glfw.make_context_current(self.win)
GL.glEnable(GL.GL_CULL_FACE)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glClearColor(*self.cfg.clear_color)
logging.info(f'OpenGL Version: {GL.glGetString(GL.GL_VERSION).decode()}') # pyright: ignore[reportGeneralTypeIssues]
logging.info(f'GLSL: { GL.glGetString(GL.GL_SHADING_LANGUAGE_VERSION).decode()}') # pyright: ignore[reportGeneralTypeIssues]
logging.info(f'Renderer: {GL.glGetString(GL.GL_RENDERER).decode()}') # pyright: ignore[reportGeneralTypeIssues]
def set_scene(self, scene: Scene) -> None:
self.scene = scene
def render(self, scene: Transform) -> None:
GL.glViewport(0, 0, *self.get_framebuffer_size())
# draw the background image if exists
if self.cfg.background_image:
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, 0)
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
win_w, win_h = self.get_framebuffer_size()
GL.glBlitFramebuffer(0, 0, self.txtr_w, self.txtr_h, 0, 0, win_w, win_h, GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
self._update_shaders_view_transform(self.camera)
scene.draw(shader_ids=self.shader_ids, viewer_cfg=self.cfg)
def get_framebuffer_size(self) -> Tuple[int, int]:
""" Return (width, height) of view's window. """
return glfw.get_framebuffer_size(self.win)
def swap_buffers(self) -> None:
glfw.swap_buffers(self.win)
def clear_window(self) -> None:
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT) # type: ignore
def cleanup(self) -> None:
""" Destroy the window when it's no longer being used. """
glfw.destroy_window(self.win)
| AnimatedDrawings-main | animated_drawings/view/window_view.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from abc import abstractmethod
from typing import Tuple
from animated_drawings.config import ViewConfig
class View:
"""
Base View class from which all other Views must be derived.
Views are responsible for controlling what is and isn't visible to them.
Views are responsible for initiating the 'draw' methods for each object which they want to render.
"""
def __init__(self, cfg: ViewConfig):
self.cfg: ViewConfig = cfg
pass
@abstractmethod
def render(self, scene) -> None: # pyright: ignore[reportUnknownParameterType,reportMissingParameterType]
""" Called by the controller to render the scene. """
@abstractmethod
def clear_window(self) -> None:
""" Clear output from previous render loop. """
@abstractmethod
def cleanup(self) -> None:
""" Cleanup after render loop is finished. """
@abstractmethod
def get_framebuffer_size(self) -> Tuple[int, int]:
""" Return (width, height) of framebuffer. """
@staticmethod
def create_view(view_cfg: ViewConfig) -> View:
""" Takes in a view dictionary from mvc config file and returns the appropriate view. """
# create view
if view_cfg.use_mesa:
from animated_drawings.view.mesa_view import MesaView
return MesaView(view_cfg)
else:
from animated_drawings.view.window_view import WindowView
return WindowView(view_cfg)
| AnimatedDrawings-main | animated_drawings/view/view.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import numpy.typing as npt
import logging
def get_projection_matrix(buffer_w: int, buffer_h: int, type_: str = 'perspective') -> npt.NDArray[np.float32]:
if type_ == 'perspective':
fov = 35.0
near = 0.1
aspect = buffer_w / buffer_h
top = near * np.tan(fov * np.pi / 360)
right = top * aspect
far = 10000.0
bottom = -top
left = -right
M_0_0 = (2 * near) / (right - left)
M_0_2 = (left + right) / (left - right)
M_1_1 = (2 * near) / (top - bottom)
M_1_2 = (bottom + top) / (bottom-top)
M_2_2 = (far + near) / (near - far)
M_2_3 = (2 * far * near) / (near - far)
M_3_2 = -1
M: npt.NDArray[np.float32] = np.zeros([4, 4], dtype=np.float32)
M[0, 0] = M_0_0
M[0, 2] = M_0_2
M[1, 1] = M_1_1
M[1, 2] = M_1_2
M[2, 2] = M_2_2
M[2, 3] = M_2_3
M[3, 2] = M_3_2
return M
else:
logging.critical(f'unsupported camera type specified: {type_}')
assert False
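# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of building a perspective projection matrix for a
# 640x480 framebuffer. The buffer size is an illustrative assumption; the 35-degree
# field of view and near/far planes are hard-coded in get_projection_matrix above.
if __name__ == '__main__':
    proj = get_projection_matrix(640, 480)
    print(proj.shape)  # (4, 4)
    print(proj)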
| AnimatedDrawings-main | animated_drawings/view/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ['PYOPENGL_PLATFORM'] = "osmesa"
os.environ['MESA_GL_VERSION_OVERRIDE'] = "3.3"
from OpenGL import GL, osmesa
from animated_drawings.model.camera import Camera
from animated_drawings.model.scene import Scene
from animated_drawings.model.transform import Transform
from animated_drawings.view.view import View
from animated_drawings.view.utils import get_projection_matrix
from animated_drawings.utils import read_background_image
from animated_drawings.view.shaders.shader import Shader
from animated_drawings.config import ViewConfig
import logging
from typing import Tuple, Dict
import numpy as np
import numpy.typing as npt
from pathlib import Path
from pkg_resources import resource_filename
class MesaView(View):
""" Mesa View for Headless Rendering """
def __init__(self, cfg: ViewConfig) -> None:
super().__init__(cfg)
self.camera: Camera = Camera(self.cfg.camera_pos, self.cfg.camera_fwd)
self.ctx: osmesa.OSMesaContext
self.buffer: npt.NDArray[np.uint8]
self._initialize_mesa()
self.shaders: Dict[str, Shader] = {}
self.shader_ids: Dict[str, int] = {}
self._prep_shaders()
self._prep_background_image()
self._set_shader_projections(get_projection_matrix(*self.get_framebuffer_size()))
def _prep_background_image(self) -> None:
""" Initialize framebuffer object for background image, if specified. """
# if nothing specified, return
if not self.cfg.background_image:
return
_txtr = read_background_image(self.cfg.background_image)
self.txtr_h, self.txtr_w, _ = _txtr.shape
self.txtr_id = GL.glGenTextures(1)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 4)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.txtr_id)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, self.txtr_w, self.txtr_h, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, _txtr)
self.fboId: GL.GLint = GL.glGenFramebuffers(1)
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D, self.txtr_id, 0)
def _prep_shaders(self) -> None:
BVH_VERT = Path(resource_filename(__name__, "shaders/bvh.vert"))
BVH_FRAG = Path(resource_filename(__name__, "shaders/bvh.frag"))
self._initiatize_shader('bvh_shader', str(BVH_VERT), str(BVH_FRAG))
COLOR_VERT = Path(resource_filename(__name__, "shaders/color.vert"))
COLOR_FRAG = Path(resource_filename(__name__, "shaders/color.frag"))
self._initiatize_shader('color_shader', str(COLOR_VERT), str(COLOR_FRAG))
TEXTURE_VERT = Path(resource_filename(__name__, "shaders/texture.vert"))
TEXTURE_FRAG = Path(resource_filename(__name__, "shaders/texture.frag"))
self._initiatize_shader('texture_shader', str(TEXTURE_VERT), str(TEXTURE_FRAG), texture=True)
def _update_shaders_view_transform(self, camera: Camera) -> None:
try:
view_transform: npt.NDArray[np.float32] = np.linalg.inv(camera.get_world_transform())
except Exception as e:
msg = f'Error inverting camera world transform: {e}'
logging.critical(msg)
assert False, msg
for shader_name in self.shaders:
GL.glUseProgram(self.shader_ids[shader_name])
view_loc = GL.glGetUniformLocation(self.shader_ids[shader_name], "view")
GL.glUniformMatrix4fv(view_loc, 1, GL.GL_FALSE, view_transform.T)
def _set_shader_projections(self, proj_m: npt.NDArray[np.float32]) -> None:
for shader_id in self.shader_ids.values():
GL.glUseProgram(shader_id)
proj_loc = GL.glGetUniformLocation(shader_id, "proj")
GL.glUniformMatrix4fv(proj_loc, 1, GL.GL_FALSE, proj_m.T)
def _initiatize_shader(self, shader_name: str, vert_path: str, frag_path: str, **kwargs) -> None:
self.shaders[shader_name] = Shader(vert_path, frag_path)
self.shader_ids[shader_name] = self.shaders[shader_name].glid # pyright: ignore[reportGeneralTypeIssues]
if 'texture' in kwargs and kwargs['texture'] is True:
GL.glUseProgram(self.shader_ids[shader_name])
GL.glUniform1i(GL.glGetUniformLocation(
self.shader_ids[shader_name], 'texture0'), 0)
def _initialize_mesa(self) -> None:
width, height = self.cfg.window_dimensions
self.ctx = osmesa.OSMesaCreateContext(osmesa.OSMESA_RGBA, None)
self.buffer: npt.NDArray[np.uint8] = GL.arrays.GLubyteArray.zeros((height, width, 4)) # type: ignore
osmesa.OSMesaMakeCurrent(self.ctx, self.buffer, GL.GL_UNSIGNED_BYTE, width, height)
GL.glClearColor(*self.cfg.clear_color)
def set_scene(self, scene: Scene) -> None:
self.scene = scene
def render(self, scene: Transform) -> None:
GL.glViewport(0, 0, *self.get_framebuffer_size())
# Draw the background
if self.cfg.background_image:
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, 0)
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fboId)
win_w, win_h = self.get_framebuffer_size()
GL.glBlitFramebuffer(0, 0, self.txtr_w, self.txtr_h, 0, 0, win_w, win_h, GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
self._update_shaders_view_transform(self.camera)
scene.draw(shader_ids=self.shader_ids, viewer_cfg=self.cfg)
def get_framebuffer_size(self) -> Tuple[int, int]:
""" Return (width, height) of view's window. """
return self.buffer.shape[:2][::-1]
def clear_window(self) -> None:
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT) # type: ignore
def cleanup(self) -> None:
""" Destroy the context when it is finished. """
osmesa.OSMesaDestroyContext(self.ctx)
| AnimatedDrawings-main | animated_drawings/view/mesa_view.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import OpenGL.GL as GL
import logging
class Shader:
"""Class to create shader programs"""
@staticmethod
def _compile_shader(src: str, shader_type):
with open(src, 'r') as f:
src = f.read()
shader = GL.glCreateShader(shader_type)
GL.glShaderSource(shader, src)
GL.glCompileShader(shader)
status: bool = GL.glGetShaderiv(shader, GL.GL_COMPILE_STATUS)
if not status:
log = GL.glGetShaderInfoLog(shader).decode('ascii')
src = '\n'.join([f'{idx + 1}: {l}' for idx, l in enumerate(src.splitlines())])
msg = f'Compile failed for {shader_type}\n{log}\n{src}'
logging.critical(msg)
assert False, msg
return shader
def __init__(self, vertex_source, fragment_source):
"""Takes paths to shader code"""
vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)
frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)
if not (vert and frag):
msg = 'Error compiling shaders'
logging.critical(msg)
assert False, msg
self.glid = GL.glCreateProgram()
GL.glAttachShader(self.glid, vert)
GL.glAttachShader(self.glid, frag)
GL.glLinkProgram(self.glid)
GL.glDeleteShader(vert)
GL.glDeleteShader(frag)
status: bool = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)
if not status:
msg = f'Error creating shader program: {GL.glGetProgramInfoLog(self.glid).decode("ascii")}'
logging.critical(msg)
assert False, msg
| AnimatedDrawings-main | animated_drawings/view/shaders/shader.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import argparse
from src.dataset import BinauralDataset
from src.models import BinauralNetwork
from src.trainer import Trainer
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_directory",
type=str,
default="./data/trainset",
help="path to the training data")
parser.add_argument("--artifacts_directory",
type=str,
default="./outputs",
help="directory to write model files to")
parser.add_argument("--num_gpus",
type=int,
default=4,
help="number of GPUs used during training")
parser.add_argument("--blocks",
type=int,
default=3)
args = parser.parse_args()
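# Example invocation (all flags shown here take their default values):
#   python train.py --dataset_directory ./data/trainset --artifacts_directory ./outputs --num_gpus 4 --blocks 3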
config = {
"artifacts_dir": args.artifacts_directory,
"learning_rate": 0.001,
"newbob_decay": 0.5,
"newbob_max_decay": 0.01,
"batch_size": 32,
"mask_beginning": 1024,
"loss_weights": {"l2": 1.0, "phase": 0.01},
"save_frequency": 10,
"epochs": 100,
"num_gpus": args.num_gpus,
}
os.makedirs(config["artifacts_dir"], exist_ok=True)
dataset = BinauralDataset(dataset_directory=args.dataset_directory, chunk_size_ms=200, overlap=0.5)
net = BinauralNetwork(view_dim=7,
warpnet_layers=4,
warpnet_channels=64,
wavenet_blocks=args.blocks,
layers_per_block=10,
wavenet_channels=64
)
print(f"receptive field: {net.receptive_field()}")
print(f"train on {len(dataset.chunks)} chunks")
print(f"number of trainable parameters: {net.num_trainable_parameters()}")
trainer = Trainer(config, net, dataset)
trainer.train()
| BinauralSpeechSynthesis-main | train.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import argparse
import numpy as np
import torch as th
import torchaudio as ta
from src.models import BinauralNetwork
from src.losses import L2Loss, AmplitudeLoss, PhaseLoss
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_directory",
type=str,
default="./data/testset",
help="path to the test data")
parser.add_argument("--model_file",
type=str,
default="./outputs/binaural_network.net",
help="model file containing the trained binaural network weights")
parser.add_argument("--artifacts_directory",
type=str,
default="./outputs",
help="directory to write binaural outputs to")
parser.add_argument("--blocks",
type=int,
default=3)
args = parser.parse_args()
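# Example invocation (all flags shown here take their default values; requires a CUDA-capable GPU):
#   python evaluate.py --dataset_directory ./data/testset --model_file ./outputs/binaural_network.net --artifacts_directory ./outputs --blocks 3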
def chunked_forwarding(net, mono, view):
'''
    binauralize the mono input given the view
:param net: binauralization network
:param mono: 1 x T tensor containing the mono audio signal
:param view: 7 x K tensor containing the view as 3D positions and quaternions for orientation (K = T / 400)
:return: 2 x T tensor containing binauralized audio signal
'''
net.eval().cuda()
mono, view = mono.cuda(), view.cuda()
chunk_size = 480000 # forward in chunks of 10s
rec_field = net.receptive_field() + 1000 # add 1000 samples as "safe bet" since warping has undefined rec. field
rec_field -= rec_field % 400 # make sure rec_field is a multiple of 400 to match audio and view frequencies
chunks = [
{
"mono": mono[:, max(0, i-rec_field):i+chunk_size],
"view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400]
}
for i in range(0, mono.shape[-1], chunk_size)
]
for i, chunk in enumerate(chunks):
with th.no_grad():
mono = chunk["mono"].unsqueeze(0)
view = chunk["view"].unsqueeze(0)
binaural = net(mono, view)["output"].squeeze(0)
if i > 0:
binaural = binaural[:, -(mono.shape[-1]-rec_field):]
chunk["binaural"] = binaural
binaural = th.cat([chunk["binaural"] for chunk in chunks], dim=-1)
binaural = th.clamp(binaural, min=-1, max=1).cpu()
return binaural
def compute_metrics(binauralized, reference):
'''
    compute l2 error, amplitude error, and angular phase error for the given binauralized and reference signals
:param binauralized: 2 x T tensor containing predicted binaural signal
:param reference: 2 x T tensor containing reference binaural signal
:return: errors as a scalar value for each metric and the number of samples in the sequence
'''
binauralized, reference = binauralized.unsqueeze(0), reference.unsqueeze(0)
# compute error metrics
l2_error = L2Loss()(binauralized, reference)
amplitude_error = AmplitudeLoss(sample_rate=48000)(binauralized, reference)
phase_error = PhaseLoss(sample_rate=48000, ignore_below=0.2)(binauralized, reference)
return{
"l2": l2_error,
"amplitude": amplitude_error,
"phase": phase_error,
"samples": binauralized.shape[-1]
}
# binauralize and evaluate the test sequences for the eight subjects and the validation sequence
test_sequences = [f"subject{i+1}" for i in range(8)] + ["validation_sequence"]
# initialize network
net = BinauralNetwork(view_dim=7,
warpnet_layers=4,
warpnet_channels=64,
wavenet_blocks=args.blocks,
layers_per_block=10,
wavenet_channels=64
)
net.load_from_file(args.model_file)
os.makedirs(f"{args.artifacts_directory}", exist_ok=True)
errors = []
for test_sequence in test_sequences:
print(f"binauralize {test_sequence}...")
# load mono input and view conditioning
mono, sr = ta.load(f"{args.dataset_directory}/{test_sequence}/mono.wav")
view = np.loadtxt(f"{args.dataset_directory}/{test_sequence}/tx_positions.txt").transpose().astype(np.float32)
view = th.from_numpy(view)
# sanity checks
if not sr == 48000:
raise Exception(f"sampling rate is expected to be 48000 but is {sr}.")
if not view.shape[-1] * 400 == mono.shape[-1]:
raise Exception(f"mono signal is expected to have 400x the length of the position/orientation sequence.")
# binauralize and save output
binaural = chunked_forwarding(net, mono, view)
ta.save(f"{args.artifacts_directory}/{test_sequence}.wav", binaural, sr)
# compute error metrics
reference, sr = ta.load(f"{args.dataset_directory}/{test_sequence}/binaural.wav")
errors.append(compute_metrics(binaural, reference))
# accumulate errors
sequence_weights = np.array([err["samples"] for err in errors])
sequence_weights = sequence_weights / np.sum(sequence_weights)
l2_error = sum([err["l2"] * sequence_weights[i] for i, err in enumerate(errors)])
amplitude_error = sum([err["amplitude"] * sequence_weights[i] for i, err in enumerate(errors)])
phase_error = sum([err["phase"] * sequence_weights[i] for i, err in enumerate(errors)])
# print accumulated errors on testset
print(f"l2 (x10^3): {l2_error * 1000:.3f}")
print(f"amplitude: {amplitude_error:.3f}")
print(f"phase: {phase_error:.3f}")
| BinauralSpeechSynthesis-main | evaluate.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class TimeWarperFunction(th.autograd.Function):
@staticmethod
def forward(ctx, input, warpfield):
'''
:param ctx: autograd context
:param input: input signal (B x 2 x T)
:param warpfield: the corresponding warpfield (B x 2 x T)
:return: the warped signal (B x 2 x T)
'''
ctx.save_for_backward(input, warpfield)
# compute index list to lookup warped input values
idx_left = warpfield.floor().type(th.long)
idx_right = th.clamp(warpfield.ceil().type(th.long), max=input.shape[-1]-1)
# compute weight for linear interpolation
alpha = warpfield - warpfield.floor()
# linear interpolation
output = (1 - alpha) * th.gather(input, 2, idx_left) + alpha * th.gather(input, 2, idx_right)
return output
@staticmethod
def backward(ctx, grad_output):
input, warpfield = ctx.saved_tensors
# compute index list to lookup warped input values
idx_left = warpfield.floor().type(th.long)
idx_right = th.clamp(warpfield.ceil().type(th.long), max=input.shape[-1]-1)
# warpfield gradient
grad_warpfield = th.gather(input, 2, idx_right) - th.gather(input, 2, idx_left)
grad_warpfield = grad_output * grad_warpfield
# input gradient
grad_input = th.zeros(input.shape, device=input.device)
alpha = warpfield - warpfield.floor()
grad_input = grad_input.scatter_add(2, idx_left, grad_output * (1 - alpha)) + \
grad_input.scatter_add(2, idx_right, grad_output * alpha)
return grad_input, grad_warpfield
class TimeWarper(nn.Module):
def __init__(self):
super().__init__()
self.warper = TimeWarperFunction().apply
def _to_absolute_positions(self, warpfield, seq_length):
# translate warpfield from relative warp indices to absolute indices ([1...T] + warpfield)
temp_range = th.arange(seq_length, dtype=th.float)
temp_range = temp_range.cuda() if warpfield.is_cuda else temp_range
return th.clamp(warpfield + temp_range[None, None, :], min=0, max=seq_length-1)
def forward(self, input, warpfield):
'''
:param input: audio signal to be warped (B x 2 x T)
:param warpfield: the corresponding warpfield (B x 2 x T)
:return: the warped signal (B x 2 x T)
'''
warpfield = self._to_absolute_positions(warpfield, input.shape[-1])
warped = self.warper(input, warpfield)
return warped
class MonotoneTimeWarper(TimeWarper):
def forward(self, input, warpfield):
'''
:param input: audio signal to be warped (B x 2 x T)
:param warpfield: the corresponding warpfield (B x 2 x T)
:return: the warped signal (B x 2 x T), ensured to be monotonous
'''
warpfield = self._to_absolute_positions(warpfield, input.shape[-1])
# ensure monotonicity: each warp must be at least as big as previous_warp-1
warpfield = th.cummax(warpfield, dim=-1)[0]
# warp
warped = self.warper(input, warpfield)
return warped
class GeometricTimeWarper(TimeWarper):
def __init__(self, sampling_rate=48000):
super().__init__()
self.sampling_rate = sampling_rate
def displacements2warpfield(self, displacements, seq_length):
distance = th.sum(displacements**2, dim=2) ** 0.5
distance = F.interpolate(distance, size=seq_length)
warpfield = -distance / 343.0 * self.sampling_rate
return warpfield
def forward(self, input, displacements):
'''
:param input: audio signal to be warped (B x 2 x T)
        :param displacements: 3D displacement vectors between mouth and each ear for geometric warping (B x 2 x 3 x K; interpolated to length T internally)
:return: the warped signal (B x 2 x T)
'''
warpfield = self.displacements2warpfield(displacements, input.shape[-1])
warped = super().forward(input, warpfield)
return warped
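if __name__ == "__main__":
    # Minimal CPU sanity check (illustrative only, not part of the training pipeline):
    # delay a two-channel ramp signal by a constant, fractional number of samples.
    signal = th.arange(16, dtype=th.float).view(1, 1, 16).repeat(1, 2, 1)  # B x 2 x T
    warpfield = th.full((1, 2, 16), -1.5)  # shift every sample 1.5 steps into the past
    warped = MonotoneTimeWarper()(signal, warpfield)
    print(warped[0, 0])  # ramp delayed by 1.5 samples via linear interpolation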
| BinauralSpeechSynthesis-main | src/warping.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import scipy.linalg
from scipy.spatial.transform import Rotation as R
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from src.hyperconv import HyperConvBlock
from src.warping import GeometricTimeWarper, MonotoneTimeWarper
from src.utils import Net
class GeometricWarper(nn.Module):
def __init__(self, sampling_rate=48000):
super().__init__()
self.warper = GeometricTimeWarper(sampling_rate=sampling_rate)
def _transmitter_mouth(self, view):
# offset between tracking markers and real mouth position in the dataset
mouth_offset = np.array([0.09, 0, -0.20])
quat = view[:, 3:, :].transpose(2, 1).contiguous().detach().cpu().view(-1, 4).numpy()
# make sure zero-padded values are set to non-zero values (else scipy raises an exception)
norms = scipy.linalg.norm(quat, axis=1)
eps_val = (norms == 0).astype(np.float32)
quat = quat + eps_val[:, None]
transmitter_rot_mat = R.from_quat(quat)
transmitter_mouth = transmitter_rot_mat.apply(mouth_offset, inverse=True)
transmitter_mouth = th.Tensor(transmitter_mouth).view(view.shape[0], -1, 3).transpose(2, 1).contiguous()
if view.is_cuda:
transmitter_mouth = transmitter_mouth.cuda()
return transmitter_mouth
def _3d_displacements(self, view):
transmitter_mouth = self._transmitter_mouth(view)
# offset between tracking markers and ears in the dataset
left_ear_offset = th.Tensor([0, -0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, -0.08, -0.22])
right_ear_offset = th.Tensor([0, 0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, 0.08, -0.22])
# compute displacements between transmitter mouth and receiver left/right ear
displacement_left = view[:, 0:3, :] + transmitter_mouth - left_ear_offset[None, :, None]
displacement_right = view[:, 0:3, :] + transmitter_mouth - right_ear_offset[None, :, None]
displacement = th.stack([displacement_left, displacement_right], dim=1)
return displacement
def _warpfield(self, view, seq_length):
return self.warper.displacements2warpfield(self._3d_displacements(view), seq_length)
def forward(self, mono, view):
'''
:param mono: input signal as tensor of shape B x 1 x T
:param view: rx/tx position/orientation as tensor of shape B x 7 x K (K = T / 400)
:return: warped: warped left/right ear signal as tensor of shape B x 2 x T
'''
return self.warper(th.cat([mono, mono], dim=1), self._3d_displacements(view))
class Warpnet(nn.Module):
def __init__(self, layers=4, channels=64, view_dim=7):
super().__init__()
self.layers = [nn.Conv1d(view_dim if l == 0 else channels, channels, kernel_size=2) for l in range(layers)]
self.layers = nn.ModuleList(self.layers)
self.linear = nn.Conv1d(channels, 2, kernel_size=1)
self.neural_warper = MonotoneTimeWarper()
self.geometric_warper = GeometricWarper()
def neural_warpfield(self, view, seq_length):
warpfield = view
for layer in self.layers:
warpfield = F.pad(warpfield, pad=[1, 0])
warpfield = F.relu(layer(warpfield))
warpfield = self.linear(warpfield)
warpfield = F.interpolate(warpfield, size=seq_length)
return warpfield
def forward(self, mono, view):
'''
:param mono: input signal as tensor of shape B x 1 x T
:param view: rx/tx position/orientation as tensor of shape B x 7 x K (K = T / 400)
:return: warped: warped left/right ear signal as tensor of shape B x 2 x T
'''
geometric_warpfield = self.geometric_warper._warpfield(view, mono.shape[-1])
neural_warpfield = self.neural_warpfield(view, mono.shape[-1])
warpfield = geometric_warpfield + neural_warpfield
# ensure causality
warpfield = -F.relu(-warpfield)
warped = self.neural_warper(th.cat([mono, mono], dim=1), warpfield)
return warped
class HyperConvWavenet(nn.Module):
def __init__(self, z_dim, channels=64, blocks=3, layers_per_block=10, conv_len=2):
super().__init__()
self.layers = []
self.rectv_field = 1
for b in range(blocks):
for l in range(layers_per_block):
self.layers += [HyperConvBlock(channels, channels, z_dim, kernel_size=conv_len, dilation=2**l)]
self.rectv_field += self.layers[-1].receptive_field() - 1
self.layers = nn.ModuleList(self.layers)
def forward(self, x, z):
'''
:param x: input signal as a B x channels x T tensor
        :param z: weight-generating input as a B x z_dim x K tensor (K = T / 400)
:return: x: output signal as a B x channels x T tensor
skips: skip signal for each layer as a list of B x channels x T tensors
'''
skips = []
for layer in self.layers:
x, skip = layer(x, z)
skips += [skip]
return x, skips
def receptive_field(self):
return self.rectv_field
class WaveoutBlock(nn.Module):
def __init__(self, channels):
super().__init__()
self.first = nn.Conv1d(channels, channels, kernel_size=1)
self.first.weight.data.uniform_(-np.sqrt(6.0 / channels), np.sqrt(6.0 / channels))
self.second = nn.Conv1d(channels, 2, kernel_size=1)
def forward(self, x):
x = th.sin(self.first(x))
return self.second(x)
class BinauralNetwork(Net):
def __init__(self,
view_dim=7,
warpnet_layers=4,
warpnet_channels=64,
wavenet_blocks=3,
layers_per_block=10,
wavenet_channels=64,
model_name='binaural_network',
use_cuda=True):
super().__init__(model_name, use_cuda)
self.warper = Warpnet(warpnet_layers, warpnet_channels)
self.input = nn.Conv1d(2, wavenet_channels, kernel_size=1)
self.input.weight.data.uniform_(-np.sqrt(6.0 / 2), np.sqrt(6.0 / 2))
self.hyperconv_wavenet = HyperConvWavenet(view_dim, wavenet_channels, wavenet_blocks, layers_per_block)
self.output_net = nn.ModuleList([WaveoutBlock(wavenet_channels)
for _ in range(wavenet_blocks*layers_per_block)])
if self.use_cuda:
self.cuda()
def forward(self, mono, view):
'''
:param mono: the input signal as a B x 1 x T tensor
:param view: the receiver/transmitter position as a B x 7 x T tensor
:return: out: the binaural output produced by the network
intermediate: a two-channel audio signal obtained from the output of each intermediate layer
as a list of B x 2 x T tensors
'''
warped = self.warper(mono, view)
x = self.input(warped)
_, skips = self.hyperconv_wavenet(x, view)
# collect output and skips after each layer
x = []
for k in range(len(skips), 0, -1):
y = th.mean(th.stack(skips[:k], dim=0), dim=0)
y = self.output_net[k-1](y)
x += [y]
x += [warped]
return {"output": x[0], "intermediate": x[1:]}
def receptive_field(self):
return self.hyperconv_wavenet.receptive_field()
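if __name__ == "__main__":
    # Minimal CPU forward-pass sketch (illustrative only; actual training runs on CUDA
    # with real tracking data, see train.py). Shapes follow the docstrings above.
    net = BinauralNetwork(use_cuda=False)
    mono = th.randn(1, 1, 800)          # B x 1 x T, T must be a multiple of 400
    view = th.randn(1, 7, 800 // 400)   # B x 7 x K with K = T / 400
    out = net(mono, view)
    print(out["output"].shape)          # torch.Size([1, 2, 800])
    print(len(out["intermediate"]))     # one intermediate binaural signal per wavenet layer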
| BinauralSpeechSynthesis-main | src/models.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import tqdm
import torchaudio as ta
import numpy as np
class BinauralDataset:
'''
dataset_directory: (str) base directory of the dataset
chunk_size_ms: (int) length of an acoustic chunk in ms
overlap: (float) overlap ratio between two neighboring audio chunks, must be in range [0, 1)
'''
def __init__(self,
dataset_directory,
chunk_size_ms=200,
overlap=0.5
):
super().__init__()
# load audio data and relative transmitter/receiver position/orientation
self.mono, self.binaural, self.view = [], [], []
pbar = tqdm.tqdm(range(8))
for subject_id in pbar:
pbar.set_description(f"loading data: subject {subject_id + 1}")
mono, _ = ta.load(f"{dataset_directory}/subject{subject_id + 1}/mono.wav")
binaural, _ = ta.load(f"{dataset_directory}/subject{subject_id + 1}/binaural.wav")
# receiver is fixed at origin in this dataset, so we only need transmitter view
tx_view = np.loadtxt(f"{dataset_directory}/subject{subject_id + 1}/tx_positions.txt").transpose()
self.mono.append(mono)
self.binaural.append(binaural)
self.view.append(tx_view.astype(np.float32))
# ensure that chunk_size is a multiple of 400 to match audio (48kHz) and receiver/transmitter positions (120Hz)
self.chunk_size = chunk_size_ms * 48
if self.chunk_size % 400 > 0:
self.chunk_size = self.chunk_size + 400 - self.chunk_size % 400
# compute chunks
self.chunks = []
for subject_id in range(8):
last_chunk_start_frame = self.mono[subject_id].shape[-1] - self.chunk_size + 1
hop_length = int((1 - overlap) * self.chunk_size)
for offset in range(0, last_chunk_start_frame, hop_length):
self.chunks.append({'subject': subject_id, 'offset': offset})
def __len__(self):
'''
:return: number of training chunks in dataset
'''
return len(self.chunks)
def __getitem__(self, idx):
'''
:param idx: index of the chunk to be returned
:return: mono audio as 1 x T tensor
binaural audio as 2 x T tensor
relative rx/tx position as 7 x K tensor, where K = T / 400 (120Hz tracking vs. 48000Hz audio)
'''
subject = self.chunks[idx]['subject']
offset = self.chunks[idx]['offset']
mono = self.mono[subject][:, offset:offset+self.chunk_size]
binaural = self.binaural[subject][:, offset:offset+self.chunk_size]
view = self.view[subject][:, offset//400:(offset+self.chunk_size)//400]
return mono, binaural, view
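if __name__ == "__main__":
    # Usage sketch (assumes the training data has been downloaded to ./data/trainset,
    # the default location used by train.py).
    from torch.utils.data import DataLoader
    dataset = BinauralDataset("./data/trainset", chunk_size_ms=200, overlap=0.5)
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    mono, binaural, view = next(iter(loader))
    print(mono.shape, binaural.shape, view.shape)  # (4, 1, 9600), (4, 2, 9600), (4, 7, 24)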
| BinauralSpeechSynthesis-main | src/dataset.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class HyperConv(nn.Module):
def __init__(self, input_size, ch_in, ch_out, kernel_size, dilation=1):
'''
HyperConv implements a temporal convolution that has different convolution weights for each time step.
:param input_size: (int) dimension of the weight generating input variable
:param ch_in: (int) number of input channels of the temporal convolution
:param ch_out: (int) number of output channels of the temporal convolution
:param kernel_size: (int) kernel size of the temporal convolution
:param dilation: (int) dilation of the temporal convolution
'''
super().__init__()
weight_regressor_hidden_size = 32
self.ch_in = ch_in
self.ch_out = ch_out
self.kernel_size = kernel_size
self.dilation = dilation
self.weight_model = nn.Sequential(
nn.Conv1d(input_size, weight_regressor_hidden_size, kernel_size=1),
nn.ReLU(),
nn.Conv1d(weight_regressor_hidden_size, ch_in * ch_out * kernel_size, kernel_size=1)
)
self.bias_model = nn.Sequential(
nn.Conv1d(input_size, weight_regressor_hidden_size, kernel_size=1),
nn.ReLU(),
nn.Conv1d(weight_regressor_hidden_size, ch_out, kernel_size=1)
)
# initialize weights such that regressed weights are distributed in a suitable way for sine activations
self.weight_model[0].weight.data.zero_()
self.weight_model[0].bias.data.zero_()
self.weight_model[-1].bias.data.uniform_(-np.sqrt(6.0/(self.ch_in*self.kernel_size)),
np.sqrt(6.0/(self.ch_in*self.kernel_size)))
def forward(self, x, z):
'''
:param x: the input signal as a B x ch_in x T tensor
:param z: the weight-generating input as a B x z_dim x K tensor (K s.t. T is a multiple of K)
:return: a B x ch_out x T tensor as the result of the hyper-convolution
'''
B = x.shape[0]
assert x.shape[-1] % z.shape[-1] == 0
# padding
padding = self.dilation * (self.kernel_size - 1)
x = F.pad(x, [padding, 0])
# linearize input by appending receptive field in channels
start, end = padding, x.shape[-1]
x = th.cat([x[:, :, start-i*self.dilation:end-i*self.dilation] for i in range(self.kernel_size)], dim=1)
# rearrange input to blocks for matrix multiplication
x = x.permute(0, 2, 1).contiguous().view(x.shape[0] * z.shape[-1], x.shape[-1]//z.shape[-1], x.shape[1])
# compute weights and bias
weight = self.weight_model(z).view(B, self.ch_in * self.kernel_size, self.ch_out, z.shape[-1])
weight = weight.permute(0, 3, 1, 2).contiguous().view(B * z.shape[-1], self.ch_in * self.kernel_size, self.ch_out)
bias = self.bias_model(z).view(B, self.ch_out, z.shape[-1])
bias = bias.permute(0, 2, 1).contiguous().view(B * z.shape[-1], self.ch_out)
# compute result of dynamic convolution
y = th.bmm(x, weight)
y = y + bias[:, None, :]
y = y.view(B, -1, self.ch_out).permute(0, 2, 1).contiguous()
return y
class HyperConvBlock(nn.Module):
def __init__(self, ch_in, ch_out, z_dim, kernel_size, dilation=1):
'''
:param ch_in: (int) input channels
:param ch_out: (int) output channels
:param z_dim: (int) dimension of the weight-generating input
:param kernel_size: (int) size of the filter
:param dilation: (int) dilation
'''
super().__init__()
self.kernel_size = kernel_size
self.dilation = dilation
self.ch_in = ch_in
self.ch_out = ch_out
self.conv = HyperConv(z_dim, ch_in, ch_out, kernel_size, dilation)
self.residual = nn.Conv1d(ch_out, ch_out, kernel_size=1)
self.residual.weight.data.uniform_(-np.sqrt(6.0/ch_out), np.sqrt(6.0/ch_out))
self.skip = nn.Conv1d(ch_out, ch_out, kernel_size=1)
self.skip.weight.data.uniform_(-np.sqrt(6.0/ch_out), np.sqrt(6.0/ch_out))
if not ch_in == ch_out:
self.equalize_channels = nn.Conv1d(ch_in, ch_out, kernel_size=1)
self.equalize_channels.weight.data.uniform_(-np.sqrt(6.0 / ch_in), np.sqrt(6.0 / ch_in))
def forward(self, x, z):
'''
:param x: input signal as a B x ch_in x T tensor
:param z: weight-generating input as a B x z_dim x K tensor (K s.t. T is a multiple of K)
:return: output: B x ch_out x T tensor as layer output
skip: B x ch_out x T tensor as skip connection output
'''
assert x.shape[-1] % z.shape[-1] == 0
y = self.conv(x, z)
y = th.sin(y)
# residual and skip
residual = self.residual(y)
if not self.ch_in == self.ch_out:
x = self.equalize_channels(x)
skip = self.skip(y)
return (residual + x) / 2, skip
def receptive_field(self):
return (self.kernel_size - 1) * self.dilation + 1
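if __name__ == "__main__":
    # Minimal shape check (illustrative only): one hyper-convolution block on random data.
    block = HyperConvBlock(ch_in=64, ch_out=64, z_dim=7, kernel_size=2, dilation=4)
    x = th.randn(2, 64, 800)  # B x ch_in x T
    z = th.randn(2, 7, 2)     # B x z_dim x K (T must be a multiple of K)
    y, skip = block(x, z)
    print(y.shape, skip.shape, block.receptive_field())  # (2, 64, 800) (2, 64, 800) 5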
| BinauralSpeechSynthesis-main | src/hyperconv.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch as th
import torchaudio as ta
class Net(th.nn.Module):
def __init__(self, model_name="network", use_cuda=True):
super().__init__()
self.use_cuda = use_cuda
self.model_name = model_name
def save(self, model_dir, suffix=''):
'''
save the network to model_dir/model_name.suffix.net
:param model_dir: directory to save the model to
:param suffix: suffix to append after model name
'''
if self.use_cuda:
self.cpu()
if suffix == "":
fname = f"{model_dir}/{self.model_name}.net"
else:
fname = f"{model_dir}/{self.model_name}.{suffix}.net"
th.save(self.state_dict(), fname)
if self.use_cuda:
self.cuda()
def load_from_file(self, model_file):
'''
load network parameters from model_file
:param model_file: file containing the model parameters
'''
if self.use_cuda:
self.cpu()
states = th.load(model_file)
self.load_state_dict(states)
if self.use_cuda:
self.cuda()
print(f"Loaded: {model_file}")
def load(self, model_dir, suffix=''):
'''
load network parameters from model_dir/model_name.suffix.net
:param model_dir: directory to load the model from
:param suffix: suffix to append after model name
'''
if suffix == "":
fname = f"{model_dir}/{self.model_name}.net"
else:
fname = f"{model_dir}/{self.model_name}.{suffix}.net"
self.load_from_file(fname)
def num_trainable_parameters(self):
'''
:return: the number of trainable parameters in the model
'''
return sum(p.numel() for p in self.parameters() if p.requires_grad)
class NewbobAdam(th.optim.Adam):
def __init__(self,
weights,
net,
artifacts_dir,
initial_learning_rate=0.001,
decay=0.5,
max_decay=0.01
):
'''
Newbob learning rate scheduler
:param weights: weights to optimize
:param net: the network, must be an instance of type src.utils.Net
:param artifacts_dir: (str) directory to save/restore models to/from
:param initial_learning_rate: (float) initial learning rate
:param decay: (float) value to decrease learning rate by when loss doesn't improve further
:param max_decay: (float) maximum decay of learning rate
'''
super().__init__(weights, lr=initial_learning_rate)
self.last_epoch_loss = np.inf
self.total_decay = 1
self.net = net
self.decay = decay
self.max_decay = max_decay
self.artifacts_dir = artifacts_dir
# store initial state as backup
if decay < 1.0:
net.save(artifacts_dir, suffix="newbob")
def update_lr(self, loss):
'''
update the learning rate based on the current loss value and historic loss values
:param loss: the loss after the current iteration
'''
if loss > self.last_epoch_loss and self.decay < 1.0 and self.total_decay > self.max_decay:
self.total_decay = self.total_decay * self.decay
print(f"NewbobAdam: Decay learning rate (loss degraded from {self.last_epoch_loss} to {loss})."
f"Total decay: {self.total_decay}")
# restore previous network state
self.net.load(self.artifacts_dir, suffix="newbob")
# decrease learning rate
for param_group in self.param_groups:
param_group['lr'] = param_group['lr'] * self.decay
else:
self.last_epoch_loss = loss
# save last snapshot to restore it in case of lr decrease
if self.decay < 1.0 and self.total_decay > self.max_decay:
self.net.save(self.artifacts_dir, suffix="newbob")
class FourierTransform:
def __init__(self,
fft_bins=2048,
win_length_ms=40,
frame_rate_hz=100,
causal=False,
preemphasis=0.0,
sample_rate=48000,
normalized=False):
self.sample_rate = sample_rate
self.frame_rate_hz = frame_rate_hz
self.preemphasis = preemphasis
self.fft_bins = fft_bins
self.win_length = int(sample_rate * win_length_ms / 1000)
self.hop_length = int(sample_rate / frame_rate_hz)
self.causal = causal
self.normalized = normalized
if self.win_length > self.fft_bins:
print('FourierTransform Warning: fft_bins should be larger than win_length')
def _convert_format(self, data, expected_dims):
if not type(data) == th.Tensor:
data = th.Tensor(data)
if len(data.shape) < expected_dims:
data = data.unsqueeze(0)
if not len(data.shape) == expected_dims:
raise Exception(f"FourierTransform: data needs to be a Tensor with {expected_dims} dimensions but got shape {data.shape}")
return data
def _preemphasis(self, audio):
if self.preemphasis > 0:
return th.cat((audio[:, 0:1], audio[:, 1:] - self.preemphasis * audio[:, :-1]), dim=1)
return audio
def _revert_preemphasis(self, audio):
if self.preemphasis > 0:
for i in range(1, audio.shape[1]):
audio[:, i] = audio[:, i] + self.preemphasis * audio[:, i-1]
return audio
def _magphase(self, complex_stft):
mag, phase = ta.functional.magphase(complex_stft, 1.0)
return mag, phase
def stft(self, audio):
'''
wrapper around th.stft
audio: wave signal as th.Tensor
'''
hann = th.hann_window(self.win_length)
hann = hann.cuda() if audio.is_cuda else hann
spec = th.stft(audio, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length,
window=hann, center=not self.causal, normalized=self.normalized)
return spec.contiguous()
def complex_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
return: th.Tensor of size channels x frequencies x time_steps (channels x y_axis x x_axis)
'''
        audio = self._convert_format(audio, expected_dims=2)
audio = self._preemphasis(audio)
return self.stft(audio)
def magnitude_phase(self, audio):
'''
audio: wave signal as th.Tensor
return: tuple containing two th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
'''
stft = self.complex_spectrogram(audio)
return self._magphase(stft)
def mag_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
        return: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps
'''
return self.magnitude_phase(audio)[0]
def power_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
        return: power spectrum as th.Tensor of size channels x frequencies x time_steps
'''
return th.pow(self.mag_spectrogram(audio), 2.0)
def phase_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
        return: phase spectrum as th.Tensor of size channels x frequencies x time_steps
'''
return self.magnitude_phase(audio)[1]
def mel_spectrogram(self, audio, n_mels):
'''
audio: wave signal as th.Tensor
n_mels: number of bins used for mel scale warping
        return: mel spectrogram as th.Tensor of size channels x n_mels x time_steps
'''
spec = self.power_spectrogram(audio)
mel_warping = ta.transforms.MelScale(n_mels, self.sample_rate)
return mel_warping(spec)
def complex_spec2wav(self, complex_spec, length):
'''
inverse stft
complex_spec: complex spectrum as th.Tensor of size channels x frequencies x time_steps x 2 (real part/imaginary part)
length: length of the audio to be reconstructed (in frames)
'''
complex_spec = self._convert_format(complex_spec, expected_dims=4)
hann = th.hann_window(self.win_length)
hann = hann.cuda() if complex_spec.is_cuda else hann
wav = ta.functional.istft(complex_spec, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length, window=hann, length=length, center=not self.causal)
wav = self._revert_preemphasis(wav)
return wav
def magphase2wav(self, mag_spec, phase_spec, length):
'''
reconstruction of wav signal from magnitude and phase spectrum
mag_spec: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps
phase_spec: phase spectrum as th.Tensor of size channels x frequencies x time_steps
length: length of the audio to be reconstructed (in frames)
'''
mag_spec = self._convert_format(mag_spec, expected_dims=3)
phase_spec = self._convert_format(phase_spec, expected_dims=3)
complex_spec = th.stack([mag_spec * th.cos(phase_spec), mag_spec * th.sin(phase_spec)], dim=-1)
return self.complex_spec2wav(complex_spec, length)
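if __name__ == "__main__":
    # Round-trip sketch (illustrative only; assumes the legacy torch/torchaudio APIs
    # used above, e.g. ta.functional.magphase and istft, are available).
    fft = FourierTransform(sample_rate=48000)
    audio = th.randn(2, 48000)  # 2 channels, 1 second at 48kHz
    mag, phase = fft.magnitude_phase(audio)
    recon = fft.magphase2wav(mag, phase, length=audio.shape[-1])
    print(th.max(th.abs(recon - audio)))  # should be close to zero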
| BinauralSpeechSynthesis-main | src/utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch as th
from src.utils import FourierTransform
class Loss(th.nn.Module):
def __init__(self, mask_beginning=0):
'''
base class for losses that operate on the wave signal
:param mask_beginning: (int) number of samples to mask at the beginning of the signal
'''
super().__init__()
self.mask_beginning = mask_beginning
def forward(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
data = data[..., self.mask_beginning:]
target = target[..., self.mask_beginning:]
return self._loss(data, target)
def _loss(self, data, target):
pass
class L2Loss(Loss):
def _loss(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
return th.mean((data - target).pow(2))
class AmplitudeLoss(Loss):
def __init__(self, sample_rate, mask_beginning=0):
'''
:param sample_rate: (int) sample rate of the audio signal
:param mask_beginning: (int) number of samples to mask at the beginning of the signal
'''
super().__init__(mask_beginning)
self.fft = FourierTransform(sample_rate=sample_rate)
def _transform(self, data):
return self.fft.stft(data.view(-1, data.shape[-1]))
def _loss(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
data, target = self._transform(data), self._transform(target)
data = th.sum(data**2, dim=-1) ** 0.5
target = th.sum(target**2, dim=-1) ** 0.5
return th.mean(th.abs(data - target))
class PhaseLoss(Loss):
def __init__(self, sample_rate, mask_beginning=0, ignore_below=0.1):
'''
:param sample_rate: (int) sample rate of the audio signal
        :param mask_beginning: (int) number of samples to mask at the beginning of the signal
        :param ignore_below: (float) frequency bins with energy below this fraction of the mean energy are ignored
        '''
super().__init__(mask_beginning)
self.ignore_below = ignore_below
self.fft = FourierTransform(sample_rate=sample_rate)
def _transform(self, data):
return self.fft.stft(data.view(-1, data.shape[-1]))
def _loss(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
data, target = self._transform(data).view(-1, 2), self._transform(target).view(-1, 2)
# ignore low energy components for numerical stability
target_energy = th.sum(th.abs(target), dim=-1)
pred_energy = th.sum(th.abs(data.detach()), dim=-1)
target_mask = target_energy > self.ignore_below * th.mean(target_energy)
pred_mask = pred_energy > self.ignore_below * th.mean(target_energy)
indices = th.nonzero(target_mask * pred_mask).view(-1)
data, target = th.index_select(data, 0, indices), th.index_select(target, 0, indices)
# compute actual phase loss in angular space
data_angles, target_angles = th.atan2(data[:, 0], data[:, 1]), th.atan2(target[:, 0], target[:, 1])
loss = th.abs(data_angles - target_angles)
# positive + negative values in left part of coordinate system cause angles > pi
# => 2pi -> 0, 3/4pi -> 1/2pi, ... (triangle function over [0, 2pi] with peak at pi)
loss = np.pi - th.abs(loss - np.pi)
return th.mean(loss)
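if __name__ == "__main__":
    # Quick sanity check (illustrative only; the STFT-based losses assume the legacy
    # th.stft behavior used by FourierTransform above).
    pred = th.randn(2, 2, 4800)
    print(L2Loss(mask_beginning=100)(pred, pred.clone()))               # ~0 for identical signals
    print(AmplitudeLoss(sample_rate=48000)(pred, th.randn_like(pred)))  # > 0
    print(PhaseLoss(sample_rate=48000)(pred, th.randn_like(pred)))      # > 0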
| BinauralSpeechSynthesis-main | src/losses.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import tqdm
import time
import torch as th
from torch.utils.data import DataLoader
from src.utils import NewbobAdam
from src.losses import L2Loss, PhaseLoss
class Trainer:
def __init__(self, config, net, dataset):
'''
:param config: a dict containing parameters
:param net: the network to be trained, must be of type src.utils.Net
:param dataset: the dataset to be trained on
'''
self.config = config
self.dataset = dataset
self.dataloader = DataLoader(dataset, batch_size=config["batch_size"], shuffle=True, num_workers=1)
gpus = [i for i in range(config["num_gpus"])]
self.net = th.nn.DataParallel(net, gpus)
weights = filter(lambda x: x.requires_grad, net.parameters())
self.optimizer = NewbobAdam(weights,
net,
artifacts_dir=config["artifacts_dir"],
initial_learning_rate=config["learning_rate"],
decay=config["newbob_decay"],
max_decay=config["newbob_max_decay"])
self.l2_loss = L2Loss(mask_beginning=config["mask_beginning"])
self.phase_loss = PhaseLoss(sample_rate=48000, mask_beginning=config["mask_beginning"])
self.total_iters = 0
# switch to training mode
self.net.train()
def save(self, suffix=""):
self.net.module.save(self.config["artifacts_dir"], suffix)
def train(self):
for epoch in range(self.config["epochs"]):
t_start = time.time()
loss_stats = {}
data_pbar = tqdm.tqdm(self.dataloader)
for data in data_pbar:
loss_new = self.train_iteration(data)
# logging
for k, v in loss_new.items():
loss_stats[k] = loss_stats[k]+v if k in loss_stats else v
data_pbar.set_description(f"loss: {loss_new['accumulated_loss'].item():.7f}")
for k in loss_stats:
loss_stats[k] /= len(self.dataloader)
self.optimizer.update_lr(loss_stats["accumulated_loss"])
t_end = time.time()
loss_str = " ".join([f"{k}:{v:.4}" for k, v in loss_stats.items()])
time_str = f"({time.strftime('%H:%M:%S', time.gmtime(t_end-t_start))})"
print(f"epoch {epoch+1} " + loss_str + " " + time_str)
# Save model
if self.config["save_frequency"] > 0 and (epoch + 1) % self.config["save_frequency"] == 0:
self.save(suffix='epoch-' + str(epoch+1))
print("Saved model")
# Save final model
self.save()
def train_iteration(self, data):
'''
one optimization step
:param data: tuple of tensors containing mono, binaural, and quaternion data
:return: dict containing values for all different losses
'''
# forward
self.optimizer.zero_grad()
mono, binaural, quats = data
mono, binaural, quats = mono.cuda(), binaural.cuda(), quats.cuda()
prediction = self.net.forward(mono, quats)
l2 = self.l2_loss(prediction["output"], binaural)
phase = self.phase_loss(prediction["output"], binaural)
intermediate_binaural = th.cat([binaural] * len(prediction["intermediate"]), dim=1)
intermediate_prediction = th.cat(prediction["intermediate"], dim=1)
intermediate_l2 = self.l2_loss(intermediate_prediction, intermediate_binaural)
intermediate_phase = self.phase_loss(intermediate_prediction, intermediate_binaural)
loss = (l2 + intermediate_l2) * self.config["loss_weights"]["l2"] + \
(phase + intermediate_phase) * self.config["loss_weights"]["phase"]
# update model parameters
loss.backward()
self.optimizer.step()
self.total_iters += 1
return {
"l2": l2,
"phase": phase,
"intermediate_l2": intermediate_l2,
"intermediate_phase": intermediate_phase,
"accumulated_loss": loss,
}
| BinauralSpeechSynthesis-main | src/trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import torch
from torch import nn
from torch.nn import functional as F
class AdaptiveEmbedding(nn.Module):
""" An adaptive embedding module from "Adaptive Input Representations for
Neural Language Modeling" (https://arxiv.org/abs/1809.10853) """
def __init__(self, n_tokens, d_embed, d_proj, cutoffs, div_val=4):
super(AdaptiveEmbedding, self).__init__()
self.n_tokens = n_tokens
self.d_embed = d_embed
self.d_proj = d_proj
assert 0 < min(cutoffs) <= max(cutoffs) < n_tokens
self.cutoffs = cutoffs + [n_tokens]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
assert self.div_val > 1
assert len(self.cutoffs) > 1
self.emb_scale = d_proj ** 0.5
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
# embedding layers / projections
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
self.emb_projs.append(nn.Linear(d_emb_i, d_proj).weight)
def forward(self, indices):
param = self.emb_layers[0].weight.data
idx_flat = indices.contiguous().view(-1)
emb_flat = torch.zeros([idx_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
# for each cluster
for i in range(len(self.cutoffs)):
# find elements in that cluster
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (idx_flat >= l_idx) & (idx_flat < r_idx)
# if there are no elements, continue
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
# add embeddings from this cluster
idx_i = idx_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](idx_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat = emb_flat.type_as(emb_i) if emb_flat.dtype != emb_i.dtype else emb_flat # small hack for AMP-O1
emb_flat.index_copy_(0, indices_i, emb_i)
# reshape embeddings
embed = emb_flat.view(*indices.size(), self.d_proj)
# rescale embeddings
embed.mul_(self.emb_scale)
return embed
class ProjectedAdaptiveLogSoftmax(nn.Module):
""" An efficient softmax implementation from "Efficient softmax
approximation for GPUs" (http://arxiv.org/abs/1609.04309). """
def __init__(self, n_tokens, d_embed, d_proj, cutoffs, div_val=4):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_tokens = n_tokens
self.d_embed = d_embed
self.d_proj = d_proj
assert 0 < min(cutoffs) <= max(cutoffs) < n_tokens
self.cutoffs = cutoffs + [n_tokens]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
assert self.div_val > 1
assert len(self.cutoffs) > 1
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# clusters parameters
self.cluster_proj = nn.Linear(self.d_embed, self.n_clusters)
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
# output layers / projections
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.out_projs.append(nn.Linear(d_emb_i, d_proj).weight)
self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
def _compute_logit(self, hidden, weight, bias, proj):
proj_hid = F.linear(hidden, proj.t().contiguous()) # TODO: .contiguous() not necessary?
logit = F.linear(proj_hid, weight, bias=bias)
return logit
def forward(self, hidden, target):
"""
Input:
- `hidden` FloatTensor(shape + (d_proj,))
- `target` LongTensor(shape)
Output:
- `nll` FloatTensor(shape)
"""
assert hidden.shape[-1] == self.d_proj
assert hidden.shape[:-1] == target.shape
shape = target.shape
hidden = hidden.view(-1, self.d_proj)
target = target.view(-1)
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_proj.weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_proj.bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
# head / cluster assignments
head_logit = self._compute_logit(hidden, weights[0], biases[0], self.out_projs[0])
head_logprob = F.log_softmax(head_logit.float(), dim=1)
# final log-probabilities
nll = torch.zeros_like(target, dtype=torch.float32, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
# for each cluster
for i in range(len(cutoff_values) - 1):
# select the target tokens in that cluster
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
# if there are not any, there is nothing to do
if indices_i.numel() == 0:
continue
# index in current cluster
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
# for targets in the head cluster, there is just the head score
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
# otherwise, we sum the cluster assignment (head) and target scores
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weights[i], biases[i], self.out_projs[i])
tail_logprob_i = F.log_softmax(tail_logit_i.float(), dim=1)
logprob_i = head_logprob_i[:, -i] + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
# populate output
nll.index_copy_(0, indices_i, -logprob_i)
offset += logprob_i.size(0)
return nll.view(shape)
def compute_dummy_loss(in_emb, out_emb):
    # hack to fix adaptive out/in embeddings with distributed code
dummy_loss = 0 * (
sum(x.weight[0, 0] for x in in_emb.emb_layers) +
sum(x[0, 0] for x in in_emb.emb_projs) +
sum(x[0, 0] for x in out_emb.out_projs) +
sum(x.weight[0, 0] for x in out_emb.out_layers) +
sum(x.bias[0] for x in out_emb.out_layers)
)
return dummy_loss
def build_adaptive_io(vocab_size, hidden_size, adapt_io_cutoffs,
adapt_io_divval, adapt_io_tied, **kargs):
in_emb = AdaptiveEmbedding(
vocab_size, hidden_size, hidden_size,
cutoffs=adapt_io_cutoffs,
div_val=adapt_io_divval)
out_emb = ProjectedAdaptiveLogSoftmax(
vocab_size, hidden_size, hidden_size,
cutoffs=adapt_io_cutoffs,
div_val=adapt_io_divval)
if adapt_io_tied:
for i in range(len(adapt_io_cutoffs) + 1):
out_emb.out_layers[i].weight = in_emb.emb_layers[i].weight
out_emb.out_projs[i] = in_emb.emb_projs[i]
return in_emb, out_emb
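if __name__ == "__main__":
    # Small usage sketch (illustrative only): tied adaptive input/output over a toy vocabulary.
    vocab_size, hidden_size = 1000, 64
    in_emb, out_emb = build_adaptive_io(
        vocab_size, hidden_size,
        adapt_io_cutoffs=[100, 300], adapt_io_divval=4, adapt_io_tied=True)
    # deterministic tokens that hit all three clusters
    tokens = (torch.arange(8 * 16) * 7 % vocab_size).view(8, 16)
    hidden = in_emb(tokens)        # 8 x 16 x hidden_size
    nll = out_emb(hidden, tokens)  # 8 x 16 per-token negative log-likelihoods
    print(hidden.shape, nll.shape, compute_dummy_loss(in_emb, out_emb))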
| adaptive-span-main | adaptive_io.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# command-line arguments with their default values
PARAMS_CONFIG = {
# env-specific
'env_params': {
'--distributed': {
'action': 'store_true',
'default': False,
'help': 'enable distributed training.'
'(otherwise will use all available GPUs with dataparallel)',
'dest': 'distributed'
},
'--local_rank': {
'type': int,
'default': 0,
'help': 'used in distributed training',
'dest': 'local_rank'
},
},
# data-specific
'data_params': {
'--data': {
'type': str,
'default': 'data/text8',
'help': 'data location '
'(must contain train.txt, valid.txt and test.txt)',
'dest': 'data_path'
},
'--data-unit': {
'type': str,
'default': 'bpc',
'choices': ['bpc', 'ppl'],
'help': 'loss unit to log',
'dest': 'data_unit'
},
},
# model-specific
'model_params': {
'--hid-sz': {
'type': int,
'default': 256,
'help': 'hidden size (i.e. model size)',
'dest': 'hidden_size'
},
'--inner-hid-sz': {
'type': int,
'default': 1024,
'help': 'inner hidden size of FF layer',
'dest': 'inner_hidden_size'
},
'--nlayers': {
'type': int,
'default': 8,
'help': 'number of layers',
'dest': 'nb_layers'
},
'--block-sz': {
'type': int,
'default': 64,
'help': 'block size '
'(the length of sequence to process in parallel)',
'dest': 'block_size'
},
'--nheads': {
'type': int,
'default': 2,
'help': 'number of self-attention heads',
'dest': 'nb_heads'
},
'--attn-span': {
'type': int,
'default': 32,
'help': 'length of the attention span',
'dest': 'attn_span'
},
'--dropout': {
'type': float,
'default': 0.2,
'help': 'dropout rate of ReLU and attention',
'dest': 'dropout'
},
'--emb-dropout': {
'type': float,
'default': 0.,
'help': 'the dropout rate applied on I/O embeddings',
'dest': 'emb_dropout'
},
},
# optimization-specific
'optim_params': {
'--lr': {
'type': float,
'default': 0.03,
'help': 'learning rate',
'dest': 'lr'
},
'--momentum': {
'type': float,
'default': 0.9,
'help': 'SGD momentum',
'dest': 'momentum'
},
'--optim': {
'type': str,
'default': 'sgd',
'help': 'optimization method: sgd | adagrad',
'dest': 'optim'
},
'--lr-warmup': {
'type': int,
'default': 0,
'help': 'linearly increase LR from 0 '
'during first lr_warmup updates',
'dest': 'lr_warmup'
},
'--grad-clip': {
'type': float,
'default': 0,
'help': '[only works with adagrad!] '
'clip gradient of each module parameters by a given '
'value',
'dest': 'grad_clip'
},
},
# trainer-specific
'trainer_params': {
'--batch-sz': {
'type': int,
'default': 64,
'help': 'batch size',
'dest': 'batch_size'
},
'--batch-split': {
'type': int,
'default': 1,
'help': 'split a batch into smaller parts to fit in GPU memory',
'dest': 'batch_split'
},
'--nbatches': {
'type': int,
'default': 1000,
'help': 'number of batches in each iteration',
'dest': 'nb_batches_per_iter'
},
'--niter': {
'type': int,
'default': 1000,
'help': 'number of iterations to train',
'dest': 'nb_iter'
},
'--checkpoint': {
'type': str,
'default': '',
'help': 'path to save/load model',
'dest': 'checkpoint_path'
},
'--full-eval-mode': {
'action': 'store_true',
'default': False,
'help': 'do evaluation on the whole validation and the test data',
'dest': 'full_eval_mode'
},
},
# adaptive I/O specific params
'adapt_io_params': {
'--adapt-io': {
'action': 'store_true',
'default': False,
'help': 'enable adaptive input and output representations',
'dest': 'adapt_io_enabled'
},
'--adapt-io-tied': {
'action': 'store_true',
'default': False,
'help': 'tie the input parameters with the output parameters',
'dest': 'adapt_io_tied'
},
'--adapt-io-divval': {
'type': int,
'default': 4,
'help': 'dimension division value',
'dest': 'adapt_io_divval'
},
'--adapt-io-cutoffs': {
'type': int,
'default': [20000, 40000, 200000],
'help': 'cutoffs values',
'dest': 'adapt_io_cutoffs'
},
},
# adaptive attention span specific params
'adapt_span_params': {
'--adapt-span': {
'action': 'store_true',
'default': False,
'help': 'enable adaptive attention span',
'dest': 'adapt_span_enabled'
},
'--adapt-span-loss': {
'type': float,
'default': 0,
'help': 'the loss coefficient for span lengths',
'dest': 'adapt_span_loss'
},
'--adapt-span-ramp': {
'type': int,
'default': 32,
'help': 'ramp length of the soft masking function',
'dest': 'adapt_span_ramp'
},
'--adapt-span-init': {
'type': float,
'default': 0,
'help': 'initial attention span ratio',
'dest': 'adapt_span_init'
},
'--adapt-span-cache': {
'action': 'store_true',
'default': False,
'help': 'adapt cache size as well to reduce memory usage',
'dest': 'adapt_span_cache'
},
},
# persistent memory specific params
'pers_mem_params': {
'--pers-mem-size': {
'type': int,
'default': 0,
'help': 'the number of persistent memory vectors',
'dest': 'pers_mem_size'
},
},
}
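if __name__ == "__main__":
    # Sketch of how these dicts map onto argparse (the actual wiring lives in the
    # training entry point; this block is only illustrative).
    import argparse
    parser = argparse.ArgumentParser()
    for group in PARAMS_CONFIG.values():
        for flag, kwargs in group.items():
            parser.add_argument(flag, **kwargs)
    args = parser.parse_args(['--hid-sz', '512', '--adapt-span'])
    print(args.hidden_size, args.adapt_span_enabled)  # 512 True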
| adaptive-span-main | config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveMask(nn.Module):
"""Soft masking function for adaptive size.
It masks out the last K values of an input. The masking value
goes from 1 to 0 gradually, so K can be learned with
back-propagation.
Args:
max_size: maximum size (i.e. input dimension)
ramp_size: size of the ramp going from 0 to 1
init_val: initial size proportion not to be masked out
shape: learn multiple sizes independent of each other
"""
def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)):
nn.Module.__init__(self)
self._max_size = max_size
self._ramp_size = ramp_size
self.current_val = nn.Parameter(torch.zeros(*shape) + init_val)
mask_template = torch.linspace(1 - max_size, 0, steps=max_size)
self.register_buffer('mask_template', mask_template)
def forward(self, x):
mask = self.mask_template + self.current_val * self._max_size
mask = mask / self._ramp_size + 1
mask = mask.clamp(0, 1)
if x.size(-1) < self._max_size:
# the input could have been trimmed beforehand to save computation
mask = mask[:, :, -x.size(-1):]
x = x * mask
return x
def get_current_max_size(self, include_ramp=True):
current_size = math.ceil(self.current_val.max().item() * self._max_size)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def get_current_avg_size(self, include_ramp=True):
current_size = math.ceil(self.current_val.mean().item() * self._max_size)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def clamp_param(self):
"""this need to be called after each update"""
self.current_val.data.clamp_(0, 1)
class AdaptiveSpan(nn.Module):
"""Adaptive attention span for Transformerself.
This module learns an attention span length from data for each
self-attention head.
Args:
attn_span: maximum attention span
adapt_span_loss: loss coefficient for the span length
adapt_span_ramp: length of the masking ramp
adapt_span_init: initial size ratio
adapt_span_cache: adapt cache size to reduce memory usage
"""
def __init__(self, attn_span, adapt_span_loss, adapt_span_ramp,
adapt_span_init, adapt_span_cache, nb_heads, **kargs):
nn.Module.__init__(self)
self._adapt_cache = adapt_span_cache
self._max_span = attn_span
self._loss_coeff = adapt_span_loss
self._nb_heads = nb_heads
self._mask = AdaptiveMask(max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
shape=(nb_heads, 1, 1))
def forward(self, attn, normalize=True):
"""mask attention with the right span"""
# batch and head dimensions are merged together, so separate them first
B = attn.size(0) # batch size
M = attn.size(1) # block size
attn = attn.reshape(B // self._nb_heads, self._nb_heads, M, -1)
attn = self._mask(attn)
if normalize:
attn = attn / (attn.sum(-1, keepdim=True) + 1e-8) # normalize so sum is 1
attn = attn.view(B, M, -1)
return attn
def get_trim_len(self):
"""how much of memory can be trimmed to reduce computation"""
L = self._max_span
trim_len = min(L - 1, L - self._mask.get_current_max_size())
# too fine granularity might be bad for the memory management
trim_len = math.floor(trim_len / 64) * 64
return trim_len
def trim_memory(self, query, key, value, key_pe):
"""trim out unnecessary memory beforehand to reduce computation"""
trim_len = self.get_trim_len()
cache_size = key.size(1) - query.size(1)
trim_len_cache = trim_len - (self._max_span - cache_size)
if trim_len_cache > 0:
key = key[:, trim_len_cache:, :]
value = value[:, trim_len_cache:, :]
elif trim_len_cache < 0:
# cache is too short! this happens when validation resumes
# after a lot of updates.
key = F.pad(key, [0, 0, -trim_len_cache, 0])
value = F.pad(value, [0, 0, -trim_len_cache, 0])
if trim_len > 0:
if key_pe is not None:
key_pe = key_pe[:, :, trim_len:]
return key, value, key_pe
def get_cache_size(self):
"""determine how long the cache should be"""
if self._adapt_cache:
trim_len = self.get_trim_len()
# give a buffer of 64 steps since a span might increase
# in future updates
return min(self._max_span, self._max_span - trim_len + 64)
else:
return self._max_span
def get_loss(self):
"""a loss term for regularizing the span length"""
return self._loss_coeff * self._max_span * self._mask.current_val.mean()
def get_current_max_span(self):
return self._mask.get_current_max_size()
def get_current_avg_span(self):
return self._mask.get_current_avg_size()
def clamp_param(self):
self._mask.clamp_param()
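if __name__ == "__main__":
    # Toy sketch (illustrative only): soft-mask random attention weights with a learnable span.
    nb_heads, B, M, L = 2, 3, 8, 16
    span = AdaptiveSpan(attn_span=L, adapt_span_loss=0.0, adapt_span_ramp=4,
                        adapt_span_init=0.5, adapt_span_cache=False, nb_heads=nb_heads)
    attn = torch.softmax(torch.randn(B * nb_heads, M, L), dim=-1)
    masked = span(attn)  # re-normalized attention restricted to the current span
    print(masked.shape, span.get_current_avg_span(), span.get_loss())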
| adaptive-span-main | adaptive_span.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from adaptive_span import AdaptiveSpan
from persistent_memory import PersistentMemory
from adaptive_io import build_adaptive_io, compute_dummy_loss
# Size notations:
# B = batch_size, H = hidden_size, M = block_size, L = attn_span
def _skew(X, pad_value):
"""shift every row 1 step to right"""
# X = B x M x L
B, M, L = X.size()
X = F.pad(X, (0, M + 1), value=pad_value) # B x M x (L+M+1)
X = X.view(B, -1) # B x ML+MM+M
X = X[:, :-M] # B x ML+MM
X = X.view(B, M, M + L) # B x M x L+M
return X
def _unskew(X):
"""reverse _skew operation"""
# X = B x M x L+M
B, M, L = X.size()
L -= M
X = X.view(B, -1) # B x ML+MM
X = F.pad(X, (0, M)) # B x ML+MM+M
X = X.view(B, M, M + L + 1) # B x M x L+M+1
X = X[:, :, :L] # B x M x L
return X
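# Illustration of the skew trick (schematic values): for M = 2, L = 3 and input rows
# [[a b c], [d e f]], _skew right-shifts row i by i positions,
#   [[a b c 0 0],
#    [0 d e f 0]],
# so that after skewing, a column index corresponds to an absolute source position
# rather than a relative offset; _unskew reverses the operation.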
class SeqAttention(nn.Module):
"""Sequential self-attention layer.
Each token will attend to its previous fixed number of steps.
Note that attention doesn't include the current step itself.
"""
def __init__(self, hidden_size, nb_heads, attn_span,
dropout, adapt_span_params, pers_mem_params, **kargs):
nn.Module.__init__(self)
self.dropout = nn.Dropout(dropout)
self.hidden_size = hidden_size # size of a single head
self.attn_span = attn_span
self.adapt_span_enabled = adapt_span_params['adapt_span_enabled']
if self.adapt_span_enabled:
self.adaptive_span = AdaptiveSpan(attn_span=attn_span, nb_heads=nb_heads,
**adapt_span_params, **kargs)
self.persistent_memory = None
if pers_mem_params['pers_mem_size'] > 0:
self.persistent_memory = PersistentMemory(
pers_mem_params['pers_mem_size'], nb_heads, hidden_size, dropout)
if self.adapt_span_enabled:
self.persistent_memory.adaptive_span = self.adaptive_span
def forward(self, query, key, value, key_pe):
# query size = B x M x H
# key, value sizes = B x (M+L) x H
if self.adapt_span_enabled:
# [optional] trim out memory to reduce unnecessary computation
key, value, key_pe = self.adaptive_span.trim_memory(
query, key, value, key_pe)
# compute attention from context
# B x M (dest) x (M+L) (src)
attn_cont = torch.matmul(query, key.transpose(-1, -2))
attn_cont = _unskew(attn_cont) # B x M x L
# compute the effect of position embedding
attn_pos = torch.matmul(query, key_pe) # B x M x L_pos
attn = attn_cont + attn_pos
if self.persistent_memory is not None:
attn, pers_mem_out = self.persistent_memory(query, attn)
else:
attn = attn / math.sqrt(self.hidden_size) # B x M X L_pos
attn = F.softmax(attn, dim=-1)
if self.adapt_span_enabled:
# trim attention lengths according to the learned span
attn = self.adaptive_span(attn)
attn = self.dropout(attn) # B x M X L_pos
attn_cont = _skew(attn, 0) # B x M X (L+M)
out = torch.matmul(attn_cont, value) # B x M x H
if self.persistent_memory is not None:
out = out + pers_mem_out
return out
def get_cache_size(self):
if self.adapt_span_enabled:
return self.adaptive_span.get_cache_size()
else:
return self.attn_span
class MultiHeadSeqAttention(nn.Module):
def __init__(self, hidden_size, nb_heads, **kargs):
nn.Module.__init__(self)
assert hidden_size % nb_heads == 0
self.nb_heads = nb_heads
self.head_dim = hidden_size // nb_heads
self.attn = SeqAttention(
hidden_size=self.head_dim, nb_heads=nb_heads, **kargs)
self.proj_query = nn.Linear(hidden_size, hidden_size, bias=False)
self.proj_out = nn.Linear(hidden_size, hidden_size, bias=False)
self.proj_val = nn.Linear(hidden_size, hidden_size, bias=False)
self.proj_key = nn.Linear(hidden_size, hidden_size, bias=False)
def head_reshape(self, x):
K = self.nb_heads
D = self.head_dim
x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D
x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D
x = x.view(-1, x.size(-2), x.size(-1)) # B_K x (M+L) x D
return x
def forward(self, query, key, value, key_pe):
B = query.size(0)
K = self.nb_heads
D = self.head_dim
M = query.size(1)
query = self.proj_query(query)
query = self.head_reshape(query)
value = self.proj_val(value)
value = self.head_reshape(value)
key = self.proj_key(key)
key = self.head_reshape(key)
out = self.attn(query, key, value, key_pe) # B_K x M x D
out = out.view(B, K, M, D) # B x K x M x D
out = out.transpose(1, 2).contiguous() # B x M x K x D
out = out.view(B, M, -1) # B x M x K_D
out = self.proj_out(out)
return out
class FeedForwardLayer(nn.Module):
def __init__(self, hidden_size, inner_hidden_size, dropout, **kargs):
nn.Module.__init__(self)
self.fc1 = nn.Linear(hidden_size, inner_hidden_size)
self.fc2 = nn.Linear(inner_hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, h):
h1 = F.relu(self.fc1(h))
h1 = self.dropout(h1)
h2 = self.fc2(h1)
return h2
class TransformerSeqLayer(nn.Module):
def __init__(self, hidden_size, **kargs):
nn.Module.__init__(self)
self.attn = MultiHeadSeqAttention(hidden_size=hidden_size, **kargs)
self.norm1 = nn.LayerNorm(hidden_size)
if kargs['pers_mem_params']['pers_mem_size'] > 0:
# replacing FF with persistent memory
self.ff = None
else:
self.ff = FeedForwardLayer(hidden_size=hidden_size, **kargs)
self.norm2 = nn.LayerNorm(hidden_size)
def forward(self, h, h_cache, key_pe):
# h = B x M x H
# h_cache = B x L x H
h_all = torch.cat([h_cache, h], dim=1) # B x (M+L) x H
attn_out = self.attn(h, h_all, h_all, key_pe)
h = self.norm1(h + attn_out) # B x M x H
if self.ff is not None:
ff_out = self.ff(h)
out = self.norm2(h + ff_out) # B x M x H
else:
out = h
return out
class TransformerSeq(nn.Module):
def __init__(self, vocab_size, hidden_size, nb_heads, nb_layers,
attn_span, emb_dropout, adapt_io_params, **kargs):
nn.Module.__init__(self)
# token embeddings
self.adapt_io = adapt_io_params['adapt_io_enabled']
if self.adapt_io:
self.in_emb, self.out_emb = build_adaptive_io(
vocab_size, hidden_size, **adapt_io_params)
else:
self.in_emb = nn.Embedding(vocab_size, hidden_size)
self.out_emb = nn.Linear(hidden_size, vocab_size)
if emb_dropout > 0:
self.emb_dropout = nn.Dropout(emb_dropout)
else:
self.emb_dropout = None
# position embeddings
self.key_pe = nn.Parameter(
torch.randn(1, hidden_size // nb_heads, attn_span))
self.layers = nn.ModuleList()
self.layers.extend(
TransformerSeqLayer(
hidden_size=hidden_size, nb_heads=nb_heads,
attn_span=attn_span, **kargs)
for _ in range(nb_layers))
def forward(self, x, h_cache, target=None):
# x size = B x M
block_size = x.size(1)
h = self.in_emb(x) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
h_cache_next = []
for l, layer in enumerate(self.layers):
cache_size = layer.attn.attn.get_cache_size()
if cache_size > block_size:
h_cache_next_l = torch.cat(
[h_cache[l][:, -cache_size + block_size:, :], h],
dim=1).detach()
else:
h_cache_next_l = h[:, -cache_size:, :].detach()
h_cache_next.append(h_cache_next_l)
h = layer(h, h_cache[l], self.key_pe) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
if self.adapt_io:
# loss is computed here
out = self.out_emb(h, target)
dummy_loss = compute_dummy_loss(self.in_emb, self.out_emb)
else:
out = F.log_softmax(self.out_emb(h), dim=-1)
dummy_loss = None
return out, h_cache_next, dummy_loss
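# Minimal usage sketch (illustrative only; the hyperparameter values below are
# assumptions, not taken from this repo's configs):
#
#   model = TransformerSeq(
#       vocab_size=256, hidden_size=64, nb_heads=4, nb_layers=2,
#       attn_span=128, emb_dropout=0.0,
#       adapt_io_params={'adapt_io_enabled': False},
#       adapt_span_params={'adapt_span_enabled': False},
#       pers_mem_params={'pers_mem_size': 0},
#       inner_hidden_size=256, dropout=0.0)
#   x = torch.randint(0, 256, (1, 32))  # B x M token ids
#   h_cache = [torch.zeros(1, layer.attn.attn.get_cache_size(), 64)
#              for layer in model.layers]
#   out, h_cache, _ = model(x, h_cache)  # out: B x M x vocab_size log-probs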
| adaptive-span-main | models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import os
import math
import argparse
import torch
from adagrad_with_grad_clip import AdagradWithGradClip
def _parse_args(params_config, args):
parser = argparse.ArgumentParser()
for params_category in params_config: # e.g., 'model_params'
for param_flag, param_config in params_config[params_category].items():
# e.g., param_flag = '--block-sz'
parser.add_argument(param_flag, **param_config)
return parser.parse_args(args)
def get_params(params_config, args=None):
namespace = _parse_args(params_config, args)
return {
params_category: {
param_config['dest']:
namespace.__getattribute__(param_config['dest'])
for param_config in params_config[params_category].values()
}
for params_category in params_config
}
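# Illustrative example (hypothetical config entry, not this repo's actual
# config.py):
#   cfg = {'model_params': {'--hid-sz': {'type': int, 'default': 256,
#                                        'dest': 'hidden_size'}}}
#   get_params(cfg, args=['--hid-sz', '512'])
#   # -> {'model_params': {'hidden_size': 512}}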
##############################################################################
# ENVIRONMENT
##############################################################################
def _torch_distributed_init_process_group(local_rank):
torch.distributed.init_process_group(
backend='nccl',
init_method='env://'
)
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
print('my rank={} local_rank={}'.format(rank, local_rank))
torch.cuda.set_device(local_rank)
return {
'rank': rank,
'world_size': world_size,
}
def set_up_env(env_params):
assert torch.cuda.is_available()
if env_params['distributed']:
env_params.update(
_torch_distributed_init_process_group(
local_rank=env_params['local_rank']))
env_params['device'] = torch.device('cuda')
##############################################################################
# OPTIMIZER AND SCHEDULER
##############################################################################
def _get_grad_requiring_params(model):
nb_parameters = 0
grad_requiring_params = []
for param in model.parameters():
if param.requires_grad:
nb_parameters += param.numel()
grad_requiring_params.append(param)
print('nb_parameters={:.2f}M'.format(nb_parameters / 1e6))
return grad_requiring_params
def _get_optimizer(model,
optim,
lr: float,
momentum: float,
grad_clip: float):
if optim == 'sgd':
optimizer = torch.optim.SGD(_get_grad_requiring_params(model),
lr=lr,
momentum=momentum)
optimizer.grad_clip = grad_clip
return optimizer
elif optim == 'adagrad':
optimizer = AdagradWithGradClip(_get_grad_requiring_params(model),
lr=lr,
grad_clip=grad_clip)
optimizer.grad_clip = 0 # done internally
return optimizer
elif optim == 'adam':
optimizer = torch.optim.Adam(_get_grad_requiring_params(model),
lr=lr)
optimizer.grad_clip = grad_clip
return optimizer
else:
raise RuntimeError("wrong type of optimizer "
"- must be 'sgd', 'adagrad' or 'adam'")
def _get_scheduler(optimizer, lr_warmup):
if lr_warmup > 0:
return torch.optim.lr_scheduler.LambdaLR(
optimizer, lambda ep: min(1, ep / lr_warmup))
return None
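# Illustrative warmup behaviour (hypothetical numbers): with lr_warmup=1000 the
# LambdaLR factor is 0.1 after 100 scheduler steps and saturates at 1.0 from
# step 1000 onwards, i.e. the learning rate ramps up linearly during warmup.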
def get_optimizer_and_scheduler(model, optim_params):
optimizer = _get_optimizer(model=model,
optim=optim_params['optim'],
lr=optim_params['lr'],
momentum=optim_params['momentum'],
grad_clip=optim_params['grad_clip'])
scheduler = _get_scheduler(optimizer=optimizer,
lr_warmup=optim_params['lr_warmup'])
return optimizer, scheduler
##############################################################################
# CHECKPOINT
##############################################################################
def _load_checkpoint(checkpoint_path, model, optimizer, scheduler, logger,
distributed):
print('loading from a checkpoint at {}'.format(checkpoint_path))
if distributed:
# the model is saved from gpu0 so we need to map it to CPU first
checkpoint_state = torch.load(
checkpoint_path, map_location=lambda storage, loc: storage)
else:
checkpoint_state = torch.load(checkpoint_path)
iter_init = checkpoint_state['iter_no'] + 1 # next iteration
model.load_state_dict(checkpoint_state['model'])
optimizer.load_state_dict(checkpoint_state['optimizer'])
logger.load_state_dict(checkpoint_state['logger'])
if 'scheduler_iter' in checkpoint_state:
# we only need the step count
scheduler.step(checkpoint_state['scheduler_iter'])
return iter_init
def load_checkpoint(checkpoint_path, model, optimizer, scheduler, logger,
distributed):
if checkpoint_path and os.path.exists(checkpoint_path):
return _load_checkpoint(checkpoint_path=checkpoint_path,
model=model,
optimizer=optimizer,
scheduler=scheduler,
logger=logger,
distributed=distributed)
return 0
def save_checkpoint(checkpoint_path, iter_no, model,
optimizer, scheduler, logger):
if checkpoint_path:
checkpoint_state = {
'iter_no': iter_no, # last completed iteration
'model': model.state_dict(),
'logger': logger.state_dict(),
'optimizer': optimizer.state_dict(),
}
if scheduler is not None:
checkpoint_state['scheduler_iter'] = scheduler.last_epoch
torch.save(checkpoint_state, checkpoint_path)
##############################################################################
# LOGGER
##############################################################################
class Logger:
def __init__(self, data_unit):
self.data_unit = data_unit
self._state_dict = dict()
def load_state_dict(self, state_dict):
self._state_dict = state_dict
def state_dict(self):
return self._state_dict
def _log(self, title, value):
if title not in self._state_dict:
self._state_dict[title] = []
self._state_dict[title].append(value)
def log_iter(self, iter_no, nb_batches_per_iter, loss_train, loss_val,
elapsed, model):
step = (iter_no + 1) * nb_batches_per_iter
self._log(title='step', value=step)
msg = 'steps: {}'.format(step)
if self.data_unit == 'bpc':
train_bpc = float(loss_train / math.log(2))
val_bpc = float(loss_val / math.log(2))
msg += '\ttrain: {:.3f}bpc\tval: {:.3f}bpc'.format(train_bpc, val_bpc)
self._log(title='train_bpc', value=train_bpc)
self._log(title='val_bpc', value=val_bpc)
else:
train_ppl = math.exp(loss_train)
val_ppl = math.exp(loss_val)
msg += '\ttrain: {:.2f}ppl\tval: {:.2f}ppl'.format(train_ppl, val_ppl)
self._log(title='train_ppl', value=train_ppl)
self._log(title='val_ppl', value=val_ppl)
msg += '\tms/batch: {:.1f}'.format(elapsed)
if model.module.layers[0].attn.attn.adapt_span_enabled:
avg_spans = []
max_spans = []
for layer in model.module.layers:
avg_spans.append(
layer.attn.attn.adaptive_span.get_current_avg_span())
max_spans.append(
layer.attn.attn.adaptive_span.get_current_max_span())
span_avg = float(sum(avg_spans)) / len(avg_spans)
span_max = float(max(max_spans))
self._log('span_avg', span_avg)
self._log('span_max', span_max)
msg += "\tspan_avg: {:.0f}\tspan_max: {:.0f}".format(span_avg, span_max)
print(msg)
| adaptive-span-main | utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import random
import torch
def _train_step(model, X, Y, h_cache, eval_only, loss_div=1):
"""Single training step."""
out, h_cache, dummy_loss = model(X, h_cache, target=Y)
if model.module.adapt_io:
loss = out.mean() + dummy_loss.sum()
else:
out = out.view(-1, out.size(-1))
loss = torch.nn.functional.nll_loss(out, Y.view(-1))
loss_value = loss.item() / loss_div
if not eval_only:
# loss term from adaptive-span
if model.module.layers[0].attn.attn.adapt_span_enabled:
loss += sum(layer.attn.attn.adaptive_span.get_loss()
for layer in model.module.layers)
(loss / loss_div).backward()
return loss_value, h_cache
def _train_batch(model, optimizer, scheduler, X, Y, h_cache,
eval_only, batch_split):
"""Train on a batch."""
optimizer.zero_grad()
if batch_split == 1:
# process a batch in a single step (default behaviour)
loss_value, h_cache = _train_step(model, X, Y, h_cache, eval_only)
else:
# split a batch into multiple pieces that each can fit in memory
assert X.size(0) % batch_split == 0
split_size = X.size(0) // batch_split
loss_value = 0
h_cache_list = []
for split_ind in range(batch_split):
split_slice = slice(split_ind*split_size, (split_ind+1)*split_size)
split_h_cache = [h[split_slice,:,:] for h in h_cache]
split_loss_value, split_h_cache = _train_step(
model, X[split_slice,:], Y[split_slice],
split_h_cache, eval_only, batch_split)
loss_value += split_loss_value
h_cache_list.append(split_h_cache)
h_cache = [
torch.cat(
[h_cache_list[i][l] for i in range(batch_split)]
, dim=0) for l in range(len(h_cache))]
if not eval_only:
if scheduler is not None:
scheduler.step()
if optimizer.grad_clip > 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), optimizer.grad_clip)
optimizer.step()
# make sure span parameters are in a correct range
if model.module.layers[0].attn.attn.adapt_span_enabled:
for layer in model.module.layers:
layer.attn.attn.adaptive_span.clamp_param()
return loss_value, h_cache
def train_iteration(model, optimizer, scheduler, data, nb_batches_per_iter,
block_size, eval_only, train_pos, h_cache, batch_split):
"""Single training iteration."""
if eval_only:
model.eval()
else:
model.train()
nb_batches_per_iter_max = nb_batches_per_iter
if eval_only:
# eval on fewer batches during training for speed-up
nb_batches_per_iter_max = max(1, nb_batches_per_iter // 10)
nb_batches_per_iter_max = min(nb_batches_per_iter_max,
math.ceil(data.size(1) / block_size))
loss_all = 0
actual_nb_batches_per_iter = 0
for _ in range(nb_batches_per_iter_max):
actual_nb_batches_per_iter += 1
X = data[:, train_pos: train_pos + block_size].contiguous()
Y = data[:, train_pos + 1: train_pos + block_size + 1].contiguous()
loss, h_cache = _train_batch(
model=model,
optimizer=optimizer,
scheduler=scheduler,
X=X, Y=Y,
h_cache=h_cache,
eval_only=eval_only,
batch_split=batch_split)
loss_all += loss
train_pos += block_size
if train_pos >= data.size(1) - block_size:
# reached the end. randomize the offset to reduce overfitting
train_pos = random.randrange(block_size)
# reset the cache
for h in h_cache:
h.fill_(0)
loss_all = loss_all / actual_nb_batches_per_iter
return loss_all, train_pos, h_cache
# do full evaluation
def full_eval(model, optimizer, scheduler, data, block_size, hidden_size):
model.eval()
train_pos = 0
nb_batches_per_iter_max = math.ceil(data.size(1) / block_size)
h_cache = [
torch.zeros(
data.size(0),
layer.attn.attn.get_cache_size(),
hidden_size).to(data.device)
for layer in model.module.layers]
loss_all = 0
actual_nb_batches_per_iter = 0
for _ in range(nb_batches_per_iter_max):
actual_nb_batches_per_iter += 1
X = data[:, train_pos: train_pos + block_size].contiguous()
Y = data[:, train_pos + 1: train_pos + block_size + 1].contiguous()
loss, h_cache = _train_batch(
model=model,
optimizer=optimizer,
scheduler=scheduler,
X=X, Y=Y,
h_cache=h_cache,
eval_only=True,
batch_split=1)
loss_all += loss
train_pos += block_size
if train_pos >= data.size(1) - block_size:
# Skip the remaining tokens as they can't form a whole block.
# The effect on performance should be negligible for large datasets.
break
loss_all = loss_all / actual_nb_batches_per_iter
return loss_all
| adaptive-span-main | trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
from argparse import Namespace
import math
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class PersistentMemory(nn.Module):
def __init__(self, size, nb_heads, head_dim, dropout):
super(PersistentMemory, self).__init__()
self.size = size
self.nb_heads = nb_heads
self.head_dim = head_dim
# different heads have different vectors
self.key = nn.Parameter(torch.randn(self.nb_heads, self.head_dim, self.size) / math.sqrt(self.head_dim))
self.val = nn.Parameter(torch.randn(self.nb_heads, self.size, self.head_dim) / math.sqrt(self.size))
self.dropout = nn.Dropout(dropout)
self.adaptive_span = None
def forward(self, query, attn):
key = self.key.unsqueeze(0)
val = self.val.unsqueeze(0)
query = query.view((-1, self.nb_heads) + query.size()[1:])
attn_pers = torch.matmul(query, key * math.sqrt(self.head_dim))
attn_pers = attn_pers.view((-1,) + attn_pers.size()[2:])
# compute softmax jointly
attn = torch.cat((attn, attn_pers), dim=-1)
attn = attn / math.sqrt(self.head_dim) # B x M X L_total
attn = F.softmax(attn, dim=-1)
attn_pers = attn[:, :, -key.size(-1):]
attn = attn[:, :, :-key.size(-1)] # B x M X L
# adapt attention span
if self.adaptive_span is not None:
attn = self.adaptive_span(attn, normalize=False)
# normalize the sum jointly!
attn = torch.cat((attn, attn_pers), dim=-1)
attn = attn / (attn.sum(-1, keepdim=True) + 1e-8)
attn_pers = attn[:, :, -key.size(-1):]
attn = attn[:, :, :-key.size(-1)] # B x M X L
attn_pers = self.dropout(attn_pers) # B x M X L
attn_pers = attn_pers.view((-1, self.nb_heads) + attn_pers.size()[1:])
out = torch.matmul(attn_pers, val * math.sqrt(self.size))
out = out.view((-1,) + out.size()[2:])
return attn, out
| adaptive-span-main | persistent_memory.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import time
import torch
from config import PARAMS_CONFIG
from data import get_train_val_test_data
from models import TransformerSeq
from trainer import train_iteration, full_eval
from utils import (
get_params,
set_up_env,
get_optimizer_and_scheduler,
load_checkpoint,
save_checkpoint,
Logger)
def launch(env_params,
model_params,
adapt_io_params,
adapt_span_params,
pers_mem_params,
optim_params,
data_params,
trainer_params):
# ENVIRONMENT (device, distributed, etc.)
set_up_env(env_params)
device = env_params['device']
distributed = env_params['distributed']
if not distributed or env_params['rank'] == 0:
print('model_params:\t', model_params)
print('optim_params:\t', optim_params)
print('data_params:\t', data_params)
print('trainer_params:\t', trainer_params)
print('adapt_io_params:\t', adapt_io_params)
print('adapt_span_params:\t', adapt_span_params)
print('pers_mem_params:\t', pers_mem_params)
# DATA
train_data, val_data, test_data = get_train_val_test_data(
data_params=data_params,
env_params=env_params,
batch_size=trainer_params['batch_size'],
device=device,
sort_dict=adapt_io_params['adapt_io_enabled'])
# MODEL
model = TransformerSeq(
vocab_size=data_params['vocab_size'], **model_params,
adapt_io_params=adapt_io_params,
adapt_span_params=adapt_span_params,
pers_mem_params=pers_mem_params)
if distributed:
local_rank = env_params['local_rank']
model = model.to(device)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank)
else:
model = torch.nn.DataParallel(model)
model = model.to(device)
# OPTIMIZER AND SCHEDULER
optimizer, scheduler = get_optimizer_and_scheduler(
model=model, optim_params=optim_params)
# create logger
logger = Logger(data_params['data_unit'])
# resume training from last checkpoint if exists
iter_init = load_checkpoint(
trainer_params['checkpoint_path'], model, optimizer, scheduler,
logger, distributed)
if trainer_params['full_eval_mode']:
# evaluate the model on test data
with torch.no_grad():
loss_val = full_eval(model, optimizer, scheduler, val_data,
model_params['block_size'],
model_params['hidden_size'])
loss_test = full_eval(model, optimizer, scheduler, test_data,
model_params['block_size'],
model_params['hidden_size'])
if distributed:
# collect results into rank0
stats = torch.tensor(
[loss_val, loss_test]).to(device)
torch.distributed.reduce(stats, 0)
if env_params['rank'] == 0:
loss_val = stats[0] / env_params['world_size']
loss_test = stats[1] / env_params['world_size']
else:
return
if data_params['data_unit'] == 'bpc':
print('val: {:.3f}bpc'.format(loss_val / math.log(2)))
print('test: {:.3f}bpc'.format(loss_test / math.log(2)))
else:
print('val: {:.2f}ppl'.format(math.exp(loss_val)))
print('test: {:.2f}ppl'.format(math.exp(loss_test)))
return
# position of current batch
data_pos = [0] * 2
# initialize caches for train and valid
hid_cache = [[
torch.zeros(
train_data.size(0),
layer.attn.attn.get_cache_size(),
model_params['hidden_size']).to(device)
for layer in model.module.layers] for _ in range(2)]
nb_batches_per_iter = trainer_params['nb_batches_per_iter']
for iter_no in range(iter_init, trainer_params['nb_iter']):
t_sta = time.time()
loss_train, data_pos[0], hid_cache[0] = train_iteration(
model, optimizer, scheduler, train_data, nb_batches_per_iter,
model_params['block_size'], False, data_pos[0], hid_cache[0],
trainer_params['batch_split'])
elapsed = 1000 * (time.time() - t_sta) / nb_batches_per_iter
with torch.no_grad():
loss_val, data_pos[1], hid_cache[1] = train_iteration(
model, optimizer, scheduler, val_data, nb_batches_per_iter,
model_params['block_size'], True, data_pos[1], hid_cache[1],
trainer_params['batch_split'])
if distributed:
# collect results into rank0
stats = torch.tensor(
[loss_train, loss_val]).to(device)
torch.distributed.reduce(stats, 0)
if env_params['rank'] == 0:
loss_train = stats[0] / env_params['world_size']
loss_val = stats[1] / env_params['world_size']
else:
continue
logger.log_iter(iter_no, nb_batches_per_iter, loss_train,
loss_val, elapsed, model)
save_checkpoint(trainer_params['checkpoint_path'],
iter_no, model, optimizer, scheduler, logger)
if __name__ == '__main__':
launch(**get_params(params_config=PARAMS_CONFIG))
| adaptive-span-main | main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import os
import torch
class Dictionary(object):
def __init__(self, path, sort_dict=False):
self.word2idx = {}
self.word2count = {}
self.idx2word = []
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
for line in f:
words = line.split() + ['<eos>']
for word in words:
if sort_dict:
self.word2count[word] = self.word2count.get(word, 0) + 1
elif word not in self.word2idx:
self.word2idx[word] = len(self.idx2word)
self.idx2word.append(word)
if sort_dict:
# Sort dictionary by count and build indices accordingly:
sorted_dict = sorted(self.word2count.items(), key=lambda kv: kv[1])[::-1]
for i in range(len(sorted_dict)):
word = sorted_dict[i][0]
self.word2idx[word] = i
self.idx2word.append(word)
def __len__(self):
return len(self.idx2word)
def _tokenize(text_path, dictionary):
"""Tokenizes a text file."""
print('Tokenizing {}'.format(text_path))
assert os.path.exists(text_path)
# Assign to each token its identifier
ids = []
with open(text_path, 'r', encoding="utf8") as f:
for line in f:
tokens = line.split() + ['<eos>']
for token in tokens:
ids.append(dictionary[token])
ids = torch.LongTensor(ids)
return ids
class Corpus:
def __init__(self, data_path, sort_dict):
print('Building dictionary')
self._dictionary = Dictionary(os.path.join(data_path, 'train.txt'), sort_dict)
self.train = _tokenize(
text_path=os.path.join(data_path, 'train.txt'),
dictionary=self._dictionary.word2idx)
self.valid = _tokenize(
text_path=os.path.join(data_path, 'valid.txt'),
dictionary=self._dictionary.word2idx)
self.test = _tokenize(
text_path=os.path.join(data_path, 'test.txt'),
dictionary=self._dictionary.word2idx)
@property
def vocab_size(self):
return len(self._dictionary)
def _batchify(data_tensor, batch_size):
nb_batches = data_tensor.size(0) // batch_size
# trim away some tokens to make whole batches
data_tensor = data_tensor.narrow(0, 0, nb_batches * batch_size)
data_tensor = data_tensor.view(batch_size, -1).contiguous()
return data_tensor
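# Illustrative example (not part of the original code): a 1-D tensor of 10
# token ids with batch_size=3 is trimmed to 9 tokens and reshaped to a 3 x 3
# tensor, so each row is one contiguous stream of the corpus.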
def _build_corpus(data_path, env_params, sort_dict):
# save the corpus to a file so that it's faster next time
if sort_dict:
corpus_path = os.path.join(data_path, 'corpus_sorted.pt')
else:
corpus_path = os.path.join(data_path, 'corpus.pt')
if os.path.exists(corpus_path):
print('Loading an existing corpus file from {}'.format(corpus_path))
corpus = torch.load(corpus_path)
else:
print('Creating a corpus file at {}'.format(corpus_path))
if env_params['distributed']:
# only one process needs to create the corpus file
if env_params['rank'] == 0:
corpus = Corpus(data_path, sort_dict)
torch.save(corpus, corpus_path)
# sync with other processes
torch.distributed.broadcast(torch.zeros(1).cuda(), src=0)
else:
print('Waiting for rank0 to create a corpus file.')
# sync with rank0
torch.distributed.broadcast(torch.zeros(1).cuda(), src=0)
corpus = torch.load(corpus_path)
else:
corpus = Corpus(data_path, sort_dict)
torch.save(corpus, corpus_path)
return corpus
def _get_train_val_test_data(corpus, batch_size):
return [
_batchify(corpus.train, batch_size),
_batchify(corpus.valid, batch_size),
_batchify(corpus.test, batch_size)
]
def get_train_val_test_data(data_params, env_params, batch_size, device, sort_dict):
corpus = _build_corpus(data_params['data_path'], env_params, sort_dict)
data_params['vocab_size'] = corpus.vocab_size
train_data, val_data, test_data = _get_train_val_test_data(
corpus=corpus, batch_size=batch_size)
if env_params['distributed']:
# split the data into equal parts
assert batch_size % env_params['world_size'] == 0
device_batch_size = batch_size // env_params['world_size']
slice_data = slice(
device_batch_size * env_params['rank'],
device_batch_size * (env_params['rank'] + 1))
train_data = train_data[slice_data]
val_data = val_data[slice_data]
test_data = test_data[slice_data]
train_data = train_data.to(device)
val_data = val_data.to(device)
test_data = test_data.to(device)
return train_data, val_data, test_data
| adaptive-span-main | data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
from torch.optim import Adagrad
def _clip_grad(clr, grad, group_grad_clip):
if group_grad_clip > 0:
norm = grad.norm(2).item()
if norm > group_grad_clip:
clr *= group_grad_clip / (norm + 1e-10)
return clr
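# Worked example (illustrative): with clr=0.1, a gradient of L2 norm 10 and
# group_grad_clip=5, the step size is rescaled to 0.1 * 5 / 10 = 0.05, which is
# equivalent to clipping the gradient norm to 5 for this update.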
class AdagradWithGradClip(Adagrad):
"""Adagrad algoritm with custom gradient clipping"""
def __init__(self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
grad_clip=0):
Adagrad.__init__(self,
params,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value)
self.defaults['grad_clip'] = grad_clip
self.param_groups[0].setdefault('grad_clip', grad_clip)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is "
"not compatible with sparse "
"gradients")
grad = grad.add(group['weight_decay'], p.data)
clr = (group['lr'] /
(1 + (state['step'] - 1) * group['lr_decay']))
# clip
clr = _clip_grad(clr=clr,
grad=grad,
group_grad_clip=group['grad_clip'])
if grad.is_sparse:
# the update is non-linear so indices must be unique
grad = grad.coalesce()
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum']._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
| adaptive-span-main | adagrad_with_grad_clip.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import html
import json
import os
import re
from lxml import etree
def extract_text(input_text, keep_markup):
input_text = input_text.replace("\n", "").replace("\r", "")
if keep_markup:
text = html.unescape(input_text)
elif not keep_markup:
text_root = etree.HTML(input_text)
if text_root is None:
return None
text = " ".join(text_root.itertext())
text = re.sub(" +", " ", text)
text = text.encode("ascii", "xmlcharrefreplace").decode("utf-8")
text = html.unescape(text)
return text
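# Illustrative example (not part of the original code): with keep_markup=False,
# extract_text("<p>1 + 1 = <b>2</b></p>", False) yields roughly "1 + 1 = 2"
# (tags dropped, entities unescaped, repeated spaces collapsed), while
# keep_markup=True only unescapes HTML entities and keeps the tags.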
def generate_closed_book_format(data_path, only_english, keep_markup, output_path):
with open(data_path, "r") as f:
question_list = []
answer_list = []
for website in f:
content = json.loads(website)
if only_english and content["Fasttext_language"] != "en":
continue
questions = content["Questions"]
for question in questions:
question_text = ""
if "name_markup" in question.keys():
extracted_text = extract_text(question["name_markup"], keep_markup)
if extracted_text is not None:
question_text += extracted_text + " "
if "text_markup" in question.keys():
extracted_text = extract_text(question["text_markup"], keep_markup)
if extracted_text is not None:
question_text += extracted_text
if len(question_text) > 0:
for answer in question["Answers"]:
if "text_markup" in answer.keys():
answer_text = extract_text(
answer["text_markup"], keep_markup
)
if (
answer_text is not None
and len(answer_text.replace("\n", "").replace("\r", "")) > 0
):
question_list.append(question_text)
answer_list.append(answer_text)
with open(output_path + ".source", "w") as f:
for element in question_list:
f.write(element.replace("\n", "").replace("\r", "") + "\n")
with open(output_path + ".target", "w") as f:
for element in answer_list:
f.write(element.replace("\n", "").replace("\r", "") + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate sequence-to-sequence input and output for closed-book QA"
)
parser.add_argument("--data_path", help="Path to the json dataset")
parser.add_argument("--output_path", help="Path to the output file")
parser.add_argument(
"--only_english",
action="store_true",
help="Only keep english samples in the dataset",
)
parser.add_argument(
"--keep_markup", action="store_true", help="Keep the HTML markup"
)
args = parser.parse_args()
generate_closed_book_format(
args.data_path, args.only_english, args.keep_markup, args.output_path
)
| CCQA-main | python/closed_book_processing.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import html
import json
import multiprocessing
import os
import time
import uuid
import fasttext
import lxml.html
from lxml import etree
def collect_question(node):
question = {}
# name
name_node = find_itemprop(node, "name")
if name_node is not None:
name_node = text_cleanup(name_node)
question["name_markup"] = turn_into_string(name_node)
# text
text_node = find_itemprop(node, "text")
if text_node is not None:
text_node = text_cleanup(text_node)
question["text_markup"] = turn_into_string(text_node)
# date/time {created|modified|published}
date_created = find_itemprop(node, "dateCreated")
if date_created is not None:
date_created = date_created.get("datetime")
question["date_created"] = date_created
date_modified = find_itemprop(node, "dateModified")
if date_modified is not None:
date_modified = date_modified.get("datetime")
question["date_modified"] = date_modified
date_published = find_itemprop(node, "datePublished")
if date_published is not None:
date_published = date_published.get("datetime")
question["date_published"] = date_published
# upvote count
upvote_count = find_itemprop(node, "upvoteCount")
if upvote_count is not None:
if upvote_count.tag == "meta":
upvote_count = upvote_count.get("content")
else:
upvote_count = upvote_count.text
question["upvote_count"] = upvote_count
# downvote count
downvote_count = find_itemprop(node, "downvoteCount")
if downvote_count is not None:
if downvote_count.tag == "meta":
downvote_count = downvote_count.get("content")
else:
downvote_count = downvote_count.text
question["downvote_count"] = downvote_count
# comment count
comment_count = find_itemprop(node, "commentCount")
if comment_count is not None:
if comment_count.tag == "meta":
comment_count = comment_count.get("content")
else:
comment_count = comment_count.text
question["comment_count"] = comment_count
# Answer count
answer_count = find_itemprop(node, "answerCount")
if answer_count is not None:
if answer_count.tag == "meta":
answer_count = answer_count.get("content")
else:
answer_count = answer_count.text
question["answer_count"] = answer_count
return question
def collect_answer(node):
answer = {}
# text
text_node = find_itemprop(node, "text")
if text_node is not None:
text_node = text_cleanup(text_node)
answer["text_markup"] = turn_into_string(text_node)
# suggested|accepted
suggested_accepted = node.get("itemprop")
answer["status"] = suggested_accepted
# date/time {created|modified|published}
date_created = find_itemprop(node, "dateCreated")
if date_created is not None:
date_created = date_created.get("datetime")
answer["date_created"] = date_created
date_modified = find_itemprop(node, "dateModified")
if date_modified is not None:
date_modified = date_modified.get("datetime")
answer["date_modified"] = date_modified
date_published = find_itemprop(node, "datePublished")
if date_published is not None:
date_published = date_published.get("datetime")
answer["date_published"] = date_published
# upvote count
upvote_count = find_itemprop(node, "upvoteCount")
if upvote_count is not None:
if upvote_count.tag == "meta":
upvote_count = upvote_count.get("content")
else:
upvote_count = upvote_count.text
answer["upvote_count"] = upvote_count
# downvote count
downvote_count = find_itemprop(node, "downvoteCount")
if downvote_count is not None:
if downvote_count.tag == "meta":
downvote_count = downvote_count.get("content")
else:
downvote_count = downvote_count.text
answer["downvote_count"] = downvote_count
# comment count
comment_count = find_itemprop(node, "commentCount")
if comment_count is not None:
if comment_count.tag == "meta":
comment_count = comment_count.get("content")
else:
comment_count = comment_count.text
answer["comment_count"] = comment_count
return answer
def predict_majority_language(languages):
frequency = {}
for language in languages:
if language in frequency:
frequency[language] += 1
else:
frequency[language] = 1
language, appearances = "-", 0
for key in frequency.keys():
if frequency[key] > appearances:
appearances = frequency[key]
language = key
return language
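# Illustrative example: predict_majority_language(["en", "en", "de"]) returns
# "en"; an empty list falls back to "-".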
def collect_person(node):
person = {}
relevant_node = find_itemprop(node, "name")
if relevant_node is None:
# If name not defined, try author, which seems to be used sometimes
relevant_node = find_itemprop(node, "author")
if relevant_node is None:
return None
else:
if relevant_node.tag == "meta":
person["author"] = relevant_node.get("content")
else:
person["author"] = relevant_node.text
return person
def text_cleanup(node):
# Only keep text elements from https://developer.mozilla.org/en-US/docs/Web/HTML/Element
valid_tags = [
"blockquote",
"dd",
"div",
"dl",
"dt",
"figcaption",
"hr",
"li",
"ol",
"p",
"pre",
"ul",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"a",
"abbr",
"b",
"bdi",
"bdo",
"br",
"cite",
"code",
"data",
"dfn",
"em",
"i",
"kbd",
"mark",
"q",
"rb",
"rp",
"rt",
"rtc",
"ruby",
"s",
"samp",
"small",
"span",
"strong",
"sub",
"sup",
"time",
"u",
"var",
"wbr",
"caption",
"col",
"colgroup",
"table",
"tbody",
"td",
"tfoot",
"th",
"thead",
"tr",
]
remove_all_but_text_nodes(node, valid_tags)
return node
def turn_into_string(node):
text_string = lxml.html.tostring(node).decode("utf-8")
# Remove the most outer tag, since that is the itemprop tag, which is not relevant anymore
text_string = text_string[text_string.find(">") + 1 :]
text_string = text_string[: text_string.rfind("</")]
return text_string
def remove_all_but_text_nodes(node, valid_tags):
for child in node:
remove_all_but_text_nodes(child, valid_tags)
if node.tag not in valid_tags and "itemprop" not in node.keys():
for valid_child in node:
node.addnext(valid_child)
if node.getparent() is not None:
node.getparent().remove(node)
def find_itemprop(node, prop):
if "itemprop" in node.keys():
if prop in node.get("itemprop"):
return node
for child in node:
value = find_itemprop(child, prop)
if value is not None:
return value
return None
def get_all_questions(node, question_list):
if "itemtype" in node.keys():
if "//schema.org/Question" in node.get("itemtype"):
question_list.append(node)
return
for child in node:
get_all_questions(child, question_list)
def predict_question_language(json_question, ft_model):
if "text_markup" in json_question.keys():
language = ft_model.predict(html.unescape(json_question["text_markup"]))[0][
0
].replace("__label__", "")
elif "name_markup" in json_question.keys():
language = ft_model.predict(html.unescape(json_question["name_markup"]))[0][
0
].replace("__label__", "")
else:
for answer in json_question["Answers"]:
if "text_markup" in answer.keys():
language = ft_model.predict(html.unescape(answer["text_markup"]))[0][
0
].replace("__label__", "")
break
return language
def has_at_least_Q_or_A(json_question):
if "name_markup" in json_question.keys() or "text_markup" in json_question.keys():
return True
for answer in json_question["Answers"]:
if "text_markup" in answer.keys():
return True
return False
def search_tree(node, json_context):
if "itemtype" in node.keys() and "//schema.org/Answer" in node.get("itemtype"):
if "Answers" not in json_context.keys():
# Stacked question (not in the schema.org definition)
if node.getparent() is not None:
node.getparent().remove(node)
return
else:
json_context["Answers"].append({})
json_context = json_context["Answers"][-1]
for child in node:
search_tree(child, json_context)
if "itemtype" in node.keys():
if "//schema.org/Question" in node.get("itemtype"):
if "Answers" not in json_context.keys():
# Stacked question (not in the schema.org definition)
if node.getparent() is not None:
node.getparent().remove(node)
return
else:
element = collect_question(node)
json_context.update(element)
if node.getparent() is not None:
node.getparent().remove(node)
elif "//schema.org/Answer" in node.get("itemtype"):
element = collect_answer(node)
json_context.update(element)
if node.getparent() is not None:
node.getparent().remove(node)
elif "//schema.org/Person" in node.get("itemtype"):
element = collect_person(node)
if element is not None:
json_context.update(element)
if node.getparent() is not None:
node.getparent().remove(node)
def generate_structured_json(files, output_folder, output_file, fasttext_bin):
ft_model = fasttext.load_model(fasttext_bin)
for warc_file in files:
with open(warc_file) as f, open(
os.path.join(
output_folder,
output_file.replace(
"PLACEHOLDER", os.path.basename(warc_file).replace(".mhtml", "")
),
),
"a+",
) as g:
webpages = json.loads(f.read())
for idx, element in enumerate(webpages):
document = {}
html_content = element["mhtml"]
language = element["language"]
uri = element["uri"]
html_root = etree.HTML(html_content)
html_questions, json_questions, questions_language = [], [], []
get_all_questions(html_root, html_questions)
for html_question in html_questions:
json_question = {"Answers": []}
search_tree(html_question, json_question)
# Drop every instance that has neither a question name, question text, nor any answer text
has_Q_or_A = has_at_least_Q_or_A(json_question)
if has_Q_or_A:
questions_language.append(
predict_question_language(json_question, ft_model)
)
json_questions.append(json_question)
if len(json_questions) > 0:
question_uuid = str(uuid.uuid4())
predicted_language = predict_majority_language(questions_language)
json_record = json.dumps(
{
"Language": language,
"Fasttext_language": predicted_language,
"URI": uri,
"UUID": question_uuid,
"WARC_ID": os.path.basename(warc_file).replace(
".mhtml", ""
),
"Questions": json_questions,
}
)
g.write(json_record + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert mhtml objects into json")
parser.add_argument("--fasttext_path", help="Path to the fasttext lid.176.bin file")
parser.add_argument("--input_folder", help="Path to the mhtml folder")
parser.add_argument("--output_folder", help="Path to the output folder")
args = parser.parse_args()
fasttext_bin = args.fasttext_path
input_folder = args.input_folder
output_folder = args.output_folder
output_file = "ccqa_PLACEHOLDER.json"
if os.path.isfile(os.path.join(output_folder, output_file)):
print("Output files already exist and will be replaced...")
os.remove(os.path.join(output_folder, output_file))
files = [
os.path.join(input_folder, f)
for f in os.listdir(input_folder)
if f.endswith(".mhtml")
]
generate_structured_json(files, output_folder, output_file, fasttext_bin)
| CCQA-main | python/mhtml_to_json.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import html
import json
import argparse
import os
import random
import re
import time
from lxml import etree
def extract_text(input_text, keep_markup):
if keep_markup:
text = input_text.encode("ascii", "xmlcharrefreplace").decode("utf-8")
text = html.unescape(text)
text = text.replace("\n", "~").replace("\r", "~")
elif not keep_markup:
text_root = etree.HTML(input_text)
if text_root is None:
return None
text = " ".join(text_root.itertext())
text = re.sub(" +", " ", text)
text = text.encode("ascii", "xmlcharrefreplace").decode("utf-8")
text = html.unescape(text)
text = text.replace("\n", "~").replace("\r", "~")
return text
def clean_votes(vote):
try:
vote = int(vote)
except Exception:
try:
vote = vote.replace(" ", "").replace("~", "")
vote = int(vote)
except Exception:
try:
vote = re.sub("/[^0-9.]/g", "", vote)
vote = int(vote)
except Exception:
vote = 0
return vote
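# Illustrative examples: clean_votes("12") -> 12 and clean_votes("~ 7 ") -> 7,
# while anything that cannot be parsed as a number (e.g. "n/a") falls back to 0.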
def find_markup_options(answers):
contains_accepted = False
contains_suggested = False
contains_vote = False
for answer in answers:
if "text_markup" in answer.keys():
if "status" in answer.keys() and answer["status"] == "acceptedAnswer":
contains_accepted = True
if "status" in answer.keys() and answer["status"] == "suggestedAnswer":
contains_suggested = True
if "upvote_count" in answer.keys():
contains_vote = True
return contains_accepted, contains_suggested, contains_vote
def clean_answer(acc_answers, sugg_answers):
cleaned_acc_answers, cleaned_sugg_answers = [], []
if sugg_answers is not None and len(sugg_answers) > 0:
for answer in sugg_answers:
if answer is not None and len(answer) > 0:
cleaned_sugg_answers.append(answer)
if acc_answers is not None and len(acc_answers) > 0:
has_non_empty_answer = False
for answer in acc_answers:
if answer is not None and len(answer) > 0:
has_non_empty_answer = True
cleaned_acc_answers.append(answer)
return cleaned_acc_answers, cleaned_sugg_answers, has_non_empty_answer
def full_info(answers, question_text, keep_markup):
acc_answers, sugg_answers = [], []
for answer in answers:
if answer["status"] == "acceptedAnswer":
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
acc_answers.append(answer_text)
if answer["status"] == "suggestedAnswer":
if "upvote_count" in answer.keys():
if int(clean_votes(answer["upvote_count"])) < 2:
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
sugg_answers.append(answer_text)
if int(clean_votes(answer["upvote_count"])) >= 2:
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
acc_answers.append(answer_text)
acc_answers, sugg_answers, has_non_empty_answer = clean_answer(
acc_answers, sugg_answers
)
if acc_answers is not None and len(acc_answers) > 0:
if has_non_empty_answer:
return {
"question": question_text,
"answers": [],
"positive_ctxs": [
{"title": "", "text": acc_answer} for acc_answer in acc_answers
],
"hard_negative_ctxs": [
{"title": "", "text": sugg_answer} for sugg_answer in sugg_answers
],
}
def acc_sugg_info(answers, question_text, keep_markup):
acc_answers, sugg_answers = [], []
for answer in answers:
if answer["status"] == "acceptedAnswer":
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
acc_answers.append(answer_text)
if answer["status"] == "suggestedAnswer":
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
sugg_answers.append(answer_text)
acc_answers, sugg_answers, has_non_empty_answer = clean_answer(
acc_answers, sugg_answers
)
if acc_answers is not None and len(acc_answers) > 0:
if has_non_empty_answer:
return {
"question": question_text,
"answers": [],
"positive_ctxs": [
{"title": "", "text": acc_answer} for acc_answer in acc_answers
],
"hard_negative_ctxs": [
{"title": "", "text": sugg_answer} for sugg_answer in sugg_answers
],
}
def vote_info(answers, question_text, keep_markup):
best_up_count = -999
top_answers = []
top_answer = None
bottom_answers = []
for idx, answer in enumerate(answers):
if "upvote_count" in answer.keys():
if int(clean_votes(answer["upvote_count"])) > best_up_count:
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
top_answer = answer_text
best_up_count = int(clean_votes(answer["upvote_count"]))
best_idx = idx
if top_answer is None:
for idx, answer in enumerate(answers):
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
top_answer = answer_text
best_idx = idx
top_answers.append(top_answer)
answers.pop(best_idx)
for answer in answers:
if "upvote_count" in answer.keys():
if int(clean_votes(answer["upvote_count"])) > 1:
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
top_answers.append(answer_text)
else:
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
bottom_answers.append(answer_text)
top_answers, bottom_answers, has_non_empty_answer = clean_answer(
top_answers, bottom_answers
)
if top_answers is not None and len(top_answers) > 0:
if has_non_empty_answer:
return {
"question": question_text,
"answers": [],
"positive_ctxs": [
{"title": "", "text": top_answer} for top_answer in top_answers
],
"hard_negative_ctxs": [
{"title": "", "text": bottom_answer}
for bottom_answer in bottom_answers
],
}
def no_info(answers, question_text, keep_markup):
random.Random(13).shuffle(answers)
selected_answer = ""
for answer in answers:
if "text_markup" in answer.keys():
answer_text = extract_text(answer["text_markup"], keep_markup)
selected_answer = answer_text
break
if selected_answer is not None and len(selected_answer) > 0:
return {
"question": question_text,
"answers": [],
"positive_ctxs": [{"title": "", "text": selected_answer}],
"hard_negative_ctxs": [],
}
def generate_passage_retrieval_files(data_path, only_english, keep_markup, output_path):
instances = []
with open(data_path, "r") as f:
for website in f:
# Process the question
content = json.loads(website)
if only_english and content["Fasttext_language"] != "en":
continue
questions = content["Questions"]
for question in questions:
question_text = ""
if "name_markup" in question.keys():
extracted_text = extract_text(question["name_markup"], keep_markup)
if extracted_text is not None:
question_text += extracted_text + " "
if "text_markup" in question.keys():
extracted_text = extract_text(question["text_markup"], keep_markup)
if extracted_text is not None:
question_text += extracted_text
# If question exists, check the answers for their markup capacities
if len(question_text) > 0:
accepted, suggested, vote = find_markup_options(question["Answers"])
# All information available
if accepted and suggested and vote:
instances.append(
full_info(question["Answers"], question_text, keep_markup)
)
# If no votes are available, pick at random from accepted and suggested
elif accepted and suggested:
instances.append(
acc_sugg_info(
question["Answers"], question_text, keep_markup
)
)
# If only votes are available use above/below 2
elif vote:
instances.append(
vote_info(question["Answers"], question_text, keep_markup)
)
# Otherwise just select one at random to be a positive ctx and no hard negatives
else:
instances.append(
no_info(question["Answers"], question_text, keep_markup)
)
with open(output_path + ".jsonl", "w") as f:
for sample in instances:
json_record = json.dumps(sample)
f.write(json_record + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate passage retrieval objects for open-book QA"
)
parser.add_argument("--data_path", help="Path to the json dataset")
parser.add_argument("--output_path", help="Path to the output file")
parser.add_argument(
"--only_english",
action="store_true",
help="Only keep english samples in the dataset",
)
parser.add_argument(
"--keep_markup", action="store_true", help="Keep the HTML markup"
)
args = parser.parse_args()
generate_passage_retrieval_files(
args.data_path, args.only_english, args.keep_markup, args.output_path
)
| CCQA-main | python/passage_retrieval_processing.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import html
import json
import re
import string
from lxml import etree
def extract_text(input_text):
text_root = etree.HTML(input_text)
if text_root is None:
return None
text = " ".join(text_root.itertext())
text = re.sub(" +", " ", text)
text = text.encode("ascii", "xmlcharrefreplace").decode("utf-8")
text = html.unescape(text)
return text
def normalize_answer(s):
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
def remove_tilda(text):
return text.replace("\n", "").replace("~", "").strip()
return remove_tilda(white_space_fix(remove_articles(remove_punc(lower(s)))))
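# Illustrative example: normalize_answer("The  Answer!") -> "answer"
# (lower-cased, punctuation and articles removed, whitespace collapsed).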
def get_full_question(question):
question_text = ""
if "name_markup" in question.keys():
extracted_text = extract_text(question["name_markup"])
if extracted_text is not None:
question_text += extracted_text + " "
if "text_markup" in question.keys():
extracted_text = extract_text(question["text_markup"])
if extracted_text is not None:
question_text += extracted_text
return question_text
def get_full_answer(answer):
answer_text = ""
if "text_markup" in answer.keys():
extracted_text = extract_text(answer["text_markup"])
if extracted_text is not None:
answer_text += extracted_text
return answer_text
def generate_new_datapoint(line, dataset):
dataset[line["URI"]] = {
"Language": line["Language"],
"Fasttext_language": line["Fasttext_language"],
"URI": line["URI"],
"UUID": line["UUID"],
"WARC_ID": line["WARC_ID"],
}
dataset[line["URI"]]["Questions"] = {}
for question in line["Questions"]:
condensed_question = copy.copy(question)
# Remove answers to only look at questions
condensed_question.pop("Answers")
dataset[line["URI"]]["Questions"][
normalize_answer(get_full_question(condensed_question))
] = condensed_question
dataset[line["URI"]]["Questions"][
normalize_answer(get_full_question(condensed_question))
]["Answers"] = {}
for answer in question["Answers"]:
dataset[line["URI"]]["Questions"][
normalize_answer(get_full_question(condensed_question))
]["Answers"][normalize_answer(get_full_answer(answer))] = answer
return dataset
def update_datapoint(line, dataset):
curr_object = dataset[line["URI"]]
for new_question in line["Questions"]:
new_question_text = get_full_question(new_question)
if len(new_question_text) > 0:
new_question_text = normalize_answer(new_question_text)
if new_question_text in curr_object["Questions"].keys():
for new_answer in new_question["Answers"]:
new_answer_text = get_full_answer(new_answer)
if len(new_answer_text) > 0:
new_answer_text = normalize_answer(new_answer_text)
if (
new_answer_text
not in curr_object["Questions"][new_question_text][
"Answers"
]
):
curr_object["Questions"][new_question_text]["Answers"][
new_answer_text
] = new_answer
else:
condensed_question = copy.copy(new_question)
condensed_question.pop("Answers")
curr_object["Questions"][
normalize_answer(get_full_question(condensed_question))
] = condensed_question
dataset[line["URI"]]["Questions"][
normalize_answer(get_full_question(condensed_question))
]["Answers"] = {}
for answer in new_question["Answers"]:
curr_object["Questions"][
normalize_answer(get_full_question(condensed_question))
]["Answers"][normalize_answer(get_full_answer(answer))] = answer
return dataset
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Merge duplicate URL questions and answers into single objects"
)
parser.add_argument("--json_dataset_path", help="Path to the json dataset")
parser.add_argument("--output_path", help="Path to the output file")
args = parser.parse_args()
dataset_path = args.json_dataset_path
with open(dataset_path, "r") as data_file:
dataset = {}
for idx, line in enumerate(data_file):
line = json.loads(line)
# Add in dictionary format for better runtime
if line["URI"] not in dataset.keys():
dataset = generate_new_datapoint(line, dataset)
else:
dataset = update_datapoint(line, dataset)
# Save in original format
with open(args.output_path, "w") as f:
for url in dataset.keys():
data_object = {
"Language": dataset[url]["Language"],
"Fasttext_language": dataset[url]["Fasttext_language"],
"URI": dataset[url]["URI"],
"UUID": dataset[url]["UUID"],
"WARC_ID": dataset[url]["WARC_ID"],
}
data_object["Questions"] = []
questions = [
dataset[url]["Questions"][key]
for key in dataset[url]["Questions"].keys()
]
for question in questions:
answers = [
question["Answers"][key] for key in question["Answers"].keys()
]
question.pop("Answers")
data_object["Questions"].append(question)
data_object["Questions"][-1]["Answers"] = answers
json_record = json.dumps(data_object)
f.write(json_record + "\n")
| CCQA-main | python/json_duplicate_filter.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import gzip
import itertools
import json
import logging
import os.path as osp
import time
import hydra
import pandas as pd
from omegaconf import DictConfig
from tqdm import tqdm
import numpy as np
logger = logging.getLogger(__name__)
def parse(path):
g = gzip.open(path, "rb")
for l in g:
yield json.loads(l)
def getDF(path):
i = 0
df = {}
for d in tqdm(parse(path)):
df[i] = d
i += 1
return pd.DataFrame.from_dict(df, orient="index")
def convert_price_string(x):
x = x.replace("$", "").replace(" ", "").replace(",", "")
price_range = x.split("-")
for price_limit in price_range:
try:
float(price_limit)
except ValueError:
return pd.NA
return np.array(price_range).astype(float).mean()
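# Illustrative examples: convert_price_string("$7.99") -> 7.99 and
# convert_price_string("$10.00 - $20.00") -> 15.0 (mean of the range); strings
# that do not parse as numbers (e.g. "from $5") return pd.NA.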
def verify_closed_set(fbt_set: pd.DataFrame):
# Verify it is a closed set
asins_also_buy = list(itertools.chain(*fbt_set.also_buy.tolist()))
set1, set2 = set(asins_also_buy), set(fbt_set.asin.tolist())
assert len(set1) == len(set2)
# Keep only also_buy items that exist in asins
def filter_also_buy(also_buy_list_, asins_):
return list(set(also_buy_list_) & asins_)
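# Illustrative example: filter_also_buy(["a", "b", "x"], {"a", "b", "c"}) keeps
# only "a" and "b" (order is not guaranteed because of the set intersection).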
def reduce_to_closed_set(fbt_df: pd.DataFrame) -> pd.DataFrame:
# Keep only items that exist in also_buy
start_len, finish_len = -1, 1
fbt_df_start = fbt_df
round_ = 0
while start_len != finish_len:
t1 = time.time()
asins_also_buy = list(itertools.chain(*fbt_df_start.also_buy.tolist()))
fbt_df_set_step0 = fbt_df_start[fbt_df_start.asin.isin(asins_also_buy)]
asins = set(fbt_df_set_step0.asin.tolist())
fbt_df_set_step0["also_buy"] = fbt_df_set_step0.also_buy.apply(
lambda also_buy_list: filter_also_buy(also_buy_list, asins)
)
# Filter
mask = fbt_df_set_step0["also_buy"].apply(
lambda x: True if len(x) > 0 else False
)
fbt_df_set_step1 = fbt_df_set_step0[mask]
fbt_df_finish = fbt_df_set_step1
        # Check whether any rows were removed in this round
start_len, finish_len = len(fbt_df_start), len(fbt_df_finish)
logger.info(
f"reduce_to_closed_set: Round {round_}. [pre post]=[{start_len} {finish_len}]. {time.time()-t1:.2f} sec"
)
fbt_df_start = fbt_df_finish
round_ += 1
return fbt_df_start
@hydra.main(version_base="1.2", config_path="../configs/", config_name="process_meta")
def process_meta(cfg: DictConfig):
t0 = time.time()
logger.info(cfg)
category_name = cfg.category_name
out_path_pkl = osp.join(cfg.data_processed_dir, f"{category_name}_processed.pkl")
meta_file_name = osp.join(cfg.data_raw_dir, f"meta_{category_name}.json.gz")
fbt_file_name = osp.join(cfg.data_processed_dir, f"{category_name}_fbt.pkl")
# Read meta file
if not osp.exists(fbt_file_name):
logger.info(f"Reading {meta_file_name}")
t1 = time.time()
fbt_df = getDF(meta_file_name)
fbt_df.to_pickle(fbt_file_name)
logger.info(f"getDF in {time.time()-t1:.1f} sec")
t1 = time.time()
fbt_df = pd.read_pickle(fbt_file_name)[
["asin", "category", "also_buy", "price", "imageURLHighRes"]
]
logger.info(f"fbt_df read_pickle in {time.time()-t1:.1f} sec")
# First cleaning meta
t1 = time.time()
len0 = len(fbt_df)
fbt_df = fbt_df.dropna()
fbt_df = fbt_df[fbt_df["imageURLHighRes"].apply(lambda urls: len(urls) > 0)]
fbt_df["imageURLHighRes"] = fbt_df["imageURLHighRes"].apply(
lambda urls: urls[0]
) # Keep only 1 url
fbt_df["price"] = fbt_df["price"].apply(convert_price_string)
fbt_df = fbt_df.dropna()
logger.info(
f"First cleaning meta in {time.time()-t1:1f} sec. [pre post]=[{len0} {len(fbt_df)}]"
)
    # Merge duplicate entries
t1 = time.time()
len0 = len(fbt_df)
fbt_df = fbt_df.groupby(["asin"]).agg(
{
"also_buy": "sum",
"category": "first",
"price": "first",
"imageURLHighRes": "first",
},
as_index=False,
)
fbt_df.also_buy = fbt_df.also_buy.apply(lambda also_buy: list(set(also_buy)))
fbt_df = fbt_df.reset_index()
logger.info(
f"merge_duplicate_entries in {time.time() -t1:.1f} sec. [pre post]=[{len0} {len(fbt_df)}]"
)
# Keep only items that exist in also_buy
t1 = time.time()
len0 = len(fbt_df)
fbt_set = reduce_to_closed_set(fbt_df)
verify_closed_set(fbt_set)
logger.info(
f"reduce_to_closed_set in {time.time() -t1:.1f} sec. [pre post]=[{len0} {len(fbt_set)}]"
)
# Save dataframe
t1 = time.time()
len0 = len(fbt_set)
fbt_set = fbt_set.dropna()
fbt_set = fbt_set.reset_index()
fbt_set["img_path"] = fbt_set["imageURLHighRes"].apply(
lambda url: osp.join(cfg.data_raw_dir, category_name, osp.basename(url))
)
fbt_set.to_pickle(out_path_pkl)
logger.info(
f"saved dataframe in {time.time() -t1:.1f} sec. [pre post]=[{len0} {len(fbt_set)}]"
)
logger.info(out_path_pkl)
logger.info(f"Finish in {time.time()-t0:.1f} sec")
if __name__ == "__main__":
process_meta()
| cycle_gan_for_complementary_item_recommendations-main | src/main_process_meta.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import types
import hydra
import pandas as pd
import torch
import torchvision.models as models
from omegaconf import DictConfig
from torch import Tensor
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset_utils import get_image_dataset
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
features = torch.flatten(x, 1)
x = self.fc(features)
return x, features
@hydra.main(
version_base="1.2", config_path="../configs/", config_name="create_embeddings"
)
def create_embeddings(cfg: DictConfig):
t0 = time.time()
logger.info(cfg)
os.chdir(hydra.utils.get_original_cwd())
pkl_path = osp.join(cfg.data_processed_dir, f"{cfg.category_name}_processed_w_imgs.pkl")
out_path = osp.join(cfg.data_final_dir, f"{cfg.category_name}_embeddings.pkl")
if torch.cuda.is_available():
device = "cuda"
elif torch.backends.mps.is_available():
device = "mps"
else:
device = "cpu"
# Load data
df = pd.read_pickle(pkl_path)
df = df[df["img_path"].apply(lambda path: osp.exists(path))]
dataset = get_image_dataset(df)
dataloader = DataLoader(
dataset, batch_size=cfg.batch_size, num_workers=cfg.num_workers, pin_memory=True
)
logger.info(f"Creating dataset: {len(dataset)=}")
# Load pretrained model
model = models.resnet152(pretrained=True)
model.forward = types.MethodType(_forward_impl, model)
model = model.to(device)
embeddings = []
with torch.no_grad():
for batch in tqdm(dataloader):
_, embedding = model(batch.to(device))
embeddings.append(embedding.to("cpu"))
embeddings = torch.vstack(embeddings)
# Save to file
df["img_embedding"] = embeddings.tolist()
df = df[["asin", "img_path", "img_embedding"]]
df.to_pickle(out_path)
logger.info(f"Finish in {time.time()-t0:.1f} sec. {out_path=}")
if __name__ == "__main__":
create_embeddings()
| cycle_gan_for_complementary_item_recommendations-main | src/main_create_embeddings.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import logging
import os
from os.path import join as osj
from time import time
import hydra
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from sklearn.metrics import ndcg_score
from tqdm import tqdm
from dataset_utils import FbtDataset, load_dfs
from lit.eval_utils import (
calc_topk,
create_pop_cat_aware_predictor,
create_pop_predictor,
generate_test_set_hot_labels,
)
from inferece_utils import calc_ndcg_per_category, calc_performance, get_dataloaders
logger = logging.getLogger(__name__)
plt.style.use(["science", "ieee"])
def build_hot_labels_for_popularity_based_predictor(dataset, fbt_df):
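    # For every unique (source asin, target category) pair in the test set,
    # build a one-hot row over all known asins marking the true complements.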
# Build hot labels
t1 = time()
fbt_df_test = fbt_df[fbt_df["set_name"] == "test"]
fbt_by_asin_src = dataset.fbt_by_asin_src
asins = np.unique(np.hstack([fbt_df.asin_src, fbt_df.asin_target]))
indices = np.array(
list(fbt_df_test.groupby(by=["asin_src", "category_int_target"]).indices.keys())
)
asin_src_test, category_pos_test = indices[:, 0], indices[:, 1].astype(int)
hot_labels, asin_src_test = generate_test_set_hot_labels(
asin_src_test, category_pos_test, fbt_by_asin_src, asins
)
return hot_labels, asins, asin_src_test, category_pos_test
def execute_analyse(cfg: DictConfig, out_dir):
fbt_df, emb_df = load_dfs(cfg)
fbt_df_train, fbt_df_test = (
fbt_df[fbt_df["set_name"] == "train"],
fbt_df[fbt_df["set_name"] == "test"],
)
dataset = FbtDataset(fbt_df, emb_df)
logger.info(f"{[len(fbt_df_train), len(fbt_df_test), len(dataset)]=}")
# Build hot labels
t1 = time()
(
hot_labels,
asins,
asin_src_test,
category_pos_test,
    ) = build_hot_labels_for_popularity_based_predictor(dataset, fbt_df)
logger.info(f"{hot_labels.shape=} {time()-t1:.1f}s")
# ------------------------------------------------
# Most popular predictor
# ------------------------------------------------
logger.info("Most popular predictor")
t1 = time()
probs = create_pop_predictor(fbt_df_train, asins)
# Predict based on popularity
probs = torch.from_numpy(probs).repeat(hot_labels.shape[0], 1)
    # Calculate retrieval metrics
calc_performance(hot_labels, probs, cfg)
# ------------------------------------------------
# Most popular predictor: category aware
# ------------------------------------------------
logger.info("Most popular predictor: category aware")
t1 = time()
pred_dicts = create_pop_cat_aware_predictor(fbt_df_train, asins)
probs = torch.tensor(
np.vstack([pred_dicts[cat][np.newaxis, :] for cat in category_pos_test])
)
# Calculate retrival metrics
calc_performance(hot_labels, probs, cfg)
calc_ndcg_per_category(
hot_labels,
probs,
asin_src_test,
category_pos_test,
f"pop_category_aware_pred.pkl_{cfg.category_name}",
out_dir,
)
@hydra.main(version_base="1.2", config_path="../configs/", config_name="inference")
def execute_most_pop_inference(cfg: DictConfig):
out_dir = os.getcwd()
os.chdir(hydra.utils.get_original_cwd())
pl.seed_everything(cfg.seed)
for category_name in cfg.most_pop_category_names:
cfg.category_name = category_name
execute_analyse(cfg, out_dir)
if __name__ == "__main__":
execute_most_pop_inference()
| cycle_gan_for_complementary_item_recommendations-main | src/main_inference_most_pop.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os.path as osp
import random
import pandas as pd
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
logger = logging.getLogger(__name__)
def load_dfs(cfg):
pkl_path = osp.join(cfg["data_dir"], cfg["category_name"] + "_sets.pkl")
fbt_df = pd.read_pickle(pkl_path)
emb_path = osp.join(cfg["data_dir"], cfg["category_name"] + "_embeddings.pkl")
emb_df = pd.read_pickle(emb_path)
return fbt_df, emb_df
class ImgDataset(Dataset):
def __init__(self, image_paths, transform=None):
self.image_paths = image_paths
self.transform = transform
def __getitem__(self, index):
image_path = self.image_paths[index]
x = Image.open(image_path)
if self.transform is not None:
x = self.transform(x)
return x
def __len__(self):
return len(self.image_paths)
def repeat_to_rgb(x):
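    # Images with fewer than 3 channels (e.g. grayscale) are expanded to RGB
    # by repeating the single channel.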
if x.size(0) < 3:
x = x.repeat(3, 1, 1)
return x
def get_image_dataset(df):
return ImgDataset(
df.img_path.tolist(),
transform=transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
                transforms.Lambda(repeat_to_rgb),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
),
)
class FbtDataset(Dataset):
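    # Frequently-bought-together training set: each item yields the source
    # embedding, a positive (co-purchased) embedding and a random negative
    # embedding from the same target category, plus price bins and category ids.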
def __init__(self, fbt_df: pd.DataFrame, df_embs: pd.DataFrame):
# Database
self.fbt_df = fbt_df
self.fbt_df_price = (
fbt_df[["asin_target", "price_bin_target"]]
.drop_duplicates()
.set_index("asin_target")
)
self.df_embs = df_embs.set_index("asin")
        # Map category to asins: needed to sample negatives from the same target category
        self.asin_per_category = fbt_df.groupby("category_int_target")[
"asin_target"
].apply(list)
# Valid categories
self.valid_categories = self.fbt_df.groupby("category_int_src")[
"category_int_target"
].apply(list)
# For evaluation
self.fbt_by_asin_src = fbt_df.groupby(["asin_src", "category_int_target"])[
"asin_target"
].apply(list)
def __getitem__(self, index):
asin_src = self.fbt_df.iloc[index]["asin_src"]
asin_pos = self.fbt_df.iloc[index]["asin_target"]
category_src = self.fbt_df.iloc[index]["category_int_src"]
category_pos = self.fbt_df.iloc[index]["category_int_target"]
emb_src = torch.tensor(self.df_embs.loc[asin_src].img_embedding)
emb_pos = torch.tensor(self.df_embs.loc[asin_pos].img_embedding)
set_name = self.fbt_df.iloc[index]["set_name"]
# Negative sample
asin_neg = asin_pos
while asin_pos == asin_neg:
            asins_in_category = self.asin_per_category.loc[category_pos]
asin_neg = random.choice(asins_in_category)
emb_neg = torch.tensor(self.df_embs.loc[asin_neg].img_embedding)
# Price
price_bin_src = self.fbt_df.iloc[index]["price_bin_src"]
price_bin_pos = self.fbt_df.iloc[index]["price_bin_target"]
fbt_df_price = self.fbt_df_price.copy() # thread safe
price_bin_neg = fbt_df_price.loc[asin_neg]["price_bin_target"]
# Valid category:
random_valid_category = random.sample(
self.valid_categories.loc[category_src], 1
)[0]
return (
emb_src,
emb_pos,
emb_neg,
price_bin_src,
price_bin_pos,
price_bin_neg,
category_src,
category_pos,
random_valid_category,
asin_src,
asin_pos,
set_name,
)
def __len__(self):
return len(self.fbt_df)
class FbtCandidateDataset(Dataset):
def __init__(self, fbt_df: pd.DataFrame, df_embs: pd.DataFrame):
self.fbt_df = fbt_df
self.df_price = (
fbt_df[["asin_target", "price_bin_target"]]
.drop_duplicates()
.set_index("asin_target")
)
self.df_embs = df_embs.set_index("asin")
self.fbt_candidates = (
fbt_df[["asin_target", "category_int_target"]]
.drop_duplicates()
.reset_index()
)
def __getitem__(self, index):
asin_src = self.fbt_candidates.iloc[index]["asin_target"]
category_src = self.fbt_candidates.iloc[index]["category_int_target"]
emb_src = torch.tensor(self.df_embs.loc[asin_src].img_embedding)
price_bin_src = self.df_price.loc[asin_src]["price_bin_target"]
return (
emb_src,
price_bin_src,
category_src,
asin_src,
)
def __len__(self):
return len(self.fbt_candidates)
class FbtInferenceDataset(Dataset):
def __init__(self, fbt_df: pd.DataFrame, df_embs: pd.DataFrame):
self.fbt_df_test = fbt_df[fbt_df["set_name"] == "test"]
self.fbt_df_test = (
self.fbt_df_test.groupby(["asin_src", "category_int_src"])["asin_target"]
.agg(list)
.reset_index()
)
        # Valid categories to transform to
fbt_df_train = fbt_df[fbt_df["set_name"] == "train"]
self.valid_categories = fbt_df_train.groupby(["category_int_src"])[
"category_int_target"
].agg(list)
self.df_price = (
fbt_df[["asin_src", "price_bin_src"]]
.drop_duplicates()
.set_index("asin_src")
)
self.df_embs = df_embs.set_index("asin")
def __getitem__(self, index):
asin_src = self.fbt_df_test.iloc[index]["asin_src"]
category_src = self.fbt_df_test.iloc[index]["category_int_src"]
emb_src = torch.tensor(self.df_embs.loc[asin_src].img_embedding)
price_bin_src = self.df_price.loc[asin_src]["price_bin_src"]
# Labels
asin_targets = self.fbt_df_test.iloc[index]["asin_target"]
# Categories to transform to
valid_categories = self.valid_categories.loc[category_src]
return (
emb_src,
price_bin_src,
category_src,
asin_src,
valid_categories,
asin_targets,
)
def __len__(self):
return len(self.fbt_df_test)
| cycle_gan_for_complementary_item_recommendations-main | src/dataset_utils.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import logging
import pandas as pd
import torch
from sklearn.metrics import ndcg_score
from tqdm import tqdm
import numpy as np
from lit.eval_utils import calc_topk
from os.path import join as osj
from dataset_utils import FbtCandidateDataset, FbtDataset, FbtInferenceDataset, load_dfs
from torch.utils.data import DataLoader
logger = logging.getLogger(__name__)
def calc_performance(hot_labels, probs, cfg):
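    # Report top-10 coverage, NDCG at several cut-offs and top-k hit rates
    # for the given relevance labels and predicted scores.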
logger.info(f"{cfg.model_weight_dir}")
if isinstance(hot_labels, np.ndarray):
hot_labels = torch.from_numpy(hot_labels)
if isinstance(probs, np.ndarray):
probs = torch.from_numpy(probs)
logger.info("\nCoverage")
_, sort_idxs = torch.sort(probs, dim=-1, descending=True)
logger.info(
f"TOP10 unique items {sort_idxs[:, :10].unique().shape}. {sort_idxs.shape=}"
)
logger.info("NDCG")
ndcg_val_at_k = {}
for k in cfg.top_k + [probs.shape[-1]]:
ndcg_val_at_k[k] = ndcg_score(hot_labels, probs, k=k)
logger.info(json.dumps(ndcg_val_at_k, sort_keys=True, indent=4))
logger.info("\nTOPK")
topk_d = calc_topk(probs, hot_labels, top_k_list=cfg.top_k)
topk_d = {int(key.replace("topk/top", "")): value for key, value in topk_d.items()}
logger.info(json.dumps(topk_d, sort_keys=True, indent=4))
def calc_ndcg_per_category(
hot_labels, probs, asin_src_test, category_pos_test, model_base_dir, out_dir
):
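    # Compute NDCG per test query and persist the scores together with the
    # source asin and target category for later per-category analysis.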
if isinstance(hot_labels, np.ndarray):
hot_labels = torch.from_numpy(hot_labels)
if isinstance(probs, np.ndarray):
probs = torch.from_numpy(probs)
ndcg_vals = [
ndcg_score(hot_label_i.unsqueeze(0), prob_i.unsqueeze(0))
for hot_label_i, prob_i in tqdm(zip(hot_labels, probs))
]
out_path = osj(out_dir, f"{model_base_dir}.pkl")
pd.DataFrame(
{
"asins": asin_src_test,
"category_pos_test": category_pos_test,
"ndcg": ndcg_vals,
}
).to_pickle(out_path)
logger.info(f"Finish predictor. {out_path}")
def get_dataloaders(cfg):
fbt_df, emb_df = load_dfs(cfg)
fbt_df_train, fbt_df_test = (
fbt_df[fbt_df["set_name"] == "train"],
fbt_df[fbt_df["set_name"] == "test"],
)
dataset = FbtDataset(fbt_df, emb_df)
logger.info(f"{[len(fbt_df_train), len(fbt_df_test), len(dataset)]=}")
candidate_dataset = FbtCandidateDataset(fbt_df, emb_df)
inference_dataset = FbtInferenceDataset(fbt_df, emb_df)
candidate_loader = DataLoader(
candidate_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
shuffle=False,
)
inference_loader = DataLoader(
inference_dataset,
batch_size=1,
num_workers=0,
pin_memory=True,
shuffle=False,
)
return candidate_loader, inference_loader, dataset
| cycle_gan_for_complementary_item_recommendations-main | src/inferece_utils.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import hydra
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from omegaconf import DictConfig
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer, LabelEncoder
logger = logging.getLogger(__name__)
def remove_unique_categories(
df: pd.DataFrame, category_freq_threshold: int = 20
) -> pd.DataFrame:
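    # Iteratively coarsen rare categories: while a category has fewer than
    # `category_freq_threshold` items, replace it with its parent by dropping
    # the last "//"-separated leaf, then recount.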
categories = df.category.tolist()
criterion, num_iter = 1, 0
while criterion:
categories = df.category.tolist()
category_unique, category_count = np.unique(categories, return_counts=True)
df["category_count"] = df.category.apply(
lambda cat: category_count[cat == category_unique][0]
)
# If category is too rare: remove the leaf (..//..)
df.category = df[["category", "category_count"]].apply(
lambda row: row.category.rsplit("//", 1)[0]
if row.category_count < category_freq_threshold
else row.category,
axis=1,
)
criterion = (df.category_count < category_freq_threshold).sum()
print(f"{criterion=} {num_iter=}")
num_iter += 1
return df
@hydra.main(config_path="../configs/", config_name="create_train_test_sets")
def create_train_test_sets(cfg: DictConfig):
t0 = time.time()
logger.info(cfg)
pl.seed_everything(1234)
category_name = cfg.category_name
out_dir = os.getcwd()
processed_pkl_path = osp.join(
cfg.data_processed_dir, f"{category_name}_processed_w_imgs.pkl"
)
out_pkl_path = osp.join(cfg.data_final_dir, f"{category_name}_sets.pkl")
# Load data
t1 = time.time()
df = pd.read_pickle(processed_pkl_path)
df.category = df.category.apply(lambda list_str: "//".join(list_str)).astype(str)
emb_path = osp.join(cfg.data_final_dir, cfg.category_name + "_embeddings.pkl")
df_embs = pd.read_pickle(emb_path)
logger.info(f"Load df in {time.time()-t1:.1f} sec")
# Original category
category_unique, category_count = np.unique(
df.category.tolist(), return_counts=True
)
idxs = np.argsort(category_count)[::-1]
pd.DataFrame(
{"category_name": category_unique[idxs], "count": category_count[idxs]}
).to_csv(osp.join(out_dir, "categories_pre.csv"), index=False)
# Combine categories
df_trans = df.copy()
df_trans = remove_unique_categories(
df_trans, category_freq_threshold=cfg.category_freq_threshold
)
# New categories
category_unique, category_count = np.unique(
df_trans.category.tolist(), return_counts=True
)
idxs = np.argsort(category_count)[::-1]
pd.DataFrame(
{"category_name": category_unique[idxs], "count": category_count[idxs]}
).to_csv(osp.join(out_dir, "categories_post.csv"), index=False)
    # Encode each category string as an integer id
df_trans["category_int"] = LabelEncoder().fit_transform(df_trans.category)
df_fbt = df_trans.explode(column="also_buy").reset_index()
df_merge = pd.merge(
df_trans,
df_fbt,
how="inner",
left_on="asin",
right_on="also_buy",
suffixes=["_src", "_target"],
)
df_merge = df_merge[
[
"asin_src",
"category_src",
"category_int_src",
"price_src",
"asin_target",
"category_target",
"category_int_target",
"price_target",
]
]
logger.info(f"[pre explode merge]=[{len(df_trans)} {len(df_fbt)} {len(df_merge)}]")
# Keep only pairs with different category
df_with_set_split = df_merge.copy()
df_with_set_split = df_with_set_split[
df_with_set_split.category_int_src != df_with_set_split.category_int_target
]
# Keep only asins with images
asins_to_keep = df_embs.asin.tolist()
df_with_set_split = df_with_set_split[
df_with_set_split["asin_src"].isin(asins_to_keep)
& df_with_set_split["asin_target"].isin(asins_to_keep)
]
    # Split by source asin: a source in the test set must not appear as a training target (enforced below)
asin_srcs = df_with_set_split.asin_src.unique()
asin_src_train, asin_src_test = train_test_split(
asin_srcs, train_size=cfg.train_set_ratio, random_state=cfg.seed
)
df_with_set_split["set_name"] = None
df_with_set_split["set_name"][
df_with_set_split.asin_src.isin(asin_src_train)
] = "train"
df_with_set_split["set_name"][
df_with_set_split.asin_src.isin(asin_src_test)
] = "test"
train_ratio = (df_with_set_split.set_name == "train").sum() / len(df_with_set_split)
test_ratio = (df_with_set_split.set_name == "test").sum() / len(df_with_set_split)
logger.info(f"{[train_ratio, test_ratio]=}. {df_with_set_split.shape=}")
    # Remove pairs in training where the target is a source in test
fbt_df_train = df_with_set_split[df_with_set_split["set_name"] == "train"]
fbt_df_test = df_with_set_split[df_with_set_split["set_name"] == "test"]
len0 = len(fbt_df_train)
fbt_df_train = fbt_df_train[~fbt_df_train.asin_target.isin(fbt_df_test.asin_src)]
logger.info(
f"Remove pairs in fbt_df_train. Size [pre post]=[{len0} {len(fbt_df_train)}]"
)
# Price bin
est = KBinsDiscretizer(n_bins=cfg.price_n_bins, encode="ordinal")
est.fit(
np.hstack([fbt_df_train["price_src"], fbt_df_train["price_target"]])[
:, np.newaxis
]
)
fbt_df_train["price_bin_src"] = (
est.transform(fbt_df_train["price_src"].to_numpy()[:, np.newaxis])
.squeeze()
.astype(int)
)
fbt_df_train["price_bin_target"] = (
est.transform(fbt_df_train["price_target"].to_numpy()[:, np.newaxis])
.squeeze()
.astype(int)
)
fbt_df_test["price_bin_src"] = (
est.transform(fbt_df_test["price_src"].to_numpy()[:, np.newaxis])
.squeeze()
.astype(int)
)
fbt_df_test["price_bin_target"] = (
est.transform(fbt_df_test["price_target"].to_numpy()[:, np.newaxis])
.squeeze()
.astype(int)
)
    # Concatenate the train and test splits
df_with_set_split = pd.concat([fbt_df_train, fbt_df_test])
logger.info(f"{[len(fbt_df_train), len(fbt_df_test)]=}")
df_with_set_split.to_pickle(out_pkl_path)
logger.info(f"Finish in {time.time()-t0:.1f} sec. {out_pkl_path=}")
if __name__ == "__main__":
create_train_test_sets()
| cycle_gan_for_complementary_item_recommendations-main | src/main_create_train_test_set.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import hydra
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import wandb
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from torch.utils.data import DataLoader, TensorDataset
from lit.lit_utils import LitCategoryClassifier
logger = logging.getLogger(__name__)
@hydra.main(
version_base="1.2",
config_path="../configs/",
config_name="execute_train_classifier",
)
def execute_train(cfg: DictConfig):
t0 = time.time()
out_dir = os.getcwd()
os.chdir(hydra.utils.get_original_cwd())
name = osp.basename(out_dir)
pl.seed_everything(cfg.seed)
wandb.init(
project=cfg.wandb.project,
dir=out_dir,
config=OmegaConf.to_container(cfg),
job_type="train_classifier",
name=name,
)
logger.info(f"out_dir={out_dir}")
logger.info(cfg)
# Load data
pkl_path = osp.join(cfg.data_dir, cfg.category_name + "_sets.pkl")
fbt_df = pd.read_pickle(pkl_path)
fbt_df = fbt_df[["asin_src", "category_int_src"]].drop_duplicates().reset_index()
emb_path = osp.join(cfg.data_dir, cfg.category_name + "_embeddings.pkl")
emb_df = pd.read_pickle(emb_path)
df = pd.merge(fbt_df, emb_df, "inner", left_on="asin_src", right_on="asin")
num_classes = df.category_int_src.max() + 1
category_onehot = F.one_hot(
torch.tensor(df["category_int_src"].tolist()), num_classes=num_classes
)
embs = torch.tensor(df["img_embedding"].tolist())
input_emb_dim = embs.size(1)
# Dataset
dataset = TensorDataset(embs, category_onehot)
len_train = int(len(dataset) * cfg.train_ratio)
trainset, testset = torch.utils.data.random_split(
dataset, [len_train, len(dataset) - len_train]
)
logger.info(f"Dataset size: [train test]=[{len(trainset)} {len(testset)}]")
logger.info("Category bincount:")
logger.info(torch.bincount(torch.tensor(fbt_df.category_int_src.tolist())))
train_loader = DataLoader(
trainset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
shuffle=True,
)
test_loader = DataLoader(
testset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
shuffle=False,
)
    # Initialize model
lit_model = LitCategoryClassifier(input_emb_dim, num_classes, cfg)
trainer = pl.Trainer(
max_epochs=cfg.epochs,
min_epochs=cfg.epochs,
gpus=1 if torch.cuda.is_available() else None,
        logger=WandbLogger(experiment=wandb.run),
callbacks=[LearningRateMonitor()],
num_sanity_val_steps=0,
default_root_dir=None,
accelerator="mps",
)
trainer.fit(lit_model, train_loader, test_loader)
logger.info(f"Finish execute_train in {time.time()-t0:.1f} sec")
if __name__ == "__main__":
execute_train()
| cycle_gan_for_complementary_item_recommendations-main | src/main_execute_train_calssifier.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import itertools
import logging
import os
import os.path as osp
from os.path import join as osj
from time import time
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
from inferece_utils import calc_ndcg_per_category, calc_performance, get_dataloaders
from lit.eval_utils import generate_test_set_hot_labels, get_unique_asins
from lit.lit_utils import LitFbt
logger = logging.getLogger(__name__)
def model_inference(model, category, price_bin, img_emb):
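    # Embed the category and price bin, then fuse them with the image
    # embedding through the image encoder to get the item representation.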
category_emb = model.category_embs(category)
price_emb = model.price_embs(price_bin)
emb = model.img_encoder(img_emb, category_emb, price_emb)
return emb
def evaluate_gan(candidate_loader, inference_loader, cfg):
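    # Encode every candidate item, translate each test source into its valid
    # target categories with the trained model, and rank candidates by
    # embedding distance.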
# Load weights
checkpoint = osj(cfg.model_weight_dir, "checkpoint.ckpt")
lit_model = LitFbt.load_from_checkpoint(checkpoint)
lit_model.eval()
# Iterate on candidates
candidates, asins, category_candidates = [], [], []
with torch.no_grad():
for batch in tqdm(candidate_loader):
(
img_emb_candidate,
price_bin_candidate,
category_candidate,
asin_candidate,
) = batch
candidate = model_inference(
lit_model, category_candidate, price_bin_candidate, img_emb_candidate
)
candidates.append(candidate.detach().cpu())
asins.append(asin_candidate)
category_candidates.append(category_candidate.detach().cpu())
candidates = torch.vstack(candidates)
asins = np.array(list(itertools.chain(*asins)))
category_candidates = torch.hstack(category_candidates)
    # Get valid categories
hot_labels, dists, valid_categories_list = [], [], []
with torch.no_grad():
for batch in tqdm(inference_loader):
(
img_emb_test,
price_bin_test,
category_test,
_,
valid_categories,
asin_targets,
) = batch
src = model_inference(
lit_model, category_test, price_bin_test, img_emb_test
)
            # Transform test to valid categories (categories that appeared in the training set)
valid_categories_hstack = torch.hstack(valid_categories)
src_repeat = src.repeat(len(valid_categories), 1)
category_dst_emb = lit_model.category_embs(valid_categories_hstack)
src_fbt = lit_model.fbt_ae(src_repeat, category_dst_emb)
dists_i = torch.cdist(src_fbt, candidates, p=2)
dists_i = dists_i.min(axis=0).values
            # Create ground-truth labels
hot_labels_i = np.in1d(
asins, np.array([asin_target[0] for asin_target in asin_targets])
)
# Save
valid_categories_list.append(valid_categories_hstack)
dists.append(dists_i)
hot_labels.append(hot_labels_i)
assert hot_labels_i.sum() > 0
    # Calculate probabilities
dists = torch.vstack(dists)
probs = torch.softmax(-dists, axis=-1)
hot_labels = np.vstack(hot_labels)
    # Retrieval metrics
calc_performance(hot_labels, probs, cfg)
def evaluate_category_aware(dataset, cfg, out_dir):
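    # Load embeddings cached during training, keep one entry per unique
    # (source asin, target category) test pair, and score candidates while
    # restricting predictions to the target category.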
path = cfg.model_weight_dir
model_base_dir = osp.basename(path)
asin_src = torch.load(osj(path, "asin_src.pth"))
asin_pos = torch.load(osj(path, "asin_pos.pth"))
category_src = torch.from_numpy(torch.load(osj(path, "category_src.pth")))
category_pos = torch.from_numpy(torch.load(osj(path, "category_pos.pth")))
src_fbt = torch.load(osj(path, "src_fbt.pth"))
src = torch.load(osj(path, "src.pth"))
pos = torch.load(osj(path, "pos.pth"))
set_name = torch.load(osj(path, "set_name.pth"))
    # Test sources: keep one entry per unique (source, target-category) pair
src_fbt_test = src_fbt[set_name == "test"]
asin_src_test = asin_src[set_name == "test"]
category_pos_test = category_pos[set_name == "test"]
locs = list(zip(asin_src_test, category_pos_test))
_, unique_idxs = np.unique(np.array(locs), axis=0, return_index=True)
src_fbt_test, asin_src_test, category_pos_test = (
src_fbt_test[unique_idxs],
asin_src_test[unique_idxs],
category_pos_test[unique_idxs],
)
    # Candidates to compare against
asins = np.hstack([asin_src, asin_pos])
embs = torch.vstack([src, pos])
categories = torch.hstack([category_src, category_pos])
asins, embs, categories = get_unique_asins(asins, embs, categories)
# Build hot label
hot_labels, asin_src_test = generate_test_set_hot_labels(
asin_src_test=asin_src_test,
category_pos_test=category_pos_test.numpy(),
fbt_by_asin_src=dataset.fbt_by_asin_src,
asins=asins,
)
    # Compute distances to the candidates
dists = torch.cdist(src_fbt_test, embs, p=2)
probs = torch.softmax(-dists, axis=-1)
    # Zero out scores outside the target category
for n, cat in enumerate(category_pos_test):
probs[n, categories != cat] = 0
    # Calculate retrieval metrics
calc_performance(hot_labels, probs, cfg)
calc_ndcg_per_category(
hot_labels, probs, asin_src_test, category_pos_test, model_base_dir, out_dir
)
@hydra.main(version_base="1.2", config_path="../configs/", config_name="inference")
def execute_gan_inference(cfg: DictConfig):
t0 = time()
out_dir = os.getcwd()
os.chdir(hydra.utils.get_original_cwd())
name = osp.basename(out_dir)
pl.seed_everything(cfg.seed)
wandb.init(
project=cfg.wandb.project,
dir=out_dir,
config=OmegaConf.to_container(cfg),
job_type="analysis",
name="analysis_" + name,
)
logger.info(f"out_dir={out_dir}")
logger.info(cfg)
logger.info(f"{torch.backends.mps.is_available()=}")
for category_name, model_gan_weight_dir in zip(
cfg.category_names, cfg.model_gan_weight_dirs
):
t1 = time()
cfg.category_name = category_name
cfg.model_weight_dir = model_gan_weight_dir
candidate_loader, inference_loader, dataset = get_dataloaders(cfg)
        evaluate_gan(candidate_loader, inference_loader, cfg)
evaluate_category_aware(dataset, cfg, out_dir)
logger.info(f"Finish {category_name} in {time() - t1:.1f} s")
logger.info(f"Finish execute_gan_inference in {time() - t0:.1f} s")
if __name__ == "__main__":
execute_gan_inference()
| cycle_gan_for_complementary_item_recommendations-main | src/main_inference_gan.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
from time import time
import hydra
import pytorch_lightning as pl
import torch
import wandb
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from torch.utils.data import DataLoader
from dataset_utils import FbtDataset, load_dfs
from lit.lit_utils import LitFbt
from lit.lit_dcf import LitDCF
from lit.lit_pcomp import LitPcomp
logger = logging.getLogger(__name__)
@hydra.main(version_base="1.2", config_path="../configs/", config_name="execute_train")
def execute_train(cfg: DictConfig):
t0 = time()
out_dir = os.getcwd()
os.chdir(hydra.utils.get_original_cwd())
name = osp.basename(out_dir)
pl.seed_everything(cfg.seed)
wandb.init(
project=cfg.wandb.project,
dir=out_dir,
config=OmegaConf.to_container(cfg),
job_type=cfg.method,
name=name,
)
logger.info(f"out_dir={out_dir}")
logger.info(cfg)
logger.info(f"{torch.backends.mps.is_available()=}")
# Dataset
t1 = time()
fbt_df, emb_df = load_dfs(cfg)
trainset = FbtDataset(fbt_df[fbt_df["set_name"] == "train"], emb_df)
testset = FbtDataset(fbt_df[fbt_df["set_name"] == "test"], emb_df)
dataset_all = FbtDataset(fbt_df, emb_df)
logger.info(f"[train test]=[{len(trainset)} {len(testset)}]. {time()-t1:.1f}s")
train_loader = DataLoader(
trainset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
shuffle=True,
)
test_loader = DataLoader(
testset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
shuffle=False,
)
data_all_loader = DataLoader(
dataset_all,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
shuffle=False,
)
    # Initialize model
num_categories = fbt_df["category_int_src"].max() + 1
num_price_bins = fbt_df[["price_bin_src", "price_bin_target"]].max().max() + 1
img_emb_dim = len(emb_df.img_embedding.iloc[0])
logger.info(f"{[num_categories, num_price_bins, img_emb_dim]=}")
if cfg.method == "gan":
lit_model = LitFbt(
img_emb_dim,
cfg.img_encoder_output_dim,
num_categories,
cfg.category_emb_size,
num_price_bins,
cfg.price_emb_size,
cfg,
out_dir,
)
elif cfg.method == "dcf":
lit_model = LitDCF(
img_emb_dim,
cfg.img_encoder_output_dim,
num_categories,
cfg.category_emb_size,
num_price_bins,
cfg.price_emb_size,
cfg,
out_dir,
)
elif cfg.method == "pcomp":
lit_model = LitPcomp(
img_emb_dim,
cfg.img_encoder_output_dim,
num_categories,
cfg.category_emb_size,
num_price_bins,
cfg.price_emb_size,
cfg,
out_dir,
)
trainer = pl.Trainer(
max_epochs=cfg.epochs,
min_epochs=cfg.epochs,
gradient_clip_val=cfg.gradient_clip_val,
gradient_clip_algorithm="value",
devices=1,
        logger=WandbLogger(experiment=wandb.run),
callbacks=[LearningRateMonitor()],
num_sanity_val_steps=0,
default_root_dir=out_dir,
accelerator="cpu",
check_val_every_n_epoch=cfg.check_val_every_n_epoch,
)
t1 = time()
trainer.fit(lit_model, train_loader, [test_loader, data_all_loader])
logger.info(f"trainer.fit in {time()-t1:.1f} s")
    # Upload files
wandb.save(osp.join(out_dir, "*.pth"))
wandb.save(osp.join(out_dir, "*.ckpt"))
logger.info(f"Finish execute_train in {time()-t0:.1f} s")
if __name__ == "__main__":
execute_train()
| cycle_gan_for_complementary_item_recommendations-main | src/main_execute_train.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import itertools
import logging
import os
import os.path as osp
from os.path import join as osj
from time import time
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
from inferece_utils import calc_ndcg_per_category, calc_performance, get_dataloaders
from lit.eval_utils import generate_test_set_hot_labels, get_unique_asins
from lit.lit_dcf import LitDCF
logger = logging.getLogger(__name__)
def dcf_model_inference(lit_model, category_src, price_bin, img_emb, candidates):
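    # Encode the source item and score it against every candidate embedding
    # with the fusion model, yielding one logit per candidate.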
category_emb = lit_model.category_embs(category_src)
price_emb = lit_model.price_embs(price_bin)
src = lit_model.src_encoder(img_emb, category_emb, price_emb)
# Fusion layer
logits_i = lit_model.fusion_model(
src.repeat(len(candidates), 1), candidates
).squeeze()
return logits_i
def get_candidate_embeddings(lit_model, candidate_loader):
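    # Encode every candidate item from its image, category and price-bin
    # embeddings; return stacked embeddings with matching asins and categories.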
candidates, asins, categories = [], [], []
with torch.no_grad():
for img_emb, price_bin, category, asin in tqdm(candidate_loader):
category_emb = lit_model.category_embs(category)
price_emb = lit_model.price_embs(price_bin)
candidate = lit_model.candidate_encoder(img_emb, category_emb, price_emb)
candidates.append(candidate.detach().cpu())
asins.append(asin)
categories.append(category.detach().cpu())
candidates = torch.vstack(candidates)
asins = np.array(list(itertools.chain(*asins)))
categories = torch.hstack(categories)
return candidates, categories, asins
def evaluate_dcf(
lit_model, inference_loader, candidates, category_candidates, candidate_asins, cfg
):
    # Get valid categories
hot_labels, logits, category_targets, asin_srcs = [], [], [], []
for (
img_emb,
price_bin,
category_src,
asin_src,
_,
target_asins,
) in tqdm(inference_loader):
logits_i = dcf_model_inference(
lit_model, category_src, price_bin, img_emb, candidates
)
        # Create ground-truth labels
target_asins = [asin_target[0] for asin_target in target_asins]
hot_labels_i = np.in1d(
candidate_asins,
target_asins,
)
assert hot_labels_i.sum() > 0
# Save
hot_labels.append(hot_labels_i)
logits.append(logits_i.squeeze())
category_targets_i = category_candidates[hot_labels_i]
category_targets.append(category_targets_i.unique().tolist())
asin_srcs.append(asin_src)
    # Calculate probabilities
logits = torch.vstack(logits)
hot_labels = np.vstack(hot_labels)
    # Retrieval metrics
calc_performance(hot_labels, logits, cfg)
def evaluate_category_aware_dcf(lit_model, dataset, cfg, out_dir):
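    # Load embeddings cached during training, keep one entry per unique
    # (source asin, target category) test pair, and score candidates with the
    # fusion model while restricting predictions to the target category.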
path = cfg.model_weight_dir
model_base_dir = osp.basename(path)
asin_src = torch.load(osj(path, "asin_src.pth"))
asin_pos = torch.load(osj(path, "asin_pos.pth"))
category_src = torch.from_numpy(torch.load(osj(path, "category_src.pth")))
category_pos = torch.from_numpy(torch.load(osj(path, "category_pos.pth")))
src = torch.load(osj(path, "src.pth"))
pos = torch.load(osj(path, "pos.pth"))
set_name = torch.load(osj(path, "set_name.pth"))
    # Test sources: keep one entry per unique (source, target-category) pair
src_test = src[set_name == "test"]
asin_src_test = asin_src[set_name == "test"]
category_pos_test = category_pos[set_name == "test"]
locs = list(zip(asin_src_test, category_pos_test))
_, unique_idxs = np.unique(np.array(locs), axis=0, return_index=True)
src_test, asin_src_test, category_pos_test = (
src_test[unique_idxs],
asin_src_test[unique_idxs],
category_pos_test[unique_idxs],
)
    # Candidates to compare against
asins = np.hstack([asin_src, asin_pos])
embs = torch.vstack([src, pos])
categories = torch.hstack([category_src, category_pos])
asins, embs, categories = get_unique_asins(asins, embs, categories)
# Build hot label
hot_labels, asin_src_test = generate_test_set_hot_labels(
asin_src_test=asin_src_test,
category_pos_test=category_pos_test.numpy(),
fbt_by_asin_src=dataset.fbt_by_asin_src,
asins=asins,
)
    # Compute distances to the candidates
probs = []
for src_test_i in tqdm(src_test):
src_to_inference = src_test_i.repeat(len(embs), 1)
logits = lit_model.fusion_model(src_to_inference, embs)
prob = logits.sigmoid().squeeze()
probs.append(prob)
probs = torch.vstack(probs).cpu().numpy()
    # Zero out scores outside the target category
for n, cat in enumerate(category_pos_test):
probs[n, categories != cat] = 0
    # Calculate retrieval metrics
calc_performance(hot_labels, probs, cfg)
calc_ndcg_per_category(
hot_labels, probs, asin_src_test, category_pos_test, model_base_dir, out_dir
)
@hydra.main(version_base="1.2", config_path="../configs/", config_name="inference")
def execute_dcf_inference(cfg: DictConfig):
t0 = time()
out_dir = os.getcwd()
os.chdir(hydra.utils.get_original_cwd())
name = osp.basename(out_dir)
pl.seed_everything(cfg.seed)
wandb.init(
project=cfg.wandb.project,
dir=out_dir,
config=OmegaConf.to_container(cfg),
job_type="analysis",
name="analysis_" + name,
)
logger.info(f"out_dir={out_dir}")
logger.info(cfg)
logger.info(f"{torch.backends.mps.is_available()=}")
for category_name, model_dcf_weight_dir in zip(
cfg.dcf_category_names, cfg.model_dcf_weight_dirs
):
t1 = time()
logger.info(category_name)
# Dataset
cfg.category_name = category_name
cfg.model_weight_dir = model_dcf_weight_dir
candidate_loader, inference_loader, dataset = get_dataloaders(cfg)
# Load weights
checkpoint = osj(cfg.model_weight_dir, "checkpoint.ckpt")
logger.info(checkpoint)
lit_model = LitDCF.load_from_checkpoint(checkpoint)
lit_model.eval()
torch.set_grad_enabled(False)
candidates, category_candidates, candidate_asins = get_candidate_embeddings(
lit_model, candidate_loader
)
evaluate_dcf(
lit_model,
inference_loader,
candidates,
category_candidates,
candidate_asins,
cfg,
)
evaluate_category_aware_dcf(
lit_model,
dataset,
cfg,
out_dir,
)
logger.info(f"Finish {category_name} in {time() - t1:.1f} s")
logger.info(f"Finish execute_dcf_inference in {time() - t0:.1f} s")
if __name__ == "__main__":
execute_dcf_inference()
| cycle_gan_for_complementary_item_recommendations-main | src/main_inference_dcf.py |
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
import urllib.request
import hydra
import pandas as pd
from omegaconf import DictConfig
from tqdm import tqdm
from main_process_meta import reduce_to_closed_set, verify_closed_set
logger = logging.getLogger(__name__)
@hydra.main(version_base="1.2",config_path="../configs/", config_name="process_meta")
def download_imgs(cfg: DictConfig):
logger.info(cfg)
os.chdir(hydra.utils.get_original_cwd())
img_out_dir = osp.join(cfg.data_raw_dir, cfg.category_name)
os.makedirs(img_out_dir, exist_ok=True)
t0 = time.time()
fbt_file_name = osp.join(cfg.data_processed_dir, f"{cfg.category_name}_processed.pkl")
fbt_file_name_out = osp.join(
cfg.data_processed_dir, f"{cfg.category_name}_processed_w_imgs.pkl"
)
logger.info(cfg)
# Load data
t1 = time.time()
fbt_df = pd.read_pickle(fbt_file_name)
logger.info(f"fbt_df read_pickle in {time.time()-t1:.1f}")
# Download images
logger.info("Download images")
succeeds = []
for i, (url, img_path) in tqdm(
enumerate(zip(fbt_df.imageURLHighRes, fbt_df.img_path)), total=len(fbt_df)
):
succeed = True
if not osp.exists(img_path):
try:
urllib.request.urlretrieve(url, img_path)
except Exception as e:
logger.info(f"[{i}/{len(fbt_df)}] Fail {img_path}")
succeed = False
succeeds.append(succeed)
# Reduce set
len0 = len(fbt_df)
fbt_df = fbt_df.iloc[succeeds]
logger.info(f"succeeds [pre post]=[{len0} {len(fbt_df)}]")
len0 = len(fbt_df)
fbt_df = reduce_to_closed_set(fbt_df)
verify_closed_set(fbt_df)
logger.info(f"reduce_to_closed_set [pre post]=[{len0} {len(fbt_df)}]")
logger.info(f"Saving to {fbt_file_name_out=}")
fbt_df.to_pickle(fbt_file_name_out)
logger.info(f"Finish in {time.time()-t0:.1f} sec")
if __name__ == "__main__":
download_imgs()
| cycle_gan_for_complementary_item_recommendations-main | src/main_download_imgs.py |