# Copyright (c) Meta Platforms, Inc. and affiliates.
import warnings
from mmseg.models.builder import MODELS
ESTIMATORS = MODELS
def build_estimator(cfg, train_cfg=None, test_cfg=None):
"""Build estimator."""
if train_cfg is not None or test_cfg is not None:
warnings.warn(
            'train_cfg and test_cfg are deprecated, '
'please specify them in model', UserWarning)
assert cfg.get('train_cfg') is None or train_cfg is None, \
'train_cfg specified in both outer field and model field '
assert cfg.get('test_cfg') is None or test_cfg is None, \
'test_cfg specified in both outer field and model field '
return ESTIMATORS.build(
cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
| CODD-main | model/builder.py |
from mmcv.runner import HOOKS, LrUpdaterHook
import mmcv
@HOOKS.register_module()
class MultiGammaLrUpdaterHook(LrUpdaterHook):
"""Step LR scheduler.
Args:
step (list[int]): Step to decay the LR. If an int value is given,
regard it as the decay interval. If a list is given, decay LR at
these steps.
gamma (list[float]): LR change ratios at certain steps.
"""
def __init__(self, step, gamma, **kwargs):
assert mmcv.is_list_of(step, int)
assert mmcv.is_list_of(gamma, float)
assert len(gamma) == len(step)
assert all([s > 0 for s in step])
self.step = step
self.gamma = gamma
super(MultiGammaLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
progress = runner.epoch if self.by_epoch else runner.iter
# calculate exponential term
gamma = 1
for i, s in enumerate(self.step):
if progress < s:
break
gamma *= self.gamma[i]
return base_lr * gamma
| CODD-main | model/lr_updater.py |
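# --- Minimal sketch (not part of CODD): the multiplicative schedule computed by
# MultiGammaLrUpdaterHook.get_lr above, reproduced as a plain function so it can
# be checked without mmcv. With step=[100, 200] and gamma=[0.5, 0.1], the LR is
# base_lr before iter 100, base_lr * 0.5 until iter 200, and
# base_lr * 0.5 * 0.1 afterwards.
def multi_gamma_lr(base_lr, progress, step, gamma):
    factor = 1.0
    for i, s in enumerate(step):
        if progress < s:
            break
        factor *= gamma[i]
    return base_lr * factor

if __name__ == "__main__":
    for it in (0, 99, 100, 199, 200, 500):
        print(it, multi_gamma_lr(1e-3, it, step=[100, 200], gamma=[0.5, 0.1]))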
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .fusion import Fusion
from .others import NullFusion, GTFusion, KalmanFusion
__all__ = ["NullFusion", "GTFusion", "KalmanFusion", "Fusion"]
| CODD-main | model/fusion/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init, kaiming_init, normal_init, trunc_normal_init
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from utils import disp_warp
from ..motion.raft3d.raft3d import GradientClip
class BasicBlock(nn.Module):
"""ResNet BasicBlock"""
expansion = 1
def __init__(self, c1, c2, s, p, d):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(
c1, c2, kernel_size=3, stride=s, padding=d if d > 1 else p, dilation=d
),
nn.Mish(inplace=True),
)
self.conv2 = nn.Conv2d(
c2, c2, kernel_size=3, stride=1, padding=d if d > 1 else p, dilation=d
)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out += x
return out
@MODELS.register_module()
class Fusion(nn.Module):
def __init__(
self, in_channels, fusion_channel, loss=None, corr_cfg=dict(), ds_scale=4
):
"""fusion network
Args:
in_channels (int): stereo feature channels
fusion_channel (int): fusion feature channels
loss (dict, optional): config for loss. Defaults to None.
corr_cfg (dict, optional): config for correlation. Defaults to dict().
ds_scale (int, optional): low res scale. Defaults to 4.
"""
super(Fusion, self).__init__()
if loss is not None:
self.loss = builder_oss.build_loss(loss)
else:
self.loss = None
self.fusion_channel = fusion_channel
self.ds_scale = ds_scale
self.in_channels = in_channels
# configs
self.patch_size = corr_cfg.get("patch_size", 3)
self.unfold_op = nn.Unfold(
kernel_size=(self.patch_size, self.patch_size),
padding=self.patch_size - 1,
dilation=2,
)
self.key_layer = nn.Sequential(
nn.Conv2d(in_channels, self.fusion_channel, 1, 1, 0, 1), # 1x1
nn.ReLU(inplace=True),
BasicBlock(self.fusion_channel, self.fusion_channel, s=1, p=1, d=1), # 3x3
nn.ReLU(inplace=True),
nn.Conv2d(self.fusion_channel, self.fusion_channel, 1, 1, 0, 1), # 1x1
)
cross_attn_channels = self.patch_size ** 2
stereo_cost_channels = 3 * 2
self_attn_channels = (self.patch_size ** 2 - 1) * 2
flow_channels = 6
binary_channels = 1
feature_channels = self.fusion_channel
# define network
self.conv_corr = nn.Sequential( # for feat and disp corr
nn.Conv2d(
self_attn_channels + cross_attn_channels + stereo_cost_channels,
self.fusion_channel * 2, 1, padding=0, bias=True
), # 1x1
nn.ReLU(inplace=True),
nn.Conv2d(
self.fusion_channel * 2, self.fusion_channel, 1, padding=0, bias=True
),
nn.ReLU(inplace=True),
)
self.conv_disp = nn.Sequential( # for disparity
nn.Conv2d(2, self.fusion_channel, 7, padding=3), # 7x7
nn.ReLU(inplace=True),
nn.Conv2d(
self.fusion_channel, self.fusion_channel, 3, padding=1, bias=True
), # 3x3
nn.ReLU(inplace=True),
)
self.motion_conv = nn.Sequential(
nn.Conv2d(
self.fusion_channel * 2, self.fusion_channel - 2, 7, padding=3, bias=True
),
nn.ReLU(inplace=True),
)
self.weight_head = nn.Sequential(
nn.Conv2d(
self.fusion_channel, self.fusion_channel, 3, padding=1, bias=True
),
nn.Conv2d(self.fusion_channel, 1, 1, padding=0, bias=True),
GradientClip(),
nn.Sigmoid(),
)
self.forget_head = nn.Sequential(
nn.Conv2d(
flow_channels + self_attn_channels + cross_attn_channels + binary_channels,
16, 1, padding=0, bias=True
),
nn.Conv2d(16, 8, 3, padding=1, bias=True),
nn.Conv2d(8, 1, 1, padding=0, bias=True),
GradientClip(),
nn.Sigmoid(),
)
self.residual_conv = nn.Sequential(
nn.Conv2d(
self.fusion_channel + feature_channels, self.fusion_channel, 3, padding=1, bias=True
),
nn.ReLU(inplace=True),
)
self.init_weights()
n_parameters = sum(p.numel() for n, p in self.named_parameters())
print(
"PARAM STATUS: total number of parameters %.3fM in fusion network"
% (n_parameters / 1000 ** 2)
)
def init_weights(self):
"""weight initialization"""
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m.weight, std=0.02)
if m.bias is not None:
if "ffn" in n:
normal_init(m.bias, std=1e-6)
else:
constant_init(m.bias, 0)
elif isinstance(m, (nn.Conv2d, nn.Conv3d)):
kaiming_init(m.weight, mode="fan_in")
if m.bias is not None:
constant_init(m.bias, 0)
elif isinstance(
m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm, nn.InstanceNorm2d)
):
constant_init(m.bias, 0)
constant_init(m.weight, 1.0)
def _px2patch_corr(self, k, memory_k, B, C, H, W, self_corr=False):
"""pixel-patch correlation
Args:
k (Tensor): keys
memory_k (Tensor): memory keys
B (int): batch size
C (int): channel size
H (int): height
W (int): width
self_corr (bool, optional): Defaults to False.
Returns:
Tensor: correlation value
"""
q = k.reshape(B, C, H * W).unsqueeze(2) # B,C,1,HW
memory_k = self.unfold_feat(memory_k, self.patch_size) # B,C,pq,HW
if C == 1:
            kk = q - memory_k  # B, 1, pq, HW
else:
kk = (q * memory_k).sum(1) # B, pq, HW
kk = kk.view(B, self.patch_size ** 2, H, W) # B, pq, H, W
if self_corr:
mask = (
torch.ones(self.patch_size ** 2).bool().to(k.device)
) # drop self-self
mask[(self.patch_size ** 2) // 2] = False
kk = kk[:, mask]
# normalize
kk = kk / math.sqrt(C)
return kk
def disparity_confidence(self, pred_curr, pred_warp, fea_l, fea_r):
"""approximate disparity confidence
Args:
pred_curr (Tensor): Nx1xHxW
pred_warp (Tensor): Nx1xHxW
fea_l (Tensor): left feature
fea_r (Tensor): right feature
Returns:
Tensor: disparity confidence
"""
pred_curr, pred_warp = (
pred_curr[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
pred_warp[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
)
local_cv_warp = []
local_cv_pred = []
for k in range(-1, 2, 1):
local_warp = pred_warp / self.ds_scale + k
local_pred = pred_curr / self.ds_scale + k
warp_fea_r, _ = disp_warp(fea_r, local_warp, padding_mode="zeros")
pred_fea_r, _ = disp_warp(fea_r, local_pred, padding_mode="zeros")
cv_warp = torch.norm(fea_l - warp_fea_r, 1, 1, keepdim=True) / (self.in_channels / 24.0)
cv_pred = torch.norm(fea_l - pred_fea_r, 1, 1, keepdim=True) / (self.in_channels / 24.0)
local_cv_warp.append(cv_warp)
local_cv_pred.append(cv_pred)
# local cost volume for all the disp hypothesis[B, 3, H/scale, W/scale]
local_cv_warp = torch.cat(local_cv_warp, 1)
local_cv_pred = torch.cat(local_cv_pred, 1)
return local_cv_pred, local_cv_warp
def compute_input_cues(
self,
pred_curr,
pred_warp,
feat_curr,
feat_warp,
flow_warp,
confidence_warp,
fea_l,
fea_r,
):
"""compute input cues to regress weights
Args:
            pred_curr (Tensor): Nx1xHxW
            pred_warp (Tensor): Nx1xHxW
            feat_curr (Tensor): NxCxHxW
            feat_warp (Tensor): NxCxHxW
            flow_warp (Tensor): Nx3xHxW
            confidence_warp (Tensor): Nx3xHxW
            fea_l (Tensor): NxCxHxW
            fea_r (Tensor): NxCxHxW
Returns:
Tensor, Tensor: input cues at two resolutions
"""
B = feat_curr.shape[0]
H, W = feat_curr.shape[-2:]
# get hypothesis cost from stereo
cost_curr, cost_warp = self.disparity_confidence(
pred_curr, pred_warp, fea_l, fea_r
)
# get attention features
feat_cat = torch.cat([feat_curr, feat_warp], dim=0)
disp_cat_fr = torch.cat([pred_curr, pred_warp], dim=0)
feat_cross_attn = self._px2patch_corr(
feat_curr, feat_warp, B, self.fusion_channel, H, W
)
feat_self_attn = self._px2patch_corr(
feat_cat, feat_cat, 2 * B, self.fusion_channel, H, W, self_corr=True
)
disp_cross_attn = self._px2patch_corr(
pred_curr, pred_warp, B, 1, pred_curr.shape[-2], pred_curr.shape[-1]
)
disp_self_attn_fr = self._px2patch_corr(
disp_cat_fr,
disp_cat_fr,
2 * B,
1,
disp_cat_fr.shape[-2],
disp_cat_fr.shape[-1],
self_corr=True,
)
feat_self_attn = torch.cat(torch.chunk(feat_self_attn, 2, dim=0), dim=1)
disp_self_attn_fr = torch.cat(torch.chunk(disp_self_attn_fr, 2, dim=0), dim=1)
disp_cross_attn = disp_cross_attn.abs()
disp_self_attn_fr = disp_self_attn_fr.abs()
# concat
corr_feat = [feat_cross_attn, feat_self_attn, cost_curr, cost_warp]
corr_feat_fr = [
disp_cross_attn,
disp_self_attn_fr,
flow_warp,
(pred_warp > 0).float(),
confidence_warp,
]
corr_feat = torch.cat(corr_feat, dim=1)
corr_feat_fr = torch.cat(corr_feat_fr, dim=1)
return corr_feat, corr_feat_fr
def fuse(self, corr_feat, pred_curr, pred_warp, feat_curr):
"""estimate fusion weights
Args:
corr_feat (Tensor): NxCorrxHxW
pred_curr (Tensor): Nx1xHxW
pred_warp (Tensor): Nx1xHxW
feat_curr (Tensor): NxCxHxW
Returns:
Tensor: fusion weights
"""
pred_curr, pred_warp = (
pred_curr[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
pred_warp[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
)
# compute features
corr = self.conv_corr(corr_feat)
disp = self.conv_disp(torch.cat([pred_curr, pred_warp], dim=1))
mo = self.motion_conv(torch.cat([corr, disp], dim=1))
inp = torch.cat([feat_curr, mo, pred_curr, pred_warp], dim=1)
net = self.residual_conv(inp) + corr # long skip connection
# output fusion weight
fusion_weights = self.weight_head(net)
fusion_weights = F.interpolate(fusion_weights, scale_factor=self.ds_scale)
return fusion_weights
def memory_query(self, outputs, state, *args, **kwargs):
"""query memory state and produce fused disparity"""
left_feat, pred_curr = outputs["left_feat"], outputs["pred_disp"]
feat_curr = self.key_layer(left_feat)
if "memory" not in state:
outputs["left_feat"] = feat_curr # update left feat after projection
else:
# compute input cues
left_img_prev, feat_warp, confidence_warp, pred_warp, flow_warp = state[
"memory"
]
fea_l, fea_r = outputs["left_feat"], outputs["right_feat"]
corr_feat, corr_feat_fr = self.compute_input_cues(
pred_curr,
pred_warp,
feat_curr,
feat_warp,
flow_warp,
confidence_warp,
fea_l,
fea_r,
)
# fuse
fusion_weights = self.fuse(corr_feat, pred_curr, pred_warp, feat_curr)
fusion_weights = (
fusion_weights * (pred_warp > 0.0).float()
) # To prevent gradient shortcut
reset_weights = self.forget_head(corr_feat_fr)
reset_weights = (
reset_weights * (pred_warp > 0.0).float()
) # To prevent gradient shortcut
disp_fused = (
pred_curr * (1 - fusion_weights * reset_weights)
+ pred_warp * fusion_weights * reset_weights
)
outputs["pred_disp"] = disp_fused
outputs["fusion_weights"] = fusion_weights
outputs["reset_weights"] = reset_weights
outputs["pred_curr"] = pred_curr
outputs["pred_warp"] = pred_warp
outputs["left_feat"] = feat_curr
def memory_update(self, outputs, state, *args, **kwargs):
"""update memory state"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
def unfold_feat(self, feat, p=3):
"""unfolding feature for pix-patch correlation
Args:
feat (NxCxHxW): feature to be unfolded
p (int, optional): patch size. Defaults to 3.
Returns:
Tensor: unfolded tensor
"""
B, C, H, W = feat.shape
feat = self.unfold_op(feat) # B,C*p^2,HW
feat = feat.view(B, C, p ** 2, H * W)
return feat
def losses(self, loss, outputs, gt_disp, mask, idx, state, meta):
disp_fused = outputs["pred_disp"]
fusion_weights = outputs["fusion_weights"]
reset_weights = outputs["reset_weights"]
pred_curr = outputs["pred_curr"]
pred_warp = outputs["pred_warp"]
self.loss(
disp_fused,
gt_disp,
fusion_weights,
reset_weights,
pred_curr,
pred_warp,
idx,
loss,
)
def freeze(self):
self.eval()
self.loss.eval()
for param in self.parameters():
param.requires_grad = False
| CODD-main | model/fusion/fusion.py |
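# --- Minimal sketch (not part of CODD): the pixel-to-patch correlation used in
# Fusion._px2patch_corr, reproduced on random tensors. Each pixel's key is
# compared against a dilated 3x3 patch of memory keys at the same location;
# padding = patch_size - 1 with dilation 2 keeps the spatial size unchanged.
import math

import torch
import torch.nn as nn

if __name__ == "__main__":
    B, C, H, W, p = 2, 8, 16, 16, 3
    k = torch.randn(B, C, H, W)
    memory_k = torch.randn(B, C, H, W)
    unfold = nn.Unfold(kernel_size=(p, p), padding=p - 1, dilation=2)
    q = k.reshape(B, C, H * W).unsqueeze(2)            # B, C, 1, HW
    mem = unfold(memory_k).view(B, C, p ** 2, H * W)   # B, C, p^2, HW
    corr = (q * mem).sum(1).view(B, p ** 2, H, W) / math.sqrt(C)
    print(corr.shape)  # torch.Size([2, 9, 16, 16])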
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
from mmseg.models.builder import MODELS
@MODELS.register_module()
class NullFusion(nn.Module):
"""Implements a NULL memory module that does not do anything"""
def __init__(
self,
**kwargs,
):
super(NullFusion, self).__init__()
self.loss = None
def init_weights(self, pretrained=None):
pass
def forward(self, x):
"""This function should never be called"""
pass
def memory_query(self, outputs, state, *args, **kwargs):
"""This function should update pred disp"""
pass
def memory_update(self, outputs, state, *args, **kwargs):
"""This function should update memory"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
@MODELS.register_module()
class GTFusion(nn.Module):
def __init__(
self,
**kwargs,
):
super(GTFusion, self).__init__()
self.loss = None
def init_weights(self, pretrained=None):
pass
def forward(self, x):
"""This function should never be called"""
pass
    def memory_query(self, outputs, state, *args, **kwargs):
        """This function should update pred disp"""
        if "memory" in state:
gt_disp = state["gt_disp"][-1]
pred_disp = outputs["pred_disp"]
_, _, _, pred_disp_warp, _ = state["memory"]
# pad gt size so dimension matches
h, w = pred_disp.shape[-2:]
h_pad, w_pad = h - gt_disp.shape[-2], w - gt_disp.shape[-1]
gt_disp = torch.nn.functional.pad(gt_disp, (0, w_pad, 0, h_pad))
err_curr = (pred_disp.squeeze() - gt_disp).abs()
err_warp = (pred_disp_warp.squeeze() - gt_disp).abs()
pred_disp_fused = torch.empty_like(pred_disp)
# select curr better
mask = (err_curr - err_warp) < -1
pred_disp_fused[mask] = pred_disp[mask]
# select warp better
mask = (err_curr - err_warp) > 1
pred_disp_fused[mask] = pred_disp_warp[mask]
# average two
mask = ((err_curr - err_warp) <= 1) & ((err_curr - err_warp) >= -1)
pred_disp_fused[mask] = (pred_disp[mask] + pred_disp_warp[mask]) / 2
# skip invalid
mask = pred_disp_warp <= 0.0
pred_disp_fused[mask] = pred_disp[mask]
valid_mask = gt_disp > 0.0
pred_disp_fused[~valid_mask] = pred_disp[~valid_mask]
outputs["pred_disp"] = pred_disp_fused
# dummy outputs so we keep everything else consistent
outputs["fusion_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["fusion_weights"].requires_grad = True
outputs["reset_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["pred_curr"] = pred_disp
outputs["pred_warp"] = pred_disp_warp
def memory_update(self, outputs, state, *args, **kwargs):
"""This function should update memory"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
@MODELS.register_module()
class KalmanFusion(nn.Module):
def __init__(
self,
R=1e-5,
Q=1e-5,
**kwargs,
):
"""
        R: measurement variance, decrease to upweight current estimation
Q: process variance, decrease to downweight current estimation
"""
super(KalmanFusion, self).__init__()
self.R = R
self.Q = Q
self.loss = None
def init_weights(self, pretrained=None):
pass
def forward(self, x):
"""This function should never be called"""
pass
    def memory_query(self, outputs, state, *args, **kwargs):
        """This function should update pred disp"""
        if "memory" in state:
_, _, _, pred_disp_warp, _ = state["memory"]
pred_disp = outputs["pred_disp"]
if self.P is None: # P has not been initialized:
self.P = torch.zeros_like(pred_disp).to(
pred_disp.device
) # we process each pixel individually
Pminus = self.P + self.Q
# measurement update
K = Pminus / (Pminus + self.R)
pred_disp_fused = pred_disp_warp + K * (
pred_disp - pred_disp_warp
) # weighted sum
outliers = (pred_disp_warp - pred_disp).abs() > 1
pred_disp_fused[pred_disp_warp <= 0.0] = pred_disp[
pred_disp_warp <= 0.0
] # skip invalid
pred_disp_fused[outliers] = pred_disp[outliers] # skip assumed outliers
outputs["pred_disp"] = pred_disp_fused
# dummy outputs so we keep everything else consistent
outputs["fusion_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["fusion_weights"].requires_grad = True
outputs["reset_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["pred_curr"] = pred_disp
outputs["pred_warp"] = pred_disp_warp
else:
self.P = None
def memory_update(self, outputs, state, *args, **kwargs):
"""This function should update memory"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
| CODD-main | model/fusion/others.py |
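# --- Minimal sketch (not part of CODD): the scalar Kalman update that
# KalmanFusion applies per pixel, run on a toy 1-pixel sequence. Smaller R
# trusts the incoming measurement more; smaller Q trusts the running estimate
# more. Note the module above never writes a posterior back to self.P (it
# stays zero), so its gain reduces to the constant Q / (Q + R); the textbook
# update is shown here for reference.
if __name__ == "__main__":
    R, Q = 1e-5, 1e-5
    P, x = 0.0, 10.0                      # initial variance and fused disparity
    for z in (10.2, 9.9, 10.1, 10.0):     # per-frame disparity measurements
        Pminus = P + Q                    # time update
        K = Pminus / (Pminus + R)         # Kalman gain
        x = x + K * (z - x)               # measurement update (weighted sum)
        P = (1 - K) * Pminus              # posterior variance
        print(round(x, 4), round(K, 4))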
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import LOSSES
@LOSSES.register_module()
class FusionLoss(nn.Module):
def __init__(
        self, min_disp=1, max_disp=192, loss_weight=1.0, wr_weight=1.0, wf_weight=1.0
):
"""fusion loss
Args:
min_disp (int, optional): minimum valid disparity. Defaults to 1.
max_disp (int, optional): maximum valid disparity. Defaults to 192.
            loss_weight (float, optional): weight of loss. Defaults to 1.0.
wr_weight (float, optional): weight of reset weight loss. Defaults to 1.0.
wf_weight (float, optional): weight of fusion weight loss. Defaults to 1.0.
"""
super(FusionLoss, self).__init__()
self.min_disp = min_disp
self.max_disp = max_disp
self.loss_weight = loss_weight
self.wr_weight = wr_weight
self.wf_weight = wf_weight
self.C1 = 1.0
self.C2 = 5.0
def fusion_weight_loss(self, disp_curr, disp_warp, gt_disp, weight_warp, mask):
weight_curr = 1 - weight_warp
err_curr = torch.abs(disp_curr - gt_disp)
err_warp = torch.abs(disp_warp - gt_disp)
curr_closer_mask = (err_curr - err_warp) < -self.C1
curr_further_mask = (err_curr - err_warp) > self.C1
curr_same_mask = (err_curr - err_warp).abs() <= self.C1
wf_loss_closer = torch.mean(
weight_warp[curr_closer_mask & mask]
) # curr closer, impose loss on warp
wf_loss_further = torch.mean(
weight_curr[curr_further_mask & mask]
) # curr further, impose loss on curr
wf_loss_same = torch.mean(
torch.abs(weight_curr[curr_same_mask & mask] - 0.5)
) # constrain the weights in roughly even region
return wf_loss_closer, wf_loss_further, wf_loss_same
def reset_weight_loss(self, disp_curr, disp_warp, gt_disp, weight_warp, mask):
weight_curr = 1 - weight_warp
err_curr = torch.abs(disp_curr - gt_disp)
err_warp = torch.abs(disp_warp - gt_disp)
curr_closer_mask = (err_curr - err_warp) < -self.C2
curr_further_mask = (err_curr - err_warp) > self.C2
wr_loss_closer = torch.mean(
weight_warp[curr_closer_mask & mask]
) # curr closer, impose loss on warp
wr_loss_further = torch.mean(
weight_curr[curr_further_mask & mask]
) # curr further, impose loss on curr
return wr_loss_closer, wr_loss_further
def forward(
self,
pred_disp,
gt_disp,
fusion_weight,
reset_weight,
disp_curr,
disp_warp,
idx,
loss,
**kwargs,
):
if torch.any(torch.tensor(self.loss_weight) > 0.0):
mask = (gt_disp >= self.min_disp) & (gt_disp <= self.max_disp)
disp_loss = F.smooth_l1_loss(
pred_disp[mask], gt_disp[mask], reduction="mean"
)
mask = mask & (disp_warp > 0) # impose loss on valid projection only
wf_loss_closer, wf_loss_further, wf_loss_same = self.fusion_weight_loss(
disp_curr, disp_warp, gt_disp, fusion_weight, mask
)
wr_loss_closer, wr_loss_further = self.reset_weight_loss(
disp_curr, disp_warp, gt_disp, reset_weight, mask
)
total_loss = (
disp_loss
+ (wf_loss_closer + wf_loss_further + wf_loss_same * 0.2)
* self.wf_weight
+ (wr_loss_closer + wr_loss_further) * self.wr_weight
)
loss["loss_temporal{}".format(idx)] = total_loss * self.loss_weight
else:
loss["loss_temporal{}".format(idx)] = torch.tensor(
[0.0], requires_grad=True, device=gt_disp.device
)
return
@LOSSES.register_module()
class MotionLoss(nn.Module):
    def __init__(self, loss_weight=1.0):
super(MotionLoss, self).__init__()
self.loss_weight = loss_weight
self.RV_WEIGHT = 0.2
self.DZ_WEIGHT = 210.0
self.gamma = 0.9
def forward(self, flow2d_est, flow2d_rev, flow_gt, mask, idx, loss, **kwargs):
valid_mask = mask.permute(0, 2, 3, 1)
N = len(flow2d_est)
loss_total = 0.0
for i in range(N):
w = self.gamma ** (N - i - 1)
fl_rev = flow2d_rev[i]
fl_est, dz_est = flow2d_est[i].split([2, 1], dim=-1)
fl_gt, dz_gt = flow_gt.split([2, 1], dim=-1)
loss_total += w * (valid_mask * (fl_est - fl_gt).abs()).mean()
loss_total += (
w * self.DZ_WEIGHT * (valid_mask * (dz_est - dz_gt).abs()).mean()
)
loss_total += (
w * self.RV_WEIGHT * (valid_mask * (fl_rev - fl_gt).abs()).mean()
)
loss["loss_warp{}".format(idx)] = loss_total * self.loss_weight
with torch.no_grad():
epe_2d = (fl_est - fl_gt).norm(dim=-1)
epe_2d = epe_2d.view(-1)[valid_mask.view(-1)]
epe_dz = (dz_est - dz_gt).norm(dim=-1)
epe_dz = epe_dz.view(-1)[valid_mask.view(-1)]
metrics = {
"epe2d_warp{}".format(idx): epe_2d.mean(),
"epedz_warp{}".format(idx): epe_dz.mean(),
"1px_warp{}".format(idx): (epe_2d < 1).float().mean(),
"3px_warp{}".format(idx): (epe_2d < 3).float().mean(),
"5px_warp{}".format(idx): (epe_2d < 5).float().mean(),
}
loss.update(metrics)
return
| CODD-main | model/losses/temporal.py |
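# --- Minimal sketch (not part of CODD): the three comparison masks behind
# FusionLoss.fusion_weight_loss, on toy per-pixel errors with C1 = 1. Where the
# current prediction is clearly closer to GT the warped weight is penalized,
# where it is clearly further the current weight is penalized, and nearly-tied
# pixels pull both weights toward 0.5.
import torch

if __name__ == "__main__":
    err_curr = torch.tensor([0.1, 5.0, 1.2])
    err_warp = torch.tensor([4.0, 0.2, 1.0])
    C1 = 1.0
    diff = err_curr - err_warp
    print(diff < -C1)        # tensor([ True, False, False]) -> penalize weight_warp
    print(diff > C1)         # tensor([False,  True, False]) -> penalize 1 - weight_warp
    print(diff.abs() <= C1)  # tensor([False, False,  True]) -> push weights to 0.5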
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import LOSSES
def subpix_cost(cost: torch.Tensor, disp: torch.Tensor, maxdisp: int):
"""
phi, e.g. eqt(9) in HITNet paper
:param cost:
:param disp:
:return:
"""
disp[disp >= maxdisp - 1] = maxdisp - 2
disp[disp < 0] = 0
disp_floor = disp.floor()
sub_cost = (disp - disp_floor) * torch.gather(cost, 1, disp_floor.long() + 1) + (
disp_floor + 1 - disp
) * torch.gather(cost, 1, disp_floor.long())
return sub_cost
def get_non_match_disp(pred_init_cost: torch.Tensor, d_gt: torch.Tensor):
"""
HITNet paper, eqt (11)
:param pred_init_cost: B, D, H, W
:param d_gt: B, 1, H, W
:return: LongTensor: min_non_match_disp: B, 1, H, W
"""
B, D, H, W = pred_init_cost.size()
disp_cand = (
torch.arange(0, D, step=1, device=d_gt.device).view(1, -1, 1, 1).repeat(B, 1, H, W).float()
)
match_disp_lower_bound = d_gt - 1.5
match_disp_upper_bound = d_gt + 1.5
tmp_cost = torch.where(
(disp_cand < match_disp_lower_bound) | (disp_cand > match_disp_upper_bound),
pred_init_cost,
torch.tensor(float("inf"), device=d_gt.device),
)
__, min_non_match_disp = torch.min(tmp_cost, dim=1, keepdim=True)
return min_non_match_disp
def echo_loss(x, alpha, c):
"""
https://arxiv.org/pdf/1701.03077.pdf
    An amazing loss function presented in the paper "A General and Adaptive Robust Loss Function" (CVPR 2019).
    The name prefix 'echo' comes from the Overwatch hero who can become any other hero during her ultimate.
:param x: torch.Tensor
:param alpha: shape param
:param c > 0: scale param
:return: torch.Tensor: loss
"""
loss = (abs(alpha - 2) / alpha) * ((((x / c) ** 2) / abs(alpha - 2) + 1) ** (alpha / 2) - 1)
return loss
@LOSSES.register_module()
class HITLoss(nn.Module):
"""
https://arxiv.org/pdf/2007.12140.pdf
"""
def __init__(
self, max_disp=320, lambda_init=1, lambda_prop=1, lambda_slant=1, lambda_w=1, alpha=0.9, c=0.1
):
super(HITLoss, self).__init__()
self.maxdisp = max_disp
self.lambda_init = lambda_init
self.lambda_prop = lambda_prop
self.lambda_slant = lambda_slant
self.lambda_w = lambda_w
self.alpha = alpha
self.c = c
        # Sobel kernels for img_grad (needed by HITLossWithDepth.edgenorm_loss)
        edge_kx = torch.tensor([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]])
        edge_ky = torch.tensor([[1.0, 2.0, 1.0], [0.0, 0.0, 0.0], [-1.0, -2.0, -1.0]])
        edge_k = torch.stack((edge_kx, edge_ky)).view(2, 1, 3, 3)
        self.register_buffer("edge_k", edge_k)
A = torch.zeros(81, 3)
for i in range(81):
A[i, 0] = i // 9 - 4
A[i, 1] = i % 9 - 4
A[i, 2] = 1
A_T = A.t()
A_inverse = (A_T.mm(A)).inverse()
# B = (A_t*A)^-1*A_t
B = A_inverse.mm(A_T)
convy_weight = torch.unsqueeze(torch.unsqueeze(B[0, :].view(9, 9), dim=0), dim=0)
convx_weight = torch.unsqueeze(torch.unsqueeze(B[1, :].view(9, 9), dim=0), dim=0)
self.convy = nn.Conv2d(1, 1, 9, stride=1, padding=4, bias=False)
self.convy.weight = nn.Parameter(convy_weight)
self.convx = nn.Conv2d(1, 1, 9, stride=1, padding=4, bias=False)
self.convx.weight = nn.Parameter(convx_weight)
def img_grad(self, img):
img_grad = F.conv2d(img, self.edge_k, padding=1)
img_dx = img_grad[:, 0, :, :].contiguous().view_as(img) # h direction
img_dy = img_grad[:, 1, :, :].contiguous().view_as(img) # w direction
return img_dx, img_dy
def init_loss(self, pred_init_cost: torch.Tensor, d_gt: torch.Tensor, maxdisp, beta=1):
"""
        Initialization loss, HITNet paper Eq. (10)
:param pred_init_cost:
:param d_gt:
:param beta:
:return: init loss [B*1*H*W]
"""
cost_gt = subpix_cost(pred_init_cost, d_gt, maxdisp)
cost_nm = torch.gather(pred_init_cost, 1, get_non_match_disp(pred_init_cost, d_gt))
loss = cost_gt + F.relu(beta - cost_nm)
return loss
def prop_loss(self, d_diff, A=1, alpha=1, c=0.1):
"""
        Loss from HITNet Eq. (12)
:param d_diff: |d^gt - d^|
:param A: The truncation value
:param alpha: shape param
:param c > 0: scale param
:return: torch.Tensor: L^prop [B*1*H*W]
"""
loss = echo_loss(torch.clamp(d_diff, max=A), alpha, c)
return loss
def slant_loss(self, dx, dy, dx_gt, dy_gt, d_diff, mask, B=1):
closer_mask = d_diff < B
mask = mask * closer_mask # mask and
slant_diff = torch.cat([dx_gt - dx, dy_gt - dy], dim=1)
loss = torch.norm(slant_diff, p=1, dim=1, keepdim=True)[mask]
return loss # 1-dim vector
def w_loss(self, conf, diff, mask, C1=1, C2=1.5):
"""
:param conf: aka omega
:param diff: |d^gt - d^|
:param C1:
:param C2:
:return: torch.Tensor: loss
"""
closer_mask = diff < C1
further_mask = diff > C2
mask = mask * (closer_mask + further_mask) # mask and
closer_item = F.relu(1 - conf)
further_item = F.relu(conf)
loss = closer_item * closer_mask.float() + further_item * further_mask.float()
return loss[mask] # 1-dim vector
def forward(
self, init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid, d_gt, seg_gt=None
):
"""
:param init_cv_cost_pyramid:
:param prop_disp_pyramid:
:param slant_pyramid:
:param w_pyramid:
:param d_gt:
:param maxdisp:
:param loss_init:
:param loss_prop:
:param loss_slant:
:param loss_w:
:param lambda_init:
:param lambda_prop:
:param lambda_slant:
:param lambda_w:
:return:
"""
# dx_gt, dy_gt = self.img_grad(d_gt)
if seg_gt is not None:
d_gt[seg_gt == 0] = 0
dx_gt = self.convx(d_gt)
dy_gt = self.convy(d_gt)
d_gt_pyramid = []
for i in range(len(init_cv_cost_pyramid)):
scale = 4 * (2 ** i) # 4,8,16,32,64
d_gt_pyramid.append(torch.nn.MaxPool2d(scale, scale)(d_gt) / (scale / 4))
d_gt_pyramid.reverse() # disp ground truth generation. From small to large.
init_loss_pyramid = []
for i, cv in enumerate(init_cv_cost_pyramid):
mask = (d_gt_pyramid[i] > 0) & (
d_gt_pyramid[i] < self.maxdisp / (2 ** (len(init_cv_cost_pyramid) - 1 - i))
)
init_loss_pyramid.append(
self.lambda_init
* self.init_loss(
cv, d_gt_pyramid[i], self.maxdisp / (2 ** (len(init_cv_cost_pyramid) - 1 - i))
)[mask]
)
init_loss_vec = torch.cat(init_loss_pyramid, dim=0) # 1-dim vector
prop_loss_pyramid = [] # masked
prop_diff_pyramid = [] # not masked
mask = (d_gt > 0) & (d_gt < self.maxdisp)
prop_loss_weights = [
1 / 64,
1 / 32,
1 / 32,
1 / 16,
1 / 16,
1 / 8,
1 / 8,
1 / 4,
1 / 4,
1 / 4,
1 / 2,
1,
]
A = [1] * 9 + [10000] * 3
for i, disp in enumerate(prop_disp_pyramid):
prop_diff_pyramid.append(torch.abs(d_gt - disp))
prop_loss_pyramid.append(
self.lambda_prop
* prop_loss_weights[i]
* self.prop_loss(prop_diff_pyramid[-1], A=A[i], alpha=self.alpha, c=self.c)[mask]
)
prop_loss_vec = torch.cat(prop_loss_pyramid, dim=0)
slant_loss_pyramid = []
slant_loss_weights = [
1 / 64,
1 / 32,
1 / 32,
1 / 16,
1 / 16,
1 / 8,
1 / 8,
1 / 4,
1 / 4,
1 / 4,
1 / 2,
1,
]
for i in range(len(dx_pyramid)):
slant_loss_pyramid.append(
self.lambda_slant
* slant_loss_weights[i]
* self.slant_loss(
dx_pyramid[i], dy_pyramid[i], dx_gt, dy_gt, prop_diff_pyramid[i], mask
)
)
slant_loss_vec = torch.cat(slant_loss_pyramid, dim=0)
w_loss_pyramid = []
w_loss_weights = [1 / 32, 1 / 32, 1 / 16, 1 / 16, 1 / 8, 1 / 8, 1 / 4, 1 / 4]
for i, w in enumerate(w_pyramid):
w_loss_pyramid.append(
self.lambda_w
* w_loss_weights[i]
* self.w_loss(
w, prop_diff_pyramid[i + 1], mask
) # index for prop_diff_pyramid plus 1 since there is no confidence at 1st level
)
w_loss_vec = torch.cat(w_loss_pyramid, dim=0)
total_loss_vec = torch.cat(
[init_loss_vec, prop_loss_vec, slant_loss_vec, w_loss_vec], dim=0
)
losses = {
"init_loss": torch.mean(init_loss_vec),
"prop_loss": torch.mean(prop_loss_vec),
"slant_loss": torch.mean(slant_loss_vec),
"w_loss": torch.mean(w_loss_vec),
}
return torch.mean(total_loss_vec), losses
@LOSSES.register_module()
class HITLossWithDepth(HITLoss):
"""
https://arxiv.org/pdf/2007.12140.pdf
"""
def __init__(
self, lambda_depth=1, lambda_depth_grad=1, lambda_depth_normal=1, eps=1e-8, **kwargs
):
super(HITLossWithDepth, self).__init__(**kwargs)
self.lambda_depth = lambda_depth
self.lambda_depth_grad = lambda_depth_grad
self.lambda_depth_normal = lambda_depth_normal
self.calib = 1
self.eps = eps
self.cos = nn.CosineSimilarity(dim=1, eps=self.eps)
def edgenorm_loss(self, depth, target, mask):
depth = depth * mask.float()
target = target * mask.float()
target_grad_dx, target_grad_dy = self.img_grad(target)
depth_grad_dx, depth_grad_dy = self.img_grad(depth)
ones = torch.ones_like(depth)[mask]
depth_normal = torch.stack((-depth_grad_dx[mask], -depth_grad_dy[mask], ones), dim=1)
target_normal = torch.stack((-target_grad_dx[mask], -target_grad_dy[mask], ones), dim=1)
loss_dx = self.comp_err(depth_grad_dx[mask], target_grad_dx[mask]).mean()
loss_dy = self.comp_err(depth_grad_dy[mask], target_grad_dy[mask]).mean()
loss_normal = torch.abs(1 - self.cos(target_normal, depth_normal)).mean()
return loss_dx + loss_dy, loss_normal
def comp_err(self, depth, target):
return torch.log(torch.abs(depth - target).add(1.0))
def forward(
self, init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid, d_gt
):
hitnet_loss, loss_dict = super(HITLossWithDepth, self).forward(
init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid, d_gt
)
loss_dict['hitnet_loss'] = hitnet_loss
pred_disp = prop_disp_pyramid[-1]
pred_depth = self.calib / (pred_disp + self.eps)
target_depth = self.calib / (d_gt + self.eps)
mask = (d_gt > 0) & (d_gt < self.maxdisp)
depth_loss = self.lambda_depth * self.comp_err(pred_depth[mask], target_depth[mask]).mean()
loss_dict["depth_loss"] = depth_loss
total_loss = hitnet_loss + depth_loss
if (self.lambda_depth_grad > 0) or (self.lambda_depth_normal > 0):
grad_loss, normal_loss = self.edgenorm_loss(pred_depth, target_depth, mask)
depth_grad_loss = self.lambda_depth_grad * grad_loss
depth_normal_loss = self.lambda_depth_normal * normal_loss
total_loss = total_loss + depth_grad_loss + depth_normal_loss
loss_dict.update({"depth_grad_loss": depth_grad_loss, "depth_normal_loss": depth_normal_loss})
return total_loss, loss_dict
| CODD-main | model/losses/hitnet.py |
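# --- Minimal sketch (not part of CODD): the general robust loss (echo_loss
# above) evaluated at a few shape parameters. alpha near 2 approaches a scaled
# L2, alpha = 1 behaves like a smooth L1, and negative alpha saturates for
# large residuals; c sets the width of the quadratic bowl around zero.
import torch

def general_robust_loss(x, alpha, c):
    # same form as echo_loss in model/losses/hitnet.py
    return (abs(alpha - 2) / alpha) * ((((x / c) ** 2) / abs(alpha - 2) + 1) ** (alpha / 2) - 1)

if __name__ == "__main__":
    x = torch.linspace(-3.0, 3.0, 7)
    for alpha in (1.999, 1.0, 0.5, -2.0):  # alpha = 2 exactly would divide by zero
        print(alpha, general_robust_loss(x, alpha, c=1.0))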
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .hitnet import *
from .temporal import *
| CODD-main | model/losses/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .hitnet import HITNetMF
| CODD-main | model/stereo/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
from mmseg.models.builder import BACKBONES
def conv_down(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(oup, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def conv_up(inp, oup):
return nn.Sequential(
nn.ConvTranspose2d(inp, oup, 2, stride=2, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def conv_merge(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(oup, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(oup, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def conv(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
@BACKBONES.register_module()
class HITUNet(nn.Module):
def __init__(self):
super().__init__()
# 16,16,24,24,32
self.conv1 = conv(3, 16)
self.down1 = conv_down(16, 16)
self.down2 = conv_down(16, 24)
self.down3 = conv_down(24, 24)
self.down4 = nn.Sequential(
conv_down(24, 32),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.up4 = conv_up(32, 24)
self.up3 = conv_up(24, 24)
self.up2 = conv_up(24, 16)
self.up1 = conv_up(16, 16)
self.merge4 = conv_merge(24 + 24, 24)
self.merge3 = conv_merge(24 + 24, 24)
self.merge2 = conv_merge(16 + 16, 16)
self.merge1 = conv_merge(16 + 16, 16)
def forward(self, x):
        x_down = self.conv1(x)  # 16 x 320 x 960 (for a 3 x 320 x 960 input)
        x_down1 = self.down1(x_down)  # 16 x 160 x 480
        x_down2 = self.down2(x_down1)  # 24 x 80 x 240
        x_down3 = self.down3(x_down2)  # 24 x 40 x 120
        x_down4 = self.down4(x_down3)  # 32 x 20 x 60
        x_up4 = self.up4(x_down4)
        x_up4 = self.merge4(torch.cat((x_down3, x_up4), dim=1))  # 24 x 40 x 120
        x_up3 = self.up3(x_up4)
        x_up3 = self.merge3(torch.cat((x_down2, x_up3), dim=1))  # 24 x 80 x 240
        x_up2 = self.up2(x_up3)
        x_up2 = self.merge2(torch.cat((x_down1, x_up2), dim=1))  # 16 x 160 x 480
        x_up1 = self.up1(x_up2)
        x_up1 = self.merge1(torch.cat((x_down, x_up1), dim=1))  # 16 x 320 x 960
return [x_down4, x_up4, x_up3, x_up2, x_up1]
| CODD-main | model/stereo/hitnet/backbone.py |
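# --- Minimal sketch (not part of CODD; assumes the repo root is on PYTHONPATH
# and mmseg/mmcv are installed so the BACKBONES decorator imports cleanly):
# HITUNet returns a 5-level pyramid ordered coarse to fine, at 1/16, 1/8, 1/4,
# 1/2 and full resolution of the input.
import torch

from model.stereo.hitnet.backbone import HITUNet

if __name__ == "__main__":
    net = HITUNet().eval()
    x = torch.randn(1, 3, 320, 960)  # H and W must be divisible by 16
    with torch.no_grad():
        for f in net(x):
            print(tuple(f.shape))
    # (1, 32, 20, 60), (1, 24, 40, 120), (1, 24, 80, 240),
    # (1, 16, 160, 480), (1, 16, 320, 960)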
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from utils import thres_metric
from ...builder import ESTIMATORS
@ESTIMATORS.register_module()
class HITNetMF(nn.Module):
"""Implementation of HITNet
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(
self,
backbone,
initialization,
propagation,
loss=None,
):
super(HITNetMF, self).__init__()
self.backbone = builder_oss.build_backbone(backbone)
self.tile_init = MODELS.build(initialization)
self.tile_update = MODELS.build(propagation)
self.freezed = False
if loss is not None:
self.loss = builder_oss.build_loss(loss)
else:
self.loss = None
n_parameters = sum(p.numel() for n, p in self.named_parameters())
print(
"PARAM STATUS: total number of parameters %.3fM in stereo network"
% (n_parameters / 1000 ** 2)
)
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
return x
def losses(self, loss, outputs, gt_disp, mask_disp, idx, gt_semantic_seg, meta):
init_cv_pyramid = outputs["init_cv_pyramid"]
prop_disp_pyramid = outputs["prop_disp_pyramid"]
dx_pyramid = outputs["dx_pyramid"]
dy_pyramid = outputs["dy_pyramid"]
w_pyramid = outputs["w_pyramid"]
loss["loss_disp{}".format(idx)], loss_dict = self.loss(
init_cv_pyramid,
prop_disp_pyramid,
dx_pyramid,
dy_pyramid,
w_pyramid,
gt_disp,
gt_semantic_seg,
)
for k, v in loss_dict.items():
loss[k + "{}".format(idx)] = v
with torch.no_grad():
pred_disp = outputs["pred_disp"]
loss["epe{}".format(idx)] = F.l1_loss(
gt_disp[mask_disp], pred_disp[mask_disp], reduction="mean"
)
loss["thres3"] = thres_metric(pred_disp, gt_disp, mask_disp, 3.0)
def stereo_matching(self, left_img, right_img, img_metas=None, state=None):
left_fea_pyramid = self.extract_feat(left_img)
right_fea_pyramid = self.extract_feat(right_img)
init_cv_pyramid, init_tile_pyramid = self.tile_init(
left_fea_pyramid, right_fea_pyramid
)
outputs = self.tile_update(
left_fea_pyramid, right_fea_pyramid, init_tile_pyramid
)
if self.training and not self.freezed:
outputs["init_cv_pyramid"] = init_cv_pyramid
outputs["pred_disp"] = outputs["prop_disp_pyramid"][-1]
outputs["left_feat"] = left_fea_pyramid[2]
outputs["right_feat"] = right_fea_pyramid[2]
else:
outputs = dict(
pred_disp=outputs,
left_feat=left_fea_pyramid[2],
right_feat=right_fea_pyramid[2],
)
outputs["left_img"] = left_img
if len(outputs["pred_disp"].shape) == 3:
outputs["pred_disp"] = outputs["pred_disp"].unsqueeze(1)
return outputs
def freeze(self):
self.tile_update.eval()
for param in self.tile_update.parameters():
param.requires_grad = False
self.tile_init.eval()
for param in self.tile_init.parameters():
param.requires_grad = False
feature_extractor = (
self.backbone if self.backbone is not None else self.feature_extractor
)
feature_extractor.eval()
for param in feature_extractor.parameters():
param.requires_grad = False
self.loss.eval()
for param in self.loss.parameters():
param.requires_grad = False
self.freezed = True
| CODD-main | model/stereo/hitnet/hitnet.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .backbone import HITUNet
from .initialization import TileInitialization
from .propagation import TilePropagation
from .hitnet import HITNetMF
| CODD-main | model/stereo/hitnet/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models.builder import MODELS
def make_grid(h, w, device):
gridh = torch.arange(h, device=device).float()
gridw = torch.arange(w, device=device).float()
gridh, gridw = torch.meshgrid(gridh, gridw)
grid = torch.stack((gridw, gridh), dim=-1)
return grid
def calc_init_disp(feature_left, feature_right, max_disp):
# n,c,h,w = feature_left.size()
grid = make_grid(feature_left.size(2), feature_right.size(3), feature_right.device)
a = torch.zeros((grid.shape[0], grid.shape[1], 1), device=grid.device)
grid = torch.cat((grid, a), dim=-1)
grid = grid.unsqueeze(0).unsqueeze(0).float()
# grid[:, :, :, :, 0] = grid[:,:,:,:,0] -1
grid[:, :, :, :, 0] = grid[:, :, :, :, 0] / (feature_right.size(3) - 1) * 2 - 1
grid[:, :, :, :, 1] = grid[:, :, :, :, 1] / (feature_right.size(2) - 1) * 2 - 1
grid = grid[:, ::4, :, ::4, :]
grid = grid.repeat(feature_right.size(0), max_disp, 1, 1, 1)
max_disp = torch.arange(max_disp, dtype=grid.dtype, device=grid.device)
max_disp = max_disp / (feature_right.size(3) - 1) * 2
max_disp = max_disp.view(1, -1, 1, 1)
grid[:, :, :, :, 0] = grid[:, :, :, :, 0] - max_disp
feature_right = feature_right.unsqueeze(2)
# size = n,c,d,h,w
feature_right = F.grid_sample(
feature_right, grid, mode="nearest", align_corners=True, padding_mode="zeros"
)
cv = feature_left.unsqueeze(2) - feature_right
cv = torch.norm(cv, p=1, dim=1)
return cv
@MODELS.register_module()
class TileInitialization(nn.Module):
"""
Tile hypothesis initialization
input: dual feature pyramid
output: initial tile hypothesis pyramid
"""
def __init__(self, max_disp, fea_c=[16, 16, 24, 24, 32]):
super().__init__()
self.maxdisp = max_disp
fea_c1x, fea_c2x, fea_c4x, fea_c8x, fea_c16x = fea_c
self.pad = nn.ZeroPad2d((0, 3, 0, 0))
self.tile_conv1x = nn.Sequential(
nn.Conv2d(fea_c1x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv2x = nn.Sequential(
nn.Conv2d(fea_c2x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv4x = nn.Sequential(
nn.Conv2d(fea_c4x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv8x = nn.Sequential(
nn.Conv2d(fea_c8x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv16x = nn.Sequential(
nn.Conv2d(fea_c16x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
# D in Eq. (4)
self.tile_fea_dscrpt16x = nn.Sequential(
nn.Conv2d(17, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt8x = nn.Sequential(
nn.Conv2d(17, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt4x = nn.Sequential(
nn.Conv2d(33, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt2x = nn.Sequential(
nn.Conv2d(25, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt1x = nn.Sequential(
nn.Conv2d(25, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
def tile_features(self, fea_l, fea_r):
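        # NOTE: the right features are padded and convolved with stride (4, 1)
        # so they keep full horizontal resolution for the disparity search in
        # calc_init_disp; the stride is restored to (4, 4) after each use.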
tile_fea_l1x = self.tile_conv1x(fea_l[-1])
padded_fea_r1x = self.pad(fea_r[-1])
self.tile_conv1x[0].stride = (4, 1)
tile_fea_r1x = self.tile_conv1x(padded_fea_r1x)
self.tile_conv1x[0].stride = (4, 4)
tile_fea_l2x = self.tile_conv2x(fea_l[-2])
padded_fea_r2x = self.pad(fea_r[-2])
self.tile_conv2x[0].stride = (4, 1)
tile_fea_r2x = self.tile_conv2x(padded_fea_r2x)
self.tile_conv2x[0].stride = (4, 4)
tile_fea_l4x = self.tile_conv4x(fea_l[-3])
padded_fea_r4x = self.pad(fea_r[-3])
self.tile_conv4x[0].stride = (4, 1)
tile_fea_r4x = self.tile_conv4x(padded_fea_r4x)
self.tile_conv4x[0].stride = (4, 4)
tile_fea_l8x = self.tile_conv8x(fea_l[-4])
padded_fea_r8x = self.pad(fea_r[-4])
self.tile_conv8x[0].stride = (4, 1)
tile_fea_r8x = self.tile_conv8x(padded_fea_r8x)
self.tile_conv8x[0].stride = (4, 4)
tile_fea_l16x = self.tile_conv16x(fea_l[-5])
padded_fea_r16x = self.pad(fea_r[-5])
self.tile_conv16x[0].stride = (4, 1)
tile_fea_r16x = self.tile_conv16x(padded_fea_r16x)
self.tile_conv16x[0].stride = (4, 4)
return [
[tile_fea_l16x, tile_fea_r16x],
[tile_fea_l8x, tile_fea_r8x],
[tile_fea_l4x, tile_fea_r4x],
[tile_fea_l2x, tile_fea_r2x],
[tile_fea_l1x, tile_fea_r1x],
]
def tile_hypothesis_pyramid(self, tile_feature_pyramid, fea_l_pyramid):
# Eq. (2)
init_tile_cost16x = calc_init_disp(tile_feature_pyramid[0][0], tile_feature_pyramid[0][1], self.maxdisp // 16)
init_tile_cost8x = calc_init_disp(tile_feature_pyramid[1][0], tile_feature_pyramid[1][1], self.maxdisp // 8)
init_tile_cost4x = calc_init_disp(tile_feature_pyramid[2][0], tile_feature_pyramid[2][1], self.maxdisp // 4)
init_tile_cost2x = calc_init_disp(tile_feature_pyramid[3][0], tile_feature_pyramid[3][1], self.maxdisp // 2)
init_tile_cost1x = calc_init_disp(tile_feature_pyramid[4][0], tile_feature_pyramid[4][1], self.maxdisp)
# Eq. (3)
min_tile_cost16x, min_tile_disp16x = torch.min(init_tile_cost16x, 1)
min_tile_cost8x, min_tile_disp8x = torch.min(init_tile_cost8x, 1)
min_tile_cost4x, min_tile_disp4x = torch.min(init_tile_cost4x, 1)
min_tile_cost2x, min_tile_disp2x = torch.min(init_tile_cost2x, 1)
min_tile_cost1x, min_tile_disp1x = torch.min(init_tile_cost1x, 1)
min_tile_cost16x = torch.unsqueeze(min_tile_cost16x, 1)
min_tile_cost8x = torch.unsqueeze(min_tile_cost8x, 1)
min_tile_cost4x = torch.unsqueeze(min_tile_cost4x, 1)
min_tile_cost2x = torch.unsqueeze(min_tile_cost2x, 1)
min_tile_cost1x = torch.unsqueeze(min_tile_cost1x, 1)
min_tile_disp16x = min_tile_disp16x.float().unsqueeze(1)
min_tile_disp8x = min_tile_disp8x.float().unsqueeze(1)
min_tile_disp4x = min_tile_disp4x.float().unsqueeze(1)
min_tile_disp2x = min_tile_disp2x.float().unsqueeze(1)
min_tile_disp1x = min_tile_disp1x.float().unsqueeze(1)
# Eq. (4)
tile_dscrpt16x = self.tile_fea_dscrpt16x(torch.cat([min_tile_cost16x, tile_feature_pyramid[0][0]], 1))
tile_dscrpt8x = self.tile_fea_dscrpt8x(torch.cat([min_tile_cost8x, tile_feature_pyramid[1][0]], 1))
tile_dscrpt4x = self.tile_fea_dscrpt4x(torch.cat([min_tile_cost4x, fea_l_pyramid[0]], 1))
tile_dscrpt2x = self.tile_fea_dscrpt2x(torch.cat([min_tile_cost2x, fea_l_pyramid[1]], 1))
tile_dscrpt1x = self.tile_fea_dscrpt1x(torch.cat([min_tile_cost1x, fea_l_pyramid[2]], 1))
tile_dx16x = torch.zeros_like(min_tile_disp16x)
tile_dx8x = torch.zeros_like(min_tile_disp8x)
tile_dx4x = torch.zeros_like(min_tile_disp4x)
tile_dx2x = torch.zeros_like(min_tile_disp2x)
tile_dx1x = torch.zeros_like(min_tile_disp1x)
tile_dy16x = torch.zeros_like(min_tile_disp16x)
tile_dy8x = torch.zeros_like(min_tile_disp8x)
tile_dy4x = torch.zeros_like(min_tile_disp4x)
tile_dy2x = torch.zeros_like(min_tile_disp2x)
tile_dy1x = torch.zeros_like(min_tile_disp1x)
tile_hyp16x = torch.cat([min_tile_disp16x, tile_dx16x, tile_dy16x, tile_dscrpt16x], 1)
tile_hyp8x = torch.cat([min_tile_disp8x, tile_dx8x, tile_dy8x, tile_dscrpt8x], 1)
tile_hyp4x = torch.cat([min_tile_disp4x, tile_dx4x, tile_dy4x, tile_dscrpt4x], 1)
tile_hyp2x = torch.cat([min_tile_disp2x, tile_dx2x, tile_dy2x, tile_dscrpt2x], 1)
tile_hyp1x = torch.cat([min_tile_disp1x, tile_dx1x, tile_dy1x, tile_dscrpt1x], 1)
return [
[
init_tile_cost16x,
init_tile_cost8x,
init_tile_cost4x,
init_tile_cost2x,
init_tile_cost1x,
],
[
tile_hyp16x,
tile_hyp8x,
tile_hyp4x,
tile_hyp2x,
tile_hyp1x,
]
]
def forward(self, fea_l_pyramid, fea_r_pyramid):
tile_feature_duo_pyramid = self.tile_features(fea_l_pyramid, fea_r_pyramid)
init_cv_pyramid, init_hypo_pyramid = self.tile_hypothesis_pyramid(tile_feature_duo_pyramid, fea_l_pyramid)
return [init_cv_pyramid, init_hypo_pyramid]
| CODD-main | model/stereo/hitnet/initialization.py |
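# --- Minimal sketch (not part of CODD): an L1 matching cost volume built by
# explicit shifting, the dense analogue of calc_init_disp's grid_sample gather.
# cv[:, d, y, x] compares the left feature at x with the right feature at
# x - d; out-of-range positions are zero-padded. calc_init_disp additionally
# subsamples the grid to 4x4 tiles before gathering.
import torch
import torch.nn.functional as F

def l1_cost_volume(feat_l, feat_r, max_disp):
    B, C, H, W = feat_l.shape
    cv = feat_l.new_zeros(B, max_disp, H, W)
    for d in range(max_disp):
        shifted = F.pad(feat_r, (d, 0))[..., :W]  # right features shifted by d
        cv[:, d] = (feat_l - shifted).abs().sum(1)
    return cv

if __name__ == "__main__":
    fl, fr = torch.randn(1, 16, 8, 32), torch.randn(1, 16, 8, 32)
    print(l1_cost_volume(fl, fr, max_disp=12).shape)  # torch.Size([1, 12, 8, 32])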
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models.builder import MODELS
def to_plane(d, dx, dy, size=4):
c = torch.linspace(-(size - 1) / 2, (size - 1) / 2, size, device=d.device)
a = c.view([1, 1, size])
a = torch.unsqueeze(a.repeat(1, d.shape[2] * size, d.shape[3]), dim=1)
b = c.view([1, size, 1])
b = torch.unsqueeze(b.repeat(1, d.shape[2], d.shape[3] * size), dim=1)
d_4 = F.interpolate(d, scale_factor=size, mode="nearest")
dx_4 = F.interpolate(dx, scale_factor=size, mode="nearest")
dy_4 = F.interpolate(dy, scale_factor=size, mode="nearest")
d_plane = d_4 + a * dx_4 + b * dy_4
return d_plane
def upsample(h, scale=2, size=2):
d = h[:, 0:1, :, :]
dx = h[:, 1:2, :, :]
dy = h[:, 2:3, :, :]
d = to_plane(d, dx, dy, size=size) * scale
h_up = torch.cat((d, F.interpolate(h[:, 1:, :, :], scale_factor=size, mode="nearest")), dim=1)
return h_up
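# --- Minimal sketch (not part of CODD): slanted-plane expansion as done by
# to_plane above (Eq. (5) in the HITNet paper). A single 1x1 tile with
# disparity d and gradients (dx, dy) expands to a 4x4 patch d + a*dx + b*dy,
# where a and b are the in-tile pixel offsets in [-1.5, 1.5].
if __name__ == "__main__":
    _d = torch.full((1, 1, 1, 1), 10.0)  # tile disparity
    _dx = torch.full((1, 1, 1, 1), 1.0)  # horizontal gradient
    _dy = torch.zeros(1, 1, 1, 1)        # vertical gradient
    print(to_plane(_d, _dx, _dy, size=4)[0, 0])
    # rows identical; columns run 8.5, 9.5, 10.5, 11.5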
def warp(x, disp):
"""
    warp an image/tensor (im2) back to im1, according to horizontal disparity
    x: [B, C, H, W] (im2)
    disp: [B, 1, H, W] disparity; the output at x samples the input at x - disp
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W, device=x.device).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H, device=x.device).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
vgrid = torch.cat((xx, yy), 1).float()
vgrid[:, :1, :, :] = vgrid[:, :1, :, :] - disp
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = F.grid_sample(x, vgrid, align_corners=True)
return output
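# --- Minimal sketch (not part of CODD): warp with a constant integer disparity
# simply shifts content to the right; the output at x samples the input at
# x - disp, with zeros where the sample falls outside the image.
if __name__ == "__main__":
    _x = torch.arange(8.0).view(1, 1, 1, 8)  # a 1-row ramp image
    _disp = torch.full((1, 1, 1, 8), 2.0)    # constant disparity of 2
    print(warp(_x, _disp)[0, 0, 0])
    # tensor([0., 0., 0., 1., 2., 3., 4., 5.])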
class TileWarping(nn.Module):
def __init__(self):
super(TileWarping, self).__init__()
self.unshuffle = torch.nn.PixelUnshuffle(4)
def forward(self, tile_plane: torch.Tensor, fea_l: torch.Tensor, fea_r: torch.Tensor):
"""
local cost volume
:param tile_plane: d, dx, dy
:param fea_l:
:param fea_r:
:return: local cost volume
"""
tile_d = tile_plane[:, :1, :, :]
tile_dx = tile_plane[:, 1:2, :, :]
tile_dy = tile_plane[:, 2:3, :, :]
local_cv = []
for k in range(-1, 2):
local_d = to_plane(tile_d + k, tile_dx, tile_dy, size=4) # Eq. (5)
warped_fea_r = warp(fea_r, local_d)
cv = torch.norm(fea_l - warped_fea_r, 1, 1, keepdim=True)
cv = self.unshuffle(cv)
local_cv.append(cv)
# local cost volume for all the disp hypothesis[B, 48, H/4, W/4]
local_cv = torch.cat(local_cv, 1)
return local_cv
def convbn(in_channel, out_channel, kernel_size, stride, pad, dilation):
# no bn
return nn.Sequential(
nn.Conv2d(
in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride,
padding=dilation if dilation > 1 else pad,
dilation=dilation,
)
)
class BasicBlock(nn.Module):
"""ResNet BasicBlock"""
expansion = 1
def __init__(self, c1, c2, s, downsample, p, d):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(
convbn(c1, c2, 3, s, p, d),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.conv2 = convbn(c2, c2, 3, 1, p, d)
self.stride = s
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out += x
return out
class TileUpdate0(nn.Module):
"""
Tile Update for a single resolution
forward input: fea duo from current resolution, tile hypothesis from current resolution
forward output: refined tile hypothesis and confidence (if available)
"""
def __init__(self, in_c, out_c, hid_c):
super(TileUpdate0, self).__init__()
self.tile_warping = TileWarping()
self.decrease = nn.Sequential(
nn.Conv2d(64, 16, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.conv0 = nn.Sequential(
nn.Conv2d(in_c, hid_c, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock0 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock1 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.lastconv = nn.Conv2d(hid_c, out_c, 3, 1, 1)
self.unshuffle = torch.nn.PixelUnshuffle(4)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, fea_r, current_hypothesis):
fea = self.unshuffle(torch.norm(fea_l, 1, 1, keepdim=True))
# local cost volume in Eq. (7)
current_tile_local_cv = self.tile_warping(current_hypothesis[:, :3, :, :], fea_l, fea_r)
current_tile_local_cv = self.decrease(torch.cat([fea, current_tile_local_cv], 1))
# a_l in Eq. (7)
aug_current_tile_hypothesis = torch.cat([current_hypothesis, current_tile_local_cv], 1)
# U_l
tile_hypothesis_update = self.conv0(aug_current_tile_hypothesis)
tile_hypothesis_update = self.resblock0(tile_hypothesis_update)
tile_hypothesis_update = self.resblock1(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = current_hypothesis + tile_hypothesis_update
refined_hypothesis[:, :1, :, :] = F.relu(refined_hypothesis[:, :1, :, :].clone())
return [refined_hypothesis]
class TileUpdate(nn.Module):
"""
Tile Update for a single resolution
forward input: fea duo from current resolution, tile hypothesis from current and previous resolution
forward output: refined tile hypothesis and confidence (if available)
"""
def __init__(self):
super(TileUpdate, self).__init__()
self.tile_warping = TileWarping()
self.decrease = nn.Sequential(
nn.Conv2d(64, 16, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.conv0 = nn.Sequential(
nn.Conv2d(64, 32, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock0 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock1 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.lastconv = nn.Conv2d(32, 34, 3, 1, 1)
self.unshuffle = torch.nn.PixelUnshuffle(4)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, fea_r, current_hypothesis, prev_hypothesis):
fea = self.unshuffle(torch.norm(fea_l, 1, 1, keepdim=True))
current_tile_local_cv = self.tile_warping(current_hypothesis[:, :3, :, :], fea_l, fea_r)
current_tile_local_cv = self.decrease(torch.cat([fea, current_tile_local_cv], 1))
up_prev_hypothesis = upsample(prev_hypothesis, 2)
up_prev_tile_local_cv = self.tile_warping(up_prev_hypothesis[:, :3, :, :], fea_l, fea_r)
up_prev_tile_local_cv = self.decrease(torch.cat([fea, up_prev_tile_local_cv], 1))
aug_hypothesis_set = torch.cat(
(current_hypothesis, current_tile_local_cv, up_prev_hypothesis, up_prev_tile_local_cv),
1,
)
tile_hypothesis_update = self.conv0(aug_hypothesis_set)
tile_hypothesis_update = self.resblock0(tile_hypothesis_update)
tile_hypothesis_update = self.resblock1(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
conf = tile_hypothesis_update[:, :2, :, :] # [:, 0, :, :] is for pre
prev_delta_hypothesis = tile_hypothesis_update[:, 2:18, :, :]
current_delta_hypothesis = tile_hypothesis_update[:, 18:34, :, :]
_, hypothesis_select_mask = torch.max(conf, dim=1, keepdim=True)
hypothesis_select_mask = hypothesis_select_mask.float()
update_current_hypothesis = current_hypothesis + current_delta_hypothesis
update_current_hypothesis[:, :1, :, :] = F.relu(
update_current_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
update_prev_hypothesis = up_prev_hypothesis + prev_delta_hypothesis
update_prev_hypothesis[:, :1, :, :] = F.relu(
update_prev_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
refined_hypothesis = hypothesis_select_mask * update_current_hypothesis + (
1 - hypothesis_select_mask) * update_prev_hypothesis
update_current_hypothesis_and_conf = torch.cat((update_current_hypothesis, conf[:, 1:2, :, :]), 1)
update_prev_hypothesis_and_conf = torch.cat((update_prev_hypothesis, conf[:, :1, :, :]), 1)
return [
refined_hypothesis,
update_current_hypothesis_and_conf, update_prev_hypothesis_and_conf,
]
class PostTileUpdate(nn.Module):
"""
Post Tile Update for a single resolution: decrease tile size, e.g. upsampling tile hypothesis, and do tile warping
forward input: fea duo from the largest resolution, tile hypothesis from previous resolution
forward output: refined tile hypothesis
"""
def __init__(self, in_c, out_c, hid_c, resblk_num):
super(PostTileUpdate, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, hid_c, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(hid_c, hid_c, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
resblks = nn.ModuleList()
for i in range(resblk_num):
dilation = 3 if i == 1 else 1
resblks.append(
nn.Sequential(
BasicBlock(hid_c, hid_c, s=1, p=1, downsample=None, d=dilation),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
)
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, kernel_size=3, padding=1)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, prev_hypothesis):
guided_prev_tile_hypothesis = torch.cat([fea_l, prev_hypothesis], 1)
tile_hypothesis_update = self.conv1(guided_prev_tile_hypothesis)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = prev_hypothesis + tile_hypothesis_update
refined_hypothesis[:, :1, :, :] = F.relu(refined_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
return refined_hypothesis
class FinalTileUpdate(nn.Module):
"""
Final Tile Update: only predicts disp
forward input: fea duo from the largest resolution, tile hypothesis from previous resolution
forward output: refined tile hypothesis
"""
def __init__(self, in_c, out_c, hid_c, resblk_num):
super(FinalTileUpdate, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, hid_c, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(hid_c, hid_c, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
resblks = nn.ModuleList()
for _ in range(resblk_num):
resblks.append(
nn.Sequential(
BasicBlock(hid_c, hid_c, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
)
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, kernel_size=3, padding=1)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, prev_hypothesis):
guided_prev_tile_hypothesis = torch.cat([fea_l, prev_hypothesis], 1)
tile_hypothesis_update = self.conv1(guided_prev_tile_hypothesis)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = prev_hypothesis[:, 0, :, :].unsqueeze(1) + tile_hypothesis_update
refined_hypothesis = F.relu(refined_hypothesis.clone()) # Force disp to be positive
return refined_hypothesis
@MODELS.register_module()
class TilePropagation(nn.Module):
"""
    Tile hypothesis propagation: coarse-to-fine refinement of the tile pyramid
    input: dual feature pyramid and the initial tile hypothesis pyramid
    output: refined tile hypothesis pyramids (training) or the final full-resolution disparity (inference)
"""
def __init__(self):
super(TilePropagation, self).__init__()
self.tile_update0 = TileUpdate0(32, 16, 32) # 1/16 tile refine
self.tile_update1 = TileUpdate() # 1/8 tile refine
self.tile_update2 = TileUpdate() # 1/4 tile refine
self.tile_update3 = TileUpdate() # 1/2 tile refine
self.tile_update4 = TileUpdate() # 1/1 tile refine
self.tile_update4_1 = PostTileUpdate(40, 16, 32, 4) # 1/1 tile refine
self.tile_update5 = PostTileUpdate(32, 16, 32, 4) # 2/1 tile refine tile_size=2
self.tile_update6 = FinalTileUpdate(32, 3, 16, 2) # 2/1 tile refine tile_size=1
# For final disparity and each supervision signal to be positive
# self.relu = nn.ReLU(inplace=True)
def forward(self, left_fea_pyramid, right_fea_pyramid, init_tile_pyramid):
refined_tile16x = self.tile_update0(left_fea_pyramid[0], right_fea_pyramid[0], init_tile_pyramid[0])
tile_update8x = self.tile_update1(left_fea_pyramid[1], right_fea_pyramid[1], init_tile_pyramid[1],
refined_tile16x[0])
tile_update4x = self.tile_update2(left_fea_pyramid[2], right_fea_pyramid[2], init_tile_pyramid[2],
tile_update8x[0])
tile_update2x = self.tile_update3(left_fea_pyramid[3], right_fea_pyramid[3], init_tile_pyramid[3],
tile_update4x[0])
tile_update1x = self.tile_update4(left_fea_pyramid[4], right_fea_pyramid[4], init_tile_pyramid[4],
tile_update2x[0])
refined_tile1x = self.tile_update4_1(left_fea_pyramid[2], tile_update1x[0])
refined_tile05x = self.tile_update5(left_fea_pyramid[3], upsample(refined_tile1x, 1))
refined_tile025x = self.tile_update6(left_fea_pyramid[4], upsample(refined_tile05x, 1))
final_disp = refined_tile025x[:, 0:1, :, :]
if self.training:
            # During training, upsample disparities using the slant (planar) equation,
            # and upsample dx and dy using nearest interpolation
up_plane_16x = upsample(refined_tile16x[0], 16, 64)
up_plane_8x_cur = upsample(tile_update8x[1], 8, 32)
up_plane_8x_pre = upsample(tile_update8x[2], 8, 32)
up_plane_4x_cur = upsample(tile_update4x[1], 4, 16)
up_plane_4x_pre = upsample(tile_update4x[2], 4, 16)
up_plane_2x_cur = upsample(tile_update2x[1], 2, 8)
up_plane_2x_pre = upsample(tile_update2x[2], 2, 8)
up_plane_1x_cur = upsample(tile_update1x[1], 1, 4)
up_plane_1x_pre = upsample(tile_update1x[2], 1, 4)
up_plane_1x = upsample(refined_tile1x, 1, 4)
up_plane_05x = upsample(refined_tile05x, 1, 2)
prop_disp_pyramid = [
up_plane_16x[:, :1, :, :],
up_plane_8x_cur[:, :1, :, :],
up_plane_8x_pre[:, :1, :, :],
up_plane_4x_cur[:, :1, :, :],
up_plane_4x_pre[:, :1, :, :],
up_plane_2x_cur[:, :1, :, :],
up_plane_2x_pre[:, :1, :, :],
up_plane_1x_cur[:, :1, :, :],
up_plane_1x_pre[:, :1, :, :],
up_plane_1x[:, :1, :, :],
up_plane_05x[:, :1, :, :],
refined_tile025x[:, :1, :, :]
]
            # WARNING: each pyramid must keep a consistent prev/cur and resolution
            # ordering, since the supervision does not see the order explicitly
dx_pyramid = [
up_plane_16x[:, 1:2, :, :],
up_plane_8x_cur[:, 1:2, :, :],
up_plane_8x_pre[:, 1:2, :, :],
up_plane_4x_cur[:, 1:2, :, :],
up_plane_4x_pre[:, 1:2, :, :],
up_plane_2x_cur[:, 1:2, :, :],
up_plane_2x_pre[:, 1:2, :, :],
up_plane_1x_cur[:, 1:2, :, :],
up_plane_1x_pre[:, 1:2, :, :],
up_plane_1x[:, 1:2, :, :],
up_plane_05x[:, 1:2, :, :],
refined_tile025x[:, 1:2, :, :]
]
dy_pyramid = [
up_plane_16x[:, 2:3, :, :],
up_plane_8x_cur[:, 2:3, :, :],
up_plane_8x_pre[:, 2:3, :, :],
up_plane_4x_cur[:, 2:3, :, :],
up_plane_4x_pre[:, 2:3, :, :],
up_plane_2x_cur[:, 2:3, :, :],
up_plane_2x_pre[:, 2:3, :, :],
up_plane_1x_cur[:, 2:3, :, :],
up_plane_1x_pre[:, 2:3, :, :],
up_plane_1x[:, 2:3, :, :],
up_plane_05x[:, 2:3, :, :],
refined_tile025x[:, 2:3, :, :],
]
w_pyramid = [
up_plane_8x_cur[:, 3:4, :, :],
up_plane_8x_pre[:, 3:4, :, :],
up_plane_4x_cur[:, 3:4, :, :],
up_plane_4x_pre[:, 3:4, :, :],
up_plane_2x_cur[:, 3:4, :, :],
up_plane_2x_pre[:, 3:4, :, :],
up_plane_1x_cur[:, 3:4, :, :],
up_plane_1x_pre[:, 3:4, :, :],
]
outputs = {
"prop_disp_pyramid": prop_disp_pyramid,
"dx_pyramid": dx_pyramid,
"dy_pyramid": dy_pyramid,
"w_pyramid": w_pyramid,
}
return outputs
else:
return final_disp
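
# --- Illustrative sketch (not part of the original repository) ---
# A minimal note on the tile-hypothesis channel layout assumed by the channel
# slicing above: channel 0 is disparity, 1 is dx, 2 is dy, 3 is the confidence w.
# The tensor below is a hypothetical stand-in, not a real network output.
if __name__ == "__main__":
    hyp = torch.randn(1, 4, 30, 40)  # (N, C, H, W) tile plane hypothesis
    disp, dx, dy, w = hyp.split(1, dim=1)  # matches the [:, i:i+1] slices above
    assert disp.shape == (1, 1, 30, 40)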
| CODD-main | model/stereo/hitnet/propagation.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .motion import Motion
from .others import GTMotion
__all__ = ["Motion", "GTMotion"]
| CODD-main | model/motion/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from pytorch3d.renderer import (
PerspectiveCameras,
PointsRasterizationSettings,
PointsRenderer,
PointsRasterizer,
AlphaCompositor,
)
# Data structures and functions for rendering
from pytorch3d.structures import Pointclouds
from utils import compute_valid_mask, compute_gt_disp_change
from .raft3d.projective_ops import inv_project
class PointsRendererWithDepth(PointsRenderer):
"""Augment PointsRenderer to output depth"""
def __init__(self, rasterizer, compositor) -> None:
super(PointsRendererWithDepth, self).__init__(rasterizer, compositor)
def forward(self, point_clouds, **kwargs) -> torch.Tensor:
fragments = self.rasterizer(point_clouds, **kwargs)
        # Construct weights based on the distance of a point to the true point.
        # However, this could be done differently: e.g., the weights could be
        # predicted rather than computed as a function of the distances.
r = self.rasterizer.raster_settings.radius
dists2 = fragments.dists.permute(0, 3, 1, 2)
weights = 1 - dists2 / (r * r)
images = self.compositor(fragments.idx.long().permute(0, 3, 1, 2), weights,
point_clouds.features_packed().permute(1, 0), **kwargs)
zbuf = fragments.zbuf.permute(0, 3, 1, 2)
return images, F.relu(zbuf)
BF_DEFAULT = 1050 * 0.2 # baseline * focal length
@MODELS.register_module()
class Motion(nn.Module):
def __init__(self, raft3d=None, ds_scale=4, iters=16, loss=None):
"""motion network
Args:
raft3d (dict, optional): config for raft3d. Defaults to None.
ds_scale (int, optional): low res scale. Defaults to 4.
iters (int, optional): optimization iterations. Defaults to 16.
loss (dict, optional): config for losses. Defaults to None.
"""
super(Motion, self).__init__()
self.renderer = PointsRendererWithDepth(
rasterizer=PointsRasterizer(),
compositor=AlphaCompositor(),
)
self.ds_scale = ds_scale
self.iters = iters
# scene flow estimator
self.raft3d = MODELS.build(raft3d)
if loss is not None:
self.loss = builder_oss.build_loss(loss)
else:
self.loss = None
n_parameters = sum(p.numel() for n, p in self.named_parameters())
print(
"PARAM STATUS: total number of parameters %.3fM in motion network"
% (n_parameters / 1000 ** 2)
)
def transform_and_project(self, Ts, depth, feat, intrinsics, radius):
"""transform the previous state and project into the current frame
Args:
Ts (Tensor): NxHxW, transformation
depth (Tensor): NxHxW
feat (Tensor): NxCxHxW, feature
intrinsics (intrinsics): Nx4, fx, fy, cx, cy
radius (float): rendering radius, increase to aggregate more points
Returns:
Tensor, Tensor: aligned feature and depth
"""
# transform
X1 = inv_project(depth, intrinsics)
X2_est = Ts * X1
# create pc
B = intrinsics.shape[0]
verts = X2_est.reshape(B, -1, 3).contiguous()
C = feat.shape[1]
feat = feat.permute(0, 2, 3, 1).reshape(B, -1, C)
verts[..., 0] = verts[..., 0] * -1
verts[..., 1] = verts[..., 1] * -1
point_cloud = Pointclouds(points=verts, features=feat)
# project
h, w = depth.shape[-2:]
cameras = PerspectiveCameras(
device=depth.device,
principal_point=intrinsics[:, -2:].float(),
focal_length=intrinsics[:, :2].float(),
image_size=((h, w),),
in_ndc=False,
)
# The radius (in NDC units) of the disk to be rasterized.
raster_settings = PointsRasterizationSettings(image_size=(h, w), radius=1.0 / h * radius, points_per_pixel=8)
self.renderer.rasterizer.cameras = cameras
self.renderer.rasterizer.raster_settings = raster_settings
feat_warp, zbuf = self.renderer(
point_cloud,
gamma=(1e-4,),
bg_col=torch.tensor([0.0] * 9, dtype=torch.float32, device=depth.device),
eps=1e-5,
)
return feat_warp, zbuf[:, 0].unsqueeze(1)
def forward(self, state, outputs, img_metas, train_mode=False, **kwargs):
"""
Args:
state (dict): memory states
outputs (dict): outputs
img_metas (dict): dataset metas
train_mode (bool, optional): if True, return auxiliary outputs from raft3d. Defaults to False.
"""
img_curr = outputs["left_img"]
if "memory" not in state:
self.raft3d(
img_curr, None, None, None, state, outputs, train_mode=train_mode
)
return
else:
B = outputs["pred_disp"].shape[0]
intrinsics = img_metas[0]["intrinsics"]
intrinsics = (
torch.tensor(intrinsics).to(outputs["pred_disp"].device).unsqueeze(0).expand(B, -1)
)
depth_scale = BF_DEFAULT / intrinsics[0, 0]
img_prev, feat_prev, disp_prev = state["memory"]
disp_curr = outputs["pred_disp"]
depth_prev = (
depth_scale * intrinsics[0, 0] / (disp_prev + 1e-5)
) # convert to depth (up to scale)
depth_prev = torch.clip(depth_prev, max=BF_DEFAULT, min=0).squeeze(1) # avoid inf in depth
depth_curr = depth_scale * intrinsics[0, 0] / (disp_curr + 1e-5)
depth_curr = torch.clip(depth_curr, max=BF_DEFAULT, min=0).squeeze(1)
# Raft3D takes depth as NxHxW, not Nx1xHxW
self.raft3d(
img_curr,
depth_prev,
depth_curr,
intrinsics,
state,
outputs,
iters=self.iters,
train_mode=train_mode,
)
Ts = outputs["Ts"]
# full res depth warping
w = depth_curr.shape[-1]
flow2d_est = outputs["flow2d_est_induced"].permute(0, 3, 1, 2) # N3HW
confidence = outputs["weight"] # N3HW
to_proj = torch.cat([img_prev, flow2d_est, confidence], dim=1)
warped, depth_warp = self.transform_and_project(
Ts, depth_prev, to_proj, intrinsics, radius=2.0
)
img_warp, flow_warp, confidence_warp = (warped[:, :3], warped[:, 3:6], warped[:, 6:])
disp_warp = (
depth_scale * intrinsics[0, 0] / (depth_warp + 1e-5)
) # convert back to disp
disp_warp[disp_warp > w] = 0.0
# low res feature warping
Ts = Ts[:, self.ds_scale // 2 - 1:: self.ds_scale, self.ds_scale // 2 - 1:: self.ds_scale]
depth_prev = depth_prev[:, self.ds_scale // 2 - 1:: self.ds_scale, self.ds_scale // 2 - 1:: self.ds_scale]
intrinsics = (
intrinsics.float() / self.ds_scale
) # adjust focal length here so points can be projected, otherwise focal too long
feat_warp, _ = self.transform_and_project(Ts, depth_prev, feat_prev, intrinsics, radius=4.0)
if len(disp_warp.shape) == 3:
disp_warp = disp_warp.unsqueeze(1)
state["memory"] = [img_warp, feat_warp, confidence_warp, disp_warp, flow_warp]
return
def losses(self, loss, outputs, idx, state, meta):
gt_disp_prev = state["gt_disp"][-2]
gt_disp_curr = state["gt_disp"][-1]
gt_flow = state["gt_flow"][-2]
if len(state["gt_disp_change"]) != 0:
gt_disp_change = state["gt_disp_change"][-2]
elif len(state["gt_flow_occ"]) != 0: # no gt disp change
gt_flow_occ_prev = state["gt_flow_occ"][-2] # True for occluded
gt_disp_change, gt_disp_curr_warp = compute_gt_disp_change(gt_flow_occ_prev, gt_disp_prev,
gt_disp_curr, gt_flow)
elif len(state["gt_disp2"]) != 0:
gt_disp2 = state["gt_disp2"][-2] # this is in previous frame
gt_disp_change = gt_disp2 - gt_disp_prev
gt_disp_change[gt_disp2 <= 0.0] = BF_DEFAULT
gt_disp_change[gt_disp_prev <= 0.0] = BF_DEFAULT
else:
raise Exception("No disp change provided to train.")
gt_flow = state["gt_flow"][-2] # Nx2xHxW
        # the network estimates inverse depth, so the disparity change is
        # scaled by 1 / BF_DEFAULT for supervision
        flowz = gt_disp_change / BF_DEFAULT
mask = state["mask_disp"][-2]
mask &= compute_valid_mask(
gt_disp_prev, meta, gt_flow_prev=gt_flow, gt_disp_change=gt_disp_change
)
flowxyz = torch.cat([gt_flow, flowz], dim=1).permute(0, 2, 3, 1) # NxHxWx3
flow2d_est = outputs["flow2d_est"]
flow2d_rev = outputs["flow2d_rev"]
self.loss(flow2d_est, flow2d_rev, flowxyz, mask, idx, loss)
def freeze(self):
self.eval()
self.loss.eval()
for param in self.parameters():
param.requires_grad = False
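
# --- Illustrative sketch (not part of the original repository) ---
# The disparity <-> depth conversion used in Motion.forward reduces to
# depth = BF_DEFAULT / disparity, since depth_scale = BF_DEFAULT / fx cancels
# the fx factor; fx below is an arbitrary placeholder focal length.
if __name__ == "__main__":
    disp = torch.tensor([1.0, 2.0, 10.0])
    fx = 720.0
    depth_scale = BF_DEFAULT / fx
    depth = torch.clip(depth_scale * fx / (disp + 1e-5), max=BF_DEFAULT, min=0)
    assert torch.allclose(depth, BF_DEFAULT / (disp + 1e-5))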
| CODD-main | model/motion/motion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
from lietorch import SE3
from mmseg.models.builder import MODELS
from utils import flow_warp
@MODELS.register_module()
class GTMotion(nn.Module):
def __init__(self):
super(GTMotion, self).__init__()
self.loss = None
def forward(self, state, outputs, img_metas, train_mode=False, **kwargs):
if "memory" not in state:
return
else:
img_prev, feat_prev, disp_prev = state["memory"]
            gt_disp_change = state["gt_disp_change"][-1]  # latest disp change (Into_Past)
            gt_flow = state["gt_flow"][-1]  # latest flow (Into_Past)
            gt_flow_occ = state["gt_flow_occ"][-1]  # latest flow occlusion (Into_Past)
# pad gt size so dimension matches
batch_size = disp_prev.shape[0]
h, w = disp_prev.shape[-2:]
h_pad, w_pad = h - gt_flow.shape[-2], w - gt_flow.shape[-1]
gt_flow = torch.nn.functional.pad(gt_flow, (0, w_pad, 0, h_pad))
gt_disp_change = torch.nn.functional.pad(
gt_disp_change, (0, w_pad, 0, h_pad)
)
gt_flow_occ = torch.nn.functional.pad(gt_flow_occ, (0, w_pad, 0, h_pad))
to_warp = torch.cat([img_prev, disp_prev.unsqueeze(1)], dim=1)
to_warp, valid = flow_warp(
to_warp, gt_flow, padding_mode="zeros", mode="nearest"
)
to_warp[~valid] = 0.0
to_warp[gt_flow_occ.expand_as(to_warp) > 0] = 0.0
img_warp = to_warp[:, :3]
disp_warp = to_warp[:, -1]
disp_warp = disp_warp - gt_disp_change
disp_warp[~valid[:, 0].unsqueeze(1)] = 0.0 # mask out invalid region
disp_warp[gt_flow_occ > 0] = 0.0
feat_warp, valid = flow_warp(
feat_prev,
gt_flow[:, :, 2::4, 2::4],
padding_mode="zeros",
mode="nearest",
)
feat_warp[~valid] = 0.0
gt_flow = torch.cat([gt_flow, gt_disp_change], dim=1)
gt_confidence = torch.ones_like(gt_flow, device=gt_flow.device)
state["memory"] = [img_warp, feat_warp, gt_confidence, disp_warp, gt_flow]
# dummy outputs
outputs["Ts"] = SE3.Identity(batch_size, h, w, device=gt_flow.device)
return
| CODD-main | model/motion/others.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import lietorch_extras
import torch
import torch.nn.functional as F
from lietorch import SE3
from . import projective_ops as pops
class SE3BuilderInplace(torch.autograd.Function):
@staticmethod
def forward(ctx, se3, ae, pts, target, weight, intrinsics, radius=32):
""" Build linear system Hx = b """
ctx.radius = radius
ctx.save_for_backward(se3, ae, pts, target, weight, intrinsics)
H, b = lietorch_extras.se3_build_inplace(
se3, ae, pts, target, weight, intrinsics, radius)
return H, b
@staticmethod
def backward(ctx, grad_H, grad_b):
se3, ae, pts, target, weight, intrinsics = ctx.saved_tensors
ae_grad, target_grad, weight_grad = lietorch_extras.se3_build_inplace_backward(
se3, ae, pts, target, weight, intrinsics, grad_H, grad_b, ctx.radius)
return None, ae_grad, None, target_grad, weight_grad, None
class SE3Builder(torch.autograd.Function):
@staticmethod
def forward(ctx, attn, se3, pts, target, weight, intrinsics, radius=32):
""" Build linear system Hx = b """
ctx.radius = radius
ctx.save_for_backward(attn, se3, pts, target, weight, intrinsics)
H, b = lietorch_extras.se3_build(
attn, se3, pts, target, weight, intrinsics, radius)
return H, b
@staticmethod
def backward(ctx, grad_H, grad_b):
attn, se3, pts, target, weight, intrinsics = ctx.saved_tensors
grad_H = grad_H.contiguous()
grad_b = grad_b.contiguous()
attn_grad, target_grad, weight_grad = lietorch_extras.se3_build_backward(
attn, se3, pts, target, weight, intrinsics, grad_H, grad_b, ctx.radius)
return attn_grad, None, None, target_grad, weight_grad, None
class SE3Solver(torch.autograd.Function):
@staticmethod
def forward(ctx, H, b):
ctx.save_for_backward(H, b)
x, = lietorch_extras.cholesky6x6_forward(H, b)
return x
@staticmethod
def backward(ctx, grad_x):
H, b = ctx.saved_tensors
grad_x = grad_x.contiguous()
grad_H, grad_b = lietorch_extras.cholesky6x6_backward(H, b, grad_x)
return grad_H, grad_b
class CholeskySolver(torch.autograd.Function):
@staticmethod
def forward(ctx, H, b):
# don't crash training if cholesky decomp fails
try:
U = torch.cholesky(H)
xs = torch.cholesky_solve(b, U)
ctx.save_for_backward(U, xs)
ctx.failed = False
except Exception as e:
print(e)
ctx.failed = True
xs = torch.zeros_like(b)
return xs
@staticmethod
def backward(ctx, grad_x):
if ctx.failed:
return None, None
U, xs = ctx.saved_tensors
dz = torch.cholesky_solve(grad_x, U)
dH = -torch.matmul(xs, dz.transpose(-1, -2))
return dH, dz
def block_solve(H, b, ep=0.1, lm=0.0001):
""" solve normal equations """
B, N, _, D, _ = H.shape
I = torch.eye(D).to(H.device)
H = H + (ep + lm * H) * I
H = H.permute(0, 1, 3, 2, 4)
H = H.reshape(B, N * D, N * D)
b = b.reshape(B, N * D, 1)
x = CholeskySolver.apply(H, b)
return x.reshape(B, N, D)
def attention_matrix(X):
""" compute similiarity matrix between all pairs of embeddings """
batch, ch, ht, wd = X.shape
X = X.view(batch, ch, ht * wd) / 8.0
dist = -torch.sum(X ** 2, dim=1).view(batch, 1, ht * wd) + \
-torch.sum(X ** 2, dim=1).view(batch, ht * wd, 1) + \
2 * torch.matmul(X.transpose(1, 2), X)
A = torch.sigmoid(dist)
return A.view(batch, ht, wd, ht, wd)
def step(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
""" dense gauss newton update """
pts = pops.inv_project(depth, intrinsics)
pts = pts.permute(0, 3, 1, 2).contiguous()
attn = attention_matrix(ae)
se3 = Ts.matrix().permute(0, 3, 4, 1, 2).contiguous()
# build the linear system
H, b = SE3Builder.apply(attn, se3, pts, target, weight, intrinsics)
I = torch.eye(6, device=H.device)[..., None, None]
H = H + (lm * H + ep) * I # damping
dx = SE3Solver.apply(H, b)
dx = dx.permute(0, 3, 4, 1, 2).squeeze(-1).contiguous()
Ts = SE3.exp(dx) * Ts
return Ts
def step_inplace(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
""" dense gauss newton update with computing similiarity matrix """
pts = pops.inv_project(depth, intrinsics)
pts = pts.permute(0, 3, 1, 2).contiguous()
# tensor representation of SE3
se3 = Ts.data.permute(0, 3, 1, 2).contiguous()
ae = ae / 8.0
# build the linear system
H, b = SE3BuilderInplace.apply(se3, ae, pts, target, weight, intrinsics)
I = torch.eye(6, device=H.device)[..., None, None]
H = H + (lm * H + ep) * I # damping
dx = SE3Solver.apply(H, b)
dx = dx.permute(0, 3, 4, 1, 2).squeeze(-1).contiguous()
Ts = SE3.exp(dx) * Ts
return Ts
def cvx_upsample(data, mask):
""" convex combination upsampling (see RAFT) """
batch, ht, wd, dim = data.shape
data = data.permute(0, 3, 1, 2)
mask = mask.view(batch, 1, 9, 8, 8, ht, wd)
mask = torch.softmax(mask, dim=2)
up_data = F.unfold(data, [3, 3], padding=1)
up_data = up_data.view(batch, dim, 9, 1, 1, ht, wd)
up_data = torch.sum(mask * up_data, dim=2)
up_data = up_data.permute(0, 4, 2, 5, 3, 1)
up_data = up_data.reshape(batch, 8 * ht, 8 * wd, dim)
return up_data
def upsample_se3(Ts, mask):
""" upsample a se3 field """
tau_phi = Ts.log()
return SE3.exp(cvx_upsample(tau_phi, mask))
def upsample_flow(flow, mask):
""" upsample a flow field """
flow = flow * torch.as_tensor([8.0, 8.0, 1.0]).to(flow.device)
return cvx_upsample(flow, mask)
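
# --- Illustrative sketch (not part of the original repository) ---
# Shape contract of cvx_upsample: the mask holds 9 * 8 * 8 = 576 convex-weight
# logits per coarse pixel, and the field is upsampled by a factor of 8.
if __name__ == "__main__":
    data = torch.randn(2, 30, 40, 3)    # (batch, ht, wd, dim) coarse field
    mask = torch.randn(2, 576, 30, 40)  # (batch, 9*8*8, ht, wd) weight logits
    up = cvx_upsample(data, mask)
    assert up.shape == (2, 240, 320, 3)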
| CODD-main | model/motion/raft3d/se3_field.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn as nn
import torch.nn.functional as F
# lietorch for tangent space backpropogation
from lietorch import SE3
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from . import projective_ops as pops
from . import se3_field
from .blocks.corr import CorrBlock
from .blocks.extractor import BasicEncoder
from .blocks.gru import ConvGRU
from .sampler_ops import depth_sampler
GRAD_CLIP = 0.01
class GradClip(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
o = torch.zeros_like(grad_x)
grad_x = torch.where(grad_x.abs() > GRAD_CLIP, o, grad_x)
grad_x = torch.where(torch.isnan(grad_x), o, grad_x)
return grad_x
class GradientClip(nn.Module):
def __init__(self):
super(GradientClip, self).__init__()
def forward(self, x):
return GradClip.apply(x)
class BasicUpdateBlock(nn.Module):
def __init__(self, hidden_dim=128, input_dim=128):
super(BasicUpdateBlock, self).__init__()
self.gru = ConvGRU(hidden_dim)
self.corr_enc = nn.Sequential(
nn.Conv2d(196, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 3 * 128, 1, padding=0),
)
self.flow_enc = nn.Sequential(
nn.Conv2d(9, 128, 7, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(128, 3 * 128, 1, padding=0),
)
self.ae = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 32, 1, padding=0),
GradientClip(),
)
self.delta = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 3, 1, padding=0),
GradientClip(),
)
self.weight = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 3, 1, padding=0),
nn.Sigmoid(),
GradientClip(),
)
self.mask = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64 * 9, 1, padding=0),
GradientClip(),
)
def forward(self, net, inp, corr, flow, twist, dz, upsample=True):
motion_info = torch.cat([flow, 10 * dz, 10 * twist], dim=-1)
motion_info = motion_info.clamp(-50.0, 50.0).permute(0, 3, 1, 2)
mot = self.flow_enc(motion_info)
cor = self.corr_enc(corr)
net = self.gru(net, inp, cor, mot)
ae = self.ae(net)
mask = self.mask(net)
delta = self.delta(net)
weight = self.weight(net)
return net, mask, ae, delta, weight
class ResizeConcatConv(nn.Module):
"""
Resize + concat + 1 layer conv
"""
def __init__(self, in_channels, out_channels=32):
super(ResizeConcatConv, self).__init__()
assert isinstance(in_channels, (list, tuple))
self.in_channels = in_channels
self.out_channels = out_channels
self.convs = nn.Sequential(
nn.Conv2d(sum(in_channels), self.out_channels, kernel_size=1, padding=0, stride=1, bias=False),
nn.ReLU(inplace=True),
)
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
upsampled_inputs = [
F.interpolate(
input=x, size=inputs[1].shape[2:], mode="bilinear", align_corners=True
)
for x in inputs
]
inputs = torch.cat(upsampled_inputs, dim=1)
outputs = self.convs(inputs)
return outputs
@MODELS.register_module()
class RAFT3D(nn.Module):
def __init__(self, cnet_cfg=None):
super(RAFT3D, self).__init__()
self.hidden_dim = hdim = 128
self.context_dim = cdim = 128
self.corr_levels = 4
self.corr_radius = 3
# feature network, context network, and update block
self.fnet = BasicEncoder(output_dim=128, norm_fn="instance")
if cnet_cfg is None:
self.cnet = FPN(output_dim=hdim + 3 * hdim)
else:
self.cnet = nn.Sequential(
builder_oss.build_backbone(cnet_cfg),
ResizeConcatConv(cnet_cfg["extra"]["stage4"]["num_channels"], 128 * 4),
)
if cnet_cfg.get('init_cfg', None) is not None:
self.cnet[0].init_weights()
self.update_block = BasicUpdateBlock(hidden_dim=hdim)
def initializer(self, image1):
"""Initialize coords and transformation maps"""
batch_size, ch, ht, wd = image1.shape
device = image1.device
y0, x0 = torch.meshgrid(torch.arange(ht // 8), torch.arange(wd // 8))
coords0 = torch.stack([x0, y0], dim=-1).float()
coords0 = coords0[None].repeat(batch_size, 1, 1, 1).to(device)
Ts = SE3.Identity(batch_size, ht // 8, wd // 8, device=device)
return Ts, coords0
def features_and_correlation(self, net_inp, fmap_prev, image_curr):
# extract features and build correlation volume
fmap_curr = self.fnet(image_curr)
corr_fn = CorrBlock(fmap_prev, fmap_curr, radius=self.corr_radius)
        # split the precomputed context features into GRU hidden state and input
net, inp = net_inp.split([128, 128 * 3], dim=1)
net = torch.tanh(net)
inp = torch.relu(inp)
return corr_fn, net, inp, fmap_curr
def forward(
self,
image_curr,
depth_prev,
depth_curr,
intrinsics,
state,
outputs,
iters=12,
train_mode=False,
):
"""Estimate optical flow between pair of frames"""
if "memory" not in state:
state["raft_feat"] = self.fnet(image_curr)
state["raft_netinp"] = self.cnet(image_curr)
return
else:
fmap_prev = state["raft_feat"]
net_inp = state["raft_netinp"]
Ts, coords0 = self.initializer(image_curr)
corr_fn, net, inp, fmap_curr = self.features_and_correlation(
net_inp, fmap_prev, image_curr
)
# intrinsics and depth at 1/8 resolution
intrinsics_r8 = intrinsics / 8.0
depth1_r8 = depth_prev[:, 3::8, 3::8]
depth2_r8 = depth_curr[:, 3::8, 3::8]
flow_est_list = []
flow_rev_list = []
for _ in range(iters):
Ts = Ts.detach()
coords1_xyz, _ = pops.projective_transform(Ts, depth1_r8, intrinsics_r8)
coords1, zinv_proj = coords1_xyz.split([2, 1], dim=-1)
zinv, _ = depth_sampler(1.0 / depth2_r8, coords1)
corr = corr_fn(coords1.permute(0, 3, 1, 2).contiguous())
flow = coords1 - coords0
dz = zinv.unsqueeze(-1) - zinv_proj
twist = Ts.log()
net, mask, ae, delta, weight = self.update_block(
net, inp, corr, flow, dz, twist
)
target = coords1_xyz.permute(0, 3, 1, 2) + delta
target = target.contiguous()
# Gauss-Newton step
# Ts = se3_field.step(Ts, ae, target, weight, depth1_r8, intrinsics_r8)
Ts = se3_field.step_inplace(Ts, ae, target, weight, depth1_r8, intrinsics_r8)
if train_mode:
flow2d_rev = target.permute(0, 2, 3, 1)[..., :2] - coords0
flow2d_rev = se3_field.cvx_upsample(8 * flow2d_rev, mask)
Ts_up = se3_field.upsample_se3(Ts, mask)
flow2d_est, flow3d_est, valid = pops.induced_flow(
Ts_up, depth_prev, intrinsics
)
flow_est_list.append(flow2d_est)
flow_rev_list.append(flow2d_rev)
if train_mode:
            outputs["flow2d_est"] = flow_est_list
            outputs["flow2d_rev"] = flow_rev_list
Ts_up = se3_field.upsample_se3(Ts, mask)
outputs["Ts"] = Ts_up
flow2d_est, _, _ = pops.induced_flow(Ts_up, depth_prev, intrinsics)
outputs["flow2d_est_induced"] = flow2d_est
weight = se3_field.cvx_upsample(weight.permute(0, 2, 3, 1), mask).permute(
0, 3, 1, 2
)
outputs["weight"] = weight
# update state
state["raft_feat"] = fmap_curr
state["raft_netinp"] = self.cnet(image_curr)
return
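
# --- Illustrative sketch (not part of the original repository) ---
# GradientClip passes activations through unchanged but zeroes (rather than
# rescales) any gradient entry whose magnitude exceeds GRAD_CLIP, as well as
# NaN entries.
if __name__ == "__main__":
    x = torch.ones(3, requires_grad=True)
    y = GradientClip()(x)
    y.backward(torch.tensor([0.005, 0.5, float("nan")]))
    assert torch.equal(x.grad, torch.tensor([0.005, 0.0, 0.0]))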
| CODD-main | model/motion/raft3d/raft3d.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .raft3d import RAFT3D
| CODD-main | model/motion/raft3d/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn.functional as F
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
""" Wrapper for grid_sample, uses pixel coordinates """
H, W = img.shape[-2:]
xgrid, ygrid = coords.split([1, 1], dim=-1)
xgrid = 2 * xgrid / (W - 1) - 1
ygrid = 2 * ygrid / (H - 1) - 1
grid = torch.cat([xgrid, ygrid], dim=-1)
img = F.grid_sample(img, grid, align_corners=True)
if mask:
mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
return img, mask.float()
return img
def depth_sampler(depths, coords):
depths_proj, valid = bilinear_sampler(depths[:, None], coords, mask=True)
return depths_proj.squeeze(dim=1), valid
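
# --- Illustrative sketch (not part of the original repository) ---
# bilinear_sampler takes pixel coordinates of shape (N, H', W', 2) in (x, y)
# order; an identity grid reproduces the input image.
if __name__ == "__main__":
    img = torch.arange(12.0).view(1, 1, 3, 4)
    ys, xs = torch.meshgrid(torch.arange(3.0), torch.arange(4.0))
    coords = torch.stack([xs, ys], dim=-1)[None]  # (1, 3, 4, 2) pixel coords
    out = bilinear_sampler(img, coords)
    assert torch.allclose(out, img)  # identity grid reproduces the image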
| CODD-main | model/motion/raft3d/sampler_ops.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
from .sampler_ops import *
MIN_DEPTH = 0.05
EPS = 1e-5
def project(Xs, intrinsics):
""" Pinhole camera projection """
X, Y, Z = Xs.unbind(dim=-1)
Z = Z + EPS
fx, fy, cx, cy = intrinsics[:, None, None].unbind(dim=-1)
x = fx * (X / Z) + cx
y = fy * (Y / Z) + cy
d = 1.0 / Z
coords = torch.stack([x, y, d], dim=-1)
return coords
def inv_project(depths, intrinsics):
""" Pinhole camera inverse-projection """
ht, wd = depths.shape[-2:]
fx, fy, cx, cy = \
intrinsics[:, None, None].unbind(dim=-1)
y, x = torch.meshgrid(
torch.arange(ht).to(depths.device).float(),
torch.arange(wd).to(depths.device).float())
X = depths * ((x - cx) / fx)
Y = depths * ((y - cy) / fy)
Z = depths
return torch.stack([X, Y, Z], dim=-1)
def projective_transform(Ts, depth, intrinsics):
""" Project points from I1 to I2 """
X0 = inv_project(depth, intrinsics)
X1 = Ts * X0
x1 = project(X1, intrinsics)
valid = (X0[..., -1] > MIN_DEPTH) & (X1[..., -1] > MIN_DEPTH)
return x1, valid.float()
def induced_flow(Ts, depth, intrinsics):
""" Compute 2d and 3d flow fields """
X0 = inv_project(depth, intrinsics)
X1 = Ts * X0
x0 = project(X0, intrinsics)
x1 = project(X1, intrinsics)
flow2d = x1 - x0
flow3d = X1 - X0
valid = (X0[..., -1] > MIN_DEPTH) & (X1[..., -1] > MIN_DEPTH)
return flow2d, flow3d, valid.float()
def backproject_flow3d(flow2d, depth0, depth1, intrinsics):
""" compute 3D flow from 2D flow + depth change """
ht, wd = flow2d.shape[0:2]
fx, fy, cx, cy = \
intrinsics[None].unbind(dim=-1)
y0, x0 = torch.meshgrid(
torch.arange(ht).to(depth0.device).float(),
torch.arange(wd).to(depth0.device).float())
x1 = x0 + flow2d[..., 0]
y1 = y0 + flow2d[..., 1]
X0 = depth0 * ((x0 - cx) / fx)
Y0 = depth0 * ((y0 - cy) / fy)
Z0 = depth0
X1 = depth1 * ((x1 - cx) / fx)
Y1 = depth1 * ((y1 - cy) / fy)
Z1 = depth1
flow3d = torch.stack([X1 - X0, Y1 - Y0, Z1 - Z0], dim=-1)
return flow3d
def backproject_flow2d_to_pts(flow2d, z0, intrinsics):
"""
flow2d: NHW3
z0: NHW
intrinsics: N4
"""
ht, wd = flow2d.shape[1:3]
fx, fy, cx, cy = intrinsics.unbind(dim=-1)
y0, x0 = torch.meshgrid( # HW
torch.arange(ht).to(z0.device).float(),
torch.arange(wd).to(z0.device).float())
y0 = y0[None].expand_as(z0)
x0 = x0[None].expand_as(z0)
x1 = x0 + flow2d[..., 0]
y1 = y0 + flow2d[..., 1]
z1 = z0 + flow2d[..., 2]
depth1 = fx.unsqueeze(-1).unsqueeze(-1) / (z1 + EPS)
X1 = depth1 * ((x1 - cx.unsqueeze(-1).unsqueeze(-1)) / fx.unsqueeze(-1).unsqueeze(-1))
Y1 = depth1 * ((y1 - cy.unsqueeze(-1).unsqueeze(-1)) / fy.unsqueeze(-1).unsqueeze(-1))
Z1 = depth1
return torch.stack([X1, Y1, Z1], dim=0)
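
# --- Illustrative sketch (not part of the original repository) ---
# project() maps camera-frame points back to (x, y, 1/Z) pixel coordinates,
# inverting inv_project() up to the EPS guard on Z; the intrinsics below are
# arbitrary placeholders in (fx, fy, cx, cy) order.
if __name__ == "__main__":
    intrinsics = torch.tensor([[320.0, 320.0, 160.0, 120.0]])
    depths = torch.full((1, 240, 320), 5.0)
    pts = inv_project(depths, intrinsics)  # (1, H, W, 3) camera-frame points
    coords = project(pts, intrinsics)      # (1, H, W, 3): x, y, inverse depth
    assert coords.shape == (1, 240, 320, 3)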
| CODD-main | model/motion/raft3d/projective_ops.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import lietorch_extras
import torch
import torch.nn.functional as F
class CorrSampler(torch.autograd.Function):
""" Index from correlation pyramid """
@staticmethod
def forward(ctx, volume, coords, radius):
ctx.save_for_backward(volume, coords)
ctx.radius = radius
corr, = lietorch_extras.corr_index_forward(volume, coords, radius)
return corr
@staticmethod
def backward(ctx, grad_output):
volume, coords = ctx.saved_tensors
grad_output = grad_output.contiguous()
grad_volume, = lietorch_extras.corr_index_backward(volume, coords, grad_output, ctx.radius)
return grad_volume, None, None
class CorrBlock:
def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
self.num_levels = num_levels
self.radius = radius
self.corr_pyramid = []
# all pairs correlation
corr = CorrBlock.corr(fmap1, fmap2)
batch, h1, w1, h2, w2 = corr.shape
corr = corr.reshape(batch * h1 * w1, 1, h2, w2)
for i in range(self.num_levels):
self.corr_pyramid.append(
corr.view(batch, h1, w1, h2 // 2 ** i, w2 // 2 ** i))
corr = F.avg_pool2d(corr, 2, stride=2)
return
def __call__(self, coords):
out_pyramid = []
bz, _, ht, wd = coords.shape
for i in range(self.num_levels):
corr = CorrSampler.apply(self.corr_pyramid[i], coords / 2 ** i, self.radius)
out_pyramid.append(corr.view(bz, -1, ht, wd))
return torch.cat(out_pyramid, dim=1)
@staticmethod
def corr(fmap1, fmap2):
batch, dim, ht, wd = fmap1.shape
fmap1 = fmap1.view(batch, dim, ht * wd) / 4.0
fmap2 = fmap2.view(batch, dim, ht * wd) / 4.0
corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
return corr.view(batch, ht, wd, ht, wd)
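
# --- Illustrative sketch (not part of the original repository) ---
# CorrBlock.corr builds the all-pairs correlation volume; indexing the pyramid
# via __call__ additionally requires the lietorch_extras CUDA extension.
if __name__ == "__main__":
    f1 = torch.randn(1, 64, 12, 16)
    f2 = torch.randn(1, 64, 12, 16)
    vol = CorrBlock.corr(f1, f2)
    assert vol.shape == (1, 12, 16, 12, 16)  # correlation of every pixel pair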
| CODD-main | model/motion/raft3d/blocks/corr.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import time
import numpy as np
import scipy.sparse
import torch
import torch.nn.functional as F
from sksparse import cholmod
class GridCholeskySolver(torch.autograd.Function):
@staticmethod
def forward(ctx, chols, J, w, b):
""" Solve linear system """
B, H, W, M, N = J.shape
D = b.shape[-1]
bs = b.detach().reshape(B, -1, D).cpu().numpy()
xs = []
for i in range(len(chols)):
xs += [chols[i](bs[i])]
xs = np.stack(xs).astype(np.float32)
xs = torch.from_numpy(xs).to(J.device)
xs = xs.view(B, H, W, N // 4, D)
ctx.chols = chols
ctx.save_for_backward(xs, J, w, b)
return xs
@staticmethod
def backward(ctx, grad_output):
xs, J, w, b = ctx.saved_tensors
B, H, W, M, N = J.shape
D = b.shape[-1]
gs = grad_output.reshape(B, -1, D).cpu().numpy()
chols = ctx.chols
dz = []
for i in range(len(chols)):
dz += [chols[i](gs[i])]
dz = np.stack(dz, axis=0).astype(np.float32)
dz = torch.from_numpy(dz).to(J.device).view(*xs.shape)
J = GridFactor(A=J, w=w)
grad_J = torch.matmul(-w[..., None] * J.A(dz), J._unfold(xs).transpose(-1, -2)) + \
torch.matmul(-w[..., None] * J.A(xs), J._unfold(dz).transpose(-1, -2))
grad_w = -torch.sum(J.A(xs) * J.A(dz), -1)
return None, grad_J, grad_w, dz
sym_factor = None
sym_shape = None
class GridFactor:
""" Generalized grid factors """
def __init__(self, A=None, w=None):
self.factors = []
self.weights = []
self.residuals = []
self.chols = None
self.Af = A
self.wf = w
def _build_factors(self):
self.Af = torch.cat(self.factors, dim=3)
self.wf = torch.cat(self.weights, dim=3)
def add_factor(self, Js, ws=None, rs=None, ftype='u'):
""" Add factor to graph """
B, H, W, M, N = Js[0].shape
device = Js[0].device
A = torch.zeros([B, H, W, M, N, 2, 2]).to(device)
w = torch.zeros([B, H, W, M]).to(device)
# unary factor
if ftype == 'u':
A[..., 0, 0] = Js[0]
w[:] = ws[:]
# horizontal pairwise factor
elif ftype == 'h':
A[..., 0, 0] = Js[0]
A[..., 0, 1] = Js[1]
w[:, :, :-1, :] = ws[:, :, :-1, :]
        # vertical pairwise factor
elif ftype == 'v':
A[..., 0, 0] = Js[0]
A[..., 1, 0] = Js[1]
w[:, :-1, :, :] = ws[:, :-1, :, :]
A = A.view(B, H, W, M, 2 * 2 * N)
self.factors.append(A)
self.weights.append(w)
if rs is not None:
self.residuals.append(rs)
def _fold(self, x):
""" Transposed fold operator """
B, H, W, M, D = x.shape
x = x.transpose(-1, -2)
x = x.reshape(B, H, W, M * D)
x = F.pad(x, [0, 0, 1, 0, 1, 0])
x = x.reshape(B, (H + 1) * (W + 1), M * D).permute(0, 2, 1)
x = F.fold(x, [H, W], [2, 2], padding=1)
x = x.permute(0, 2, 3, 1).reshape(B, H, W, D, M // 4)
return x.transpose(-1, -2)
def _unfold(self, x):
""" Transposed unfold operator """
B, H, W, N, D = x.shape
x = x.transpose(-1, -2)
x = F.pad(x.view(B, H, W, N * D), [0, 0, 0, 1, 0, 1])
x = x.permute(0, 3, 1, 2)
x = F.unfold(x, [2, 2], padding=0)
x = x.permute(0, 2, 1).reshape(B, H, W, D, 4 * N)
return x.transpose(-1, -2)
def A(self, x, w=False):
""" Linear operator """
return torch.matmul(self.Af, self._unfold(x))
def At(self, y):
""" Adjoint operator """
w = self.wf.unsqueeze(dim=-1)
At = self.Af.transpose(-1, -2)
return self._fold(torch.matmul(At, w * y))
def to_csc(self):
""" Convert linear operator into scipy csc matrix"""
if self.Af is None:
self._build_factors()
with torch.no_grad():
B, H, W, N, M = self.Af.shape
dims = [torch.arange(d).cuda() for d in (H, W, N, M // 4)]
i0, j0, k0, h0 = \
[x.reshape(-1) for x in torch.meshgrid(*dims)]
# repeats are ok because edge weights get zeroed
s = [W * (M // 4), M // 4, 1]
i1 = i0 + 1
j1 = j0 + 1
i1[i1 >= H] = H - 1
j1[j1 >= W] = W - 1
col_idx = torch.stack([
s[0] * i0 + s[1] * j0 + s[2] * h0,
s[0] * i0 + s[1] * j1 + s[2] * h0,
s[0] * i1 + s[1] * j0 + s[2] * h0,
s[0] * i1 + s[1] * j1 + s[2] * h0
], dim=-1).view(-1)
dense_shape = [H * W * N, H * W * (M // 4)]
col_idx = col_idx.cpu().numpy()
row_idx = M * np.arange(0, H * W * N + 1)
A = self.Af.detach().view(B, H * W * N, M)
wsqrt = self.wf.detach().sqrt().view(B, H * W * N, 1)
vals = (wsqrt * A).cpu().numpy()
            sparse_matrices = []
            for batch_ix in range(B):
                data = (vals[batch_ix].reshape(-1), col_idx, row_idx)
                mat = scipy.sparse.csr_matrix(data, shape=dense_shape)
                mat.sum_duplicates()
                sparse_matrices.append(mat.T)
            return sparse_matrices
def factorAAt(self):
""" Peform sparse cholesky factorization """
global sym_factor, sym_shape
with torch.no_grad():
self.chols = []
start = time.time()
As = self.to_csc()
if sym_factor is None or As[0].shape != sym_shape:
sym_factor = cholmod.analyze_AAt(As[0], ordering_method='best')
sym_shape = As[0].shape
for A in As:
chol = sym_factor.cholesky_AAt(A)
self.chols.append(chol)
return self.chols
def solveAAt(self, b=None):
if self.chols is None:
self.factorAAt()
if b is None:
r = torch.cat(self.residuals, -2)
b = self.At(r)
x = GridCholeskySolver.apply(self.chols, self.Af, self.wf, b)
return x.reshape(*b.shape)
| CODD-main | model/motion/raft3d/blocks/grid.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
| CODD-main | model/motion/raft3d/blocks/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes)
self.norm2 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm3 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes)
self.norm2 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm3 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
if not stride == 1:
self.norm3 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x + y)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(BottleneckBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes // 4, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(planes // 4, planes // 4, kernel_size=3, padding=1, stride=stride)
self.conv3 = nn.Conv2d(planes // 4, planes, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4)
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes // 4)
self.norm2 = nn.BatchNorm2d(planes // 4)
self.norm3 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm4 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes // 4)
self.norm2 = nn.InstanceNorm2d(planes // 4)
self.norm3 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm4 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
self.norm3 = nn.Sequential()
if not stride == 1:
self.norm4 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
y = self.relu(self.norm3(self.conv3(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x + y)
class BasicEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, depth_input=False):
super(BasicEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(64)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(64)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
if depth_input:
self.conv1a = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 64
self.layer1 = self._make_layer(64, stride=1)
self.layer2 = self._make_layer(96, stride=2)
self.layer3 = self._make_layer(128, stride=2)
# output convolution
self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x, y=None):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
if y is not None:
y = torch.cat(y, dim=0)
x = self.conv1(x)
if y is not None:
x = x + self.conv1a(y)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
class SmallEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
super(SmallEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(32)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(32)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 32
self.layer1 = self._make_layer(32, stride=1)
self.layer2 = self._make_layer(64, stride=2)
self.layer3 = self._make_layer(96, stride=2)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
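
# --- Illustrative sketch (not part of the original repository) ---
# BasicEncoder maps an image to features at 1/8 resolution; passing a
# (img1, img2) tuple instead batches the two images through jointly.
if __name__ == "__main__":
    enc = BasicEncoder(output_dim=128, norm_fn="instance")
    x = torch.randn(2, 3, 96, 128)
    assert enc(x).shape == (2, 128, 12, 16)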
| CODD-main | model/motion/raft3d/blocks/extractor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn as nn
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128, dilation=4):
super(ConvGRU, self).__init__()
self.hidden_dim = hidden_dim
self.convz1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convz2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
self.convr1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convr2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
self.convq1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convq2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
def forward(self, h, *inputs):
iz, ir, iq = 0, 0, 0
for inp in inputs:
inp = inp.split([self.hidden_dim] * 3, dim=1)
iz = iz + inp[0]
ir = ir + inp[1]
iq = iq + inp[2]
z = torch.sigmoid(self.convz1(h) + self.convz2(h) + iz)
r = torch.sigmoid(self.convr1(h) + self.convr2(h) + ir)
q = torch.tanh(self.convq1(r * h) + self.convq2(r * h) + iq)
h = (1 - z) * h + z * q
return h
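
# --- Illustrative sketch (not part of the original repository) ---
# Each extra input to ConvGRU is a (3 * hidden_dim)-channel tensor that is
# split across the update (z), reset (r), and candidate (q) gates.
if __name__ == "__main__":
    gru = ConvGRU(hidden_dim=128)
    h = torch.zeros(1, 128, 30, 40)
    inp = torch.randn(1, 3 * 128, 30, 40)
    h = gru(h, inp)
    assert h.shape == (1, 128, 30, 40)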
| CODD-main | model/motion/raft3d/blocks/gru.py |
"""For pip."""
from setuptools import find_packages, setup
exec(open("pdftotree/_version.py").read())
setup(
name="pdftotree",
version=__version__,
description="Convert PDF into hOCR with text, tables, and figures being recognized and preserved.",
long_description=open("README.rst").read(),
packages=find_packages(),
install_requires=[
"IPython",
"beautifulsoup4",
"keras>=2.4.0",
"numpy",
"pandas",
"pdfminer.six>=20191020",
"pillow",
"selectivesearch",
"sklearn",
"tabula-py",
"tensorflow>=2.2",
"wand",
],
keywords=["pdf", "parsing", "html", "hocr"],
setup_requires=["pytest-runner"],
tests_require=["pytest"],
url="https://github.com/HazyResearch/pdftotree",
scripts=["bin/pdftotree", "bin/extract_tables"],
classifiers=[ # https://pypi.python.org/pypi?:action=list_classifiers
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
],
project_urls={
"Tracker": "https://github.com/HazyResearch/pdftotree/issues",
"Source": "https://github.com/HazyResearch/pdftotree",
},
python_requires=">=3.6",
author="Hazy Research",
author_email="[email protected]",
license="MIT",
)
| pdftotree-master | setup.py |
from typing import Tuple
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from wand.display import display
else:
from IPython.display import display
from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image
class TreeVisualizer:
"""
Object to display bounding boxes on a pdf document
"""
def __init__(self, pdf_file):
"""
        :param pdf_file: path to the pdf document to visualize
"""
self.pdf_file = pdf_file
def display_boxes(self, tree, html_path, filename_prefix, alternate_colors=False):
"""
        Displays the bounding boxes contained in 'tree' on images of the pdf
        pointed to by self.pdf_file. Each box is a 7-tuple
        (page, page_width, page_height, top, left, bottom, right).
"""
imgs = []
colors = {
"section_header": Color("blue"),
"figure": Color("green"),
"figure_caption": Color("green"),
"table_caption": Color("red"),
"list": Color("yellow"),
"paragraph": Color("gray"),
"table": Color("red"),
"header": Color("brown"),
}
for i, page_num in enumerate(tree.keys()):
img = self.pdf_to_img(page_num)
draw = Drawing()
draw.fill_color = Color("rgba(0, 0, 0, 0.0)")
for clust in tree[page_num]:
for (pnum, pwidth, pheight, top, left, bottom, right) in tree[page_num][
clust
]:
draw.stroke_color = colors[clust]
draw.rectangle(left=left, top=top, right=right, bottom=bottom)
draw.push()
draw.font_size = 20
draw.font_weight = 10
draw.fill_color = colors[clust]
if int(left) > 0 and int(top) > 0:
draw.text(x=int(left), y=int(top), body=clust)
draw.pop()
draw(img)
img.save(filename=html_path + filename_prefix + "_page_" + str(i) + ".png")
imgs.append(img)
return imgs
def display_candidates(self, tree, html_path, filename_prefix):
"""
        Displays the bounding boxes corresponding to candidates on images of the pdf.
        Boxes are 7-tuples (page, page_width, page_height, top, left, bottom, right).
"""
imgs = self.display_boxes(
tree, html_path, filename_prefix, alternate_colors=True
)
return display(*imgs)
def pdf_to_img(self, page_num, pdf_dim=None):
"""
        Converts a page of the pdf file into an image
        :param page_num: page number to convert (index starting at 1)
        :param pdf_dim: (width, height) to resize to; inferred from the pdf if None
        :return: wand image object
"""
if not pdf_dim:
pdf_dim = get_pdf_dim(self.pdf_file)
page_width, page_height = pdf_dim
img = Image(filename="{}[{}]".format(self.pdf_file, page_num - 1))
img.resize(page_width, page_height)
return img
def get_pdf_dim(pdf_file) -> Tuple[int, int]:
with open(pdf_file, "rb") as f:
parser = PDFParser(f)
doc = PDFDocument(parser)
# Look at the 1st page only.
page = next(PDFPage.create_pages(doc))
_, _, page_width, page_height = page.mediabox
return page_width, page_height
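
# --- Illustrative sketch (not part of the original module) ---
# "sample.pdf" below is a placeholder path for a local pdf document.
if __name__ == "__main__":
    width, height = get_pdf_dim("sample.pdf")
    print(f"first page mediabox: {width} x {height}")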
| pdftotree-master | pdftotree/TreeVisualizer.py |
__version__ = "0.5.1+dev"
| pdftotree-master | pdftotree/_version.py |
#!/usr/bin/env python
# At the top level, prevent logging output in absense of logging config.
import logging
from pdftotree._version import __version__
from pdftotree.core import parse
logging.getLogger(__name__).addHandler(logging.NullHandler())
__all__ = ["__version__", "parse"]
| pdftotree-master | pdftotree/__init__.py |
"""
This script takes a PDF document, extracts its tree structure, and then
writes HTML based on that tree structure. The components of the tree
structure are:
- Tables
- Table Captions
- Figures
- Figure Captions
- Section Headers
- Paragraphs
- Lists (e.g., references in research papers)
- Page Headers
Tables are detected using a machine learning model; provide its path via the
model_path argument (e.g., TreeStructure/data/paleo/ml/model.pkl).
Other tree parts are detected using heuristic methods.
"""
import codecs
import logging
import os
import pickle
from pdftotree.TreeExtract import TreeExtractor
from pdftotree.TreeVisualizer import TreeVisualizer
logger = logging.getLogger(__name__)
def load_model(model_type, model_path):
logger.info("Loading pretrained {} model for table detection".format(model_type))
if model_type == "ml":
model = pickle.load(open(model_path, "rb"))
else:
from keras.models import load_model as load_vision_model
model = load_vision_model(model_path)
logger.info("Model loaded!")
return model
def visualize_tree(pdf_file, pdf_tree, html_path):
v = TreeVisualizer(pdf_file)
filename_prefix = os.path.basename(pdf_file)
v.display_candidates(pdf_tree, html_path, filename_prefix)
def parse(
pdf_file,
html_path=None,
model_type=None,
model_path=None,
visualize=False,
):
model = None
if model_type is not None and model_path is not None:
model = load_model(model_type, model_path)
extractor = TreeExtractor(pdf_file)
if extractor.is_scanned():
logger.warning("Document looks scanned, the result may be far from expected.")
else:
logger.info("Digitized PDF detected, building tree structure...")
pdf_tree = extractor.get_tree_structure(model_type, model)
logger.info("Tree structure built, creating html...")
pdf_html = extractor.get_html_tree()
logger.info("HTML created.")
    # TODO: what is the following substitution for, and is it required?
# pdf_html = re.sub(r"[\x00-\x1F]+", "", pdf_html)
if html_path is None:
return pdf_html
with codecs.open(html_path, encoding="utf-8", mode="w") as f:
f.write(pdf_html)
if visualize:
visualize_tree(pdf_file, pdf_tree, html_path)
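
# --- Illustrative sketch (not part of the original module) ---
# Typical usage of parse(); "paper.pdf" and "paper.html" are placeholder paths.
if __name__ == "__main__":
    html = parse("paper.pdf")                    # heuristic table detection, returns hOCR
    parse("paper.pdf", html_path="paper.html")   # or write the hOCR to disk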
| pdftotree-master | pdftotree/core.py |
import logging
import os
import tempfile
from base64 import b64encode
from functools import cmp_to_key
from typing import Any, Dict, List, Optional, Tuple
from xml.dom.minidom import Document, Element
import numpy as np
import tabula
from pdfminer.image import ImageWriter
from pdfminer.layout import LAParams, LTChar, LTImage, LTPage, LTTextLine
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.utils import Plane
from pdftotree._version import __version__
from pdftotree.ml.features import get_lines_features, get_mentions_within_bbox
from pdftotree.utils.bbox_utils import bbox2str, get_rectangles
from pdftotree.utils.lines_utils import (
extend_horizontal_lines,
extend_vertical_lines,
get_vertical_and_horizontal,
merge_horizontal_lines,
merge_vertical_lines,
reorder_lines,
)
from pdftotree.utils.pdf.pdf_parsers import parse_layout, parse_tree_structure
from pdftotree.utils.pdf.pdf_utils import CustomPDFPageAggregator, PDFElems
from pdftotree.utils.pdf.vector_utils import column_order, reading_order
logger = logging.getLogger(__name__)
class TreeExtractor(object):
"""
Object to extract tree structure from pdf files
"""
def __init__(self, pdf_file):
self.pdf_file = pdf_file
self.elems: Dict[int, PDFElems] = {} # key represents page_num
self.font_stats: Dict[int, Any] = {} # key represents page_num
self.iou_thresh = 0.8
self.scanned = False
self.tree: Dict[
int, Dict[str, Tuple[int, int, int, float, float, float, float]]
] = {} # key represents page_num
    def identify_scanned_page(self, boxes, page_bbox, page_width, page_height):
        """Cluster vertically adjacent figure boxes and report whether a single
        cluster spans (approximately) the whole page, i.e. the page is a scan."""
plane = Plane(page_bbox)
plane.extend(boxes)
# initialize clusters
cid2obj = [set([i]) for i in range(len(boxes))]
# default object map to cluster with its own index
obj2cid = list(range(len(boxes)))
prev_clusters = obj2cid
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
box1 = b1.bbox
box2 = b2.bbox
if (
box1[0] == box2[0]
and box1[2] == box2[2]
and round(box1[3]) == round(box2[1])
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
if prev_clusters == obj2cid:
break
prev_clusters = obj2cid
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
if (
len(clusters) == 1
and clusters[0][0].bbox[0] < -0.0
and clusters[0][0].bbox[1] <= 0
and abs(clusters[0][0].bbox[2] - page_width) <= 5
and abs(clusters[0][0].bbox[3] - page_height) <= 5
):
return True
return False
def parse(self):
is_scanned = False
lin_seg_present = False
layouts: List[LTPage] = []
log = logging.getLogger(__name__)
# Open a PDF file.
with open(os.path.realpath(self.pdf_file), "rb") as fp:
# Create a PDF parser object associated with the file object.
parser = PDFParser(fp)
# Create a PDF document object that stores the document structure.
# Supply the password for initialization.
document = PDFDocument(parser, password="")
# Create a PDF resource manager object that stores shared resources.
rsrcmgr = PDFResourceManager()
# Set parameters for analysis.
laparams = LAParams(char_margin=1.0, word_margin=0.1, detect_vertical=True)
# Create a PDF page aggregator object.
device = CustomPDFPageAggregator(rsrcmgr, laparams=laparams)
# Create a PDF interpreter object.
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Process each page contained in the document.
for page_num, page in enumerate(PDFPage.create_pages(document)):
try:
interpreter.process_page(page)
except OverflowError as oe:
log.exception(
"{}, skipping page {} of {}".format(oe, page_num, self.pdf_file)
)
continue
layout = device.get_result()
layouts.append(layout)
for page_num, layout in enumerate(layouts):
page_num += 1 # indexes start at 1
elems, font_stat = device.normalize_pdf(layout, scaler=1)
self.elems[page_num] = elems
self.font_stats[page_num] = font_stat
# code to detect if the page is scanned
if len(elems.segments) > 0:
lin_seg_present = True
for fig in elems.figures:
if (
fig.bbox[0] <= 0.0
and fig.bbox[1] <= 0.0
and round(fig.bbox[2]) == round(elems.layout.width)
and round(fig.bbox[3]) == round(elems.layout.height)
):
is_scanned = True
page_scanned = self.identify_scanned_page(
elems.figures,
elems.layout.bbox,
elems.layout.width,
elems.layout.height,
)
# doc is scanned if any page is scanned
if page_scanned:
is_scanned = True
if is_scanned or not lin_seg_present:
self.scanned = True
def is_scanned(self):
if len(self.elems) == 0:
self.parse()
return self.scanned
def get_tables_page_num(self, page_num):
page_boxes, _ = self.get_candidates_and_features_page_num(page_num)
tables = page_boxes
return tables
def get_candidates_and_features_page_num(self, page_num):
elems = self.elems[page_num]
# font_stat = self.font_stats[page_num]
# lines_bboxes = self.get_candidates_lines(page_num, elems)
alignments_bboxes, alignment_features = self.get_candidates_alignments(
page_num, elems
)
boxes = alignments_bboxes
if len(boxes) == 0:
logger.info("No boxes were found on page {}.".format(page_num))
return [], []
lines_features = get_lines_features(boxes, elems)
features = np.concatenate(
(np.array(alignment_features), np.array(lines_features)), axis=1
)
return boxes, features
def get_candidates_lines(self, page_num, elems):
page_width = int(elems.layout.width)
page_height = int(elems.layout.height)
lines = reorder_lines(elems.segments)
vertical_lines, horizontal_lines = get_vertical_and_horizontal(lines)
extended_vertical_lines = extend_vertical_lines(horizontal_lines)
extended_horizontal_lines = extend_horizontal_lines(vertical_lines)
vertical_lines = merge_vertical_lines(
sorted(extended_vertical_lines + vertical_lines)
)
horizontal_lines = merge_horizontal_lines(
sorted(extended_horizontal_lines + horizontal_lines)
)
rects = get_rectangles(sorted(vertical_lines), sorted(horizontal_lines))
return [(page_num, page_width, page_height) + bbox for bbox in rects]
def get_candidates_alignments(self, page_num, elems):
page_width = int(elems.layout.width)
page_height = int(elems.layout.height)
font_stat = self.font_stats[page_num]
try:
nodes, features = parse_layout(elems, font_stat)
except Exception as e:
logger.exception(e)
nodes, features = [], []
return (
[
(page_num, page_width, page_height)
+ (node.y0, node.x0, node.y1, node.x1)
for node in nodes
],
features,
)
def get_elems(self):
return self.elems
def get_font_stats(self):
return self.font_stats
def get_tree_structure(self, model_type, model) -> Dict[str, Any]:
        tables: Dict[int, List[Tuple[int, int, int, float, float, float, float]]] = {}
# use vision to get tables
if model_type == "vision":
from pdftotree.visual.visual_utils import get_bboxes, predict_heatmap
for page_num in self.elems.keys():
page_width = int(self.elems[page_num].layout.width)
page_height = int(self.elems[page_num].layout.height)
image, pred = predict_heatmap(
self.pdf_file, page_num - 1, model
) # index start at 0 with wand
bboxes, _ = get_bboxes(image, pred)
tables[page_num] = [
(page_num, page_width, page_height)
+ (top, left, top + height, left + width)
for (left, top, width, height) in bboxes
]
# use ML to get tables
elif model_type == "ml":
for page_num in self.elems.keys():
t_cands, cand_feats = self.get_candidates_and_features_page_num(
page_num
)
tables[page_num] = []
if len(cand_feats) != 0:
table_predictions = model.predict(cand_feats)
tables[page_num] = [
t_cands[i]
for i in range(len(t_cands))
if table_predictions[i] > 0.5
]
# use heuristics to get tables if no model_type is provided
else:
for page_num in self.elems.keys():
tables[page_num] = self.get_tables_page_num(page_num)
# Manage References - indicator to indicate if reference has been seen
ref_page_seen = False
for page_num in self.elems.keys():
# Get Tree Structure for this page
self.tree[page_num], ref_page_seen = parse_tree_structure(
self.elems[page_num],
self.font_stats[page_num],
page_num,
ref_page_seen,
tables[page_num],
)
return self.tree
def get_html_tree(self) -> str:
# Create a temp folder where images are temporarily saved.
dirname = tempfile.mkdtemp()
imagewriter = ImageWriter(dirname)
doc = Document()
self.doc = doc
html = doc.createElement("html")
doc.appendChild(html)
head = doc.createElement("head")
html.appendChild(head)
# meta
meta = doc.createElement("meta")
head.appendChild(meta)
meta.setAttribute("name", "ocr-system")
meta.setAttribute("content", f"Converted from PDF by pdftotree {__version__}")
meta = doc.createElement("meta")
head.appendChild(meta)
meta.setAttribute("name", "ocr-capabilities")
meta.setAttribute(
"content", "ocr_page ocr_table ocrx_block ocrx_line ocrx_word"
)
meta = doc.createElement("meta")
head.appendChild(meta)
meta.setAttribute("name", "ocr-number-of-pages")
meta.setAttribute("content", f"{len(self.elems.keys())}")
# body
body = doc.createElement("body")
html.appendChild(body)
for page_num in self.elems.keys(): # 1-based
boxes: List[Tuple[str, float, float, float, float]] = []
for clust in self.tree[page_num]:
for (pnum, pwidth, pheight, top, left, bottom, right) in self.tree[
page_num
][clust]:
boxes += [
(clust.lower().replace(" ", "_"), top, left, bottom, right)
]
page = doc.createElement("div")
page.setAttribute("class", "ocr_page")
page.setAttribute("id", f"page_{page_num}")
width = int(self.elems[page_num].layout.width)
height = int(self.elems[page_num].layout.height)
page.setAttribute(
"title",
f"bbox 0 0 {width} {height}; ppageno {page_num-1}",
)
body.appendChild(page)
            # TODO: We need to detect columns and sort accordingly.
boxes.sort(key=cmp_to_key(column_order))
for box in boxes:
if box[0] == "table":
table = box[1:] # bbox
table_element = self.get_html_table(table, page_num)
page.appendChild(table_element)
elif box[0] == "figure":
elems: List[LTTextLine] = get_mentions_within_bbox(
box, self.elems[page_num].figures
)
fig_element = doc.createElement("figure")
page.appendChild(fig_element)
top, left, bottom, right = [int(i) for i in box[1:]]
fig_element.setAttribute(
"title", f"bbox {left} {top} {right} {bottom}"
)
for img in [img for elem in elems for img in elem]:
if not isinstance(img, LTImage):
continue
filename = imagewriter.export_image(img)
with open(os.path.join(dirname, filename), "rb") as f:
base64 = b64encode(f.read()).decode("ascii")
if filename.endswith("jpg"):
mediatype = "jpeg"
elif filename.endswith("bmp"):
mediatype = "bmp"
else:
logger.info(f"Skipping an unknown type image: {filename}.")
continue
logger.info(f"Embedding a known type image: {filename}.")
img_element = doc.createElement("img")
fig_element.appendChild(img_element)
img_element.setAttribute("title", bbox2str(img.bbox))
img_element.setAttribute(
"src", f"data:image/{mediatype};base64,{base64}"
)
else:
element = self.get_html_others(box[0], box[1:], page_num)
page.appendChild(element)
return doc.toprettyxml()
def get_word_boundaries(
self, mention: LTTextLine
) -> List[Tuple[str, float, float, float, float]]:
"""Split a line of text into words.
:param mention: a line of text
:return: a list of words
"""
mention_text = mention.get_text()
mention_chars: List[Tuple[str, int, int, int, int]] = []
for obj in mention:
if isinstance(obj, LTChar):
x0, y0, x1, y1 = obj.bbox
mention_chars.append([obj.get_text(), y0, x0, y1, x1])
words = []
mention_words: List[str] = mention_text.split() # word split by " " (space)
char_idx = 0
for word in mention_words:
curr_word = [word, float("Inf"), float("Inf"), float("-Inf"), float("-Inf")]
len_idx = 0
while len_idx < len(word):
char: str = mention_chars[char_idx][0]
if char in [" ", "\xa0"]:
char_idx += 1
continue
if word[len_idx : len_idx + len(char)] != char:
logger.warning(
"Out of order ({}, {})".format(word, mention_chars[char_idx][0])
)
curr_word[1] = min(curr_word[1], mention_chars[char_idx][1])
curr_word[2] = min(curr_word[2], mention_chars[char_idx][2])
curr_word[3] = max(curr_word[3], mention_chars[char_idx][3])
curr_word[4] = max(curr_word[4], mention_chars[char_idx][4])
len_idx += len(mention_chars[char_idx][0])
char_idx += 1
words.append(curr_word)
return words
def get_char_boundaries(self, mention):
# mention_text = mention.get_text()
mention_chars = []
for obj in mention:
if isinstance(obj, LTChar):
x0, y0, x1, y1 = obj.bbox
mention_chars.append([obj.get_text(), y0, x0, y1, x1])
return mention_chars
def get_html_others(self, tag: str, box: List[float], page_num: int) -> Element:
element = self.doc.createElement("div")
element.setAttribute("class", "ocrx_block")
element.setAttribute("pdftotree", tag) # for backward-compatibility
top, left, bottom, right = [int(x) for x in box]
element.setAttribute("title", f"bbox {left} {top} {right} {bottom}")
elems: List[LTTextLine] = get_mentions_within_bbox(
box, self.elems[page_num].mentions
)
elems.sort(key=cmp_to_key(reading_order))
for elem in elems:
line_element = self.doc.createElement("span")
element.appendChild(line_element)
line_element.setAttribute("class", "ocrx_line")
line_element.setAttribute("title", bbox2str(elem.bbox))
words = self.get_word_boundaries(elem)
for word in words:
top, left, bottom, right = [int(x) for x in word[1:]]
word_element = self.doc.createElement("span")
line_element.appendChild(word_element)
word_element.setAttribute("class", "ocrx_word")
word_element.setAttribute(
"title", f"bbox {left} {top} {right} {bottom}"
)
# No need to escape text here as minidom will do.
word_element.appendChild(self.doc.createTextNode(word[0]))
return element
def get_html_table(self, table: List[float], page_num) -> Optional[Element]:
"""Recognize a table using tabula and return a DOM element.
:param table: bbox for a table (top,left,bottom,right)
:param page_num: 1-based page number
:return: DOM element for a table
"""
logger.debug(f"Calling tabula at page: {page_num} and area: {table}.")
loglevel = logging.getLogger("pdftotree").getEffectiveLevel()
table_json = tabula.read_pdf(
self.pdf_file,
pages=page_num,
area=table,
output_format="json",
            silent=loglevel > logging.DEBUG,
)
logger.debug(f"Tabula recognized {len(table_json)} table(s).")
if len(table_json) == 0:
return None
table_element = self.doc.createElement("table")
table_element.setAttribute("class", "ocr_table")
top = int(table_json[0]["top"])
left = int(table_json[0]["left"])
bottom = int(table_json[0]["bottom"])
right = int(table_json[0]["right"])
table_element.setAttribute("title", f"bbox {left} {top} {right} {bottom}")
for i, row in enumerate(table_json[0]["data"]):
row_element = self.doc.createElement("tr")
table_element.appendChild(row_element)
for j, cell in enumerate(row):
# It is not explicitly stated anywhere but tabula seems to use the cell
# bbox to represent that of cell itself rather than that of text inside.
# Note: bbox could be [0, 0, 0, 0] if tabula recognizes no text inside.
box: List[float] = [
cell["top"],
cell["left"],
cell["top"] + cell["height"],
cell["left"] + cell["width"],
]
cell_element = self.doc.createElement("td")
row_element.appendChild(cell_element)
elems = get_mentions_within_bbox(box, self.elems[page_num].mentions)
if len(elems) == 0:
continue
cell_element.setAttribute(
"title",
f"bbox {int(box[1])} {int(box[0])} {int(box[3])} {int(box[2])}",
)
elems.sort(key=cmp_to_key(reading_order))
for elem in elems:
line_element = self.doc.createElement("span")
cell_element.appendChild(line_element)
line_element.setAttribute("class", "ocrx_line")
line_element.setAttribute("title", bbox2str(elem.bbox))
words = self.get_word_boundaries(elem)
for word in words:
top = int(word[1])
left = int(word[2])
bottom = int(word[3])
right = int(word[4])
word_element = self.doc.createElement("span")
line_element.appendChild(word_element)
word_element.setAttribute("class", "ocrx_word")
word_element.setAttribute(
"title", f"bbox {left} {top} {right} {bottom}"
)
# No need to escape text here as minidom will do.
word_element.appendChild(self.doc.createTextNode(word[0]))
return table_element
| pdftotree-master | pdftotree/TreeExtract.py |
TOLERANCE = 5
def reorder_lines(lines, tol=TOLERANCE):
"""
Changes the line coordinates to be given as (top, left, bottom, right)
:param lines: list of lines coordinates
:return: reordered list of lines coordinates
"""
reordered_lines = []
for line in lines:
# we divide by tol and multiply by tol to truncate numbers, stairs function
reordered_lines += [
(
int(round(line.y0 / tol) * tol),
int(round(line.x0 / tol) * tol),
int(round(line.y1 / tol) * tol),
int(round(line.x1 / tol) * tol),
)
]
return reordered_lines
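# Illustrative usage sketch (added for exposition; not part of the original
# module). _FakeLine is a hypothetical stand-in for a pdfminer LTLine; the
# doctest shows the 5-pt snapping and (top, left, bottom, right) reordering.
def _demo_reorder_lines():
    """
    >>> class _FakeLine:
    ...     def __init__(self, x0, y0, x1, y1):
    ...         self.x0, self.y0, self.x1, self.y1 = x0, y0, x1, y1
    >>> reorder_lines([_FakeLine(101.3, 12.2, 198.7, 12.2)])
    [(10, 100, 10, 200)]
    """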
def merge_vertical_lines(lines, tol=TOLERANCE):
"""
    This function merges line segments when they are vertically aligned
:param lines: list of lines coordinates (top, left, bottom, right)
:return: list of merged lines coordinates
"""
if len(lines) == 0:
return []
merged_lines = [lines[0]]
for line in lines[1:]:
last_line = merged_lines[-1]
if line[1] == last_line[1]: # lines are vertically aligned
if line[0] <= last_line[2] + tol: # lines intersect
y0, x0, y1, x1 = merged_lines[-1]
merged_lines[-1] = (y0, x0, line[2], x1)
else:
merged_lines.append(
line
) # lines are vertically aligned but do not intersect
else:
merged_lines.append(line)
return merged_lines
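# Illustrative sketch (not in the original module): two vertically aligned
# segments whose gap is within the tolerance merge into one segment.
def _demo_merge_vertical_lines():
    """
    >>> merge_vertical_lines([(0, 10, 50, 10), (53, 10, 90, 10)])
    [(0, 10, 90, 10)]
    """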
def merge_horizontal_lines(lines, tol=TOLERANCE):
"""
This function merges horizontal lines when they are horizontally aligned
:param lines: list of lines coordinates (top, left, bottom, right)
:return: list of merged lines coordinates
"""
if len(lines) == 0:
return []
merged_lines = [lines[0]]
for line in lines[1:]:
last_line = merged_lines[-1]
if line[0] == last_line[0]: # lines are horizontally aligned
if line[1] <= last_line[3] + tol: # lines intersect
y0, x0, y1, x1 = merged_lines[-1]
merged_lines[-1] = (y0, x0, y1, line[3])
else:
merged_lines.append(
line
) # lines are horizontally aligned but do not intersect
else:
merged_lines.append(line)
return merged_lines
def get_vertical_and_horizontal(lines):
    """
    Extracts vertical and horizontal lines lists
    :param lines: list of lines coordinates
    :return: vertical_lines, horizontal_lines (2 lists of coordinates)
    """
    # TODO: add some angle tolerance when lines are not perfectly aligned (eg:
    # scanned pdf)
    vertical_lines = sorted(
        [e for e in lines if e[1] == e[3]], key=lambda tup: (tup[1], tup[0])
    )
    horizontal_lines = sorted([e for e in lines if e[0] == e[2]])
    if len(vertical_lines) > 0:
        vertical_lines = merge_vertical_lines(vertical_lines)
    if len(horizontal_lines) > 0:
        horizontal_lines = merge_horizontal_lines(horizontal_lines)
    return vertical_lines, horizontal_lines
def extend_vertical_lines(horizontal_lines, tol=TOLERANCE):
widths = {}
for i, line in enumerate(horizontal_lines):
try:
widths[(line[1], line[3])] += [i]
except KeyError:
widths[(line[1], line[3])] = [i]
new_vertical_lines = []
for (x0, x1) in widths.keys():
if len(widths[(x0, x1)]) > 1:
lines = [horizontal_lines[i] for i in widths[(x0, x1)]]
y0 = min([h[0] for h in lines])
y1 = max([h[2] for h in lines])
new_vertical_lines += [(y0, x0, y1, x0), (y0, x1, y1, x1)]
return new_vertical_lines
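# Illustrative sketch (not in the original module): two horizontal rules that
# share the same x-extent imply the two vertical borders of the box between
# them, which is exactly what extend_vertical_lines synthesizes.
def _demo_extend_vertical_lines():
    """
    >>> extend_vertical_lines([(0, 10, 0, 90), (40, 10, 40, 90)])
    [(0, 10, 40, 10), (0, 90, 40, 90)]
    """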
def extend_horizontal_lines(vertical_lines, tol=TOLERANCE):
heights = {}
for i, line in enumerate(vertical_lines):
try:
heights[(line[0], line[2])] += [i]
except KeyError:
heights[(line[0], line[2])] = [i]
new_horizontal_lines = []
for (y0, y1) in heights.keys():
if len(heights[(y0, y1)]) > 1:
lines = [vertical_lines[i] for i in heights[(y0, y1)]]
x0 = min([h[1] for h in lines])
x1 = max([h[3] for h in lines])
new_horizontal_lines += [(y0, x0, y0, x1), (y1, x0, y1, x1)]
return new_horizontal_lines
| pdftotree-master | pdftotree/utils/lines_utils.py |
import numpy as np
from wand.color import Color
from wand.display import display
from wand.drawing import Drawing
from wand.image import Image
def display_bounding_boxes(img, blocks, alternatecolors=False, color=Color("blue")):
"""
Displays each of the bounding boxes passed in 'boxes' on an image of the pdf
pointed to by pdf_file
boxes is a list of 5-tuples (page, top, left, bottom, right)
"""
draw = Drawing()
draw.fill_color = Color("rgba(0, 0, 0, 0)")
draw.stroke_color = color
for block in blocks:
top, left, bottom, right = block[-4:]
if alternatecolors:
draw.stroke_color = Color(
"rgba({},{},{}, 1)".format(
str(np.random.randint(255)),
str(np.random.randint(255)),
str(np.random.randint(255)),
)
)
draw.rectangle(
left=float(left), top=float(top), right=float(right), bottom=float(bottom)
)
draw(img)
display(img)
def display_bounding_boxes_within_notebook(
page_num, extractor, blocks, alternatecolors=False, color=Color("blue")
):
"""
Displays each of the bounding boxes passed in 'boxes' on an image of the pdf
pointed to by pdf_file
boxes is a list of 5-tuples (page, top, left, bottom, right)
"""
elems = extractor.elems[page_num]
page_width, page_height = int(elems.layout.width), int(elems.layout.height)
img = pdf_to_img(extractor.pdf_file, page_num, page_width, page_height)
draw = Drawing()
draw.fill_color = Color("rgba(0, 0, 0, 0)")
draw.stroke_color = color
for block in blocks:
top, left, bottom, right = block[-4:]
if alternatecolors:
draw.stroke_color = Color(
"rgba({},{},{}, 1)".format(
str(np.random.randint(255)),
str(np.random.randint(255)),
str(np.random.randint(255)),
)
)
draw.rectangle(
left=float(left), top=float(top), right=float(right), bottom=float(bottom)
)
draw(img)
return img
def pdf_to_img(pdf_file, page_num, page_width, page_height):
"""
Converts pdf file into image
:param pdf_file: path to the pdf file
:param page_num: page number to convert (index starting at 1)
:return: wand image object
"""
img = Image(filename="{}[{}]".format(pdf_file, page_num - 1))
img.resize(page_width, page_height)
return img
| pdftotree-master | pdftotree/utils/display_utils.py |
| pdftotree-master | pdftotree/utils/__init__.py |
from typing import Tuple
TOLERANCE = 5
def doOverlap(bbox1, bbox2):
"""
:param bbox1: bounding box of the first rectangle
:param bbox2: bounding box of the second rectangle
    :return: True if the two rectangles overlap
"""
if bbox1[2] < bbox2[0] or bbox2[2] < bbox1[0]:
return False
if bbox1[3] < bbox2[1] or bbox2[3] < bbox1[1]:
return False
return True
def isContained(bbox1, bbox2, tol=TOLERANCE):
"""
:param bbox1: bounding box of the first rectangle
:param bbox2: bounding box of the second rectangle
    :return: True if bbox1 is contained in bbox2
"""
if bbox1[0] > bbox2[0] - tol and bbox1[1] > bbox2[1] - tol:
if bbox1[2] < bbox2[2] + tol and bbox1[3] < bbox2[3] + tol:
return True
return False
def mergeBboxes(bbox1, bbox2):
"""
:param bbox1: (top, left, bottom, right)
:param bbox2: (top, left, bottom, right)
    :return: the merged bounding box
"""
if isContained(bbox1, bbox2):
return bbox2
elif isContained(bbox2, bbox1):
return bbox1
else:
return (
min(bbox1[0], bbox2[0]),
min(bbox1[1], bbox2[1]),
max(bbox1[2], bbox2[2]),
max(bbox1[3], bbox2[3]),
)
def get_rectangles(vertical_lines, horizontal_lines):
"""
:param vertical_lines: list of vertical lines coordinates
:param horizontal_lines: list of horizontal lines coordinates
:return: List of bounding boxes for tables
"""
rectangles = []
i = 0
j = 0
while i < len(horizontal_lines) and j < len(vertical_lines):
if int(horizontal_lines[i][0]) == vertical_lines[j][0]:
if int(horizontal_lines[i][1]) == int(vertical_lines[j][1]):
h = horizontal_lines[i]
v = vertical_lines[j]
rectangles += [(v[0], h[1], v[2], h[3])]
i += 1
j += 1
elif int(horizontal_lines[i][1]) < int(vertical_lines[j][1]):
i += 1
else:
j += 1
elif int(horizontal_lines[i][0]) < int(vertical_lines[j][0]):
i += 1
else:
j += 1
rectangles = [
r
for r in rectangles
if ((r[2] - r[0]) > TOLERANCE and (r[3] - r[1]) > TOLERANCE)
]
return rectangles
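# Illustrative sketch (not in the original module): a vertical and a
# horizontal line that meet at a corner yield one candidate rectangle.
def _demo_get_rectangles():
    """
    >>> get_rectangles(vertical_lines=[(0, 10, 40, 10)],
    ...                horizontal_lines=[(0, 10, 0, 90)])
    [(0, 10, 40, 90)]
    """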
def get_outer_bounding_boxes(rectangles):
"""
:param rectangles: list of bounding boxes (top, left, bottom, right)
:return: outer bounding boxes (only the largest bbox when bboxes intersect)
"""
if len(rectangles) == 0:
return []
outer_bboxes = [rectangles[0]]
for bbox2 in rectangles[1:]:
overlap_indexes = []
for i, bbox1 in enumerate(outer_bboxes): # TODO: optimize this !!
if doOverlap(bbox1, bbox2):
overlap_indexes.append(i)
for i in overlap_indexes:
bbox2 = mergeBboxes(bbox2, outer_bboxes[i])
for i in sorted(overlap_indexes, reverse=True):
del outer_bboxes[i]
outer_bboxes.append(bbox2)
return outer_bboxes
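# Illustrative sketch (not in the original module): overlapping boxes are
# merged into their union, while disjoint boxes survive unchanged.
def _demo_get_outer_bounding_boxes():
    """
    >>> get_outer_bounding_boxes([(0, 0, 10, 10), (5, 5, 15, 15), (50, 50, 60, 60)])
    [(0, 0, 15, 15), (50, 50, 60, 60)]
    """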
def get_intersection(bbox1, bbox2):
"""
:param bbox1: (page, width, height, top, left, bottom, right)
:param bbox2: (page, width, height, top, left, bottom, right)
:return: intersection if bboxes are in the same page and intersect
"""
intersection = []
page_1, page_width, page_height, top_1, left_1, bottom_1, right_1 = bbox1
page_2, _, _, top_2, left_2, bottom_2, right_2 = bbox2
if page_1 == page_2:
if doOverlap(
(top_1, left_1, bottom_1, right_1), (top_2, left_2, bottom_2, right_2)
):
intersection += [
(
page_1,
page_width,
page_height,
max(top_1, top_2),
max(left_1, left_2),
min(bottom_1, bottom_2),
min(right_1, right_2),
)
]
return intersection
def compute_iou(bbox1, bbox2):
"""
:param bbox1: (page, width, height, top, left, bottom, right)
:param bbox2: (page, width, height, top, left, bottom, right)
:return: intersection over union if bboxes are in the same page and intersect
"""
top_1, left_1, bottom_1, right_1 = bbox1
top_2, left_2, bottom_2, right_2 = bbox2
if doOverlap(
(top_1, left_1, bottom_1, right_1), (top_2, left_2, bottom_2, right_2)
):
intersection = (min(bottom_1, bottom_2) - max(top_1, top_2)) * (
min(right_1, right_2) - max(left_1, left_2)
)
union = (
(bottom_1 - top_1) * (right_1 - left_1)
+ (bottom_2 - top_2) * (right_2 - left_2)
- intersection
)
return float(intersection) / float(union)
return 0.0
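# Illustrative sketch (not in the original module): two 10x20 boxes that
# overlap on a 10x10 patch have IoU = 100 / (200 + 200 - 100) = 1/3.
def _demo_compute_iou():
    """
    >>> compute_iou((0, 0, 10, 20), (0, 10, 10, 30))
    0.3333333333333333
    """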
def bbox2str(bbox: Tuple[float, float, float, float]) -> str:
"""Return a string representation suited for hOCR.
:param bbox: a bounding box (left, top, right, bottom)
:return: a string representation for hOCR
"""
(x0, y0, x1, y1) = bbox
return f"bbox {int(x0)} {int(y0)} {int(x1)} {int(y1)}"
| pdftotree-master | pdftotree/utils/bbox_utils.py |
"""
Created on Oct 11, 2015
@author: xiao
"""
import os
from sys import platform as _platform
import numpy as np
from pdfminer.layout import LTAnno
from PIL import Image, ImageDraw, ImageFont
from pdftotree.utils.pdf.vector_utils import center
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
default_font_size = 10
_font_cache = {}
def lazy_load_font(font_size=default_font_size):
"""
Lazy loading font according to system platform
"""
    if font_size not in _font_cache:
        if _platform.startswith("darwin"):
            font_path = "/Library/Fonts/Arial.ttf"
        elif _platform.startswith("linux"):
            font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf"
        elif _platform.startswith("win32"):
            font_path = "C:\\Windows\\Fonts\\arial.ttf"
        else:
            # Previously font_path was left undefined here, producing a
            # confusing NameError; fail with an explicit message instead.
            raise OSError("No known font path for platform: " + _platform)
        _font_cache[font_size] = ImageFont.truetype(font_path, font_size)
return _font_cache[font_size]
def normalize_bbox(coords, ymax, scaler=2):
"""
scales all coordinates and flip y axis due to different
origin coordinates (top left vs. bottom left)
"""
return [
coords[0] * scaler,
ymax - (coords[3] * scaler),
coords[2] * scaler,
ymax - (coords[1] * scaler),
]
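# Illustrative sketch (not in the original module): with a page of height 200
# and scaler=2, a bottom-left-origin bbox is doubled and flipped into
# top-left image coordinates.
def _demo_normalize_bbox():
    """
    >>> normalize_bbox((10, 30, 50, 70), ymax=200, scaler=2)
    [20, 60, 100, 140]
    """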
def normalize_pts(pts, ymax, scaler=2):
"""
scales all coordinates and flip y axis due to different
origin coordinates (top left vs. bottom left)
"""
return [(x * scaler, ymax - (y * scaler)) for x, y in pts]
def create_img(bbox=(0, 0, 200, 200)):
# create new white image
img = Image.new("RGBA", bbox[-2:], white)
return img, ImageDraw.Draw(img)
def render(draw, bbox, text=None, align=None, fill=None, outline=black):
draw.rectangle(bbox, outline=outline, fill=fill)
if text:
coord = center(bbox) if align == "center" else bbox[:2]
draw.text(coord, text, black, font=lazy_load_font())
def load_image(pdf_path, page_num):
pdf_file = os.path.basename(pdf_path)
basename = pdf_file[:-4]
image_name = "%s-%06d.png" % (basename, page_num + 1)
scan = Image.open(os.path.join("private/imgs/", image_name))
# scan.filter(ImageFilter.GaussianBlur(2)).show()
# make it black and white for simplicity
return scan.convert("1")
def load_pixels(pdf_path, page_num):
scan_img = load_image(pdf_path, page_num)
raw_data = np.array(scan_img.getdata())
return (raw_data > 0).reshape(scan_img.height, scan_img.width), scan_img
def fill(mat, orig_bbox, margin):
pass
def render_debug_img(
file_name,
page_num,
elems,
nodes=[],
scaler=1,
print_segments=False,
print_curves=True,
print_table_bbox=True,
print_text_as_rect=True,
):
"""
Shows an image rendering of the pdf page along with debugging
info printed
"""
# For debugging show the boolean pixels in black white grayscale
height = scaler * int(elems.layout.height)
width = scaler * int(elems.layout.width)
debug_img, draw = create_img((0, 0, width, height))
font = lazy_load_font()
large_font = lazy_load_font(24)
if print_curves:
for i, c in enumerate(elems.curves):
if len(c.pts) > 1:
draw.polygon(c.pts, outline=blue)
draw.rectangle(c.bbox, fill=blue)
# for fig in elems.figures:
# draw.rectangle(fig.bbox, fill = blue)
for i, m in enumerate(elems.mentions):
if isinstance(m, LTAnno):
continue
if print_text_as_rect:
fill = "pink" if hasattr(m, "feats") and m.feats["is_cell"] else green
# fill = green
draw.rectangle(m.bbox, fill=fill)
# draw.text(center(m.bbox), str(i), black, font = font) # Draw id
draw.text(
m.bbox[:2], m.get_text(), black, font=font
) # Draw mention content
else:
draw.text(m.bbox[:2], m.get_text(), "black", font=font)
if print_segments:
# draw skeleton for all segments
for i, s in enumerate(elems.segments):
draw.line(s.bbox, fill="black")
if print_table_bbox:
for node in nodes:
is_table = node.is_table()
color = "red" if is_table else "green"
draw.rectangle(node.bbox, outline=color)
if is_table:
# text = 'Borderless' if node.is_borderless() else 'Bordered'
text = "Table"
draw.rectangle(node.bbox, outline=color)
draw.text(node.bbox[:2], text, red, font=large_font)
# Water mark with file name so we can identify among multiple images
if file_name and page_num is not None:
water_mark = (
file_name + ":page " + str(page_num + 1) + "@%dx%d" % (width, height)
)
draw.text((10, 10), water_mark, black, font=font)
debug_img.show()
return debug_img
| pdftotree-master | pdftotree/utils/img_utils.py |
"""
Created on Jan 25, 2016
@author: xiao
"""
import collections
import logging
from builtins import range
from itertools import chain
import numpy as np
from pdfminer.layout import LTAnno
from pdftotree.utils.pdf.vector_utils import inside, intersect
def get_near_items(tree, tree_key):
"""
Check both possible neighbors for key
in a binary tree
"""
try:
yield tree.floor_item(tree_key)
except KeyError:
pass
try:
yield tree.ceiling_item(tree_key)
except KeyError:
pass
def align_add(tree, key, item, align_thres=2.0):
"""
Adding the item object to a binary tree with the given
key while allow for small key differences
close_enough_func that checks if two keys are
within threshold
"""
for near_key, near_list in get_near_items(tree, key):
if abs(key - near_key) < align_thres:
near_list.append(item)
return
# Create a new group if no items are close
tree[key] = [item]
right_wall = lambda m: (m.x1, m.y0, m.x1, m.y1)
left_wall = lambda m: (m.x0, m.y0, m.x0, m.y1)
top_wall = lambda m: (m.x0, m.y0, m.x1, m.y0)
bot_wall = lambda m: (m.x0, m.y1, m.x1, m.y1)
def vlines_between(plane, prev, m):
if not prev or not m:
return []
if prev.xc > m.xc:
prev, m = m, prev
query = (prev.xc, prev.yc, m.xc, prev.yc)
return [l for l in plane.find(query) if l.x1 - l.x0 < 0.1]
def hlines_between(plane, prev, m):
if not prev or not m:
return []
if prev.yc > m.yc:
prev, m = m, prev
query = (prev.xc, prev.yc, prev.xc, m.yc)
return [l for l in plane.find(query) if l.y1 - l.y0 < 0.1]
def is_same_row(m1, m2):
# Corner case for row separation
# ------
# -prev- ------
# ------ ---m--
# ------
return m1 and m2 and m2.yc > m1.y0 and m2.yc < m1.y1
def is_vline(l):
return l.x1 - l.x0 < 0.1
def is_hline(l):
return l.y1 - l.y0 < 0.1
#
# def align_add_to_tree(tree, key, item, close_enough_func):
# '''
# Adding the item object to a binary tree with the given
# key while allow for small key differences
# close_enough_func that checks if two keys are
# within threshold
# '''
# has_neighbor = False
# for near_key, near_list in get_near_items(tree, key):
# if close_enough_func(key, near_key):
# near_list.append(item)
# has_neighbor = True
# break
#
# # Create a new group if no items are close
# if not has_neighbor:
# tree[key] = [item]
#
def collect_table_content(table_bboxes, elems):
"""
Returns a list of elements that are contained inside
the corresponding supplied bbox.
"""
# list of table content chars
table_contents = [[] for _ in range(len(table_bboxes))]
prev_content = None
prev_bbox = None
for cid, c in enumerate(elems):
# Annotations should not fall outside alone
if isinstance(c, LTAnno):
if prev_content is not None:
prev_content.append(c)
continue
# Generally speaking table contents should be included sequentially
# and we can avoid checking all tables for elems inside
# Elements only need to intersect the bbox for table as some
# formatting of fonts may result in slightly out of bbox text
if prev_bbox is not None and intersect(prev_bbox, c.bbox):
prev_content.append(c)
continue
# Search the rest of the tables for membership when done with
# the current one
for table_id, table_bbox in enumerate(table_bboxes):
if intersect(table_bbox, c.bbox):
prev_bbox = table_bbox
prev_content = table_contents[table_id]
prev_content.append(c)
break
return table_contents
_bbox = collections.namedtuple("_bbox", ["bbox"])
_inf_bbox = _bbox([float("inf")] * 4)
def _gaps_from(intervals):
    """
    From a sorted list of (start, end) intervals, extract the list of gap
    sizes between consecutive intervals.
    """
    sliding_window = zip(intervals, intervals[1:])
    gaps = [b[0] - a[1] for a, b in sliding_window]
    return gaps
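# Illustrative sketch (not in the original module): gap sizes between
# consecutive (start, end) intervals.
def _demo_gaps_from():
    """
    >>> _gaps_from([(0, 10), (14, 20), (21, 30)])
    [4, 1]
    """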
def project_onto(objs, axis, min_gap_size=4.0):
"""
Projects object bboxes onto the axis and return the
unioned intervals and groups of objects in intervals.
"""
if axis == "x":
axis = 0
if axis == "y":
axis = 1
axis_end = axis + 2
if axis == 0: # if projecting onto X axis
objs.sort(key=lambda o: o.x0)
else:
objs.sort(key=lambda o: o.y0)
intervals = []
groups = []
start_i = 0
start = objs[0].bbox[axis]
end = objs[0].bbox[axis_end]
# Use _inf_bbox to trigger the last interval divide
for o_i, o in enumerate(chain(objs, [_inf_bbox])):
# Get current interval
o_start = o.bbox[axis]
o_end = o.bbox[axis_end]
# start new interval when gap with previous end is big
if o_start > end + min_gap_size:
# Append new interval coordinates for children
intervals.append((start, end))
# Append child object group on page
groups.append(objs[start_i:o_i])
# Mark next obj list range
start_i = o_i
start = o_start
# Always check to extend current interval to new end
if o_end > end:
end = o_end
# else do nothing
return intervals, groups
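# Illustrative sketch (not in the original module): _FakeBox is a
# hypothetical stand-in for a pdfminer layout object. Boxes closer than
# min_gap_size union into one x interval; a distant box starts a new one.
def _demo_project_onto():
    """
    >>> class _FakeBox:
    ...     def __init__(self, x0, y0, x1, y1):
    ...         self.bbox = (x0, y0, x1, y1)
    ...         self.x0, self.y0, self.x1, self.y1 = x0, y0, x1, y1
    >>> boxes = [_FakeBox(0, 0, 10, 5), _FakeBox(12, 0, 20, 5), _FakeBox(40, 0, 50, 5)]
    >>> intervals, groups = project_onto(boxes, "x", min_gap_size=4.0)
    >>> intervals
    [(0, 20), (40, 50)]
    >>> [len(g) for g in groups]
    [2, 1]
    """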
def recursive_xy_divide(elems, avg_font_size):
"""
Recursively group/divide the document by white stripes
by projecting elements onto alternating axes as intervals.
avg_font_size: the minimum gap size between elements below
which we consider interval continuous.
"""
log = logging.getLogger(__name__)
    log.info("avg_font_size: %s", avg_font_size)
objects = list(elems.mentions)
objects.extend(elems.segments)
bboxes = []
# A tree that is a list of its children
# bboxes can be recursively reconstructed from
# the leaves
def divide(objs, bbox, h_split=True, is_single=False):
"""
Recursive wrapper for splitting a list of objects
with bounding boxes.
h_split: whether to split along y axis, otherwise
we split along x axis.
"""
if not objs:
return []
# range start/end indices
axis = 1 if h_split else 0
intervals, groups = project_onto(objs, axis, avg_font_size)
# base case where we can not actually divide
single_child = len(groups) == 1
# Can not divide in both X and Y, stop
if is_single and single_child:
bboxes.append(bbox)
return objs
else:
children = []
for interval, group in zip(intervals, groups):
# Create the bbox for the subgroup
sub_bbox = np.array(bbox)
sub_bbox[[axis, axis + 2]] = interval
# Append the sub-document tree
child = divide(group, sub_bbox, not h_split, single_child)
children.append(child)
return children
full_page_bbox = (0, 0, elems.layout.width, elems.layout.height)
# Filter out invalid objects
objects = [o for o in objects if inside(full_page_bbox, o.bbox)]
log.info("avg_font_size for dividing", avg_font_size)
tree = divide(objects, full_page_bbox) if objects else []
return bboxes, tree
| pdftotree-master | pdftotree/utils/pdf/layout_utils.py |
"""
Created on Dec 2, 2015
@author: xiao
"""
import bisect
import logging
from builtins import object, range, zip
from collections import defaultdict
from functools import cmp_to_key
from pprint import pformat
import numpy as np
import pandas as pd
from pdfminer.utils import Plane
from pdftotree.utils.pdf.vector_utils import inside, reading_order
logger = logging.getLogger(__name__)
class Cell(object):
"""Represents a cell with no visual dividers inside"""
def __init__(self, origin, texts=[], rowspan=1, colspan=1):
"""
origin: the top left grid coordinate of the cell
"""
self.rowstart, self.colstart = origin
self.rowend = self.rowstart + rowspan
self.colend = self.colstart + colspan
self.texts = texts
def __str__(self, *args, **kwargs):
return ",".join([m.get_text().encode("utf8") for m in self.texts])
class Grid(object):
"""
A rendered grid to capture structural layout info
"""
def __init__(self, mentions, lines, region, min_cell_size=6.0):
"""
Constructor
"""
self.min_cell_size = min_cell_size
vlines, hlines = _split_vlines_hlines(lines)
self.xs = [v.xc for v in vlines]
self.ys = [h.yc for h in hlines]
# Remove closely clustered lines
# Also make sure there is at least 1 mega column for the table
self.xs = _retain_centroids(self.xs + [region.x0, region.x1], min_cell_size)
self.ys = _retain_centroids(self.ys + [region.y0, region.y1], min_cell_size)
self.xranges = list(zip(self.xs, self.xs[1:]))
self.yranges = list(zip(self.ys, self.ys[1:]))
self.num_cols = len(self.xranges)
self.num_rows = len(self.yranges)
# Grid contents
self._grid = np.full(
[self.num_rows, self.num_cols], None, dtype=np.dtype(object)
)
grid = self._grid
# Record whether a particular cell boundary is present
line_plane = Plane(region.bbox)
line_plane.extend(lines)
vbars, hbars = self._mark_grid_bounds(line_plane, region)
cells = []
# Establish cell regions
for i in range(self.num_rows):
for j in range(self.num_cols):
if grid[i, j]:
continue # Skip already marked cells
# Merge with cell above
if i > 0 and not hbars[i, j]:
grid[i, j] = cell = grid[i - 1, j]
cell.rowend = i + 1
# Merge with cell left
elif j > 0 and not vbars[i, j]:
grid[i, j] = cell = grid[i, j - 1]
cell.colend = j + 1
# Create new cell otherwise
else:
grid[i, j] = cell = Cell([i, j])
cells.append(cell)
# Now get the cell's contents by using its boundary
text_plane = Plane(region.bbox)
text_plane.extend(mentions)
for cell in cells:
x0 = self.xs[cell.colstart]
x1 = self.xs[cell.colend]
y0 = self.ys[cell.rowstart]
y1 = self.ys[cell.rowend]
bbox = (x0, y0, x1, y1)
# Keep mentions whose centers are inside the cell
cell.texts = [
m for m in text_plane.find(bbox) if inside(bbox, (m.xc, m.yc) * 2)
]
# TODO: provide HTML conversion here
self.get_normalized_grid()
def to_dataframe(self):
return pd.DataFrame(self._grid)
def to_html(self):
return self.to_dataframe().to_html(index=False, header=False)
def get_normalized_grid(self):
"""
Analyzes subcell structure
"""
# Resolve multirow mentions, TODO: validate against all PDFs
# subcol_count = 0
mega_rows = []
for row_id, row in enumerate(self._grid):
# maps yc_grid -> [mentions]
subrow_across_cell = defaultdict(list)
for col_id, cell in enumerate(row):
# Keep cell text in reading order
cell.texts.sort(key=cmp_to_key(reading_order))
logger.debug("=" * 50)
for m in cell.texts:
subrow_across_cell[m.yc_grid].append(m)
# prev = m
logger.debug(pformat(dict(subrow_across_cell)))
mega_rows.append(subrow_across_cell)
# Multiline paragraph check
# Subrow/Subcolumn
return mega_rows
def _mark_grid_bounds(self, plane, region_bbox):
"""
Assume all lines define a complete grid over the region_bbox.
Detect which lines are missing so that we can recover merged
cells.
"""
# Grid boundaries
        # NumPy >= 1.24 removed np.bool; the builtin bool works everywhere.
        vbars = np.zeros([self.num_rows, self.num_cols + 1], dtype=bool)
        hbars = np.zeros([self.num_rows + 1, self.num_cols], dtype=bool)
def closest_idx(arr, elem):
left = bisect.bisect_left(arr, elem) - 1
right = bisect.bisect_right(arr, elem) - 1
return left if abs(arr[left] - elem) < abs(arr[right] - elem) else right
# Figure out which separating segments are missing, i.e. merge cells
for row, (y0, y1) in enumerate(self.yranges):
yc = (y0 + y1) // 2
for l in plane.find((region_bbox.x0, yc, region_bbox.x1, yc)):
vbars[row, closest_idx(self.xs, l.xc)] = True
for col, (x0, x1) in enumerate(self.xranges):
xc = (x0 + x1) // 2
for l in plane.find((xc, region_bbox.y0, xc, region_bbox.y1)):
hbars[closest_idx(self.ys, l.yc), col] = True
return vbars, hbars
############################
# Utilities
############################
def _retain_centroids(numbers, thres):
"""Only keep one number for each cluster within thres of each other"""
numbers.sort()
prev = -1
ret = []
for n in numbers:
if prev < 0 or n - prev > thres:
ret.append(n)
prev = n
return ret
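# Illustrative sketch (not in the original module): coordinates within the
# threshold of a kept centroid are dropped, leaving one value per cluster.
def _demo_retain_centroids():
    """
    >>> _retain_centroids([10.0, 12.0, 30.0, 33.0, 100.0], thres=6.0)
    [10.0, 30.0, 100.0]
    """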
def _split_vlines_hlines(lines):
"""Separates lines into horizontal and vertical ones"""
vlines, hlines = [], []
for line in lines:
(vlines if line.x1 - line.x0 < 0.1 else hlines).append(line)
return vlines, hlines
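# Illustrative sketch (not in the original module): _Seg is a hypothetical
# stand-in for a pdfminer LTLine; segments with x-extent below 0.1 are
# classified as vertical, the rest as horizontal.
def _demo_split_vlines_hlines():
    """
    >>> class _Seg:
    ...     def __init__(self, x0, y0, x1, y1):
    ...         self.x0, self.y0, self.x1, self.y1 = x0, y0, x1, y1
    >>> v, h = _split_vlines_hlines([_Seg(5, 0, 5, 40), _Seg(0, 10, 90, 10)])
    >>> len(v), len(h)
    (1, 1)
    """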
def _npiter(arr):
"""Wrapper for iterating numpy array"""
for a in np.nditer(arr, flags=["refs_ok"]):
c = a.item()
if c is not None:
yield c
| pdftotree-master | pdftotree/utils/pdf/grid.py |
"""
Handles abstract rendering of the layout
in order to extract local visual features
Created on Jan 28, 2016
@author: xiao
"""
import logging
import numpy as np
from pdftotree.utils.pdf.vector_utils import x0, x1, y0, y1
logger = logging.getLogger(__name__)
class Renderer(object):
"""
enumeration objects to be placed into the
rendered image
"""
empty = 0
horizontal_line = -1
vertical_line = -2
text = -3
img = -4
curve = -5
misc = -6
def __init__(self, elems, scaler=1):
"""
Initializes the rendered object grid with specified
scaler so we can map original coordinates into the
new grid map.
"""
self.scaler = scaler
layout = elems.layout
width = int(np.ceil(scaler * layout.width))
height = int(np.ceil(scaler * layout.height))
self.grid = np.zeros((width, height), dtype=np.int8)
# Estimates the grid size in megabytes
logger.info(self.grid.nbytes / float(1048576))
for line in elems.segments:
if line.height < 0.1: # Horizontal lines
self.draw_rect(line.bbox, self.horizontal_line)
elif line.width < 0.1: # Vertical lines
self.draw_rect(line.bbox, self.vertical_line)
for mention in elems.mentions:
self.draw_rect(mention.bbox, self.text)
for figure in elems.figures:
self.draw_rect(figure.bbox, self.img)
def draw_rect(self, bbox, cell_val):
"""
Fills the bbox with the content values
Float bbox values are normalized to have non-zero area
"""
new_x0 = int(bbox[x0])
new_y0 = int(bbox[y0])
new_x1 = max(new_x0 + 1, int(bbox[x1]))
new_y1 = max(new_y0 + 1, int(bbox[y1]))
self.grid[new_x0:new_x1, new_y0:new_y1] = cell_val
@staticmethod
def is_mention(cell_val):
"""
Nonnegative values in grid cells are reserved for mention ids
"""
return cell_val >= 0
| pdftotree-master | pdftotree/utils/pdf/render.py |
"""
Created on Oct 12, 2015
Various routines to work with pdf objects
extracted with PDFminer
@author: xiao
"""
import collections
import collections.abc
import re
import string
from collections import Counter
from typing import List, NamedTuple, Optional, Tuple, Union
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import (
LTAnno,
LTChar,
LTComponent,
LTContainer,
LTCurve,
LTFigure,
LTLayoutContainer,
LTLine,
LTPage,
LTTextContainer,
LTTextLine,
)
from pdfminer.utils import INF, apply_matrix_pt
from pdftotree.utils.img_utils import normalize_bbox, normalize_pts
# from pdftotree.utils.pdf.vector_utils import *
# Compact wrapper representation for the pdf
class PDFElems(NamedTuple):
mentions: List[LTTextLine]
segments: List[LTLine]
curves: List[LTCurve]
figures: List[LTFigure]
layout: LTPage
chars: List[Union[LTChar, LTAnno]]
class CustomPDFPageAggregator(PDFPageAggregator):
"""
A custom version of the default pdf miner stateful draw call
interpreter. Handles the creation of python object from pdf draw
calls.
Changes the way LTCurves are created - break up large polylines
and rectangles into standard segments.
"""
line_only_shape = re.compile("ml+h?")
def paint_path(self, gstate, stroke, fill, evenodd, path):
"""
Converting long paths to small segments each time we m=Move
or h=ClosePath for polygon
"""
shape = "".join(x[0] for x in path)
prev_split = 0
for i in range(len(shape)):
if shape[i] == "m" and prev_split != i:
self.paint_single_path(
gstate, stroke, fill, evenodd, path[prev_split:i]
)
prev_split = i
if shape[i] == "h":
self.paint_single_path(
gstate, stroke, fill, evenodd, path[prev_split : i + 1]
)
prev_split = i + 1
# clean up remaining segments
if prev_split < len(shape):
self.paint_single_path(gstate, stroke, fill, evenodd, path[prev_split:])
def paint_single_path(self, gstate, stroke, fill, evenodd, path):
"""
Converting a single path draw command into lines and curves objects
"""
if len(path) < 2:
return
shape = "".join(x[0] for x in path)
pts = []
for p in path:
for i in range(1, len(p), 2):
pts.append(apply_matrix_pt(self.ctm, (p[i], p[i + 1])))
# Line mode
if self.line_only_shape.match(shape):
# check for sloped lines first
has_slope = False
for i in range(len(pts) - 1):
if pts[i][0] != pts[i + 1][0] and pts[i][1] != pts[i + 1][1]:
has_slope = True
break
if not has_slope:
for i in range(len(pts) - 1):
self.cur_item.add(LTLine(gstate.linewidth, pts[i], pts[i + 1]))
# Adding the closing line for a polygon, especially rectangles
if shape.endswith("h"):
self.cur_item.add(LTLine(gstate.linewidth, pts[0], pts[-1]))
return
        # Add the curve as an arbitrary polyline (Bézier curve info is lost here)
self.cur_item.add(LTCurve(gstate.linewidth, pts))
def normalize_pdf(self, layout: LTPage, scaler) -> Tuple[PDFElems, Counter]:
"""
Normalizes pdf object coordinates (bot left) to image
conventions (top left origin).
Returns the list of chars and average char size
"""
chars = []
mentions: List[LTTextContainer] = []
height = scaler * layout.height
font_size_counter = collections.Counter()
# Lines longer than this are included in segments
pts_thres = 2.0 * scaler
segments = []
curves = []
figures = []
container: LTContainer = None
_font = None
def processor(m, parent):
"""Convert pdfminer.six's LT* into pdftotree's PDFElems."""
# Traverse
if isinstance(m, LTContainer):
for child in m:
processor(child, m)
# Normalizes the coordinate system to be consistent with
# image library conventions (top left as origin)
if isinstance(m, LTComponent):
m.set_bbox(normalize_bbox(m.bbox, height, scaler))
# Assign LT* into PDFElems
if isinstance(m, LTCurve):
m.pts = normalize_pts(m.pts, height, scaler)
# Only keep longer lines here
if isinstance(m, LTLine) and max(m.width, m.height) > pts_thres:
segments.append(m)
else: # Here we exclude straight lines from curves
curves.append(m)
elif isinstance(m, LTFigure):
figures.append(m)
elif isinstance(m, LTChar):
if not isinstance(parent, LTTextLine):
# Construct LTTextContainer from LTChar(s) that are not
# children of LTTextLine, then group LTChar(s) into LTTextLine
nonlocal _font
nonlocal container
font = (m.fontname, m.size)
dummy_bbox = (+INF, +INF, -INF, -INF)
if font != _font:
if _font is not None:
layout_container = LTLayoutContainer(dummy_bbox)
for textline in layout_container.group_objects(
self.laparams, container
):
cleaned_textline = _clean_textline(textline)
if cleaned_textline is not None:
mentions.append(cleaned_textline)
container = LTContainer(dummy_bbox)
_font = font
container.add(m)
# Collect chars for later stats analysis
chars.append(m)
# fonts could be rotated 90/270 degrees
font_size = _font_size_of(m)
font_size_counter[font_size] += 1
elif isinstance(m, LTTextLine):
cleaned_textline = _clean_textline(m)
if cleaned_textline is not None:
mentions.append(cleaned_textline)
elif isinstance(m, LTAnno): # Also include non character annotations
chars.append(m)
return
processor(layout, None)
# Resets mention y0 to the first y0 of alphanum character instead of
# considering exotic unicode symbols and sub/superscripts to reflect
# accurate alignment info
for m in mentions:
# best_y1 = min(c.y1 for c in m if isinstance(c, LTChar))
alphanum_c = next((c for c in m if c.get_text().isalnum()), None)
if alphanum_c:
m.set_bbox((m.x0, alphanum_c.y0, m.x1, alphanum_c.y1))
# mentions.sort(key = lambda m: (m.y0,m.x0))
elems = PDFElems(mentions, segments, curves, figures, layout, chars)
return elems, font_size_counter
def _print_dict(elem_dict):
    """
    Print a dict in a readable way
    """
    # Python 3: dict.iteritems() no longer exists and the abstract base
    # classes live in collections.abc.
    for key, value in sorted(elem_dict.items()):
        if isinstance(value, collections.abc.Iterable):
            print(key, len(value))
        else:
            print(key, value)
def _font_size_of(ch):
if isinstance(ch, LTChar):
return max(map(abs, ch.matrix[:4]))
return -1
def _clean_textline(item: LTTextLine) -> Optional[LTTextLine]:
clean_text = keep_allowed_chars(item.get_text()).strip()
# Skip empty and invalid lines
if clean_text:
# TODO: add subscript detection and use latex underscore
# or superscript
item.clean_text = clean_text
item.font_name, item.font_size = _font_of_mention(item)
return item
else:
return None
def _font_of_mention(m):
"""
Returns the font type and size of the first alphanumeric
char in the text or None if there isn't any.
"""
for ch in m:
if isinstance(ch, LTChar) and ch.get_text().isalnum():
return (ch.fontname, _font_size_of(ch))
return (None, 0)
# Initialize the set of chars allowed in output
_ascii_allowed = [False] * 128
_forbidden_chars = "\n\t"
for c in string.printable:
_ascii_allowed[ord(c)] = True
for c in _forbidden_chars:
_ascii_allowed[ord(c)] = False
def _allowed_char(c):
"""
Returns whether the given unicode char is allowed in output
"""
c = ord(c)
if c < 0:
return False
if c < 128:
return _ascii_allowed[c]
    # Generally allow unicode; TODO: check for unicode control characters
return True
def keep_allowed_chars(text):
"""
Cleans the text for output
"""
# print ','.join(str(ord(c)) for c in text)
return "".join(" " if c == "\n" else c for c in text.strip() if _allowed_char(c))
| pdftotree-master | pdftotree/utils/pdf/pdf_utils.py |
"""
Created on Oct 26, 2015
Parsing raw PDF data into python data structures
@author: xiao
"""
import logging
import math
import operator
import sys
from builtins import filter, range, str, zip
from collections import Counter, defaultdict
from functools import cmp_to_key
from typing import Any, Dict, List, Tuple
import numpy as np
from pdfminer.layout import LTFigure, LTTextLine
from pdfminer.utils import Plane
from pdftotree.utils.pdf.node import Node
from pdftotree.utils.pdf.pdf_utils import PDFElems
from pdftotree.utils.pdf.vector_utils import center, intersect, l1, xy_reading_order
logger = logging.getLogger(__name__)
def parse_layout(elems, font_stat, combine=False):
"""
Parses pdf texts into a hypergraph grouped into rows
and columns and then output
"""
boxes_segments = elems.segments
boxes_curves = elems.curves
boxes_figures = elems.figures
page_width = elems.layout.width
# page_height = elems.layout.height
boxes = elems.mentions
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
width = get_page_width(boxes + boxes_segments + boxes_figures + boxes_curves)
char_width = get_char_width(boxes)
grid_size = avg_font_pts / 2.0
for i, m in enumerate(boxes + elems.figures):
m.id = i
m.feats = defaultdict(bool)
prefix = ""
if isinstance(m, LTTextLine) and m.font_name:
prefix = m.font_name + "-" + str(m.font_size) + "-"
m.xc = (m.x0 + m.x1) / 2.0
m.yc = (m.y0 + m.y1) / 2.0
m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size
m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size
m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size
m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size
tbls, tbl_features = cluster_vertically_aligned_boxes(
boxes,
elems.layout.bbox,
avg_font_pts,
width,
char_width,
boxes_segments,
boxes_curves,
boxes_figures,
page_width,
combine,
)
return tbls, tbl_features
def cluster_vertically_aligned_boxes(
boxes,
page_bbox,
avg_font_pts,
width,
char_width,
boxes_segments,
boxes_curves,
boxes_figures,
page_width,
combine,
):
# Filter out boxes with zero width or height
filtered_boxes = []
for bbox in boxes:
if bbox.x1 - bbox.x0 > 0 and bbox.y1 - bbox.y0 > 0:
filtered_boxes.append(bbox)
boxes = filtered_boxes
# Too many "." in the Table of Content pages
if len(boxes) == 0:
logger.warning("No boxes were found to cluster.")
return [], []
elif len(boxes) > 3500:
logger.warning("Too many '.' in the Table of Content pages?")
return [], []
plane = Plane(page_bbox)
plane.extend(boxes)
# initialize clusters
cid2obj = [set([i]) for i in range(len(boxes))]
# default object map to cluster with its own index
obj2cid = list(range(len(boxes)))
    prev_clusters = list(obj2cid)  # snapshot, not an alias, so the fixpoint check works
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
if (
box2[1] < box1[3]
or (box2[1] - box1[1] < 1.5 * avg_font_pts)
or (box2[3] - box1[3] < 1.5 * avg_font_pts)
):
# can probably do better if we find the average space
# between words
if (
abs(box1[0] - box2[0]) < 3
or abs(box1[2] - box2[2]) < 3
or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
or ((box1[0] < box2[0]) and (box1[2] > box2[0]))
or ((box1[0] > box2[0]) and (box2[2] > box1[0]))
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
# move all objects from cluster cid2 to cid1
# reassign cluster ids for all such objects as well
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
if prev_clusters == obj2cid:
break
        prev_clusters = list(obj2cid)
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
rid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
# default object map to cluster with its own index
obj2rid = list(range(len(boxes)))
    prev_clusters = list(obj2rid)  # snapshot, not an alias
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if (
(abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
or ((abs(box1[3] - box2[3]) < 0.11 * avg_font_pts))
or (
round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
rid1 = obj2rid[min_i]
rid2 = obj2rid[max_i]
for obj_iter in rid2obj[rid2]:
rid2obj[rid1].add(obj_iter)
obj2rid[obj_iter] = rid1
rid2obj[rid2] = set()
if prev_clusters == obj2rid:
break
        prev_clusters = list(obj2rid)
not_merge = set()
    for i1, b1 in enumerate(boxes):
        for i2 in cid2obj[obj2cid[i1]]:
            if i1 == i2:
                continue
            # Bug fix: b2 was never bound in this loop and silently reused a
            # stale value from the loops above; bind it to the current box.
            b2 = boxes[i2]
row1 = obj2rid[i1]
row2 = obj2rid[i2]
if row1 == row2:
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
text_1 = 0.0
for obj in rid2obj[row1]:
text_1 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
text_2 = 0.0
for obj in rid2obj[row2]:
text_2 += boxes[obj].bbox[2] - boxes[obj].bbox[0]
if abs(text_1 - text_2) / width > 0.1:
min_i = min(i1, i2)
max_i = max(i1, i2)
not_merge.add((min_i, max_i))
# Alignment Features
# If text boxes are very close in a row
if_row_connected = defaultdict(int)
num_row_connected = defaultdict(lambda: 1)
# If text is merged using span code in adjacent rows, this feature tells
# the number of times the cluster went through span based clustering
if_connected_by_span = defaultdict(int)
num_connected_by_span = defaultdict(lambda: 1)
# If columns were merged using cluster alignment
if_connected_by_align = defaultdict(int)
num_connected_by_align = defaultdict(lambda: 1)
# If vertical columns were merged
if_vertical_columns_merged = defaultdict(int)
num_vertical_columns_merged = defaultdict(lambda: 1)
# Number of Line Segments, Curves and Figures
num_segments = defaultdict(int)
num_curves = defaultdict(int)
num_figures = defaultdict(int)
# Average Word Space
total_word_space = defaultdict(float)
avg_word_space = defaultdict(float)
avg_word_space_norm = defaultdict(float)
node_space = defaultdict(float)
avg_node_space = defaultdict(float)
avg_node_space_norm = defaultdict(float)
cid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
obj2cid = list(
range(len(boxes))
) # default object map to cluster with its own index
    prev_clusters = list(obj2cid)  # snapshot, not an alias
# add the code for merging close text boxes in particular row
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if obj2rid[i1] == obj2rid[i2]:
if (
(b1.bbox[0] < b2.bbox[0])
and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
) or (
(b2.bbox[0] < b1.bbox[0])
and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
# Features
if_row_connected[cid1] = 1
if_row_connected[cid2] = 0
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if prev_clusters == obj2cid:
break
        prev_clusters = list(obj2cid)
# vertical alignment code
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
if (
box2[1] < box1[3]
or (box2[1] - box1[1] < 1.5 * avg_font_pts)
or (box2[3] - box1[3] < 1.5 * avg_font_pts)
): # can probably do better if we find the average space between words
if (
abs(box1[0] - box2[0]) < 3
or abs(box1[2] - box2[2]) < 3
or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
):
min_i = min(i1, i2)
max_i = max(i1, i2)
if (min_i, max_i) not in not_merge:
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
# move all objects from cluster cid2 to cid1
# reassign cluster ids for all such objects as well
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
# Features
if_connected_by_span[cid1] = 1
if_connected_by_span[cid2] = 0
if (
if_row_connected[cid1] == 1
or if_row_connected[cid2] == 1
):
if_row_connected[cid1] = 1
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if_row_connected[cid2] = 0
num_connected_by_span[cid1] = (
num_connected_by_span[cid1]
+ num_connected_by_span[cid2]
)
num_connected_by_span[cid2] = 0
if prev_clusters == obj2cid:
break
        prev_clusters = list(obj2cid)
# blacklist nearly half-page wide clusters before horizontal merging
cid2obj2 = cid2obj[:]
obj2cid2 = obj2cid[:]
blacklist = set()
blacklist_obj = set()
for cid_iter in range(len(cid2obj2)):
cid = cid2obj2[cid_iter]
xmin = float("Inf")
xmax = float("-Inf")
for obj in cid:
xmin = min(xmin, boxes[obj].bbox[0])
xmax = max(xmax, boxes[obj].bbox[2])
if ((xmax - xmin) > width / 2.75 and (xmax - xmin) < width / 2) or (
(xmax - xmin) > 0.9 * width
):
blacklist.add(cid_iter)
for obj in cid:
blacklist_obj.add(obj)
for obj_iter in rid2obj[obj2rid[obj]]:
if (
boxes[obj_iter].bbox[0] >= xmin
and boxes[obj_iter].bbox[2] <= xmax
):
blacklist_obj.add(obj_iter)
# create a cluster span
cid2span = {}
for cid in range(len(cid2obj)):
cid2span[cid] = {}
cid2span[cid]["min_x"] = sys.maxsize
cid2span[cid]["min_y"] = sys.maxsize
cid2span[cid]["max_x"] = -sys.maxsize - 1
cid2span[cid]["max_y"] = -sys.maxsize - 1
for obj in cid2obj[cid]:
cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
cid2cid = {}
cid_pair_compared = set()
cid2cid2 = [cid for cid in range(len(cid2obj))]
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if i1 == i2:
continue
if i1 in blacklist_obj or i2 in blacklist_obj:
continue
cid1 = obj2cid[i1]
cid2 = obj2cid[i2]
if (min(cid1, cid2), max(cid1, cid2)) in cid_pair_compared:
continue
if cid1 == cid2:
continue
if obj2rid[i1] == obj2rid[i2]:
continue
if cid1 not in cid2cid:
cid2cid[cid1] = set()
if cid2 not in cid2cid:
cid2cid[cid2] = set()
if cid2span[cid1]["min_y"] < cid2span[cid2]["min_y"]:
box1 = [
cid2span[cid1]["min_x"],
cid2span[cid1]["min_y"],
cid2span[cid1]["max_x"],
cid2span[cid1]["max_y"],
]
box2 = [
cid2span[cid2]["min_x"],
cid2span[cid2]["min_y"],
cid2span[cid2]["max_x"],
cid2span[cid2]["max_y"],
]
else:
box1 = [
cid2span[cid2]["min_x"],
cid2span[cid2]["min_y"],
cid2span[cid2]["max_x"],
cid2span[cid2]["max_y"],
]
box2 = [
cid2span[cid1]["min_x"],
cid2span[cid1]["min_y"],
cid2span[cid1]["max_x"],
cid2span[cid1]["max_y"],
]
if ((box1[1] < box2[1]) and (box1[3] > box2[1])) or (
(box1[1] > box2[1]) and (box1[1] < box2[3])
):
continue
cid_pair_compared.add((min(cid1, cid2), max(cid1, cid2)))
query_rect = (
min(box1[0], box2[0]),
min(box1[1], box2[1]),
max(box1[2], box2[2]),
max(box1[3], box2[3]),
)
connected = True
for i3, b3 in enumerate(boxes):
if (i3 == i1) or (i3 == i2):
continue
if obj2cid[i1] == obj2cid[i3] or obj2cid[i2] == obj2cid[i3]:
continue
box3 = b3.bbox
if intersect(query_rect, box3):
connected = False
break
if (
(round(box1[0]) == round(box2[0]) or round(box1[2]) == round(box2[2]))
and connected
) or (
round((box1[0] + box1[2]) / 2) == round((box2[0] + box2[2]) / 2)
and connected
):
cid2cid[min(cid1, cid2)].add(max(cid1, cid2))
min_cid = min(cid1, cid2)
max_cid = max(cid1, cid2)
for cid_iter in cid2cid2:
if cid2cid2[cid_iter] == cid2cid2[max_cid]:
cid2cid2[cid_iter] = cid2cid2[min_cid]
# post-process cid2cid
cid2obj2 = cid2obj[:]
obj2cid2 = obj2cid[:]
for cid in range(len(cid2cid2)):
cid_merge = cid2cid2[cid]
if cid != cid_merge:
for obj_iter in cid2obj2[cid]:
cid2obj2[cid_merge].add(obj_iter)
obj2cid2[obj_iter] = cid_merge
cid2obj2[cid] = set()
# Features
if_connected_by_align[cid_merge] = 1
if_connected_by_align[cid] = 0
if if_row_connected[cid_merge] == 1 or if_row_connected[cid] == 1:
if_row_connected[cid_merge] = 1
num_row_connected[cid_merge] += num_row_connected[cid]
num_row_connected[cid] = 0
                if_row_connected[cid] = 0
if if_connected_by_span[cid_merge] == 1 or if_connected_by_span[cid] == 1:
if_connected_by_span[cid_merge] = 1
num_connected_by_span[cid_merge] += num_connected_by_span[cid]
num_connected_by_span[cid] = 0
if_connected_by_span[cid] = 0
num_connected_by_align[cid_merge] += num_connected_by_align[cid]
num_connected_by_align[cid] = 0
# code to merge columns for table
    prev_clusters = obj2cid2[:]  # copy, so in-place merges are detected by the fixpoint check
while True:
for obj1, b1 in enumerate(boxes):
cid1 = obj2cid2[obj1]
rid1 = obj2rid[obj1]
if cid1 in blacklist:
continue
if obj1 in blacklist_obj:
continue
for obj2, b2 in enumerate(boxes):
if obj1 == obj2:
continue
if obj2cid2[obj2] == cid1:
rid2 = obj2rid[obj2]
if rid1 == rid2:
continue
for obj3 in rid2obj[rid2]:
cid3 = obj2cid2[obj3]
if obj3 in blacklist_obj:
continue
if cid1 != cid3:
for obj4 in cid2obj2[cid3]:
if obj4 == obj3:
continue
if obj2rid[obj4] == rid1:
min_cid = min(cid1, cid3)
max_cid = max(cid1, cid3)
for obj_iter in cid2obj2[max_cid]:
cid2obj2[min_cid].add(obj_iter)
obj2cid2[obj_iter] = min_cid
cid2obj2[max_cid] = set()
# Features
if_vertical_columns_merged[min_cid] = 1
if_vertical_columns_merged[max_cid] = 0
num_vertical_columns_merged[
min_cid
] += num_vertical_columns_merged[max_cid]
num_vertical_columns_merged[max_cid] = 0
if (
if_row_connected[min_cid] == 1
or if_row_connected[max_cid] == 1
):
if_row_connected[min_cid] = 1
num_row_connected[min_cid] += num_row_connected[
max_cid
]
num_row_connected[max_cid] = 0
if_row_connected[max_cid] = 0
if (
if_connected_by_span[min_cid] == 1
or if_connected_by_span[max_cid] == 1
):
if_connected_by_span[min_cid] = 1
num_connected_by_span[
min_cid
] += num_connected_by_span[max_cid]
num_connected_by_span[max_cid] = 0
if_connected_by_span[max_cid] = 0
if (
if_connected_by_align[min_cid] == 1
or if_connected_by_align[max_cid] == 1
):
if_connected_by_align[min_cid] = 1
num_connected_by_align[
min_cid
] += num_connected_by_align[max_cid]
num_connected_by_align[max_cid] = 0
if_connected_by_align[max_cid] = 0
break
if prev_clusters == obj2cid2:
break
        prev_clusters = obj2cid2[:]
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj2)]
nodes = [Node(elems) for elems in clusters]
node_indices = [i for i, x in enumerate(cid2obj2) if x]
merge_indices = [i for i in range(len(node_indices))]
merge_indices = merge_nodes(nodes, merge_indices)
# Features
for idx in range(len(merge_indices)):
if merge_indices[idx] != idx:
cid1 = node_indices[merge_indices[idx]]
cid2 = node_indices[idx]
if if_row_connected[cid1] == 1 or if_row_connected[cid2] == 1:
if_row_connected[cid1] = 1
num_row_connected[cid1] += num_row_connected[cid2]
num_row_connected[cid2] = 0
if_row_connected[cid2] = 0
if if_connected_by_span[cid1] == 1 or if_connected_by_span[cid2] == 1:
if_connected_by_span[cid1] = 1
num_connected_by_span[cid1] += num_connected_by_span[cid2]
num_connected_by_span[cid2] = 0
if_connected_by_span[cid2] = 0
if if_connected_by_align[cid1] == 1 or if_connected_by_align[cid2] == 1:
if_connected_by_align[cid1] = 1
num_connected_by_align[cid1] += num_connected_by_align[cid2]
num_connected_by_align[cid2] = 0
if_connected_by_align[cid2] = 0
if (
if_vertical_columns_merged[cid1] == 1
or if_vertical_columns_merged[cid2] == 1
):
if_vertical_columns_merged[cid1] = 1
num_vertical_columns_merged[cid1] += num_vertical_columns_merged[cid2]
num_vertical_columns_merged[cid2] = 0
if_vertical_columns_merged[cid2] = 0
# Get Word Spacing Features
rid2space = defaultdict(float)
rid2space_norm = defaultdict(float)
row_indices = [i for i, x in enumerate(rid2obj) if x]
for rid in row_indices:
obj_list = list(rid2obj[rid])
if len(obj_list) == 1:
rid2space[rid] = 0
continue
obj_boxes = [boxes[obj].bbox[0] for obj in obj_list]
sorted_obj_idx = [
i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
]
for obj_idx in range(len(sorted_obj_idx) - 1):
rid2space[rid] += (
boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
- boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
)
rid2space_norm[rid] = rid2space[rid] / (len(obj_list) - 1)
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
if merge_indices[idx] == idx:
obj_list = []
for idx_iter in range(len(merge_indices)):
if merge_indices[idx_iter] == idx:
obj_list += list(cid2obj2[node_indices[idx_iter]])
obj_list = list(set(obj_list))
rid_list = list(set([obj2rid[obj] for obj in obj_list]))
for rid in rid_list:
total_word_space[node_idx] += rid2space[rid]
avg_word_space_norm[node_idx] += rid2space_norm[rid]
            # obj_boxes must stay index-aligned with obj_list for the sorted
            # indexing below
            obj_boxes = [boxes[obj].bbox[0] for obj in obj_list]
sorted_obj_idx = [
i[0] for i in sorted(enumerate(obj_boxes), key=lambda x: x[1])
]
for obj_idx in range(len(sorted_obj_idx) - 1):
node_space[node_idx] += (
boxes[obj_list[sorted_obj_idx[obj_idx + 1]]].bbox[2]
- boxes[obj_list[sorted_obj_idx[obj_idx]]].bbox[0]
)
            avg_node_space_norm[node_idx] += node_space[node_idx] / max(
                len(obj_boxes) - 1, 1
            )
avg_word_space[node_idx] = total_word_space[node_idx] / len(rid_list)
avg_word_space_norm[node_idx] /= len(rid_list)
avg_node_space[node_idx] = node_space[node_idx] / len(rid_list)
avg_node_space_norm[node_idx] /= len(rid_list)
new_nodes = []
new_node_indices = []
for idx in range(len(merge_indices)):
if merge_indices[idx] == idx:
new_nodes.append(nodes[idx])
new_node_indices.append(node_indices[idx])
nodes = new_nodes
node_indices = new_node_indices
# Features
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
node_bbox = (node.x0, node.y0, node.x1, node.y1)
for i1, b1 in enumerate(boxes_segments):
if intersect(node_bbox, b1.bbox):
num_segments[node_idx] += 1
for i1, b1 in enumerate(boxes_figures):
if intersect(node_bbox, b1.bbox):
num_figures[node_idx] += 1
for i1, b1 in enumerate(boxes_curves):
if intersect(node_bbox, b1.bbox):
num_curves[node_idx] += 1
tables = []
table_indices = []
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
        isTable = node.is_table()
        if isTable:
            for elem in node.elems:
                if "table" in elem.get_text().lower():
                    continue
                if (node.width - elem.bbox[2] + elem.bbox[0]) < 2 * char_width:
                    isTable = False
            if isTable:
                tables.append(node)
                table_indices.append(node_idx)
if combine:
node_features = [0] * 17
for idx, node in enumerate(nodes):
node_idx = node_indices[idx]
node_features = [
sum(x)
for x in zip(
node_features,
[
if_row_connected[node_idx],
num_row_connected[node_idx],
if_connected_by_span[node_idx],
num_connected_by_span[node_idx],
if_connected_by_align[node_idx],
num_connected_by_align[node_idx],
if_vertical_columns_merged[node_idx],
num_vertical_columns_merged[node_idx],
num_segments[node_idx],
num_curves[node_idx],
num_figures[node_idx],
total_word_space[node_idx],
avg_word_space[node_idx],
avg_word_space_norm[node_idx],
node_space[node_idx],
avg_node_space[node_idx],
avg_node_space_norm[node_idx],
],
)
]
return [], node_features
else:
table_features = []
for idx, table in enumerate(tables):
table_idx = table_indices[idx]
table_features += [
[
if_row_connected[table_idx],
num_row_connected[table_idx],
if_connected_by_span[table_idx],
num_connected_by_span[table_idx],
if_connected_by_align[table_idx],
num_connected_by_align[table_idx],
if_vertical_columns_merged[table_idx],
num_vertical_columns_merged[table_idx],
num_segments[table_idx],
num_curves[table_idx],
num_figures[table_idx],
total_word_space[table_idx],
avg_word_space[table_idx],
avg_word_space_norm[table_idx],
node_space[table_idx],
avg_node_space[table_idx],
avg_node_space_norm[table_idx],
]
]
return tables, table_features
def parse_tree_structure(
elems: PDFElems,
font_stat: Counter,
page_num: int,
ref_page_seen: bool,
tables_page: List[Tuple[int, int, int, float, float, float, float]],
) -> Tuple[Dict[str, Any], bool]:
boxes_segments = elems.segments
boxes_curves = elems.curves
boxes_figures: List[LTFigure] = elems.figures
page_width = elems.layout.width
page_height = elems.layout.height
mentions: List[LTTextLine] = elems.mentions
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
width = get_page_width(mentions + boxes_segments + boxes_figures + boxes_curves)
char_width = get_char_width(mentions)
grid_size = avg_font_pts / 2.0
# Atomic features and marking initialization
for i, m in enumerate(mentions + boxes_figures):
m.id = i
m.feats = defaultdict(bool)
prefix = ""
if isinstance(m, LTTextLine) and m.font_name:
prefix = m.font_name + "-" + str(m.font_size) + "-"
        # center coordinates
        m.xc = (m.x0 + m.x1) / 2.0
        m.yc = (m.y0 + m.y1) / 2.0
        # Snap each element to its closest grid line to detect rows/columns
m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size
m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size
m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size
m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size
# Figures for this page
nodes = get_figures(boxes_figures)
if len(nodes) == 0:
logger.warning("No boxes to get figures from on page {}.".format(page_num))
figures_page: Tuple[int, int, int, float, float, float, float] = [
(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
for node in nodes
]
# Eliminate tables from these boxes
boxes: List[LTTextLine] = []
    for idx1, box in enumerate(mentions):
        overlaps = False  # renamed from `intersect`, which shadowed the imported helper
        for idx2, table in enumerate(tables_page):
            table_box = tuple(table[3:])
            bool_overlap = (
                table_box[1] <= box.bbox[2]
                and box.bbox[0] <= table_box[3]
                and table_box[0] <= box.bbox[3]
                and box.bbox[1] <= table_box[2]
            )
            if bool_overlap:
                overlaps = True
                break
        if not overlaps:
            boxes.append(box)
text_candidates, ref_page_seen = extract_text_candidates(
boxes,
elems.layout.bbox,
avg_font_pts,
width,
char_width,
page_num,
ref_page_seen,
boxes_figures,
page_width,
page_height,
)
text_candidates["figure"] = figures_page
text_candidates["table"] = tables_page
return text_candidates, ref_page_seen
def extract_text_candidates(
boxes: List[LTTextLine],
page_bbox,
avg_font_pts,
width,
char_width,
page_num,
ref_page_seen,
boxes_figures,
page_width,
page_height,
) -> Tuple[Dict[str, List], bool]:
# Filter out boxes with zero width or height
filtered_boxes = []
for bbox in boxes:
if bbox.x1 - bbox.x0 > 0 and bbox.y1 - bbox.y0 > 0:
filtered_boxes.append(bbox)
boxes = filtered_boxes
    # Table-of-contents pages contain many "." leader lines; skip empty or
    # overly dense pages because parsing them takes a long time
    if len(boxes) == 0 or len(boxes) > 3500:
        return {}, False
plane = Plane(page_bbox)
plane.extend(boxes)
# Row level clustering - identify objects that have same horizontal alignment
rid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
    obj2rid = list(
        range(len(boxes))
    )  # by default, each object maps to the cluster at its own index
    prev_clusters = obj2rid[:]  # copy, so the fixpoint check below sees changes
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2rid[i1] == obj2rid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if (
(abs(box1[1] - box2[1]) < 0.11 * avg_font_pts)
or ((abs(box1[3] - box2[3]) < 0.11 * avg_font_pts))
or (
round((box1[1] + box1[3]) / 2) == round((box2[1] + box2[3]) / 2)
)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
rid1 = obj2rid[min_i]
rid2 = obj2rid[max_i]
for obj_iter in rid2obj[rid2]:
rid2obj[rid1].add(obj_iter)
obj2rid[obj_iter] = rid1
rid2obj[rid2] = set()
if prev_clusters == obj2rid:
break
        prev_clusters = obj2rid[:]
cid2obj = [set([i]) for i in range(len(boxes))] # initialize clusters
    obj2cid = list(
        range(len(boxes))
    )  # by default, each object maps to the cluster at its own index
    prev_clusters = obj2cid[:]
# add the code for merging close text boxes in particular row
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
box1 = b1.bbox
box2 = b2.bbox
if obj2rid[i1] == obj2rid[i2]:
if (
(b1.bbox[0] < b2.bbox[0])
and ((b2.bbox[0] - b1.bbox[2]) <= 2 * char_width)
) or (
(b2.bbox[0] < b1.bbox[0])
and ((b1.bbox[0] - b2.bbox[2]) <= 2 * char_width)
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
if prev_clusters == obj2cid:
break
        prev_clusters = obj2cid[:]
# vertical alignment code
while True:
for i1, b1 in enumerate(boxes):
for i2, b2 in enumerate(boxes):
if (i1 == i2) or (obj2cid[i1] == obj2cid[i2]):
continue
if b1.bbox[1] < b2.bbox[1]:
box1 = b1.bbox
box2 = b2.bbox
elif b2.bbox[1] < b1.bbox[1]:
box1 = b2.bbox
box2 = b1.bbox
else:
# horizontally aligned
continue
if abs((box2[3] - box2[1]) - (box1[3] - box1[1])) > 0.5 * avg_font_pts:
continue
if (
box2[1] < box1[3]
or (box2[1] - box1[1] < 1.5 * avg_font_pts)
or (box2[3] - box1[3] < 1.5 * avg_font_pts)
): # can probably do better if we find the average space between words
if (
abs(box1[0] - box2[0]) < 3 * char_width
or abs(box1[2] - box2[2]) < 3 * char_width
or (((box1[0] + box1[2]) / 2) == ((box2[0] + box2[2]) / 2))
):
min_i = min(i1, i2)
max_i = max(i1, i2)
cid1 = obj2cid[min_i]
cid2 = obj2cid[max_i]
# move all objects from cluster cid2 to cid1
# reassign cluster ids for all such objects as well
for obj_iter in cid2obj[cid2]:
cid2obj[cid1].add(obj_iter)
obj2cid[obj_iter] = cid1
cid2obj[cid2] = set()
if prev_clusters == obj2cid:
break
        prev_clusters = obj2cid[:]
# get cluster spans
cid2span = {}
for cid in range(len(cid2obj)):
cid2span[cid] = {}
cid2span[cid]["min_x"] = sys.maxsize
cid2span[cid]["min_y"] = sys.maxsize
cid2span[cid]["max_x"] = -sys.maxsize - 1
cid2span[cid]["max_y"] = -sys.maxsize - 1
for obj in cid2obj[cid]:
cid2span[cid]["min_x"] = min(cid2span[cid]["min_x"], boxes[obj].bbox[0])
cid2span[cid]["max_x"] = max(cid2span[cid]["max_x"], boxes[obj].bbox[2])
cid2span[cid]["min_y"] = min(cid2span[cid]["min_y"], boxes[obj].bbox[1])
cid2span[cid]["max_y"] = max(cid2span[cid]["max_y"], boxes[obj].bbox[3])
# Don't split up references
references_bbox = []
references_cid = set()
for cid in range(len(cid2obj)):
if len(cid2obj[cid]) == 1:
if boxes[list(cid2obj[cid])[0]].get_text().lower() == "references":
references_bbox = [
cid2span[cid]["min_x"],
cid2span[cid]["min_y"],
cid2span[cid]["max_x"],
cid2span[cid]["max_y"],
]
for cid2 in range(len(cid2obj)):
if (
round(cid2span[cid]["min_x"]) == round(cid2span[cid2]["min_x"])
and cid2span[cid]["max_y"] < cid2span[cid2]["min_y"]
):
references_cid.add(cid2)
cid2span[cid2]["min_x"] = cid2span[cid]["min_x"]
cid2span[cid2]["max_x"] = cid2span[cid]["max_x"]
# get a list of empty cids
empty_cids = [cid for cid in range(len(cid2obj)) if len(cid2obj[cid]) == 0]
empty_idx = 0
# Split paras based on whitespaces - seems to work
if not ref_page_seen:
for cid in range(len(cid2obj)):
if (
len(cid2obj[cid]) > 0
and cid not in empty_cids
and cid not in references_cid
):
cid_maxx = max([boxes[obj].bbox[2] for obj in cid2obj[cid]])
cid_minx = min([boxes[obj].bbox[0] for obj in cid2obj[cid]])
rid_list = set([obj2rid[obj] for obj in cid2obj[cid]])
# Get min_y for each row
rid_miny = {}
for rid in rid_list:
rid_miny[rid] = min(
[
boxes[obj].bbox[1] if obj in cid2obj[cid] else 10000
for obj in rid2obj[rid]
]
)
sorted_rid_miny = sorted(
list(rid_miny.items()), key=operator.itemgetter(1)
)
last_rid = 0
for i in range(len(sorted_rid_miny) - 1):
row1 = sorted_rid_miny[i][0]
row2 = sorted_rid_miny[i + 1][0]
row1_maxx = max(
[
boxes[obj].bbox[2] if obj in cid2obj[cid] else -1
for obj in rid2obj[row1]
]
)
row2_minx = min(
[
boxes[obj].bbox[0] if obj in cid2obj[cid] else 10000
for obj in rid2obj[row2]
]
)
if row1_maxx <= cid_maxx and (row2_minx - char_width) > cid_minx:
# split cluster cid
new_cid_idx = empty_cids[empty_idx]
empty_idx += 1
for i_iter in range(last_rid, i + 1):
obj_list = [
obj
for obj in rid2obj[sorted_rid_miny[i_iter][0]]
if obj2cid[obj] == cid
]
for obj in obj_list:
cid2obj[cid].remove(obj)
cid2obj[new_cid_idx].add(obj)
obj2cid[obj] = new_cid_idx
last_rid = i + 1
clusters = [[boxes[i] for i in cluster] for cluster in filter(bool, cid2obj)]
nodes = [Node(elems) for elems in clusters]
node_indices = [i for i, x in enumerate(cid2obj) if x]
merge_indices = [i for i in range(len(node_indices))]
merge_indices = merge_nodes(nodes, merge_indices)
# Merging Nodes
new_nodes = []
new_node_indices = []
for idx in range(len(merge_indices)):
if merge_indices[idx] == idx:
new_nodes.append(nodes[idx])
new_node_indices.append(node_indices[idx])
# Heuristics for Node type
# ref_nodes = []
new_ref_page_seen = False
if len(references_cid) > 0 or ref_page_seen or references_bbox != []:
new_ref_page_seen = True
ref_seen_in_node = False or ref_page_seen
all_boxes = boxes + boxes_figures
min_y_page = float("Inf")
for idx, box in enumerate(all_boxes):
min_y_page = min(min_y_page, box.bbox[1])
if page_num == -1:
# handle title, authors and abstract here
logger.error("TODO: no way to handle title authors abstract yet.")
else:
# eliminate header, footer, page number
# sort other text and classify as header/paragraph
new_nodes.sort(key=cmp_to_key(xy_reading_order))
for idx, node in enumerate(new_nodes):
if idx < len(new_nodes) - 1:
if (
round(node.y0) == round(min_y_page)
or math.floor(node.y0) == math.floor(min_y_page)
) and node.y1 - node.y0 < 2 * avg_font_pts: # can be header
idx_new = idx + 1
if idx_new < len(new_nodes) - 1:
while idx_new < len(new_nodes) - 1 and (
(round(node.y0) == round(new_nodes[idx_new].y0))
or (
math.floor(node.y0) == math.floor(new_nodes[idx_new].y0)
)
):
idx_new += 1
if idx_new < len(new_nodes) - 1:
if new_nodes[idx_new].y0 - node.y0 > 1.5 * avg_font_pts:
node.type = "Header"
continue
# get captions - first word is fig/figure/table
first_elem = None
for elem in node.elems:
if round(elem.bbox[0]) == round(node.x0) and round(
elem.bbox[1]
) == round(node.y0):
first_elem = elem
break
if first_elem is not None:
text = first_elem.get_text()
if len(text) > 10:
text = first_elem.get_text()[0:10]
if "Table" in text:
node.type = "Table Caption"
continue
if "Fig" in text or "Figure" in text:
node.type = "Figure Caption"
continue
if first_elem.get_text().lower() == "references":
node.type = "Section Header"
ref_seen_in_node = True
continue
if ref_seen_in_node:
node.type = "List"
continue
if references_bbox != [] or ref_seen_in_node:
if (
node.y0 > references_bbox[3]
and node.x0 <= references_bbox[0]
and node.x1 > references_bbox[2]
):
node.type = "List"
continue
            if node.y1 - node.y0 <= 2.0 * avg_font_pts:  # single line: section header
                node.type = "Section Header"
            else:  # multiple lines: paragraph
                node.type = "Paragraph"
# handle references
newer_nodes = []
ref_indices = [False for idx in range(len(new_nodes))]
for idx1, node1 in enumerate(new_nodes):
if ref_indices[idx1]:
continue
if node1.type != "List":
newer_nodes.append(node1)
continue
x0, x1 = node1.x0, node1.x1
newer_node = node1
ref_indices[idx1] = True
for idx2, node2 in enumerate(new_nodes):
if idx1 != idx2:
if node2.type == "List" and not ref_indices[idx2]:
if (node2.x0 <= x0 and node2.x1 >= x0) or (
x0 <= node2.x0 and x1 >= node2.x0
):
newer_node.merge(node2)
ref_indices[idx2] = True
newer_nodes.append(newer_node)
# handle figures
for fig_box in boxes_figures:
node_fig = Node(fig_box)
node_fig.type = "Figure"
newer_nodes.append(node_fig)
tree = {}
tree["section_header"] = [
(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
for node in newer_nodes
if node.type == "Section Header"
]
tree["header"] = [
(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
for node in newer_nodes
if node.type == "Header"
]
tree["paragraph"] = [
(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
for node in newer_nodes
if node.type == "Paragraph"
]
# tree["figure"] = [(page_num, page_width, page_height) +
# (node.y0, node.x0, node.y1, node.x1)
# for node in newer_nodes if node.type == "Figure"]
tree["figure_caption"] = [
(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
for node in newer_nodes
if node.type == "Figure Caption"
]
tree["table_caption"] = [
(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
for node in newer_nodes
if node.type == "Table Caption"
]
tree["list"] = [
(page_num, page_width, page_height) + (node.y0, node.x0, node.y1, node.x1)
for node in newer_nodes
if node.type == "List"
]
return tree, new_ref_page_seen
def get_figures(
boxes: List[LTFigure],
) -> List[Node]:
# Filter out boxes with zero width or height
boxes = [bbox for bbox in boxes if not bbox.is_empty()]
if len(boxes) == 0:
return []
# Convert LTFigure to Node
nodes: List[Node] = [Node(fig_box) for fig_box in boxes]
# Merge and retain only the most outer nodes
merge_indices = list(range(len(nodes)))
merge_indices = merge_nodes(nodes, merge_indices)
new_nodes = [node for idx, node in enumerate(nodes) if merge_indices[idx] == idx]
return new_nodes
def merge_nodes(nodes: List[Node], merge_indices: List[int]) -> List[int]:
"""Merges overlapping nodes.
:param nodes: Nodes to be merged
:param merge_indices: Indices of nodes
:return: a list of indices, indicating which node is its most outer node.
"""
# Merge inner boxes to the best outer box
# nodes.sort(key=Node.area)
to_be_removed = set()
for inner_idx in range(len(nodes)):
inner = nodes[inner_idx]
outers = []
outers_indices = []
for outer_idx in range(len(nodes)):
outer = nodes[outer_idx]
if outer is inner or outer in to_be_removed:
continue
if intersect(outer.bbox, inner.bbox):
outers.append(outer)
outers_indices.append(outer_idx)
if not outers:
continue
# Best is defined as min L1 distance to outer center
best_outer = min(
outers, key=lambda outer: l1(center(outer.bbox), center(inner.bbox))
)
best_outer_idx = outers_indices[outers.index(best_outer)]
to_be_removed.add(inner)
best_outer.merge(inner)
for cid_iter in range(len(merge_indices)):
if merge_indices[cid_iter] == merge_indices[inner_idx]:
merge_indices[cid_iter] = merge_indices[best_outer_idx]
return merge_indices
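# Usage sketch (hypothetical nodes, not part of the original module): if node 1
# lies inside node 0, merge_nodes folds node 1 into node 0 and maps every index
# to its most outer node:
# >>> merge_indices = merge_nodes([outer_node, inner_node], [0, 1])
# >>> merge_indices
# [0, 0]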
def get_most_common_font_pts(mentions, font_stat):
"""
font_stat: Counter object of font sizes
"""
    try:
        most_common_font_size = font_stat.most_common(1)[0][0]
        # Smoothed average: starting count at 0.01 avoids division by zero,
        # and height_sum at 0.02 makes the result default to 2.0 pts when no
        # mention matches the most common font size.
        count = 0.01
        height_sum = 0.02
for m in mentions:
if m.font_size == most_common_font_size:
height_sum += m.height
count += 1
return height_sum / count
except IndexError:
logger.info("No text found on page. Default most_common_font_pts to 2.0")
return 2.0
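# Worked example of the smoothing above (hypothetical input): if font_stat is
# non-empty but no mention matches the most common font size, the function
# returns height_sum / count = 0.02 / 0.01 = 2.0 pts, matching the IndexError
# fallback, so callers always get a positive font height.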
def get_page_width(boxes):
xmin = float("Inf")
xmax = float("-Inf")
for i, b in enumerate(boxes):
xmin = min(xmin, b.bbox[0])
xmax = max(xmax, b.bbox[2])
return xmax - xmin
def get_char_width(boxes: List[LTTextLine]) -> float:
box_len_sum = np.sum([b.bbox[2] - b.bbox[0] for b in boxes])
num_char_sum = np.sum([len(b.get_text()) for b in boxes])
try:
return box_len_sum / num_char_sum
except ZeroDivisionError:
logger.warning("No text found. Defaulting to char_width = 2.0.")
return 2.0
| pdftotree-master | pdftotree/utils/pdf/pdf_parsers.py |
pdftotree-master | pdftotree/utils/pdf/__init__.py |
|
"""
Created on Oct 21, 2015
@author: xiao
"""
from collections import namedtuple
import numpy as np
# bbox indices
x0 = 0
y0 = 1
x1 = 2
y1 = 3
class Segment(namedtuple("Segment", ["e", "vector"])):
__slots__ = ()
@property
def length(self):
return self.vector[x0] if self.vector[x0] else self.vector[y0]
def horizontal(self):
return bool(self.vector[x0])
def vertical(self):
return bool(self.vector[y0])
def __str__(self, *args, **kwargs):
return " ".join(str(x) for x in [self.e, self.vector, self.e.linewidth])
def vectorize(e, tolerance=0.1):
    """
    Vectorizes the pdf object's bounding box.
    `tolerance` is the dimension under which we consider the box a line
    instead of a big rectangle; returns None for roughly square boxes.
    """
tolerance = max(tolerance, e.linewidth)
is_high = e.height > tolerance
is_wide = e.width > tolerance
# if skewed towards a line
if is_wide and not is_high:
return (e.width, 0.0)
if is_high and not is_wide:
return (0.0, e.height)
def aligned(e1, e2):
"""
alignment is determined by two boxes having one exactly the same
attribute, which could mean parallel, perpendicularly forming a
corner etc.
"""
return (
any(close(c1, c2) for c1, c2 in zip(e1.bbox, e2.bbox))
or x_center_aligned(e1, e2)
or y_center_aligned(e1, e2)
)
def x_center_aligned(e1, e2):
return close(e1.x0 + e1.x1, e2.x0 + e2.x1)
def x_aligned(a, b):
return x_center_aligned(a, b) or close(a.x0, b.x0) or close(a.x1, b.x1)
def y_center_aligned(e1, e2):
return close(e1.y0 + e1.y1, e2.y0 + e2.y1)
def close(f1, f2, thres=2.0):
return abs(f1 - f2) < thres
def origin(bbox):
return bbox[:2]
def center(bbox):
return ((bbox[x0] + bbox[x1]) / 2, (bbox[y0] + bbox[y1]) / 2)
def area(bbox):
return (bbox[x1] - bbox[x0]) * (bbox[y1] - bbox[y0])
def l1(c1, c2):
return sum(abs(v1 - v2) for v1, v2 in zip(c1, c2))
def segment_diff(s1, s2):
"""
Returns the sum of absolute difference between
two segments' end points.
Only perfectly aligned segments return 0
"""
return abs(s1[0] - s2[0]) + abs(s1[1] - s2[1])
def bound_bboxes(bboxes):
"""
Finds the minimal bbox that contains all given bboxes
"""
group_x0 = min(map(lambda l: l[x0], bboxes))
group_y0 = min(map(lambda l: l[y0], bboxes))
group_x1 = max(map(lambda l: l[x1], bboxes))
group_y1 = max(map(lambda l: l[y1], bboxes))
return (group_x0, group_y0, group_x1, group_y1)
def bound_elems(elems):
"""
Finds the minimal bbox that contains all given elems
"""
group_x0 = min(map(lambda l: l.x0, elems))
group_y0 = min(map(lambda l: l.y0, elems))
group_x1 = max(map(lambda l: l.x1, elems))
group_y1 = max(map(lambda l: l.y1, elems))
return (group_x0, group_y0, group_x1, group_y1)
def intersect(a, b):
"""
Check if two rectangles intersect
"""
if a[x0] == a[x1] or a[y0] == a[y1]:
return False
if b[x0] == b[x1] or b[y0] == b[y1]:
return False
return a[x0] <= b[x1] and b[x0] <= a[x1] and a[y0] <= b[y1] and b[y0] <= a[y1]
def inside(outer, inner):
    return (
        inner[x0] >= outer[x0]
        and inner[x1] <= outer[x1]
        and inner[y0] >= outer[y0]
        and inner[y1] <= outer[y1]
    )
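# Doctest-style sanity checks (hypothetical boxes, not part of the original
# module): a box strictly contained in another both intersects it and is
# inside it, while degenerate boxes never intersect due to the guards above.
# >>> intersect((0, 0, 10, 10), (2, 2, 4, 4))
# True
# >>> inside((0, 0, 10, 10), (2, 2, 4, 4))
# True
# >>> intersect((0, 0, 10, 10), (5, 5, 5, 9))  # zero-width box
# False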
_stretch_dir = np.array([-1, -1, 1, 1])
def enlarge(bbox, delta):
return np.array(bbox) + delta * _stretch_dir
def reading_order(e1, e2):
"""
A comparator to sort bboxes from top to bottom, left to right
"""
b1 = e1.bbox
b2 = e2.bbox
if round(b1[y0]) == round(b2[y0]) or round(b1[y1]) == round(b2[y1]):
return float_cmp(b1[x0], b2[x0])
return float_cmp(b1[y0], b2[y0])
def xy_reading_order(e1, e2):
"""
A comparator to sort bboxes from left to right, top to bottom
"""
b1 = e1.bbox
b2 = e2.bbox
if round(b1[x0]) == round(b2[x0]):
return float_cmp(b1[y0], b2[y0])
return float_cmp(b1[x0], b2[x0])
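# Usage note: these comparators are meant for functools.cmp_to_key, e.g.
# sorted(nodes, key=cmp_to_key(xy_reading_order)) orders elements left to
# right, then top to bottom, as done in pdf_parsers.parse_tree_structure.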
def column_order(b1, b2):
"""
A comparator that sorts bboxes first by "columns", where a column is made
up of all bboxes that overlap, then by vertical position in each column.
b1 = [b1.type, b1.top, b1.left, b1.bottom, b1.right]
b2 = [b2.type, b2.top, b2.left, b2.bottom, b2.right]
"""
(top, left, bottom) = (1, 2, 3)
# TODO(senwu): Reimplement the functionality of this comparator to
# detect the number of columns, and sort those in reading order.
# TODO: This is just a simple top to bottom, left to right comparator
# for now.
if round(b1[top]) == round(b2[top]) or round(b1[bottom]) == round(b2[bottom]):
return float_cmp(b1[left], b2[left])
return float_cmp(b1[top], b2[top])
# if((b1[left] >= b2[left] and b1[left] <= b2[right]) or
# (b2[left] >= b1[left] and b2[left] <= b1[right])):
# return float_cmp(b1[top], b2[top])
#
# # Return leftmost columns first
# return float_cmp(b1[left], b2[left])
def float_cmp(f1, f2):
if f1 > f2:
return 1
elif f1 < f2:
return -1
else:
return 0
def merge_intervals(elems, overlap_thres=2.0):
"""
Project in x axis
Sort by start
Go through segments and keep max x1
Return a list of non-overlapping intervals
"""
overlap_thres = max(0.0, overlap_thres)
ordered = sorted(elems, key=lambda e: e.x0)
intervals = []
cur = [-overlap_thres, -overlap_thres]
for e in ordered:
if e.x0 - cur[1] > overlap_thres:
# Check interval validity
if cur[1] > 0.0:
intervals.append(cur)
cur = [e.x0, e.x1]
continue
cur[1] = max(cur[1], e.x1)
    # Only append the final interval if it is valid (i.e. elems was non-empty)
    if cur[1] > 0.0:
        intervals.append(cur)
# Freeze the interval to tuples
return map(tuple, intervals)
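# Example (hypothetical elements exposing x0/x1, not part of the original
# module): spans [1, 5] and [4, 9] overlap and collapse into one interval,
# while [20, 25] starts more than the 2.0 threshold past the previous end:
# >>> from collections import namedtuple
# >>> Span = namedtuple("Span", ["x0", "x1"])
# >>> list(merge_intervals([Span(1, 5), Span(4, 9), Span(20, 25)]))
# [(1, 9), (20, 25)]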
| pdftotree-master | pdftotree/utils/pdf/vector_utils.py |
"""
Created on Jun 10, 2016
@author: xiao
"""
import numbers
from collections import Counter
from typing import List, Union
from pdfminer.layout import LTComponent, LTCurve, LTFigure, LTLine, LTTextLine
from pdftotree.utils.pdf.grid import Grid
from pdftotree.utils.pdf.layout_utils import is_same_row, is_vline
from pdftotree.utils.pdf.vector_utils import bound_bboxes, bound_elems
def elem_type(elem):
if isinstance(elem, LTLine):
return "line"
if isinstance(elem, LTCurve):
return "curve"
if isinstance(elem, LTTextLine):
return "text"
if isinstance(elem, LTFigure):
return "figure"
return "unkown"
class Node(LTComponent):
"""
A rectangular region in the document representing certain local semantics.
Also holds its data and features.
"""
def __init__(self, elems: Union[List[LTComponent], LTComponent]):
"""
Constructor
"""
if not isinstance(elems, list):
elems = [elems]
self.elems = elems
self.sum_elem_bbox = 0
for elem in elems:
self.sum_elem_bbox = self.sum_elem_bbox + abs(
(elem.bbox[0] - elem.bbox[2]) * (elem.bbox[1] - elem.bbox[3])
)
# # self.sum_elem_bbox = self.sum_elem_bbox + len(elem.get_text())
self.table_area_threshold = 0.7
self.set_bbox(bound_elems(elems))
# self.table_indicator = True
self.type_counts = Counter(map(elem_type, elems))
        # Inspect the first element's type; passing the list itself would
        # always yield "unknown"
        if elem_type(elems[0]) not in ["figure", "unknown"]:
self.feat_counts = Counter(kv for e in elems for kv in e.feats.items())
else:
self.feat_counts = 0
self.type = "UNK"
def merge(self, other):
self.elems.extend(other.elems)
self.set_bbox(bound_bboxes([self.bbox, other.bbox]))
self.type_counts += other.type_counts
self.feat_counts += other.feat_counts
def area(self):
return self.height * self.width
def is_borderless(self):
# at least this many segments for a table
return self.type_counts["line"] < 6
def is_table(self):
"""
Count the node's number of mention al ignment in both axes to determine
if the node is a table.
"""
if self.type_counts["text"] < 6 or "figure" in self.type_counts:
return False
for e in self.elems:
# Characters written as curve are usually small, discard diagrams here
if elem_type(e) == "curve" and e.height * e.width > 100:
return False
# import re
# space_re = '\\s+'
# ws_arr = []
# whitespace_aligned = False
# for elem in self.elems:
# elem_ws = []
# for m in re.finditer(space_re, elem.get_text()):
# elem_ws.append(m.start())
# # print elem, elem_ws
# if(len(elem_ws)>0):
# ws_arr.append(elem_ws)
# # print ws_arr
# if(len(ws_arr)>0):
# count_arr = max([ws_arr.count(i) for i in ws_arr])
# if(float(count_arr)/len(ws_arr) > 0.75):
# return True
if (
self.sum_elem_bbox / (self.height * self.width)
) > self.table_area_threshold:
return False
has_many_x_align = False
has_many_y_align = False
for k, v in self.feat_counts.items():
font_key = k[0]
if (
v >= 2 and "-" in font_key
): # Text row or column with more than 2 elements
if font_key[-2] == "x":
has_many_x_align = True
if font_key[-2] == "y":
has_many_y_align = True
return has_many_x_align and has_many_y_align
# return 0.5
def get_grid(self):
"""
Standardize the layout of the table into grids
"""
mentions, lines = _split_text_n_lines(self.elems)
# Sort mentions in reading order where y values are snapped to half
# height-sized grid
mentions.sort(key=lambda m: (m.yc_grid, m.xc))
grid = Grid(mentions, lines, self)
return grid
def _find_vbars_for_row(self, plane, row):
align_grid_size = (
sum(m.height for m in row) / 2.0 / len(row)
) # half the avg height
# Find all x_coords of vertical bars crossing this row
ryc = sum(m.yc for m in row) / len(row) # avg yc
query_rect = (self.x0, ryc, self.x1, ryc)
vbars = filter(is_vline, plane.find(query_rect)) # vbars in this row
vbars = [(v.xc, v.xc_grid) for v in vbars]
vbars.sort()
# Group bars less than min cell width apart as one bar
prev_xc = -1
clustered_vbars = []
for xc, xc_grid in vbars:
if prev_xc < 0 or xc - prev_xc > align_grid_size:
clustered_vbars.append(xc_grid) # only keep snapped coord
prev_xc = xc
return clustered_vbars
def __str__(self, *args, **kwargs):
return "\t".join(
r.get_text().encode("utf8", "replace")
for r in self.elems
if isinstance(r, LTTextLine)
)
#############################################
# Static utilities
#############################################
def _split_text_n_lines(elems):
texts = []
lines = []
for e in elems:
if isinstance(e, LTTextLine):
texts.append(e)
elif isinstance(e, LTLine):
lines.append(e)
return texts, lines
def _left_bar(content, default_val):
last_bar = default_val
for _coord, val in content:
if not isinstance(val, LTTextLine):
last_bar = val
yield last_bar
def _right_bar(content, default_val):
return reversed(list(_left_bar(reversed(content), default_val)))
def _find_col_parent_for_row(content):
pass
def _get_cols(row_content):
"""
Counting the number columns based on the content of this row
"""
cols = []
subcell_col = []
prev_bar = None
for _coord, item in row_content:
if isinstance(item, LTTextLine):
subcell_col.append(item)
else: # bar, add column content
# When there is no content, we count a None column
if prev_bar:
bar_ranges = (prev_bar, item)
col_items = subcell_col if subcell_col else [None]
cols.extend([bar_ranges, col_items])
prev_bar = item
subcell_col = []
# Remove extra column before first bar
return cols
def _row_str(row_content):
def strfy(r):
if r is None:
return "None"
if isinstance(r, tuple):
_c, r = r
if isinstance(r, LTTextLine):
return r.get_text().encode("utf8", "replace")
if isinstance(r, numbers.Number):
return "|"
return str(r)
return "\t".join(strfy(r) for r in row_content)
def _get_rows(mentions):
curr_row = []
rows = []
prev = None
for m in mentions:
if not is_same_row(prev, m):
if curr_row:
rows.append(curr_row)
curr_row = []
curr_row.append(m)
prev = m
# Finish up last row
if curr_row:
rows.append(curr_row)
return rows
def _one_contains_other(s1, s2):
"""
Whether one set contains the other
"""
return min(len(s1), len(s2)) == len(s1 & s2)
| pdftotree-master | pdftotree/utils/pdf/node.py |
pdftotree-master | pdftotree/ml/__init__.py |
|
import string
from builtins import str
from collections import defaultdict
from typing import Any, List
from pdfminer.layout import LTComponent, LTTextLine
from pdftotree.utils.bbox_utils import isContained
from pdftotree.utils.pdf.pdf_parsers import (
cluster_vertically_aligned_boxes,
get_char_width,
get_most_common_font_pts,
get_page_width,
)
from pdftotree.utils.pdf.vector_utils import intersect
# ******************* Table Coverage Features *************************************
def get_area_coverage(bbox):
b = bbox[-4:]
return ((b[2] - b[0]) * (b[3] - b[1])) / float(bbox[1] * bbox[2])
def get_width_coverage(bbox):
b = bbox[-4:]
return (b[3] - b[1]) / float(bbox[1])
def get_height_coverage(bbox):
b = bbox[-4:]
return (b[2] - b[0]) / float(bbox[2])
# ******************* Text Coverage Features *************************************
def get_mentions_within_bbox(
bbox: List[Any], mentions: List[LTComponent]
) -> List[LTComponent]:
"""Get textlines within bbox.
:param bbox: a list containing (top, left, bottom, right) in the last 4 digits
:param mentions: a list of textlines
:return: a list of textlines within the given bbox
"""
mentions_within_bbox = []
for mention in mentions:
# Compute the centroid
xc = int((mention.x0 + mention.x1) / 2)
yc = int((mention.y0 + mention.y1) / 2)
bbox_mention = (
yc,
xc,
yc,
xc,
)
# See if the centroid is contained by the bbox.
if isContained(bbox_mention, bbox[-4:]):
mentions_within_bbox += [mention]
return mentions_within_bbox
def get_text_sparsity(bbox, mentions):
"""
computes text area coverage
:param mentions:
:return:
"""
b = bbox[-4:]
bbox_area = (b[2] - b[0]) * (b[3] - b[1])
text_area = 0
for m in mentions:
text_area += (m.x1 - m.x0) * (m.y1 - m.y0)
try:
return 1.0 * text_area / bbox_area
except ZeroDivisionError:
return 0.0
def symbols_and_numbers_density(bbox, mentions):
symbols = set(string.punctuation)
num_symbols = sum([1 for elem in mentions if elem.get_text() in symbols])
num_numbers = 0
for elem in mentions:
num_numbers += sum([c.isdigit() for c in elem.get_text()])
return [num_symbols, num_numbers]
# ******************* Lines Features *************************************
def get_lines_within_bbox(bbox, segments):
lines_within_bbox = []
for line in segments:
bbox_line = (int(line.y0), int(line.x0), int(line.y1), int(line.x1))
if isContained(bbox_line, bbox[-4:]):
lines_within_bbox += [line]
return lines_within_bbox
def get_lines_features(bboxes, elems):
features = []
for bbox in bboxes:
mentions = get_mentions_within_bbox(bbox, elems.mentions)
segments = get_lines_within_bbox(bbox, elems.segments)
feat = [get_area_coverage(bbox)]
feat += [get_height_coverage(bbox)]
feat += [get_width_coverage(bbox)]
feat += [get_text_sparsity(bbox, mentions)]
feat += symbols_and_numbers_density(bbox, mentions)
feat += [len(segments)]
features += [feat]
return features
# ******************* Alignments Features *************************************
def get_alignment_features(line_bboxes, elems, font_stat):
alignment_features = []
for line_bbox in line_bboxes:
line_bbox_ordered = (line_bbox[4], line_bbox[3], line_bbox[6], line_bbox[5])
boxes = [
elem for elem in elems.mentions if intersect(line_bbox_ordered, elem.bbox)
]
boxes_segments = [
elem for elem in elems.segments if intersect(line_bbox_ordered, elem.bbox)
]
boxes_figures = [
elem for elem in elems.figures if intersect(line_bbox_ordered, elem.bbox)
]
boxes_curves = [
elem for elem in elems.curves if intersect(line_bbox_ordered, elem.bbox)
]
page_width = elems.layout.width
# page_height = elems.layout.height
avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat)
width = get_page_width(boxes + boxes_segments + boxes_figures + boxes_curves)
if len(boxes) == 0:
alignment_features += [[0] * 17]
continue
char_width = get_char_width(boxes)
grid_size = avg_font_pts / 2.0
for i, m in enumerate(boxes + elems.figures):
m.id = i
m.feats = defaultdict(bool)
prefix = ""
if isinstance(m, LTTextLine) and m.font_name:
prefix = m.font_name + "-" + str(m.font_size) + "-"
m.xc = (m.x0 + m.x1) / 2.0
m.yc = (m.y0 + m.y1) / 2.0
m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size
m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size
m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size
m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size
nodes, nodes_features = cluster_vertically_aligned_boxes(
boxes,
elems.layout.bbox,
avg_font_pts,
width,
char_width,
boxes_segments,
boxes_curves,
boxes_figures,
page_width,
True,
)
if len(nodes_features) == 0:
alignment_features += [[0] * 17]
else:
alignment_features += [nodes_features]
return alignment_features
| pdftotree-master | pdftotree/ml/features.py |
import logging
import numpy as np
from wand.color import Color
from wand.drawing import Drawing
from pdftotree.ml.features import get_alignment_features, get_lines_features
from pdftotree.TreeExtract import TreeExtractor
from pdftotree.utils.bbox_utils import compute_iou
from pdftotree.utils.display_utils import pdf_to_img
logger = logging.getLogger(__name__)
class TableExtractorML(TreeExtractor):
"""
Object to extract tables regions from pdf files
"""
def __init__(self, pdf_file):
super().__init__(pdf_file)
self.lines_bboxes = []
self.alignments_bboxes = []
self.intersection_bboxes = []
self.bboxes = []
self.candidates = []
self.features = []
def get_candidates(self):
if len(self.elems) == 0:
self.parse()
if self.scanned:
return []
for page_num in list(self.elems.keys()):
page_boxes, page_features = self.get_candidates_and_features_page_num(
page_num
)
self.candidates += page_boxes
self.features += list(page_features)
return self.candidates
def get_candidates_and_features(self):
self.parse()
if self.scanned:
logger.info("{} is scanned.".format(self.pdf_file))
return [], [], self.scanned
for page_num in list(self.elems.keys()):
page_boxes, page_features = self.get_candidates_and_features_page_num(
page_num
)
self.candidates += page_boxes
self.features += list(page_features)
return self.candidates, self.features, self.scanned
def get_candidates_and_features_page_num(self, page_num):
elems = self.elems[page_num]
font_stat = self.font_stats[page_num]
lines_bboxes = self.get_candidates_lines(page_num, elems)
boxes = []
# Filter out bboxes that are zero width or height
for bbox in lines_bboxes:
if bbox[5] - bbox[3] > 0 and bbox[6] - bbox[4] > 0:
boxes += [bbox]
alignments_bboxes, alignment_features = self.get_candidates_alignments(
page_num, elems
)
logger.info(
"Page Num: {}, Line bboxes: {}, Alignment bboxes: {}".format(
page_num, len(lines_bboxes), len(alignments_bboxes)
)
)
alignment_features += get_alignment_features(lines_bboxes, elems, font_stat)
# Filter out bboxes that are zero width or height
for bbox in alignments_bboxes:
if bbox[5] - bbox[3] > 0 and bbox[6] - bbox[4] > 0:
boxes += [bbox]
# boxes = alignments_bboxes + lines_bboxes
if len(boxes) == 0:
return [], []
lines_features = get_lines_features(boxes, elems)
features = np.concatenate(
(np.array(alignment_features), np.array(lines_features)), axis=1
)
return boxes, features
def get_labels(self, gt_tables):
"""
:param gt_tables: dict, keys are page number and values are list of
tables bbox within that page
:return:
"""
labels = np.zeros(len(self.candidates))
for i, candidate in enumerate(self.candidates):
page_num = candidate[0]
try:
tables = gt_tables[page_num]
for gt_table in tables:
page_width, page_height, y0, x0, y1, x1 = gt_table
w_ratio = float(candidate[1]) / page_width
h_ratio = float(candidate[2]) / page_height
rescaled_gt_table = (
y0 * h_ratio,
x0 * w_ratio,
y1 * h_ratio,
x1 * w_ratio,
)
iou = compute_iou(candidate[-4:], rescaled_gt_table)
if iou > self.iou_thresh:
# candidate region is a table
labels[i] = 1
except KeyError:
# any of the candidates is a true table, all zero labels
pass
return labels
def display_bounding_boxes(self, page_num, bboxes, alternate_colors=True):
elems = self.elems[page_num]
page_width, page_height = int(elems.layout.width), int(elems.layout.height)
img = pdf_to_img(self.pdf_file, page_num, page_width, page_height)
draw = Drawing()
draw.fill_color = Color("rgba(0, 0, 0, 0)")
color = Color("blue")
draw.stroke_color = color
for block in bboxes:
top, left, bottom, right = block[-4:]
draw.stroke_color = Color(
"rgba({},{},{}, 1)".format(
str(np.random.randint(255)),
str(np.random.randint(255)),
str(np.random.randint(255)),
)
)
draw.rectangle(
left=float(left),
top=float(top),
right=float(right),
bottom=float(bottom),
)
draw(img)
return img
| pdftotree-master | pdftotree/ml/TableExtractML.py |
pdftotree-master | pdftotree/visual/__init__.py |
|
import os
from typing import Tuple
import keras.backend as K
import numpy as np
import selectivesearch
from keras.preprocessing.image import img_to_array, load_img
from numpy import ndarray
from wand.color import Color
from wand.image import Image
def predict_heatmap(
pdf_path, page_num, model, img_dim=448, img_dir="tmp/img"
) -> Tuple[ndarray, ndarray]:
"""
Return an image corresponding to the page of the pdf
documents saved at pdf_path. If the image is not found in img_dir this
function creates it and saves it in img_dir.
:param pdf_path: path to the pdf document.
:param page_num: page number to create image from in the pdf file.
:return:
"""
if not os.path.isdir(img_dir):
print("\nCreating image folder at {}".format(img_dir))
os.makedirs(img_dir)
pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
# TODO: add hashing function to make sure name is unique
# TODO: add parallelization
img_path = os.path.join(img_dir, pdf_name + "-{}.png".format(page_num))
if not os.path.isfile(img_path):
# create image for a page in the pdf document and save it in img_dir
save_image(pdf_path, img_path, page_num)
image = load_img(img_path, grayscale=True, target_size=(img_dim, img_dim))
image = img_to_array(image, data_format=K.image_data_format())
image = (
image.reshape((img_dim, img_dim, 1))
.repeat(3, axis=2)
.reshape((1, img_dim, img_dim, 3))
)
return (
image.astype(np.uint8).reshape((img_dim, img_dim, 3)),
model.predict(image).reshape((img_dim, img_dim)),
)
def save_image(pdf_path, img_path, page_num):
"""
Creates images for a page of the input pdf document and saves it
at img_path.
:param pdf_path: path to pdf to create images for.
:param img_path: path where to save the images.
:param page_num: page number to create image from in the pdf file.
:return:
"""
pdf_img = Image(filename="{}[{}]".format(pdf_path, page_num))
with pdf_img.convert("png") as converted:
# Set white background.
converted.background_color = Color("white")
converted.alpha_channel = "remove"
converted.save(filename=img_path)
def do_intersect(bb1, bb2):
"""
Helper function that returns True if two bounding boxes overlap.
"""
if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]:
return False
if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]:
return False
return True
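# Doctest-style check (hypothetical boxes in (left, top, width, height) form,
# not part of the original module):
# >>> do_intersect((0, 0, 10, 10), (5, 5, 10, 10))
# True
# >>> do_intersect((0, 0, 10, 10), (20, 0, 5, 5))
# False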
def get_bboxes(
img,
mask,
nb_boxes=100,
score_thresh=0.5,
iou_thresh=0.2,
prop_size=0.09,
prop_scale=1.2,
):
"""
Uses selective search to generate candidate bounding boxes and keeps the
ones that have the largest iou with the predicted mask.
:param img: original image
:param mask: predicted mask
:param nb_boxes: max number of candidate bounding boxes
    :param score_thresh: score threshold above which a prediction counts as True
    :param iou_thresh: iou threshold to consider a candidate a correct region
    :param prop_size: selective search parameter
    :param prop_scale: selective search parameter; a larger prop_scale favors
        large bounding boxes
    :return: list of bounding boxes and ious; bounding boxes are tuples (left,
        top, width, height)
    """
min_size = int(img.shape[0] * prop_size * img.shape[1] * prop_size)
scale = int(img.shape[0] * prop_scale)
# TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes
img_lbl, regions = selectivesearch.selective_search(
img, scale=scale, sigma=0.8, min_size=min_size
)
rect = [None] * nb_boxes
max_iou = -1 * np.ones(nb_boxes)
mask = 1.0 * (mask > score_thresh)
# compute iou for each candidate bounding box and save top nb_bboxes
for region in regions:
left, top, width, height = region["rect"]
intersection = mask[top : top + height, left : left + width].sum()
union = height * width + mask.sum() - intersection
iou = intersection / union
idx = np.argmin(max_iou)
if iou > max_iou[idx]:
max_iou[idx] = iou
rect[idx] = region["rect"]
    # Slots never assigned a candidate keep max_iou == -1; skip them below
    remove_indexes = max_iou == -1
bboxes = []
filtered_ious = []
for idx in np.argsort([-x for x in max_iou]):
if remove_indexes[idx]:
# no more tables bounding boxes
break
if len(bboxes) == 0:
# first candidate table bounding box
if max_iou[idx] > iou_thresh:
bboxes += [rect[idx]]
filtered_ious += [max_iou[idx]]
else:
# No tables in this document
break
else:
            # If it doesn't intersect with any other bounding box
if not any(
[do_intersect(rect[idx], bboxes[k]) for k in range(len(bboxes))]
):
if max_iou[idx] > iou_thresh:
bboxes += [rect[idx]]
filtered_ious += [max_iou[idx]]
return bboxes, filtered_ious
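# Usage sketch tying the two helpers together (hypothetical paths and model,
# not part of the original module):
# img, mask = predict_heatmap("doc.pdf", 0, model)
# bboxes, ious = get_bboxes(img, mask)
# for (left, top, w, h), iou in zip(bboxes, ious):
#     print(left, top, w, h, iou)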
| pdftotree-master | pdftotree/visual/visual_utils.py |
import logging
import os
from subprocess import PIPE, Popen
from typing import Optional
from bs4 import BeautifulSoup
from bs4.element import Tag
from shapely.geometry import box
import pdftotree
# Adapted from https://github.com/ocropus/hocr-tools/blob/v1.3.0/hocr-check
def get_prop(node: Tag, name: str) -> Optional[str]:
title = node.get("title")
if not title:
return None
props = title.split(";")
for prop in props:
(key, args) = prop.split(None, 1)
if key == name:
return args
return None
# Adapted from https://github.com/ocropus/hocr-tools/blob/v1.3.0/hocr-check
def get_bbox(node: Tag) -> box:
bbox = get_prop(node, "bbox")
if not bbox:
return None
return box(*[int(x) for x in bbox.split()])
def test_heuristic_completion():
"""Simply test that parse runs to completion without errors."""
output = pdftotree.parse("tests/input/paleo.pdf")
assert output is not None
def test_cli_should_output_at_given_path(tmp_path):
"""Test if CLI produces an HTML at a given path."""
html_path = os.path.join(tmp_path, "paleo.html")
pdftotree.parse("tests/input/paleo.pdf", html_path)
assert os.path.isfile(html_path)
def test_output_should_conform_to_hocr(tmp_path):
"""Test if an exported file conform to hOCR."""
html_path = os.path.join(tmp_path, "md.html")
pdftotree.parse("tests/input/md.pdf", html_path)
with Popen(["hocr-check", html_path], stderr=PIPE) as proc:
assert all([line.decode("utf-8").startswith("ok") for line in proc.stderr])
# Check detailed things that hocr-check does not check.
with open(html_path) as fp:
soup = BeautifulSoup(fp, "lxml")
capabilities = soup.find("meta", attrs={"name": "ocr-capabilities"})
# Check the list as hocr-check only checks the existence of "ocr-capabilities".
assert capabilities["content"].split() == [
"ocr_page",
"ocr_table",
"ocrx_block",
"ocrx_line",
"ocrx_word",
]
def test_no_out_of_order(caplog):
"""Test if no out of order warning is issued."""
pdftotree.parse("tests/input/md.pdf")
assert "Out of order" not in caplog.text
pdftotree.parse("tests/input/paleo.pdf")
assert "Out of order" not in caplog.text
def test_tabula_warning_suppressed(caplog):
"""Test if tabula warnings are suppressed."""
# Warnings suppressed by default
pdftotree.parse("tests/input/112823.pdf")
assert "org.apache.pdfbox" not in caplog.text
# Not to suppress warnings
log = logging.getLogger("pdftotree")
log.setLevel(logging.DEBUG)
pdftotree.parse("tests/input/112823.pdf")
assert "org.apache.pdfbox" in caplog.text
def test_visualize_output(tmp_path):
"""Test if an output can be visualzied."""
html_path = os.path.join(tmp_path, "md.html")
pdftotree.parse("tests/input/md.pdf", html_path, visualize=True)
def test_looks_scanned():
"""Test on a PDF that looks like a scanned one but not.
CaseStudy_ACS.pdf contains a transparent image overlaying the entire page.
This overlaying transparent image fools TreeExtractor into thinking it is scanned.
"""
output = pdftotree.parse("tests/input/CaseStudy_ACS.pdf")
soup = BeautifulSoup(output, "lxml")
assert len(soup.find_all(class_="ocrx_word")) >= 1000
assert len(soup.find_all("figure")) == 3
# Check if words are extracted even though they are overlapped by a figure (#77).
page = soup.find(class_="ocr_page") # checking only 1st page is good enough.
words = [get_bbox(word) for word in page.find_all(class_="ocrx_word")]
figure = get_bbox(page.find("figure"))
assert all([figure.contains(word) for word in words])
def test_LTChar_under_LTFigure(tmp_path):
"""Test on a PDF where LTChar(s) are children of LTFigure."""
html_path = os.path.join(tmp_path, "paleo.html")
pdftotree.parse("tests/input/CentralSemiconductorCorp_2N4013.pdf", html_path)
with open(html_path) as f:
soup = BeautifulSoup(f, "lxml")
line: Tag = soup.find(class_="ocrx_line")
assert [word.text for word in line.find_all(class_="ocrx_word")] == [
"Small",
"Signal",
"Transistors",
]
# The table in the 1st page should contain 18 columns
page = soup.find(class_="ocr_page")
table = page.find(class_="ocr_table")
assert len(table.find("tr").find_all("td")) == 18
assert get_bbox(table) is not None
# Find a cell containing one or more of ocrx_word and check if it has bbox
cell = table.find(class_="ocrx_word").parent.parent
assert get_bbox(cell) is not None
with Popen(["hocr-check", html_path], stderr=PIPE) as proc:
assert all([line.decode("utf-8").startswith("ok") for line in proc.stderr])
def test_overflowerror_should_not_happen():
"""Test if OverflowError does not happen (#104)."""
output = pdftotree.parse(
"tests/input/UACompanionSpecificationforIEC611313Model_p41.pdf"
)
assert output is not None
def test_ml_completion():
"""Simply test that ML-based parse runs without errors."""
output = pdftotree.parse(
"tests/input/paleo.pdf",
model_type="ml",
model_path="tests/input/paleo_model.pkl",
)
assert output is not None
def test_vision_completion():
"""Simply test that vision-based parse runs without errors."""
output = pdftotree.parse(
"tests/input/paleo.pdf",
model_type="vision",
model_path="tests/input/paleo_visual_model.h5",
)
soup = BeautifulSoup(output, "lxml")
assert len(soup.find_all("table")) == 2
| pdftotree-master | tests/test_basic.py |
"""Test table area detection."""
from bs4 import BeautifulSoup
import pdftotree
from pdftotree.core import load_model
from pdftotree.visual.visual_utils import predict_heatmap
def test_vision_model():
"""Check if the vision model runs and returns results in expected format."""
pdf_file = "tests/input/paleo.pdf"
model_path = "tests/input/paleo_visual_model.h5"
model = load_model("vision", model_path)
page_num = 0
image, pred = predict_heatmap(
pdf_file, page_num, model
) # index start at 0 with wand
assert image.shape == (448, 448, 3)
assert pred.shape == (448, 448)
# TODO: add test_ml_model and test_heuristic_model
def test_cell_values_not_missing():
output = pdftotree.parse("tests/input/md.pdf")
soup = BeautifulSoup(output, "lxml")
table = soup.find(class_="ocr_table")
assert list(table.find_all("tr")[3].stripped_strings) == [
"Erin",
"lamb",
"madras",
"HOT",
"$5",
]
| pdftotree-master | tests/test_table_detection.py |
pdftotree-master | tests/__init__.py |
|
"""Test figures."""
from bs4 import BeautifulSoup
import pdftotree
def test_figures():
output = pdftotree.parse("tests/input/md.pdf")
soup = BeautifulSoup(output, "lxml")
imgs = soup.find_all("img")
assert len(imgs) == 1
output = pdftotree.parse("tests/input/CaseStudy_ACS.pdf")
soup = BeautifulSoup(output, "lxml")
imgs = soup.find_all("img")
# 3 jpg, 2 bmp, 5 total images
assert len(imgs) == 5
assert len([img for img in imgs if img["src"].startswith("data:image/jpeg")]) == 3
assert len([img for img in imgs if img["src"].startswith("data:image/bmp")]) == 2
| pdftotree-master | tests/test_figures.py |
"""Test extracted text."""
import re
from bs4 import BeautifulSoup
import pdftotree
def test_text_is_escaped():
"""Test if text is properly escaped."""
output = pdftotree.parse("tests/input/md.pdf")
soup = BeautifulSoup(output, "lxml")
words = soup.find_all(class_="ocrx_word")
# Use str() instead of .text as the latter gives unescaped text.
m = re.search(r">(.+?)<", str(words[66]))
assert m[1] == "'bar';."
output = pdftotree.parse("tests/input/112823.pdf")
soup = BeautifulSoup(output, "lxml")
words = soup.find_all(class_="ocrx_word")
m = re.search(r">(.+?)<", str(words[152]))
assert m[1] == "&"
| pdftotree-master | tests/test_text.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import pylab
import torch
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn.cluster import KMeans
from utils import mic_acc_cal, shot_acc
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='')
parser.add_argument('--type', type=str, default='test')
args = parser.parse_args()
# ----------------------------------------------------------------------------------
root = args.root
train_file = 'trainfeat_all.pkl'
test_file = '{}feat_all.pkl'.format(args.type)
# load data
with open(os.path.join(root, train_file), 'rb') as f:
trainset = pickle.load(f)
if args.type == 'train':
testset = trainset
else:
with open(os.path.join(root, test_file), 'rb') as f:
testset = pickle.load(f)
testsize = len(testset['feats'])
batch_size = 512
# Calculate centroids
centroids = []
c_labels = []
for i in np.unique(trainset['labels']):
c_labels.append(i)
centroids.append(np.mean(trainset['feats'][trainset['labels']==i], axis=0))
centroids = torch.Tensor(np.stack(centroids))
c_labels = np.array(c_labels)
# ----------------------------------------------------------------------------------
# load weight
x = torch.load(os.path.join(root, 'final_model_checkpoint.pth'), map_location=torch.device('cpu'))
weights = x['state_dict_best']['classifier']['module.fc.weight'].cpu()
bias = x['state_dict_best']['classifier']['module.fc.bias'].cpu()
def cos_similarity(A, B):
    # Only B is row-normalized here, so this is cosine similarity up to the
    # per-row norm of A (unused in this script; kept for reference).
    normB = torch.norm(B, 2, 1, keepdim=True)
    B = B / normB
    AB = torch.mm(A, B.t())
    return AB
def linear_classifier(inputs, weights, bias):
return torch.addmm(bias, inputs, weights.t())
def logits2preds(logits, labels):
_, nns = logits.max(dim=1)
preds = np.array([labels[i] for i in nns])
return preds
def preds2accs(preds, testset, trainset):
    # mic_acc_cal (micro accuracy) is superseded by the mean per-class accuracy
    many, median, low, cls_accs = shot_acc(preds, testset['labels'], trainset['labels'], acc_per_cls=True)
    top1_all = np.mean(cls_accs)
    # Columns: many-shot, median-shot, low-shot, overall (all in %)
    print("{:.2f} \t {:.2f} \t {:.2f} \t {:.2f}".format(
        many * 100, median*100, low*100, top1_all*100))
def dotproduct_similarity(A, B):
    AB = torch.mm(A, B.t())
    return AB
def forward(weights):
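    # Score the whole test set in mini-batches against the given classifier
    # weights (plain dot product; the loaded bias is intentionally not used).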
total_logits = []
for i in range(testsize // batch_size + 1):
# if i%10 == 0:
# print('{}/{}'.format(i, testsize // batch_size + 1))
feat = testset['feats'][batch_size*i:min(batch_size*(i+1), testsize)]
feat = torch.Tensor(feat)
logits = dotproduct_similarity(feat, weights)
total_logits.append(logits)
total_logits = torch.cat(total_logits)
return total_logits
def pnorm(weights, p):
normB = torch.norm(weights, 2, 1)
ws = weights.clone()
for i in range(weights.size(0)):
ws[i] = ws[i] / torch.pow(normB[i], p)
return ws
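# tau-normalization sweep: pnorm rescales each class weight w_i to
# w_i / ||w_i||_2^p. p = 0 keeps the original classifier; p = 1 fully
# equalizes the norms, which typically favors tail classes. A minimal
# sketch with illustrative values only:
#   >>> w = torch.tensor([[3., 4.], [0.6, 0.8]])  # row norms 5.0 and 1.0
#   >>> pnorm(w, 1.0)  # both rows end up with unit norm
#   tensor([[0.6000, 0.8000],
#           [0.6000, 0.8000]])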
for p in np.linspace(0,2,21):
ws = pnorm(weights, p)
logits = forward(ws)
preds = logits2preds(logits, c_labels)
preds2accs(preds, testset, trainset)
| classifier-balancing-main | tau_norm.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import yaml
import csv
import h5py
class Logger(object):
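    """Writes the run config (yaml), per-epoch accuracy and loss curves (csv),
    and optional sampler-weight snapshots (hdf5) under `logdir`."""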
def __init__(self, logdir):
self.logdir = logdir
if not os.path.isdir(logdir):
os.makedirs(logdir)
self.cfg_file = os.path.join(self.logdir, 'cfg.yaml')
self.acc_file = os.path.join(self.logdir, 'acc.csv')
self.loss_file = os.path.join(self.logdir, 'loss.csv')
self.ws_file = os.path.join(self.logdir, 'ws.h5')
self.acc_keys = None
self.loss_keys = None
self.logging_ws = False
def log_cfg(self, cfg):
print('===> Saving cfg parameters to: ', self.cfg_file)
with open(self.cfg_file, 'w') as f:
yaml.dump(cfg, f)
def log_acc(self, accs):
if self.acc_keys is None:
self.acc_keys = [k for k in accs.keys()]
with open(self.acc_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=self.acc_keys)
writer.writeheader()
writer.writerow(accs)
else:
with open(self.acc_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=self.acc_keys)
writer.writerow(accs)
def log_loss(self, losses):
# valid_losses = {k: v for k, v in losses.items() if v is not None}
valid_losses = losses
if self.loss_keys is None:
self.loss_keys = [k for k in valid_losses.keys()]
with open(self.loss_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=self.loss_keys)
writer.writeheader()
writer.writerow(valid_losses)
else:
with open(self.loss_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=self.loss_keys)
writer.writerow(valid_losses)
def log_ws(self, e, ws):
mode = 'a' if self.logging_ws else 'w'
self.logging_ws = True
key = 'Epoch{:02d}'.format(e)
with h5py.File(self.ws_file, mode) as f:
g = f.create_group(key)
for k, v in ws.items():
g.create_dataset(k, data=v)
| classifier-balancing-main | logger.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
import importlib
import pdb
def source_import(file_path):
"""This function imports python module directly from source code using importlib"""
spec = importlib.util.spec_from_file_location('', file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def batch_show(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=(20,20))
plt.imshow(inp)
if title is not None:
plt.title(title)
def print_write(print_str, log_file):
print(*print_str)
if log_file is None:
return
with open(log_file, 'a') as f:
print(*print_str, file=f)
def init_weights(model, weights_path, caffe=False, classifier=False):
"""Initialize weights"""
print('Pretrained %s weights path: %s' % ('classifier' if classifier else 'feature model',
weights_path))
weights = torch.load(weights_path)
if not classifier:
if caffe:
weights = {k: weights[k] if k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['feat_model']
weights = {k: weights['module.' + k] if 'module.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['classifier']
weights = {k: weights['module.fc.' + k] if 'module.fc.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
model.load_state_dict(weights)
return model
def shot_acc(preds, labels, train_data, many_shot_thr=100, low_shot_thr=20, acc_per_cls=False):
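    """Split classes by training-set frequency (> many_shot_thr is many-shot,
    < low_shot_thr is low-shot, the rest is median-shot) and return the mean
    per-class accuracy of each group."""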
if isinstance(train_data, np.ndarray):
training_labels = np.array(train_data).astype(int)
else:
training_labels = np.array(train_data.dataset.labels).astype(int)
if isinstance(preds, torch.Tensor):
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
elif isinstance(preds, np.ndarray):
pass
else:
raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(len(labels[labels == l]))
class_correct.append((preds[labels == l] == labels[labels == l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] > many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] < low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
if len(many_shot) == 0:
many_shot.append(0)
if len(median_shot) == 0:
median_shot.append(0)
if len(low_shot) == 0:
low_shot.append(0)
if acc_per_cls:
class_accs = [c / cnt for c, cnt in zip(class_correct, test_class_count)]
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot), class_accs
else:
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def weighted_shot_acc(preds, labels, ws, train_data, many_shot_thr=100, low_shot_thr=20):
training_labels = np.array(train_data.dataset.labels).astype(int)
if isinstance(preds, torch.Tensor):
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
elif isinstance(preds, np.ndarray):
pass
else:
raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(ws[labels==l].sum())
class_correct.append(((preds[labels==l] == labels[labels==l]) * ws[labels==l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] > many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] < low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def F_measure(preds, labels, openset=False, theta=None):
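    # `theta` is accepted for API compatibility but unused here; the open-set
    # threshold is applied by the caller before invoking this function.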
if openset:
# f1 score for openset evaluation
true_pos = 0.
false_pos = 0.
false_neg = 0.
for i in range(len(labels)):
true_pos += 1 if preds[i] == labels[i] and labels[i] != -1 else 0
false_pos += 1 if preds[i] != labels[i] and labels[i] != -1 and preds[i] != -1 else 0
false_neg += 1 if preds[i] != labels[i] and labels[i] == -1 else 0
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
return 2 * ((precision * recall) / (precision + recall + 1e-12))
else:
# Regular f1 score
return f1_score(labels.detach().cpu().numpy(), preds.detach().cpu().numpy(), average='macro')
def mic_acc_cal(preds, labels):
if isinstance(labels, tuple):
assert len(labels) == 3
targets_a, targets_b, lam = labels
acc_mic_top1 = (lam * preds.eq(targets_a.data).cpu().sum().float() \
+ (1 - lam) * preds.eq(targets_b.data).cpu().sum().float()) / len(preds)
else:
acc_mic_top1 = (preds == labels).sum().item() / len(labels)
return acc_mic_top1
def weighted_mic_acc_cal(preds, labels, ws):
acc_mic_top1 = ws[preds == labels].sum() / ws.sum()
return acc_mic_top1
def class_count(data):
labels = np.array(data.dataset.labels)
class_data_num = []
for l in np.unique(labels):
class_data_num.append(len(labels[labels == l]))
return class_data_num
# def dataset_dist (in_loader):
# """Example, dataset_dist(data['train'][0])"""
# label_list = np.array([x[1] for x in in_loader.dataset.samples])
# total_num = len(data_list)
# distribution = []
# for l in np.unique(label_list):
# distribution.append((l, len(label_list[label_list == l])/total_num))
# return distribution
# New Added
def torch2numpy(x):
if isinstance(x, torch.Tensor):
return x.detach().cpu().numpy()
elif isinstance(x, (list, tuple)):
return tuple([torch2numpy(xi) for xi in x])
else:
return x
def logits2score(logits, labels):
scores = F.softmax(logits, dim=1)
score = scores.gather(1, labels.view(-1, 1))
score = score.squeeze().cpu().numpy()
return score
def logits2entropy(logits):
scores = F.softmax(logits, dim=1)
scores = scores.cpu().numpy() + 1e-30
ent = -scores * np.log(scores)
ent = np.sum(ent, 1)
return ent
def logits2CE(logits, labels):
scores = F.softmax(logits, dim=1)
score = scores.gather(1, labels.view(-1, 1))
score = score.squeeze().cpu().numpy() + 1e-30
ce = -np.log(score)
return ce
def get_priority(ptype, logits, labels):
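    # Per-sample priority weight: a low softmax score, high entropy, or high
    # cross-entropy all signal a hard example and yield a larger weight.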
if ptype == 'score':
ws = 1 - logits2score(logits, labels)
elif ptype == 'entropy':
ws = logits2entropy(logits)
elif ptype == 'CE':
ws = logits2CE(logits, labels)
    else:
        raise ValueError('Unknown priority type: {}'.format(ptype))
return ws
def get_value(oldv, newv):
if newv is not None:
return newv
else:
return oldv
| classifier-balancing-main | utils.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import os
import copy
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from utils import *
from logger import Logger
import time
import numpy as np
import warnings
import pdb
class model():
def __init__(self, config, data, test=False):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.config = config
self.training_opt = self.config['training_opt']
self.memory = self.config['memory']
self.data = data
self.test_mode = test
self.num_gpus = torch.cuda.device_count()
self.do_shuffle = config['shuffle'] if 'shuffle' in config else False
# Setup logger
self.logger = Logger(self.training_opt['log_dir'])
# Initialize model
self.init_models()
# Load pre-trained model parameters
if 'model_dir' in self.config and self.config['model_dir'] is not None:
self.load_model(self.config['model_dir'])
# Under training mode, initialize training steps, optimizers, schedulers, criterions, and centroids
if not self.test_mode:
# If using steps for training, we need to calculate training steps
# for each epoch based on actual number of training data instead of
# oversampled data number
print('Using steps for training.')
self.training_data_num = len(self.data['train'].dataset)
self.epoch_steps = int(self.training_data_num \
/ self.training_opt['batch_size'])
# Initialize model optimizer and scheduler
print('Initializing model optimizer.')
self.scheduler_params = self.training_opt['scheduler_params']
self.model_optimizer, \
self.model_optimizer_scheduler = self.init_optimizers(self.model_optim_params_list)
self.init_criterions()
if self.memory['init_centroids']:
self.criterions['FeatureLoss'].centroids.data = \
self.centroids_cal(self.data['train_plain'])
# Set up log file
self.log_file = os.path.join(self.training_opt['log_dir'], 'log.txt')
if os.path.isfile(self.log_file):
os.remove(self.log_file)
self.logger.log_cfg(self.config)
else:
if 'KNNClassifier' in self.config['networks']['classifier']['def_file']:
self.load_model()
if not self.networks['classifier'].initialized:
cfeats = self.get_knncentroids()
print('===> Saving features to %s' %
os.path.join(self.training_opt['log_dir'], 'cfeats.pkl'))
with open(os.path.join(self.training_opt['log_dir'], 'cfeats.pkl'), 'wb') as f:
pickle.dump(cfeats, f)
self.networks['classifier'].update(cfeats)
self.log_file = None
def init_models(self, optimizer=True):
networks_defs = self.config['networks']
self.networks = {}
self.model_optim_params_list = []
print("Using", torch.cuda.device_count(), "GPUs.")
for key, val in networks_defs.items():
# Networks
def_file = val['def_file']
# model_args = list(val['params'].values())
# model_args.append(self.test_mode)
model_args = val['params']
model_args.update({'test': self.test_mode})
self.networks[key] = source_import(def_file).create_model(**model_args)
if 'KNNClassifier' in type(self.networks[key]).__name__:
# Put the KNN classifier on one single GPU
self.networks[key] = self.networks[key].cuda()
else:
self.networks[key] = nn.DataParallel(self.networks[key]).cuda()
if 'fix' in val and val['fix']:
print('Freezing feature weights except for self attention weights (if exist).')
for param_name, param in self.networks[key].named_parameters():
# Freeze all parameters except self attention parameters
if 'selfatt' not in param_name and 'fc' not in param_name:
param.requires_grad = False
# print(' | ', param_name, param.requires_grad)
# Optimizer list
optim_params = val['optim_params']
self.model_optim_params_list.append({'params': self.networks[key].parameters(),
'lr': optim_params['lr'],
'momentum': optim_params['momentum'],
'weight_decay': optim_params['weight_decay']})
def init_criterions(self):
criterion_defs = self.config['criterions']
self.criterions = {}
self.criterion_weights = {}
for key, val in criterion_defs.items():
def_file = val['def_file']
loss_args = list(val['loss_params'].values())
self.criterions[key] = source_import(def_file).create_loss(*loss_args).cuda()
self.criterion_weights[key] = val['weight']
if val['optim_params']:
print('Initializing criterion optimizer.')
optim_params = val['optim_params']
optim_params = [{'params': self.criterions[key].parameters(),
'lr': optim_params['lr'],
'momentum': optim_params['momentum'],
'weight_decay': optim_params['weight_decay']}]
# Initialize criterion optimizer and scheduler
self.criterion_optimizer, \
self.criterion_optimizer_scheduler = self.init_optimizers(optim_params)
else:
self.criterion_optimizer = None
def init_optimizers(self, optim_params):
optimizer = optim.SGD(optim_params)
if self.config['coslr']:
print("===> Using coslr eta_min={}".format(self.config['endlr']))
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, self.training_opt['num_epochs'], eta_min=self.config['endlr'])
else:
scheduler = optim.lr_scheduler.StepLR(optimizer,
step_size=self.scheduler_params['step_size'],
gamma=self.scheduler_params['gamma'])
return optimizer, scheduler
    def batch_forward(self, inputs, labels=None, centroids=False, feature_ext=False, phase='train'):
'''
This is a general single batch running function.
'''
# Calculate Features
self.features, self.feature_maps = self.networks['feat_model'](inputs)
# If not just extracting features, calculate logits
if not feature_ext:
# During training, calculate centroids if needed to
if phase != 'test':
if centroids and 'FeatureLoss' in self.criterions.keys():
self.centroids = self.criterions['FeatureLoss'].centroids.data
else:
self.centroids = None
if self.centroids is not None:
centroids_ = torch.cat([self.centroids] * self.num_gpus)
else:
centroids_ = self.centroids
# Calculate logits with classifier
self.logits, self.direct_memory_feature = self.networks['classifier'](self.features, centroids_)
def batch_backward(self):
# Zero out optimizer gradients
self.model_optimizer.zero_grad()
if self.criterion_optimizer:
self.criterion_optimizer.zero_grad()
# Back-propagation from loss outputs
self.loss.backward()
# Step optimizers
self.model_optimizer.step()
if self.criterion_optimizer:
self.criterion_optimizer.step()
def batch_loss(self, labels):
self.loss = 0
# First, apply performance loss
if 'PerformanceLoss' in self.criterions.keys():
self.loss_perf = self.criterions['PerformanceLoss'](self.logits, labels)
self.loss_perf *= self.criterion_weights['PerformanceLoss']
self.loss += self.loss_perf
# Apply loss on features if set up
if 'FeatureLoss' in self.criterions.keys():
self.loss_feat = self.criterions['FeatureLoss'](self.features, labels)
self.loss_feat = self.loss_feat * self.criterion_weights['FeatureLoss']
# Add feature loss to total loss
self.loss += self.loss_feat
def shuffle_batch(self, x, y):
index = torch.randperm(x.size(0))
x = x[index]
y = y[index]
return x, y
def train(self):
# When training the network
print_str = ['Phase: train']
print_write(print_str, self.log_file)
time.sleep(0.25)
print_write(['Do shuffle??? --- ', self.do_shuffle], self.log_file)
# Initialize best model
best_model_weights = {}
best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
best_acc = 0.0
best_epoch = 0
        best_centroids = None
end_epoch = self.training_opt['num_epochs']
# Loop over epochs
for epoch in range(1, end_epoch + 1):
for model in self.networks.values():
model.train()
torch.cuda.empty_cache()
# Set model modes and set scheduler
# In training, step optimizer scheduler and set model to train()
self.model_optimizer_scheduler.step()
if self.criterion_optimizer:
self.criterion_optimizer_scheduler.step()
# Iterate over dataset
total_preds = []
total_labels = []
for step, (inputs, labels, indexes) in enumerate(self.data['train']):
# Break when step equal to epoch step
if step == self.epoch_steps:
break
if self.do_shuffle:
inputs, labels = self.shuffle_batch(inputs, labels)
inputs, labels = inputs.cuda(), labels.cuda()
# If on training phase, enable gradients
with torch.set_grad_enabled(True):
# If training, forward with loss, and no top 5 accuracy calculation
self.batch_forward(inputs, labels,
centroids=self.memory['centroids'],
phase='train')
self.batch_loss(labels)
self.batch_backward()
# Tracking predictions
_, preds = torch.max(self.logits, 1)
total_preds.append(torch2numpy(preds))
total_labels.append(torch2numpy(labels))
# Output minibatch training results
if step % self.training_opt['display_step'] == 0:
minibatch_loss_feat = self.loss_feat.item() \
if 'FeatureLoss' in self.criterions.keys() else None
minibatch_loss_perf = self.loss_perf.item() \
if 'PerformanceLoss' in self.criterions else None
minibatch_loss_total = self.loss.item()
minibatch_acc = mic_acc_cal(preds, labels)
print_str = ['Epoch: [%d/%d]'
% (epoch, self.training_opt['num_epochs']),
'Step: %5d'
% (step),
'Minibatch_loss_feature: %.3f'
% (minibatch_loss_feat) if minibatch_loss_feat else '',
'Minibatch_loss_performance: %.3f'
% (minibatch_loss_perf) if minibatch_loss_perf else '',
'Minibatch_accuracy_micro: %.3f'
% (minibatch_acc)]
print_write(print_str, self.log_file)
loss_info = {
'Epoch': epoch,
'Step': step,
'Total': minibatch_loss_total,
'CE': minibatch_loss_perf,
'feat': minibatch_loss_feat
}
self.logger.log_loss(loss_info)
# Update priority weights if using PrioritizedSampler
# if self.training_opt['sampler'] and \
# self.training_opt['sampler']['type'] == 'PrioritizedSampler':
if hasattr(self.data['train'].sampler, 'update_weights'):
if hasattr(self.data['train'].sampler, 'ptype'):
ptype = self.data['train'].sampler.ptype
else:
ptype = 'score'
ws = get_priority(ptype, self.logits.detach(), labels)
# ws = logits2score(self.logits.detach(), labels)
inlist = [indexes.cpu().numpy(), ws]
if self.training_opt['sampler']['type'] == 'ClassPrioritySampler':
inlist.append(labels.cpu().numpy())
self.data['train'].sampler.update_weights(*inlist)
# self.data['train'].sampler.update_weights(indexes.cpu().numpy(), ws)
if hasattr(self.data['train'].sampler, 'get_weights'):
self.logger.log_ws(epoch, self.data['train'].sampler.get_weights())
if hasattr(self.data['train'].sampler, 'reset_weights'):
self.data['train'].sampler.reset_weights(epoch)
# After every epoch, validation
rsls = {'epoch': epoch}
rsls_train = self.eval_with_preds(total_preds, total_labels)
rsls_eval = self.eval(phase='val')
rsls.update(rsls_train)
rsls.update(rsls_eval)
# Reset class weights for sampling if pri_mode is valid
if hasattr(self.data['train'].sampler, 'reset_priority'):
ws = get_priority(self.data['train'].sampler.ptype,
self.total_logits.detach(),
self.total_labels)
self.data['train'].sampler.reset_priority(ws, self.total_labels.cpu().numpy())
# Log results
self.logger.log_acc(rsls)
# Under validation, the best model need to be updated
if self.eval_acc_mic_top1 > best_acc:
best_epoch = epoch
best_acc = self.eval_acc_mic_top1
best_centroids = self.centroids
best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
print('===> Saving checkpoint')
self.save_latest(epoch)
print()
print('Training Complete.')
print_str = ['Best validation accuracy is %.3f at epoch %d' % (best_acc, best_epoch)]
print_write(print_str, self.log_file)
# Save the best model and best centroids if calculated
self.save_model(epoch, best_epoch, best_model_weights, best_acc, centroids=best_centroids)
# Test on the test set
self.reset_model(best_model_weights)
self.eval('test' if 'test' in self.data else 'val')
print('Done')
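    # Training-set accuracy from the predictions collected during the epoch;
    # mixup samples are counted twice, weighted by lam and (1 - lam).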
def eval_with_preds(self, preds, labels):
# Count the number of examples
n_total = sum([len(p) for p in preds])
# Split the examples into normal and mixup
normal_preds, normal_labels = [], []
mixup_preds, mixup_labels1, mixup_labels2, mixup_ws = [], [], [], []
for p, l in zip(preds, labels):
if isinstance(l, tuple):
mixup_preds.append(p)
mixup_labels1.append(l[0])
mixup_labels2.append(l[1])
mixup_ws.append(l[2] * np.ones_like(l[0]))
else:
normal_preds.append(p)
normal_labels.append(l)
# Calculate normal prediction accuracy
rsl = {'train_all':0., 'train_many':0., 'train_median':0., 'train_low': 0.}
if len(normal_preds) > 0:
normal_preds, normal_labels = list(map(np.concatenate, [normal_preds, normal_labels]))
n_top1 = mic_acc_cal(normal_preds, normal_labels)
n_top1_many, \
n_top1_median, \
n_top1_low, = shot_acc(normal_preds, normal_labels, self.data['train'])
rsl['train_all'] += len(normal_preds) / n_total * n_top1
rsl['train_many'] += len(normal_preds) / n_total * n_top1_many
rsl['train_median'] += len(normal_preds) / n_total * n_top1_median
rsl['train_low'] += len(normal_preds) / n_total * n_top1_low
# Calculate mixup prediction accuracy
if len(mixup_preds) > 0:
mixup_preds, mixup_labels, mixup_ws = \
list(map(np.concatenate, [mixup_preds*2, mixup_labels1+mixup_labels2, mixup_ws]))
mixup_ws = np.concatenate([mixup_ws, 1-mixup_ws])
n_top1 = weighted_mic_acc_cal(mixup_preds, mixup_labels, mixup_ws)
n_top1_many, \
n_top1_median, \
n_top1_low, = weighted_shot_acc(mixup_preds, mixup_labels, mixup_ws, self.data['train'])
rsl['train_all'] += len(mixup_preds) / 2 / n_total * n_top1
rsl['train_many'] += len(mixup_preds) / 2 / n_total * n_top1_many
rsl['train_median'] += len(mixup_preds) / 2 / n_total * n_top1_median
rsl['train_low'] += len(mixup_preds) / 2 / n_total * n_top1_low
# Top-1 accuracy and additional string
print_str = ['\n Training acc Top1: %.3f \n' % (rsl['train_all']),
'Many_top1: %.3f' % (rsl['train_many']),
'Median_top1: %.3f' % (rsl['train_median']),
'Low_top1: %.3f' % (rsl['train_low']),
'\n']
print_write(print_str, self.log_file)
return rsl
def eval(self, phase='val', openset=False, save_feat=False):
print_str = ['Phase: %s' % (phase)]
print_write(print_str, self.log_file)
time.sleep(0.25)
if openset:
print('Under openset test mode. Open threshold is %.1f'
% self.training_opt['open_threshold'])
torch.cuda.empty_cache()
# In validation or testing mode, set model to eval() and initialize running loss/correct
for model in self.networks.values():
model.eval()
self.total_logits = torch.empty((0, self.training_opt['num_classes'])).cuda()
self.total_labels = torch.empty(0, dtype=torch.long).cuda()
self.total_paths = np.empty(0)
get_feat_only = save_feat
feats_all, labels_all, idxs_all, logits_all = [], [], [], []
featmaps_all = []
# Iterate over dataset
for inputs, labels, paths in tqdm(self.data[phase]):
inputs, labels = inputs.cuda(), labels.cuda()
# If on training phase, enable gradients
with torch.set_grad_enabled(False):
# In validation or testing
self.batch_forward(inputs, labels,
centroids=self.memory['centroids'],
phase=phase)
if not get_feat_only:
self.total_logits = torch.cat((self.total_logits, self.logits))
self.total_labels = torch.cat((self.total_labels, labels))
self.total_paths = np.concatenate((self.total_paths, paths))
if get_feat_only:
logits_all.append(self.logits.cpu().numpy())
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
idxs_all.append(paths.numpy())
if get_feat_only:
typ = 'feat'
if phase == 'train_plain':
name = 'train{}_all.pkl'.format(typ)
elif phase == 'test':
name = 'test{}_all.pkl'.format(typ)
elif phase == 'val':
name = 'val{}_all.pkl'.format(typ)
fname = os.path.join(self.training_opt['log_dir'], name)
print('===> Saving feats to ' + fname)
with open(fname, 'wb') as f:
pickle.dump({
'feats': np.concatenate(feats_all),
'labels': np.concatenate(labels_all),
'idxs': np.concatenate(idxs_all),
},
f, protocol=4)
return
probs, preds = F.softmax(self.total_logits.detach(), dim=1).max(dim=1)
if openset:
preds[probs < self.training_opt['open_threshold']] = -1
self.openset_acc = mic_acc_cal(preds[self.total_labels == -1],
self.total_labels[self.total_labels == -1])
print('\n\nOpenset Accuracy: %.3f' % self.openset_acc)
# Calculate the overall accuracy and F measurement
        self.eval_acc_mic_top1 = mic_acc_cal(preds[self.total_labels != -1],
                                             self.total_labels[self.total_labels != -1])
self.eval_f_measure = F_measure(preds, self.total_labels, openset=openset,
theta=self.training_opt['open_threshold'])
self.many_acc_top1, \
self.median_acc_top1, \
self.low_acc_top1, \
self.cls_accs = shot_acc(preds[self.total_labels != -1],
self.total_labels[self.total_labels != -1],
self.data['train'],
acc_per_cls=True)
# Top-1 accuracy and additional string
print_str = ['\n\n',
'Phase: %s'
% (phase),
'\n\n',
'Evaluation_accuracy_micro_top1: %.3f'
% (self.eval_acc_mic_top1),
'\n',
'Averaged F-measure: %.3f'
% (self.eval_f_measure),
'\n',
'Many_shot_accuracy_top1: %.3f'
% (self.many_acc_top1),
'Median_shot_accuracy_top1: %.3f'
% (self.median_acc_top1),
'Low_shot_accuracy_top1: %.3f'
% (self.low_acc_top1),
'\n']
rsl = {phase + '_all': self.eval_acc_mic_top1,
phase + '_many': self.many_acc_top1,
phase + '_median': self.median_acc_top1,
phase + '_low': self.low_acc_top1,
phase + '_fscore': self.eval_f_measure}
if phase == 'val':
print_write(print_str, self.log_file)
else:
acc_str = ["{:.1f} \t {:.1f} \t {:.1f} \t {:.1f}".format(
self.many_acc_top1 * 100,
self.median_acc_top1 * 100,
self.low_acc_top1 * 100,
self.eval_acc_mic_top1 * 100)]
if self.log_file is not None and os.path.exists(self.log_file):
print_write(print_str, self.log_file)
print_write(acc_str, self.log_file)
else:
print(*print_str)
print(*acc_str)
if phase == 'test':
with open(os.path.join(self.training_opt['log_dir'], 'cls_accs.pkl'), 'wb') as f:
pickle.dump(self.cls_accs, f)
return rsl
def centroids_cal(self, data, save_all=False):
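        """Class centroids of training-set features: sum the features of each
        class, then divide by the per-class sample count."""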
centroids = torch.zeros(self.training_opt['num_classes'],
self.training_opt['feature_dim']).cuda()
print('Calculating centroids.')
torch.cuda.empty_cache()
for model in self.networks.values():
model.eval()
feats_all, labels_all, idxs_all = [], [], []
# Calculate initial centroids only on training data.
with torch.set_grad_enabled(False):
for inputs, labels, idxs in tqdm(data):
inputs, labels = inputs.cuda(), labels.cuda()
# Calculate Features of each training data
self.batch_forward(inputs, feature_ext=True)
# Add all calculated features to center tensor
for i in range(len(labels)):
label = labels[i]
centroids[label] += self.features[i]
                # Save features if required
if save_all:
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
idxs_all.append(idxs.numpy())
if save_all:
fname = os.path.join(self.training_opt['log_dir'], 'feats_all.pkl')
with open(fname, 'wb') as f:
pickle.dump({'feats': np.concatenate(feats_all),
'labels': np.concatenate(labels_all),
'idxs': np.concatenate(idxs_all)},
f)
# Average summed features with class count
centroids /= torch.tensor(class_count(data)).float().unsqueeze(1).cuda()
return centroids
def get_knncentroids(self):
datakey = 'train_plain'
assert datakey in self.data
print('===> Calculating KNN centroids.')
torch.cuda.empty_cache()
for model in self.networks.values():
model.eval()
feats_all, labels_all = [], []
# Calculate initial centroids only on training data.
with torch.set_grad_enabled(False):
for inputs, labels, idxs in tqdm(self.data[datakey]):
inputs, labels = inputs.cuda(), labels.cuda()
# Calculate Features of each training data
self.batch_forward(inputs, feature_ext=True)
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
feats = np.concatenate(feats_all)
labels = np.concatenate(labels_all)
featmean = feats.mean(axis=0)
def get_centroids(feats_, labels_):
centroids = []
for i in np.unique(labels_):
centroids.append(np.mean(feats_[labels_==i], axis=0))
return np.stack(centroids)
        # Get unnormalized centroids
un_centers = get_centroids(feats, labels)
        # Get L2-normalized centroids
l2n_feats = torch.Tensor(feats.copy())
norm_l2n = torch.norm(l2n_feats, 2, 1, keepdim=True)
l2n_feats = l2n_feats / norm_l2n
l2n_centers = get_centroids(l2n_feats.numpy(), labels)
        # Get centered L2-normalized (CL2N) centroids
cl2n_feats = torch.Tensor(feats.copy())
cl2n_feats = cl2n_feats - torch.Tensor(featmean)
norm_cl2n = torch.norm(cl2n_feats, 2, 1, keepdim=True)
cl2n_feats = cl2n_feats / norm_cl2n
cl2n_centers = get_centroids(cl2n_feats.numpy(), labels)
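        # Three centroid variants for NCM-style classification: raw class means
        # ('uncs'), means of L2-normalized features ('l2ncs'), and means of
        # mean-centered then L2-normalized features ('cl2ncs').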
return {'mean': featmean,
'uncs': un_centers,
'l2ncs': l2n_centers,
'cl2ncs': cl2n_centers}
def reset_model(self, model_state):
for key, model in self.networks.items():
weights = model_state[key]
weights = {k: weights[k] for k in weights if k in model.state_dict()}
model.load_state_dict(weights)
def load_model(self, model_dir=None):
model_dir = self.training_opt['log_dir'] if model_dir is None else model_dir
if not model_dir.endswith('.pth'):
model_dir = os.path.join(model_dir, 'final_model_checkpoint.pth')
print('Validation on the best model.')
print('Loading model from %s' % (model_dir))
checkpoint = torch.load(model_dir)
model_state = checkpoint['state_dict_best']
self.centroids = checkpoint['centroids'] if 'centroids' in checkpoint else None
for key, model in self.networks.items():
# if not self.test_mode and key == 'classifier':
if not self.test_mode and \
'DotProductClassifier' in self.config['networks'][key]['def_file']:
# Skip classifier initialization
                print('Skipping classifier initialization')
continue
weights = model_state[key]
weights = {k: weights[k] for k in weights if k in model.state_dict()}
x = model.state_dict()
x.update(weights)
model.load_state_dict(x)
def save_latest(self, epoch):
model_weights = {}
model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
model_states = {
'epoch': epoch,
'state_dict': model_weights
}
model_dir = os.path.join(self.training_opt['log_dir'],
'latest_model_checkpoint.pth')
torch.save(model_states, model_dir)
def save_model(self, epoch, best_epoch, best_model_weights, best_acc, centroids=None):
model_states = {'epoch': epoch,
'best_epoch': best_epoch,
'state_dict_best': best_model_weights,
'best_acc': best_acc,
'centroids': centroids}
model_dir = os.path.join(self.training_opt['log_dir'],
'final_model_checkpoint.pth')
torch.save(model_states, model_dir)
def output_logits(self, openset=False):
filename = os.path.join(self.training_opt['log_dir'],
'logits_%s'%('open' if openset else 'close'))
print("Saving total logits to: %s.npz" % filename)
np.savez(filename,
logits=self.total_logits.detach().cpu().numpy(),
labels=self.total_labels.detach().cpu().numpy(),
paths=self.total_paths)
| classifier-balancing-main | run_networks.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import os
import argparse
import pprint
from data import dataloader
from run_networks import model
import warnings
import yaml
from utils import source_import, get_value
data_root = {'ImageNet': '/datasets01_101/imagenet_full_size/061417',
'Places': '/datasets01_101/Places365/041019',
'iNaturalist18': '/checkpoint/bykang/iNaturalist18'}
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default=None, type=str)
parser.add_argument('--test', default=False, action='store_true')
parser.add_argument('--batch_size', type=int, default=None)
parser.add_argument('--test_open', default=False, action='store_true')
parser.add_argument('--output_logits', default=False, action='store_true')
parser.add_argument('--model_dir', type=str, default=None)
parser.add_argument('--save_feat', type=str, default='')
# KNN testing parameters
parser.add_argument('--knn', default=False, action='store_true')
parser.add_argument('--feat_type', type=str, default='cl2n')
parser.add_argument('--dist_type', type=str, default='l2')
# Learnable tau
parser.add_argument('--val_as_train', default=False, action='store_true')
args = parser.parse_args()
def update(config, args):
# Change parameters
config['model_dir'] = get_value(config['model_dir'], args.model_dir)
config['training_opt']['batch_size'] = \
get_value(config['training_opt']['batch_size'], args.batch_size)
# Testing with KNN
if args.knn and args.test:
training_opt = config['training_opt']
classifier_param = {
'feat_dim': training_opt['feature_dim'],
'num_classes': training_opt['num_classes'],
'feat_type': args.feat_type,
'dist_type': args.dist_type,
'log_dir': training_opt['log_dir']}
classifier = {
'def_file': './models/KNNClassifier.py',
'params': classifier_param,
'optim_params': config['networks']['classifier']['optim_params']}
config['networks']['classifier'] = classifier
return config
# ============================================================================
# LOAD CONFIGURATIONS
with open(args.cfg) as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
config = update(config, args)
test_mode = args.test
test_open = args.test_open
if test_open:
test_mode = True
output_logits = args.output_logits
training_opt = config['training_opt']
relatin_opt = config['memory']
dataset = training_opt['dataset']
if not os.path.isdir(training_opt['log_dir']):
os.makedirs(training_opt['log_dir'])
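# Note: rstrip('_LT') strips the characters '_', 'L', 'T' from the right of the
# dataset name; it happens to produce the right key for the datasets used here.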
print('Loading dataset from: %s' % data_root[dataset.rstrip('_LT')])
pprint.pprint(config)
def split2phase(split):
if split == 'train' and args.val_as_train:
return 'train_val'
else:
return split
if not test_mode:
sampler_defs = training_opt['sampler']
if sampler_defs:
if sampler_defs['type'] == 'ClassAwareSampler':
sampler_dic = {
'sampler': source_import(sampler_defs['def_file']).get_sampler(),
'params': {'num_samples_cls': sampler_defs['num_samples_cls']}
}
elif sampler_defs['type'] in ['MixedPrioritizedSampler',
'ClassPrioritySampler']:
sampler_dic = {
'sampler': source_import(sampler_defs['def_file']).get_sampler(),
'params': {k: v for k, v in sampler_defs.items() \
if k not in ['type', 'def_file']}
}
else:
sampler_dic = None
splits = ['train', 'train_plain', 'val']
if dataset not in ['iNaturalist18', 'ImageNet']:
splits.append('test')
data = {x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
dataset=dataset, phase=split2phase(x),
batch_size=training_opt['batch_size'],
sampler_dic=sampler_dic,
num_workers=training_opt['num_workers'])
for x in splits}
training_model = model(config, data, test=False)
training_model.train()
else:
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
UserWarning)
    print('Under testing phase, we load training data simply to calculate '
          'training data number for each class.')
if 'iNaturalist' in training_opt['dataset']:
splits = ['train', 'val']
test_split = 'val'
else:
splits = ['train', 'val', 'test']
test_split = 'test'
if 'ImageNet' == training_opt['dataset']:
splits = ['train', 'val']
test_split = 'val'
    # 'train_plain' is always needed at test time (e.g. for shot statistics and
    # KNN centroids), so it is loaded unconditionally.
    splits.append('train_plain')
data = {x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
dataset=dataset, phase=x,
batch_size=training_opt['batch_size'],
sampler_dic=None,
test_open=test_open,
num_workers=training_opt['num_workers'],
shuffle=False)
for x in splits}
training_model = model(config, data, test=True)
# training_model.load_model()
training_model.load_model(args.model_dir)
if args.save_feat in ['train_plain', 'val', 'test']:
saveit = True
test_split = args.save_feat
else:
saveit = False
training_model.eval(phase=test_split, openset=test_open, save_feat=saveit)
if output_logits:
training_model.output_logits(openset=test_open)
print('ALL COMPLETED.')
| classifier-balancing-main | main.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
from torch import nn
from torch.nn import functional as F
import pdb
class ModulatedAttLayer(nn.Module):
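    """Spatially modulated self-attention (OLTR): an embedded-Gaussian
    non-local block whose residual is gated by a learned 7x7 spatial
    attention map before being added back to the input feature map."""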
    def __init__(self, in_channels, reduction=2, mode='embedded_gaussian'):
super(ModulatedAttLayer, self).__init__()
self.in_channels = in_channels
self.reduction = reduction
self.inter_channels = in_channels // reduction
self.mode = mode
assert mode in ['embedded_gaussian']
        self.g = nn.Conv2d(self.in_channels, self.inter_channels, kernel_size=1)
        self.theta = nn.Conv2d(self.in_channels, self.inter_channels, kernel_size=1)
        self.phi = nn.Conv2d(self.in_channels, self.inter_channels, kernel_size=1)
        self.conv_mask = nn.Conv2d(self.inter_channels, self.in_channels, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc_spatial = nn.Linear(7 * 7 * self.in_channels, 7 * 7)
self.init_weights()
def init_weights(self):
msra_list = [self.g, self.theta, self.phi]
for m in msra_list:
nn.init.kaiming_normal_(m.weight.data)
m.bias.data.zero_()
self.conv_mask.weight.data.zero_()
def embedded_gaussian(self, x):
        # embedded-Gaussian self-attention over the spatial grid (non-local block style)
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
map_t_p = torch.matmul(theta_x, phi_x)
mask_t_p = F.softmax(map_t_p, dim=-1)
map_ = torch.matmul(mask_t_p, g_x)
map_ = map_.permute(0, 2, 1).contiguous()
map_ = map_.view(batch_size, self.inter_channels, x.size(2), x.size(3))
mask = self.conv_mask(map_)
x_flatten = x.view(-1, 7 * 7 * self.in_channels)
spatial_att = self.fc_spatial(x_flatten)
spatial_att = spatial_att.softmax(dim=1)
spatial_att = spatial_att.view(-1, 7, 7).unsqueeze(1)
spatial_att = spatial_att.expand(-1, self.in_channels, -1, -1)
final = spatial_att * mask + x
return final, [x, spatial_att, mask]
def forward(self, x):
if self.mode == 'embedded_gaussian':
output, feature_maps = self.embedded_gaussian(x)
else:
            raise NotImplementedError('mode {} is not implemented.'.format(self.mode))
return output, feature_maps
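# Minimal usage sketch (assumes a 7x7 feature map, e.g. a ResNet stage-4 output):
#   att = ModulatedAttLayer(in_channels=2048)
#   out, (inp, spatial_att, mask) = att(torch.randn(2, 2048, 7, 7))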
| classifier-balancing-main | layers/ModulatedAttLayer.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch.nn as nn
def create_loss():
print('Loading Softmax Loss.')
return nn.CrossEntropyLoss()
| classifier-balancing-main | loss/SoftmaxLoss.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
import torch.nn as nn
from torch.autograd.function import Function
import pdb
class DiscCentroidsLoss(nn.Module):
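    """Discriminative centroids loss (OLTR): an attracting term pulls each
    feature toward its class centroid, while a repelling term hinges the mean
    squared distance to the other class centroids above a fixed margin."""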
def __init__(self, num_classes, feat_dim, size_average=True):
super(DiscCentroidsLoss, self).__init__()
self.num_classes = num_classes
self.centroids = nn.Parameter(torch.randn(num_classes, feat_dim))
self.disccentroidslossfunc = DiscCentroidsLossFunc.apply
self.feat_dim = feat_dim
self.size_average = size_average
def forward(self, feat, label):
batch_size = feat.size(0)
# calculate attracting loss
feat = feat.view(batch_size, -1)
# To check the dim of centroids and features
if feat.size(1) != self.feat_dim:
raise ValueError("Center's dim: {0} should be equal to input feature's \
dim: {1}".format(self.feat_dim,feat.size(1)))
batch_size_tensor = feat.new_empty(1).fill_(batch_size if self.size_average else 1)
loss_attract = self.disccentroidslossfunc(feat, label, self.centroids, batch_size_tensor).squeeze()
# calculate repelling loss
        distmat = torch.pow(feat, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centroids, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        distmat.addmm_(feat, self.centroids.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_classes).long().cuda()
labels_expand = label.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels_expand.eq(classes.expand(batch_size, self.num_classes))
        distmat_neg = distmat  # alias: zeroing same-class entries below also mutates distmat
        distmat_neg[mask] = 0.0
# margin = 50.0
margin = 10.0
loss_repel = torch.clamp(margin - distmat_neg.sum() / (batch_size * self.num_classes), 0.0, 1e6)
# loss = loss_attract + 0.05 * loss_repel
loss = loss_attract + 0.01 * loss_repel
return loss
class DiscCentroidsLossFunc(Function):
@staticmethod
def forward(ctx, feature, label, centroids, batch_size):
ctx.save_for_backward(feature, label, centroids, batch_size)
centroids_batch = centroids.index_select(0, label.long())
return (feature - centroids_batch).pow(2).sum() / 2.0 / batch_size
@staticmethod
def backward(ctx, grad_output):
feature, label, centroids, batch_size = ctx.saved_tensors
centroids_batch = centroids.index_select(0, label.long())
diff = centroids_batch - feature
# init every iteration
counts = centroids.new_ones(centroids.size(0))
ones = centroids.new_ones(label.size(0))
grad_centroids = centroids.new_zeros(centroids.size())
counts = counts.scatter_add_(0, label.long(), ones)
grad_centroids.scatter_add_(0, label.unsqueeze(1).expand(feature.size()).long(), diff)
grad_centroids = grad_centroids/counts.view(-1, 1)
return - grad_output * diff / batch_size, None, grad_centroids / batch_size, None
def create_loss(feat_dim=512, num_classes=1000):
print('Loading Discriminative Centroids Loss.')
return DiscCentroidsLoss(num_classes, feat_dim)
| classifier-balancing-main | loss/DiscCentroidsLoss.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
import torch.nn as nn
from models.CosNormClassifier import CosNorm_Classifier
from utils import *
from os import path
import pdb
class MetaEmbedding_Classifier(nn.Module):
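    """OLTR meta-embedding head: augments the direct feature with a memory
    feature hallucinated from class centroids, gates it with a learned concept
    selector, scales the result by reachability (inverse distance to the
    nearest centroid), and classifies with a cosine-norm classifier."""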
def __init__(self, feat_dim=2048, num_classes=1000):
super(MetaEmbedding_Classifier, self).__init__()
self.num_classes = num_classes
self.fc_hallucinator = nn.Linear(feat_dim, num_classes)
self.fc_selector = nn.Linear(feat_dim, feat_dim)
self.cosnorm_classifier = CosNorm_Classifier(feat_dim, num_classes)
def forward(self, x, centroids, *args):
# storing direct feature
direct_feature = x
batch_size = x.size(0)
feat_size = x.size(1)
# set up visual memory
x_expand = x.unsqueeze(1).expand(-1, self.num_classes, -1)
centroids_expand = centroids.unsqueeze(0).expand(batch_size, -1, -1)
keys_memory = centroids
# computing reachability
dist_cur = torch.norm(x_expand - centroids_expand, 2, 2)
values_nn, labels_nn = torch.sort(dist_cur, 1)
scale = 10.0
reachability = (scale / values_nn[:, 0]).unsqueeze(1).expand(-1, feat_size)
# computing memory feature by querying and associating visual memory
values_memory = self.fc_hallucinator(x)
values_memory = values_memory.softmax(dim=1)
memory_feature = torch.matmul(values_memory, keys_memory)
# computing concept selector
concept_selector = self.fc_selector(x)
concept_selector = concept_selector.tanh()
x = reachability * (direct_feature + concept_selector * memory_feature)
# storing infused feature
infused_feature = concept_selector * memory_feature
logits = self.cosnorm_classifier(x)
return logits, [direct_feature, infused_feature]
def create_model(feat_dim=2048, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Meta Embedding Classifier.')
clf = MetaEmbedding_Classifier(feat_dim, num_classes)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 Classifier Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
clf.fc_hallucinator = init_weights(model=clf.fc_hallucinator,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'),
classifier=True)
else:
print('Random initialized classifier weights.')
return clf
| classifier-balancing-main | models/MetaEmbeddingClassifier.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 101 Feature Model.')
resnet101 = ResNet(Bottleneck, [3, 4, 23, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 101 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet101 = init_weights(model=resnet101,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet101
| classifier-balancing-main | models/ResNet101Feature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, caffe=False, log_dir=None, test=False):
print('Loading Scratch ResNet 152 Feature Model.')
resnet152 = ResNet(Bottleneck, [3, 8, 36, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
assert(caffe != stage1_weights)
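        # Exactly one weight source may be selected: Caffe-pretrained or stage 1.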
if caffe:
print('Loading Caffe Pretrained ResNet 152 Weights.')
resnet152 = init_weights(model=resnet152,
weights_path='./logs/caffe_resnet152.pth',
caffe=True)
elif stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 152 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet152 = init_weights(model=resnet152,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet152
| classifier-balancing-main | models/ResNet152FeatureCaffe.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch.nn as nn
import torch.nn.functional as F
from layers.ModulatedAttLayer import ModulatedAttLayer
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, is_last=False):
super(Bottleneck, self).__init__()
width = int(planes * (base_width / 64.)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
groups=groups, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.is_last = is_last
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNext(nn.Module):
def __init__(self, block, layers, groups=1, width_per_group=64,
use_modulatedatt=False, use_fc=False, dropout=None,
use_glore=False, use_gem=False):
self.inplanes = 64
super(ResNext, self).__init__()
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.use_fc = use_fc
self.use_dropout = True if dropout else False
if self.use_fc:
print('Using fc.')
self.fc_add = nn.Linear(512*block.expansion, 512)
if self.use_dropout:
print('Using dropout.')
self.dropout = nn.Dropout(p=dropout)
self.use_modulatedatt = use_modulatedatt
if self.use_modulatedatt:
print('Using self attention.')
self.modulatedatt = ModulatedAttLayer(in_channels=512*block.expansion)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, is_last=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample,
groups=self.groups, base_width=self.base_width))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,
groups=self.groups, base_width=self.base_width,
is_last=(is_last and i == blocks-1)))
return nn.Sequential(*layers)
def forward(self, x, *args):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.use_modulatedatt:
x, feature_maps = self.modulatedatt(x)
else:
feature_maps = None
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.use_fc:
x = F.relu(self.fc_add(x))
if self.use_dropout:
x = self.dropout(x)
        return x, feature_maps
| classifier-balancing-main | models/ResNextFeature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 50 Feature Model.')
resnet50 = ResNet(Bottleneck, [3, 4, 6, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
            print('Loading %s Stage 1 ResNet 50 Weights.' % dataset)
if log_dir is not None:
# subdir = log_dir.strip('/').split('/')[-1]
# subdir = subdir.replace('stage2', 'stage1')
# weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), subdir)
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet50 = init_weights(model=resnet50,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet50
| classifier-balancing-main | models/ResNet50Feature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import math
import torch.nn as nn
import torch.nn.functional as F
from layers.ModulatedAttLayer import ModulatedAttLayer
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, use_modulatedatt=False, use_fc=False, dropout=None):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.use_fc = use_fc
self.use_dropout = True if dropout else False
if self.use_fc:
print('Using fc.')
self.fc_add = nn.Linear(512*block.expansion, 512)
if self.use_dropout:
print('Using dropout.')
self.dropout = nn.Dropout(p=dropout)
self.use_modulatedatt = use_modulatedatt
if self.use_modulatedatt:
print('Using self attention.')
self.modulatedatt = ModulatedAttLayer(in_channels=512*block.expansion)
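        # He (Kaiming) initialization for conv layers below; BatchNorm layers
        # start as the identity transform (weight=1, bias=0).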
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, *args):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.use_modulatedatt:
x, feature_maps = self.modulatedatt(x)
else:
feature_maps = None
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.use_fc:
x = F.relu(self.fc_add(x))
if self.use_dropout:
x = self.dropout(x)
return x, feature_maps | classifier-balancing-main | models/ResNetFeature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch.nn as nn
from utils import *
from os import path
class DotProduct_Classifier(nn.Module):
def __init__(self, num_classes=1000, feat_dim=2048, *args):
super(DotProduct_Classifier, self).__init__()
# print('<DotProductClassifier> contains bias: {}'.format(bias))
self.fc = nn.Linear(feat_dim, num_classes)
def forward(self, x, *args):
x = self.fc(x)
return x, None
def create_model(feat_dim, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Dot Product Classifier.')
clf = DotProduct_Classifier(num_classes, feat_dim)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 Classifier Weights.' % dataset)
if log_dir is not None:
subdir = log_dir.strip('/').split('/')[-1]
subdir = subdir.replace('stage2', 'stage1')
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), subdir)
# weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading classifier weights from %s' % weight_dir)
clf.fc = init_weights(model=clf.fc,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'),
classifier=True)
else:
            print('Randomly initialized classifier weights.')
return clf | classifier-balancing-main | models/DotProductClassifier.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNextFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNext 101 Feature Model.')
resnext = ResNext(Bottleneck, [3, 4, 23, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None,
groups=32, width_per_group=4)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNext 101 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnext = init_weights(model=resnext,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnext
| classifier-balancing-main | models/ResNext101Feature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from utils import *
from os import path
class DotProduct_Classifier(nn.Module):
def __init__(self, num_classes=1000, feat_dim=2048, *args):
super(DotProduct_Classifier, self).__init__()
# print('<DotProductClassifier> contains bias: {}'.format(bias))
self.fc = nn.Linear(feat_dim, num_classes)
self.scales = Parameter(torch.ones(num_classes))
for param_name, param in self.fc.named_parameters():
param.requires_grad = False
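    # The fc weights above are frozen; only the per-class `scales` are learned,
    # so the logits become scales[c] * (w_c . x + b_c) -- a learnable per-class
    # rescaling of a fixed classifier.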
def forward(self, x, *args):
x = self.fc(x)
x *= self.scales
return x, None
def create_model(feat_dim, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Dot Product Classifier.')
clf = DotProduct_Classifier(num_classes, feat_dim)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 Classifier Weights.' % dataset)
if log_dir is not None:
subdir = log_dir.strip('/').split('/')[-1]
subdir = subdir.replace('stage2', 'stage1')
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), subdir)
# weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading classifier weights from %s' % weight_dir)
clf.fc = init_weights(model=clf.fc,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'),
classifier=True)
else:
            print('Randomly initialized classifier weights.')
return clf | classifier-balancing-main | models/TauNormClassifier.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
import pdb
class CosNorm_Classifier(nn.Module):
def __init__(self, in_dims, out_dims, scale=16, margin=0.5, init_std=0.001):
super(CosNorm_Classifier, self).__init__()
self.in_dims = in_dims
self.out_dims = out_dims
self.scale = scale
self.margin = margin
self.weight = Parameter(torch.Tensor(out_dims, in_dims).cuda())
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input, *args):
norm_x = torch.norm(input.clone(), 2, 1, keepdim=True)
ex = (norm_x / (1 + norm_x)) * (input / norm_x)
ew = self.weight / torch.norm(self.weight, 2, 1, keepdim=True)
return torch.mm(self.scale * ex, ew.t())
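# Note: forward() computes scale * (||x|| / (1 + ||x||)) * cos(x, w) for each
# class weight w, i.e. a cosine-similarity classifier whose magnitude is a
# saturating function of the input feature norm (the cosine-norm classifier
# from the OLTR codebase this file derives from).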
def create_model(in_dims=512, out_dims=1000):
print('Loading Cosine Norm Classifier.')
return CosNorm_Classifier(in_dims=in_dims, out_dims=out_dims) | classifier-balancing-main | models/CosNormClassifier.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 10 Feature Model.')
resnet10 = ResNet(BasicBlock, [1, 1, 1, 1], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 10 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet10 = init_weights(model=resnet10,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet10
| classifier-balancing-main | models/ResNet10Feature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNextFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNext 152 Feature Model.')
resnext = ResNext(Bottleneck, [3, 8, 36, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None,
groups=32, width_per_group=4)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNext 152 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnext = init_weights(model=resnext,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnext
| classifier-balancing-main | models/ResNext152Feature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import numpy as np
import pickle
from os import path
class KNNClassifier(nn.Module):
def __init__(self, feat_dim=512, num_classes=1000, feat_type='cl2n', dist_type='l2'):
super(KNNClassifier, self).__init__()
assert feat_type in ['un', 'l2n', 'cl2n'], "feat_type is wrong!!!"
assert dist_type in ['l2', 'cos'], "dist_type is wrong!!!"
self.feat_dim = feat_dim
self.num_classes = num_classes
self.centroids = torch.randn(num_classes, feat_dim)
self.feat_mean = torch.randn(feat_dim)
self.feat_type = feat_type
self.dist_type = dist_type
self.initialized = False
def update(self, cfeats):
mean = cfeats['mean']
centroids = cfeats['{}cs'.format(self.feat_type)]
mean = torch.from_numpy(mean)
centroids = torch.from_numpy(centroids)
self.feat_mean.copy_(mean)
self.centroids.copy_(centroids)
if torch.cuda.is_available():
self.feat_mean = self.feat_mean.cuda()
self.centroids = self.centroids.cuda()
self.initialized = True
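    # Feature transform conventions used in forward(): 'un' leaves features
    # untouched, 'l2n' L2-normalizes them, and 'cl2n' first subtracts the
    # stored feature mean and then L2-normalizes (centered L2-norm).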
def forward(self, inputs, *args):
centroids = self.centroids
feat_mean = self.feat_mean
# Feature transforms
if self.feat_type == 'cl2n':
inputs = inputs - feat_mean
#centroids = centroids - self.feat_mean
if self.feat_type in ['l2n', 'cl2n']:
norm_x = torch.norm(inputs, 2, 1, keepdim=True)
inputs = inputs / norm_x
#norm_c = torch.norm(centroids, 2, 1, keepdim=True)
#centroids = centroids / norm_c
# Logit calculation
if self.dist_type == 'l2':
logit = self.l2_similarity(inputs, centroids)
elif self.dist_type == 'cos':
logit = self.cos_similarity(inputs, centroids)
return logit, None
def l2_similarity(self, A, B):
# input A: [bs, fd] (batch_size x feat_dim)
# input B: [nC, fd] (num_classes x feat_dim)
feat_dim = A.size(1)
AB = torch.mm(A, B.t())
AA = (A**2).sum(dim=1, keepdim=True)
BB = (B**2).sum(dim=1, keepdim=True)
dist = AA + BB.t() - 2*AB
return -dist
def cos_similarity(self, A, B):
feat_dim = A.size(1)
AB = torch.mm(A, B.t())
AB = AB / feat_dim
return AB
def create_model(feat_dim, num_classes=1000, feat_type='cl2n', dist_type='l2',
log_dir=None, test=False, *args):
print('Loading KNN Classifier')
print(feat_dim, num_classes, feat_type, dist_type, log_dir, test)
clf = KNNClassifier(feat_dim, num_classes, feat_type, dist_type)
if log_dir is not None:
fname = path.join(log_dir, 'cfeats.pkl')
if path.exists(fname):
print('===> Loading features from %s' % fname)
with open(fname, 'rb') as f:
data = pickle.load(f)
clf.update(data)
else:
            print('Randomly initialized classifier weights.')
return clf
if __name__ == "__main__":
cens = np.eye(4)
mean = np.ones(4)
xs = np.array([
[0.9, 0.1, 0.0, 0.0],
[0.2, 0.1, 0.1, 0.6],
[0.3, 0.3, 0.4, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.25, 0.25, 0.25, 0.25]
])
xs = torch.Tensor(xs)
classifier = KNNClassifier(feat_dim=4, num_classes=4,
feat_type='un')
    # update() expects a dict holding the feature mean and the centroids keyed
    # by feat_type ('un' here -> key 'uncs').
    classifier.update({'mean': mean, 'uncs': cens})
logits, _ = classifier(xs)
| classifier-balancing-main | models/KNNClassifier.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNextFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNext 50 Feature Model.')
resnext = ResNext(Bottleneck, [3, 4, 6, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None,
groups=32, width_per_group=4, use_glore=False, use_gem=False)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNext 50 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnext = init_weights(model=resnext,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnext | classifier-balancing-main | models/ResNext50Feature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 152 Feature Model.')
resnet = ResNet(Bottleneck, [3, 8, 36, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 152 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet = init_weights(model=resnet,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet
| classifier-balancing-main | models/ResNet152Feature.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import random
import numpy as np
from torch.utils.data.sampler import Sampler
import pdb
##################################
## Class-aware sampling, partly implemented by frombeijingwithlove
##################################
class RandomCycleIter:
def __init__ (self, data, test_mode=False):
self.data_list = list(data)
self.length = len(self.data_list)
self.i = self.length - 1
self.test_mode = test_mode
def __iter__ (self):
return self
def __next__ (self):
self.i += 1
if self.i == self.length:
self.i = 0
if not self.test_mode:
random.shuffle(self.data_list)
return self.data_list[self.i]
def class_aware_sample_generator (cls_iter, data_iter_list, n, num_samples_cls=1):
i = 0
j = 0
while i < n:
# yield next(data_iter_list[next(cls_iter)])
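        # zip(*[it]*n) below advances the same class-data iterator n times per
        # step, so each sampled class contributes a run of num_samples_cls
        # consecutive examples before the next class is drawn.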
if j >= num_samples_cls:
j = 0
if j == 0:
temp_tuple = next(zip(*[data_iter_list[next(cls_iter)]]*num_samples_cls))
yield temp_tuple[j]
else:
yield temp_tuple[j]
i += 1
j += 1
class ClassAwareSampler (Sampler):
def __init__(self, data_source, num_samples_cls=1,):
num_classes = len(np.unique(data_source.labels))
self.class_iter = RandomCycleIter(range(num_classes))
cls_data_list = [list() for _ in range(num_classes)]
for i, label in enumerate(data_source.labels):
cls_data_list[label].append(i)
self.data_iter_list = [RandomCycleIter(x) for x in cls_data_list]
self.num_samples = max([len(x) for x in cls_data_list]) * len(cls_data_list)
self.num_samples_cls = num_samples_cls
def __iter__ (self):
return class_aware_sample_generator(self.class_iter, self.data_iter_list,
self.num_samples, self.num_samples_cls)
def __len__ (self):
return self.num_samples
def get_sampler():
return ClassAwareSampler
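if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file): ClassAwareSampler
    # only needs an object exposing a `labels` sequence, so a tiny stand-in
    # dataset suffices here.
    class _ToyDataset:
        labels = [0, 0, 0, 0, 0, 0, 1, 1, 2]

    sampler = ClassAwareSampler(_ToyDataset(), num_samples_cls=2)
    # One pass yields max_class_count * num_classes = 18 indices, visiting the
    # classes uniformly regardless of how imbalanced the label counts are.
    print(list(iter(sampler)))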
################################## | classifier-balancing-main | data/ClassAwareSampler.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import random
import numpy as np
from torch.utils.data.sampler import Sampler
class PriorityTree(object):
def __init__(self, capacity, fixed_weights=None, fixed_scale=1.0,
init_weight=1.0):
"""
        fixed_weights: weights that won't be updated by self.update()
"""
assert fixed_weights is None or len(fixed_weights) == capacity
self._capacity = capacity
self._tree_size = 2 * capacity - 1
self.fixed_scale = fixed_scale
self.fixed_weights = np.zeros(self._capacity) if fixed_weights is None \
else fixed_weights
self.tree = np.zeros(self._tree_size)
self._initialized = False
self.initialize(init_weight)
def initialize(self, init_weight):
"""Initialize the tree."""
# Rescale the fixed_weights if it is not zero
if self.fixed_weights.sum() > 0 and init_weight > 0:
self.fixed_weights *= self.fixed_scale * init_weight * self.capacity \
/ self.fixed_weights.sum()
print('FixedWeights: {}'.format(self.fixed_weights.sum()))
self.update_whole(init_weight + self.fixed_weights)
self._initialized = True
def reset_fixed_weights(self, fixed_weights, rescale=False):
""" Reset the manually designed weights and
update the whole tree accordingly.
@rescale: rescale the fixed_weights such that
fixed_weights.sum() = self.fixed_scale * adaptive_weights.sum()
"""
adaptive_weights = self.get_adaptive_weights()
fixed_sum = fixed_weights.sum()
if rescale and fixed_sum > 0:
scale = self.fixed_scale * adaptive_weights.sum() / fixed_sum
self.fixed_weights = fixed_weights * scale
else:
self.fixed_weights = fixed_weights
self.update_whole(self.fixed_weights + adaptive_weights)
def update_whole(self, total_weights):
""" Update the whole tree based on per-example sampling weights """
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
self.tree[lefti:righti+1] = total_weights
# Iteratively find a parent layer
while lefti != 0 and righti != 0:
lefti = (lefti - 1) // 2 if lefti != 0 else 0
righti = (righti - 1) // 2 if righti != 0 else 0
            # Assign parent weights from right to left
for i in range(righti, lefti-1, -1):
self.tree[i] = self.tree[2*i+1] + self.tree[2*i+2]
def get_adaptive_weights(self):
""" Get the instance-aware weights, that are not mannually designed"""
return self.get_total_weights() - self.fixed_weights
def get_total_weights(self):
""" Get the per-example sampling weights
return shape: [capacity]
"""
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
return self.tree[lefti:righti+1]
@property
def size(self):
return self._tree_size
@property
def capacity(self):
return self._capacity
def __len__(self):
return self.capacity
def pointer_to_treeidx(self, pointer):
assert pointer < self.capacity
return int(pointer + self.capacity - 1)
def update(self, pointer, priority):
assert pointer < self.capacity
tree_idx = self.pointer_to_treeidx(pointer)
priority += self.fixed_weights[pointer]
delta = priority - self.tree[tree_idx]
self.tree[tree_idx] = priority
while tree_idx != 0:
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += delta
def get_leaf(self, value):
assert self._initialized, 'PriorityTree not initialized!!!!'
        assert self.total > 0, 'No priority weights set!!'
parent = 0
while True:
left_child = 2 * parent + 1
right_child = 2 * parent + 2
if left_child >= len(self.tree):
tgt_leaf = parent
break
if value < self.tree[left_child]:
parent = left_child
else:
value -= self.tree[left_child]
parent = right_child
data_idx = tgt_leaf - self.capacity + 1
return data_idx, self.tree[tgt_leaf] # data idx, priority
@property
def total(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return self.tree[0]
@property
def max(self):
return np.max(self.tree[-self.capacity:])
@property
def min(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return np.min(self.tree[-self.capacity:])
def get_weights(self):
return {'fixed_weights': self.fixed_weights,
'total_weights': self.get_total_weights()}
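# Note: PriorityTree is a standard sum tree (as used in prioritized experience
# replay): the last `capacity` entries of self.tree hold per-example weights
# and every internal node stores the sum of its two children, so get_leaf()
# draws an index proportionally to its weight in O(log capacity) by descending
# from the root with a value uniform in [0, total).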
class MixedPrioritizedSampler(Sampler):
"""
A sampler combining manually designed sampling strategy and prioritized
sampling strategy.
    Manually designed strategy contains two parts:
$$ manual_weights = lam * balanced_weights + (1-lam) uniform_weights
Here we use a generalized version of balanced weights as follows,
when n limits to infinity, balanced_weights = real_balanced_weights
$$ balanced_weights = uniform_weights ^ (1/n)
Then the balanced weights are scaled such that
$$ balanced_weights.sum() = balance_scale * uniform_weights.sum()
Note: above weights are per-class weights
Overall sampling weights are given as
$$ sampling_weights = manual_weights * fixed_scale + priority_weights
Arguments:
@dataset: A dataset
@balance_scale: The scale of balanced_weights
@lam: A weight to combine balanced weights and uniform weights
- None for shifting sampling
- 0 for uniform sampling
- 1 for balanced sampling
@fixed_scale: The scale of manually designed weights
@cycle: shifting strategy
- 0 for linear shifting: 3 -> 2 - > 1
- 1 for periodic shifting:
3 -> 2 - > 1 -> 3 -> 2 - > 1 -> 3 -> 2 - > 1
- 2 for cosine-like periodic shifting:
3 -> 2 - > 1 -> 1 -> 2 - > 3 -> 3 -> 2 - > 1
@nroot:
- None for truly balanced weights
- >= 2 for pseudo-balanced weights
@rescale: whether to rebalance the manual weights and priority weights
every epoch
@root_decay:
- 'exp': for exponential decay
- 'linear': for linear decay
"""
def __init__(self, dataset, balance_scale=1.0, fixed_scale=1.0,
lam=None, epochs=90, cycle=0, nroot=None, manual_only=False,
rescale=False, root_decay=None, decay_gap=30, ptype='score',
alpha=1.0):
"""
"""
self.dataset = dataset
self.balance_scale = balance_scale
self.fixed_scale = fixed_scale
self.epochs = epochs
self.lam = lam
self.cycle = cycle
self.nroot = nroot
self.rescale = rescale
self.manual_only = manual_only
self.root_decay = root_decay
self.decay_gap = decay_gap
self.ptype = ptype
self.num_samples = len(dataset)
self.alpha = alpha
        # If using root_decay, reset relevant parameters
if self.root_decay in ['exp', 'linear', 'autoexp']:
self.lam = 1
self.manual_only = True
self.nroot = 1
if self.root_decay == 'autoexp':
self.decay_gap = 1
self.decay_factor = np.power(nroot, 1/(self.epochs-1))
else:
assert self.root_decay is None
assert self.nroot is None or self.nroot >= 2
print("====> Decay GAP: {}".format(self.decay_gap))
# Take care of lambdas
if self.lam is None:
self.freeze = False
if cycle == 0:
self.lams = np.linspace(0, 1, epochs)
elif cycle == 1:
self.lams = np.concatenate([np.linspace(0,1,epochs//3)] * 3)
elif cycle == 2:
self.lams = np.concatenate([np.linspace(0,1,epochs//3),
np.linspace(0,1,epochs//3)[::-1],
np.linspace(0,1,epochs//3)])
else:
raise NotImplementedError(
'cycle = {} not implemented'.format(cycle))
else:
self.lams = [self.lam]
self.freeze = True
# Get num of samples per class
self.cls_cnts = []
self.labels = labels = np.array(self.dataset.labels)
for l in np.unique(labels):
self.cls_cnts.append(np.sum(labels==l))
self.num_classes = len(self.cls_cnts)
self.cnts = np.array(self.cls_cnts).astype(float)
# Get per-class image indexes
self.cls_idxs = [[] for _ in range(self.num_classes)]
for i, label in enumerate(self.dataset.labels):
self.cls_idxs[label].append(i)
for ci in range(self.num_classes):
self.cls_idxs[ci] = np.array(self.cls_idxs[ci])
# Build balanced weights based on class counts
self.balanced_weights = self.get_balanced_weights(self.nroot)
self.manual_weights = self.get_manual_weights(self.lams[0])
# Setup priority tree
if self.ptype == 'score':
self.init_weight = 1.
elif self.ptype in ['CE', 'entropy']:
self.init_weight = 6.9
else:
raise NotImplementedError('ptype {} not implemented'.format(self.ptype))
if self.manual_only:
self.init_weight = 0.
self.init_weight = np.power(self.init_weight, self.alpha)
self.ptree = PriorityTree(self.num_samples, self.manual_weights,
fixed_scale=self.fixed_scale,
init_weight=self.init_weight)
def get_manual_weights(self, lam):
# Merge balanced weights and uniform weights
if lam == 1:
manual_weights = self.balanced_weights
elif lam == 0:
manual_weights = np.ones(len(self.balanced_weights))
else:
manual_weights = self.balanced_weights * lam + (1-lam)
return manual_weights
def get_balanced_weights(self, nroot):
""" Calculate normalized generalized balanced weights """
cnts = self.cnts
if nroot is None:
# Real balanced sampling weights
cls_ws = cnts.min() / cnts
elif nroot >= 1:
# Generalized balanced weights
cls_ws = cnts / cnts.sum()
cls_ws = np.power(cls_ws, 1./nroot) * cnts.sum()
cls_ws = cls_ws / cnts
else:
raise NotImplementedError('root:{} not implemented'.format(nroot))
# Get un-normalized weights
balanced_weights = np.zeros(self.num_samples)
for ci in range(self.num_classes):
balanced_weights[self.cls_idxs[ci]] = cls_ws[ci]
# Normalization and rescale
balanced_weights *= self.num_samples / balanced_weights.sum() * \
self.balance_scale
return balanced_weights
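    # For intuition (illustrative numbers, not from the code): with per-class
    # counts [900, 90, 10] and nroot=2 the un-normalized per-class weights are
    # sqrt(cnts/cnts.sum()) * cnts.sum() / cnts ~= [1.05, 3.33, 10.0], a milder
    # up-weighting of rare classes than true balancing (nroot=None), which
    # gives [10/900, 10/90, 1.0] before normalization.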
def __iter__(self):
for _ in range(self.num_samples):
w = random.random() * self.ptree.total
i, pri = self.ptree.get_leaf(w)
yield i
def __len__(self):
return self.num_samples
def reset_weights(self, epoch):
if not self.freeze and self.fixed_scale > 0:
if epoch >= self.epochs:
e = self.epochs - 1
elif epoch < 1:
e = 0
else:
e = epoch
self.manual_weights = self.get_manual_weights(self.lams[e])
self.ptree.reset_fixed_weights(self.manual_weights, self.rescale)
if self.root_decay in ['exp', 'linear', 'autoexp'] and epoch % self.decay_gap == 0:
if self.root_decay == 'exp':
self.nroot *= 2
elif self.root_decay == 'linear':
self.nroot += 1
elif self.root_decay == 'autoexp':
# self.nroot *= self.decay_factor
self.nroot = np.power(self.decay_factor, epoch)
bw = self.get_balanced_weights(self.nroot)
self.ptree.reset_fixed_weights(bw)
def update_weights(self, inds, weights):
""" Update priority weights """
if not self.manual_only:
weights = np.clip(weights, 0, self.init_weight)
weights = np.power(weights, self.alpha)
for i, w in zip(inds, weights):
self.ptree.update(i, w)
def get_weights(self):
return self.ptree.get_weights()
def get_sampler():
return MixedPrioritizedSampler
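if __name__ == "__main__":
    # Minimal sketch (hypothetical toy data, not from the original file): with
    # manual_only=True and lam=1 the sampler reduces to balanced sampling
    # driven purely by the fixed (manual) weights in the priority tree.
    class _ToyDataset:
        labels = [0] * 6 + [1] * 3 + [2]

    sampler = MixedPrioritizedSampler(_ToyDataset(), lam=1.0, epochs=10,
                                      manual_only=True)
    print(list(iter(sampler)))  # 10 indices drawn roughly uniformly over classes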
| classifier-balancing-main | data/MixedPrioritizedSampler.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import random
import numpy as np
from torch.utils.data.sampler import Sampler
class RandomCycleIter:
def __init__ (self, data, test_mode=False):
self.data_list = list(data)
self.length = len(self.data_list)
self.i = self.length - 1
self.test_mode = test_mode
def __iter__ (self):
return self
def __next__ (self):
self.i += 1
if self.i == self.length:
self.i = 0
if not self.test_mode:
random.shuffle(self.data_list)
return self.data_list[self.i]
class PriorityTree(object):
def __init__(self, capacity, init_weights, fixed_weights=None, fixed_scale=1.0,
alpha=1.0):
"""
        fixed_weights: weights that won't be updated by self.update()
"""
assert fixed_weights is None or len(fixed_weights) == capacity
assert len(init_weights) == capacity
self.alpha = alpha
self._capacity = capacity
self._tree_size = 2 * capacity - 1
self.fixed_scale = fixed_scale
self.fixed_weights = np.zeros(self._capacity) if fixed_weights is None \
else fixed_weights
self.tree = np.zeros(self._tree_size)
self._initialized = False
self.initialize(init_weights)
def initialize(self, init_weights):
"""Initialize the tree."""
# Rescale the fixed_weights if it is not zero
self.fixed_scale_init = self.fixed_scale
if self.fixed_weights.sum() > 0 and init_weights.sum() > 0:
self.fixed_scale_init *= init_weights.sum() / self.fixed_weights.sum()
self.fixed_weights *= self.fixed_scale * init_weights.sum() \
/ self.fixed_weights.sum()
print('FixedWeights: {}'.format(self.fixed_weights.sum()))
self.update_whole(init_weights + self.fixed_weights)
self._initialized = True
def reset_adaptive_weights(self, adaptive_weights):
self.update_whole(self.fixed_weights + adaptive_weights)
def reset_fixed_weights(self, fixed_weights, rescale=False):
""" Reset the manually designed weights and
update the whole tree accordingly.
@rescale: rescale the fixed_weights such that
fixed_weights.sum() = self.fixed_scale * adaptive_weights.sum()
"""
adaptive_weights = self.get_adaptive_weights()
fixed_sum = fixed_weights.sum()
if rescale and fixed_sum > 0:
            # Rescale fixed_weights based on the adaptive weights
scale = self.fixed_scale * adaptive_weights.sum() / fixed_sum
else:
            # Rescale fixed_weights based on the previous fixed_weights
scale = self.fixed_weights.sum() / fixed_sum
self.fixed_weights = fixed_weights * scale
self.update_whole(self.fixed_weights + adaptive_weights)
def update_whole(self, total_weights):
""" Update the whole tree based on per-example sampling weights """
if self.alpha != 1:
total_weights = np.power(total_weights, self.alpha)
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
self.tree[lefti:righti+1] = total_weights
# Iteratively find a parent layer
while lefti != 0 and righti != 0:
lefti = (lefti - 1) // 2 if lefti != 0 else 0
righti = (righti - 1) // 2 if righti != 0 else 0
            # Assign parent weights from right to left
for i in range(righti, lefti-1, -1):
self.tree[i] = self.tree[2*i+1] + self.tree[2*i+2]
def get_adaptive_weights(self):
""" Get the instance-aware weights, that are not mannually designed"""
if self.alpha == 1:
return self.get_total_weights() - self.fixed_weights
else:
return self.get_raw_total_weights() - self.fixed_weights
def get_total_weights(self):
""" Get the per-example sampling weights
return shape: [capacity]
"""
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
return self.tree[lefti:righti+1]
def get_raw_total_weights(self):
""" Get the per-example sampling weights
return shape: [capacity]
"""
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
return np.power(self.tree[lefti:righti+1], 1/self.alpha)
@property
def size(self):
return self._tree_size
@property
def capacity(self):
return self._capacity
def __len__(self):
return self.capacity
def pointer_to_treeidx(self, pointer):
assert pointer < self.capacity
return int(pointer + self.capacity - 1)
def update(self, pointer, priority):
assert pointer < self.capacity
tree_idx = self.pointer_to_treeidx(pointer)
priority += self.fixed_weights[pointer]
if self.alpha != 1:
priority = np.power(priority, self.alpha)
delta = priority - self.tree[tree_idx]
self.tree[tree_idx] = priority
while tree_idx != 0:
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += delta
def update_delta(self, pointer, delta):
assert pointer < self.capacity
tree_idx = self.pointer_to_treeidx(pointer)
ratio = 1- self.fixed_weights[pointer] / self.tree[tree_idx]
# delta *= ratio
if self.alpha != 1:
# Update delta
            assert self.tree[tree_idx] >= 0 and \
                np.power(self.tree[tree_idx], 1 / self.alpha) + delta >= 0, \
                'update_delta would produce a negative priority'
delta = np.power(np.power(self.tree[tree_idx], 1/self.alpha) + delta,
self.alpha) \
- self.tree[tree_idx]
self.tree[tree_idx] += delta
while tree_idx != 0:
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += delta
def get_leaf(self, value):
assert self._initialized, 'PriorityTree not initialized!!!!'
        assert self.total > 0, 'No priority weights set!!'
parent = 0
while True:
left_child = 2 * parent + 1
right_child = 2 * parent + 2
if left_child >= len(self.tree):
tgt_leaf = parent
break
if value < self.tree[left_child]:
parent = left_child
else:
value -= self.tree[left_child]
parent = right_child
data_idx = tgt_leaf - self.capacity + 1
return data_idx, self.tree[tgt_leaf] # data idx, priority
@property
def total(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return self.tree[0]
@property
def max(self):
return np.max(self.tree[-self.capacity:])
@property
def min(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return np.min(self.tree[-self.capacity:])
def get_weights(self):
wdict = {'fixed_weights': self.fixed_weights,
'total_weights': self.get_total_weights()}
if self.alpha != 1:
wdict.update({'raw_total_weights': self.get_raw_total_weights(),
'alpha': self.alpha})
return wdict
class ClassPrioritySampler(Sampler):
"""
A sampler combining manually designed sampling strategy and prioritized
sampling strategy.
    Manually designed strategy contains two parts:
$$ manual_weights = lam * balanced_weights + (1-lam) uniform_weights
Here we use a generalized version of balanced weights as follows,
when n limits to infinity, balanced_weights = real_balanced_weights
$$ balanced_weights = uniform_weights ^ (1/n)
Then the balanced weights are scaled such that
$$ balanced_weights.sum() = balance_scale * uniform_weights.sum()
Note: above weights are per-class weights
Overall sampling weights are given as
$$ sampling_weights = manual_weights * fixed_scale + priority_weights
Arguments:
@dataset: A dataset
@balance_scale: The scale of balanced_weights
@lam: A weight to combine balanced weights and uniform weights
- None for shifting sampling
- 0 for uniform sampling
- 1 for balanced sampling
@fixed_scale: The scale of manually designed weights
- fixed_scale < 0 means, the manually designed distribution will
be used as the backend distribution of priorities.
@cycle: shifting strategy
- 0 for linear shifting: 3 -> 2 - > 1
- 1 for periodic shifting:
3 -> 2 - > 1 -> 3 -> 2 - > 1 -> 3 -> 2 - > 1
- 2 for cosine-like periodic shifting:
3 -> 2 - > 1 -> 1 -> 2 - > 3 -> 3 -> 2 - > 1
@nroot:
- None for truly balanced weights
- >= 2 for pseudo-balanced weights
@rescale: whether to rebalance the manual weights and priority weights
every epoch
@root_decay:
- 'exp': for exponential decay
- 'linear': for linear decay
"""
def __init__(self, dataset, balance_scale=1.0, fixed_scale=1.0,
lam=None, epochs=90, cycle=0, nroot=None, manual_only=False,
rescale=False, root_decay=None, decay_gap=30, ptype='score',
pri_mode='train', momentum=0., alpha=1.0):
"""
"""
self.dataset = dataset
self.balance_scale = balance_scale
self.fixed_scale = fixed_scale
self.epochs = epochs
self.lam = lam
self.cycle = cycle
self.nroot = nroot
self.rescale = rescale
self.manual_only = manual_only
self.root_decay = root_decay
self.decay_gap = decay_gap
self.ptype = ptype
self.pri_mode = pri_mode
self.num_samples = len(dataset)
self.manual_as_backend = False
self.momentum = momentum
self.alpha = alpha
assert 0. <= self.momentum <= 1.0
assert 0. <= self.alpha
# Change the backend distribution of priority if needed
if self.fixed_scale < 0:
self.fixed_scale = 0
self.manual_as_backend = True
        # If using root_decay, reset relevant parameters
if self.root_decay in ['exp', 'linear', 'autoexp']:
self.lam = 1
self.manual_only = True
self.nroot = 1
if self.root_decay == 'autoexp':
self.decay_gap = 1
self.decay_factor = np.power(nroot, 1/(self.epochs-1))
else:
assert self.root_decay is None
assert self.nroot is None or self.nroot > 1
print("====> Decay GAP: {}".format(self.decay_gap))
# Take care of lambdas
self.freeze = True
if self.lam is None:
self.freeze = False
if cycle == 0:
self.lams = np.linspace(0, 1, epochs)
elif cycle == 1:
self.lams = np.concatenate([np.linspace(0,1,epochs//3)] * 3)
elif cycle == 2:
self.lams = np.concatenate([np.linspace(0,1,epochs//3),
np.linspace(0,1,epochs//3)[::-1],
np.linspace(0,1,epochs//3)])
else:
raise NotImplementedError(
'cycle = {} not implemented'.format(cycle))
else:
self.lams = [self.lam]
# Get num of samples per class
self.cls_cnts = []
self.labels = labels = np.array(self.dataset.labels)
for l in np.unique(labels):
self.cls_cnts.append(np.sum(labels==l))
self.num_classes = len(self.cls_cnts)
self.cnts = np.array(self.cls_cnts).astype(float)
# Get per-class image indexes
self.cls_idxs = [[] for _ in range(self.num_classes)]
for i, label in enumerate(self.dataset.labels):
self.cls_idxs[label].append(i)
self.data_iter_list = [RandomCycleIter(x) for x in self.cls_idxs]
for ci in range(self.num_classes):
self.cls_idxs[ci] = np.array(self.cls_idxs[ci])
# Build balanced weights based on class counts
self.balanced_weights = self.get_balanced_weights(self.nroot)
self.uniform_weights = self.get_uniform_weights()
self.manual_weights = self.get_manual_weights(self.lams[0])
# back_weights = self.get_balanced_weights(1.5)
back_weights = self.uniform_weights
# Calculate priority ratios that reshape priority into target distribution
self.per_cls_ratios = self.get_cls_ratios(
self.manual_weights if self.manual_as_backend else back_weights)
self.per_example_ratios = self.broadcast(self.per_cls_ratios)
# Setup priority tree
if self.ptype == 'score':
self.init_weight = 1.
elif self.ptype in ['CE', 'entropy']:
self.init_weight = 6.9
else:
raise NotImplementedError('ptype {} not implemented'.format(self.ptype))
if self.manual_only:
self.init_weight = 0.
self.per_example_uni_weights = np.ones(self.num_samples) * self.init_weight
self.per_example_velocities = np.zeros(self.num_samples)
# init_priorities = np.power(self.init_weight, self.alpha) \
# * self.uniform_weights * self.per_cls_ratios
init_priorities = self.init_weight * self.uniform_weights * self.per_cls_ratios
self.ptree = PriorityTree(self.num_classes, init_priorities,
self.manual_weights.copy(), fixed_scale=self.fixed_scale,
alpha=self.alpha)
def get_cls_ratios(self, tgt_weights):
if tgt_weights is self.uniform_weights:
return np.ones_like(self.uniform_weights)
per_cls_ratios = tgt_weights / self.uniform_weights
per_cls_ratios *= self.uniform_weights.sum() / tgt_weights.sum()
return per_cls_ratios
def get_cls_weights(self):
ratioed_ws = self.per_example_uni_weights * self.per_example_ratios
return self.debroadcast_sum(ratioed_ws)
    def broadcast(self, per_cls_info):
        per_example_info = np.zeros(self.num_samples)
        # Broadcast per-cls info to each example
        for ci in range(self.num_classes):
            per_example_info[self.cls_idxs[ci]] = per_cls_info[ci]
        return per_example_info
def debroadcast_sum(self, per_example_info):
per_cls_info = np.zeros(self.num_classes)
        # De-broadcast per-example info to each cls by summation
for ci in range(self.num_classes):
per_cls_info[ci] = per_example_info[self.cls_idxs[ci]].sum()
return per_cls_info
def get_manual_weights(self, lam):
# Merge balanced weights and uniform weights
if lam == 1:
manual_weights = self.balanced_weights.copy()
elif lam == 0:
manual_weights = self.uniform_weights.copy()
else:
manual_weights = self.balanced_weights * lam + (1-lam) * self.uniform_weights
return manual_weights
def get_uniform_weights(self):
return self.cnts.copy()
def get_balanced_weights(self, nroot):
""" Calculate normalized generalized balanced weights """
cnts = self.cnts
if nroot is None:
# Real balanced sampling weights, each class has the same weights
# Un-normalized !!!
cls_ws = np.ones(len(cnts))
elif nroot >= 1:
# Generalized balanced weights
# Un-normalized !!!
cls_ws = cnts / cnts.sum()
cls_ws = np.power(cls_ws, 1./nroot) * cnts.sum()
cls_ws = cls_ws
else:
raise NotImplementedError('root:{} not implemented'.format(nroot))
# Get un-normalized weights
balanced_weights = cls_ws
# Normalization and rescale
balanced_weights *= self.num_samples / balanced_weights.sum() * \
self.balance_scale
return balanced_weights
def __iter__(self):
for _ in range(self.num_samples):
w = random.random() * self.ptree.total
ci, pri = self.ptree.get_leaf(w)
yield next(self.data_iter_list[ci])
def __len__(self):
return self.num_samples
def reset_weights(self, epoch):
# If it is linear shifting
if not self.freeze:
e = np.clip(epoch, 0, self.epochs-1)
self.manual_weights = self.get_manual_weights(self.lams[e])
# make sure 'self.fixed_scale > 0' and 'self.manual_as_backend = True' are
# mutually exclusive
if self.fixed_scale > 0:
self.ptree.reset_fixed_weights(self.manual_weights, self.rescale)
if self.manual_as_backend:
self.update_backend_distribution(self.manual_weights)
# If it is root decay
if self.root_decay in ['exp', 'linear', 'autoexp'] and epoch % self.decay_gap == 0:
if self.root_decay == 'exp':
self.nroot *= 2
elif self.root_decay == 'linear':
self.nroot += 1
elif self.root_decay == 'autoexp':
# self.nroot *= self.decay_factor
self.nroot = np.power(self.decay_factor, epoch)
bw = self.get_balanced_weights(self.nroot)
if self.manual_as_backend:
self.update_backend_distribution(bw)
else:
self.ptree.reset_fixed_weights(bw)
def update_backend_distribution(self, tgt_weights):
# Recalculate the cls ratios based on the given target distribution
self.per_cls_ratios = self.get_cls_ratios(tgt_weights)
self.per_example_ratios = self.broadcast(self.per_cls_ratios)
# Recalculate the new per-class weights based on the new ratios
# new_backend_weights = self.init_weight * self.uniform_weights * self.per_cls_ratios
new_cls_weights = self.get_cls_weights()
self.ptree.reset_adaptive_weights(new_cls_weights)
def update_weights(self, inds, weights, labels):
""" Update priority weights """
if not self.manual_only and self.pri_mode == 'train':
weights = np.clip(weights, 0, self.init_weight)
# Iterate over all classes in the batch
for l in np.unique(labels):
# Calculate per-class delta weights
example_inds = inds[labels==l]
last_weights = self.per_example_uni_weights[example_inds]
# delta = np.power(weights[labels==l], self.alpha) - \
# np.power(last_weights, self.alpha)
delta = weights[labels==l] - last_weights
delta = self.momentum * self.per_example_velocities[example_inds] + \
(1-self.momentum) * delta
# Update velocities
self.per_example_velocities[example_inds] = delta
# Update per-example weights
# self.per_example_uni_weights[example_inds] = weights[labels==l]
self.per_example_uni_weights[example_inds] += delta
                # Scale the delta by the per-example ratios (i.e., both the
                # before- and after-update per-example weights are rescaled)
delta *= self.per_example_ratios[example_inds]
# Update tree
if self.alpha == 1:
self.ptree.update_delta(l, delta.sum())
else:
self.ptree.update(l, self.per_example_uni_weights[self.cls_idxs[l]].sum())
def reset_priority(self, weights, labels):
if self.pri_mode == 'valid':
assert len(np.unique(labels)) == self.num_classes
weights = np.clip(weights, 0, self.init_weight)
cls_weights = np.zeros(self.num_classes)
for c in np.unique(labels):
cls_weights[c] = weights[labels==c].mean()
cls_weights *= self.cnts
cls_weights *= self.per_cls_ratios
self.ptree.reset_adaptive_weights(cls_weights)
def get_weights(self):
return self.ptree.get_weights()
def get_sampler():
return ClassPrioritySampler
| classifier-balancing-main | data/ClassPrioritySampler.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import numpy as np
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
import os
from PIL import Image
# Image statistics
RGB_statistics = {
'iNaturalist18': {
'mean': [0.466, 0.471, 0.380],
'std': [0.195, 0.194, 0.192]
},
'default': {
'mean': [0.485, 0.456, 0.406],
'std':[0.229, 0.224, 0.225]
}
}
# Data transformation with augmentation
def get_data_transform(split, rgb_mean, rgb_std, key='default'):
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
]) if key == 'iNaturalist18' else transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
])
}
return data_transforms[split]
# Dataset
class LT_Dataset(Dataset):
def __init__(self, root, txt, transform=None):
self.img_path = []
self.labels = []
self.transform = transform
with open(txt) as f:
for line in f:
self.img_path.append(os.path.join(root, line.split()[0]))
self.labels.append(int(line.split()[1]))
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
path = self.img_path[index]
label = self.labels[index]
with open(path, 'rb') as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
return sample, label, index
# Load datasets
def load_data(data_root, dataset, phase, batch_size, sampler_dic=None, num_workers=4, test_open=False, shuffle=True):
if phase == 'train_plain':
txt_split = 'train'
elif phase == 'train_val':
txt_split = 'val'
phase = 'train'
else:
txt_split = phase
txt = './data/%s/%s_%s.txt'%(dataset, dataset, txt_split)
# txt = './data/%s/%s_%s.txt'%(dataset, dataset, (phase if phase != 'train_plain' else 'train'))
print('Loading data from %s' % (txt))
if dataset == 'iNaturalist18':
print('===> Loading iNaturalist18 statistics')
key = 'iNaturalist18'
else:
key = 'default'
rgb_mean, rgb_std = RGB_statistics[key]['mean'], RGB_statistics[key]['std']
if phase not in ['train', 'val']:
transform = get_data_transform('test', rgb_mean, rgb_std, key)
else:
transform = get_data_transform(phase, rgb_mean, rgb_std, key)
print('Use data transformation:', transform)
set_ = LT_Dataset(data_root, txt, transform)
print(len(set_))
if phase == 'test' and test_open:
open_txt = './data/%s/%s_open.txt'%(dataset, dataset)
print('Testing with opensets from %s'%(open_txt))
open_set_ = LT_Dataset('./data/%s/%s_open'%(dataset, dataset), open_txt, transform)
set_ = ConcatDataset([set_, open_set_])
if sampler_dic and phase == 'train':
print('Using sampler: ', sampler_dic['sampler'])
# print('Sample %s samples per-class.' % sampler_dic['num_samples_cls'])
print('Sampler parameters: ', sampler_dic['params'])
return DataLoader(dataset=set_, batch_size=batch_size, shuffle=False,
sampler=sampler_dic['sampler'](set_, **sampler_dic['params']),
num_workers=num_workers)
else:
print('No sampler.')
print('Shuffle is %s.' % (shuffle))
return DataLoader(dataset=set_, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers)
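# A minimal usage sketch (paths and names below are placeholders, not from this
# file): a plain shuffled loader versus class-aware sampling for training.
#
#   from data.ClassAwareSampler import ClassAwareSampler
#   sampler_dic = {'sampler': ClassAwareSampler,
#                  'params': {'num_samples_cls': 4}}
#   train_loader = load_data('/path/to/ImageNet', 'ImageNet_LT', 'train',
#                            batch_size=128, sampler_dic=sampler_dic)
#   plain_loader = load_data('/path/to/ImageNet', 'ImageNet_LT', 'train_plain',
#                            batch_size=128, sampler_dic=None)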
| classifier-balancing-main | data/dataloader.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Usage:
1. Change "root" to your data path
2. python gen_lists.py
"""
import os
import json
from tqdm import tqdm
root = '/checkpoint/bykang/iNaturalist18'
json2txt = {
'train2018.json': 'iNaturalist18_train.txt',
'val2018.json': 'iNaturalist18_val.txt'
}
def convert(json_file, txt_file):
with open(json_file, 'r') as f:
data = json.load(f)
lines = []
for i in tqdm(range(len(data['images']))):
assert data['images'][i]['id'] == data['annotations'][i]['id']
img_name = data['images'][i]['file_name']
label = data['annotations'][i]['category_id']
lines.append(img_name + ' ' + str(label) + '\n')
with open(txt_file, 'w') as ftxt:
ftxt.writelines(lines)
for k, v in json2txt.items():
print('===> Converting {} to {}'.format(k, v))
srcfile = os.path.join(root, k)
convert(srcfile, v)
| classifier-balancing-main | data/iNaturalist18/gen_lists.py |
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import json
from tqdm import tqdm
root = '/datasets01_101/imagenet_full_size/061417'
split2txt = {
'train': 'ImageNet_train.txt',
'val': 'ImageNet_val.txt',
# 'test': 'ImageNet_test.txt',
}
def convert(split, txt_file):
clsnames = os.listdir(os.path.join(root, split))
clsnames.sort()
lines = []
for i, name in enumerate(clsnames):
imgs = os.listdir(os.path.join(root, split, name))
imgs.sort()
for img in imgs:
lines.append(os.path.join(split, name, img) + ' ' + str(i) + '\n')
with open(txt_file, 'w') as f:
f.writelines(lines)
for k, v in split2txt.items():
print('===> Converting {} to {}'.format(k, v))
convert(k, v)
| classifier-balancing-main | data/ImageNet/gen_txt.py |
import re
import sys
import os
import os.path
import random
import json
import time
import nltk.data
import spacy
import pandas as pd
from multiprocessing import Pipe, Pool
from functools import partial
from collections import defaultdict, Counter
from tqdm import tqdm
sys.path.append("/checkpoint/simarora/KILT/")
# from kilt.knowledge_source import KnowledgeSource
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# https://github.com/egerber/spaCy-entity-linker
# initialize language model
nlp = spacy.load("en_core_web_md")
nlp.add_pipe("entityLinker", last=True)
random.seed(1)
INBOX = "dasovich-j"
MY_PATH = "/private/home/simarora/pqa/PersonalDataDemo/" # SET YOUR PATH!
VALID_NER_TYPES = ['ORG', 'PERSON', 'LOC', 'EVENT', 'PRODUCT', 'LANGUAGE', 'LAW']
NER_TYPES_DICT = {
'ORG': 'ORGANIZATION',
'PERSON': "PEOPLE",
'LOC': "LOCATION",
'EVENT': "EVENT",
'PRODUCT': "PRODUCT",
'LANGUAGE': "LANGUAGES",
'LAW': "LEGAL"
}
PUNCT = ["'", ";", ":", ".", ",", '"', "|", ">", "<", "/", "?", ":", ";", "(", ")"]
OVERRIDE = []
# CREATE THE LOCAL CORPUS (approximately 5500 seconds)
def remove_structure_tokens(body):
string_encode = body.encode("ascii", "ignore")
body = string_encode.decode()
body = body.strip()
body = body.strip("]")
body = body.strip("[")
CLEAN_PAIRS = [("\no", " "), ("\n", " "), ("\\n", " "), ("\\t", " "), ("\\", ""),
(" /", " "), (">", " "), ("=09", " "), ("=01", " "), ("=09=09=09=09", " "), ("---", " "),("|", " "),
("___", " "), ("[IMAGE]", " "), ("= ", " "), ("???", " "), ("**", " "), ("??", " "), ("\xa0", " "),
("=20", " "), ("0F", " "), (' " ', " "), (" ' ", " "), (". ?", ". "), ("=01", ""), ("=07", ""),
("National Assn.", "National Association")]
for clean in CLEAN_PAIRS:
body = body.replace(clean[0], clean[1])
# floating quotes
body_toks = body.split()
if body_toks and body_toks[0] in ['"', "'", "?"]:
body_toks = body_toks[1:]
clean_body_toks = []
for ind, tok in enumerate(body_toks):
filt = 0
if len(tok) == 1 and tok in PUNCT:
filt = 1
if all(ch == "?" for ch in tok): # or all(ch == "_" for ch in tok):
filt = 1
if ind > 0 and '.com' in body_toks[ind-1] and tok == 'o':
filt = 1
if len(tok) > 2 and tok[0] == "?":
tok = tok[1:]
if not filt:
clean_body_toks.append(tok.strip())
# get rid of 't o' and 'o f' type splits
combined_tok = ''
combined_toks = []
for ind, tok in enumerate(clean_body_toks):
if combined_tok:
if len(tok) == 1 and tok.islower():
combined_tok = combined_tok + tok
combined_toks.append(combined_tok)
combined_tok = ''
else:
combined_toks.append(combined_tok)
combined_toks.append(tok)
combined_tok = ''
else:
if len(tok) == 1 and tok.islower():
combined_tok = tok
else:
combined_toks.append(tok)
combined_tok = ''
body = " ".join(combined_toks)
# step 4: Wikiextractor cleaning steps
body = body.replace('\t', ' ')
body = body.replace('...', ' ')
body = re.sub(u' (,:\.\)\]»)', r'\1', body)
body = re.sub(u'(\[\(«) ', r'\1', body)
body = re.sub(r'\n\W+?\n', '\n', body, flags=re.U) # lines with only punctuations
body = body.replace(',,', ',').replace(',.', '.')
# Common abbreviations
body = body.replace("U.S. ", "United States ")
body = body.replace("Dept. ", "Department ")
body = body.replace(" ", " ")
return body
def identify_duplicates_by_text():
first_sentences = []
first_sentence_map = defaultdict(list)
duplicates_map = {}
num_duplicates = 0
sentences_matched = 0
with open(f"Enron_{INBOX}/EmailsCorpus.json") as f:
EnronPassages = json.load(f)
EnronPassages_New = {}
for key, passage in tqdm(EnronPassages.items()):
sents = passage['sents']
# check if it's a duplicate
is_duplicate = 0
for sent in sents:
if sent in first_sentences:
is_duplicate = 1
sentences_matched += 1
first_sentence_map[sent].append(key)
break
# save whether it's a duplicate or not
if not is_duplicate:
for sent in sents:
if len(sent.split()) > 1:
first_sentences.append(sent)
break
first_sentence_map[sents[0]].append(key)
duplicates_map[key] = False
else:
duplicates_map[key] = True
num_duplicates += 1
if not duplicates_map[key]:
EnronPassages_New[key] = passage
print(f"Marked {num_duplicates} passages as duplicates.")
print(f"For {sentences_matched} passages, the first sentences matched exactly.")
with open("first_sentence_map.json", "w") as f:
json.dump(first_sentence_map, f)
# only save the non-duplicates
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
json.dump(EnronPassages_New, f)
return duplicates_map
def identify_linked_entities(bodies_lst):
# want one mapping on entities to passages
linked_entities_lst = []
for body in bodies_lst:
doc = nlp(body)
# iterates over sentences and prints linked entities
linked_entities = []
for sent in doc.sents:
for entity in sent._.linkedEntities.__dict__['entities']:
entity_title = entity.__dict__['label']
identifier = entity.__dict__['identifier']
description = entity.__dict__['description']
entity_details = {
'title': entity_title,
'identifier': identifier,
'description': description
}
linked_entities.append(entity_details)
linked_entities_lst.append(linked_entities)
return linked_entities_lst
def get_ner_tags(bodies_lst):
ner_tags_lst = []
for body in bodies_lst:
doc = nlp(body)
ner_tags = []
for ent in doc.ents:
ner_tag = {
'text': ent.text,
'start_char': ent.start_char,
'end_char': ent.end_char,
'ner': ent.label_
}
ner_tags.append(ner_tag)
ner_tags_lst.append(ner_tags)
return ner_tags_lst
def split_body_to_sents(body):
MAXIMUM_WORDS = 150
MINIMUM_WORDS = 50
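    # Greedily packs consecutive sentences into passages of roughly
    # MAXIMUM_WORDS words; a trailing remainder is kept only if it contains at
    # least MINIMUM_WORDS words.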
num_words = 0
body_sents, body_sents_lst = [], []
EDGE_CASES = ["Assn.", "Abbrev.", "Var.", "Gov.", "Mass.", "No.",
"Corp.", "Co.", "Cos.", "Inc.", "Pg.", "etc.", "?Pg.", "II.",
"Mr.", "Mrs.", "Ms.", "CH.", "Ch.", "Md.", "Cup."]
# split body into sentences
all_sents = tokenizer.tokenize(body)
new_all_sents = []
current_sent = []
for sent in all_sents:
if sent and sent != " ":
            if (len(sent) > 1 and sent[-1] == "." and sent[-2].isdigit()) or (
                    len(sent) == 2 and sent[-1] == "." and sent[-2].isupper()) or (
                    len(sent) == 2 and sent[-1] == "(") or (
                    sent.split()[-1] in EDGE_CASES) or (
                    len([ch for ch in sent.split()[-1] if ch == "."]) > 1) or (
                    len(sent) > 2 and sent[-1] == "." and sent[-2].isupper() and sent[-3] == " "):
current_sent.append(sent)
else:
current_sent.append(sent)
sent = " ".join(current_sent.copy())
new_all_sents.append(sent)
current_sent = []
all_sents = new_all_sents.copy()
# split into chunks of some maximum length
for sent in all_sents:
if sent:
body_sents.append(sent)
num_words += len(sent.split())
if num_words > MAXIMUM_WORDS:
body_sents_lst.append(body_sents.copy())
body_sents = []
num_words = 0
    # keep the trailing chunk only if it is long enough
if num_words >= MINIMUM_WORDS:
body_sents_lst.append(body_sents.copy())
body_sents = []
num_words = 0
bodies_lst = []
for body_sents in body_sents_lst:
body = " ".join(body_sents)
bodies_lst.append(body)
return bodies_lst.copy(), body_sents_lst.copy()
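# Usage sketch for split_body_to_sents (illustrative; assumes `tokenizer` is the
# sentence tokenizer initialised earlier in this script). With 10-word
# sentences, a chunk closes once it exceeds MAXIMUM_WORDS=150, and the trailing
# chunk is kept only if it reaches MINIMUM_WORDS=50:
#   body = " ".join(["The quick brown fox jumps over the lazy dog today."] * 40)
#   bodies, sent_lists = split_body_to_sents(body)
#   [len(b.split()) for b in bodies]  # -> [160, 160, 80]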
def create_local_documents(data, index):
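    """Create passage entries for a single email: clean the body, split it into
    chunks, run NER and entity linking, and return (finalEntries, entity2emailid,
    email2entities, passage2sents) for the email at position `index` in `data`."""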
passage2sents = {}
finalEntries = {}
entity2emailid = defaultdict(list)
email2entities = defaultdict(list)
email_key = index
    assert isinstance(index, int), "index is not the correct format"
psg_key = 0
row = data[index]
body = row["Body"]
if body.strip():
email_title = "EMAIL_" + str(email_key)
body = remove_structure_tokens(body)
        # split the email into MAX-SEQ-LENGTH-sized chunks
bodies_lst, body_sents_lst = split_body_to_sents(body)
# get entity annotations
ner_tags_lst = get_ner_tags(bodies_lst)
linked_entities_lst = identify_linked_entities(bodies_lst)
for body, body_sents, linked_entities, ner_tags in zip(bodies_lst, body_sents_lst, linked_entities_lst, ner_tags_lst):
psg_title = f"PERSONAL_e{str(email_key)}_p{str(psg_key)}"
passage2sents[psg_title] = body_sents
new_id = f"e{str(email_key)}_p{str(psg_key)}"
finalEntries[new_id] = {
"id": new_id,
"email_title":email_title,
"title":psg_title,
"text":body,
"sents":body_sents,
"ner_tags_lst":ner_tags,
"linked_entities_lst":linked_entities
}
for ent in linked_entities:
entity2emailid[ent['title']].append(psg_title)
email2entities[psg_title].append(ent['title'])
psg_key += 1
return finalEntries, entity2emailid, email2entities, passage2sents
def create_local_passages_wrapper():
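    """Fan per-email passage creation out over a process pool, then merge the
    per-email outputs into corpus-level mappings and write them to disk."""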
# unzips the raw data
pool = Pool(8)
passage2sents = {}
entity2emailid = defaultdict(list)
email2entities = defaultdict(list)
# load the correct inbox and the mappings
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/parsed_maildir/{INBOX}_09082021.csv") as f:
data = pd.read_csv(f)
print(f"Length of inbox: {INBOX} is {len(data)}")
st = time.time()
# select entries with an existent body and message id
data = data[pd.notnull(data['Body'])]
data = data[pd.notnull(data['Message-ID'])]
data = data.to_dict('records')
    # data = data[0:100]  # uncomment to debug on a small slice
data_indices = range(len(data))
entries_lst, entity2emailid_lst, email2entities_lst, passage2sents_lst = zip(*pool.map(partial(create_local_documents, data), data_indices))
finalEntries = {}
for entries_dict in entries_lst:
for key, entry in entries_dict.items():
finalEntries[key] = entry
with open(f"Enron_{INBOX}/EmailsCorpus.json", "w") as f:
json.dump(finalEntries, f)
for passage2sents_subdict in passage2sents_lst:
for psg_key, sents in passage2sents_subdict.items():
passage2sents[psg_key] = sents
for email2entities_subdict in email2entities_lst:
for psg_key, entities_list in email2entities_subdict.items():
email2entities[psg_key] = entities_list
for entity2emailid_subdict in entity2emailid_lst:
for entity_name, psgs_list in entity2emailid_subdict.items():
if entity_name in entity2emailid:
entity2emailid[entity_name].extend(psgs_list)
else:
entity2emailid[entity_name] = psgs_list
    # save the mappings
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/subject2sents.json", "w") as f:
json.dump(passage2sents, f)
print(f"Saved passages 2 sents for {len(passage2sents)} passages.")
with open(f"{MY_PATH}/Enron_{INBOX}/entity2emailid.json", "w") as f:
json.dump(entity2emailid, f)
print(f"Saved entity2emailid for {len(entity2emailid)} entities.")
with open(f"{MY_PATH}/Enron_{INBOX}/email2entities.json", "w") as f:
json.dump(email2entities, f)
print(f"Saved email2entities for {len(email2entities)} emails.")
print(f"Generate full set of personal documents in time: {time.time() - st}")
print(f"There are: {len(finalEntries)} passages created.")
def extra_cleaning():
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
EnronPassages_New = {}
for key, passage in tqdm(EnronPassages.items()):
new_sents = []
for sent in passage['sents']:
sent = remove_structure_tokens(sent)
if sent and sent != " ":
if sent[0] == "?" and len(sent) > 1:
sent = sent[1:]
new_sents.append(sent)
passage["sents"] = new_sents
passage['text'] = " ".join(new_sents)
EnronPassages_New[key] = passage
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
        json.dump(EnronPassages_New, f)
# FILTER POOR QUALITY NED TAGS AND GENERATE FINAL LISTS OF LOCAL / GLOBAL ENTITIES
def ner_alias_replacements(tag_text):
tag_toks = tag_text.split()
tag_toks = [tok.replace("\\'s", "") for tok in tag_toks]
tag_toks = [tok.replace("'s", "") for tok in tag_toks]
tag_toks = [tok for tok in tag_toks if tok not in ['RE', 'F1', 'To:', "PS", "Subject", "Sent"]]
tag_toks = [tok.replace("=20","").replace("=","").strip() for tok in tag_toks if tok not in ['the'] and tok not in PUNCT]
tag_text = " ".join(tag_toks)
tag_text = tag_text.replace("Enron", "")
tag_text = tag_text.replace("U.S.", "United States")
tag_text = tag_text.replace("US", "United States")
tag_text = tag_text.replace("LA", "Los Angeles")
tag_text = tag_text.replace("L.A.", "Los Angeles")
tag_text = tag_text.replace("SF", "San Francisco")
tag_text = tag_text.replace("NY", "New York")
tag_text = tag_text.replace("N.Y.", "New York")
# punct
tag_text = tag_text.replace("**", "").strip()
tag_text = tag_text.replace("-", "").strip()
tag_text = tag_text.replace("\\t", " ").strip()
tag_text = tag_text.replace("\\", "")
tag_text = tag_text.replace(":", " ").strip()
    tag_text = tag_text.replace("  ", " ")  # collapse double spaces
return tag_text
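# Illustrative trace of the normalisation above:
#   ner_alias_replacements("Enron Corp.'s RE")  # -> "Corp."
# ("'s" is stripped, the blocked token "RE" is dropped, and "Enron" is removed.)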
def filter_named_entities():
print("Running some filtering on tagged entities to remove poor quallity!")
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
linkedentities2nertags_global = {}
for k, v in tqdm(EnronPassages.items()):
text = v['text']
ner_tag_text = []
ner_tag_to_item = {}
for tag in v['ner_tags_lst']:
tag_text = ner_alias_replacements(tag['text'])
if tag_text:
ner_tag_text.append(tag_text)
ner_tag_to_item[tag_text] = tag
filtered_ents = []
for ent in v['linked_entities_lst']:
filter = 0
# FILTER 1: exact match of alias and entity title
if not ent or not ent['title'] or ent['title'] not in text:
filter = 1
            # FILTER 2: if the entity title does not appear in any NER-tagged span text
if ent['title'] not in ner_tag_text:
filter = 1
# FILTER 3: if it's a PERSON NER tag, and not the full name (first, last) then drop it
if not filter and ner_tag_to_item[ent['title']]['ner'] == "PERSON":
if len(ent['title'].split()) == 1:
filter = 1
# sometimes the second word is just an initial e.g., "Richard B."
elif len(ent['title'].split()) == 2 and len(ent['title'].split()[1]) < 3:
filter = 1
            # FILTER 4 (not implemented): check whether any entity-linking description words match the text, e.g., Nokia Chairman
if not filter:
linkedentities2nertags_global[ent['title']] = ner_tag_to_item[ent['title']]['ner']
filtered_ents.append(ent)
v['linked_entities_lst'] = filtered_ents
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
json.dump(EnronPassages, f)
with open(f"{MY_PATH}/Enron_{INBOX}/linkedentities2nertags_global.json", "w") as f:
json.dump(linkedentities2nertags_global, f)
# PRODUCE A LIST OF THE LOCAL AND GLOBAL ENTITIES
def get_wiki_df():
st = time.time()
passages_path = '/checkpoint/simarora/mdr/data/hotpot_index/wiki_id2doc.json'
with open(passages_path) as f:
wiki_id2doc = json.load(f)
passages = []
for k, v in wiki_id2doc.items():
v['id'] = k
passages.append(v)
print(f"Loaded full set of documents in {time.time() - st}")
    st = time.time()
    df = pd.DataFrame(passages)
    print(f"Built passages DataFrame in {time.time() - st}")
    wikititles = [psg['title'] for psg in passages]
return df, wikititles
def get_variations_lst(titles, wikititles=[], cache=None, text="", sents=[]):
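    """For titles that failed entity linking, try corporate-suffix variations
    (e.g. "X Corp." -> "X Corporation"); a title whose variation appears in
    `wikititles` (or the cache) is promoted to global, and the passage text and
    sentences are rewritten to use the matched variation. Returns
    (promoted title->NER tag, remaining titles, text, sents, cache)."""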
global_titles = {}
remaining = []
for tup in titles:
title, tag = tup[0], tup[1]
filt = 1
if " Cor" in title:
USE_REPLACEMENTS = [("Corp.",""), ("Corporation", ""), ("Corp.", "Corporation"), ("Corp.", "Company")]
elif " Co" in title:
USE_REPLACEMENTS = [("Co.",""), ("Co", ""), ("Co.", "Company"), ("& Co.", ""), ("Computer", "")]
elif "The " in title:
USE_REPLACEMENTS = [("The ", "")]
elif "Inc" in title:
USE_REPLACEMENTS = [("Inc. ", ""), ("Inc.", "")]
elif "Venture" in title:
USE_REPLACEMENTS = [("Ventures", " "), ("Venture Fund", " ")]
elif any(wd in title for wd in ['URL', 'Ltd.', '&', "Venture", "Capital", "News"]):
USE_REPLACEMENTS = [("Ltd.", ""), ("URL", ""), ("&", "&"), ("Limited", ""), ("Newspaper", " "), ("Capital", " ")]
else:
USE_REPLACEMENTS = []
if USE_REPLACEMENTS:
title = title.replace(",", " ")
            title = title.replace("  ", " ")
for replace in USE_REPLACEMENTS:
title_new = title.replace(replace[0], replace[1]).strip()
if title == title_new:
continue
elif title_new in cache and cache[title_new]:
filt = 0
break
elif title_new in wikititles:
filt = 0
cache[title_new] = 1
break
if not filt:
text = text.replace(title, title_new)
            text = text.replace("  ", " ")
new_sents = []
for sent in sents:
                new_sents.append(sent.replace(title, title_new).replace("  ", " "))
sents = new_sents.copy()
global_titles[title_new] = tag
else:
remaining.append(title)
return global_titles, remaining, text, sents, cache
def local_ents_refilter_by_wikipassages():
df, wikititles = get_wiki_df()
THRESH = 9
freq_local = []
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json") as f:
local_ents = json.load(f)
for key, value in local_ents.items():
if value > THRESH:
freq_local.append(key)
swapped_titles = []
for local_title in freq_local:
sents = len(df[df['title'].str.contains(local_title)]["id"].values)
sents += len(df[df['text'].str.contains(local_title)]["id"].values)
if sents >= 1:
swapped_titles.append(local_title)
with open(f"Enron_{INBOX}/local_ents_refilter.json", "w") as f:
json.dump(swapped_titles, f)
def local_ents_checker(local_title, hard_coded_dictionary):
# hard rules for which we want to exclude the local title as a local entity
if any((len(tok) == 2 and tok.islower() and tok not in stop_words) for tok in local_title.split()):
return False
    if any(tok in ['PM', 'AM', 'EDT', 'EST', 'PST', 'AB', 'SB', 'Cc', 'RE', 'F1',
'To:', "PS", "P.S.", "Subject", 'said', 'said.', "hasn\'t", 'has',
"doesn\'t", "does", "didn\'t", "did"] for tok in local_title.split()):
return False
if any((len(tok) == 1 and tok.islower() and tok not in ['a', 'i']) for tok in local_title.split()):
return False
if any(wd in local_title for wd in ['United States', "Dow Jones", 'New York', 'Committee', "AT&T",
"Associated Press", "Goldman Sachs", "Pacific Gas", "The Times",
"Financial Times", "Haas School", "Charles Schwab",
"Morgan Stanley", "J.P. Morgan", "Standard &",
"FERC", 'Los Angeles', "PG&E", "San Francisco", ".com"]):
return False
if local_title.split()[0] == '&' or local_title.split()[-1] == '&':
return False
if local_title.split()[0] in ['of', 'To', "or"] or local_title.split()[-1] == 'of':
return False
if "?" in local_title:
return False
if local_title.isupper() or local_title.islower():
return False
for tok in local_title.split():
if all(t.isdigit() for t in tok):
return False
if hard_coded_dictionary[local_title]:
OVERRIDE.append(local_title)
return False
return True
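# Examples of the rules above (illustrative): "Dabhol Power Project" passes
# (assuming it is not in the hard-coded dictionary), while "ENRON CORP" (all
# caps), "PM Schedule" (blocked token), and any hard-coded title are rejected.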
def hard_coded_remove_local_entities():
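    """Write the hand-curated block and override lists to disk and return a dict
    mapping every hard-coded title to 1, used to veto local-entity candidates."""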
hard_coded_dictionary = defaultdict(int)
remove_local = [
'Jeff Dasovich', 'Wash. Post', 'Private Company Business News', 'Public Service Company of New Mexico',
'Channing Way Berkeley', 'Universal Studios', 'California State', "National Assn.",
'University of California, Berkeley Berkeley', 'AP Business Writer', 'Bad News', 'English News',
'West Coast', 'Haas Social Venture Competition', 'Haas Haas Celebrations',
'Electrical Engineering', 'Board of Directors', 'Pacific G&E', 'Calif Gov', 'California Senate',
'California Legislature', 'The Economic Times Copyright', 'Times Staff', 'Costa Times',
'Times Staff Writers', 'World Watch The Wall Street Journal', "Mobile Outlook",
'The Wall Street Journal A2', 'Dear Haas Community', 'California State University and University of California',
'Jeff Dasovich NA', 'Justice Department', 'Energy Department', 'State Department', 'The Commerce Department',
'Department of Water', 'Department of Finance', 'Defense Department', 'Interior Department',
'Water Resources Department', 'Department of Commerce', 'The Energy Department', 'The Justice Department',
'The Department of Energy', 'Department of Education', 'Labor Department', 'The Department of Water Resources',
'The Labor Department', 'Treasury Department', 'Commerce Department', 'Northern and', 'Account and',
'Computer Science or Engineering', 'Participation in Roundtable on Lessons Learned',
'English News Service', 'Newport News', 'Domestic News', 'Eastern Time', 'Central Time', 'Govt Affairs',
'Evening MBA Program Office', 'General Accounting Office', 'III Chief of Staff Office of Assembly',
'Office of Emergency Services', 'Office of Government Ethics', 'The General Accounting Office', 'Docket Office',
'DSan Diego', 'The State Government', 'United Kingdom0F', 'Page A1', 'Gas & Electric', 'George W.',
'Gov Davis', 'Board BOSTON', 'Science & Technology', "Gov't Affairs", 'Section 19.3.2',
'Dow Jones)The California Independent System Operator','Corp. Cut', 'Securities & Exchange Commission',
"Director Institute of Management, Innovation and Organization"
]
print(f"Total remove local size: {len(remove_local)}")
with open(f"{MY_PATH}/Enron_{INBOX}/hard_block_local_entities_v2.json", "w") as f:
json.dump(remove_local, f)
global_override = [
'UC CSU', 'Enron Corp.', "Securities & Exchange Commission", "QUALCOMM, Inc.", 'UC Berkeley',
'University of California Berkeley', 'Berkeley CA', 'University of California at Berkeley',
'Merrill Lynch & Co.', 'Wells Fargo & Co.', 'Boeing Co.', 'U.C. Berkeley', 'Bain & Co.', 'Allen & Co.',
'Bear, Stearns & Co.', 'General Electric Co.', 'Ford Motor Co.', 'Walt Disney Co.', 'Transwestern Pipeline Co.',
'Halliburton Co.', 'Portland General Electric Co.', 'Southern California Edison Co.',
'Transwestern Pipeline Co.', 'American Electric Power Co.', 'El Paso Natural Gas Co.','DTE Energy Co.',
'Green Mountain Energy Co.','Commonwealth Edison Co.', 'Arizona Public Service Co.','Tata Power Co.',
'Duke Energy Co.', 'DuPont Co.','Gas Co.','Gujarat Gas Co.', 'McKinsey & Co.', 'Goldman, Sachs & Co.',
'Economic Times', 'New York Times', "New President & CEO", "President & CEO", "VC Fund", "Lays Off",
'UC San Diego', 'District of Columbia', 'JP Morgan Chase', 'Morgan Point', 'JP Morgan',
'Transwestern Pipeline Company', 'McKinsey & Company', 'The Gas Company', 'The Washington Post Co.',
'El Paso Natural Gas Company', 'Portland General Electric Company', 'L.A. Times', 'Wall Street Journal',
'Transwestern Pipeline Company', 'Southern California Edison Company', 'Chicago Tribune Company',
'Idaho Power Company', 'The Dabhol Power Company', "The Securities and Exchange Commission",
'The New Power Company', 'San Diego Gas and Electric Company', 'Greenfield Shipping Company',
'Public Utility Holding Company Act', 'San Diego Gas & Electric Company', 'UC Davis', 'UC Irvine',
'UC BERKELEY', 'Department of Water Resources', 'Exelon Corp.', "Chronicle Staff Writers",
'Department of Energy', 'Department of Environmental Protection', "Department of Water Resources",
'TXU Corp.', 'Apache Corp.', 'Microsoft Corp.', 'Intel Corp.', 'Sony Corp.', 'News Corp.',
'General Motors Corp.', 'Exxon Mobil Corp.', 'Chevron Corp.', 'Compaq Computer Corp.',
'Nortel Networks Corp.', 'Enron North America Corp.', 'Enron Canada Corp.', 'Oracle Corp.', 'PPL Corp.',
'EMC Corp.', 'BellSouth Corp.', 'National Thermal Power Corp.', 'American Electric Power Service Corp.',
'Illinova Corp.', 'Electric Corp.', 'El Paso Energy Corp.', 'Indian Oil Corp.', 'TransAlta Corp.',
'Fluor Corp.', 'Dabhol Power Corp.', 'Mobil Corp.', 'Exxon Corp.', 'ChevronTexaco Corp.', 'E nron Corp.',
'Questar Corp.', 'Qwest Corp.', 'Sprint Corp.', '- Enron Corp.', 'Bank of America Corp.',
'Bechtel Corp.', 'First Albany Corp.', 'Sempra Energy Corp.', 'Yellow Corp.', 'Sempra Energy Trading Corp.',
'Credit Suisse First Boston Corp.', 'VoiceStream Wireless Corp.', 'Oil & Natural Gas Corp.', 'Enron Corp. Cut',
'Enron Corporation', 'VC Personnel', "Time Warner Telecom, Inc.", "Time Warner Telecom", "Our Bureau Copyright",
"Nortel Networks", "National Public Radio", "Independent Ene rgy Producers Association",
"Cinergy Corp.", "Dynegy Inc.", "Dynegy Corp.", "Nasdaq Stock Market", "The Economist Newspaper",
"The Independent London FOREIGN", "Dell Computer", "Viacom Inc.", "Compaq Computer", "Reuters Limited",
"WalMart Stores Inc.", "Cisco Systems Inc.", "Royal Dutch Shell Group", "Occidental Petroleum Corp.",
"Marathon Oil Canada Inc.", "NRG Energy Inc.", "Barclays Global Investors", "Deloitte Consulting",
"Financial Desk", "AP Business Writer DATELINE", "Financial Desk Markets", "Shiv SenaBJP",
"AP Online", "Futu reTense", "Procter & Gamble", "Chronicle Staff", "Environmental Strategies", "Editorial Desk",
"Johnson & Johnson", "Assembly Floor", "Assembly Energy", "Working Council",
"HewlettPackard Co.", "Board SAN FRANCISCO", "Angel Investors", "Your Account Settings", "McGrawHill, Inc.",
"Deutsche Bank AG", "Industrial Markets", "Verizon Communications, Inc.", "Washington Post Staff",
"Sun Microsystems Inc.", "Oil & Gas", "a Federal Energy Regulatory Commission", "UBS Capital", "AT&T Ventures",
"The Boston Consulting Group", "Oracle Venture Fund", "Gas Daily",
"The Supreme Court", "Internet Outlook", "Round Two", "NRG Energy, Inc.", 'Department of Justice',
"Wireless Telecommunications", "a Securities and Exchange Commission", "Week Change", "Pacific, Boston",
'Department of Water Resources.',"The Hindu Copyright (C", "PR Newswire (Copyright (c)", "Finance Ministry",
]
swapped_titles = [
'Enron Corp', 'Enron Corp.', 'Smith Street', 'Power Exchange', 'General Fund', 'Ken Lay', 'Dow Jones', 'Jim Foster', 'UBS Warburg',
'California Senate', 'Energy Committee', 'Universal Studios', 'Nevada Power Co.', 'Sierra Pacific Power', 'UC Berkeley', 'Bush Administration',
'Steve Baum', 'Dept. of', 'Water Resources', 'The Chronicle', 'Department of Water Resources', 'Legislative Analyst', 'Gordon Smith',
'Federal Energy Regulatory', 'Anne Kelly', 'Andy Brown', 'State Legislature', 'Quaker Oats', 'Advisory Group', 'San Diego Gas', 'Action Network',
'Government Affairs', 'Jeff D.', 'Utility Service', 'Williams Communications', 'Public Service Commission', 'Direct Access', 'California State',
'John Campbell', 'Chamber of Commerce', 'Sacramento Bee', 'San Jose Mercury News', 'Craig Rose', 'David Ward', 'Don Thompson', 'Public Affairs',
'Wall Street Journal', 'Independent System', 'Public Utilities Commission', 'Bill Campbell', 'John Nelson', 'Charles Schwab', 'Corporate Finance',
'California Assembly', 'Susan Davis', 'Pacific Gas', 'Proposition 9', 'Energy Commission', 'The Utility Reform Network', "Arthur O\\'Donnell",
'Electric Co.', 'Paul Patterson', 'Independent System Operator', 'Tom Higgins', 'Wheeler Ridge', 'Southern California Gas Co.', 'El Paso',
'Watson Wyatt', 'United States EPA', 'Business Development', 'David Young', 'Hewlett Packard', 'Bill Jones', 'Ray Hart', 'Pacific Gas &', 'California Edison',
'Senate Energy', 'Sony Computer Entertainment America', 'Reliant Energy', 'Pro Tem', 'Maharashtra Government', 'Salomon Smith Barney', 'West Coast',
'The White House', 'Claire Buchan', 'Halliburton Co.', 'Apache Corp.', 'Duke Energy Corp.', 'Dabhol Power Co.', 'Economic Times', 'Independent Energy',
'in California', 'Portland General Electric Co.', 'Portland General', 'Sierra Pacific', 'Mike Day', 'Rocky Mountain', 'Securities and Exchange Commission',
'AES Corp.', 'Michael Kahn', 'Dan Schnur', 'UC Davis', 'New York Times', 'John Stevens', 'Electric Company', 'Broadband Services', 'Ken Rice', 'Bay Area',
'New York Times Company', 'El Paso Energy', 'Rebecca Smith', 'Washington Post', 'Environmental Protection Agency', 'Southern Co.', 'Federal Reserve',
'International Business Machines', 'Microsoft Corp.', 'Intel Corp.', 'Walt Disney Co.', 'Verizon Communications Inc.', 'Sony Corp.', 'News Corp.', 'Big Board',
'George Bush', 'Entergy Corp.', 'Dabhol Power', 'Department of Energy', 'Portland General Electric Company', 'Phillips Petroleum Co.', 'Shell Oil Co.',
'John Chambers', 'Haas School', 'Utility Reform Network', 'Mark Cooper', 'North Field', 'State Government', 'Central Government', 'New Power', 'National Grid',
'Gulf Coast', 'John Anderson', 'General Motors Corp.', 'Home Depot', 'Exxon Mobil', 'MBA Program', 'Forest Service', 'Napa Valley', 'Carnegie Mellon',
'Washington University', 'John Edmiston', 'Quaker Oats Co.', 'American Electric Power Co.', 'Jeff Miller', 'Louis XIV', 't o', 'Joe Edwards', 'William S.',
'Energy Policy Act', 'General Electric Co.', 'International Business Machines Corp.', 'America Online', 'Wal-Mart Stores', 'Ford Motor', 'Bell Atlantic',
'SBC Communications', 'Fortune magazine', 'Exxon Mobil Corp.', 'Texaco Inc.', 'Chevron Corp.', 'Ford Motor Co.', 'Citigroup Inc.', 'Phillips Petroleum',
'J.C. Penney', 'Waste Management', 'Ethics Commission', 'Philip Morris', 'Union Government', 'Oversight Board', 'John Burton', 'County Board of Supervisors',
'Michael Katz', 'Jonathan Berk', 'University of Texas', 'Graduate School of Business', 'Wharton School', 'Mike Allen', 'California Commission', 'United States News',
'Andrew Rose', 'Ken Rosen', 'Urban Economics', 'Eugene E.', 'Business Administration', 'National Economic Council', 'Board of Directors', 'Asia Pacific',
'Marketing Group', 'John Morel', 'Electrical Engineering', 'External Affairs', 'Energy Services', 'New York', 'al l', 'New Economy', 'First Amendment', 'East Coast',
'Tracy Fairchild', 'Nevada Power', 'Amr Ibrahim', 'California Street', 'Republican Assembly', 'Supreme Court', 'Roger Salazar', 'Aaron Thomas', 'Joe Dunn',
'Tom Williams', 'John Sousa', 'east coast', 'Chapter 11', 'House Energy', 'Union Bank of California', 'Computer Center', 'District Court', 'Charles Robinson',
'State of California', 'J.P. Morgan', 'Golden State', 'Department of Environmental Protection', 'Natural Gas Act', 'Fortune 100', 'west coast', 'Dabhol Power Co',
'Lee Brown', 'City Council', 'City Hall', 'Digital Media', 'Edward Jones', 'Bank of New York', 'Bank One', 'Bankruptcy Court', 'Public Service Co.', 'United States Bank',
'Department of Water and Power', 'United States Bankruptcy Court', 'Southern California Gas', 'Eastern Time', 'Steve Johnson', 'Investors Service', 'Mercury News',
'Peter Cartwright', 'Securities Exchange Act', 'United States Supreme Court', 'PECO Energy Co.', 'Steve Wright', 'Cal State', 'Morro Bay', 'Southern Energy', 'AES Corp',
'Business Week', 'Mission Energy', 'Pacific Gas and Electric Co.', 'California Public Utilities', 'Henry Duque', 'United States Energy', 'Clean Air Act', 'Justice Department',
'Energy Department', 'Moss Landing', 'Chula Vista', 'United States House', 'Montana Power Co.', 'Montana Power', 'General Counsel', 'Pacific Gas and', 'Bankruptcy Code',
'College of Engineering', 'Federal Government', 'Squaw Valley', 'South Bay', 'Geoff Brown', 'Geoffrey Brown', 'Pat Wood', 'Oracle Corp.', 'Apple Computer', 'PPL Corp.',
'Wisconsin Energy', 'Stephen Oliver', "Los Angeles\\'", 'Cove Point', 'Williams Co.', 'United States Treasury', 'United States Circuit Court', 'Ras Laffan', 'Signature Services',
'customer s', 'United States Mail', 'United States Court of Appeals', 'Qualcomm Inc.', 'State Department', 'Bay area', 'Morgan Point', 'John Olson', 'Mike Smith', 'K Street',
'Richard Sanders', 'Bob Williams', 'Gary Fergus', 'Central Time', 'UC Irvine', 'Round One', 'Public Utility Commission', 'Energy Crisis', 'Energy Regulatory Commission',
'Rebecca Mark', 'Solar Power', 'Sierra Pacific Power Co.', 'Shell Oil', 'Sacramento Municipal Utility', 'Air Force', 'Workers Party', 'Peter Evans',
'Competitive Telecommunications Association', 'Richard Lyons', 'Commonwealth Edison Co.', 'Atal Bihari', 'Coyote Valley', 'Superior Court', 'Costa Times', 'Jack Scott',
'Jim Sanders', 'General Accounting Office', 'National Energy', 'Bill Morrow', 'Bob Foster', 'Bill Leonard', 'David Freeman', 'Dave Freeman', 'Board of Supervisors',
'Willie Brown', 'Communications Committee', 'Red Herring', 'Paul Carpenter', 'Harvey Morris', 'Market Surveillance Committee', 'State Auditor', 'The European Union',
'Electric Corp.', 'Utilities Commission', 'California Independent System', 'Joseph Dunn', 'John White', 'Robert Laurie', 'Richard Ellis',
'West Asia', 'Arizona Public Service Co.', 'Stephen Frank', 'Ross Johnson', 'Patrick Wood', 'David Hitchcock', 'Investor Service', 'ta ke', 'English News Service',
'Indian Oil Corp.', 'David Cox', 'Ben Campbell', 'John Wilson', 'Craig Barrett', 'William Wise', 'System Operator', 'East Bay', 'Fluor Corp.', 'sta te',
'Conference Board', 'San Francisco Chron', 'rat e', 'Dan Smith', 'Federal Energy', 'Clark Kelso', 'San Diego Gas &', 'Senate Select Committee', 'Public Utilities',
'Gray Dav', 'Department of Water', 'th e', 'Fair Oaks', 'Press Club', 'Tom Riley', 'Tamara Johnson', 'Air Resources Board', 'Regulatory Affairs', 'Marina del Rey',
'Desert Southwest', 'Franchise Tax Board', 'Investor Relations', 'General Assembly', 'High Point', 'Human Resources', 'ou r', 'Chase Manhattan', 'Ray Lane',
'Alex Brown', 'Venture Partners', 'Thomas White', 'Senate Appropriations', 'Robert C.', 'tha n', 'British Telecommunications plc', 'Health and Human Services',
'Harris Interactive', 'Kleiner Perkins', 'Mobil Corp.', 'Exxon Corp.', 'Steve Elliott', 'Board of Equalization', 'Department of Finance', 'Phi Beta Kappa', 'Richard Simon',
'Bank of Nova Scotia', 'Credit Lyonnais', 'Neil Stein', 'Wen Chen', 'Energy Conference', 'Undergraduate Program', 'Task Force', 'Legislative Counsel', 'Andersen Consulting',
'Indian Government', 'Ajit Kumar', 'Peter Behr', 'Kevin Murray', 'Carl Pope', 'Sean Gallagher', 'K. Lay', "Paul O\\'Neill", 'Chase Manhattan Bank', 'Maharashtra State', 'Banc of America',
'Ian Russell', 'Questar Corp.', 'State Senate', 'Republican Party', 'British Telecom', 'Salomon Smith', 'Defense Department', 'Wholesale Energy Market', 'Laurence Drivon', 'Western Power',
'John Hill', 'Regulatory Commission', 'o r', 'United States District Court', 'Air Quality', 'The Golden State', 'Boeing Co.', 'Social Security', 'UC San Diego', 'mor e', "Brian D\\'Arcy",
'the administration', 'n California', 'Northern and', 'yea r', 'International Power', 'California Chamber', 'Mike Briggs', 'California Independent', 'Elk Grove', 'wer e',
'Commonwealth Club', 'tha t', 'Los Angeles Department', 'stat e', 'Arctic National Wildlife', 'Diablo Canyon', 'District of Columbia', 'Pfizer Inc.', 'Jack Stewart', 'Keith McCrea',
'Barclays Capital', 'Qwest Corp.', 'Sprint Corp.', 'Enforcement Bureau', 'Financial Express', 'Business Council', 'Newport News', 'Press Trust', 'Nesbitt Burns', 'Brad Williams', 't he',
'Scott Reed', 'Chris Cox', 'Edwin Chen', 'Los Angeles Department of Water and ', 'Water Resources Department', 'at a', 'Randy Cunningham', 'Duke Power', 'Jeffrey A.', 'Jeff Brown',
'pa y', 'Joe Nation', 'Star Fleet', 'Montana Resources', 'Marine Corps', 'Office of Emergency Services', 'Otay Mesa', 'Rick Johnson', 'Societe Generale', 'Michael Hoffman',
'Blackstone Group', 'Community Energy', 'c Utilities Commission', 'Capital Investors', 'Venture Fund', 'Department of Commerce', 'Pinot Noir', 'Governing Board', 'vic e',
'Eastman Kodak', 'Carlyle Group', 'Grey Advertising', 'Model N', 'WR Hambrecht', 'North Slope', 'Energy Foundation', 'Christopher F.', 'Raymond James', 'Product Development',
'Dain Rauscher', 'Imperial Bank', 'Venture Capital', 'and Washington', 'Sevin Rosen', 'of Sales', 'Bank of America Corp.', 'n energy', 'Three Mile Island', 'Los Angeles Department of Water',
'Mark Baldwin', 'Global Coal', 'TL Ventures', 'George H.W. Bush', 'United States Power', 'for California', 'an d', 'control s', 'don e', 'the commission', 'Data Centers',
'Western Region', 'Capital Partners', 'Public Utility Holding Company Act', 'John Browne', 'Virodhi Andolan', 'are a', 'William Hogan', 'business development', 'Ken Smith',
'State Board of Equalization', 'Duke Energy Co.', 'Information Technology', 'William Blair', 'Technology Ventures', 'Capital Management', 'Growth Capital', 'Thomas Weisel',
'Investment Management', 'Union Pacific', 'Public Policy Institute', 'David Anderson', 'New West', 'supreme court', 'Susan Scott', 'Judiciary Committee', 'Eastman Chemical',
'Hummer Winblad', 'Draper Fisher', 'Arthur Andersen LLP', 'Department of Education', 'September 11th', 'S. David', 'Lloyds TSB', 'Republican party', 'for a', 'Amadeus Capital',
'Clay Johnson', 'Labor Department', 'Bill Wood', 'official s', 'Angeles Department of Water and Power', 'Florida Supreme Court', 'Governmental Affairs Committee', 'Royal Dutch',
'Alfred Kahn', 'World Affairs Council', 'Richard B.', 'Mechanical Engineering', 'Project Manager', 'The Independent Institute', 'Sony Music Entertainment', 'Texas Pacific',
'Providence Equity', 'Azure Capital', 'Page 2', 'Intel Corporation', 'Ministry of Defense', 'La Suer', 'Wind River', 'First Energy', 'Arts Alliance', 'Critical Path',
'Office of Government Ethics', 'Moore Capital', 'Desert Star', 'California Energy', 'United Way', 'Contra Costa', 'State Water Resources Control Board', 'West coast',
'Scott Miller', 'Channel 8', 'Rules Committee', 'Finance Group', 'PECO Energy', '2001 Los Angeles', 'Department of Justice', 'Contra Costa County', 'section 2', 'Pequot Capital',
'Bill Hall', 'William Hall', 'Royal Caribbean', 'Lee Friedman', 'Tom Gros', 'Blue Shield', 'Science Applications International', 'BMG Entertainment', 'Court of Appeals',
'Jeff Green', 'Bill Massey', 'Reed Elsevier', 'International Affairs', 'Professor of Public Policy', 'Computer Science', 'Data Warehouse', 'Michael Day', 'Dow Chemical',
'Fleur de Lys', 'Mona L', 'the Commission', 'First Fund', 'Discovery Capital', 'Applied Micro Circuits', 'California Report', 'Michael Ramsay', 'Tim Carter', 'Alpine Meadows',
'Order No', 'Salvation Army', 'Shaw Group', 'Michael M.', 'Chris H.', 'Williams III', 'Duke of', 'San Jose', 'David W', 'PS 2', 'Doug Smith', 'Securities and Exchange',
'Bonneville Power', 'Vol. 3', 'Steve Smith', 'Strategic Energy', 'Cal State Fullerton', 'Steve Hall', 'Phillip K.', 'Political Reform Act', 'Senate Committee', 'Glenn Johnson',
'Fair Political Practices Commission', 'Electric Board', 'Power Authority', 'Bill Ahern', 'John D. Dingell', 'John S.', 'New Energy', 'Northern Natural Gas', 'Michael Kirby',
'Gas Co.', 'Charlotte Observer', 'Stephen Moore', 'L.A. Times', 'Company, Inc.', 'Bob Anderson', 'William Mead', 'South Lake Tahoe', 'Wisconsin Gas', 'Mark Long',
'The Financial Express', "Brian O'Connell", 'Jim Fallon', 'Red Cross', 'Ann M.', 'James D.', 'Mark A.', 'Kevin Kelley', 'Steven J.', 'Linda J.', 'Coral Springs', 'P.O. Box',
'Steve C.', 'Susan M.', 'Cornell Club', 'Performance Management', 'Review Group', 'Robin Hill', 'Bad News', 'Opus One', 'Wireless Services', 'First Round',
'Kennedy School of Government', 'National Geographic', 'John Bowers', 'Optical Internet', 'Applied Physics', 'Implementation Group', 'Don Smith', 'Project Management',
'Community Choice', 'Power Pool', 'Press Conference', 'Treasury Department', 'Antitrust Act', 'Public Regulation Commission', 'Ray Williams', 'Facility Management', 'Ross Ain',
'Nord Pool', 'SBC Communications, Inc.', 'Global Telecom', 'Corporation Commission', 'Finance Committee', 'Valley Center', 'Motorola, Inc.', 'Fifth Circuit', 'Communications, Inc.',
'International Bureau', 'National Historic Preservation Act', 'Transportation Commission', 'Management Committee', 'South Slope', 'ris k', 'Dennis Harris', 'Public Affairs Committee',
'Data Quality', 'Murray P.', 'Rebecca W.', 'Hardy Jr', 'Barbara A.', 'Mona L.', 'World Trade Center', 'West Gas', 'English News', 'Nigel Shaw', 'Exchange Commission', 'Lisa M.',
'Commerce Department', 'American Water Works', 'American Water', 'Jane M.', 'Global Executive', 'Rob Nichol', 'Bill Ross', 'James Docker', 'Community Affairs', 'Project Lead',
'Mike Heim', 'Quinn Gillespie', 'William Barry', 'Milberg Weiss', '| | |', 'University Health Services', 'Adam N', 'Linda L.', 'Jo Ann', 'William Johnson', 'Blockbuster Inc.',
'Kenneth Rice', 'Commerzbank Securities', 'FPL Group', "Gray Davis'", 'San Diego Gas & Electric Co.', 'John Stout', 'Foundation for Taxpayer and Consumer Rights', 'MCI WorldCom',
'Covad Communications', 'Lucent Technologies', 'Jeff Skilling', 'San Diego Union Tribune', 'McGraw Hill', 'KGO Radio', 'San Diego Gas & Electric', 'Alpert Davis',
'Kern River Gas Transmission', 'Saber Partners', 'SoCal Gas', 'Con Edison', "Mike Day'", 'Technologic Partners', 'H&Q Asia Pacific', 'Law Ministry', 'Kasturi & Sons Ltd',
'Power Purchase Agreement', 'Calpine Corp.', 'Senate Floor', 'Delta Power', 'The California Energy Commission', 'Sierra Pacific Resources', 'Dan Richard',
'The Public Utilities Commission', 'Electronics Boutique', 'The California Public Utilities Commission', 'El Paso Corporation', 'William A. Wise', 'Tibco Software',
'Vivendi Universal', 'AOL Time Warner', 'Qwest Communications International Inc.', 'Gas Authority of India Ltd', 'Dominion Resources', 'Mirant Corp.', 'Michael Aguirre',
'British Petroleum', 'Valero Energy Corp.', 'Capstone Turbine Corp.', 'Conoco Inc.', 'Anadarko Petroleum Corp.', 'Schlumberger Ltd.', 'Deloitte & Touche', 'Japan Corp.',
'Finance Ministry', 'Lucent Technologies Inc.', 'CBS MarketWatch', 'Product Management', 'Jimmy Bean', 'Organization of Petroleum Exporting Countries', 'France Telecom',
'Dell Computer Corp.', 'Credit Lyonnais Securities', 'Azurix Corp.', 'Dow Jones & Company,', 'Illinois Power', 'Avista Corp.', 'Saks Inc.', 'Florida Power & Light',
'Northeast Utilities', 'Fisher Center for Real Estate and Urban Economics', 'Council of Economic Advisors', 'The Orange County Register', 'Mark Johnson',
'Lehman Brothers Holdings Inc.', 'Northwest Natural Gas', 'Comcast Interactive Capital', 'MSN Explorer', 'American Electronics Association', 'Richard Gephardt',
'Fortune Magazine', 'Hugo Chavez', 'Sycamore Networks', 'Corporate Communications', 'Duke Energy Corporation', 'Energy Intelligence Group', 'Montgomery Watson',
'Bertelsmann AG', 'Dresdner Kleinwort Wasserstein', 'Northern and Central California', 'Canada Corp.', 'National Desk', 'The Federal Energy Regulatory Commission',
'Calpine Corporation', '9th Circuit Court of Appeals', 'The Chronicle Publishing Co.', 'Stone & Webster', 'Pacific Gas and Electric', 'Bureau of Reclamation',
'John E. Bryson', 'Cingular Wireless', 'The Public Service Commission', 'Tyco International Ltd.', 'JDS Uniphase', 'Reliant Energy Services', 'Copley News Service',
'Columbia River Basin', 'Energy Services Inc.', 'British Wind Energy Association', 'Energy Systems Inc.', 'Phyllis Hamilton', 'UC Regents', 'National Thermal Power Corporation',
'Washington Bureau', 'Strategic Petroleum Reserve', 'Chuck Watson', 'Simmons & Co.', 'Energy Division', 'Vulcan Ventures', 'ING Barings', 'Science Communications',
'Anschutz Investment', 'Donaldson, Lufkin & Jenrette', 'Sigma Partners', 'Technology Crossover Ventures', 'Morgenthaler Ventures', 'New Millennium Partners',
'Internet Capital Group', 'Network Appliance', 'Hambrecht & Quist', 'Energy Services, Inc.', 'Larry Summers', 'Kohlberg Kravis Roberts & Co.', 'Blockbuster Video',
'Suez Lyonnaise des Eaux', 'John Heine', 'Lester Center for Entrepreneurship and Innovation', 'North American Electric Reliability Council', 'World Trade Organisation',
'Craig D.', 'Joseph Lieberman', 'Eli Lilly & Co.', 'Prudential Securities Inc.', 'Arter & Hadden', 'National Electric Power Authority', 'The Maharashtra Government',
'Judah Rose', 'Mirant Corp', 'Vestas Wind Systems', 'Global Crossing Ltd.', 'B.C. Hydro', 'The Brattle Group', 'The Energy Commission', 'The California Assembly',
'Global Markets', 'Career Services', "Department of Water Resources'", 'Western Energy', 'Ernst & Young', 'ABN Amro', 'Northwest Natural Gas Co.', 'Media Services',
'Steve Ballmer', 'Jeffrey Immelt', 'Wilson Sonsini Goodrich & Rosati', 'Duke Energy Corp', 'The Bonneville Power Administration', 'Regulatory Affairs Department',
'Industrial Development Bank of India', 'Paul Dawson', 'Giga Information', 'Crosspoint Venture Partners', 'Liberate Technologies', 'Chris Bowman', 'Barnes & Noble',
'Michael K. Powell', 'Bridgestone Firestone', 'Sofinnova Ventures', 'Ron Nichols', 'Navigant Consulting Inc.', 'Davis Administration', "Paul O'Neill", 'Joseph Pratt',
'Palm Computing', 'Industrial Finance Corporation', 'Utility Board', 'San Diego Superior Court', 'Con Ed', 'Carl Ingram', 'Pacific Bell Park', 'Mohave Generating Station',
'David Marshall', 'The Sacramento Municipal Utility District', 'U S WEST Communications, Inc.', 'Atal Behari', 'Dan Becker', 'James Woody', 'The City Council',
'The Public Utility Commission', 'Sun America', 'Middle East Economic Digest', 'National Energy Policy Development Group', 'Paul Kaufman', 'Jonathan Leonard',
'California Constitution', '11th Amendment', 'Canaan Partners', 'Whitney & Co.', 'Apollo Management', 'Blue Chip Venture', 'Kleiner Perkins Caufield & Byers',
'Scott Laughlin', 'CA Assembly', 'Labrador Ventures', 'J. & W. Seligman', 'Cable & Wireless', 'Crescendo Ventures', 'Jafco Ventures', 'Texas Pacific Group', 'with Davis',
'PA Consulting', 'Professional Services', 'Network Infrastructure', 'Benchmark Capital', 'Safeguard Scientifics', 'Zone Labs', 'Oxford Bioscience', 'Kodiak Venture Partners',
'Texas Public Utilities Commission', 'Christie Whitman', 'Low Income Home Energy Assistance Program', 'Williams Capital Group', 'Joseph Sent', 'William Blair Capital Partners',
'CNET Networks', 'Polaris Venture Partners', 'Bay Partners', 'Doll Capital Management', 'BP Plc', 'Joe Bob Perkins', 'Edward Kahn', 'Norman Y. Mineta', 'Sr. VP',
'Advent Venture Partners', 'Mark Fabiani', 'Independent Power Producers', 'Artemis Ventures', 'Trident Capital', 'Mohr Davidow Ventures', 'Ask Jeeves',
'The Electric Reliability Council of Texas', 'Democratic Assembly', 'OC Register', 'Gabriel Venture Partners', 'Challenge Fund', 'Insight Capital Partners',
'Sierra Ventures', 'Sandler Capital Management', 'Niagara Mohawk', 'Guy Phillips', 'Department of Health Services', 'John Flory', 'News World Communications, Inc.',
'VantagePoint Venture Partners', 'Walden International', 'Den Danske Bank', 'Lloyds TSB Development Capital', 'A.G. Edwards', 'Terra Lycos', 'SK Global',
'Gray Cary Ware & Freidenrich', 'Field Institute', 'Mexican Energy', 'Corporate Development', 'Willis Stein & Partners', 'Burrill & Co.', 'Prime Ventures',
'The Federal Energy Regulatory', 'Calpine Corp', 'Trinity Ventures', 'Mt. Tam', 'ARCH Venture Partners', 'First Union Capital Partners', 'Columbia Capital', '9th Circuit',
'Real Media', 'Sofinnova Partners', 'World Wide Packets', 'Netscape Communications', 'Department of Defense', 'Atal Behari Vajpayee', 'Holland & Knight', 'ETF Group',
'D.J. Smith', 'RRE Ventures', 'Boston Capital Ventures', 'New World Ventures', 'Global Switch', 'Horizon Ventures', 'Service Factory', 'CB Capital', 'GE Power Systems',
'Campesinos Unidos', 'Schroder Ventures', 'AT&T Canada', 'Coral Energy', 'Jupiter Communications', 'Venture Strategy Partners', 'Davidow Ventures', 'EchoStar Communications',
'AT&T Wireless', 'Itochu International', 'Mike Hansen', 'The California Department of Water Resources', 'GTCR Golder Rauner', "Ontario Teachers' Pension Plan Board",
'San Diego Gas & Electric Co', 'Lehman Brothers Venture Partners', 'MSN Hotmail', 'Mohr Davidow', 'J. & W. Seligman & Co.', 'Faculty Club', 'SAP Ventures', 'Capital Group',
'Pilgrim Baxter', 'Heather Cameron', 'ITC Holdings', 'NIB Capital', 'Datek Online', 'Freei Networks', 'Green Mountain Energy Company', 'Duquesne Light',
'Dell Computer Corporation', 'The Charles Schwab Corporation', 'Bayerische Landesbank', 'StarVest Partners', 'American Lawyer Media', 'Credit Suisse Group',
'Robert Mondavi Winery', 'Allegis Capital', 'Diego Gas & Electric Co.', 'Pervasive Computing', 'Lotus Notes', 'Mirant Corporation', 'Virginia Ellis',
'Electric Power Group', 'Jim Fleming', 'FPL Energy', 'Bechtel Group', 'Reliance Industries Ltd.', 'Richard Ferreira', 'Russell Hubbard', 'TransAlta Energy', 'Joel Newton',
'The Economist Group', 'Eugene Water & Electric Board', 'Qwest Communications', 'The Commission', 'AT&T Broadband', 'Rob Lamkin', 'California Supreme Court', 'Kasturi & Sons Ltd.',
'Kaufman, Paul', 'George H. Ryan', 'National Cable Television Association', 'Mobile Services', 'Public Utilities Act', 'Cambridge Silicon Radio', 'Clinton Administration',
'CSU Fresno', 'EBS, Inc.', 'Network Engineering', 'Common Carrier', 'BellSouth Telecommunications, Inc.', 'Nextel Communications, Inc.', 'Southwestern Bell Telephone Co.',
'Qwest Communications International, Inc.', 'WorldCom, Inc.', 'The State Corporation Commission', 'Lucent Technologies, Inc.', 'Cable Services',
'National Exchange Carrier Association, Inc.', 'John D. Rockefeller IV', 'FPL FiberNet', 'EOG Resources, Inc.', 'Catholic Health East', 'Christi L.', 'Mr Munde',
'Northern Natural Gas Co.', 'BSES Ltd.', 'BSES Ltd', 'Berkshire Hathaway Inc.', 'James J. Cramer', 'Robert Christensen', 'The Goldman Sachs Foundation', 'George Vaughn',
'David McManus', 'Gas Authority of India', 'Mary Lynne'
]
global_override.extend(swapped_titles.copy())
print(f"Added {len(swapped_titles)} to global override.")
global_override = list(set(global_override))
print(f"Total global override size: {len(global_override)}")
with open(f"{MY_PATH}/Enron_{INBOX}/hard_block_global_override.json", "w") as f:
json.dump(global_override, f)
for title in remove_local:
hard_coded_dictionary[title] = 1
for title in global_override:
hard_coded_dictionary[title] = 1
return hard_coded_dictionary
def create_named_entity_maps():
print("Creating named entity maps!")
global_ents = Counter()
local_ents = Counter()
# load stuff
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
with open("wikititles.json") as f:
wikititles = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
qid2types_wiki_filtered, title2qid, _, _, _, _ = global_entity_valid_types(wikititles)
hard_coded_dictionary = hard_coded_remove_local_entities()
with open(f"{MY_PATH}/Enron_{INBOX}/linkedentities2nertags_global.json") as f:
linkedentities2nertags_global = json.load(f)
nertags2psgs_global = defaultdict(list)
nertags2psgs_local = defaultdict(list)
fname = f"{MY_PATH}/Enron_{INBOX}/global_existence_cache.json"
if os.path.isfile(fname):
with open(fname) as f:
global_existence_cache = json.load(f)
else:
global_existence_cache = {}
# iterate through passages
EnronPassages_New = {}
for k, v in tqdm(EnronPassages.items()):
# filter entities in passages with tons of emails, NED model does poorly on these names
email_words = v['text'].count("@")
email_words += v['text'].count("E-mail")
if email_words > 1:
v['GLOBAL_ENTITIES'] = []
v['LOCAL_ENTITIES'] = []
EnronPassages_New[k] = v.copy()
continue
# if the passage has global entities
title_in_global = []
title_not_global = []
for ent in v['linked_entities_lst']:
title = ent['title']
if title in global_existence_cache:
if global_existence_cache[title]:
title_in_global.append((title, linkedentities2nertags_global[title]))
else:
title_not_global.append((title, linkedentities2nertags_global[title]))
else:
if title in wikititles:
global_existence_cache[title] = 1
title_in_global.append((title, linkedentities2nertags_global[title]))
else:
global_existence_cache[title] = 0
title_not_global.append((title, linkedentities2nertags_global[title]))
for tag in v['ner_tags_lst']:
title = ner_alias_replacements(tag['text'])
if len(title.split()) > 1 and tag['ner'] in VALID_NER_TYPES:
if title in global_existence_cache:
if global_existence_cache[title]:
title_in_global.append((title, tag['ner']))
else:
title_not_global.append((title, tag['ner']))
else:
if title in wikititles:
global_existence_cache[title] = 1
title_in_global.append((title, tag['ner']))
else:
global_existence_cache[title] = 0
title_not_global.append((title, tag['ner']))
title_not_global = [t for t in title_not_global if not hard_coded_dictionary[t[0]] == 1]
variations_lst, title_not_global, new_text, new_sents, global_existence_cache = get_variations_lst(title_not_global, wikititles=wikititles, cache=global_existence_cache, text=v['text'], sents=v['sents'])
v['text'] = new_text
v['sents'] = new_sents
for title, tag in variations_lst.items():
if title not in global_existence_cache:
global_existence_cache[title] = 1
title_in_global.append((title, tag))
# save local and global entities for the psg
filtered_psg_local_ents = []
filtered_psg_global_ents = []
for tup in title_in_global:
ent, nertag = tup[0], tup[1]
global_ents[ent] += 1
filtered_psg_global_ents.append(ent)
filter_a = filter_global_ent(ent, wikipassages2sents, title2qid, qid2types_wiki_filtered)
if not filter_a:
nertags2psgs_global[nertag].append((v['id'], ent))
for tag in v['ner_tags_lst']:
tag_text = ner_alias_replacements(tag['text'])
if tag_text and tag_text not in filtered_psg_global_ents and tag['ner'] in VALID_NER_TYPES and local_ents_checker(tag_text, hard_coded_dictionary):
if len(tag_text.split()) > 1:
local_ents[tag_text] += 1
filtered_psg_local_ents.append(tag_text)
nertags2psgs_local[tag['ner']].append((v['id'], tag_text))
v['GLOBAL_ENTITIES'] = filtered_psg_global_ents.copy()
v['LOCAL_ENTITIES'] = filtered_psg_local_ents.copy()
EnronPassages_New[k] = v.copy()
# save stuff
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
json.dump(EnronPassages_New, f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json", "w") as f:
json.dump(local_ents, f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_entities.json", "w") as f:
json.dump(global_ents, f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_existence_cache.json", "w") as f:
json.dump(global_existence_cache, f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json", "w") as f:
json.dump(nertags2psgs_local, f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json", "w") as f:
json.dump(nertags2psgs_global, f)
# DUPLICATE PASSAGES
def identify_duplicates(EnronPassages):
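    """Mark a passage as a duplicate if one of its sentences was already seen as
    a first sentence, or if its entity set overlaps a previously accepted
    passage's by more than ENT_OVERLAP_THRESH entities and by at least
    OVERLAP_PCT_THRESH of its own entities."""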
ENT_OVERLAP_THRESH = 5
OVERLAP_PCT_THRESH = 0.75
entity_sets = []
first_sentences = []
first_sentence_map = defaultdict(list)
duplicates_map = {}
# metadata
num_duplicates = 0
sentences_matched = 0
entities_overlapped = 0
for key, passage in tqdm(EnronPassages.items()):
sents = passage['sents']
ents = passage['GLOBAL_ENTITIES'].copy()
ents.extend(passage['LOCAL_ENTITIES'].copy())
entity_set = set(ents)
# check if it's a duplicate
is_duplicate = 0
for sent in sents:
if sent in first_sentences:
is_duplicate = 1
sentences_matched += 1
first_sentence_map[sent].append(key)
break
        if not is_duplicate:
            for ent_set in entity_sets:
                overlap = len(entity_set.intersection(ent_set))
                if overlap > ENT_OVERLAP_THRESH and overlap / len(entity_set) >= OVERLAP_PCT_THRESH:
                    is_duplicate = 1
                    entities_overlapped += 1
                    break  # count each passage at most once
# save whether it's a duplicate or not
if not is_duplicate:
for sent in sents:
if len(sent.split()) > 1:
first_sentences.append(sent)
break
entity_sets.append(entity_set)
first_sentence_map[sents[0]].append(key)
duplicates_map[key] = False
else:
duplicates_map[key] = True
num_duplicates += 1
print(f"Marked {num_duplicates} passages as duplicates.")
print(f"For {sentences_matched} passages, the first sentences matched exactly.")
print(f"For {entities_overlapped} passages, the entity set had a high overlap with another passage's entity set.\n")
with open("first_sentence_map.json", "w") as f:
json.dump(first_sentence_map, f)
return duplicates_map
def global_entity_valid_types(wikititles):
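    """Load Bootleg/Wikidata type metadata, keep only types with >=1000 entities
    among `wikititles`, and return the filtered qid->types map plus supporting
    title/QID/type lookups."""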
print("Loading type information ...")
    with open('/checkpoint/simarora/open_domain_data/BOOTLEG_entitydb/data/entity_db/entity_mappings/qid2title.json') as f:
qid2title = json.load(f)
with open("/checkpoint/simarora/open_domain_data/BOOTLEG_entitydb/data/entity_db/type_mappings/wiki/type_vocab.json") as f:
wiki_type_vocab = json.load(f)
with open("/checkpoint/simarora/open_domain_data/BOOTLEG_entitydb/data/entity_db/type_mappings/wiki/qid2typeids.json") as f:
qid2types_wiki = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
wiki_typeid2name = {}
for key, value in wiki_type_vocab.items():
wiki_typeid2name[value] = key
title2qid = {}
for k, v in qid2title.items():
title2qid[v] = k
type2freq = Counter()
type2qids = defaultdict(list)
for title in tqdm(wikititles):
if title in title2qid:
qid = title2qid[title]
types = qid2types_wiki[qid]
for ty in types:
type2freq[wiki_typeid2name[ty]] += 1
if len(wikipassages2sents[title]) > 1:
type2qids[wiki_typeid2name[ty]].append(qid)
# this is the list of types we want to allow for candidate entities
type2freq_filtered = {}
type2qids_filtered = {}
for ty, ct in type2freq.items():
if ct >= 1000:
type2freq_filtered[ty] = ct
type2qids_filtered[ty] = type2qids[ty]
with open("filteredEnronGlobalTypes.json", "w") as f:
json.dump(type2freq_filtered, f)
qid2types_wiki_filtered = {}
for qid, types_lst in tqdm(qid2types_wiki.items()):
filt_types = [wiki_typeid2name[ty] for ty in types_lst if wiki_typeid2name[ty] in type2freq_filtered]
qid2types_wiki_filtered[qid] = filt_types
return qid2types_wiki_filtered, title2qid, type2freq_filtered, type2qids_filtered, qid2title, type2qids
def filter_global_ent(title, wikipassages2sents, title2qid, qid2types_wiki_filtered):
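    """Return 1 (drop) if the candidate's Wikipedia passage has at most one
    sentence, fewer than MIN_PARAGRAPH_WORDS words, no QID, or an empty
    allowed-type list."""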
filter = 0
MIN_PARAGRAPH_WORDS = 20
# Filter 1: the passage is too short, meaning it's probably super vague or specific
if len(wikipassages2sents[title]) <= 1:
filter = 1
# Filter 2: total words in the sentences for the passage, since there's probably too little content to write a q
total_words = 0
for sent in wikipassages2sents[title]:
total_words += len(sent.split())
if total_words < MIN_PARAGRAPH_WORDS:
filter = 1
# Filter 3: if the entity categories are not in the filtered types lists
if title not in title2qid:
filter = 1
else:
qid_a = title2qid[title]
if qid_a in qid2types_wiki_filtered:
types_a = qid2types_wiki_filtered[qid_a]
if not types_a:
filter = 1
return filter
def generate_passage_pairs():
# GENERATE PASSAGE PAIRS
def generate_global_global_pairs(wikititles, qid2types_wiki_filtered, title2qid):
print("Creating global, global passage pairs.")
random.seed(1)
ks = KnowledgeSource()
global_a = random.sample(wikititles, 50000)
# load wiki corpus information
wikititle_exists = defaultdict(int)
for title in wikititles:
wikititle_exists[title] = 1
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
# produce pairs, first load existing saved anchors if it exists
if os.path.exists("page2anchors.json"):
with open("page2anchors.json") as f:
page2anchors = json.load(f)
else:
page2anchors = {}
GLOBAL_GLOBAL_PAIRS = []
added_wikients = []
for title in tqdm(global_a):
if title not in added_wikients:
                if title in page2anchors:
                    anchors = page2anchors[title]
                else:
                    anchors = []  # avoid reusing anchors from a previous iteration when the lookup fails
                    page = ks.get_page_by_title(title)
                    if page:
                        anchors_full = [anchor for anchor in page['anchors'] if anchor['paragraph_id'] == 1]
                        anchors = [anchor for anchor in anchors_full if wikititle_exists[anchor['text']]]
                    page2anchors[title] = anchors  # cache misses too, so failed lookups are not retried
if anchors:
for anchor in anchors:
a, b = title, anchor['text']
# Filter the kinds of anchors we want by granularity
filter_a = filter_global_ent(a, wikipassages2sents, title2qid, qid2types_wiki_filtered)
filter_b = filter_global_ent(b, wikipassages2sents, title2qid, qid2types_wiki_filtered)
if not filter_a and not filter_b:
GLOBAL_GLOBAL_PAIRS.append({'wiki1':a, 'wiki2':b})
added_wikients.append(title)
with open("page2anchors.json", "w") as f:
json.dump(page2anchors, f)
print(f"Collected {len(GLOBAL_GLOBAL_PAIRS)} global, global pairs\n")
return GLOBAL_GLOBAL_PAIRS
def generate_global_local_pairs(EnronPassages, duplicates_map, qid2types_wiki_filtered, title2qid):
print("Creating global, local passage pairs.")
MIN_PSG_ENTITIES = 3
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
GLOBAL_LOCAL_PAIRS = []
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
if len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES']) < MIN_PSG_ENTITIES:
continue
for ent in passage["GLOBAL_ENTITIES"]:
filter_a = filter_global_ent(ent, wikipassages2sents, title2qid, qid2types_wiki_filtered)
if not filter_a:
GLOBAL_LOCAL_PAIRS.append({'enron':key, 'wiki':ent})
print(f"Collected {len(GLOBAL_LOCAL_PAIRS)} local, global pairs\n")
return GLOBAL_LOCAL_PAIRS
    # Note: pairs of local passages linked only through a shared global entity are intentionally omitted (due to the chance of knowledge intersection)
def generate_local_local_pairs(EnronPassages, duplicates_map, freq_local):
print("Creating local, local passage pairs.")
MIN_PSG_ENTITIES = 3
MAX_PSG_ENTITIES = 10
FILT_LOCAL_LOCAL_PAIRS = []
USED_PSG_PAIRS = []
# get a mapping of the passages that contain each local entity
localent2psgkey = defaultdict(list)
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
TOTAL_ENTITIES = len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES'])
if TOTAL_ENTITIES < MIN_PSG_ENTITIES or TOTAL_ENTITIES > MAX_PSG_ENTITIES:
continue
for ent in passage["LOCAL_ENTITIES"]:
if ent in freq_local:
localent2psgkey[ent].append(key)
# pick two passages that mention a local entity as a pair; one pair per local entity
pair_counter = Counter()
for ent, psgs in tqdm(localent2psgkey.items()):
for psg1 in psgs:
for psg2 in psgs:
if psg1 != psg2 and set([psg1, psg2]) not in USED_PSG_PAIRS:
FILT_LOCAL_LOCAL_PAIRS.append({'enron1':psg1, 'enron2':psg2, 'ent':ent})
USED_PSG_PAIRS.append(set([psg1, psg2]))
pair_counter[f"{psg1}_{psg2}"] += 1
break
LOCAL_LOCAL_PAIRS = []
for pair in tqdm(FILT_LOCAL_LOCAL_PAIRS):
            # this pair-count filter is used here as a proxy for duplication
if pair_counter[f"{pair['enron1']}_{pair['enron2']}"] < 5:
LOCAL_LOCAL_PAIRS.append(pair)
print(f"Collected {len(LOCAL_LOCAL_PAIRS)} local, local pairs\n")
return LOCAL_LOCAL_PAIRS
# LOAD ENTITY SETS AND PERSONAL / GLOBAL CORPORA
with open("{MY_PATH}/wikititles.json") as f:
wikititles = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_entities.json") as f:
global_ents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json") as f:
local_ents = json.load(f)
# Here we're choosing to use entities that appear above a THRESHOLD number of times in personal data
THRESH = 5
num = 0
freq_local = []
for key, value in local_ents.items():
if value >= THRESH:
num += 1
freq_local.append(key)
print(f"Found {len(local_ents)} local entities. and {num} entities appear over {THRESH} x.")
num = 0
freq_global = []
for key, value in global_ents.items():
if value >= THRESH:
num += 1
freq_global.append(key)
print(f"Found {len(global_ents)} global entities and {num} global entities appear over {THRESH} x.\n")
# GENERATE THE PASSAGE PAIRS
qid2types_wiki_filtered, title2qid, _, _, _, _ = global_entity_valid_types(wikititles)
fname = f"{MY_PATH}/Enron_{INBOX}/duplicate_enron_psg_map.json"
if os.path.isfile(fname):
with open(fname) as f:
duplicates_map = json.load(f)
else:
duplicates_map = identify_duplicates(EnronPassages)
with open(f"{MY_PATH}/Enron_{INBOX}/duplicate_enron_psg_map.json", "w") as f:
json.dump(duplicates_map, f)
print("Loaded duplicate passages map!\n")
# global global passages
GLOBAL_GLOBAL_PAIRS = generate_global_global_pairs(wikititles, qid2types_wiki_filtered, title2qid)
with open(f"{MY_PATH}/Enron_{INBOX}/global_global_pairs.json", "w") as f:
json.dump(GLOBAL_GLOBAL_PAIRS, f)
# global local passages
GLOBAL_LOCAL_PAIRS = generate_global_local_pairs(EnronPassages, duplicates_map, qid2types_wiki_filtered, title2qid)
with open(f"{MY_PATH}/Enron_{INBOX}/global_local_pairs.json", "w") as f:
json.dump(GLOBAL_LOCAL_PAIRS, f)
# local local passages
LOCAL_LOCAL_PAIRS = generate_local_local_pairs(EnronPassages, duplicates_map, freq_local)
with open(f"{MY_PATH}/Enron_{INBOX}/local_local_pairs.json", "w") as f:
json.dump(LOCAL_LOCAL_PAIRS, f)
def generate_comparison_passage_pairs():
def generate_local_local_comparison(EnronPassages, duplicates_map, freq_local, nertags2psgs_local):
print("Creating local, local passage pairs.")
MIN_PSG_ENTITIES = 2
MAX_PSG_ENTITIES = 10
FILT_LOCAL_LOCAL_PAIRS = []
USED_PSG_PAIRS = []
# get a mapping of the passages that contain each local entity
localent2psgkey = defaultdict(list)
has_enough_ents = []
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
TOTAL_ENTITIES = len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES'])
if TOTAL_ENTITIES < MIN_PSG_ENTITIES or TOTAL_ENTITIES > MAX_PSG_ENTITIES:
continue
has_enough_ents.append(key)
for ent in passage["LOCAL_ENTITIES"]:
if ent in freq_local:
localent2psgkey[ent].append(key)
# pick two passages that mention a local entity as a pair; one pair per local entity
ner2psgkeys = defaultdict(list)
psg2nertags = defaultdict(list)
for NER_TAG, psgs in nertags2psgs_local.items():
for psg in psgs:
ner2psgkeys[NER_TAG].append(psg[0])
psg2nertags[psg[0]].append([NER_TAG, psg[1]])
by_common_ent = 1
if by_common_ent:
pair_counter = Counter()
for NER_TAG, psgs in nertags2psgs_local.items():
passages = psgs.copy()
passages_keys = ner2psgkeys[NER_TAG].copy()
print(f"NER TAG: {NER_TAG}")
for tup1 in tqdm(passages):
inserted = 0
key1, title1 = tup1[0], tup1[1]
is_duplicate = duplicates_map[key1]
if is_duplicate:
continue
passage1 = EnronPassages[key1]
local_ents1 = [ent for ent in passage1['LOCAL_ENTITIES'] if ent != title1 and ent in freq_local]
for ent in local_ents1:
# iterate through passages with a matching local ent
other_passages = localent2psgkey[ent].copy()
random.shuffle(other_passages)
for other_psg in other_passages:
is_duplicate = duplicates_map[other_psg]
if is_duplicate:
continue
if other_psg in passages_keys:
other_nertags = psg2nertags[other_psg]
for tag in other_nertags:
title2 = tag[1]
key2 = other_psg
                                    if (tag[0] == NER_TAG and title2 != ent and title2 != title1
                                            and key1 != key2 and set([key1, key2]) not in USED_PSG_PAIRS
                                            and key1 in has_enough_ents and key2 in has_enough_ents):
FILT_LOCAL_LOCAL_PAIRS.append({'enron1':key1, 'title1': title1, 'types':NER_TAG,
'enron2':key2, 'title2': title2, 'ent':ent})
USED_PSG_PAIRS.append(set([key1, key2]))
pair_counter[f"{key1}_{key2}"] += 1
inserted = 1
break
if inserted:
break
if inserted:
break
if inserted:
break
LOCAL_LOCAL_PAIRS = []
for pair in tqdm(FILT_LOCAL_LOCAL_PAIRS):
if pair_counter[f"{pair['enron1']}_{pair['enron2']}"] < 5:
LOCAL_LOCAL_PAIRS.append(pair)
print(f"Collected {len(LOCAL_LOCAL_PAIRS)} local, local pairs\n")
return LOCAL_LOCAL_PAIRS
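    # Comparison local/local pairs carry the NER tag shared by the two passage
    # titles plus the bridging entity:
    # {'enron1', 'title1', 'types', 'enron2', 'title2', 'ent'}.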
def generate_global_local_comparison(EnronPassages, duplicates_map, wikititles, nertags2psgs_local, nertitles2types_local, wikipassages2sents):
_, _, type2freq_filtered, type2qids_filtered, qid2title, type2qids = global_entity_valid_types(wikititles)
print("Creating local, global passage pairs.")
MIN_PSG_ENTITIES = 2
MAX_PSG_ENTITIES = 10
# get a mapping of the passages that contain each local entity
localent2psgkey = defaultdict(list)
has_enough_ents = []
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
TOTAL_ENTITIES = len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES'])
if TOTAL_ENTITIES < MIN_PSG_ENTITIES or TOTAL_ENTITIES > MAX_PSG_ENTITIES:
continue
has_enough_ents.append(key)
for ent in passage["LOCAL_ENTITIES"]:
if ent in freq_local:
localent2psgkey[ent].append(key)
titlehasWikiTypes = {}
for tag, dic in nertitles2types_local.items():
titlehasWikiTypes[tag] = []
for title, lst in dic.items():
if lst:
titlehasWikiTypes[tag].append(title)
USED_TITLES = []
FILT_GLOBAL_LOCAL_PAIRS = []
USED_PSG_PAIRS = []
pair_counter = Counter()
for NER_TAG, psgs in nertags2psgs_local.items():
passages = psgs.copy()
print(f"NER TAG: {NER_TAG}")
for tup1 in tqdm(passages):
key1, title1 = tup1[0], tup1[1]
if title1 not in titlehasWikiTypes[NER_TAG] or key1 not in has_enough_ents or duplicates_map[key1]:
continue
types = nertitles2types_local[NER_TAG][title1]
qids_lst = type2qids[types[0]].copy()
                while True:  # resample until a wiki passage with more than 2 sentences is found
qid = random.choice(qids_lst)
wikititle = qid2title[qid]
qids_lst.remove(qid)
if len(wikipassages2sents[wikititle]) > 2:
break
if wikititle not in USED_TITLES:
USED_TITLES.append(wikititle)
FILT_GLOBAL_LOCAL_PAIRS.append({'enron1':key1, 'title1': title1, 'wiki':wikititle,
'types': types[0]})
USED_PSG_PAIRS.append(set([key1, wikititle]))
pair_counter[f"{key1}_{wikititle}"] += 1
print(f"Collected {len(FILT_GLOBAL_LOCAL_PAIRS)} local, global pairs\n")
return FILT_GLOBAL_LOCAL_PAIRS
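    # Comparison global/local pairs match an email passage with a random
    # Wikipedia passage of the same entity type:
    # {'enron1', 'title1', 'wiki', 'types'}.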
# LOAD ENTITY SETS AND PERSONAL / GLOBAL CORPORA
with open(f"{MY_PATH}/wikititles.json") as f:
wikititles = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_entities.json") as f:
global_ents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json") as f:
local_ents = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json",) as f:
nertags2psgs_local = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json") as f:
nertags2psgs_global = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertitle2types_local.json") as f:
nertitles2types_local = json.load(f)
# Here we're choosing to use entities that appear above a THRESHOLD number of times in personal data
THRESH = 5
num = 0
freq_local = []
for key, value in local_ents.items():
if value >= THRESH:
num += 1
freq_local.append(key)
print(f"Found {len(local_ents)} local entities. and {num} entities appear over {THRESH} x.")
num = 0
freq_global = []
for key, value in global_ents.items():
if value >= THRESH:
num += 1
freq_global.append(key)
print(f"Found {len(global_ents)} global entities and {num} global entities appear over {THRESH} x.\n")
# GENERATE THE PASSAGE PAIRS
# qid2types_wiki_filtered, title2qid, _, _, _, _ = global_entity_valid_types(wikititles)
fname = f"{MY_PATH}/Enron_{INBOX}/duplicate_enron_psg_map.json"
if os.path.isfile(fname):
with open(fname) as f:
duplicates_map = json.load(f)
else:
assert 0, print("no duplicate passages map")
print("Loaded duplicate passages map!\n")
# global local passages
GLOBAL_LOCAL_PAIRS = generate_global_local_comparison(EnronPassages, duplicates_map, wikititles, nertags2psgs_local, nertitles2types_local, wikipassages2sents)
with open(f"{MY_PATH}/Enron_{INBOX}/compare_global_local_pairs.json", "w") as f:
json.dump(GLOBAL_LOCAL_PAIRS, f)
# local local passages
LOCAL_LOCAL_PAIRS = generate_local_local_comparison(EnronPassages, duplicates_map, freq_local, nertags2psgs_local)
with open(f"{MY_PATH}/Enron_{INBOX}/compare_local_local_pairs.json", "w") as f:
json.dump(LOCAL_LOCAL_PAIRS, f)
# COMPARISON Q HELPER FUNCTIONS
def filter_PERSON_ner(person_lst):
clean_person_lst = []
for tup in person_lst:
person = tup[1]
filt = 0
if any(wd.isupper() for wd in person.split()):
filt = 1
        elif any(wd in person.lower() for wd in ["corp", "california", "<<", ">>", "email", "greetings", "enron", "business", "smart",
                                                 "socal", "@", "director", "inc", "ect", "auditorium", "+", "=", "cos.", "staff", "www.", "pro",
                                                 "department", "manager", "co.", "cos", "strategy", "other", "news", "copyright", "land", "english"]):
filt = 1
elif len(person) > 40:
filt = 1
elif len(person.split()) != 2 or (len(person.split()[0]) <= 2 or len(person.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in person):
filt = 1
elif person in ["Andrew Rich Pinot Noir", "Gordon Smith Announce Partnership", "Jeff Energy Boss", 'Morgan Chase', 'Sac Bee',
"Gary Cohen Importance", "Fleetguard Nelson", "Price Falls", "Grosvenor Estates", 'Ventana Editions',
"Saloman Smith Barney India", "Fleetwood Enter", "Adobe Adobe Photoshop", "GungHo Atmosphere", "Bayless Cos.",
"Long Haul", "eProcurement Inbox", "Pass Code", "Graham Berkeley", "Natexis Investissement", "Walker Digital"]:
filt = 1
if not filt:
clean_person_lst.append(tup)
return clean_person_lst
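# Worked example (hypothetical inputs): ("psg_17", "John Smith") is kept
# (two words, each longer than 2 chars, no digits, no banned keyword), while
# ("psg_42", "ENRON Staff") is dropped (all-caps word and a banned keyword).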
def filter_ORG_ner(org_lst):
clean_org_lst = []
for tup in org_lst:
org = tup[1]
filt = 0
if any((wd[0].islower() and wd not in stop_words) for wd in org.split()):
filt = 1
        elif any(wd in org.lower() for wd in [","]):
filt = 1
elif len(org) > 50:
filt = 1
elif len(org.split()) >= 2 and (len(org.split()[0]) <= 2 or len(org.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in org):
filt = 1
if any(wd.lower() in ["council", "executive", "market"] for wd in org.split()):
filt = 1
if org in ["Philip Angelides", "Market Participant", "Independence Accounts"]:
filt = 1
elif org in []:
filt = 1
if not filt:
clean_org_lst.append(tup)
return clean_org_lst
def filter_EVENT_ner(event_lst):
clean_event_lst = []
event_words = [ "conference", "event", "session", "event", "weekend", "luncheon",
"festival", "workshop", "debate", "speech", "parade", "forum",
"summit", "briefing", "lecture", "night"
]
for tup in event_lst:
event = tup[1]
filt = 0
        if any(wd in event.lower() for wd in [","]):
filt = 1
elif len(event) > 50:
filt = 1
elif len(event.split()) >= 2 and (len(event.split()[0]) <= 2 or len(event.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in event):
filt = 1
elif event in ["The Citysearch Weekend Guide", "Knowledge Forum", "Peyton Panel", "Bay Area Air"]:
filt = 1
elif not any(wd in event_words for wd in event.lower().split()):
filt = 1
if not filt:
clean_event_lst.append(tup)
return clean_event_lst
def filter_LOC_ner(event_lst):
clean_event_lst = []
event_words = [
'bay', 'west', 'valley', 'north', 'south', 'east', 'the', 'coast', 'southern', 'central', 'river', 'area', 'district', 'pacific', 'northwest', 'california',
'silicon', 'island', 'san', 'lake', 'northern', 'asia', 'air', 'park', 'america', 'gulf', 'quality', 'sea', 'city', 'asiapacific', 'atlantic',
'drive', 'region', 'capital', 'western', 'basin', 'round', 'new', 'europe', 'county', 'border', 'desert', 'blvd', 'water', 'el', 'arctic',
'summit', 'inn', 'plant', 'southwest', 'road', 'st.', 'offshore', 'wind', 'regional', 'middle', 'indian', 'pine', 'wildlife', 'arabian',
'chapter', 'point', 'rim', 'ventures', 'islands', 'eastern', 'dieg', 'hill', 'mt.', 'jose', 'mission', 'avenue', 'castle', 'cleone', 'gardens',
'mendocino', 'schools', 'redwood', 'persian', 'board', 'field', 'san', 'jose', 'land', 'bluff', 'creek', 'dorado', 'hills',
'refuge', 'walla', 'little', 'mount', 'tower', 'energy', 'morro', 'upper', 'lands', 'block', 'american', 'plaza',
'pac', 'location', 'rock', 'marina', 'salt', 'generators', 'rto', 'verde', 'hudson', 'belt', 'orange', 'valley', 'ave', 'palm', 'napa', 'region',
'town', 'coasts', 'international', 'white', 'plains', 'angels', 'las', 'vegas', 'japan', 'los', 'england', 'india', 'great', 'basin', 'ocean',
'new', 'york', 'long', 'isle', 'woodlands', 'holland', 'arkansas'
]
for tup in event_lst:
event = tup[1]
filt = 0
if len(event) > 50:
filt = 1
elif len(event.split()) >= 2 and (len(event.split()[0]) <= 2 or len(event.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in event):
filt = 1
elif any(wd.lower() in ["residents", "fund", "capital", "big", "council"] for wd in event.split()):
filt = 1
elif not any(wd in event_words for wd in event.lower().split()):
filt = 1
if not filt:
clean_event_lst.append(tup)
return clean_event_lst
def filter_LAW_ner(event_lst):
clean_event_lst = []
event_words = [
'act', 'agreement', 'code', 'reform', 'bill', 'amendment', 'rights',
'rules', 'constitution', 'law', 'clause', 'compliance', 'bill',
'protocol', 'certification', 'policy', 'contract', 'standards'
]
for tup in event_lst:
event = tup[1]
filt = 0
if len(event) > 50:
filt = 1
elif len(event.split()) >= 2 and (len(event.split()[0]) <= 2 or len(event.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in event):
filt = 1
elif not any(wd in event_words for wd in event.lower().split()):
filt = 1
if not filt:
clean_event_lst.append(tup)
return clean_event_lst
def filter_ner_maps():
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json") as f:
nertags2psgs_local = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json") as f:
nertags2psgs_global = json.load(f)
# PEOPLE
clean_person_global = filter_PERSON_ner(nertags2psgs_global["PERSON"].copy())
clean_person_local = filter_PERSON_ner(nertags2psgs_local["PERSON"].copy())
nertags2psgs_global["PERSON"] = clean_person_global
nertags2psgs_local["PERSON"] = clean_person_local
# ORGS
clean_org_global = filter_ORG_ner(nertags2psgs_global["ORG"].copy())
clean_org_local = filter_ORG_ner(nertags2psgs_local["ORG"].copy())
nertags2psgs_global["ORG"] = clean_org_global
nertags2psgs_local["ORG"] = clean_org_local
# EVENTS
clean_event_global = filter_EVENT_ner(nertags2psgs_global["EVENT"].copy())
clean_event_local = filter_EVENT_ner(nertags2psgs_local["EVENT"].copy())
nertags2psgs_global["EVENT"] = clean_event_global
nertags2psgs_local["EVENT"] = clean_event_local
# LOC
clean_loc_global = filter_LOC_ner(nertags2psgs_global["LOC"].copy())
clean_loc_local = filter_LOC_ner(nertags2psgs_local["LOC"].copy())
nertags2psgs_global["LOC"] = clean_loc_global
nertags2psgs_local["LOC"] = clean_loc_local
# LAW
clean_law_global = filter_LAW_ner(nertags2psgs_global["LAW"].copy())
clean_law_local = filter_LAW_ner(nertags2psgs_local["LAW"].copy())
nertags2psgs_global["LAW"] = clean_law_global
nertags2psgs_local["LAW"] = clean_law_local
# PRODUCT
nertags2psgs_global.pop("PRODUCT", None)
nertags2psgs_local.pop("PRODUCT", None)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json", 'w') as f:
json.dump(nertags2psgs_local, f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json", 'w') as f:
json.dump(nertags2psgs_global, f)
def get_bold_spans(ent_title, sents1=[], sents2=[]):
bold_spans1 = []
bold_spans2 = []
ent_words = ent_title.split()
ent_words = [wd for wd in ent_words if wd.lower() not in stop_words and (not any(ch.isdigit() for ch in wd))]
if sents1:
for sent in sents1:
sent_spans = []
for ind, tok in enumerate(sent.split()):
if any(wd in tok for wd in ent_words):
sent_spans.append(ind)
bold_spans1.append(sent_spans)
if sents2:
for sent in sents2:
sent_spans = []
for ind, tok in enumerate(sent.split()):
if any(wd in tok for wd in ent_words):
sent_spans.append(ind)
bold_spans2.append(sent_spans)
return bold_spans1, bold_spans2
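# Worked example (hypothetical inputs): with ent_title = "Transwestern Pipeline"
# and sents1 = ["The Transwestern Pipeline runs west.", "No match here."], the
# case-sensitive token substring match yields bold_spans1 = [[1, 2], []]:
# tokens 1 and 2 of the first sentence contain an entity word, while the
# second sentence contains none.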
# MAIN ALGORITHM
def save_final_passage_pairs():
# load key pairs
with open(f"{MY_PATH}/Enron_{INBOX}/global_global_pairs.json") as f:
GLOBAL_GLOBAL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_local_pairs.json") as f:
GLOBAL_LOCAL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_local_pairs.json") as f:
LOCAL_LOCAL_PAIRS = json.load(f)
# load corpora
with open(f"{MY_PATH}/wikititles.json") as f:
wikititles = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
# load category information
_, _, type2freq_filtered, type2qids_filtered, qid2title, _ = global_entity_valid_types(wikititles)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json",) as f:
nertags2psgs_local = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json") as f:
nertags2psgs_global = json.load(f)
# save final passage pairs
bridge_passage_pairs = defaultdict(dict)
pair_unq_idx = 0
DATASET_SIZE = 80000
while pair_unq_idx < DATASET_SIZE:
r1 = 1
hop1 = random.random() < 0.5
hop2 = random.random() < 0.5
unq_idx = f"PAIRIDX:{pair_unq_idx}"
if r1:
if hop1 and hop2:
if GLOBAL_GLOBAL_PAIRS:
pair = random.choice(GLOBAL_GLOBAL_PAIRS)
GLOBAL_GLOBAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['wiki2'], sents1=wikipassages2sents[pair['wiki1']], sents2=wikipassages2sents[pair['wiki2']])
bridge_passage_pairs[pair_unq_idx] = {
'sents1': wikipassages2sents[pair['wiki1']],
'title1': pair['wiki1'],
'sents2': wikipassages2sents[pair['wiki2']],
'title2': pair['wiki2'],
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'hint': f"Consider forming questions which use the entity '{pair['wiki2']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
elif hop1 and not hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['wiki'], sents1=wikipassages2sents[pair['wiki']], sents2=EnronPassages[pair['enron']]['sents'])
bridge_passage_pairs[pair_unq_idx] = {
'sents2': EnronPassages[pair['enron']]['sents'],
'title2': f"Enron Email Number: {EnronPassages[pair['enron']]['id']}",
'sents1': wikipassages2sents[pair['wiki']],
'title1': pair['wiki'],
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'bold_spans': [],
'hint': f"Consider forming questions which use the entity '{pair['wiki']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
elif not hop1 and hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['wiki'], sents1=EnronPassages[pair['enron']]['sents'], sents2=wikipassages2sents[pair['wiki']])
bridge_passage_pairs[pair_unq_idx] = {
'sents1': EnronPassages[pair['enron']]['sents'],
'title1': f"Enron Email Number: {EnronPassages[pair['enron']]['id']}",
'sents2': wikipassages2sents[pair['wiki']],
'title2': pair['wiki'],
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'bold_spans': [],
'hint': f"Consider forming questions which use the entity '{pair['wiki']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
elif not hop1 and not hop2:
if LOCAL_LOCAL_PAIRS:
pair = random.choice(LOCAL_LOCAL_PAIRS)
LOCAL_LOCAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['ent'], sents1=EnronPassages[pair['enron1']]['sents'], sents2=EnronPassages[pair['enron2']]['sents'])
bridge_passage_pairs[pair_unq_idx] = {
'sents1': EnronPassages[pair['enron1']]['sents'],
'title1': f"Enron Email Number: {EnronPassages[pair['enron1']]['id']}",
'sents2': EnronPassages[pair['enron2']]['sents'],
'title2': f"Enron Email Number: {EnronPassages[pair['enron2']]['id']}",
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'bold_spans': [],
'hint': f"Consider forming questions which use the entity '{pair['ent']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
else:
assert 0, print("Error in path selection!")
if pair_unq_idx % 1000 == 0:
print(f"Wrote {pair_unq_idx} questions.")
print("Done collecting bridge pairs.")
with open(f"{MY_PATH}/Enron_{INBOX}/compare_local_local_pairs.json") as f:
LOCAL_LOCAL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/compare_global_local_pairs.json") as f:
GLOBAL_LOCAL_PAIRS = json.load(f)
comparison_passage_pairs = defaultdict(dict)
COMPARISON_SIZE = 30000
USED_TITLES = []
pair_unq_idx = DATASET_SIZE
while pair_unq_idx < DATASET_SIZE+COMPARISON_SIZE:
r2 = random.random() < 0.5
hop1 = random.random() < 0.5
hop2 = random.random() < 0.5
sents1 = []
sents2 = []
unq_idx = f"PAIRIDX:{pair_unq_idx}"
if 1:
if not hop1 or not hop2:
if not hop1 and not hop2:
if LOCAL_LOCAL_PAIRS:
pair = random.choice(LOCAL_LOCAL_PAIRS)
LOCAL_LOCAL_PAIRS.remove(pair)
title1 = pair['title1']
key1 = pair['enron1']
sents1 = EnronPassages[key1]['sents']
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
title2 = pair['title2']
key2 = pair['enron2']
sents2 = EnronPassages[key2]['sents']
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
words1 = title1.split()
words2 = title2.split()
if len(set(words1).intersection(set(words2))) > 0:
continue
boldspans_psg1_ent, boldspans_psg2_ent = get_bold_spans(pair['ent'], sents1=sents1, sents2=sents2)
types = NER_TYPES_DICT[pair['types']]
common_ent = f"The entity {pair['ent']} also appears in both these passages."
elif not hop1 and hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
title1 = pair['title1']
key1 = pair['enron1']
sents1 = EnronPassages[key1]['sents']
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
title2 = pair['wiki']
sents2 = wikipassages2sents[title2]
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
words1 = title1.split()
words2 = title2.split()
if len(set(words1).intersection(set(words2))) > 0:
continue
types = pair['types']
                        common_ent = ''
elif hop1 and not hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
title1 = pair['wiki']
sents1 = wikipassages2sents[title1]
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
title2 = pair['title1']
key2 = pair['enron1']
sents2 = EnronPassages[key2]['sents']
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
words1 = title1.split()
words2 = title2.split()
if len(set(words1).intersection(set(words2))) > 0:
continue
types = pair['types']
                        common_ent = ''
if r2:
hint = f"Write a YES or NO question. Some information that can help you (feel free to ignore!) is that entity {title1} in passage 1 and entity 2 {title2} may have the '{types}' property in common. {common_ent}"
choices = [{"option":"Yes"}, {"option":"No"}]
else:
hint = f"Some information that can help you (feel free to ignore!) is that entity {title1} in passage 1 and entity 2 {title2} may have the '{types}' property in common. {common_ent}"
choices = [{"option":title1}, {"option":title2}]
else:
WikiCategory = random.choice(list(type2freq_filtered.keys()))
qids_lst = type2qids_filtered[WikiCategory].copy()
qid1 = random.choice(qids_lst)
qids_lst.remove(qid1)
title1 = qid2title[qid1]
if title1 not in USED_TITLES:
USED_TITLES.append(title1)
sents1 = wikipassages2sents[title1]
key1 = title1
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
qid2 = random.choice(qids_lst)
qids_lst.remove(qid2)
title2 = qid2title[qid2]
if title2 not in USED_TITLES:
USED_TITLES.append(title2)
sents2 = wikipassages2sents[title2]
key2 = title2
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
if r2:
hint = f"Both passages are about '{WikiCategory.upper()}' entities: '{title1} 'in paragraph 1, and '{title2}' in paragraph 2. Write a question that compares the two entities and can be answered with YES or NO."
choices = [{"option":"Yes"}, {"option":"No"}]
else:
hint = f"Both passages are about '{WikiCategory.upper()}' entities: '{title1}' in paragraph 1, and '{title2}' in paragraph 2. Write a question that compares the two entities."
choices = [{"option":title1}, {"option":title2}]
if sents1 and sents2:
comparison_passage_pairs[pair_unq_idx] = {
'sents1': sents1,
'title1': title1,
'entity1': key1,
'sents2': sents2,
'title2': title2,
'entity2': key2,
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'multiple_choice': choices,
'hint': hint,
'domains': [hop1, hop2],
'type':'comparison',
'unq_idx': unq_idx
}
pair_unq_idx += 1
if pair_unq_idx % 1000 == 0:
print(f"Wrote {pair_unq_idx} questions.")
print("Done collecting comparison pairs.")
# format for the frontend interface
preprocessed = []
for key, pair in bridge_passage_pairs.items():
preprocessed.append(pair)
output_dict = {
'entries': preprocessed
}
with open(f"{MY_PATH}/Enron_{INBOX}/BRIDGE_PASSAGE_PAIRS_110121.json", "w") as f:
json.dump(output_dict, f)
preprocessed = []
for key, pair in comparison_passage_pairs.items():
preprocessed.append(pair)
output_dict = {
'entries': preprocessed
}
with open(f"{MY_PATH}/Enron_{INBOX}/COMPARISON_PASSAGE_PAIRS_01152022.json", "w") as f:
json.dump(output_dict, f)
def convert_pairs_to_batches():
BATCH_SIZE = 10
with open(f"{MY_PATH}/Enron_{INBOX}/BRIDGE_PASSAGE_PAIRS_110121.json") as f:
ALL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/COMPARISON_PASSAGE_PAIRS_01152022.json") as f:
ALL_PAIRS = json.load(f)
BATCH = {'entries': []}
BATCH_NUM = 0
for entry in tqdm(ALL_PAIRS['entries']):
BATCH['entries'].append(entry)
        if len(BATCH['entries']) >= BATCH_SIZE:  # flush once the batch is full
with open(f"{MY_PATH}/Enron_{INBOX}/ComparisonBatches01152022/BATCH_{BATCH_NUM}.json", "w") as f:
json.dump(BATCH, f)
BATCH = {'entries': []}
BATCH_NUM += 1
if BATCH_NUM > 10:
BATCH_SIZE = 100
# trailing pairs in the final batch
if len(BATCH['entries']) > 0:
with open(f"{MY_PATH}/Enron_{INBOX}/ComparisonBatches01152022/BATCH_{BATCH_NUM}.json", "w") as f:
json.dump(BATCH, f)
BATCH = {'entries': []}
BATCH_NUM += 1
print(BATCH_NUM)
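# Expected on-disk layout after batching (a sketch; the first ~10 batches hold
# BATCH_SIZE entries, later ones hold 100):
#   ComparisonBatches01152022/BATCH_0.json -> {"entries": [...]}
#   ComparisonBatches01152022/BATCH_1.json -> {"entries": [...]}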
if __name__ == "__main__":
# DETERMINE WHICH STEPS TO RUN
create_corpus = 1
prepare_entity_maps = 1
prepare_comparison_qs = 1
generate_pairs = 1
####### PRODUCE CORPUS ########
if create_corpus:
# 90 minutes
create_local_passages_wrapper()
# 4.5 minutes
identify_duplicates_by_text()
######## REFINE LISTS OF LOCAL AND GLOBAL ENTITIES ########
if prepare_entity_maps:
# 0.5 minutes
st = time.time()
filter_named_entities()
print(f"Filtered named entities in time: {time.time() - st}")
# 35 minutes
st = time.time()
create_named_entity_maps()
print(f"Created named entities map in time: {time.time() - st}")
if prepare_comparison_qs:
filter_ner_maps()
######## GENERATE PASSAGE PAIRS ########
if generate_pairs:
extra_cleaning()
# 1 hour for global/global anchors are presaved, multiple days if we need to query the kilt database
# 1 minute for just global/local and local/local
generate_passage_pairs()
generate_comparison_passage_pairs()
# 1 minute
save_final_passage_pairs()
convert_pairs_to_batches()
| concurrentqa-main | dataset_construction/cleanEnron.py |
import os
import sys
import argparse
import json as json
import pandas as pd
from collections import Counter, defaultdict
from importlib import reload
from email.parser import Parser
# recursively get the document body
def get_body(body):
if type(body) == str:
return [body]
else:
body_results = []
for b in body:
b_value = b.get_payload()
if type(b_value) != str:
body_results.append(get_body(b_value))
else:
body_results.append(b_value)
return body_results
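# A minimal sketch of the behavior (hypothetical input): a plain message body
# yields ["body text"], while a multipart payload whose parts are themselves
# multipart yields a nested list of strings, one leaf per MIME part, mirroring
# the MIME tree structure.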
def parse_document(f):
try:
doc = f.read()
except Exception as e:
print(f"Exception, bad email: {e}!")
doc = ""
email = Parser().parsestr(doc)
parse = defaultdict(list)
for key in email.keys():
parse[key] = email[key]
body = email.get_payload()
parse["Body"] = get_body(body)
return parse
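# The returned dict maps each raw header present in the email (e.g. "From",
# "To", "Subject", "Date") to its value and stores the possibly nested body
# under "Body"; header sets vary per email, hence the defaultdict.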
# recurse because sub-directories can themselves contain sub-directories
def inspect_sub_dir(email_filename):
if os.path.isfile(email_filename):
with open(email_filename, "r") as f:
entry = parse_document(f)
entry["EMAIL_ID"] = email_filename
assert type(entry["Body"]) == list
return [entry]
else:
emails = os.listdir(email_filename)
emails.sort()
database = []
for email in emails:
file_name = email_filename + "/" + email
database.extend(inspect_sub_dir(file_name))
return database
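# inspect_sub_dir walks a maildir folder depth-first: a file is parsed into a
# single-entry list, a directory recurses over its children, and the results
# are concatenated, so arbitrarily nested sub-folders are handled uniformly.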
def make_df(args, inbox):
database = []
sub_dirs = os.listdir(args.data_dir + inbox)
print(sub_dirs)
for sub_dir in sub_dirs:
emails_dir = args.data_dir + inbox + "/" + sub_dir
emails = os.listdir(emails_dir)
emails.sort()
for email in emails:
email_filename = emails_dir + "/" + email
database.extend(inspect_sub_dir(email_filename))
return database
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"Load datasets for enron.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--data_dir",
type=str,
default="/checkpoint/simarora/PersonalDatasets/Enron/maildir/",
help="Raw enron data.",
)
parser.add_argument(
"--db_dir",
type=str,
default="/checkpoint/simarora/PersonalDatasets/Enron/parsed_maildir",
help="Parsed emails directory.",
)
args = parser.parse_args()
inboxes = os.listdir(args.data_dir)
inboxes.sort()
for inbox in inboxes:
if os.path.exists(f"{args.db_dir}/{inbox}_09082021.csv"):
continue
print(f"STARTING FOR INBOX: {inbox}")
try:
database = make_df(args, inbox)
print(f"MADE INITIAL DB: {len(database)}")
email_keys = database[0].keys()
df = pd.DataFrame(database)
outfile = f"{args.db_dir}/{inbox}_09082021.csv"
df.to_csv(outfile)
        except Exception as e:
            print(f"FAILED ON INBOX: {inbox} ({e})")
| concurrentqa-main | dataset_construction/EnronParser.py |
import os
import csv
import ujson
import json
from tqdm import tqdm
import requests
import pandas as pd
import numpy as np
import time
import ast
import random
from collections import Counter, defaultdict, OrderedDict
INBOX = "skilling-j"
def add_entry(q="", idx="", answer=[], sp1={}, sp2={}, typ="", domain=[]):
entry = {
'question': q,
'_id': idx,
'answer': answer,
'sp': [sp1, sp2],
'type': typ, # comparison or bridge
'domain': domain, # 0, 1
}
original_entry = {
'_id':idx,
'answer':answer[0],
'question':q,
'supporting_facts':[[sp1['title'], 0], [sp2['title'], 0]],
'context':[],
'type':typ,
'level':'hard'
}
return entry, original_entry
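# `entry` is the pair-annotation format used by this script, while
# `original_entry` resembles the HotpotQA-style schema (_id / answer /
# question / supporting_facts / context / type / level).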
local_global_queries = []
original_queries = []
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/subject2sents.json") as f:
subject2sents = json.load(f)
# question
entry, original_entry = add_entry(q="The company providing natural gas transmission between Western US states such as New Mexico and Texas is helping support credit lines worth how much money?",
idx="01P",
answer=["$1 billion"],
sp1={'title': 'Transwestern Pipeline',
"sents": wikipassages2sents['Transwestern Pipeline'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL daily update',
"sents":subject2sents['PERSONAL daily update'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Texas Pacific Group is known for investments in what motorcycle company?",
idx="02P",
answer=["Ducati"],
sp1={'title': 'TPG Capital',
"sents": wikipassages2sents['TPG Capital'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: jeff skilling for tpg ceo conference',
"sents":subject2sents['PERSONAL re: jeff skilling for tpg ceo conference'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What type of partnership does Enron want to form with the media entertainment conglomerate, which is based in Burbank, California?",
idx="03P",
answer=["broadband"],
sp1={'title':'The Walt Disney Company',
"sents":wikipassages2sents['The Walt Disney Company'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: broadband partnership with walt disney corp',
"sents":subject2sents['PERSONAL re: broadband partnership with walt disney corp'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many times per year can the exam that the Enron candidate from Princeton took be taken?",
idx="04P",
answer=["five", "5"],
sp1={'title': 'PERSONAL enron candidate',
"sents":subject2sents['PERSONAL enron candidate'],
'sp_sent_ids': [0, 1, 2]},
sp2={'title':'Graduate Management Admission Test',
"sents":wikipassages2sents['Graduate Management Admission Test'],
'sp_sent_ids': [0, 4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the current growth rate of the Fortune 500 company originally called Metropolitan Pathology Labs?",
idx="05",
answer=["50%", '50% per year'],
sp1={'title':'Quest Diagnostics',
"sents":wikipassages2sents['Quest Diagnostics'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL mischer-interfit health',
"sents":subject2sents['PERSONAL mischer-interfit health'],
'sp_sent_ids': [20]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How much is the Atlanta based contact technology company offering per newly referred customer?",
idx="06P",
answer=["$1,000.00", "$1,000.00 plus"],
sp1={'title':'Noble Systems Corporation',
"sents":wikipassages2sents['Noble Systems Corporation'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL noble systems executive update + opportunities for you',
"sents":subject2sents['PERSONAL noble systems executive update + opportunities for you'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Jim Kelly is the CEO participating in the Mastermind Keynote Interview. How many customers does the company Jim Kelly is from have?",
idx="07P",
answer=["7.9 million", "more than 7.9 million"],
sp1={'title':'United Parcel Service',
"sents":wikipassages2sents['United Parcel Service'],
'sp_sent_ids': [0, 3]},
sp2={'title':'PERSONAL re: invitation',
"sents":subject2sents['PERSONAL re: invitation'],
'sp_sent_ids': [5, 8]},
typ="bridge",
domain=[1,0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Does Enron earn money from projects conducted at the U.S. Navy facility located 100 km from Manila Bay?",
idx="08P",
answer=["yes"],
sp1={'title':'Subic Bay',
"sents":wikipassages2sents['Subic Bay'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL end of an era',
"sents":subject2sents['PERSONAL end of an era'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When was the dean just elected to the Enron Board of Directors born?",
idx="09P",
answer=["May 30, 1946", "1946"],
sp2={'title':'William Powers, Jr.',
"sents":wikipassages2sents['William Powers, Jr.'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL enron update0',
"sents":subject2sents['PERSONAL enron update0'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="After Enron announced the $1 billion credit line, it’s S&P rating was the same as Hyundai Haesang’s S&P rating?",
idx="10P",
answer=["yes"],
sp1={'title':'Hyundai Marine & Fire Insurance',
"sents":wikipassages2sents['Hyundai Marine & Fire Insurance'],
'sp_sent_ids': [0, 5]},
sp2={'title':'PERSONAL enron update',
"sents":subject2sents['PERSONAL enron update'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did any of the selected guests who will be at the Insight Capital May 15th dinner work for Goldman Sachs?",
idx="11P",
answer=["yes"],
sp2={'title':'Robert Rubin',
"sents":wikipassages2sents['Robert Rubin'],
'sp_sent_ids': [0, 2]},
sp1={'title':'PERSONAL re: telephone call with jerry murdock15',
"sents":subject2sents['PERSONAL re: telephone call with jerry murdock15'],
'sp_sent_ids': [7, 8, 10]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are any of my fellow invitees for the Insight Capital dinner chemical engineers?",
idx="12P",
answer=["yes"],
sp2={'title':'Jack Welch',
"sents":wikipassages2sents['Jack Welch'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL re: telephone call with jerry murdock15',
"sents":subject2sents['PERSONAL re: telephone call with jerry murdock15'],
'sp_sent_ids': [8, 10]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="On what day is the upcoming luncheon with the co-founder of Netscape, Hewlett-Packard, and Mosaic?",
idx="13P",
answer=["Friday, June 22nd"],
sp1={'title':'Marc Andreessen',
"sents":wikipassages2sents['Marc Andreessen'],
'sp_sent_ids': [0, 1, 2, 3]},
sp2={'title':'PERSONAL marc andreessen in dallas 6/22...0',
"sents":subject2sents['PERSONAL marc andreessen in dallas 6/22...0'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Will there be lunch at the event for the Cambridge Ivy League institution?",
idx="14P",
answer=["no"], # there will be dinner...
sp1={'title':'Harvard University',
"sents":wikipassages2sents['Harvard University'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: harvard forum 05/18/01 - second invite1',
"sents":subject2sents['PERSONAL re: harvard forum 05/18/01 - second invite1'],
'sp_sent_ids': [5,6]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Where is the headquarters for the association with an upcoming advertisement in 'On Scene' magazine?",
idx="15P",
answer=["Fairfax, Virginia"],
sp2={'title':"International Association of Fire Chiefs" ,
"sents":wikipassages2sents["International Association of Fire Chiefs"],
'sp_sent_ids': [0, 3]},
sp1={'title':'PERSONAL the list, legal opinion & other news',
"sents":subject2sents['PERSONAL the list, legal opinion & other news'],
'sp_sent_ids': [4, 5]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When visiting the affluent summer colony located south of Cape Cod, Jeff mentioned mentioned he wanted to walk through what?",
idx="16P",
answer=["house", "our house"],
sp1={'title':"Martha\'s Vineyard",
"sents":wikipassages2sents["Martha\'s Vineyard"],
'sp_sent_ids': [0]},
sp2={'title': 'PERSONAL re: christmas gathering2',
"sents":subject2sents['PERSONAL re: christmas gathering2'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When was the speaker for Commercial and Political Perspectives luncheon born?",
idx="17P",
answer=["1956"],
sp2={'title':'Bernard Harris (disambiguation)',
"sents":wikipassages2sents['Bernard Harris (disambiguation)'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL re: hbs april 25 luncheon - reminder1',
"sents":subject2sents['PERSONAL re: hbs april 25 luncheon - reminder1'],
'sp_sent_ids': [4,5,6,7]},
typ="bridge",
domain=[1,0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="At the golf tournament on Fazio Course, the New York eCommerce Association will dedicate proceeds to an organization affiliated with which International Federation?",
idx="18P",
answer=["International Federation of Red Cross and Red Crescent Societies"],
sp1={'title':'American Red Cross',
"sents":wikipassages2sents['American Red Cross'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL upcoming golf tournament and venture capital conference',
"sents":subject2sents['PERSONAL upcoming golf tournament and venture capital conference'],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="For how many days is Cheryl going to the city historically known as Constantinople and Byzantium?",
idx="19P",
answer=["3"],
sp1={'title':'Istanbul',
"sents":wikipassages2sents['Istanbul'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL a trip to turkey',
"sents":subject2sents['PERSONAL a trip to turkey'],
'sp_sent_ids': [2, 7]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The 2002 television film The Junction Boys was based on a book by an author who signed a book for who?",
idx="20P",
answer=["Jim Bavouset"],
sp1={'title':'The Junction Boys (film)',
"sents":wikipassages2sents['The Junction Boys (film)'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: [smu-betas] 76ers - it\'s time to hump it3',
"sents":subject2sents['PERSONAL re: [smu-betas] 76ers - it\'s time to hump it3'],
'sp_sent_ids': [7]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The popular author in Beta suggested which dinner location for Thursday of homecoming weekend?",
idx="21P",
answer=["The Double Tree and Central Expressway"],
sp1={'title':'PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4',
"sents":subject2sents['PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4'],
'sp_sent_ids': [3]},
sp2={'title':'PERSONAL -kai-3',
"sents":subject2sents['PERSONAL -kai-3'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Beta who elaborated on another dinner idea for Thursday of HC is also organizing an outing for which sport?",
idx="22P",
answer=["golf"],
sp1={'title':'PERSONAL -kai-16',
"sents":subject2sents['PERSONAL -kai-16'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL -kai-19',
"sents":subject2sents['PERSONAL -kai-19'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Is the guy who Chuck Paul added to the Beta list arriving to HC weekend with his son?",
idx="23P",
answer=["no"],
sp1={'title':'PERSONAL -kai-3',
"sents":subject2sents['PERSONAL -kai-3'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL -kai-5',
"sents":subject2sents['PERSONAL -kai-5'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Is the newspaper which blasted Mayor Brown the largest in the United States by Sunday circulation?",
idx="24P",
answer=["no"],
sp1={'title':'PERSONAL chronicle article on hfd',
"sents":subject2sents['PERSONAL chronicle article on hfd'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Houston Chronicle',
"sents":wikipassages2sents['Houston Chronicle'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The substituted CEO of Mexican Petroleums is close to which politician?",
idx="25P",
answer=["Francisco Labastida"],
sp1={'title':'Pemex',
"sents":wikipassages2sents['Pemex'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL change at pemex',
"sents":subject2sents["PERSONAL change at pemex"],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did Rogelio's close friend who was running for presidency win the 2000 presidential election?",
idx="26P",
answer=["no"],
sp2={'title':'Francisco Labastida',
"sents":wikipassages2sents['Francisco Labastida'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL change at pemex',
"sents":subject2sents['PERSONAL change at pemex'],
'sp_sent_ids': [2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The agency that that \"manages pension and health benefits for millions of California employees\" owns how many Enron shares?",
idx="27P",
answer=["2.6 million"],
sp2={'title':'PERSONAL jedi ii',
"sents":subject2sents['PERSONAL jedi ii'],
'sp_sent_ids': [1]},
sp1={'title':'CalPERS',
"sents":wikipassages2sents['CalPERS'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Tonn Ostergard from YPO and Jim Dent live in the same state?",
idx="28P",
answer=["no"],
sp1={'title':'PERSONAL parent child mountain adventure, july 21-25, 2001',
"sents":subject2sents['PERSONAL parent child mountain adventure, july 21-25, 2001'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4',
"sents":subject2sents['PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4'],
'sp_sent_ids': [3, 6]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who supposedly made up lies about the player who won 47 straight games in college football?",
idx="29P",
answer=["Dent"],
sp1={'title':'Bud Wilkinson',
"sents":wikipassages2sents['Bud Wilkinson'],
'sp_sent_ids': [0, 2]},
sp2={'title':'PERSONAL [smu-betas] dent pisses on bud wilkinson\'s grave',
"sents":subject2sents['PERSONAL [smu-betas] dent pisses on bud wilkinson\'s grave'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="In what year was the athletic director of the Big 12 Conference Sooners born?",
idx="30P",
answer=["1957"],
sp1={'title':"Oklahoma Sooners",
"sents":wikipassages2sents["Oklahoma Sooners"],
'sp_sent_ids': [0, 2, 3]},
sp2={'title':'Joe Castiglione (athletic director)',
"sents":wikipassages2sents['Joe Castiglione (athletic director)'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who wrote the song that Bill Miller will need to sing in pink skivvies?",
idx="31P",
answer=["Arthur M Alden"],
sp2={'title':'Boomer Sooner',
"sents":wikipassages2sents['Boomer Sooner'],
'sp_sent_ids': [0, 1, 2]},
sp1={'title':'PERSONAL re: [smu-betas] dent\'s wrath',
"sents":subject2sents['PERSONAL re: [smu-betas] dent\'s wrath'],
'sp_sent_ids': [1, 3]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The fight song of the New Haven based Ivy League University is borrowed from a song written in which year?",
idx="32P",
answer=["1898"],
sp1={'title':'Yale University',
"sents":wikipassages2sents['Yale University'],
'sp_sent_ids': [0]},
sp2={'title':"Boomer Sooner",
"sents":wikipassages2sents["Boomer Sooner"],
'sp_sent_ids': [3]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="For the Astros vs. Giants game at Enron Field, the Enron sign will feature the logo of a nonprofit organization that has how many offices throughout the country?",
idx="33P",
answer=["1,200"],
sp2={'title':'United Way of America',
"sents":wikipassages2sents['United Way of America'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL enron and united way\'s continued partnership',
"sents":subject2sents['PERSONAL enron and united way\'s continued partnership'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="All communications between Enron and LJM must be preserved under an Act created in what year?",
idx="34P",
answer=["1995"],
sp2={'title':'Private Securities Litigation Reform Act',
"sents":wikipassages2sents['Private Securities Litigation Reform Act'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL important announcement regarding document preservation',
"sents":subject2sents['PERSONAL important announcement regarding document preservation'],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the approximate population of the city Mark visited in Georgia?",
idx="35P",
answer=["1.5 million people"],
sp2={'title':'Tbilisi',
"sents":wikipassages2sents['Tbilisi'],
'sp_sent_ids': [0]},
sp1={'title':'<6289674.1075845512831.JavaMail.evans@thyme>',
"sents":subject2sents['<6289674.1075845512831.JavaMail.evans@thyme>'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Was the singer of Baby One More Time requested as a speaker for the Enron eSpeak event?",
idx="36P",
answer=["yes"],
sp1={'title':"Britney Spears",
"sents":wikipassages2sents["Britney Spears"],
'sp_sent_ids': [0, 2, 4]},
sp2={'title':'PERSONAL espeak survey: the results are in!0',
"sents":subject2sents['PERSONAL espeak survey: the results are in!0'],
'sp_sent_ids': [0, 2, 3]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Steve Spar of ESX Engineering and Bruce Wrobel of EnCom hold the same position at their companies?",
idx="37P",
answer=["yes"], # CEO
sp1={'title':'PERSONAL status report on enron\'s investment in encom0',
"sents":subject2sents['PERSONAL status report on enron\'s investment in encom0'],
'sp_sent_ids': [0, 2]},
sp2={'title':'PERSONAL referred by jeff spar (mck - ny)',
"sents":subject2sents['PERSONAL referred by jeff spar (mck - ny)'],
'sp_sent_ids': [10]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the nearest hospital to the area where Reliance is developing a liquid fuel fired power plant?",
idx="38P",
answer=["Dhirubhai Ambani Hospital"],
sp2={'title':'Patalganga',
"sents":wikipassages2sents['Patalganga'],
'sp_sent_ids': [3]},
sp1={'title':'PERSONAL re: maharashtra plants',
"sents":subject2sents['PERSONAL re: maharashtra plants'],
'sp_sent_ids': [3, 4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Dabhol and Patalganga located in the same state in India?",
idx="39P",
answer=["yes"],
sp1={'title':'Patalganga',
"sents":wikipassages2sents['Patalganga'],
'sp_sent_ids': [0]},
sp2={'title':'Dabhol',
"sents":wikipassages2sents['Dabhol'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the name of the online magazine of the Oil Patch District Federal Reserve Bank?",
idx="40P",
answer=["e-Perspectives"],
sp1={'title':'Federal Reserve Bank of Dallas',
"sents":wikipassages2sents['Federal Reserve Bank of Dallas'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL welcome to the federal reserve bank of dallas community affairs\n announcement list',
"sents":subject2sents['PERSONAL welcome to the federal reserve bank of dallas community affairs\n announcement list'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The meeting with Merrill Lynch about the Houston based water services company is on what date?",
idx="41P",
answer=["Monday February 28"],
sp1={'title':"Azurix",
"sents":wikipassages2sents["Azurix"],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: azurix investment banking meeting0',
"sents":subject2sents['PERSONAL re: azurix investment banking meeting0'],
'sp_sent_ids': [3, 6]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Azurix and Enron headquartered in the same city?",
idx="42P",
answer=["yes"],
sp1={'title':'Azurix',
"sents":wikipassages2sents['Azurix'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Enron',
"sents":wikipassages2sents['Enron'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Skilling booked which suite to watch the MLB game for the team based in Houston?",
idx="43P",
answer=["Drayton McLane's"],
sp1={'title':"Houston Astros",
"sents":wikipassages2sents["Houston Astros"],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL astros game',
"sents":subject2sents['PERSONAL astros game'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="A professor at the Pasadena based university co-founded which coporation that's selling a software framework to Enron?",
idx="44P",
answer=["iSpheres"],
sp1={'title':"California Institute of Technology",
"sents":wikipassages2sents["California Institute of Technology"],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: advanced arbitrage enabling technology out of caltech',
"sents":subject2sents['PERSONAL re: advanced arbitrage enabling technology out of caltech'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="To support the longest-serving Republican senator in Montana history, to whom should the checks be made payable?",
idx="45P",
answer=["Friends of Conrad Burns"],
sp1={'title':"Conrad Burns",
"sents":wikipassages2sents["Conrad Burns"],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL senator conrad burns contribution',
"sents":subject2sents['PERSONAL senator conrad burns contribution'],
'sp_sent_ids': [0, 1, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The partner Enron is in litigation with in Federal court proposed to acquire what Corp from Enron?",
idx="46P",
answer=["Enron Renewable Energy Corp"],
sp1={'title':'PERSONAL important announcement regarding document preservation',
"sents":subject2sents['PERSONAL important announcement regarding document preservation'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL project aura; draft disclosures re ljm2',
"sents":subject2sents['PERSONAL project aura; draft disclosures re ljm2'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did Rebecca Carter's replacement for corporate secretary receive her bachelor's degree at a university located in Houston?",
idx="47P",
answer=["no"], # College Station
sp2={'title':"Texas A&M University",
"sents":wikipassages2sents["Texas A&M University"],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL enron board elects new corporate secretary',
"sents":subject2sents['PERSONAL enron board elects new corporate secretary'],
'sp_sent_ids': [0, 1, 6]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Austin based conservative think tank that's part of the State Policy Network is similar to which foundation in D.C.?",
idx="48P",
answer=["Heritage Foundation"],
sp1={'title':'Texas Public Policy Foundation',
"sents":wikipassages2sents['Texas Public Policy Foundation'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL texas public policy foundation dinner - thursday, february 15',
"sents":subject2sents['PERSONAL texas public policy foundation dinner - thursday, february 15'],
'sp_sent_ids': [0, 1, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The man who said Broadband held the future for delivery of Disney's entertainment product was born in what year?",
idx="49P",
answer=["1942"],
sp2={'title':'Michael Eisner',
"sents":wikipassages2sents['Michael Eisner'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL re: broadband partnership with walt disney corp',
"sents":subject2sents['PERSONAL re: broadband partnership with walt disney corp'],
'sp_sent_ids': [2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the two businessmen Jack Welch and Michael Eisner both of the same nationality?",
idx="50P",
answer=["yes"],
sp1={'title':'Jack Welch',
"sents":wikipassages2sents['Jack Welch'],
'sp_sent_ids': [0]},
sp2={'title':'Michael Eisner',
"sents":wikipassages2sents['Michael Eisner'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are emmployees able to access the web portal, which spun off from Time Warner in 2009, on Enron computers?",
idx="51P",
answer=["no"],
sp1={'title':'AOL',
"sents":wikipassages2sents['AOL'],
'sp_sent_ids': [0, 7]},
sp2={'title':'PERSONAL external e-mail sites',
"sents":subject2sents['PERSONAL external e-mail sites'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Why couldn't Wade attend the meeting with the Indian politician who serves as president of the Nationalist Congress Party?",
idx="52P",
answer=["he fell sick"],
sp1={'title':'Sharad Pawar',
"sents":wikipassages2sents['Sharad Pawar'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: meetings with sharad pawar',
"sents":subject2sents['PERSONAL re: meetings with sharad pawar'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which was founded first, the party founded by Sharad Pawar or the Indian National Congress?",
idx="53P",
answer=["Indian National Congress"],
sp2={'title':'Indian National Congress',
"sents":wikipassages2sents['Indian National Congress'],
'sp_sent_ids': [0, 1]},
sp1={'title':'Sharad Pawar',
"sents":wikipassages2sents['Sharad Pawar'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Enron signed the manufacturer founded by Sidney and Bernard in 1953 to manufacture which offering?",
idx="54P",
answer=["CD/DVD"],
sp1={'title':'Harman Kardon',
"sents":wikipassages2sents['Harman Kardon'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL congrats & zapmedia...',
"sents":subject2sents['PERSONAL congrats & zapmedia...'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Houston Astros have played as members of the Junior and \"Senior Circuit\"?",
idx="55P",
answer=["yes"],
sp1={'title':'Houston Astros',
"sents":wikipassages2sents['Houston Astros'],
'sp_sent_ids': [0, 1]},
sp2={'title':'American League',
"sents":wikipassages2sents['American League'],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Azurix Corp and Wessex Water both operate in North America?",
idx="56P",
answer=["no"],
sp1={'title':'Wessex Water',
"sents":wikipassages2sents['Wessex Water'],
'sp_sent_ids': [0]},
sp2={'title':'Azurix',
"sents":wikipassages2sents['Azurix'],
'sp_sent_ids': [0, 2]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Will the sewerage utility company that serves 1.3 million people in England be part of the Enron Global Assets and Services unit?",
idx="57P",
answer=["no"],
sp1={'title':'Wessex Water',
"sents":wikipassages2sents['Wessex Water'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL organizational changes3',
"sents":subject2sents['PERSONAL organizational changes3'],
'sp_sent_ids': [0, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who chose to donate to the company that's \"the equivalent of off-Broadway in Houston\" in the Enron Matching Gift Program?",
idx="58P",
answer=[" Rebecca Skupin"],
sp1={'title':'Stages Repertory Theatre',
"sents":wikipassages2sents['Stages Repertory Theatre'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL enron matching gift program winners',
"sents":subject2sents['PERSONAL enron matching gift program winners'],
'sp_sent_ids': [0, 6]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The employee focusing on distressed counterparties in RAC will represent Enron in which case?",
idx="59P",
answer=["Pacific Gas and Electric Company bankruptcy case"],
sp2={'title':'PERSONAL pg&e bankruptcy case-- important',
"sents":subject2sents['PERSONAL pg&e bankruptcy case-- important'],
'sp_sent_ids': [1, 2]},
sp1={'title':'PERSONAL new legal team to assist rac',
"sents":subject2sents['PERSONAL new legal team to assist rac'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Kevin Hannon and Lisa Mellencamp part of the same Enron business units?",
idx="60P",
answer=["no"],
sp1={'title':'PERSONAL organizational changes3',
"sents":subject2sents['PERSONAL organizational changes3'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL new legal team to assist rac',
"sents":subject2sents['PERSONAL new legal team to assist rac'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did any of the investors in the KnowledgeCube venture capital firm get caught for insider trading?",
idx="61P",
answer=["yes"],
sp1={'title':'PERSONAL mckinsey alums/energy fund',
"sents":subject2sents['PERSONAL mckinsey alums/energy fund'],
'sp_sent_ids': [3, 5]},
sp2={'title':'Rajat Gupta',
"sents":wikipassages2sents['Rajat Gupta'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many Grand Prix wins does Luca Baldisserri's driver at the Monaco Grand Prix have?",
idx="62P",
answer=["91"],
sp2={'title':'Michael Schumacher',
"sents":wikipassages2sents['Michael Schumacher'],
'sp_sent_ids': [2]},
sp1={'title':'PERSONAL monaco grand prix',
"sents":subject2sents['PERSONAL monaco grand prix'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The championship Michael Schumaker has won seven times is sanctioned by which Federation?",
idx="63P",
answer=["Fédération Internationale de l'Automobile"],
sp1={'title':'Michael Schumacher',
"sents":wikipassages2sents['Michael Schumacher'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Formula One',
"sents":wikipassages2sents['Formula One'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The conference which has been held annually at Reliant Park since 1969 has drawn close to how many participants?",
idx="64P",
answer=["50,000"],
sp1={'title':'Offshore Technology Conference',
"sents":wikipassages2sents['Offshore Technology Conference'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL speaking opportunity, otc - may2001',
"sents":subject2sents['PERSONAL speaking opportunity, otc - may2001'],
'sp_sent_ids': [8]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="For which event did Jeff Skilling invite the man who developed the market for \"Junk Bonds\" to speak?",
idx="65P",
answer=["Key Executive breakfast"],
sp1={'title':'Michael Milken',
"sents":wikipassages2sents['Michael Milken'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: michael milken',
"sents":subject2sents['PERSONAL re: michael milken'],
'sp_sent_ids': [0, 8]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When did the ISO that works with the Texas Reliability Entity begin processing switch requests according to PUCT?",
idx="66P",
answer=["July 31, 2001"],
sp1={'title':'Electric Reliability Council of Texas',
"sents":wikipassages2sents['Electric Reliability Council of Texas'],
'sp_sent_ids': [0, 1, 2]},
sp2={'title':'PERSONAL important update on your newpower service',
"sents":subject2sents['PERSONAL important update on your newpower service'],
'sp_sent_ids': [3, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The November Rally Against Terrorism will be held at a Hotel which is accross the street from which subway station?",
idx="67P",
answer=["Pennsylvania Station"],
sp2={'title':'Hotel Pennsylvania',
"sents":wikipassages2sents['Hotel Pennsylvania'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL rally againt terrorism',
"sents":subject2sents['PERSONAL rally againt terrorism'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the Mastermind Keynote Interview in May and Creating Value with Internet Technologies conference in October located in the same city?",
idx="68P",
answer=["no"],
sp1={'title':'PERSONAL re: invitation',
"sents":subject2sents['PERSONAL re: invitation'],
'sp_sent_ids': [5]},
sp2={'title':'PERSONAL speaker invitation to economist conference 24-25 october',
"sents":subject2sents['PERSONAL speaker invitation to economist conference 24-25 october'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Southern California Edison and Pacific Gas & Electric and San Diego Gas & Electric based in the same city?",
idx="69P",
answer=["no"],
sp1={'title':'San Diego Gas & Electric',
"sents":wikipassages2sents['San Diego Gas & Electric'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Pacific Gas and Electric Company',
"sents":wikipassages2sents['Pacific Gas and Electric Company'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Enron was referred to as Williams Companies in the information request for an Act administered by which U.S. Agency?",
idx="70P",
answer=["Environmental Protection Agency"],
sp2={'title':'Clean Water Act',
"sents":wikipassages2sents['Clean Water Act'],
'sp_sent_ids': [0, 3, 4]},
sp1={'title':'PERSONAL fw: 308 information request',
"sents":subject2sents['PERSONAL fw: 308 information request'],
'sp_sent_ids': [0, 2, 3]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many mutual fund offerings does the firm Philippe Bibi is resigning from Enron to join have?",
idx="71P",
answer=["79"],
sp2={'title':'Putnam Investments',
"sents":wikipassages2sents['Putnam Investments'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL philippe bibi',
"sents":subject2sents['PERSONAL philippe bibi'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which firm manages more assets, Galleon Group or Putnam Investments?",
idx="72P",
answer=["Putnam Investments"],
sp1={'title':'Galleon Group',
"sents":wikipassages2sents['Galleon Group'],
'sp_sent_ids': [0]},
sp2={'title':'Putnam Investments',
"sents":wikipassages2sents['Putnam Investments'],
'sp_sent_ids': [0, 1]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The 37th Governer of California has aggressively blamed companies from which state for California\'s energy meltdown?",
idx="73P",
answer=["Texas"],
sp1={'title':'Gray Davis',
"sents":wikipassages2sents['Gray Davis'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL la times article',
"sents":subject2sents['PERSONAL la times article'],
'sp_sent_ids': [3, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The man whos leadership leads CA to purchase power at $135 per megawatt hour appeared on which late-night talk show?",
idx="74P",
answer=["The Tonight Show with Jay Leno"],
sp1={'title':'PERSONAL encouraging poll results',
"sents":subject2sents['PERSONAL encouraging poll results'],
'sp_sent_ids': [3]},
sp2={'title':'PERSONAL the "dark" side of popular culture',
"sents":subject2sents['PERSONAL the "dark" side of popular culture'],
'sp_sent_ids': [1, 4, 7, 8]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The man who sent Wes Carberry an email about Jeff's departure has what street address?",
idx="75P",
answer=["1440 Smith Street"],
sp1={'title':'PERSONAL hope all is well...',
"sents":subject2sents['PERSONAL hope all is well...'],
'sp_sent_ids': [0, 8]},
sp2={'title':'PERSONAL fw: business development opportunity',
"sents":subject2sents['PERSONAL fw: business development opportunity'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the Texas Venture Capital Conference and Southwest Venture Capital Conference supported by any of the same organizations?",
idx="76P",
answer=["yes"], # Houston Technology Center
sp1={'title':'PERSONAL texas venture capital conference - 5.16.01',
"sents":subject2sents['PERSONAL texas venture capital conference - 5.16.01'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL upcoming golf tournament and venture capital conference',
"sents":subject2sents['PERSONAL upcoming golf tournament and venture capital conference'],
'sp_sent_ids': [4]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Frank Ianna reports to a leader from AT&T who is interested in Enron's value propositions from which team specifically?",
idx="77P",
answer=["Enron-Adventis team"],
sp1={'title':'PERSONAL talking points - at&t',
"sents":subject2sents['PERSONAL talking points - at&t'],
'sp_sent_ids': [2, 5]},
sp2={'title':'PERSONAL moving forward: urgent, urgent.',
"sents":subject2sents['PERSONAL moving forward: urgent, urgent.'],
'sp_sent_ids': [1, 6]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="In which year was the individual on the Cogent Communications Advisory Committee from Coca Cola born?",
idx="78P",
answer=["1945"],
sp1={'title':'PERSONAL cogent communications',
"sents":subject2sents['PERSONAL cogent communications'],
'sp_sent_ids': [2, 3, 6]},
sp2={'title':'Sergio Zyman',
"sents":wikipassages2sents['Sergio Zyman'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Sergio Zyman is known for his failure to launch a product which was later renamed to what?",
idx="79P",
answer=["Coke II"],
sp1={'title':'Sergio Zyman',
"sents":wikipassages2sents['Sergio Zyman'],
'sp_sent_ids': [0]},
sp2={'title':'New Coke',
"sents":wikipassages2sents['New Coke'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which state has is larger by area, Calfornia or Texas?",
idx="80P",
answer=["Texas"],
sp1={'title':'California',
"sents":wikipassages2sents['California'],
'sp_sent_ids': [0]},
sp2={'title':'Texas',
"sents":wikipassages2sents['Texas'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did the organization which Nasim spoke to McMahon about exceed its income targets during this first half?",
idx="81P",
answer=["yes"],
sp1={'title':'PERSONAL follow-up on my conversation in november',
"sents":subject2sents['PERSONAL follow-up on my conversation in november'],
'sp_sent_ids': [2, 11]},
sp2={'title':'PERSONAL accomplishments-june 2001',
"sents":subject2sents['PERSONAL accomplishments-june 2001'],
'sp_sent_ids': [2]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who is the assistant to the man responsible for Enron's e-commerce systems development including ClickPaper.com?",
idx="82P",
answer=["Tina Spiller"],
sp1={'title':'PERSONAL re: fw: eworldtradex',
"sents":subject2sents['PERSONAL re: fw: eworldtradex'],
'sp_sent_ids': [12]},
sp2={'title':'PERSONAL your correspondence',
"sents":subject2sents['PERSONAL your correspondence'],
'sp_sent_ids': [3, 4, 5]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Jeff Skilling and Greg Piper have the same assistant at Enron?",
idx="83P",
answer=["no"],
sp1={'title':'PERSONAL your correspondence',
"sents":subject2sents['PERSONAL your correspondence'],
'sp_sent_ids': [8]},
sp2={'title':'PERSONAL re: fw: eworldtradex',
"sents":subject2sents['PERSONAL re: fw: eworldtradex'],
'sp_sent_ids': [12]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The location for Eyeforenergy Asia 2001 is how many degrees north of the equator?",
idx="84P",
answer=["one"],
sp2={'title':'Singapore',
"sents":wikipassages2sents['Singapore'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL t,h: eyeforenergy briefing',
"sents":subject2sents['PERSONAL t,h: eyeforenergy briefing'],
'sp_sent_ids': [7]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Where dit the organizer of EEO Europe: Energy Trading in the New Economy hold its Asia 2001 conference?",
idx="85P",
answer=["Singapore"],
sp1={'title':'PERSONAL eeo europe: energy trading in the new economy',
"sents":subject2sents['PERSONAL eeo europe: energy trading in the new economy'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL t,h: eyeforenergy briefing',
"sents":subject2sents['PERSONAL t,h: eyeforenergy briefing'],
'sp_sent_ids': [7]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The country that had a population of 14 million at its birth in 1923 is bordered by how many countries?",
idx="86P",
answer=["eight"],
sp2={'title':'Turkey',
"sents":wikipassages2sents['Turkey'],
'sp_sent_ids': [0, 1]},
sp1={'title':'<32040359.1075840066357.JavaMail.evans@thyme>',
"sents":subject2sents['<32040359.1075840066357.JavaMail.evans@thyme>'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="After the earthquake in Turkey, Mark suggested sending contributions to an organization with national headquarters built between which years?",
idx="87P",
answer=["1915 and 1917"],
sp2={'title':'American Red Cross National Headquarters',
"sents":wikipassages2sents['American Red Cross National Headquarters'],
'sp_sent_ids': [0, 1]},
sp1={'title':'<32040359.1075840066357.JavaMail.evans@thyme>',
"sents":subject2sents['<32040359.1075840066357.JavaMail.evans@thyme>'],
'sp_sent_ids': [0, 3, 6]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Mark compared the Bosphorous Bridge to a silver version of a bridge that spans a straight how many miles long?",
idx="88P",
answer=["1 mi"],
sp1={'title':'Golden Gate Bridge',
"sents":wikipassages2sents['Golden Gate Bridge'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL fwd: picture!',
"sents":subject2sents['PERSONAL fwd: picture!'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many people visit the 1545000 sqft retail space in Buckhead Atlanta annually?",
idx="89P",
answer=["25 million"],
sp1={'title':'Lenox Square',
"sents":wikipassages2sents['Lenox Square'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL lenox title sponsorship',
"sents":subject2sents['PERSONAL lenox title sponsorship'],
'sp_sent_ids': [0, 1, 2, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which has a larger revenue, Genscape Inc., the two year old energy info provider, or Midcoast Energy Resources with its 4,100 miles of pipe?",
idx="90P",
answer=["Midcoast Energy Resources"],
sp1={'title':'PERSONAL david doctor & genscape, inc.',
"sents":subject2sents['PERSONAL david doctor & genscape, inc.'],
'sp_sent_ids': [6]},
sp2={'title':'PERSONAL acg october 9 lunch - reminder',
"sents":subject2sents['PERSONAL acg october 9 lunch - reminder'],
'sp_sent_ids': [7, 8]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Is the trustworthy organization Mark sugggested for Turkey's recovery one of the organizations Enron contributed to for September 11 relief efforts?",
idx="91P",
answer=["yes"],
sp1={'title':'<32040359.1075840066357.JavaMail.evans@thyme>',
"sents":subject2sents['<32040359.1075840066357.JavaMail.evans@thyme>'],
'sp_sent_ids': [0, 6]},
sp2={'title':'PERSONAL our response to the u.s. tragedy',
"sents":subject2sents['PERSONAL our response to the u.s. tragedy'],
'sp_sent_ids': [0, 4]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many people were killed on the plane with Nick Humber, the Enron employee who was traveling to Los Angeles?",
idx="92P",
answer=["92"],
sp2={'title':'American Airlines Flight 11',
"sents":wikipassages2sents['American Airlines Flight 11'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL tragedy claims life of enron employee',
"sents":subject2sents['PERSONAL tragedy claims life of enron employee'],
'sp_sent_ids': [0, 1, 2, 3]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who is the new Chief Financial Officer of the Enron group where Nick Humber was a director?",
idx="93P",
answer=["Tod Lindholm"],
sp1={'title':'PERSONAL tragedy claims life of enron employee',
"sents":subject2sents['PERSONAL tragedy claims life of enron employee'],
'sp_sent_ids': [2]},
sp2={'title':'PERSONAL enron wind',
"sents":subject2sents['PERSONAL enron wind'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which has more miles, the GST transmission lines in the Carolinas or Midcoast's pipelines in 10 states?",
idx="94P",
answer=["transmission lines"],
sp1={'title':'PERSONAL gridsouth appointment',
"sents":subject2sents['PERSONAL gridsouth appointment'],
'sp_sent_ids': [4]},
sp2={'title':'PERSONAL acg october 9 lunch - reminder',
"sents":subject2sents['PERSONAL acg october 9 lunch - reminder'],
'sp_sent_ids': [8]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Votenet Solutions, Inc released a Spanish language version of its software in celebration of a event which starts when?",
idx="95P",
answer=["September 15"],
sp2={'title':'National Hispanic Heritage Month',
"sents":wikipassages2sents['National Hispanic Heritage Month'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL votenet announces online voter registration software in spanish',
"sents":subject2sents['PERSONAL votenet announces online voter registration software in spanish'],
'sp_sent_ids': [1, 2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When was the company that acquired the EBS portfolio company, Amber Networks, founded?",
idx="96P",
answer=["1865"],
sp2={'title':'Nokia',
"sents":wikipassages2sents['Nokia'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL amber and storageapps acquired',
"sents":subject2sents['PERSONAL amber and storageapps acquired'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many million dollars is the relationship between the largest U.S. newspaper publisher by daily circulation and Eric's company?",
idx="97P",
answer=["$270 million"],
sp1={'title':'Gannett Company',
"sents":wikipassages2sents['Gannett Company'],
'sp_sent_ids': [0, 2, 3]},
sp2={'title':'PERSONAL congrats & zapmedia...0',
"sents":subject2sents['PERSONAL congrats & zapmedia...0'],
'sp_sent_ids': [1, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did Enron raise more money for United Way Scholars last year than the contribution amount for Enron's September 11 relief efforts?",
idx="98P",
answer=["yes"],
sp1={'title':'PERSONAL alexis de tocqueville breakfast and solicitation',
"sents":subject2sents['PERSONAL alexis de tocqueville breakfast and solicitation'],
'sp_sent_ids': [4, 5, 6]},
sp2={'title':'PERSONAL our response to the u.s. tragedy',
"sents":subject2sents['PERSONAL our response to the u.s. tragedy'],
'sp_sent_ids': [0, 4]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The team owned by Bruce McCaw partnered with Enron in which year?",
idx="99P",
answer=["2001"],
sp1={'title':'PacWest Racing',
"sents":wikipassages2sents['PacWest Racing'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL 2001 texaco/havoline grand prix',
"sents":subject2sents['PERSONAL 2001 texaco/havoline grand prix'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the New York City Subway and Washington Metro administered by the same Transit Authority agency?",
idx="100P",
answer=["no"],
sp1={'title':'New York City Subway',
"sents":wikipassages2sents['New York City Subway'],
'sp_sent_ids': [0]},
sp2={'title':'Washington Metro',
"sents":wikipassages2sents['Washington Metro'],
'sp_sent_ids': [0, 1]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# save the queries
all_queries = []
all_queries.extend(local_global_queries.copy())
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/enron_wiki_qas_val_all.json", "w") as f:
for query in all_queries:
json.dump(query, f)
f.write("\n")
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/enron_wiki_qas_original.json", "w") as f:
json.dump(original_queries, f)
print(f"Saved all {len(all_queries)} queries!")
| concurrentqa-main | dataset_construction/Enron_skilling-j/make_queries.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import uuid
from pathlib import Path
import main as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for ConvNeXt", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=72, type=int, help="Duration of the job, in hours")
parser.add_argument("--job_name", default="convnext", type=str, help="Job name")
parser.add_argument("--job_dir", default="", type=str, help="Job directory; leave empty for default")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', default=True, help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/convnext")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as classification
self._setup_gpu_args()
classification.main(self.args)
def checkpoint(self):
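        # Called by submitit when the job is preempted or times out: refresh
        # the rendezvous file, turn on auto-resume, and requeue a fresh copy
        # of this trainer so training continues from the latest checkpoint.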
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
self.args.auto_resume = True
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(self.args.job_dir)
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout * 60
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name=args.job_name)
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
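# Example invocation (hypothetical values; all flags are defined in
# parse_args above):
#   python run_with_submitit.py --nodes 2 --ngpus 8 --model convnext_tiny \
#       --batch_size 128 --partition learnlab --job_name convnext_tiny_1k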
| ConvNeXt-main | run_with_submitit.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import create_transform
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
print("Transform = ")
if isinstance(transform, tuple):
for trans in transform:
print(" - - - - - - - - - - ")
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print("---------------------------")
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
nb_classes = 100
elif args.data_set == 'IMNET':
print("reading from datapath", args.data_path)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == "image_folder":
root = args.data_path if is_train else args.eval_data_path
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = args.nb_classes
assert len(dataset.class_to_idx) == nb_classes
else:
raise NotImplementedError()
print("Number of the class = %d" % nb_classes)
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
# warping (no cropping) when evaluated at 384 or larger
if args.input_size >= 384:
t.append(
transforms.Resize((args.input_size, args.input_size),
interpolation=transforms.InterpolationMode.BICUBIC),
)
print(f"Warping {args.input_size} size input images...")
else:
if args.crop_pct is None:
args.crop_pct = 224 / 256
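                # 224/256 = 0.875, the conventional ImageNet eval center-crop ratio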
size = int(args.input_size / args.crop_pct)
t.append(
# to maintain same ratio w.r.t. 224 images
transforms.Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
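if __name__ == "__main__":
    # Minimal sketch (an illustrative addition, not in the original file):
    # exercise build_transform with a hypothetical argparse.Namespace standing
    # in for the real training args, showing the only fields the eval path reads.
    from argparse import Namespace
    demo_args = Namespace(
        input_size=224,
        imagenet_default_mean_and_std=True,
        crop_pct=None,  # left unset so build_transform fills in 224/256
    )
    print(build_transform(is_train=False, args=demo_args))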
| ConvNeXt-main | datasets.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,
wandb_logger=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
num_training_steps_per_epoch=None, update_freq=None, use_amp=False):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
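        # With gradient accumulation, `update_freq` consecutive micro-batches
        # share one optimizer step; `step` therefore counts optimizer updates,
        # while `data_iter_step` counts loader batches.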
if step >= num_training_steps_per_epoch:
continue
it = start_steps + step # global training iteration
# Update LR & WD for the first acc
if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if use_amp:
with torch.cuda.amp.autocast():
output = model(samples)
loss = criterion(output, targets)
else: # full precision
output = model(samples)
loss = criterion(output, targets)
loss_value = loss.item()
if not math.isfinite(loss_value): # this could trigger if using AMP
print("Loss is {}, stopping training".format(loss_value))
assert math.isfinite(loss_value)
if use_amp:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
else: # full precision
loss /= update_freq
loss.backward()
if (data_iter_step + 1) % update_freq == 0:
optimizer.step()
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
if use_amp:
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
if use_amp:
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
if wandb_logger:
wandb_logger._wandb.log({
'Rank-0 Batch Wise/train_loss': loss_value,
'Rank-0 Batch Wise/train_max_lr': max_lr,
'Rank-0 Batch Wise/train_min_lr': min_lr
}, commit=False)
if class_acc:
wandb_logger._wandb.log({'Rank-0 Batch Wise/train_class_acc': class_acc}, commit=False)
if use_amp:
wandb_logger._wandb.log({'Rank-0 Batch Wise/train_grad_norm': grad_norm}, commit=False)
wandb_logger._wandb.log({'Rank-0 Batch Wise/global_train_step': it})
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device, use_amp=False):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
if use_amp:
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
else:
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
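if __name__ == "__main__":
    # Smoke test (an illustrative addition, not in the original file): run
    # `evaluate` on random tensors with a tiny linear classifier to show the
    # expected (images, targets) batch format and the returned metric dict.
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset
    data = TensorDataset(torch.randn(32, 3, 8, 8), torch.randint(0, 10, (32,)))
    loader = DataLoader(data, batch_size=8)
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 10))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(evaluate(loader, model.to(device), device))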
| ConvNeXt-main | engine.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import math
import time
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from tensorboardX import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
class WandbLogger(object):
def __init__(self, args):
self.args = args
try:
import wandb
self._wandb = wandb
except ImportError:
raise ImportError(
"To use the Weights and Biases Logger please install wandb."
"Run `pip install wandb` to install it."
)
# Initialize a W&B run
if self._wandb.run is None:
self._wandb.init(
project=args.project,
config=args
)
def log_epoch_metrics(self, metrics, commit=True):
"""
Log train/test metrics onto W&B.
"""
# Log number of model parameters as W&B summary
self._wandb.summary['n_parameters'] = metrics.get('n_parameters', None)
metrics.pop('n_parameters', None)
# Log current epoch
self._wandb.log({'epoch': metrics.get('epoch')}, commit=False)
metrics.pop('epoch')
for k, v in metrics.items():
if 'train' in k:
self._wandb.log({f'Global Train/{k}': v}, commit=False)
elif 'test' in k:
self._wandb.log({f'Global Test/{k}': v}, commit=False)
self._wandb.log({})
def log_checkpoints(self):
output_dir = self.args.output_dir
model_artifact = self._wandb.Artifact(
self._wandb.run.id + "_model", type="model"
)
model_artifact.add_dir(output_dir)
self._wandb.log_artifact(model_artifact, aliases=["latest", "best"])
def set_steps(self):
# Set global training step
self._wandb.define_metric('Rank-0 Batch Wise/*', step_metric='Rank-0 Batch Wise/global_train_step')
# Set epoch-wise step
self._wandb.define_metric('Global Train/*', step_metric='epoch')
self._wandb.define_metric('Global Test/*', step_metric='epoch')
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
os.environ['RANK'] = str(args.rank)
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['WORLD_SIZE'] = str(args.world_size)
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
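    """Wraps ``torch.cuda.amp.GradScaler``: scales the loss for backward,
    optionally unscales and clips the gradients, steps the optimizer, and
    returns the total gradient norm (``None`` when the step is skipped)."""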
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
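    """Build a per-iteration schedule: linear warmup from ``start_warmup_value``
    to ``base_value`` (over ``warmup_steps`` iterations if set > 0, else
    ``warmup_epochs * niter_per_ep``), followed by cosine decay down to
    ``final_value``. Returns one value per step, ``epochs * niter_per_ep`` total."""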
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
if is_main_process() and isinstance(epoch, int):
to_del = epoch - args.save_ckpt_num * args.save_ckpt_freq
old_ckpt = output_dir / ('checkpoint-%s.pth' % to_del)
if os.path.exists(old_ckpt):
os.remove(old_ckpt)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
if not isinstance(checkpoint['epoch'], str): # does not support resuming with 'best', 'best-ema'
args.start_epoch = checkpoint['epoch'] + 1
else:
assert args.eval, 'Does not support resuming with checkpoint-best'
if hasattr(args, 'model_ema') and args.model_ema:
if 'model_ema' in checkpoint.keys():
model_ema.ema.load_state_dict(checkpoint['model_ema'])
else:
model_ema.ema.load_state_dict(checkpoint['model'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
| ConvNeXt-main | utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import numpy as np
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, LayerDecayValueAssigner
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import models.convnext
import models.convnext_isotropic
def str2bool(v):
"""
Converts string to bool type; enables command line
arguments in the format of '--arg1 true --arg2 false'
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_args_parser():
parser = argparse.ArgumentParser('ConvNeXt training and evaluation script for image classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
help='Per GPU batch size')
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--update_freq', default=1, type=int,
help='gradient accumulation steps')
# Model parameters
parser.add_argument('--model', default='convnext_tiny', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--drop_path', type=float, default=0, metavar='PCT',
help='Drop path rate (default: 0.0)')
parser.add_argument('--input_size', default=224, type=int,
help='image input size')
parser.add_argument('--layer_scale_init_value', default=1e-6, type=float,
help="Layer scale initial values")
# EMA related parameters
parser.add_argument('--model_ema', type=str2bool, default=False)
    parser.add_argument('--model_ema_decay', type=float, default=0.9999,
                        help='EMA decay factor (default: 0.9999)')
    parser.add_argument('--model_ema_force_cpu', type=str2bool, default=False,
                        help='Keep the EMA weights on CPU')
parser.add_argument('--model_ema_eval', type=str2bool, default=False, help='Using ema to eval during training.')
# Optimization parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr', type=float, default=4e-3, metavar='LR',
help='learning rate (default: 4e-3), with total batch size 4096')
parser.add_argument('--layer_decay', type=float, default=1.0)
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=20, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation: "random", "bilinear", or "bicubic" (default: "bicubic")')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', type=str2bool, default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--head_init_scale', default=1.0, type=float,
help='classifier head initial scale, typically adjusted in fine-tuning')
parser.add_argument('--model_key', default='model|module', type=str,
help='which key to load from saved state dict, usually model or model_ema')
parser.add_argument('--model_prefix', default='', type=str)
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes', default=1000, type=int,
help='number of the classification types')
parser.add_argument('--imagenet_default_mean_and_std', type=str2bool, default=True)
parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder'],
type=str, help='ImageNet dataset path')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', type=str2bool, default=True)
parser.add_argument('--save_ckpt', type=str2bool, default=True)
parser.add_argument('--save_ckpt_freq', default=1, type=int)
parser.add_argument('--save_ckpt_num', default=3, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', type=str2bool, default=False,
help='Perform evaluation only')
parser.add_argument('--dist_eval', type=str2bool, default=True,
help='Enabling distributed evaluation')
parser.add_argument('--disable_eval', type=str2bool, default=False,
help='Disabling evaluation during training')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', type=str2bool, default=True,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', type=str2bool, default=False)
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--use_amp', type=str2bool, default=False,
help="Use PyTorch's AMP (Automatic Mixed Precision) or not")
# Weights and Biases arguments
parser.add_argument('--enable_wandb', type=str2bool, default=False,
help="enable logging to Weights and Biases")
parser.add_argument('--project', default='convnext', type=str,
help="The name of the W&B project where you're sending the new run.")
parser.add_argument('--wandb_ckpt', type=str2bool, default=False,
help="Save model checkpoints as W&B Artifacts.")
return parser
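# Illustrative single-node launch (values are examples, not an official recipe):
#   python -m torch.distributed.launch --nproc_per_node=8 main.py \
#       --model convnext_tiny --drop_path 0.1 --batch_size 128 --lr 4e-3 \
#       --update_freq 4 --data_path /path/to/imagenet-1k --output_dir ./results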
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
if args.disable_eval:
args.dist_eval = False
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False, args=args)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
if global_rank == 0 and args.enable_wandb:
wandb_logger = utils.WandbLogger(args)
else:
wandb_logger = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
data_loader_val = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
layer_scale_init_value=args.layer_scale_init_value,
head_init_scale=args.head_init_scale,
)
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" % num_training_steps_per_epoch)
    if args.layer_decay != 1.0:
num_layers = 12 # convnext layers divided into 12 parts, each with a different decayed lr value.
assert args.model in ['convnext_small', 'convnext_base', 'convnext_large', 'convnext_xlarge'], \
"Layer Decay impl only supports convnext_small/base/large/xlarge"
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp, skip_list=None,
get_num_layer=assigner.get_layer_id if assigner is not None else None,
get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler() # if args.use_amp is False, this won't be used
print("Use Cosine LR scheduler")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
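    # lr_schedule_values holds one value per optimizer step (epochs * steps_per_epoch
    # entries): a linear warmup over warmup_epochs/warmup_steps, then cosine decay to min_lr.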
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
if args.eval:
print(f"Eval only mode")
test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
print(f"Accuracy of the network on {len(dataset_val)} test images: {test_stats['acc1']:.5f}%")
return
max_accuracy = 0.0
if args.model_ema and args.model_ema_eval:
max_accuracy_ema = 0.0
print("Start training for %d epochs" % args.epochs)
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
if wandb_logger:
wandb_logger.set_steps()
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer,
device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
log_writer=log_writer, wandb_logger=wandb_logger, start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
use_amp=args.use_amp
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
if data_loader_val is not None:
test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
print(f"Accuracy of the model on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
# repeat testing routines for EMA, if ema eval is turned on
if args.model_ema and args.model_ema_eval:
test_stats_ema = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp)
print(f"Accuracy of the model EMA on {len(dataset_val)} test images: {test_stats_ema['acc1']:.1f}%")
if max_accuracy_ema < test_stats_ema["acc1"]:
max_accuracy_ema = test_stats_ema["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best-ema", model_ema=model_ema)
print(f'Max EMA accuracy: {max_accuracy_ema:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1_ema=test_stats_ema['acc1'], head="perf", step=epoch)
log_stats.update({**{f'test_{k}_ema': v for k, v in test_stats_ema.items()}})
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
if wandb_logger:
wandb_logger.log_epoch_metrics(log_stats)
if wandb_logger and args.wandb_ckpt and args.save_ckpt and args.output_dir:
wandb_logger.log_checkpoints()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('ConvNeXt training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| ConvNeXt-main | main.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_convnext(var_name):
"""
Divide [3, 3, 27, 3] layers into 12 groups; each group is three
consecutive blocks, including possible neighboring downsample layers;
adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
"""
num_max_layer = 12
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
if stage_id == 0:
layer_id = 0
elif stage_id == 1 or stage_id == 2:
layer_id = stage_id + 1
elif stage_id == 3:
layer_id = 12
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
if stage_id == 0 or stage_id == 1:
layer_id = stage_id + 1
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = 12
return layer_id
else:
return num_max_layer + 1
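# Illustrative mappings (parameter names assume the depths=[3, 3, 27, 3] layout):
#   "downsample_layers.0.0.weight" -> 0               (stem)
#   "stages.2.13.dwconv.weight"    -> 3 + 13 // 3 = 7
#   "stages.3.0.dwconv.weight"     -> 12
#   "head.weight"                  -> 13              (num_max_layer + 1, full lr scale)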
class LayerDecayValueAssigner(object):
def __init__(self, values):
self.values = values
def get_scale(self, layer_id):
return self.values[layer_id]
def get_layer_id(self, var_name):
return get_num_layer_for_convnext(var_name)
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
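# Resulting group names look like "layer_0_no_decay" or "layer_7_decay", each carrying
# its own lr_scale; with get_num_layer=None the groups collapse to just "decay" and
# "no_decay" (1-D params and biases get weight_decay=0).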
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
# if weight_decay and filter_bias_and_bn:
if filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
        assert False, "Invalid optimizer"
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
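# Usage sketch: args.opt accepts compound names split on '_', e.g. 'lookahead_adamw'
# builds AdamW and then wraps it in Lookahead; any 'fused*' optimizer requires apex
# and CUDA (checked via has_apex above).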
| ConvNeXt-main | optim_factory.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from timm.models.registry import register_model
from .convnext import Block, LayerNorm
class ConvNeXtIsotropic(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` -
https://arxiv.org/pdf/2201.03545.pdf
Isotropic ConvNeXts (Section 3.3 in paper)
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
        depth (int): Number of blocks. Default: 18
        dim (int): Feature dimension. Default: 384
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depth=18, dim=384, drop_path_rate=0.,
layer_scale_init_value=0, head_init_scale=1.,
):
super().__init__()
self.stem = nn.Conv2d(in_chans, dim, kernel_size=16, stride=16)
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.Sequential(*[Block(dim=dim, drop_path=dp_rates[i],
layer_scale_init_value=layer_scale_init_value)
for i in range(depth)])
self.norm = LayerNorm(dim, eps=1e-6) # final norm layer
self.head = nn.Linear(dim, num_classes)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward_features(self, x):
x = self.stem(x)
x = self.blocks(x)
return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@register_model
def convnext_isotropic_small(pretrained=False, **kwargs):
model = ConvNeXtIsotropic(depth=18, dim=384, **kwargs)
if pretrained:
url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_small_1k_224_ema.pth'
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_isotropic_base(pretrained=False, **kwargs):
model = ConvNeXtIsotropic(depth=18, dim=768, **kwargs)
if pretrained:
url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_base_1k_224_ema.pth'
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_isotropic_large(pretrained=False, **kwargs):
model = ConvNeXtIsotropic(depth=36, dim=1024, **kwargs)
if pretrained:
url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_large_1k_224_ema.pth'
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
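# Smoke-test sketch (run from the repo root, since this module uses a relative import):
#   from models.convnext_isotropic import convnext_isotropic_small
#   model = convnext_isotropic_small(pretrained=False)
#   model(torch.randn(1, 3, 224, 224)).shape   # -> torch.Size([1, 1000])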
| ConvNeXt-main | models/convnext_isotropic.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from timm.models.registry import register_model
class Block(nn.Module):
r""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
requires_grad=True) if layer_scale_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
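# Shape sketch (sizes illustrative): a Block is residual, so input and output shapes
# match, e.g. Block(dim=96)(torch.randn(2, 96, 56, 56)) -> torch.Size([2, 96, 56, 56]).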
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` -
https://arxiv.org/pdf/2201.03545.pdf
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
        dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
layer_scale_init_value=1e-6, head_init_scale=1.,
):
super().__init__()
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward_features(self, x):
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
class LayerNorm(nn.Module):
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
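# Usage sketch: "channels_first" mode normalizes over dim 1 of an (N, C, H, W) tensor,
# matching F.layer_norm applied to the permuted (N, H, W, C) layout, e.g.
#   ln = LayerNorm(96, data_format="channels_first")
#   ln(torch.randn(2, 96, 56, 56)).shape   # -> torch.Size([2, 96, 56, 56])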
model_urls = {
"convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
"convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
"convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
"convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
"convnext_tiny_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
"convnext_small_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
"convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
"convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
@register_model
def convnext_tiny(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
if pretrained:
url = model_urls['convnext_tiny_22k'] if in_22k else model_urls['convnext_tiny_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu", check_hash=True)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_small(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
if pretrained:
url = model_urls['convnext_small_22k'] if in_22k else model_urls['convnext_small_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_base(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
if pretrained:
url = model_urls['convnext_base_22k'] if in_22k else model_urls['convnext_base_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_large(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
if pretrained:
url = model_urls['convnext_large_22k'] if in_22k else model_urls['convnext_large_1k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def convnext_xlarge(pretrained=False, in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
if pretrained:
assert in_22k, "only ImageNet-22K pre-trained ConvNeXt-XL is available; please set in_22k=True"
url = model_urls['convnext_xlarge_22k']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
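# Minimal smoke test (a sketch; assumes torch and timm are installed):
if __name__ == "__main__":
    model = convnext_tiny(pretrained=False)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])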
| ConvNeXt-main | models/convnext.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.runner import get_dist_info
def get_num_layer_layer_wise(var_name, num_max_layer=12):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.downsample_layers"):
stage_id = int(var_name.split('.')[2])
if stage_id == 0:
layer_id = 0
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3
elif stage_id == 3:
layer_id = num_max_layer
return layer_id
elif var_name.startswith("backbone.stages"):
stage_id = int(var_name.split('.')[2])
block_id = int(var_name.split('.')[3])
if stage_id == 0:
layer_id = 1
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = num_max_layer
return layer_id
else:
return num_max_layer + 1
def get_num_layer_stage_wise(var_name, num_max_layer):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.downsample_layers"):
return 0
elif var_name.startswith("backbone.stages"):
stage_id = int(var_name.split('.')[2])
return stage_id + 1
else:
return num_max_layer - 1
@OPTIMIZER_BUILDERS.register_module()
class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):
def add_params(self, params, module, prefix='', is_dcn_module=None):
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module
is_dcn_module (int|float|None): If the current module is a
submodule of DCN, `is_dcn_module` will be passed to
control conv_offset layer's learning rate. Defaults to None.
"""
parameter_groups = {}
print(self.paramwise_cfg)
num_layers = self.paramwise_cfg.get('num_layers') + 2
decay_rate = self.paramwise_cfg.get('decay_rate')
decay_type = self.paramwise_cfg.get('decay_type', "layer_wise")
print("Build LearningRateDecayOptimizerConstructor %s %f - %d" % (decay_type, decay_rate, num_layers))
weight_decay = self.base_wd
for name, param in module.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in ('pos_embed', 'cls_token'):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if decay_type == "layer_wise":
layer_id = get_num_layer_layer_wise(name, self.paramwise_cfg.get('num_layers'))
elif decay_type == "stage_wise":
layer_id = get_num_layer_stage_wise(name, num_layers)
group_name = "layer_%d_%s" % (layer_id, group_name)
if group_name not in parameter_groups:
scale = decay_rate ** (num_layers - layer_id - 1)
parameter_groups[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"param_names": [],
"lr_scale": scale,
"group_name": group_name,
"lr": scale * self.base_lr,
}
parameter_groups[group_name]["params"].append(param)
parameter_groups[group_name]["param_names"].append(name)
rank, _ = get_dist_info()
if rank == 0:
to_display = {}
for key in parameter_groups:
to_display[key] = {
"param_names": parameter_groups[key]["param_names"],
"lr_scale": parameter_groups[key]["lr_scale"],
"lr": parameter_groups[key]["lr"],
"weight_decay": parameter_groups[key]["weight_decay"],
}
print("Param groups = %s" % json.dumps(to_display, indent=2))
params.extend(parameter_groups.values())
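# Config sketch (values illustrative) for wiring this constructor into an mmcv config:
#   optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', type='AdamW',
#                    lr=1e-4, weight_decay=0.05,
#                    paramwise_cfg={'decay_rate': 0.7, 'decay_type': 'layer_wise',
#                                   'num_layers': 12})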
| ConvNeXt-main | object_detection/mmcv_custom/layer_decay_optimizer_constructor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
from .checkpoint import load_checkpoint
from .layer_decay_optimizer_constructor import LearningRateDecayOptimizerConstructor
from .customized_text import CustomizedTextLoggerHook
__all__ = ['load_checkpoint', 'LearningRateDecayOptimizerConstructor', 'CustomizedTextLoggerHook']
| ConvNeXt-main | object_detection/mmcv_custom/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
from collections import OrderedDict
import torch
import mmcv
from mmcv.runner import HOOKS
from mmcv.runner import TextLoggerHook
@HOOKS.register_module()
class CustomizedTextLoggerHook(TextLoggerHook):
"""Customized Text Logger hook.
This logger prints out both lr and layer_0_lr.
"""
def _log_info(self, log_dict, runner):
# print exp name for users to distinguish experiments
# at every ``interval_exp_name`` iterations and the end of each epoch
if runner.meta is not None and 'exp_name' in runner.meta:
if (self.every_n_iters(runner, self.interval_exp_name)) or (
self.by_epoch and self.end_of_epoch(runner)):
exp_info = f'Exp name: {runner.meta["exp_name"]}'
runner.logger.info(exp_info)
if log_dict['mode'] == 'train':
lr_str = {}
for lr_type in ['lr', 'layer_0_lr']:
if isinstance(log_dict[lr_type], dict):
lr_str[lr_type] = []
for k, val in log_dict[lr_type].items():
                        lr_str[lr_type].append(f'{lr_type}_{k}: {val:.3e}')
                    lr_str[lr_type] = ' '.join(lr_str[lr_type])
else:
lr_str[lr_type] = f'{lr_type}: {log_dict[lr_type]:.3e}'
# by epoch: Epoch [4][100/1000]
# by iter: Iter [100/100000]
if self.by_epoch:
log_str = f'Epoch [{log_dict["epoch"]}]' \
f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
else:
log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
log_str += f'{lr_str["lr"]}, {lr_str["layer_0_lr"]}, '
if 'time' in log_dict.keys():
self.time_sec_tot += (log_dict['time'] * self.interval)
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
log_str += f'eta: {eta_str}, '
log_str += f'time: {log_dict["time"]:.3f}, ' \
f'data_time: {log_dict["data_time"]:.3f}, '
# statistic memory
if torch.cuda.is_available():
log_str += f'memory: {log_dict["memory"]}, '
else:
# val/test time
# here 1000 is the length of the val dataloader
# by epoch: Epoch[val] [4][1000]
# by iter: Iter[val] [1000]
if self.by_epoch:
log_str = f'Epoch({log_dict["mode"]}) ' \
f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
else:
log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'
log_items = []
for name, val in log_dict.items():
# TODO: resolve this hack
# these items have been in log_str
if name in [
'mode', 'Epoch', 'iter', 'lr', 'layer_0_lr', 'time', 'data_time',
'memory', 'epoch'
]:
continue
if isinstance(val, float):
val = f'{val:.4f}'
log_items.append(f'{name}: {val}')
log_str += ', '.join(log_items)
runner.logger.info(log_str)
def log(self, runner):
if 'eval_iter_num' in runner.log_buffer.output:
# this doesn't modify runner.iter and is regardless of by_epoch
cur_iter = runner.log_buffer.output.pop('eval_iter_num')
else:
cur_iter = self.get_iter(runner, inner_iter=True)
log_dict = OrderedDict(
mode=self.get_mode(runner),
epoch=self.get_epoch(runner),
iter=cur_iter)
# record lr and layer_0_lr
cur_lr = runner.current_lr()
if isinstance(cur_lr, list):
log_dict['layer_0_lr'] = min(cur_lr)
log_dict['lr'] = max(cur_lr)
else:
assert isinstance(cur_lr, dict)
log_dict['lr'], log_dict['layer_0_lr'] = {}, {}
for k, lr_ in cur_lr.items():
assert isinstance(lr_, list)
log_dict['layer_0_lr'].update({k: min(lr_)})
log_dict['lr'].update({k: max(lr_)})
if 'time' in runner.log_buffer.output:
# statistic memory
if torch.cuda.is_available():
log_dict['memory'] = self._get_max_memory(runner)
log_dict = dict(log_dict, **runner.log_buffer.output)
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner)
return log_dict
| ConvNeXt-main | object_detection/mmcv_custom/customized_text.py |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import time
from tempfile import TemporaryDirectory
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
try:
import apex
except ImportError:
print('apex is not installed')
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 4 fields: ``meta``, ``state_dict`` and
``optimizer``, ``amp``. By default ``meta`` will contain version
and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
# checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
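# Usage sketch (path illustrative):
#   save_checkpoint(model, 'work_dirs/latest.pth', optimizer=optimizer,
#                   meta=dict(epoch=epoch, iter=iter_num))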
| ConvNeXt-main | object_detection/mmcv_custom/runner/checkpoint.py |