import logging
import random
from typing import Dict, Callable, Tuple, Union, List, Any, Optional, Sequence
import ai2thor.controller
import lru
import numpy as np
from allenact.utils.system import ImportChecker
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_util import include_object_data
_UNIFORM_BOX_CACHE = {}
def save_frames_to_mp4(frames: Sequence[np.ndarray], file_name: str, fps=3):
import matplotlib.pyplot as plt
from matplotlib import animation
import pylab
h, w, _ = frames[0].shape
aspect_ratio = w / h
fig = plt.figure(figsize=(5 * aspect_ratio, 5))
ax = fig.add_subplot(111)
ax.set_frame_on(False)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
ax.set_aspect("equal")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
im = ax.imshow(frames[0], cmap="gray", interpolation="nearest")
im.set_clim([0, 255])
pylab.tight_layout()
def update_img(n):
if n >= len(frames):
im.set_data(frames[-1])
else:
im.set_data(frames[n])
return im
ani = animation.FuncAnimation(fig, update_img, len(frames) - 1, interval=200)
writer = animation.writers["ffmpeg"](fps=fps)
ani.save(file_name, writer=writer, dpi=300)
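# A minimal usage sketch for `save_frames_to_mp4` (assumes matplotlib and a working ffmpeg
# install are available; the frame shape and file name below are illustrative only):
#
#     frames = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(30)]
#     save_frames_to_mp4(frames, "example_rollout.mp4", fps=3)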
def hand_in_initial_position(
controller: ai2thor.controller.Controller, ignore_rotation: bool = False
):
metadata = controller.last_event.metadata
return IThorEnvironment.position_dist(
metadata["heldObjectPose"]["localPosition"], {"x": 0, "y": -0.16, "z": 0.38},
) < 1e-4 and (
ignore_rotation
or IThorEnvironment.angle_between_rotations(
metadata["heldObjectPose"]["localRotation"],
{"x": -metadata["agent"]["cameraHorizon"], "y": 0, "z": 0},
)
< 1e-2
)
class BoundedFloat(object):
"""Declare a bounded float placeholder variable."""
def __init__(self, low: float, high: float):
"""High is the max float value, low is the min (both inclusive)."""
self.types = {float, int, np.float64}
if type(low) not in self.types or type(high) not in self.types:
raise ValueError("Bounds must both be floats.")
if low > high:
raise ValueError("low must be less than high.")
self.low = low
self.high = high
def sample(self) -> float:
"""Return a random float within the initialized range."""
return random.random() * (self.high - self.low) + self.low
def __contains__(self, n: float):
"""Assert n is within this classes bounded range."""
if type(n) not in self.types:
raise ValueError("n must be a float (or an int).")
return self.low <= n <= self.high
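# A small sketch of how `BoundedFloat` behaves (the bounds and probe values are illustrative):
#
#     b = BoundedFloat(low=-0.5, high=0.5)
#     b.sample()   # uniform draw in [-0.5, 0.5]
#     0.25 in b    # True
#     0.75 in b    # False
#     "a" in b     # raises ValueError: only floats/ints may be tested for membership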
class RearrangeActionSpace(object):
"""Control which actions with bounded variables can be executed."""
def __init__(self, actions: Dict[Callable, Dict[str, BoundedFloat]]):
"""Build a new AI2-THOR action space.
Attributes
:actions (Dict[Callable, Dict[str, BoundedFloat]]) must be in the form
{
<Callable: e.g., controller.move_ahead>: {
'<x>': <BoundedFloat(low=0.5, high=2.5)>,
'<y>': <BoundedFloat(low=0.5, high=2.5)>,
'<z>': <BoundedFloat(low=0.5, high=2.5)>,
'<degrees>': <BoundedFloat(low=-90, high=90)>,
...
},
...
},
where the action variables are in the value and the callable function
is the key.
"""
self.keys = list(actions.keys())
self.actions = actions
def execute_random_action(self, log_choice: bool = True) -> None:
"""Execute a random action within the specified action space."""
action = random.choice(self.keys)
kwargs = {
name: bounds.sample() for name, bounds in self.actions[action].items()
}
# logging
if log_choice:
kwargs_str = str("".join(f" {k}: {v},\n" for k, v in kwargs.items()))
kwargs_str = "\n" + kwargs_str[:-2] if kwargs_str else ""
logging.info(f"Executing {action.__name__}(" + kwargs_str + ")")
action(**kwargs)
def __contains__(
self, action_fn_and_kwargs: Tuple[Callable, Dict[str, float]]
) -> bool:
"""Return if action_fn with variables is valid in this ActionSpace."""
action_fn, variables = action_fn_and_kwargs
# asserts the action is valid
if action_fn not in self.actions:
return False
# asserts the variables are valid
for name, x in variables.items():
if x not in self.actions[action_fn][name]:
return False
return True
def __str__(self) -> str:
"""Return a string representation of the action space."""
return self.__repr__()
def __repr__(self) -> str:
"""Return a string representation of the action space."""
s = ""
tab = " " * 2 # default tabs have like 8 spaces on shells
for action_fn, vars in self.actions.items():
fn_name = action_fn.__name__
vstr = ""
for i, (var_name, bound) in enumerate(vars.items()):
low = bound.low
high = bound.high
vstr += f"{tab * 2}{var_name}: float(low={low}, high={high})"
vstr += "\n" if i + 1 == len(vars) else ",\n"
vstr = "\n" + vstr[:-1] if vstr else ""
s += f"{tab}{fn_name}({vstr}),\n"
s = s[:-2] if s else ""
return "ActionSpace(\n" + s + "\n)"
def extract_obj_data(obj):
"""Return object evaluation metrics based on the env state."""
if "type" in obj:
return {
"type": obj["type"],
"position": obj["position"],
"rotation": obj["rotation"],
"openness": obj["openness"],
"pickupable": obj["pickupable"],
"broken": obj["broken"],
"bounding_box": obj["bounding_box"],
"objectId": obj["objectId"],
"name": obj["name"],
"parentReceptacles": obj.get("parentReceptacles", []),
}
return {
"type": obj["objectType"],
"position": obj["position"],
"rotation": obj["rotation"],
"openness": obj["openness"] if obj["openable"] else None,
"pickupable": obj["pickupable"],
"broken": obj["isBroken"],
"objectId": obj["objectId"],
"name": obj["name"],
"parentReceptacles": obj.get("parentReceptacles", []),
"bounding_box": obj["objectOrientedBoundingBox"]["cornerPoints"]
if obj["objectOrientedBoundingBox"]
else None,
}
def get_pose_info(
objs: Union[Sequence[Dict[str, Any]], Dict[str, Any]]
) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
"""Return data about each specified object.
For each object, the return consists of its type, position,
rotation, openness, and bounding box.
"""
# list of objects
if isinstance(objs, Sequence):
return [extract_obj_data(obj) for obj in objs]
# single object
return extract_obj_data(objs)
def execute_action(
controller: ai2thor.controller.Controller,
action_space: RearrangeActionSpace,
action_fn: Callable,
thor_action: str,
error_message: str = "",
updated_kwarg_names: Optional[Dict[str, str]] = None,
default_thor_kwargs: Optional[Dict[str, Any]] = None,
preprocess_kwargs_inplace: Optional[Callable] = None,
**kwargs: float,
) -> bool:
"""Execute a bounded action within the AI2-THOR controller."""
if updated_kwarg_names is None:
updated_kwarg_names = {}
if default_thor_kwargs is None:
default_thor_kwargs = {}
if (action_fn, kwargs) not in action_space: # Checks that values are in bounds
raise ValueError(
error_message
+ f" action_fn=={action_fn}, kwargs=={kwargs}, action_space=={action_space}."
)
if preprocess_kwargs_inplace is not None:
if len(updated_kwarg_names) != 0:
raise NotImplementedError(
"Cannot have non-empty `updated_kwarg_names` and a non-None `preprocess_kwargs_inplace` argument."
)
preprocess_kwargs_inplace(kwargs)
# get rid of bad variable names
for better_kwarg, thor_kwarg in updated_kwarg_names.items():
kwargs[thor_kwarg] = kwargs[better_kwarg]
del kwargs[better_kwarg]
for name, value in default_thor_kwargs.items():
kwargs[name] = value
event = controller.step(thor_action, **kwargs)
return event.metadata["lastActionSuccess"]
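# A rough sketch of how `execute_action` is typically called (the controller, bounds, and kwarg
# names are illustrative; the renaming mirrors how `move_held_object` maps x_meters -> x):
#
#     def move_held_object(x_meters: float) -> None: ...
#     space = RearrangeActionSpace({move_held_object: {"x_meters": BoundedFloat(low=-0.5, high=0.5)}})
#     execute_action(
#         controller=controller,                  # an ai2thor.controller.Controller
#         action_space=space,
#         action_fn=move_held_object,
#         thor_action="MoveHandDelta",
#         updated_kwarg_names={"x_meters": "x"},  # rename to the keyword THOR expects
#         default_thor_kwargs={"fixedDeltaTime": 0.02},
#         x_meters=0.1,
#     )
#     # 1. verifies (move_held_object, {"x_meters": 0.1}) lies in `space`,
#     # 2. renames x_meters -> x and merges in the default THOR kwargs,
#     # 3. runs controller.step("MoveHandDelta", x=0.1, fixedDeltaTime=0.02) and returns lastActionSuccess.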
def _iou_slow(
b1: Sequence[Sequence[float]],
b2: Sequence[Sequence[float]],
num_points: int = 2197,
) -> float:
"""Calculate the IoU between 3d bounding boxes b1 and b2."""
with ImportChecker("To use `_iou_slow` you must first install `scipy`."):
from scipy.spatial.qhull import ConvexHull, Delaunay
b1 = np.array(b1) if not isinstance(b1, np.ndarray) else b1
b2 = np.array(b2) if not isinstance(b2, np.ndarray) else b2
def _outer_bounds(
points_1: np.ndarray, points_2: np.ndarray
) -> Dict[str, Dict[str, float]]:
"""Sample points from the outer bounds formed by points_1/2."""
assert points_1.shape == points_2.shape
bounds = dict()
for i in range(points_1.shape[0]):
x1, y1, z1 = points_1[i]
x2, y2, z2 = points_2[i]
points = [
(x1, "x"),
(x2, "x"),
(y1, "y"),
(y2, "y"),
(z1, "z"),
(z2, "z"),
]
for val, d_key in points:
if d_key not in bounds:
bounds[d_key] = {"min": val, "max": val}
else:
if val > bounds[d_key]["max"]:
bounds[d_key]["max"] = val
elif val < bounds[d_key]["min"]:
bounds[d_key]["min"] = val
return bounds
def _in_box(box: np.ndarray, points: np.ndarray) -> np.ndarray:
"""For each point, return if its in the hull."""
hull = ConvexHull(box)
deln = Delaunay(box[hull.vertices])
return deln.find_simplex(points) >= 0
bounds = _outer_bounds(b1, b2)
dim_points = int(num_points ** (1 / 3))
xs = np.linspace(bounds["x"]["min"], bounds["x"]["max"], dim_points)
ys = np.linspace(bounds["y"]["min"], bounds["y"]["max"], dim_points)
zs = np.linspace(bounds["z"]["min"], bounds["z"]["max"], dim_points)
points = np.array([[x, y, z] for x in xs for y in ys for z in zs], copy=False)
in_b1 = _in_box(b1, points)
in_b2 = _in_box(b2, points)
intersection = np.count_nonzero(in_b1 * in_b2)
union = np.count_nonzero(in_b1 + in_b2)
iou = intersection / union if union else 0
return iou
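# `get_basis_for_3d_box` below assumes its input is the 8 corner points of a (possibly rotated) box
# with the first corner at the origin. It returns an orthonormal basis whose columns point along the
# three box edges meeting at that corner, together with the corresponding edge lengths.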
def get_basis_for_3d_box(corners: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
assert corners[0].sum() == 0.0
without_first = corners[1:]
magnitudes1 = np.sqrt((without_first * without_first).sum(1))
v0_ind = np.argmin(magnitudes1)
v0_mag = magnitudes1[v0_ind]
if v0_mag < 1e-8:
raise RuntimeError(f"Could not find basis for {corners}")
v0 = without_first[np.argmin(magnitudes1)] / v0_mag
orth_to_v0 = (v0.reshape(1, -1) * without_first).sum(-1) < v0_mag / 2.0
inds_orth_to_v0 = np.where(orth_to_v0)[0]
v1_ind = inds_orth_to_v0[np.argmin(magnitudes1[inds_orth_to_v0])]
v1_mag = magnitudes1[v1_ind]
v1 = without_first[v1_ind, :] / magnitudes1[v1_ind]
orth_to_v1 = (v1.reshape(1, -1) * without_first).sum(-1) < v1_mag / 2.0
inds_orth_to_v0_and_v1 = np.where(orth_to_v0 & orth_to_v1)[0]
if len(inds_orth_to_v0_and_v1) != 1:
raise RuntimeError(f"Could not find basis for {corners}")
v2_ind = inds_orth_to_v0_and_v1[0]
v2 = without_first[v2_ind, :] / magnitudes1[v2_ind]
orth_mat = np.stack((v0, v1, v2), axis=1) # Orthonormal matrix
return orth_mat, magnitudes1[[v0_ind, v1_ind, v2_ind]]
def uniform_box_points(n):
if n not in _UNIFORM_BOX_CACHE:
start = 1.0 / (2 * n)
lin_space = np.linspace(start, 1 - start, num=n).reshape(n, 1)
mat = lin_space
for i in range(2):
mat = np.concatenate(
(np.repeat(lin_space, mat.shape[0], 0), np.tile(mat, (n, 1))), axis=1,
)
_UNIFORM_BOX_CACHE[n] = mat
return _UNIFORM_BOX_CACHE[n]
def iou_box_3d(b1: Sequence[Sequence[float]], b2: Sequence[Sequence[float]]) -> float:
"""Calculate the IoU between 3d bounding boxes b1 and b2."""
b1 = np.array(b1)
b2 = np.array(b2)
assert b1.shape == b2.shape == (8, 3)
b1_center = b1[:1, :]
b1 = b1 - b1_center
b1_orth_basis, b1_mags = get_basis_for_3d_box(corners=b1)
b2 = (b2 - b1_center) @ b1_orth_basis
b2_center = b2[:1, :]
b2 = b2 - b2_center
b2_orth_basis, b2_mags = get_basis_for_3d_box(corners=b2)
sampled_points = b2_center.reshape(1, 3) + (
uniform_box_points(13) @ (b2_mags.reshape(-1, 1) * np.transpose(b2_orth_basis))
)
prop_intersection = (
np.logical_and(
sampled_points > -1e-3, sampled_points <= 1e-3 + b1_mags.reshape(1, 3)
)
.all(-1)
.mean()
)
b1_vol = np.prod(b1_mags)
b2_vol = np.prod(b2_mags)
intersect_vol = b2_vol * prop_intersection
return intersect_vol / (b1_vol + b2_vol - intersect_vol)
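# A small sanity-check sketch for `iou_box_3d` using axis-aligned unit cubes (the helper name and
# tolerance are illustrative; the 0.05 slack accounts for the sampling-based intersection estimate):
def _example_iou_box_3d_sanity_check() -> None:
    import itertools

    unit_cube = np.array(list(itertools.product([0.0, 1.0], repeat=3)))  # 8 corners of [0, 1]^3
    # Identical boxes overlap perfectly.
    assert abs(iou_box_3d(unit_cube, unit_cube) - 1.0) < 1e-6
    # Shifting by half an edge gives intersection 0.5 and union 1.5, so the IoU is roughly 1/3.
    shifted_cube = unit_cube + np.array([0.5, 0.0, 0.0])
    assert abs(iou_box_3d(unit_cube, shifted_cube) - 1.0 / 3.0) < 0.05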
class PoseMismatchError(Exception):
pass
class ObjectInteractablePostionsCache:
def __init__(self, max_size: int = 20000, ndigits=2):
self._key_to_positions = lru.LRU(size=max_size)
self.ndigits = ndigits
self.max_size = max_size
def _get_key(self, scene_name: str, obj: Dict[str, Any]):
p = obj["position"]
return (
scene_name,
obj["type"] if "type" in obj else obj["objectType"],
round(p["x"], self.ndigits),
round(p["y"], self.ndigits),
round(p["z"], self.ndigits),
)
def get(
self,
scene_name: str,
obj: Dict[str, Any],
controller: ai2thor.controller.Controller,
reachable_positions: Optional[Sequence[Dict[str, float]]] = None,
force_cache_refresh: bool = False,
) -> List[Dict[str, Union[float, int, bool]]]:
scene_name = scene_name.replace("_physics", "")
obj_key = self._get_key(scene_name=scene_name, obj=obj)
if force_cache_refresh or obj_key not in self._key_to_positions:
with include_object_data(controller):
metadata = controller.last_event.metadata
cur_scene_name = metadata["sceneName"].replace("_physics", "")
assert (
scene_name == cur_scene_name
), f"Scene names must match when filling a cache miss ({scene_name} != {cur_scene_name})."
obj_in_scene = next(
(o for o in metadata["objects"] if o["name"] == obj["name"]), None,
)
if obj_in_scene is None:
raise RuntimeError(
f"Object with name {obj['name']} must be in the scene when filling a cache miss"
)
desired_pos = obj["position"]
desired_rot = obj["rotation"]
cur_pos = obj_in_scene["position"]
cur_rot = obj_in_scene["rotation"]
should_teleport = (
IThorEnvironment.position_dist(desired_pos, cur_pos) >= 1e-3
or IThorEnvironment.rotation_dist(desired_rot, cur_rot) >= 1
)
object_held = obj_in_scene["isPickedUp"]
physics_was_unpaused = controller.last_event.metadata.get(
"physicsAutoSimulation", True
)
if should_teleport:
if object_held:
if not hand_in_initial_position(
controller=controller, ignore_rotation=True
):
raise NotImplementedError
if physics_was_unpaused:
controller.step("PausePhysicsAutoSim")
assert controller.last_event.metadata["lastActionSuccess"]
event = controller.step(
"TeleportObject",
objectId=obj_in_scene["objectId"],
rotation=desired_rot,
**desired_pos,
forceAction=True,
allowTeleportOutOfHand=True,
forceKinematic=True,
)
assert event.metadata["lastActionSuccess"]
metadata = controller.step(
action="GetInteractablePoses",
objectId=obj["objectId"],
positions=reachable_positions,
).metadata
assert metadata["lastActionSuccess"]
self._key_to_positions[obj_key] = metadata["actionReturn"]
if should_teleport:
if object_held:
if hand_in_initial_position(
controller=controller, ignore_rotation=True
):
controller.step(
"PickupObject",
objectId=obj_in_scene["objectId"],
forceAction=True,
)
assert controller.last_event.metadata["lastActionSuccess"]
if physics_was_unpaused:
controller.step("UnpausePhysicsAutoSim")
assert controller.last_event.metadata["lastActionSuccess"]
else:
raise NotImplementedError
else:
event = controller.step(
"TeleportObject",
objectId=obj_in_scene["objectId"],
rotation=cur_rot,
**cur_pos,
forceAction=True,
)
assert event.metadata["lastActionSuccess"]
return self._key_to_positions[obj_key]
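# The cache key above rounds object positions to `ndigits` decimals, so repeated queries for the
# same object type at (nearly) the same location reuse one entry. A rough sketch of the keying
# (scene and object values are made up for illustration):
#
#     cache = ObjectInteractablePostionsCache(ndigits=2)
#     cache._get_key(
#         scene_name="FloorPlan1",
#         obj={"objectType": "Mug", "position": {"x": 1.004, "y": 0.9, "z": -2.499}},
#     )
#     # -> ("FloorPlan1", "Mug", 1.0, 0.9, -2.5)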
# ---- end of rearrange/utils.py (ai2thor-rearrangement-main) ----
import enum
import math
import pprint
import random
import traceback
from collections import OrderedDict
from typing import Dict, Any, Tuple, Optional, Callable, List, Union, Sequence
import ai2thor
import ai2thor.controller
import ai2thor.fifo_server
import ai2thor.server
import ai2thor.wsgi_server
import numpy as np
from packaging import version
from torch.distributions.utils import lazy_property
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_util import (
round_to_factor,
include_object_data,
)
from datagen.datagen_constants import OBJECT_TYPES_TO_NOT_MOVE
from datagen.datagen_utils import (
open_objs,
get_object_ids_to_not_move_from_object_types,
remove_objects_until_all_have_identical_meshes,
)
from rearrange.constants import (
REQUIRED_THOR_VERSION,
MAX_HAND_METERS,
)
from rearrange.utils import (
BoundedFloat,
RearrangeActionSpace,
PoseMismatchError,
ObjectInteractablePostionsCache,
execute_action,
get_pose_info,
iou_box_3d,
)
from rearrange_constants import IOU_THRESHOLD, OPENNESS_THRESHOLD, POSITION_DIFF_BARRIER
class RearrangeMode(enum.Enum):
"""Different modes allowed in RearrangeTHOREnvironment."""
MANIPULATE = "MANIPULATE"
SNAP = "SNAP"
class RearrangeTaskSpec:
"""Data container encapsulating how a single rearrangement instance should
be initialized.
The rearrangement datasets are structured as large dictionaries of the form
```python
{
SCENE_NAME: [
{
DATA_DEFINING_A_SINGLE_REARRANGE_TASK
},
...
],
...
}
```
This `RearrangeTaskSpec` is used to encapsulate the `DATA_DEFINING_A_SINGLE_REARRANGE_TASK`
which allows us to use autocomplete and type checking rather than passing around raw dictionaries.
# Attributes
scene : A string specifying the AI2-THOR scene (e.g "FloorPlan18") in which to run the rearrange task.
    stage : A string specifying the type of instance this data corresponds to (e.g. "train", "val", "test", etc.)
agent_position : A Dict[str, float] specifying the "x", "y", and "z" coordinates of the agent's starting position.
    agent_rotation : A float specifying the agent's starting rotation (in degrees).
openable_data : A sequence of dictionaries specifying the degree to which certain objects in the scene should be open
        in the walkthrough and unshuffle phases. E.g. the openness of a particular cabinet might be specified by the
dictionary:
```python
{
"name": "Cabinet_a8b4237f",
"objectName": "Cabinet_a8b4237f",
"objectId": "Cabinet|+01.31|+02.46|+04.36",
"start_openness": 0.6170539671128578,
"target_openness": 0.8788923191809455
}
```
        where `start_openness` is the degree to which the cabinet is open at the start of the unshuffle phase.
starting_poses : A sequence of dictionaries specifying the poses of all pickupable objects at the start
of the unshuffle phase. E.g. one such dictionary might look like:
```python
{
"name": "Bowl_803d17c0",
"objectName": "Bowl_803d17c0",
"position": {
"x": -0.5572903156280518,
"y": 0.8256161212921143,
"z": 6.25293493270874,
},
"rotation": {
"x": 359.9241943359375,
"y": -0.00041645264718681574,
"z": 0.004868899006396532,
},
}
```
target_poses : Similar to `starting_poses` but specifying the poses of objects during the walkthrough phase.
    runtime_sample : If `True`, then this task is meant to be randomly specified at runtime. That is, the above fields
(except for the `scene`) are to be left as `None` and the RearrangeTHOREnvironment will randomly generate
them instead (this may be slow).
runtime_data : A Dict[str, Any] into which the `RearrangeTHOREnvironment` may cache data for efficiency.
metrics : Any additional metrics that might be associated with a task specification. For instance, the
rearrangement dataset dictionaries include metrics such as `open_diff_count` which records the number
        of objects that differ in openness at the start of the walkthrough/unshuffle phases.
"""
def __init__(
self,
scene: str,
stage: Optional[str] = None,
agent_position: Optional[Dict[str, float]] = None,
agent_rotation: Optional[float] = None,
openable_data: Optional[Sequence[Dict[str, Any]]] = None,
starting_poses: Optional[Sequence[Dict[str, Any]]] = None,
target_poses: Optional[Sequence[Dict[str, Any]]] = None,
runtime_sample: bool = False,
runtime_data: Optional[Dict[str, Any]] = None,
**metrics,
):
"""Instantiate a `RearrangeTaskSpec` object."""
self.scene = scene
self.stage = stage
self.agent_position = agent_position
self.agent_rotation = agent_rotation
self.openable_data = openable_data
self.starting_poses = starting_poses
self.target_poses = target_poses
self.runtime_sample = runtime_sample
self.runtime_data: Dict[str, Any] = (
runtime_data if runtime_data is not None else {}
)
self.metrics = metrics
def __str__(self):
"""String representation of a `RearrangeTaskSpec` object."""
return pprint.pformat(self.__dict__)
@property
def unique_id(self):
if self.runtime_sample:
raise NotImplementedError("Cannot create a unique id for a runtime sample.")
return f"{self.scene}__{self.stage}__{self.metrics['index']}"
class RearrangeTHOREnvironment:
"""Custom AI2-THOR Controller for the task of object rearrangement.
# Attributes
mode : The current mode of rearrangement. Takes one of the values of RearrangeMode
(RearrangeMode.SNAP or RearrangeMode.MANIPULATE).
force_cache_reset : Whether or not we should force cache resets when using the `drop_held_object_with_snap` action.
Setting this value to `False` results in higher FPS at the expense of possibly having `drop_held_object_with_snap`
work/fail when it shouldn't. Setting `force_cache_reset` to `True` is recommended during validation/testing.
obj_name_to_walkthrough_start_pose : Dictionary mapping AI2-THOR object names to their poses (positions & rotations)
before they were shuffled (i.e. what the agent sees at the start of the walkthrough phase).
This will be changed after every call to `reset`.
obj_name_to_unshuffle_start_pose : Same as `obj_name_to_walkthrough_start_pose` but mapping object names to their poses (positions &
        rotations) just after they were shuffled (i.e. what the agent sees at the start of the unshuffle phase).
current_task_spec : A `RearrangeTaskSpec` object specifying the current rearrangement task details.
controller : A ai2thor controller used to execute all the actions.
shuffle_called : `True` if the objects have been shuffled so that we're in the `unshuffle` phase. Otherwise `False`.
"""
def __init__(
self,
mode: RearrangeMode = RearrangeMode.SNAP,
force_cache_reset: Optional[bool] = None,
controller_kwargs: Optional[Dict[str, Any]] = None,
enhanced_physics_determinism: bool = True,
):
"""Initialize a new rearrangement controller.
# Parameters
mode : See description of this class' attributes.
controller_kwargs : Dictionary specifying any keyword arguments to be passed
when initializing the `ai2thor.controller.Controller` (e.g. width/height).
"""
if ai2thor.__version__ is not None: # Allows for custom THOR installs
if (
ai2thor.__version__ not in ["0.0.1", None]
and (not ai2thor.__version__.startswith("0+"))
and version.parse(ai2thor.__version__)
< version.parse(REQUIRED_THOR_VERSION)
):
raise ImportError(
f"To run the rearrangment baseline experiments you must use"
f" ai2thor version {REQUIRED_THOR_VERSION} or higher."
)
# Saving attributes
if mode == RearrangeMode.SNAP:
assert (
force_cache_reset is not None
), "When in RearrangeMode.SNAP mode you must specify a value for `force_cache_reset`"
else:
force_cache_reset = force_cache_reset
self.force_cache_reset = force_cache_reset
self.mode = mode
self._controller_kwargs = {} if controller_kwargs is None else controller_kwargs
self._enhanced_physics_determinism = enhanced_physics_determinism
self.physics_step_kwargs = {}
if self._enhanced_physics_determinism:
self.physics_step_kwargs = {
"actionSimulationSeconds": 0.26,
"fixedDeltaTime": 0.02,
}
# Cache of where objects can be interacted with
self._interactable_positions_cache = ObjectInteractablePostionsCache()
# Object poses at start of walkthrough and unshuffle phases.
# Reset after every call to reset and shuffle respectively.
self.obj_name_to_walkthrough_start_pose: Optional[Dict[str, Dict]] = None
self.obj_name_to_unshuffle_start_pose: Optional[Dict[str, Dict]] = None
self._cached_poses: Optional[Tuple[List, List, List]] = None
# Current task specification
self.current_task_spec: Optional[RearrangeTaskSpec] = None
# Caches of starting unshuffle/walkthrough object poses and other information. Reset on every call to reset
self._sorted_and_extracted_walkthrough_start_poses: Optional[List] = None
self._sorted_and_extracted_unshuffle_start_poses: Optional[List] = None
self._have_warned_about_mismatch = False
self._agent_signals_done = False # Also reset on `shuffle()`
# instance masks now not supported. But an Exception would be thrown if
# `mode == RearrangeMode.MANIPULATE` and render_instance_masks is True, since masks are
# only available on RearrangeMode.SNAP mode.
self._render_instance_masks: bool = False
if self.mode == RearrangeMode.MANIPULATE and self._render_instance_masks:
raise Exception(
"render_instance_masks is only available on RearrangeMode.SNAP mode."
)
# local thor controller to execute all the actions
self.controller = self.create_controller()
# always begin in walkthrough phase
self.shuffle_called = False
def create_controller(self):
"""Create the ai2thor controller."""
assert ("width" in self._controller_kwargs) == (
"height" in self._controller_kwargs
), "Either controller_kwargs must contain either both of width/height or neither."
self._controller_kwargs["width"] = self._controller_kwargs.get("width", 300)
self._controller_kwargs["height"] = self._controller_kwargs.get("height", 300)
controller = ai2thor.controller.Controller(
**{
"scene": "FloorPlan17_physics",
# "server_class": ai2thor.fifo_server.FifoServer,
# "server_class": ai2thor.wsgi_server.WsgiServer, # Possibly useful in debugging
**self._controller_kwargs,
},
)
return controller
@property
def held_object(self) -> Optional[Dict[str, Any]]:
"""Return the data corresponding to the object held by the agent (if
any)."""
with include_object_data(self.controller):
metadata = self.controller.last_event.metadata
if len(metadata["inventoryObjects"]) == 0:
return None
assert len(metadata["inventoryObjects"]) <= 1
held_obj_id = metadata["inventoryObjects"][0]["objectId"]
return next(o for o in metadata["objects"] if o["objectId"] == held_obj_id)
def get_agent_location(self) -> Dict[str, Union[float, int, bool]]:
"""Returns the agent's current location.
# Returns
A dictionary of the form
```python
{
"x": X_POSITION_IN_SPACE, # float
"y": Y_POSITION_IN_SPACE, # float
"z": Z_POSITION_IN_SPACE, # float
"rotation": AGENTS_ROTATION_ABOUT_THE_Y_AXIS_IN_DEGREES, # float or int
"horizon": AGENTS_CAMERA_ANGLE_IN_DEGREES, # float (0 degrees is horizontal)
"standing": WHETHER_OR_NOT_THE_AGENT_IS_STANDING, # boolean
}
```
"""
metadata = self.controller.last_event.metadata
return {
"x": metadata["agent"]["position"]["x"],
"y": metadata["agent"]["position"]["y"],
"z": metadata["agent"]["position"]["z"],
"rotation": metadata["agent"]["rotation"]["y"],
"horizon": metadata["agent"]["cameraHorizon"],
"standing": metadata.get("isStanding", metadata["agent"].get("isStanding")),
}
@property
def observation(self) -> Tuple[np.array, Optional[np.array]]:
"""Return the current (RGB, depth, Optional[instance masks]) frames.
# Returns
A tuple containing a
* RGB frame is of shape (height)x(width)x3 with integer entries in [0:255].
* depth frame is of shape (height)x(width) with unscaled entries representing the
meter distance from the agent to the pixel. This will be `None` if the controller_kwargs
passed to the initializer did not specify that depth images should be returned by AI2-THOR.
"""
rgb = self.last_event.frame
depth = (
self.last_event.depth_frame
if hasattr(self.last_event, "depth_frame")
else None
)
return rgb, depth
@lazy_property
def walkthrough_action_space(self) -> RearrangeActionSpace:
"""Return the RearrangeActionSpace for the walkthrough phase based on
the RearrangeMode."""
# Walkthrough actions
actions: Dict[Callable, Dict[str, BoundedFloat]] = {
self.move_ahead: {},
self.move_right: {},
self.move_left: {},
self.move_back: {},
self.rotate_right: {},
self.rotate_left: {},
self.stand: {},
self.crouch: {},
self.look_up: {},
self.look_down: {},
self.done: {},
}
return RearrangeActionSpace(actions)
@lazy_property
def unshuffle_action_space(self) -> RearrangeActionSpace:
"""Return the RearrangeActionSpace for the unshuffle phase based on the
RearrangeMode."""
actions = {**self.walkthrough_action_space.actions}
# additional shuffle allowed actions
actions.update(
{
self.open_object: {
"x": BoundedFloat(low=0, high=1),
"y": BoundedFloat(low=0, high=1),
"openness": BoundedFloat(low=0, high=1),
},
self.pickup_object: {
"x": BoundedFloat(low=0, high=1),
"y": BoundedFloat(low=0, high=1),
},
self.push_object: {
"x": BoundedFloat(low=0, high=1),
"y": BoundedFloat(low=0, high=1),
"rel_x_force": BoundedFloat(low=-0.5, high=0.5),
"rel_y_force": BoundedFloat(low=-0.5, high=0.5),
"rel_z_force": BoundedFloat(low=-0.5, high=0.5),
"force_magnitude": BoundedFloat(low=0, high=1),
},
self.move_held_object: {
"x_meters": BoundedFloat(low=-0.5, high=0.5),
"y_meters": BoundedFloat(low=-0.5, high=0.5),
"z_meters": BoundedFloat(low=-0.5, high=0.5),
},
self.rotate_held_object: {
"x": BoundedFloat(low=-0.5, high=0.5),
"y": BoundedFloat(low=-0.5, high=0.5),
"z": BoundedFloat(low=-0.5, high=0.5),
},
self.drop_held_object: {},
}
)
if self.mode == RearrangeMode.SNAP:
actions.update({self.drop_held_object_with_snap: {}})
return RearrangeActionSpace(actions)
@property
def action_space(self) -> RearrangeActionSpace:
"""Return the RearrangeActionSpace based on the RearrangeMode and
whether we are in the unshuffle phase."""
if self.shuffle_called:
return self.unshuffle_action_space
else:
return self.walkthrough_action_space
def open_object(self, x: float, y: float, openness: float) -> bool:
"""Open the object corresponding to x/y to openness.
The action will not be successful if the specified openness would
cause a collision or if the object at x/y is not openable.
# Parameters
x : (float, min=0.0, max=1.0) horizontal percentage from the last frame
that the target object is located.
y : (float, min=0.0, max=1.0) vertical percentage from the last frame
that the target object is located.
# Returns
`True` if the action was successful, otherwise `False`.
"""
# If an object is already open, THOR doesn't support changing
        # its openness without first closing it. So we simply try to first
# close the object before reopening it.
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.open_object,
thor_action="OpenObject",
error_message=(
"x/y/openness must be in [0:1] and we must be in the unshuffle phase."
),
x=x,
y=y,
openness=openness,
default_thor_kwargs=self.physics_step_kwargs,
)
def pickup_object(self, x: float, y: float) -> bool:
"""Pick up the object corresponding to x/y.
The action will not be successful if the object at x/y is not
pickupable.
# Parameters
x : (float, min=0.0, max=1.0) horizontal percentage from the last frame
that the target object is located.
y : (float, min=0.0, max=1.0) vertical percentage from the last frame
that the target object is located.
# Returns
`True` if the action was successful, otherwise `False`.
"""
if len(self.last_event.metadata["inventoryObjects"]) != 0:
return False
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.pickup_object,
thor_action="PickupObject",
error_message="x/y must be in [0:1] and we must be in the unshuffle phase.",
x=x,
y=y,
default_thor_kwargs=self.physics_step_kwargs,
)
def push_object(
self,
x: float,
y: float,
rel_x_force: float,
rel_y_force: float,
rel_z_force: float,
force_magnitude: float,
) -> bool:
"""Push an object along a surface.
The action will not be successful if the object at x/y is not moveable.
# Parameters
x : (float, min=0.0, max=1.0) horizontal percentage from the last frame
that the target object is located.
y : (float, min=0.0, max=1.0) vertical percentage from the last frame
that the target object is located.
rel_x_force : (float, min=-0.5, max=0.5) amount of relative force
applied along the x axis.
rel_y_force : (float, min=-0.5, max=0.5) amount of relative force
applied along the y axis.
rel_z_force : (float, min=-0.5, max=0.5) amount of relative force
applied along the z axis.
force_magnitude : (float, min=0, max=1) relative amount of force
applied during this push action. Within AI2-THOR, the force is
rescaled to be between 0 and 50 newtons, which is estimated to
sufficiently move all pickupable objects.
# Returns
`True` if the action was successful, otherwise `False`.
"""
def preprocess_kwargs(kwargs: Dict[str, Any]):
direction = {}
for k in ["x", "y", "z"]:
force_key = f"rel_{k}_force"
direction[k] = kwargs[force_key]
del kwargs[force_key]
kwargs["direction"] = direction
kwargs["force_magnitude"] = 50 * kwargs["force_magnitude"]
# TODO: is this really the definition of success we want?
return execute_action(
controller=self.controller,
action_space=self.action_space,
            action_fn=self.push_object,
thor_action="TouchThenApplyForce",
error_message="Error in call to pickup object."
" Must be in unshuffle phase (i.e., call shuffle()),"
" x,y,force_magnitude must be in [0:1],"
" and rel_(x/y/z)_force must be in [-0.5:0.5]",
default_thor_kwargs=dict(handDistance=1.5, **self.physics_step_kwargs),
preprocess_kwargs_inplace=preprocess_kwargs,
x=x,
y=y,
rel_x_force=rel_x_force,
rel_y_force=rel_y_force,
rel_z_force=rel_z_force,
            force_magnitude=force_magnitude,
)
def move_ahead(self) -> bool:
"""Move the agent ahead from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_ahead,
thor_action="MoveAhead",
default_thor_kwargs=self.physics_step_kwargs,
)
def move_back(self) -> bool:
"""Move the agent back from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_back,
thor_action="MoveBack",
default_thor_kwargs=self.physics_step_kwargs,
)
def move_right(self) -> bool:
"""Move the agent right from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_right,
thor_action="MoveRight",
default_thor_kwargs=self.physics_step_kwargs,
)
def move_left(self) -> bool:
"""Move the agent left from its facing direction by 0.25 meters."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_left,
thor_action="MoveLeft",
default_thor_kwargs=self.physics_step_kwargs,
)
def rotate_left(self) -> bool:
"""Rotate the agent left from its facing direction."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.rotate_left,
thor_action="RotateLeft",
default_thor_kwargs=self.physics_step_kwargs,
)
def rotate_right(self) -> bool:
"""Rotate the agent left from its facing direction."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.rotate_right,
thor_action="RotateRight",
default_thor_kwargs=self.physics_step_kwargs,
)
def stand(self) -> bool:
"""Stand the agent from the crouching position."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.stand,
thor_action="Stand",
default_thor_kwargs=self.physics_step_kwargs,
)
def crouch(self) -> bool:
"""Crouch the agent from the standing position."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.crouch,
thor_action="Crouch",
default_thor_kwargs=self.physics_step_kwargs,
)
def look_up(self) -> bool:
"""Turn the agent's head and camera up by 30 degrees."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.look_up,
thor_action="LookUp",
default_thor_kwargs=self.physics_step_kwargs,
)
def look_down(self) -> bool:
"""Turn the agent's head and camera down by 30 degrees."""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.look_down,
thor_action="LookDown",
default_thor_kwargs=self.physics_step_kwargs,
)
def done(self) -> bool:
"""Agent's signal that it's completed its current rearrangement phase.
Note that we do not automatically switch from the walkthrough
phase to the unshuffling phase, and vice-versa, that is up to
the user. This allows users to call .poses after the agent calls
done, and have it correspond to the current episode.
"""
self._agent_signals_done = True
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.done,
thor_action="Done",
)
def move_held_object(
self, x_meters: float, y_meters: float, z_meters: float
) -> bool:
"""Move the object in the agent's hand by the specified amount.
The maximum magnitude that the object
can move in one time step is 0.5 meters. If the calculated magnitude is
        above 0.5, its magnitude will be clipped to 0.5.
The action is successful in the case that the agent is holding an
object and moving the object by the specified amount does not bump
into an object.
# Parameters
x_meters : (float, min=-0.5, max=0.5) movement meters along the x-axis.
y_meters : (float, min=-0.5, max=0.5) movement meters along the y-axis.
z_meters : (float, min=-0.5, max=0.5) movement meters along the z-axis.
# Exceptions
In walkthrough phase. This method can only be called within the
unshuffle phase. The shuffle phase starts with controller.shuffle()
and ends with controller.reset().
"""
mag = math.sqrt(x_meters ** 2 + y_meters ** 2 + z_meters ** 2)
# clips the max value at MAX_HAND_METERS.
        if mag > MAX_HAND_METERS:
scale = MAX_HAND_METERS / mag
x_meters *= scale
y_meters *= scale
z_meters *= scale
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.move_held_object,
thor_action="MoveHandDelta",
updated_kwarg_names={"x_meters": "x", "y_meters": "y", "z_meters": "z"},
x_meters=x_meters,
y_meters=y_meters,
z_meters=z_meters,
default_thor_kwargs=self.physics_step_kwargs,
)
def rotate_held_object(self, x: float, y: float, z: float) -> bool:
"""Rotate the object in the agent's hand by the specified degrees.
The rotation parameters are scaled linearly to put rotations
        between [-90:90] degrees. The action is only successful if the agent is holding an object.
# Parameters
x : (float, min=-0.5, max=0.5) rotation along the x-axis.
y : (float, min=-0.5, max=0.5) rotation along the y-axis.
z : (float, min=-0.5, max=0.5) rotation along the z-axis.
"""
def rescale_xyz(kwargs: Dict[str, Any]):
for k in ["x", "y", "z"]:
kwargs[k] = 180 * kwargs[k]
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.rotate_held_object,
thor_action="RotateHand",
preprocess_kwargs_inplace=rescale_xyz,
x=x,
y=y,
z=z,
default_thor_kwargs=self.physics_step_kwargs,
)
def drop_held_object(self) -> bool:
"""Drop the object in the agent's hand.
        The action is only successful if the agent is holding an object.
"""
return execute_action(
controller=self.controller,
action_space=self.action_space,
action_fn=self.drop_held_object,
thor_action="DropHandObject",
default_thor_kwargs={"autoSimulation": False, **self.physics_step_kwargs,},
)
def drop_held_object_with_snap(self) -> bool:
"""Drop the object in the agent's hand to the target position.
Exception is raised if shuffle has not yet been called on the current
        episode or the environment is not in RearrangeMode.SNAP mode.
For this action to work:
1. The agent must be within 1.5 meters from the goal object's
position, observed during the walkthrough phase.
2. The agent must be looking in the direction of where it was
located in the walkthrough phase.
Otherwise, the object will be placed in a visible receptacle or
if this also fails, it will be simply dropped.
# Returns
`True` if the drop was successful, otherwise `False`.
"""
if not self.shuffle_called:
raise Exception("Must be in unshuffle stage.")
if not self.mode == RearrangeMode.SNAP:
raise Exception("Must be in RearrangeMode.SNAP mode.")
# round positions to 2 decimals
DEC = 2
with include_object_data(self.controller):
event = self.controller.last_event
held_obj = self.held_object
if held_obj is None:
return False
            # When dropping an object, make it breakable.
self.controller.step(
"MakeObjectBreakable", objectId=self.held_object["objectId"]
)
agent = event.metadata["agent"]
goal_pose = self.obj_name_to_walkthrough_start_pose[held_obj["name"]]
goal_pos = goal_pose["position"]
goal_rot = goal_pose["rotation"]
good_positions_to_drop_from = self._interactable_positions_cache.get(
scene_name=self.last_event.metadata["sceneName"],
obj={**held_obj, **{"position": goal_pos, "rotation": goal_rot},},
controller=self.controller,
force_cache_refresh=self.force_cache_reset, # Forcing cache resets when not training.
)
def position_to_tuple(position: Dict[str, float]):
return tuple(round(position[k], DEC) for k in ["x", "y", "z"])
agent_xyz = position_to_tuple(agent["position"])
agent_rot = (round(agent["rotation"]["y"] / 90) * 90) % 360
agent_standing = int(agent["isStanding"])
agent_horizon = round(agent["cameraHorizon"])
for valid_agent_pos in good_positions_to_drop_from:
# Checks if the agent is close enough to the target
# for the object to be snapped to the target location.
valid_xyz = position_to_tuple(valid_agent_pos)
valid_rot = (round(valid_agent_pos["rotation"] / 90) * 90) % 360
valid_standing = int(valid_agent_pos["standing"])
valid_horizon = round(valid_agent_pos["horizon"])
if (
valid_xyz == agent_xyz # Position
and valid_rot == agent_rot # Rotation
and valid_standing == agent_standing # Standing
and round(valid_horizon) == agent_horizon # Horizon
):
# Try a few locations near the target for robustness' sake
positions = [
{
"x": goal_pos["x"] + 0.001 * xoff,
"y": goal_pos["y"] + 0.001 * yoff,
"z": goal_pos["z"] + 0.001 * zoff,
}
for xoff in [0, -1, 1]
for zoff in [0, -1, 1]
for yoff in [0, 1, 2]
]
self.controller.step(
action="TeleportObject",
objectId=held_obj["objectId"],
rotation=goal_rot,
positions=positions,
forceKinematic=True,
allowTeleportOutOfHand=True,
makeUnbreakable=True,
)
break
if self.held_object is None:
# If we aren't holding the object anymore, then let's check if it
# was placed into the right location.
if self.are_poses_equal(
goal_pose=get_pose_info(goal_pose),
cur_pose=next(
get_pose_info(o)
for o in self.last_event.metadata["objects"]
if o["name"] == goal_pose["name"]
),
treat_broken_as_unequal=True,
):
return True
else:
return False
# We couldn't teleport the object to the target location, let's try placing it
# in a visible receptacle.
possible_receptacles = [
o for o in event.metadata["objects"] if o["visible"] and o["receptacle"]
]
possible_receptacles = sorted(
possible_receptacles, key=lambda o: (o["distance"], o["objectId"])
)
for possible_receptacle in possible_receptacles:
self.controller.step(
action="PutObject",
objectId=possible_receptacle["objectId"],
**self.physics_step_kwargs,
)
if self.controller.last_event.metadata["lastActionSuccess"]:
break
# We failed to place the object into a receptacle, let's just drop it.
if len(possible_receptacles) == 0 or (
not self.controller.last_event.metadata["lastActionSuccess"]
):
self.controller.step(
"DropHeldObjectAhead",
forceAction=True,
autoSimulation=False,
**{**self.physics_step_kwargs, "actionSimulationSeconds": 1.5},
)
return False
@property
def last_event(self) -> ai2thor.server.Event:
"""Return the AI2-THOR Event from the most recent controller action."""
return self.controller.last_event
@property
def scene(self) -> str:
"""Return the current AI2-THOR scene name."""
return self.controller.last_event.metadata["sceneName"].replace("_physics", "")
@staticmethod
def compare_poses(
goal_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
cur_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
"""Compare two object poses and return where they differ.
The `goal_pose` must not have the object as broken.
# Parameters
goal_pose : The goal pose of the object.
cur_pose : The current pose of the object.
# Returns
        A dictionary with the following keys and values
* "broken" - `True` if the `cur_pose` is broken in which case all below values are `None`, otherwise `False`.
* "iou" - The IOU overlap between the two object poses (min==0, max==1) using their 3d bounding boxes. Computed
using an approximate sampling procedure. If the `position_dist` (see below) is <0.01 and the `rotation_dist`
is <10.0 then the IOU computation is short circuited and a value of 1 is returned.
* "openness_diff" - `None` if the object types are not openable. Otherwise this equals the absolute difference
between the `openness` values of the two poses.
* "position_dist" - The euclidean distance between the positions of the center points of the two poses.
* "rotation_dist" - The angle (in degrees) between the two poses. See the
`IThorEnvironment.angle_between_rotations` function for more information.
"""
if isinstance(goal_pose, Sequence):
assert isinstance(cur_pose, Sequence)
return [
RearrangeTHOREnvironment.compare_poses(goal_pose=gp, cur_pose=cp)
for gp, cp in zip(goal_pose, cur_pose)
]
assert goal_pose["type"] == cur_pose["type"]
assert not goal_pose["broken"]
if cur_pose["broken"]:
return {
"broken": True,
"iou": None,
"openness_diff": None,
"position_dist": None,
"rotation_dist": None,
}
if goal_pose["bounding_box"] is None and cur_pose["bounding_box"] is None:
iou = None
position_dist = None
rotation_dist = None
else:
position_dist = IThorEnvironment.position_dist(
goal_pose["position"], cur_pose["position"]
)
rotation_dist = IThorEnvironment.angle_between_rotations(
goal_pose["rotation"], cur_pose["rotation"]
)
if position_dist < 1e-2 and rotation_dist < 10.0:
iou = 1.0
else:
try:
iou = iou_box_3d(
goal_pose["bounding_box"], cur_pose["bounding_box"]
)
except Exception as _:
get_logger().warning(
"Could not compute IOU, will assume it was 0. Error during IOU computation:"
f"\n{traceback.format_exc()}"
)
iou = 0
if goal_pose["openness"] is None and cur_pose["openness"] is None:
openness_diff = None
else:
openness_diff = abs(goal_pose["openness"] - cur_pose["openness"])
return {
"broken": False,
"iou": iou,
"openness_diff": openness_diff,
"position_dist": position_dist,
"rotation_dist": rotation_dist,
}
@classmethod
def pose_difference_energy(
cls,
goal_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
cur_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
min_iou: float = IOU_THRESHOLD,
open_tol: float = OPENNESS_THRESHOLD,
pos_barrier: float = POSITION_DIFF_BARRIER,
) -> Union[float, np.ndarray]:
"""Computes the energy between two poses.
The energy (taking values in [0:1]) between two poses provides a soft and holistic measure of how
far apart two poses are. If the energy is near 1 then the two poses are very dissimilar, if the energy
        is near 0 then the two poses are nearly equal.
# Parameters
goal_pose : The goal pose of the object.
cur_pose : The current pose of the object.
min_iou : As the IOU between the two poses increases between [0:min_iou] the contribution to the energy
        corresponding solely to the IOU decreases from 0.5 to 0 in a linear fashion.
open_tol: If the object is openable, then if the absolute openness difference is less than `open_tol`
the energy is 0. Otherwise the pose energy is 1.
pos_barrier: If two poses are separated by a large distance, we would like to decrease the energy as
the two poses are brought closer together. The `pos_barrier` controls when this energy decrease begins,
namely at its default value of 2.0, the contribution of the distance to
the energy decreases linearly from 0.5 to 0 as the distance between the two poses decreases from
2 meters to 0 meters.
"""
if isinstance(goal_pose, Sequence):
assert isinstance(cur_pose, Sequence)
return np.array(
[
cls.pose_difference_energy(
goal_pose=p0,
cur_pose=p1,
min_iou=min_iou,
open_tol=open_tol,
pos_barrier=pos_barrier,
)
for p0, p1 in zip(goal_pose, cur_pose)
]
)
assert not goal_pose["broken"]
pose_diff = cls.compare_poses(goal_pose=goal_pose, cur_pose=cur_pose)
if pose_diff["broken"]:
return 1.0
if pose_diff["openness_diff"] is None or goal_pose["pickupable"]:
gbb = np.array(goal_pose["bounding_box"])
cbb = np.array(cur_pose["bounding_box"])
iou = pose_diff["iou"]
iou_energy = max(1 - iou / min_iou, 0)
if iou > 0:
position_dist_energy = 0.0
else:
min_pairwise_dist_between_corners = np.sqrt(
(
(
np.tile(gbb, (1, 8)).reshape(-1, 3)
- np.tile(cbb, (8, 1)).reshape(-1, 3)
)
** 2
).sum(1)
).min()
position_dist_energy = min(
min_pairwise_dist_between_corners / pos_barrier, 1.0
)
return 0.5 * iou_energy + 0.5 * position_dist_energy
else:
return 1.0 * (pose_diff["openness_diff"] > open_tol)
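    # Worked example for the energy above (numbers are illustrative): with the default min_iou of
    # 0.5, a pickupable object whose IOU with its goal pose is 0.25 gets
    # iou_energy = 1 - 0.25 / 0.5 = 0.5, and since the IOU is positive the position term is 0, so
    # the total energy is 0.5 * 0.5 + 0.5 * 0 = 0.25. If the boxes do not overlap at all (IOU == 0)
    # and the closest pair of corners is 1 meter apart with the default 2 meter pos_barrier, the
    # energy is 0.5 * 1 + 0.5 * (1 / 2) = 0.75.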
@classmethod
def are_poses_equal(
cls,
goal_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
cur_pose: Union[Dict[str, Any], Sequence[Dict[str, Any]]],
min_iou: float = 0.5,
open_tol: float = 0.2,
treat_broken_as_unequal: bool = False,
) -> Union[bool, np.ndarray]:
"""Determine if two object poses are equal (up to allowed error).
The `goal_pose` must not have the object as broken.
# Parameters
goal_pose : The goal pose of the object.
cur_pose : The current pose of the object.
min_iou : If the two objects are pickupable objects, they are considered equal if their IOU is `>=min_iou`.
open_tol: If the object is openable and not pickupable, then the poses are considered equal if the absolute
openness difference is less than `open_tol`.
treat_broken_as_unequal : If `False` an exception will be thrown if the `cur_pose` is broken. If `True`, then
if `cur_pose` is broken this function will always return `False`.
"""
if isinstance(goal_pose, Sequence):
assert isinstance(cur_pose, Sequence)
return np.array(
[
cls.are_poses_equal(
goal_pose=p0,
cur_pose=p1,
min_iou=min_iou,
open_tol=open_tol,
treat_broken_as_unequal=treat_broken_as_unequal,
)
for p0, p1 in zip(goal_pose, cur_pose)
]
)
assert not goal_pose["broken"]
if cur_pose["broken"]:
if treat_broken_as_unequal:
return False
else:
raise RuntimeError(
f"Cannot determine if poses of two objects are"
f" equal if one is broken object ({goal_pose} v.s. {cur_pose})."
)
pose_diff = cls.compare_poses(goal_pose=goal_pose, cur_pose=cur_pose)
return (pose_diff["iou"] is None or pose_diff["iou"] > min_iou) and (
pose_diff["openness_diff"] is None or pose_diff["openness_diff"] <= open_tol
)
@property
def all_rearranged_or_broken(self):
"""Return if every object is simultaneously broken or in its correct
pose.
The unshuffle agent can make no more progress on its task in the
        case that every object is either (1) in its correct
position or (2) broken so that it can never be placed in its
correct position. This function simply returns whether this is
the case.
"""
return all(
cp["broken"] or self.are_poses_equal(goal_pose=gp, cur_pose=cp)
for _, gp, cp in zip(*self.poses)
)
@property
def poses(
self,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]:
"""Return (unshuffle start, walkthrough start, current) pose for every
object in the scene.
Can only be called during the unshuffle phase.
# Returns
A Tuple of containing three ordered lists of object poses `(unshuffle_start_poses, walkthrough_start_poses, current_poses)`
such that, for `0 <= i < len(current_poses)`,
* `unshuffle_start_poses[i]` - corresponds to the pose of the ith object at the start of the unshuffle phase.
* `walkthrough_start_poses[i]` - corresponds to the pose of the ith object at the start of the walkthrough phase.
* `current_poses[i]` - corresponds to the pose of the ith object in the current environment.
        During the unshuffle phase it is commonly useful to compare `current_poses[i]` against `walkthrough_start_poses[i]`
to get a sense of the agent's progress towards placing the objects into their correct locations.
"""
# Ensure we are in the unshuffle phase.
if not self.shuffle_called:
raise Exception("shuffle() must be called before accessing poses")
# Get current object information
with include_object_data(self.controller):
obj_name_to_current_obj = self._obj_list_to_obj_name_to_pose_dict(
self.controller.last_event.metadata["objects"]
)
ordered_obj_names = list(self.obj_name_to_walkthrough_start_pose.keys())
current_objs_list = []
for obj_name in ordered_obj_names:
if obj_name not in obj_name_to_current_obj:
# obj_name_to_predicted_obj can have more objects than goal objects
# (breaking objects can generate new ones)
# The other way (more goal poses than predicted objs) is a problem, we will
# assume that the disappeared objects are broken
if not self._have_warned_about_mismatch:
# Don't want to warn many many times during single episode
self._have_warned_about_mismatch = True
usos = set(self.obj_name_to_unshuffle_start_pose.keys())
wsos = set(self.obj_name_to_walkthrough_start_pose.keys())
cos = set(obj_name_to_current_obj.keys())
get_logger().warning(
f"Mismatch between walkthrough start, unshuffle start, and current pose objects."
f"\nscene = {self.scene}, index {self.current_task_spec.metrics.get('index')}"
f"\nusos-wsos, wsos-usos = {usos - wsos}, {wsos - usos}"
f"\ncos-usos, usos-cos = {cos - usos}, {usos - cos}"
f"\ncos-wsos, wsos-cos = {cos - wsos}, {wsos - cos}"
)
obj_name_to_current_obj[obj_name] = {
**self.obj_name_to_walkthrough_start_pose[obj_name],
"isBroken": True,
"broken": True,
"position": None,
"rotation": None,
"openness": None,
}
current_objs_list.append(obj_name_to_current_obj[obj_name])
# We build a cache of object poses corresponding to the start of the walkthrough/unshuffle phases
# as these remain the same until the `reset` function is called.
if self._sorted_and_extracted_walkthrough_start_poses is None:
broken_obj_names = [
obj_name
for obj_name in ordered_obj_names
if self.obj_name_to_walkthrough_start_pose[obj_name]["isBroken"]
]
if len(broken_obj_names) != 0:
if not self.current_task_spec.runtime_sample:
# Don't worry about reporting broken objects when using
# a "runtime_sample" task spec as these types of things are
# more common.
get_logger().warning(
f"BROKEN GOAL OBJECTS!"
f"\nIn scene {self.scene}"
f"\ntask spec {self.current_task_spec}"
f"\nbroken objects {broken_obj_names}"
)
# If we find a broken goal object, we will simply pretend as though it was not
# broken. This means the agent can never succeed in unshuffling, this means it is
# possible that even a perfect agent will not succeed for some tasks.
for broken_obj_name in broken_obj_names:
self.obj_name_to_walkthrough_start_pose[broken_obj_name][
"isBroken"
] = False
self.obj_name_to_unshuffle_start_pose[broken_obj_name][
"isBroken"
] = False
ordered_obj_names = list(self.obj_name_to_walkthrough_start_pose.keys())
walkthrough_start_poses = tuple(
self.obj_name_to_walkthrough_start_pose[k] for k in ordered_obj_names
)
unshuffle_start_poses = tuple(
self.obj_name_to_unshuffle_start_pose[k] for k in ordered_obj_names
)
self._sorted_and_extracted_unshuffle_start_poses = get_pose_info(
unshuffle_start_poses
)
self._sorted_and_extracted_walkthrough_start_poses = get_pose_info(
walkthrough_start_poses
)
return (
self._sorted_and_extracted_unshuffle_start_poses,
self._sorted_and_extracted_walkthrough_start_poses,
get_pose_info(current_objs_list),
)
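    # A typical consumption pattern for `poses` (sketch; assumes `env` is a RearrangeTHOREnvironment
    # that has already entered the unshuffle phase):
    #
    #     unshuffle_start, walkthrough_start, current = env.poses
    #     energies = env.pose_difference_energy(goal_pose=walkthrough_start, cur_pose=current)
    #     # energies[i] is 0 when object i is back in its walkthrough pose and approaches 1 as it drifts.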
def _runtime_reset(
self, task_spec: RearrangeTaskSpec, force_axis_aligned_start: bool
):
"""Randomly initialize a scene at runtime.
Rather than using a predefined collection of object states,
randomly generate these positions at runtime. This may be useful for obtaining more
diverse training examples.
# Parameters
task_spec : The RearrangeTaskSpec for this runtime sample. `task_spec.runtime_sample` should be `True`.
force_axis_aligned_start : If `True`, this will force the agent's start rotation to be 'axis aligned', i.e.
to equal to 0, 90, 180, or 270 degrees.
"""
assert (
task_spec.runtime_sample
), "Attempted to use a runtime reset with a task spec which has a `False` `runtime_sample` property."
# For efficiency reasons, we do not completely reset the ai2thor scene (which
# will reset all object states to a default configuration and restore broken
# objects to their unbroken state) on every call to `_runtime_reset` if the scene name hasn't changed. Instead
# we reset the ai2thor scene only every 25 calls.
if (
task_spec.scene != self.scene
or self.current_task_spec.runtime_data["count"] >= 25
):
count = 1
self.controller.reset(task_spec.scene)
if self._enhanced_physics_determinism:
self.controller.step("PausePhysicsAutoSim")
remove_objects_until_all_have_identical_meshes(self.controller)
self.controller.step(
"InitialRandomSpawn", forceVisible=True, placeStationary=True,
)
md = self.controller.step("GetReachablePositions").metadata
assert md["lastActionSuccess"]
reachable_positions = md["actionReturn"]
else:
count = 1 + self.current_task_spec.runtime_data["count"]
reachable_positions = self.current_task_spec.runtime_data[
"reachable_positions"
]
self.current_task_spec = task_spec
self.current_task_spec.stage = "Unknown"
self.current_task_spec.runtime_data = {
"count": count,
"reachable_positions": reachable_positions,
}
with include_object_data(self.controller):
random.shuffle(reachable_positions)
# set agent position
max_teleports = min(10, len(reachable_positions))
for teleport_count, pos in enumerate(reachable_positions):
rot = 30 * random.randint(0, 11)
if force_axis_aligned_start:
rot = round_to_factor(30 * random.randint(0, 11), 90)
md = self.controller.step(
"TeleportFull",
**pos,
rotation={"x": 0, "y": rot, "z": 0},
horizon=0.0,
standing=True,
forceAction=teleport_count == max_teleports - 1,
).metadata
if md["lastActionSuccess"]:
break
else:
raise RuntimeError("No reachable positions?")
assert md["lastActionSuccess"]
self.current_task_spec.agent_position = pos
self.current_task_spec.agent_rotation = rot
self.current_task_spec.runtime_data["starting_objects"] = md["objects"]
def _task_spec_reset(
self, task_spec: RearrangeTaskSpec, force_axis_aligned_start: bool
):
"""Initialize a ai2thor environment from a (non-runtime sample) task
specification (i.e. an exhaustive collection of object poses for the
walkthrough and unshuffle phase).
After this call, the environment will be ready for use in the walkthrough phase.
# Parameters
task_spec : The RearrangeTaskSpec for this task. `task_spec.runtime_sample` should be `False`.
        force_axis_aligned_start : If `True`, this will force the agent's start rotation to be 'axis aligned', i.e.
            to equal 0, 90, 180, or 270 degrees.
"""
assert (
not task_spec.runtime_sample
), "`_task_spec_reset` requires that `task_spec.runtime_sample` is `False`."
self.current_task_spec = task_spec
self.controller.reset(self.current_task_spec.scene)
if self._enhanced_physics_determinism:
self.controller.step("PausePhysicsAutoSim")
if force_axis_aligned_start:
self.current_task_spec.agent_rotation = round_to_factor(
self.current_task_spec.agent_rotation, 90
)
# set agent position
pos = self.current_task_spec.agent_position
rot = {"x": 0, "y": self.current_task_spec.agent_rotation, "z": 0}
self.controller.step(
"TeleportFull",
**pos,
rotation=rot,
horizon=0.0,
standing=True,
forceAction=True,
)
# show object metadata
with include_object_data(self.controller):
# open objects
for obj in self.current_task_spec.openable_data:
# id is re-found due to possible floating point errors
current_obj_info = next(
l_obj
for l_obj in self.last_event.metadata["objects"]
if l_obj["name"] == obj["name"]
)
self.controller.step(
action="OpenObject",
objectId=current_obj_info["objectId"],
openness=obj["target_openness"],
forceAction=True,
**self.physics_step_kwargs,
)
# arrange walkthrough poses for pickupable objects
self.controller.step(
"SetObjectPoses",
objectPoses=self.current_task_spec.target_poses,
placeStationary=False,
enablePhysicsJitter=True,
forceRigidbodySleep=True,
skipMoveable=True,
)
assert self.controller.last_event.metadata["lastActionSuccess"]
def reset(
self, task_spec: RearrangeTaskSpec, force_axis_aligned_start: bool = False,
) -> None:
"""Reset the environment with respect to the new task specification.
The environment will start in the walkthrough phase.
# Parameters
task_spec : The `RearrangeTaskSpec` defining environment state.
        force_axis_aligned_start : If `True`, this will force the agent's start rotation to be 'axis aligned', i.e.
            to equal 0, 90, 180, or 270 degrees.
"""
if task_spec.runtime_sample:
self._runtime_reset(
task_spec=task_spec, force_axis_aligned_start=force_axis_aligned_start
)
else:
self._task_spec_reset(
task_spec=task_spec, force_axis_aligned_start=force_axis_aligned_start,
)
self.shuffle_called = False
self.obj_name_to_walkthrough_start_pose = self._obj_list_to_obj_name_to_pose_dict(
self.last_event.metadata["objects"]
)
self._have_warned_about_mismatch = False
self._sorted_and_extracted_walkthrough_start_poses = None
self._sorted_and_extracted_unshuffle_start_poses = None
self._agent_signals_done = False
def _runtime_shuffle(self):
"""Randomly shuffle objects in the environment to start the unshuffle
phase.
Also resets the agent's position to its start position.
"""
assert (not self.shuffle_called) and self.current_task_spec.runtime_sample
task_spec = self.current_task_spec
# set agent position
pos = task_spec.agent_position
rot = {"x": 0, "y": task_spec.agent_rotation, "z": 0}
self.controller.step(
"TeleportFull",
**pos,
rotation=rot,
horizon=0.0,
standing=True,
forceAction=True,
)
# Randomly shuffle a subset of objects.
nobjects_to_move = random.randint(1, 5)
pickupable = [
o for o in task_spec.runtime_data["starting_objects"] if o["pickupable"]
]
random.shuffle(pickupable)
pickupable.sort(
key=lambda x: 1 * (x["objectType"] in OBJECT_TYPES_TO_NOT_MOVE),
reverse=True,
)
objects_to_not_move = pickupable[:-nobjects_to_move]
object_ids_not_to_move = [o["objectId"] for o in objects_to_not_move]
object_ids_not_to_move.extend(
get_object_ids_to_not_move_from_object_types(
controller=self.controller, object_types=OBJECT_TYPES_TO_NOT_MOVE,
)
)
self.controller.step(
"InitialRandomSpawn",
excludedObjectIds=object_ids_not_to_move,
forceVisible=True,
placeStationary=True,
)
# Randomly open some subset of objects.
num_objects_to_open = random.randint(0, 1)
openable_objects = [
o
for o in self.last_event.metadata["objects"]
if o["openable"] and not o["pickupable"]
]
random.shuffle(openable_objects)
open_objs(
objects_to_open=openable_objects[:num_objects_to_open],
controller=self.controller,
)
self.current_task_spec.runtime_data[
"target_objects"
] = self.last_event.metadata["objects"]
def _task_spec_shuffle(self, reset: bool = False):
"""Shuffle objects in the environment to start the unshuffle phase
using the current task specification.
Also resets the agent's position to its start position.
"""
assert not (self.current_task_spec.runtime_sample or self.shuffle_called)
task_spec = self.current_task_spec
# TODO: No need to reset every time right?
if reset:
self.controller.reset(self.scene)
if self._enhanced_physics_determinism:
self.controller.step("PausePhysicsAutoSim")
# set agent position
pos = task_spec.agent_position
rot = {"x": 0, "y": task_spec.agent_rotation, "z": 0}
self.controller.step(
"TeleportFull",
**pos,
rotation=rot,
horizon=0.0,
standing=True,
forceAction=True,
)
# open objects
with include_object_data(self.controller):
for obj in task_spec.openable_data:
# id is re-found due to possible floating point errors
current_obj_info = next(
l_obj
for l_obj in self.last_event.metadata["objects"]
if l_obj["name"] == obj["name"]
)
self.controller.step(
action="OpenObject",
objectId=current_obj_info["objectId"],
openness=obj["start_openness"],
forceAction=True,
**(
self.physics_step_kwargs
if obj is task_spec.openable_data[-1]
else {}
),
)
# arrange unshuffle start poses for pickupable objects
self.controller.step(
"SetObjectPoses",
objectPoses=task_spec.starting_poses,
placeStationary=False,
enablePhysicsJitter=True,
forceRigidbodySleep=True,
skipMoveable=True,
)
assert self.controller.last_event.metadata["lastActionSuccess"]
def shuffle(self, require_reset: bool = False):
"""Shuffle objects in the environment to start the unshuffle phase."""
assert not self.shuffle_called
runtime_sample = self.current_task_spec.runtime_sample
if runtime_sample:
self._runtime_shuffle()
else:
self._task_spec_shuffle(reset=require_reset)
# Save object metadata
with include_object_data(self.controller):
self.obj_name_to_unshuffle_start_pose = self._obj_list_to_obj_name_to_pose_dict(
self.last_event.metadata["objects"]
)
if len(self.obj_name_to_unshuffle_start_pose) != len(
self.obj_name_to_walkthrough_start_pose
):
if runtime_sample or require_reset:
walkthrough_start_obj_names = set(
self.obj_name_to_walkthrough_start_pose.keys()
)
unshuffle_start_obj_names = set(
self.obj_name_to_unshuffle_start_pose.keys()
)
raise PoseMismatchError(
"Irrecoverable difference between walkthrough and unshuffle phase objects."
f"\ng-i, i-g = {walkthrough_start_obj_names - unshuffle_start_obj_names},"
f" {unshuffle_start_obj_names - walkthrough_start_obj_names}"
)
else:
self.shuffle(require_reset=True)
self.shuffle_called = True
self._agent_signals_done = False
@staticmethod
def _obj_list_to_obj_name_to_pose_dict(
objects: List[Dict[str, Any]]
) -> OrderedDict:
"""Helper function to transform a list of object data dicts into a
dictionary."""
objects = [
o
for o in objects
if o["openable"] or o.get("objectOrientedBoundingBox") is not None
]
d = OrderedDict(
(o["name"], o) for o in sorted(objects, key=lambda x: x["name"])
)
assert len(d) == len(objects)
return d
def stop(self):
"""Terminate the current AI2-THOR session."""
try:
self.controller.stop()
except Exception as _:
pass
def __del__(self):
self.stop()
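# Illustrative usage sketch (not part of the original file). The constructor
# arguments and the way a `RearrangeTaskSpec` is obtained are assumptions here;
# in the baselines both are handled by `RearrangeTaskSampler` (rearrange/tasks.py):
#
#     env = RearrangeTHOREnvironment(...)   # hypothetical construction
#     task_spec = ...                       # a `RearrangeTaskSpec`, e.g. produced by a task sampler
#     env.reset(task_spec=task_spec)        # walkthrough phase: objects are in their target poses
#     env.shuffle()                         # unshuffle phase: objects are moved/opened
#     # The pose-extraction property defined earlier in this class returns, in order,
#     # the unshuffle start poses, the walkthrough start poses, and the current poses.
#     env.stop()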
| ai2thor-rearrangement-main | rearrange/environment.py |
from typing import cast, Dict, Any
import torch
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
class MaskedPPO(AbstractActorCriticLoss):
"""Compute the PPO loss where specified by a mask.
# Attributes
mask_uuid : A string specifying the sensor UUID to use for masking. The PPO loss will only
be computed for those steps where this mask equals 1.
"""
def __init__(
self, mask_uuid: str, ppo_params: Dict[str, Any],
):
"""Initializer.
# Parameters
mask_uuid : A string specifying the sensor UUID to use for masking. The PPO loss will only
be computed for those steps where this mask equals 1.
ppo_params : A dictionary containing keyword arguments for the ppo loss. See the `PPO` class
for what arguments are available.
"""
super().__init__()
self.mask_uuid = mask_uuid
self._ppo_loss = PPO(**ppo_params)
def loss( # type: ignore
self,
step_count: int,
batch: ObservationType,
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
mask = batch["observations"][self.mask_uuid].float()
denominator = mask.sum().item()
losses_per_step, _ = self._ppo_loss.loss_per_step(
step_count=step_count, batch=batch, actor_critic_output=actor_critic_output,
)
losses_per_step["entropy"] = (
losses_per_step["entropy"][0].unsqueeze(-1),
losses_per_step["entropy"][1],
)
losses = {
key: ((loss * mask).sum() / max(denominator, 1), weight)
for (key, (loss, weight)) in losses_per_step.items()
}
total_loss = sum(
loss * weight if weight is not None else loss
for loss, weight in losses.values()
)
if denominator == 0:
losses_to_record = {}
else:
losses_to_record = {
"ppo_total": cast(torch.Tensor, total_loss).item(),
**{key: loss.item() for key, (loss, _) in losses.items()},
}
return (
total_loss,
losses_to_record,
)
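# Illustrative usage sketch (not part of the original file): constructing this loss
# so that PPO is applied only at walkthrough-phase steps, mirroring how the two-phase
# baseline configs use it (see two_phase_rgb_ppowalkthrough_ilunshuffle.py below):
#
#     from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
#     from allenact.utils.experiment_utils import LinearDecay
#
#     walkthrough_ppo_loss = MaskedPPO(
#         mask_uuid="in_walkthrough_phase",
#         ppo_params=dict(clip_decay=LinearDecay(int(75e6)), **PPOConfig),
#     )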
| ai2thor-rearrangement-main | rearrange/losses.py |
import copy
import platform
from abc import abstractmethod
from typing import Optional, List, Sequence, Dict, Any, Tuple
import ai2thor.platform
import gym.spaces
import stringcase
import torch
import torchvision.models
from torch import nn, cuda, optim
from torch.optim.lr_scheduler import LambdaLR
import datagen.datagen_utils as datagen_utils
from allenact.base_abstractions.experiment_config import (
ExperimentConfig,
MachineParams,
split_processes_onto_devices,
)
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite, Sensor, ExpertActionSensor
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.utils.experiment_utils import TrainingPipeline, LinearDecay, Builder
from allenact.utils.misc_utils import partition_sequence, md5_hash_str_as_int
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_sensors import (
BinnedPointCloudMapTHORSensor,
SemanticMapTHORSensor,
)
from allenact_plugins.ithor_plugin.ithor_util import get_open_x_displays
from rearrange.baseline_models import (
RearrangeActorCriticSimpleConvRNN,
ResNetRearrangeActorCriticRNN,
)
from rearrange.constants import (
OBJECT_TYPES_WITH_PROPERTIES,
THOR_COMMIT_ID,
)
from rearrange.environment import RearrangeMode
class RearrangeBaseExperimentConfig(ExperimentConfig):
# Task parameters
MAX_STEPS = {"walkthrough": 250, "unshuffle": 500}
REQUIRE_DONE_ACTION = True
FORCE_AXIS_ALIGNED_START = True
RANDOMIZE_START_ROTATION_DURING_TRAINING = False
# Environment parameters
REARRANGE_ENV_KWARGS = dict(mode=RearrangeMode.SNAP,)
SCREEN_SIZE = 224
THOR_CONTROLLER_KWARGS = {
"rotateStepDegrees": 90,
"snapToGrid": True,
"quality": "Very Low",
"width": SCREEN_SIZE,
"height": SCREEN_SIZE,
"commit_id": THOR_COMMIT_ID,
"fastActionEmit": True,
}
INCLUDE_OTHER_MOVE_ACTIONS = True
# Training parameters
TRAINING_STEPS = int(75e6)
SAVE_INTERVAL = int(1e6)
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING: Optional[Tuple[str, str]] = None
# Sensor info
SENSORS: Optional[Sequence[Sensor]] = None
EGOCENTRIC_RGB_UUID = "rgb"
UNSHUFFLED_RGB_UUID = "unshuffled_rgb"
EGOCENTRIC_RGB_RESNET_UUID = "rgb_resnet"
UNSHUFFLED_RGB_RESNET_UUID = "unshuffled_rgb_resnet"
# Actions
PICKUP_ACTIONS = list(
sorted(
[
f"pickup_{stringcase.snakecase(object_type)}"
for object_type, properties in OBJECT_TYPES_WITH_PROPERTIES.items()
if properties["pickupable"]
]
)
)
OPEN_ACTIONS = list(
sorted(
[
f"open_by_type_{stringcase.snakecase(object_type)}"
for object_type, properties in OBJECT_TYPES_WITH_PROPERTIES.items()
if properties["openable"] and not properties["pickupable"]
]
)
)
@classmethod
def sensors(cls) -> Sequence[Sensor]:
return cls.SENSORS
@classmethod
def actions(cls):
other_move_actions = (
tuple()
if not cls.INCLUDE_OTHER_MOVE_ACTIONS
else ("move_left", "move_right", "move_back",)
)
return (
("done", "move_ahead",)
+ other_move_actions
+ (
"rotate_right",
"rotate_left",
"stand",
"crouch",
"look_up",
"look_down",
"drop_held_object_with_snap",
*cls.OPEN_ACTIONS,
*cls.PICKUP_ACTIONS,
)
)
@classmethod
def resnet_preprocessor_graph(cls, mode: str) -> SensorPreprocessorGraph:
def create_resnet_builder(in_uuid: str, out_uuid: str):
cnn_type, pretraining_type = cls.CNN_PREPROCESSOR_TYPE_AND_PRETRAINING
if pretraining_type == "imagenet":
assert cnn_type in [
"RN18",
"RN50",
], "Only allow using RN18/RN50 with `imagenet` pretrained weights."
return ResNetPreprocessor(
input_height=cls.THOR_CONTROLLER_KWARGS["height"],
input_width=cls.THOR_CONTROLLER_KWARGS["width"],
output_width=7,
output_height=7,
output_dims=512 if "18" in cnn_type else 2048,
pool=False,
torchvision_resnet_model=getattr(
torchvision.models, f"resnet{cnn_type.replace('RN', '')}"
),
input_uuids=[in_uuid],
output_uuid=out_uuid,
)
elif pretraining_type == "clip":
from allenact_plugins.clip_plugin.clip_preprocessors import (
ClipResNetPreprocessor,
)
import clip
# Let's make sure we download the clip model now
# so we don't download it on every spawned process
clip.load(cnn_type, "cpu")
return ClipResNetPreprocessor(
rgb_input_uuid=in_uuid,
clip_model_type=cnn_type,
pool=False,
output_uuid=out_uuid,
)
else:
raise NotImplementedError
img_uuids = [cls.EGOCENTRIC_RGB_UUID, cls.UNSHUFFLED_RGB_UUID]
return SensorPreprocessorGraph(
source_observation_spaces=SensorSuite(
[
sensor
for sensor in cls.sensors()
if (mode == "train" or not isinstance(sensor, ExpertActionSensor))
]
).observation_spaces,
preprocessors=[
create_resnet_builder(sid, f"{sid}_resnet") for sid in img_uuids
],
)
@classmethod
def get_lr_scheduler_builder(cls, use_lr_decay: bool):
return (
None
if not use_lr_decay
else Builder(
LambdaLR,
{
"lr_lambda": LinearDecay(
steps=cls.TRAINING_STEPS // 3, startp=1.0, endp=1.0 / 3
)
},
)
)
@classmethod
def machine_params(cls, mode="train", **kwargs) -> MachineParams:
"""Return the number of processes and gpu_ids to use with training."""
num_gpus = cuda.device_count()
has_gpu = num_gpus != 0
sampler_devices = None
if mode == "train":
nprocesses = cls.num_train_processes() if torch.cuda.is_available() else 1
devices = (
list(range(min(nprocesses, num_gpus)))
if has_gpu
else [torch.device("cpu")]
)
elif mode == "valid":
devices = [num_gpus - 1] if has_gpu else [torch.device("cpu")]
nprocesses = 2 if has_gpu else 0
else:
nprocesses = 20 if has_gpu else 1
devices = (
list(range(min(nprocesses, num_gpus)))
if has_gpu
else [torch.device("cpu")]
)
nprocesses = split_processes_onto_devices(
nprocesses=nprocesses, ndevices=len(devices)
)
return MachineParams(
nprocesses=nprocesses,
devices=devices,
sampler_devices=sampler_devices,
sensor_preprocessor_graph=cls.resnet_preprocessor_graph(mode=mode)
if cls.CNN_PREPROCESSOR_TYPE_AND_PRETRAINING is not None
else None,
)
@classmethod
def stagewise_task_sampler_args(
cls,
stage: str,
process_ind: int,
total_processes: int,
allowed_rearrange_inds_subset: Optional[Sequence[int]] = None,
allowed_scenes: Sequence[str] = None,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
force_x_display: Optional[str] = None,
):
if allowed_scenes is not None:
scenes = allowed_scenes
elif stage == "combined":
# Split scenes more evenly as the train scenes will have more episodes
train_scenes = datagen_utils.get_scenes("train")
other_scenes = datagen_utils.get_scenes("val") + datagen_utils.get_scenes(
"test"
)
assert len(train_scenes) == 2 * len(other_scenes)
scenes = []
while len(train_scenes) != 0:
scenes.append(train_scenes.pop())
scenes.append(train_scenes.pop())
scenes.append(other_scenes.pop())
assert len(train_scenes) == len(other_scenes)
else:
scenes = datagen_utils.get_scenes(stage)
if total_processes > len(scenes):
assert stage == "train" and total_processes % len(scenes) == 0
scenes = scenes * (total_processes // len(scenes))
allowed_scenes = list(
sorted(partition_sequence(seq=scenes, parts=total_processes,)[process_ind])
)
scene_to_allowed_rearrange_inds = None
if allowed_rearrange_inds_subset is not None:
allowed_rearrange_inds_subset = tuple(allowed_rearrange_inds_subset)
assert stage in ["valid", "train_unseen"]
scene_to_allowed_rearrange_inds = {
scene: allowed_rearrange_inds_subset for scene in allowed_scenes
}
seed = md5_hash_str_as_int(str(allowed_scenes))
device = (
devices[process_ind % len(devices)]
if devices is not None and len(devices) > 0
else torch.device("cpu")
)
x_display: Optional[str] = None
gpu_device: Optional[int] = None
thor_platform: Optional[ai2thor.platform.BaseLinuxPlatform] = None
if force_x_display is not None:
x_display = force_x_display
elif platform.system() == "Linux":
try:
x_displays = get_open_x_displays(throw_error_if_empty=True)
if devices is not None and len(
[d for d in devices if d != torch.device("cpu")]
) > len(x_displays):
get_logger().warning(
f"More GPU devices found than X-displays (devices: `{x_displays}`, x_displays: `{x_displays}`)."
f" This is not necessarily a bad thing but may mean that you're not using GPU memory as"
f" efficiently as possible. Consider following the instructions here:"
f" https://allenact.org/installation/installation-framework/#installation-of-ithor-ithor-plugin"
f" describing how to start an X-display on every GPU."
)
x_display = x_displays[process_ind % len(x_displays)]
except IOError:
# Could not find an open `x_display`, use CloudRendering instead.
assert all(
[d != torch.device("cpu") and d >= 0 for d in devices]
), "Cannot use CPU devices when there are no open x-displays as CloudRendering requires specifying a GPU."
gpu_device = device
thor_platform = ai2thor.platform.CloudRendering
kwargs = {
"stage": stage,
"allowed_scenes": allowed_scenes,
"scene_to_allowed_rearrange_inds": scene_to_allowed_rearrange_inds,
"seed": seed,
"x_display": x_display,
"thor_controller_kwargs": {
"gpu_device": gpu_device,
"platform": thor_platform,
},
}
sensors = kwargs.get("sensors", copy.deepcopy(cls.sensors()))
kwargs["sensors"] = sensors
sem_sensor = next(
(s for s in kwargs["sensors"] if isinstance(s, SemanticMapTHORSensor)), None
)
binned_pc_sensor = next(
(
s
for s in kwargs["sensors"]
if isinstance(s, BinnedPointCloudMapTHORSensor)
),
None,
)
if sem_sensor is not None:
sem_sensor.device = torch.device(device)
if binned_pc_sensor is not None:
binned_pc_sensor.device = torch.device(device)
if stage != "train":
# Don't include several sensors during validation/testing
kwargs["sensors"] = [
s
for s in kwargs["sensors"]
if not isinstance(
s,
(
ExpertActionSensor,
SemanticMapTHORSensor,
BinnedPointCloudMapTHORSensor,
),
)
]
return kwargs
@classmethod
def train_task_sampler_args(
cls,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
):
return dict(
force_cache_reset=False,
epochs=float("inf"),
**cls.stagewise_task_sampler_args(
stage="train",
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
),
)
@classmethod
def valid_task_sampler_args(
cls,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
):
return dict(
force_cache_reset=True,
epochs=1,
**cls.stagewise_task_sampler_args(
stage="valid",
allowed_rearrange_inds_subset=tuple(range(0, 50, 5)),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
),
)
@classmethod
def test_task_sampler_args(
cls,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
task_spec_in_metrics: bool = False,
):
task_spec_in_metrics = False
# Train_unseen
# stage = "train_unseen"
# allowed_rearrange_inds_subset = list(range(15))
# Val
# stage = "val"
# allowed_rearrange_inds_subset = None
# Test
# stage = "test"
# allowed_rearrange_inds_subset = None
# Combined (Will run inference on all datasets)
stage = "combined"
allowed_rearrange_inds_subset = None
return dict(
force_cache_reset=True,
epochs=1,
task_spec_in_metrics=task_spec_in_metrics,
**cls.stagewise_task_sampler_args(
stage=stage,
allowed_rearrange_inds_subset=allowed_rearrange_inds_subset,
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
),
)
@classmethod
@abstractmethod
def _training_pipeline_info(cls) -> Dict[str, Any]:
raise NotImplementedError
@classmethod
@abstractmethod
def num_train_processes(cls) -> int:
raise NotImplementedError
@classmethod
def training_pipeline(cls, **kwargs) -> TrainingPipeline:
info = cls._training_pipeline_info()
return TrainingPipeline(
gamma=info.get("gamma", 0.99),
use_gae=info.get("use_gae", True),
gae_lambda=info.get("gae_lambda", 0.95),
num_steps=info["num_steps"],
num_mini_batch=info["num_mini_batch"],
update_repeats=info["update_repeats"],
max_grad_norm=info.get("max_grad_norm", 0.5),
save_interval=cls.SAVE_INTERVAL,
named_losses=info["named_losses"],
metric_accumulate_interval=cls.num_train_processes()
* max(*cls.MAX_STEPS.values())
if torch.cuda.is_available()
else 1,
optimizer_builder=Builder(optim.Adam, dict(lr=info["lr"])),
advance_scene_rollout_period=None,
pipeline_stages=info["pipeline_stages"],
lr_scheduler_builder=cls.get_lr_scheduler_builder(
use_lr_decay=info["use_lr_decay"]
),
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
if cls.CNN_PREPROCESSOR_TYPE_AND_PRETRAINING is None:
return RearrangeActorCriticSimpleConvRNN(
action_space=gym.spaces.Discrete(len(cls.actions())),
observation_space=SensorSuite(cls.sensors()).observation_spaces,
rgb_uuid=cls.EGOCENTRIC_RGB_UUID,
unshuffled_rgb_uuid=cls.UNSHUFFLED_RGB_UUID,
)
else:
return ResNetRearrangeActorCriticRNN(
action_space=gym.spaces.Discrete(len(cls.actions())),
observation_space=kwargs[
"sensor_preprocessor_graph"
].observation_spaces,
rgb_uuid=cls.EGOCENTRIC_RGB_RESNET_UUID,
unshuffled_rgb_uuid=cls.UNSHUFFLED_RGB_RESNET_UUID,
)
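# Illustrative sketch (not part of the original file): a concrete experiment config
# only needs to fill in the abstract pieces above. The values below are placeholder
# assumptions for illustration; see the one-phase/two-phase configs for real examples.
#
#     class MyRearrangeConfig(RearrangeBaseExperimentConfig):
#         SENSORS = [...]  # e.g. RGB rearrange sensors
#
#         @classmethod
#         def num_train_processes(cls) -> int:
#             return 4
#
#         @classmethod
#         def _training_pipeline_info(cls) -> Dict[str, Any]:
#             return dict(
#                 named_losses=dict(ppo_loss=...),
#                 pipeline_stages=[...],
#                 num_steps=64,
#                 num_mini_batch=1,
#                 update_repeats=3,
#                 use_lr_decay=True,
#                 lr=3e-4,
#             )
#
#     pipeline = MyRearrangeConfig.training_pipeline()
#     machine_params = MyRearrangeConfig.machine_params("train")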
| ai2thor-rearrangement-main | baseline_configs/rearrange_base.py |
| ai2thor-rearrangement-main | baseline_configs/__init__.py |
import os
from typing import Type, Optional
import gym
import torch
from torch import nn
from allenact.base_abstractions.sensor import SensorSuite, Sensor, ExpertActionSensor
from allenact.embodiedai.mapping.mapping_models.active_neural_slam import (
ActiveNeuralSLAM,
)
from allenact.utils.misc_utils import multiprocessing_safe_download_file_from_url
from allenact_plugins.ithor_plugin.ithor_sensors import (
RelativePositionChangeTHORSensor,
ReachableBoundsTHORSensor,
)
from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig
from baseline_configs.two_phase.two_phase_rgb_resnet_ppowalkthrough_ilunshuffle import (
TwoPhaseRGBResNetPPOWalkthroughILUnshuffleExperimentConfig,
)
from rearrange.baseline_models import TwoPhaseRearrangeActorCriticFrozenMap
from rearrange.constants import (
PICKUPABLE_OBJECTS,
OPENABLE_OBJECTS,
)
from rearrange.sensors import (
InWalkthroughPhaseSensor,
RGBRearrangeSensor,
ClosestUnshuffledRGBRearrangeSensor,
)
from rearrange_constants import ABS_PATH_OF_REARRANGE_TOP_LEVEL_DIR
class TwoPhaseRGBResNetFrozenMapPPOWalkthroughILUnshuffleExperimentConfig(
TwoPhaseRGBResNetPPOWalkthroughILUnshuffleExperimentConfig
):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = (
None # Not necessary as we're handling things in the model
)
IL_PIPELINE_TYPE: str = "40proc-longtf"
ORDERED_OBJECT_TYPES = list(sorted(PICKUPABLE_OBJECTS + OPENABLE_OBJECTS))
MAP_RANGE_SENSOR = ReachableBoundsTHORSensor(margin=1.0)
MAP_INFO = dict(
map_range_sensor=MAP_RANGE_SENSOR,
vision_range_in_cm=40 * 5,
map_size_in_cm=1050
if isinstance(MAP_RANGE_SENSOR, ReachableBoundsTHORSensor)
else 2200,
resolution_in_cm=5,
)
SENSORS = [
ExpertActionSensor(len(RearrangeBaseExperimentConfig.actions())),
RGBRearrangeSensor(
height=RearrangeBaseExperimentConfig.SCREEN_SIZE,
width=RearrangeBaseExperimentConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid=RearrangeBaseExperimentConfig.EGOCENTRIC_RGB_UUID,
),
ClosestUnshuffledRGBRearrangeSensor(
height=RearrangeBaseExperimentConfig.SCREEN_SIZE,
width=RearrangeBaseExperimentConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid=RearrangeBaseExperimentConfig.UNSHUFFLED_RGB_UUID,
),
InWalkthroughPhaseSensor(),
RelativePositionChangeTHORSensor(),
MAP_RANGE_SENSOR,
]
@classmethod
def tag(cls) -> str:
return f"TwoPhaseRGBResNetFrozenMapPPOWalkthroughILUnshuffle_{cls.IL_PIPELINE_TYPE}"
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def get_sensor_uuid(stype: Type[Sensor]) -> Optional[str]:
s = next((s for s in cls.SENSORS if isinstance(s, stype)), None,)
return None if s is None else s.uuid
walkthrougher_should_ignore_action_mask = [
any(k in a for k in ["drop", "open", "pickup"]) for a in cls.actions()
]
map_kwargs = dict(
frame_height=224,
frame_width=224,
vision_range_in_cm=cls.MAP_INFO["vision_range_in_cm"],
resolution_in_cm=cls.MAP_INFO["resolution_in_cm"],
map_size_in_cm=cls.MAP_INFO["map_size_in_cm"],
)
observation_space = (
SensorSuite(cls.SENSORS).observation_spaces
if kwargs.get("sensor_preprocessor_graph") is None
else kwargs["sensor_preprocessor_graph"].observation_spaces
)
semantic_map_channels = len(cls.ORDERED_OBJECT_TYPES)
height_map_channels = 3
map_kwargs["n_map_channels"] = height_map_channels + semantic_map_channels
frozen_map = ActiveNeuralSLAM(**map_kwargs, use_resnet_layernorm=True)
pretrained_map_ckpt_path = os.path.join(
ABS_PATH_OF_REARRANGE_TOP_LEVEL_DIR,
"pretrained_model_ckpts",
"pretrained_active_neural_slam_via_walkthrough_75m.pt",
)
multiprocessing_safe_download_file_from_url(
url="https://prior-model-weights.s3.us-east-2.amazonaws.com/embodied-ai/rearrangement/walkthrough/pretrained_active_neural_slam_via_walkthrough_75m.pt",
save_path=pretrained_map_ckpt_path,
)
frozen_map.load_state_dict(
torch.load(pretrained_map_ckpt_path, map_location="cpu",)
)
return TwoPhaseRearrangeActorCriticFrozenMap(
map=frozen_map,
semantic_map_channels=semantic_map_channels,
height_map_channels=height_map_channels,
action_space=gym.spaces.Discrete(len(cls.actions())),
observation_space=observation_space,
rgb_uuid=cls.EGOCENTRIC_RGB_UUID,
in_walkthrough_phase_uuid=get_sensor_uuid(InWalkthroughPhaseSensor),
is_walkthrough_phase_embedding_dim=cls.IS_WALKTHROUGH_PHASE_EMBEDING_DIM,
rnn_type=cls.RNN_TYPE,
walkthrougher_should_ignore_action_mask=walkthrougher_should_ignore_action_mask,
done_action_index=cls.actions().index("done"),
)
| ai2thor-rearrangement-main | baseline_configs/two_phase/two_phase_rgb_resnet_frozen_map_ppowalkthrough_ilunshuffle.py |
| ai2thor-rearrangement-main | baseline_configs/two_phase/__init__.py |
from abc import ABC
from typing import Optional, Sequence, Dict, Type, Union
import gym
import gym.spaces
from torch import nn
from allenact.base_abstractions.sensor import SensorSuite, Sensor
try:
from allenact.embodiedai.sensors.vision_sensors import DepthSensor
except ImportError:
raise ImportError("Please update to allenact>=0.4.0.")
from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig
from rearrange.baseline_models import (
TwoPhaseRearrangeActorCriticSimpleConvRNN,
ResNetTwoPhaseRearrangeActorCriticRNN,
)
from rearrange.sensors import ClosestUnshuffledRGBRearrangeSensor
from rearrange.sensors import (
RGBRearrangeSensor,
InWalkthroughPhaseSensor,
)
from rearrange.tasks import RearrangeTaskSampler
class TwoPhaseRGBBaseExperimentConfig(RearrangeBaseExperimentConfig, ABC):
SENSORS = [
RGBRearrangeSensor(
height=RearrangeBaseExperimentConfig.SCREEN_SIZE,
width=RearrangeBaseExperimentConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid=RearrangeBaseExperimentConfig.EGOCENTRIC_RGB_UUID,
),
ClosestUnshuffledRGBRearrangeSensor(
height=RearrangeBaseExperimentConfig.SCREEN_SIZE,
width=RearrangeBaseExperimentConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid=RearrangeBaseExperimentConfig.UNSHUFFLED_RGB_UUID,
),
InWalkthroughPhaseSensor(),
]
TRAIN_UNSHUFFLE_RUNS_PER_WALKTHROUGH: int = 1
IS_WALKTHROUGH_PHASE_EMBEDING_DIM: int = 32
RNN_TYPE: str = "LSTM"
@classmethod
def make_sampler_fn(
cls,
stage: str,
force_cache_reset: bool,
allowed_scenes: Optional[Sequence[str]],
seed: int,
epochs: Union[str, float, int],
scene_to_allowed_rearrange_inds: Optional[Dict[str, Sequence[int]]] = None,
x_display: Optional[str] = None,
sensors: Optional[Sequence[Sensor]] = None,
only_one_unshuffle_per_walkthrough: bool = False,
thor_controller_kwargs: Optional[Dict] = None,
**kwargs,
) -> RearrangeTaskSampler:
"""Return a RearrangeTaskSampler."""
sensors = cls.SENSORS if sensors is None else sensors
if "mp_ctx" in kwargs:
del kwargs["mp_ctx"]
assert not cls.RANDOMIZE_START_ROTATION_DURING_TRAINING
return RearrangeTaskSampler.from_fixed_dataset(
run_walkthrough_phase=True,
run_unshuffle_phase=True,
stage=stage,
allowed_scenes=allowed_scenes,
scene_to_allowed_rearrange_inds=scene_to_allowed_rearrange_inds,
rearrange_env_kwargs=dict(
force_cache_reset=force_cache_reset,
**cls.REARRANGE_ENV_KWARGS,
controller_kwargs={
"x_display": x_display,
**cls.THOR_CONTROLLER_KWARGS,
**(
{} if thor_controller_kwargs is None else thor_controller_kwargs
),
"renderDepthImage": any(
isinstance(s, DepthSensor) for s in sensors
),
},
),
seed=seed,
sensors=SensorSuite(sensors),
max_steps=cls.MAX_STEPS,
discrete_actions=cls.actions(),
require_done_action=cls.REQUIRE_DONE_ACTION,
force_axis_aligned_start=cls.FORCE_AXIS_ALIGNED_START,
unshuffle_runs_per_walkthrough=cls.TRAIN_UNSHUFFLE_RUNS_PER_WALKTHROUGH
if (not only_one_unshuffle_per_walkthrough) and stage == "train"
else None,
epochs=epochs,
**kwargs,
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def get_sensor_uuid(stype: Type[Sensor]) -> Optional[str]:
s = next((s for s in cls.SENSORS if isinstance(s, stype)), None,)
return None if s is None else s.uuid
walkthrougher_should_ignore_action_mask = [
any(k in a for k in ["drop", "open", "pickup"]) for a in cls.actions()
]
if cls.CNN_PREPROCESSOR_TYPE_AND_PRETRAINING is None:
return TwoPhaseRearrangeActorCriticSimpleConvRNN(
action_space=gym.spaces.Discrete(len(cls.actions())),
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
rgb_uuid=cls.EGOCENTRIC_RGB_UUID,
unshuffled_rgb_uuid=cls.UNSHUFFLED_RGB_UUID,
in_walkthrough_phase_uuid=get_sensor_uuid(InWalkthroughPhaseSensor),
is_walkthrough_phase_embedding_dim=cls.IS_WALKTHROUGH_PHASE_EMBEDING_DIM,
rnn_type=cls.RNN_TYPE,
walkthrougher_should_ignore_action_mask=walkthrougher_should_ignore_action_mask,
done_action_index=cls.actions().index("done"),
)
else:
return ResNetTwoPhaseRearrangeActorCriticRNN(
action_space=gym.spaces.Discrete(len(cls.actions())),
observation_space=kwargs[
"sensor_preprocessor_graph"
].observation_spaces,
rgb_uuid=cls.EGOCENTRIC_RGB_RESNET_UUID,
unshuffled_rgb_uuid=cls.UNSHUFFLED_RGB_RESNET_UUID,
in_walkthrough_phase_uuid=get_sensor_uuid(InWalkthroughPhaseSensor),
is_walkthrough_phase_embedding_dim=cls.IS_WALKTHROUGH_PHASE_EMBEDING_DIM,
rnn_type=cls.RNN_TYPE,
walkthrougher_should_ignore_action_mask=walkthrougher_should_ignore_action_mask,
done_action_index=cls.actions().index("done"),
)
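# Illustrative sketch (not part of the original file): how the stagewise sampler
# arguments from the base config are combined with `make_sampler_fn`, mirroring
# `valid_task_sampler_args` above. `SomeTwoPhaseConfig` is a placeholder for any
# concrete subclass of `TwoPhaseRGBBaseExperimentConfig`.
#
#     sampler_kwargs = SomeTwoPhaseConfig.stagewise_task_sampler_args(
#         stage="valid",
#         allowed_rearrange_inds_subset=tuple(range(0, 50, 5)),
#         process_ind=0,
#         total_processes=1,
#     )
#     task_sampler = SomeTwoPhaseConfig.make_sampler_fn(
#         force_cache_reset=True, epochs=1, **sampler_kwargs
#     )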
| ai2thor-rearrangement-main | baseline_configs/two_phase/two_phase_rgb_base.py |
from baseline_configs.two_phase.two_phase_rgb_ppowalkthrough_ilunshuffle import (
TwoPhaseRGBPPOWalkthroughILUnshuffleExperimentConfig,
)
class TwoPhaseRGBResNetPPOWalkthroughILUnshuffleExperimentConfig(
TwoPhaseRGBPPOWalkthroughILUnshuffleExperimentConfig
):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = ("RN18", "imagenet")
IL_PIPELINE_TYPE: str = "40proc-longtf"
@classmethod
def tag(cls) -> str:
return f"TwoPhaseRGBResNetPPOWalkthroughILUnshuffle_{cls.IL_PIPELINE_TYPE}"
| ai2thor-rearrangement-main | baseline_configs/two_phase/two_phase_rgb_resnet_ppowalkthrough_ilunshuffle.py |
from typing import Dict, Any
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.sensor import ExpertActionSensor
from allenact.utils.experiment_utils import LinearDecay, PipelineStage
from baseline_configs.one_phase.one_phase_rgb_il_base import (
il_training_params,
StepwiseLinearDecay,
)
from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig
from baseline_configs.two_phase.two_phase_rgb_base import (
TwoPhaseRGBBaseExperimentConfig,
)
from rearrange.losses import MaskedPPO
class TwoPhaseRGBPPOWalkthroughILUnshuffleExperimentConfig(
TwoPhaseRGBBaseExperimentConfig
):
SENSORS = [
*TwoPhaseRGBBaseExperimentConfig.SENSORS,
ExpertActionSensor(len(RearrangeBaseExperimentConfig.actions())),
]
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = None
IL_PIPELINE_TYPE: str = "40proc-longtf"
@classmethod
def tag(cls) -> str:
return f"TwoPhaseRGBPPOWalkthroughILUnshuffle_{cls.IL_PIPELINE_TYPE}"
@classmethod
def num_train_processes(cls) -> int:
return cls._use_label_to_get_training_params()["num_train_processes"]
@classmethod
def _training_pipeline_info(cls) -> Dict[str, Any]:
"""Define how the model trains."""
training_steps = cls.TRAINING_STEPS
il_params = cls._use_label_to_get_training_params()
bc_tf1_steps = il_params["bc_tf1_steps"]
dagger_steps = il_params["dagger_steps"]
return dict(
named_losses=dict(
walkthrough_ppo_loss=MaskedPPO(
mask_uuid="in_walkthrough_phase",
ppo_params=dict(
clip_decay=LinearDecay(training_steps), **PPOConfig
),
),
imitation_loss=Imitation(),
),
pipeline_stages=[
PipelineStage(
loss_names=["walkthrough_ppo_loss", "imitation_loss"],
max_stage_steps=training_steps,
teacher_forcing=StepwiseLinearDecay(
cumm_steps_and_values=[
(bc_tf1_steps, 1.0),
(bc_tf1_steps + dagger_steps, 0.0),
]
),
)
],
**il_params,
)
@classmethod
def _use_label_to_get_training_params(cls):
return il_training_params(
label=cls.IL_PIPELINE_TYPE.lower(), training_steps=cls.TRAINING_STEPS
)
| ai2thor-rearrangement-main | baseline_configs/two_phase/two_phase_rgb_ppowalkthrough_ilunshuffle.py |
from typing import Dict, Any, cast
import gym
import torch
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.sensor import SensorSuite
from allenact.embodiedai.mapping.mapping_losses import (
BinnedPointCloudMapLoss,
SemanticMapFocalLoss,
)
from allenact.utils.experiment_utils import LinearDecay, PipelineStage
from allenact_plugins.ithor_plugin.ithor_sensors import (
RelativePositionChangeTHORSensor,
ReachableBoundsTHORSensor,
BinnedPointCloudMapTHORSensor,
SemanticMapTHORSensor,
)
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from baseline_configs.walkthrough.walkthrough_rgb_base import (
WalkthroughBaseExperimentConfig,
)
from rearrange.baseline_models import WalkthroughActorCriticResNetWithPassiveMap
from rearrange.constants import (
FOV,
PICKUPABLE_OBJECTS,
OPENABLE_OBJECTS,
)
class WalkthroughRGBMappingPPOExperimentConfig(WalkthroughBaseExperimentConfig):
ORDERED_OBJECT_TYPES = list(sorted(PICKUPABLE_OBJECTS + OPENABLE_OBJECTS))
MAP_RANGE_SENSOR = ReachableBoundsTHORSensor(margin=1.0)
MAP_INFO = dict(
map_range_sensor=MAP_RANGE_SENSOR,
vision_range_in_cm=40 * 5,
map_size_in_cm=1050
if isinstance(MAP_RANGE_SENSOR, ReachableBoundsTHORSensor)
else 2200,
resolution_in_cm=5,
)
SENSORS = WalkthroughBaseExperimentConfig.SENSORS + [
RelativePositionChangeTHORSensor(),
MAP_RANGE_SENSOR,
DepthSensorThor(
height=WalkthroughBaseExperimentConfig.SCREEN_SIZE,
width=WalkthroughBaseExperimentConfig.SCREEN_SIZE,
use_normalization=False,
uuid="depth",
),
BinnedPointCloudMapTHORSensor(fov=FOV, **MAP_INFO),
SemanticMapTHORSensor(
fov=FOV, **MAP_INFO, ordered_object_types=ORDERED_OBJECT_TYPES,
),
]
@classmethod
def tag(cls) -> str:
return "WalkthroughRGBMappingPPO"
@classmethod
def num_train_processes(cls) -> int:
return max(1, torch.cuda.device_count() * 5)
@classmethod
def create_model(cls, **kwargs) -> WalkthroughActorCriticResNetWithPassiveMap:
map_sensor = cast(
BinnedPointCloudMapTHORSensor,
next(
s for s in cls.SENSORS if isinstance(s, BinnedPointCloudMapTHORSensor)
),
)
map_kwargs = dict(
frame_height=224,
frame_width=224,
vision_range_in_cm=map_sensor.vision_range_in_cm,
resolution_in_cm=map_sensor.resolution_in_cm,
map_size_in_cm=map_sensor.map_size_in_cm,
)
observation_space = (
SensorSuite(cls.SENSORS).observation_spaces
if kwargs.get("sensor_preprocessor_graph") is None
else kwargs["sensor_preprocessor_graph"].observation_spaces
)
return WalkthroughActorCriticResNetWithPassiveMap(
action_space=gym.spaces.Discrete(len(cls.actions())),
observation_space=observation_space,
rgb_uuid=cls.EGOCENTRIC_RGB_UUID,
unshuffled_rgb_uuid=cls.UNSHUFFLED_RGB_UUID,
semantic_map_channels=len(cls.ORDERED_OBJECT_TYPES),
height_map_channels=3,
map_kwargs=map_kwargs,
)
@classmethod
def _training_pipeline_info(cls, **kwargs) -> Dict[str, Any]:
"""Define how the model trains."""
training_steps = cls.TRAINING_STEPS
return dict(
named_losses=dict(
ppo_loss=PPO(clip_decay=LinearDecay(training_steps), **PPOConfig),
binned_map_loss=BinnedPointCloudMapLoss(
binned_pc_uuid="binned_pc_map",
map_logits_uuid="ego_height_binned_map_logits",
),
semantic_map_loss=SemanticMapFocalLoss(
semantic_map_uuid="semantic_map",
map_logits_uuid="ego_semantic_map_logits",
),
),
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss", "binned_map_loss", "semantic_map_loss"],
loss_weights=[1.0, 1.0, 100.0],
max_stage_steps=training_steps,
)
],
num_steps=32,
num_mini_batch=1,
update_repeats=3,
use_lr_decay=True,
lr=3e-4,
)
| ai2thor-rearrangement-main | baseline_configs/walkthrough/walkthrough_rgb_mapping_ppo.py |
| ai2thor-rearrangement-main | baseline_configs/walkthrough/__init__.py |
from typing import Optional, Sequence, Dict
from allenact.base_abstractions.sensor import SensorSuite, Sensor
try:
from allenact.embodiedai.sensors.vision_sensors import DepthSensor
except ImportError:
raise ImportError("Please update to allenact>=0.4.0.")
from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig
from rearrange.sensors import UnshuffledRGBRearrangeSensor
from rearrange.tasks import RearrangeTaskSampler
class WalkthroughBaseExperimentConfig(RearrangeBaseExperimentConfig):
SENSORS = [
UnshuffledRGBRearrangeSensor(
height=RearrangeBaseExperimentConfig.SCREEN_SIZE,
width=RearrangeBaseExperimentConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid=RearrangeBaseExperimentConfig.UNSHUFFLED_RGB_UUID,
),
]
# Sensor info
EGOCENTRIC_RGB_UUID = RearrangeBaseExperimentConfig.UNSHUFFLED_RGB_UUID
EGOCENTRIC_RGB_RESNET_UUID = (
RearrangeBaseExperimentConfig.UNSHUFFLED_RGB_RESNET_UUID
)
THOR_CONTROLLER_KWARGS = {
**RearrangeBaseExperimentConfig.THOR_CONTROLLER_KWARGS,
"snapToGrid": False,
}
FORCE_AXIS_ALIGNED_START = False
RANDOMIZE_START_ROTATION_DURING_TRAINING = True
@classmethod
def actions(cls):
other_move_actions = (
tuple()
if not cls.INCLUDE_OTHER_MOVE_ACTIONS
else ("move_left", "move_right", "move_back",)
)
return (
("done", "move_ahead",)
+ other_move_actions
+ (
"rotate_right",
"rotate_left",
"stand",
"crouch",
"look_up",
"look_down",
)
)
@classmethod
def make_sampler_fn(
cls,
stage: str,
force_cache_reset: bool,
allowed_scenes: Optional[Sequence[str]],
seed: int,
scene_to_allowed_rearrange_inds: Optional[Dict[str, Sequence[int]]] = None,
x_display: Optional[str] = None,
sensors: Optional[Sequence[Sensor]] = None,
thor_controller_kwargs: Optional[Dict] = None,
**kwargs,
) -> RearrangeTaskSampler:
"""Return an RearrangeTaskSampler."""
sensors = cls.SENSORS if sensors is None else sensors
if "mp_ctx" in kwargs:
del kwargs["mp_ctx"]
return RearrangeTaskSampler.from_fixed_dataset(
run_walkthrough_phase=True,
run_unshuffle_phase=False,
stage=stage,
allowed_scenes=allowed_scenes,
scene_to_allowed_rearrange_inds=scene_to_allowed_rearrange_inds,
rearrange_env_kwargs=dict(
force_cache_reset=force_cache_reset,
**cls.REARRANGE_ENV_KWARGS,
controller_kwargs={
"x_display": x_display,
**cls.THOR_CONTROLLER_KWARGS,
"renderDepthImage": any(
isinstance(s, DepthSensor) for s in sensors
),
**(
{} if thor_controller_kwargs is None else thor_controller_kwargs
),
},
),
seed=seed,
sensors=SensorSuite(sensors),
max_steps=cls.MAX_STEPS,
discrete_actions=cls.actions(),
require_done_action=cls.REQUIRE_DONE_ACTION,
force_axis_aligned_start=cls.FORCE_AXIS_ALIGNED_START,
randomize_start_rotation=stage == "train"
and cls.RANDOMIZE_START_ROTATION_DURING_TRAINING,
**kwargs,
)
| ai2thor-rearrangement-main | baseline_configs/walkthrough/walkthrough_rgb_base.py |
from baseline_configs.walkthrough.walkthrough_rgb_ppo import (
WalkthroughPPOExperimentConfig,
)
class WalkthroughRGBResNetPPOExperimentConfig(WalkthroughPPOExperimentConfig):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = ("RN18", "imagenet")
@classmethod
def tag(cls) -> str:
return "WalkthroughRGBResNetPPO"
| ai2thor-rearrangement-main | baseline_configs/walkthrough/walkthrough_rgb_resnet_ppo.py |
from typing import Dict, Any
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import LinearDecay, PipelineStage
from baseline_configs.walkthrough.walkthrough_rgb_base import (
WalkthroughBaseExperimentConfig,
)
class WalkthroughPPOExperimentConfig(WalkthroughBaseExperimentConfig):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = None
@classmethod
def tag(cls) -> str:
return "WalkthroughRGBPPO"
@classmethod
def num_train_processes(cls) -> int:
return 40
@classmethod
def _training_pipeline_info(cls, **kwargs) -> Dict[str, Any]:
"""Define how the model trains."""
training_steps = cls.TRAINING_STEPS
return dict(
named_losses=dict(
ppo_loss=PPO(clip_decay=LinearDecay(training_steps), **PPOConfig)
),
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=training_steps,)
],
num_steps=64,
num_mini_batch=1,
update_repeats=3,
use_lr_decay=True,
lr=3e-4,
)
| ai2thor-rearrangement-main | baseline_configs/walkthrough/walkthrough_rgb_ppo.py |
import os
from typing import Sequence
import gym
import torch
from torch import nn
from allenact.base_abstractions.sensor import SensorSuite, Sensor
from allenact.embodiedai.mapping.mapping_models.active_neural_slam import (
ActiveNeuralSLAM,
)
from allenact.utils.misc_utils import multiprocessing_safe_download_file_from_url
from allenact_plugins.ithor_plugin.ithor_sensors import (
RelativePositionChangeTHORSensor,
ReachableBoundsTHORSensor,
)
from baseline_configs.one_phase.one_phase_rgb_il_base import (
OnePhaseRGBILBaseExperimentConfig,
)
from rearrange.baseline_models import OnePhaseRearrangeActorCriticFrozenMap
from rearrange.constants import (
PICKUPABLE_OBJECTS,
OPENABLE_OBJECTS,
)
from rearrange_constants import ABS_PATH_OF_REARRANGE_TOP_LEVEL_DIR
class OnePhaseRGBResNetFrozenMapDaggerExperimentConfig(
OnePhaseRGBILBaseExperimentConfig
):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = (
None # Not necessary as we're handling things in the model
)
IL_PIPELINE_TYPE = "40proc"
ORDERED_OBJECT_TYPES = list(sorted(PICKUPABLE_OBJECTS + OPENABLE_OBJECTS))
MAP_RANGE_SENSOR = ReachableBoundsTHORSensor(margin=1.0)
MAP_INFO = dict(
map_range_sensor=MAP_RANGE_SENSOR,
vision_range_in_cm=40 * 5,
map_size_in_cm=1050
if isinstance(MAP_RANGE_SENSOR, ReachableBoundsTHORSensor)
else 2200,
resolution_in_cm=5,
)
@classmethod
def sensors(cls) -> Sequence[Sensor]:
return list(
super(OnePhaseRGBResNetFrozenMapDaggerExperimentConfig, cls).sensors()
) + [RelativePositionChangeTHORSensor(), cls.MAP_RANGE_SENSOR,]
@classmethod
def tag(cls) -> str:
return f"OnePhaseRGBResNetFrozenMapDagger_{cls.IL_PIPELINE_TYPE}"
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
map_kwargs = dict(
frame_height=224,
frame_width=224,
vision_range_in_cm=cls.MAP_INFO["vision_range_in_cm"],
resolution_in_cm=cls.MAP_INFO["resolution_in_cm"],
map_size_in_cm=cls.MAP_INFO["map_size_in_cm"],
)
observation_space = (
SensorSuite(cls.sensors()).observation_spaces
if kwargs.get("sensor_preprocessor_graph") is None
else kwargs["sensor_preprocessor_graph"].observation_spaces
)
semantic_map_channels = len(cls.ORDERED_OBJECT_TYPES)
height_map_channels = 3
map_kwargs["n_map_channels"] = height_map_channels + semantic_map_channels
frozen_map = ActiveNeuralSLAM(**map_kwargs, use_resnet_layernorm=True)
pretrained_map_ckpt_path = os.path.join(
ABS_PATH_OF_REARRANGE_TOP_LEVEL_DIR,
"pretrained_model_ckpts",
"pretrained_active_neural_slam_via_walkthrough_75m.pt",
)
multiprocessing_safe_download_file_from_url(
url="https://prior-model-weights.s3.us-east-2.amazonaws.com/embodied-ai/rearrangement/walkthrough/pretrained_active_neural_slam_via_walkthrough_75m.pt",
save_path=pretrained_map_ckpt_path,
)
frozen_map.load_state_dict(
torch.load(pretrained_map_ckpt_path, map_location="cpu",)
)
return OnePhaseRearrangeActorCriticFrozenMap(
map=frozen_map,
action_space=gym.spaces.Discrete(len(cls.actions())),
observation_space=observation_space,
rgb_uuid=cls.EGOCENTRIC_RGB_UUID,
unshuffled_rgb_uuid=cls.UNSHUFFLED_RGB_UUID,
semantic_map_channels=semantic_map_channels,
height_map_channels=height_map_channels,
)
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_resnet_frozen_map_dagger.py |
from typing import Tuple, Sequence, Optional, Dict, Any
import torch
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.base_abstractions.sensor import ExpertActionSensor, Sensor
from allenact.utils.experiment_utils import PipelineStage
from allenact.utils.misc_utils import all_unique
from baseline_configs.one_phase.one_phase_rgb_base import (
OnePhaseRGBBaseExperimentConfig,
)
from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig
class StepwiseLinearDecay:
def __init__(self, cumm_steps_and_values: Sequence[Tuple[int, float]]):
assert len(cumm_steps_and_values) >= 1
self.steps_and_values = list(sorted(cumm_steps_and_values))
self.steps = [steps for steps, _ in cumm_steps_and_values]
self.values = [value for _, value in cumm_steps_and_values]
assert all_unique(self.steps)
assert all(0 <= v <= 1 for v in self.values)
def __call__(self, epoch: int) -> float:
"""Get the value for the input number of steps."""
if epoch <= self.steps[0]:
return self.values[0]
elif epoch >= self.steps[-1]:
return self.values[-1]
else:
# TODO: Binary search would be more efficient but seems overkill
for i, (s0, s1) in enumerate(zip(self.steps[:-1], self.steps[1:])):
if epoch < s1:
p = (epoch - s0) / (s1 - s0)
v0 = self.values[i]
v1 = self.values[i + 1]
return p * v1 + (1 - p) * v0
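# Illustrative example (not part of the original file): the decay linearly
# interpolates between the given cumulative (step, value) breakpoints and is
# constant outside of them, e.g. for teacher-forcing annealing:
#
#     decay = StepwiseLinearDecay(cumm_steps_and_values=[(100_000, 1.0), (1_100_000, 0.0)])
#     decay(50_000)     # -> 1.0 (before the first breakpoint)
#     decay(600_000)    # -> 0.5 (halfway between the breakpoints)
#     decay(2_000_000)  # -> 0.0 (after the last breakpoint)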
def il_training_params(label: str, training_steps: int):
use_lr_decay = False
if label == "80proc":
lr = 3e-4
num_train_processes = 80
num_steps = 64
dagger_steps = min(int(1e6), training_steps // 10)
bc_tf1_steps = min(int(1e5), training_steps // 10)
update_repeats = 3
num_mini_batch = 2 if torch.cuda.is_available() else 1
elif label == "40proc":
lr = 3e-4
num_train_processes = 40
num_steps = 64
dagger_steps = min(int(1e6), training_steps // 10)
bc_tf1_steps = min(int(1e5), training_steps // 10)
update_repeats = 3
num_mini_batch = 1
elif label == "40proc-longtf":
lr = 3e-4
num_train_processes = 40
num_steps = 64
dagger_steps = min(int(5e6), training_steps // 10)
bc_tf1_steps = min(int(5e5), training_steps // 10)
update_repeats = 3
num_mini_batch = 1
else:
raise NotImplementedError
return dict(
lr=lr,
num_steps=num_steps,
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
use_lr_decay=use_lr_decay,
num_train_processes=num_train_processes,
dagger_steps=dagger_steps,
bc_tf1_steps=bc_tf1_steps,
)
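# Illustrative example (not part of the original file): with the 75M training
# steps used by the baselines, the "40proc" label resolves to
#
#     il_training_params("40proc", training_steps=int(75e6))
#     # -> lr=3e-4, num_steps=64, num_mini_batch=1, update_repeats=3,
#     #    use_lr_decay=False, num_train_processes=40,
#     #    dagger_steps=1_000_000 (min(1e6, 75e6 // 10)),
#     #    bc_tf1_steps=100_000   (min(1e5, 75e6 // 10))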
class OnePhaseRGBILBaseExperimentConfig(OnePhaseRGBBaseExperimentConfig):
IL_PIPELINE_TYPE: Optional[str] = None
@classmethod
def sensors(cls) -> Sequence[Sensor]:
return [
*super(OnePhaseRGBILBaseExperimentConfig, cls).sensors(),
ExpertActionSensor(len(RearrangeBaseExperimentConfig.actions())),
]
@classmethod
def _training_pipeline_info(cls, **kwargs) -> Dict[str, Any]:
"""Define how the model trains."""
training_steps = cls.TRAINING_STEPS
params = cls._use_label_to_get_training_params()
bc_tf1_steps = params["bc_tf1_steps"]
dagger_steps = params["dagger_steps"]
return dict(
named_losses=dict(imitation_loss=Imitation()),
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=training_steps,
teacher_forcing=StepwiseLinearDecay(
cumm_steps_and_values=[
(bc_tf1_steps, 1.0),
(bc_tf1_steps + dagger_steps, 0.0),
]
),
)
],
**params
)
@classmethod
def num_train_processes(cls) -> int:
return cls._use_label_to_get_training_params()["num_train_processes"]
@classmethod
def _use_label_to_get_training_params(cls):
return il_training_params(
label=cls.IL_PIPELINE_TYPE.lower(), training_steps=cls.TRAINING_STEPS
)
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_il_base.py |
from baseline_configs.one_phase.one_phase_rgb_il_base import (
OnePhaseRGBILBaseExperimentConfig,
)
class OnePhaseRGBResNetDaggerExperimentConfig(OnePhaseRGBILBaseExperimentConfig):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = ("RN18", "imagenet")
IL_PIPELINE_TYPE = "40proc"
@classmethod
def tag(cls) -> str:
return f"OnePhaseRGBResNetDagger_{cls.IL_PIPELINE_TYPE}"
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_resnet_dagger.py |
| ai2thor-rearrangement-main | baseline_configs/one_phase/__init__.py |
from baseline_configs.one_phase.one_phase_rgb_il_base import (
OnePhaseRGBILBaseExperimentConfig,
)
class OnePhaseRGBClipResNet50DaggerExperimentConfig(OnePhaseRGBILBaseExperimentConfig):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = ("RN50", "clip")
IL_PIPELINE_TYPE = "40proc"
@classmethod
def tag(cls) -> str:
return f"OnePhaseRGBClipResNet50Dagger_{cls.IL_PIPELINE_TYPE}"
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_clipresnet50_dagger.py |
import warnings
from abc import ABC
from typing import Optional, Dict, Sequence
from allenact.base_abstractions.sensor import SensorSuite, Sensor
try:
from allenact.embodiedai.sensors.vision_sensors import (
DepthSensor,
IMAGENET_RGB_MEANS,
IMAGENET_RGB_STDS,
)
except ImportError:
raise ImportError("Please update to allenact>=0.4.0.")
from baseline_configs.rearrange_base import RearrangeBaseExperimentConfig
from rearrange.sensors import (
RGBRearrangeSensor,
UnshuffledRGBRearrangeSensor,
)
from rearrange.tasks import RearrangeTaskSampler
class OnePhaseRGBBaseExperimentConfig(RearrangeBaseExperimentConfig, ABC):
@classmethod
def sensors(cls) -> Sequence[Sensor]:
if cls.CNN_PREPROCESSOR_TYPE_AND_PRETRAINING is None:
warnings.warn("No CNN_PREPROCESSOR_TYPE_AND_PRETRAINING specified. Will use NO vision sensors.")
return []
_, pretraining_type = cls.CNN_PREPROCESSOR_TYPE_AND_PRETRAINING
if pretraining_type.strip().lower() == "clip":
from allenact_plugins.clip_plugin.clip_preprocessors import (
ClipResNetPreprocessor,
)
mean = ClipResNetPreprocessor.CLIP_RGB_MEANS
stdev = ClipResNetPreprocessor.CLIP_RGB_STDS
else:
mean = IMAGENET_RGB_MEANS
stdev = IMAGENET_RGB_STDS
return [
RGBRearrangeSensor(
height=RearrangeBaseExperimentConfig.SCREEN_SIZE,
width=RearrangeBaseExperimentConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid=RearrangeBaseExperimentConfig.EGOCENTRIC_RGB_UUID,
mean=mean,
stdev=stdev,
),
UnshuffledRGBRearrangeSensor(
height=RearrangeBaseExperimentConfig.SCREEN_SIZE,
width=RearrangeBaseExperimentConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid=RearrangeBaseExperimentConfig.UNSHUFFLED_RGB_UUID,
mean=mean,
stdev=stdev,
),
]
@classmethod
def make_sampler_fn(
cls,
stage: str,
force_cache_reset: bool,
allowed_scenes: Optional[Sequence[str]],
seed: int,
epochs: int,
scene_to_allowed_rearrange_inds: Optional[Dict[str, Sequence[int]]] = None,
x_display: Optional[str] = None,
sensors: Optional[Sequence[Sensor]] = None,
thor_controller_kwargs: Optional[Dict] = None,
**kwargs,
) -> RearrangeTaskSampler:
"""Return a RearrangeTaskSampler."""
sensors = cls.sensors() if sensors is None else sensors
if "mp_ctx" in kwargs:
del kwargs["mp_ctx"]
assert not cls.RANDOMIZE_START_ROTATION_DURING_TRAINING
return RearrangeTaskSampler.from_fixed_dataset(
run_walkthrough_phase=False,
run_unshuffle_phase=True,
stage=stage,
allowed_scenes=allowed_scenes,
scene_to_allowed_rearrange_inds=scene_to_allowed_rearrange_inds,
rearrange_env_kwargs=dict(
force_cache_reset=force_cache_reset,
**cls.REARRANGE_ENV_KWARGS,
controller_kwargs={
"x_display": x_display,
**cls.THOR_CONTROLLER_KWARGS,
**(
{} if thor_controller_kwargs is None else thor_controller_kwargs
),
"renderDepthImage": any(
isinstance(s, DepthSensor) for s in sensors
),
},
),
seed=seed,
sensors=SensorSuite(sensors),
max_steps=cls.MAX_STEPS,
discrete_actions=cls.actions(),
require_done_action=cls.REQUIRE_DONE_ACTION,
force_axis_aligned_start=cls.FORCE_AXIS_ALIGNED_START,
epochs=epochs,
**kwargs,
)
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_base.py |
from typing import Dict, Any
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import LinearDecay, PipelineStage
from baseline_configs.one_phase.one_phase_rgb_base import (
OnePhaseRGBBaseExperimentConfig,
)
class OnePhaseRGBPPOExperimentConfig(OnePhaseRGBBaseExperimentConfig):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = None
@classmethod
def tag(cls) -> str:
return "OnePhaseRGBPPO"
@classmethod
def num_train_processes(cls) -> int:
return 40
@classmethod
def _training_pipeline_info(cls, **kwargs) -> Dict[str, Any]:
"""Define how the model trains."""
training_steps = cls.TRAINING_STEPS
return dict(
named_losses=dict(
ppo_loss=PPO(clip_decay=LinearDecay(training_steps), **PPOConfig)
),
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=training_steps,)
],
num_steps=64,
num_mini_batch=1,
update_repeats=3,
use_lr_decay=True,
lr=3e-4,
)
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_ppo.py |
from baseline_configs.one_phase.one_phase_rgb_il_base import (
OnePhaseRGBILBaseExperimentConfig,
)
class OnePhaseRGBDaggerExperimentConfig(OnePhaseRGBILBaseExperimentConfig):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = None
IL_PIPELINE_TYPE = "40proc"
@classmethod
def tag(cls) -> str:
return f"OnePhaseRGBDagger_{cls.IL_PIPELINE_TYPE}"
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_dagger.py |
from baseline_configs.one_phase.one_phase_rgb_ppo import OnePhaseRGBPPOExperimentConfig
class OnePhaseRGBResNetPPOExperimentConfig(OnePhaseRGBPPOExperimentConfig):
CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = ("RN18", "imagenet")
@classmethod
def tag(cls) -> str:
return "OnePhaseRGBResNetPPO"
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_resnet_ppo.py |
from baseline_configs.one_phase.one_phase_rgb_il_base import (
OnePhaseRGBILBaseExperimentConfig,
)
class OnePhaseRGBResNet50DaggerExperimentConfig(OnePhaseRGBILBaseExperimentConfig):
    CNN_PREPROCESSOR_TYPE_AND_PRETRAINING = ("RN50", "imagenet")
    IL_PIPELINE_TYPE = "40proc"
    @classmethod
    def tag(cls) -> str:
        return f"OnePhaseRGBResNet50Dagger_{cls.IL_PIPELINE_TYPE}"
| ai2thor-rearrangement-main | baseline_configs/one_phase/one_phase_rgb_resnet50_dagger.py |
"""A script for generating rearrangement datasets."""
import argparse
import json
import math
import multiprocessing as mp
import os
import platform
import queue
import random
import time
import warnings
from collections import defaultdict
from typing import List, Set, Dict, Optional, Any, cast
import compress_pickle
import numpy as np
import tqdm
from ai2thor.controller import Controller
from allenact.utils.misc_utils import md5_hash_str_as_int
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from setproctitle import setproctitle as ptitle
from datagen.datagen_constants import OBJECT_TYPES_TO_NOT_MOVE
from datagen.datagen_utils import (
get_scenes,
get_random_seeds,
filter_pickupable,
open_objs,
get_object_ids_to_not_move_from_object_types,
remove_objects_until_all_have_identical_meshes,
check_object_opens,
)
from rearrange.constants import STARTER_DATA_DIR, THOR_COMMIT_ID
from rearrange.environment import (
RearrangeTHOREnvironment,
RearrangeTaskSpec,
)
from rearrange_constants import OPENNESS_THRESHOLD, IOU_THRESHOLD
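# Use the "spawn" start method so that each data-generation worker starts from a
# clean interpreter state; this avoids issues that can arise when forking a parent
# process whose children will each manage their own AI2-THOR controller.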
mp = mp.get_context("spawn")
def generate_one_rearrangement_given_initial_conditions(
controller: Controller,
scene: str,
start_kwargs: dict,
target_kwargs: dict,
num_objs_to_move: int,
num_objs_to_open: int,
object_types_to_not_move: Set[str],
agent_pos: Dict[str, float],
agent_rot: Dict[str, float],
allow_putting_objects_away: bool = False,
):
# Start position
controller.reset(scene)
controller.step(
"TeleportFull", horizon=0, standing=True, rotation=agent_rot, **agent_pos
)
if not controller.last_event.metadata["lastActionSuccess"]:
return None, None, None
if not remove_objects_until_all_have_identical_meshes(controller):
return None, None, None
controller.step("MakeAllObjectsUnbreakable")
controller.step("InitialRandomSpawn", **start_kwargs)
if not controller.last_event.metadata["lastActionSuccess"]:
return None, None, None
for _ in range(12):
controller.step("Pass")
if any(o["isBroken"] for o in controller.last_event.metadata["objects"]):
return None, None, None
# get initial and post random spawn object data
objects_after_first_irs = controller.last_event.metadata["objects"]
    # Of the non-movable (but openable) objects, randomly open some of them.
openable_objects = [
obj
for obj in objects_after_first_irs
if obj["openable"] and not obj["pickupable"]
]
random.shuffle(openable_objects)
object_names_to_open = []
for oo in openable_objects:
if len(object_names_to_open) == num_objs_to_open:
break
if check_object_opens(oo, controller):
object_names_to_open.append(oo["name"])
if len(object_names_to_open) != num_objs_to_open:
return None, None, None
try:
start_openness = open_objs(
object_names_to_open=object_names_to_open, controller=controller
)
except (StopIteration, RuntimeError):
return None, None, None
    # Accounts for the rare possibility that opening a non-pickupable object
    # moves a pickupable object.
pickupable_objects_after_first_irs = filter_pickupable(
objects=objects_after_first_irs,
object_types_to_not_move=object_types_to_not_move,
)
# choose which objects to move
if len(pickupable_objects_after_first_irs) < num_objs_to_move:
return None, None, None
random.shuffle(pickupable_objects_after_first_irs)
if num_objs_to_move == 0:
objects_to_not_move = pickupable_objects_after_first_irs
moved_objs = []
else:
objects_to_not_move = pickupable_objects_after_first_irs[:-num_objs_to_move]
moved_objs = pickupable_objects_after_first_irs[-num_objs_to_move:]
moved_obj_names = {o["name"] for o in moved_objs}
unmoved_obj_names = {o["name"] for o in objects_to_not_move}
if allow_putting_objects_away:
# If we're having a really hard time shuffling objects successfully, then let's
# move some of the objects we don't care about (i.e. the ones whose position won't change)
# into cupboards/drawers/etc so that there is more space.
controller.step(
"InitialRandomSpawn",
**start_kwargs,
excludedObjectIds=[
o["objectId"]
for o in controller.last_event.metadata["objects"]
if o["name"] in moved_obj_names
],
)
if not controller.last_event.metadata["lastActionSuccess"]:
return None, None, None
objects_after_first_irs = controller.last_event.metadata["objects"]
pickupable_objects_after_first_irs = filter_pickupable(
objects=objects_after_first_irs,
object_types_to_not_move=object_types_to_not_move,
)
controller.step(
"TeleportFull", horizon=0, standing=True, rotation=agent_rot, **agent_pos
)
if not controller.last_event.metadata["lastActionSuccess"]:
return None, None, None
second_stage_success = False
pickupable_objects_after_shuffle: Optional[List[Dict[str, Any]]] = None
target_openness: Optional[Dict[str, float]] = None
for retry_ind in range(2):
object_ids_not_to_move = [
o["objectId"]
for o in controller.last_event.metadata["objects"]
if o["name"] in unmoved_obj_names
]
object_ids_not_to_move.extend(
get_object_ids_to_not_move_from_object_types(
controller=controller, object_types=object_types_to_not_move,
)
)
controller.step(
"InitialRandomSpawn",
excludedObjectIds=object_ids_not_to_move,
**{**target_kwargs, "randomSeed": target_kwargs["randomSeed"] + retry_ind},
)
if not controller.last_event.metadata["lastActionSuccess"]:
continue
for _ in range(12):
# This shouldn't be necessary but we run these actions
# to let physics settle.
controller.step("Pass")
        # Change the openness of the same non-pickupable objects chosen above.
try:
target_openness = open_objs(
object_names_to_open=object_names_to_open, controller=controller
)
except (StopIteration, RuntimeError):
return None, None, None
# get initial and post random spawn object data
pickupable_objects_after_shuffle = filter_pickupable(
controller.last_event.metadata["objects"], object_types_to_not_move
)
all_teleport_success = True
for o in pickupable_objects_after_shuffle:
if o["name"] in moved_obj_names:
pos = o["position"]
positions = [
{
"x": pos["x"] + 0.001 * xoff,
"y": pos["y"] + 0.001 * yoff,
"z": pos["z"] + 0.001 * zoff,
}
for xoff in [0, -1, 1]
for zoff in [0, -1, 1]
for yoff in [0, 1, 2]
]
controller.step(
"TeleportObject",
objectId=o["objectId"],
positions=positions,
rotation=o["rotation"],
makeUnbreakable=True,
)
if not controller.last_event.metadata["lastActionSuccess"]:
all_teleport_success = False
break
if all_teleport_success:
second_stage_success = True
break
for o in controller.last_event.metadata["objects"]:
if o["isBroken"]:
print(
f"In scene {controller.last_event.metadata['sceneName']},"
f" object {o['name']} broke during setup."
)
return None, None, None
if not second_stage_success:
return None, None, None
pickupable_objects_after_first_irs.sort(key=lambda x: x["name"])
pickupable_objects_after_shuffle.sort(key=lambda x: x["name"])
if any(
o0["name"] != o1["name"]
for o0, o1 in zip(
pickupable_objects_after_first_irs, pickupable_objects_after_shuffle
)
):
print("Pickupable object names don't match after shuffle!")
return None, None, None
# [opened, starting, target]
return (
[
{
"name": open_obj_name,
"objectName": open_obj_name,
"objectId": next(
o["objectId"]
for o in openable_objects
if o["name"] == open_obj_name
),
"start_openness": start_openness[open_obj_name],
"target_openness": target_openness[open_obj_name],
}
for open_obj_name in start_openness
],
[
{
"name": pickupable_objects_after_first_irs[i]["name"],
"objectName": pickupable_objects_after_first_irs[i]["name"],
"position": pickupable_objects_after_first_irs[i]["position"],
"rotation": pickupable_objects_after_first_irs[i]["rotation"],
}
for i in range(len(pickupable_objects_after_first_irs))
],
[
{
"name": pickupable_objects_after_shuffle[i]["name"],
"objectName": pickupable_objects_after_shuffle[i]["name"],
"position": pickupable_objects_after_shuffle[i]["position"],
"rotation": pickupable_objects_after_shuffle[i]["rotation"],
}
for i in range(len(pickupable_objects_after_shuffle))
],
)
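# Illustrative sketch of the successful return value above (field names are taken
# from the code; the concrete values are made up):
#   opened_data    -> [{"name": "Drawer_a1", "objectName": "Drawer_a1",
#                       "objectId": "Drawer|+00.50|+00.90|-01.20",
#                       "start_openness": 0.23, "target_openness": 0.81}, ...]
#   starting_poses -> one dict per pickupable object with "name", "objectName",
#                     "position", and "rotation" keys (before the shuffle).
#   target_poses   -> same structure as starting_poses, after the shuffle.
# Any failure path instead returns (None, None, None).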
def generate_rearrangements_for_scenes(
stage_seed: int,
stage_scenes: List[str],
env: RearrangeTHOREnvironment,
object_types_to_not_move: Set[str],
max_obj_rearrangements_per_scene: int = 5,
scene_reuse_count: int = 50,
obj_name_to_avoid_positions: Optional[Dict[str, np.ndarray]] = None,
force_visible: bool = True,
place_stationary: bool = False,
rotation_increment: int = 30,
) -> dict:
if 360 % rotation_increment != 0:
raise ValueError("Rotation increment must be a factor of 360")
if obj_name_to_avoid_positions is None:
obj_name_to_avoid_positions = defaultdict(
lambda: np.array([[-1000, -1000, -1000]])
)
controller = env.controller
out: dict = dict()
for scene in stage_scenes:
print(f"Scene {scene}")
seed = md5_hash_str_as_int(f"{stage_seed}|{scene}")
random.seed(seed)
out[scene] = []
# set positions and rotations
controller.reset(scene)
scene_has_openable = 0 != len(
[
o
for o in controller.last_event.metadata["objects"]
if o["openable"] and not o["pickupable"]
]
)
if not scene_has_openable:
warnings.warn(f"SCENE {scene} HAS NO OPENABLE OBJECTS")
evt = controller.step("GetReachablePositions")
rps: List[Dict[str, float]] = evt.metadata["actionReturn"]
rps.sort(key=lambda d: (round(d["x"], 2), round(d["z"], 2)))
rotations = np.arange(0, 360, rotation_increment)
for reuse_i in range(scene_reuse_count):
try_count = 0
# Evenly distribute # of object rearrangements
num_objs_to_open = scene_has_openable * (reuse_i % 2)
num_objs_to_move = (1 - num_objs_to_open) + math.floor(
max_obj_rearrangements_per_scene * (reuse_i / scene_reuse_count)
)
position_count_offset = 0
while True:
try_count += 1
if try_count > 300:
raise RuntimeError(
f"Something wrong with scene {scene}, please file an issue."
)
episode_seed_string = f"{scene}|ind_{reuse_i}|tries_{try_count}|counts_{position_count_offset}|seed_{stage_seed}"
seed = md5_hash_str_as_int(episode_seed_string)
random.seed(seed)
# avoid agent being unable to teleport to position
# due to object being placed there
pos = random.choice(rps)
rot = {"x": 0, "y": int(random.choice(rotations)), "z": 0}
# used to make sure the positions of the objects
# are not always the same across the same scene.
start_kwargs = {
"randomSeed": random.randint(0, int(1e7) - 1),
"forceVisible": force_visible,
"placeStationary": place_stationary,
"excludedReceptacles": ["ToiletPaperHanger"],
}
target_kwargs = {
"randomSeed": random.randint(0, int(1e7) - 1),
"forceVisible": force_visible,
"placeStationary": place_stationary,
"excludedReceptacles": ["ToiletPaperHanger"],
}
                # Sometimes weird bugs arise where the number of objects that
                # actually end up moved does not match the requested count;
                # position_count_offset compensates for this below.
(
opened_data,
starting_poses,
target_poses,
) = generate_one_rearrangement_given_initial_conditions(
controller=controller,
scene=scene,
start_kwargs=start_kwargs,
target_kwargs=target_kwargs,
num_objs_to_move=num_objs_to_move + position_count_offset,
num_objs_to_open=num_objs_to_open,
object_types_to_not_move=object_types_to_not_move,
agent_pos=pos,
agent_rot=rot,
allow_putting_objects_away=try_count >= 30,
)
if opened_data is None:
position_count_offset = max(position_count_offset - 1, 0)
print(
f"{episode_seed_string}: Failed during generation." # {scene}, {pos}, {int(rot['y'])} {start_kwargs}, {target_kwargs}."
)
continue
task_spec_dict = {
"agent_position": pos,
"agent_rotation": int(rot["y"]),
"object_rearrangement_count": int(num_objs_to_move)
+ int(num_objs_to_open),
"openable_data": opened_data,
"starting_poses": starting_poses,
"target_poses": target_poses,
}
env.reset(task_spec=RearrangeTaskSpec(scene=scene, **task_spec_dict))
env.shuffle()
ips, gps, cps = env.poses
pose_diffs = cast(
List[Dict[str, Any]], env.compare_poses(goal_pose=gps, cur_pose=cps)
)
reachable_positions = env.controller.step(
"GetReachablePositions"
).metadata["actionReturn"]
failed = False
for gp, cp, pd in zip(gps, cps, pose_diffs):
if pd["iou"] is not None and pd["iou"] < IOU_THRESHOLD:
assert gp["type"] not in object_types_to_not_move
if gp["broken"] or cp["broken"]:
failed = True
break
pose_diff_energy = env.pose_difference_energy(
goal_pose=gp, cur_pose=cp
)
if pose_diff_energy != 0:
obj_name = gp["name"]
# Ensure that objects to rearrange are visible from somewhere
interactable_poses = env.controller.step(
"GetInteractablePoses",
objectId=cp["objectId"],
positions=reachable_positions,
).metadata["actionReturn"]
if interactable_poses is None or len(interactable_poses) == 0:
print(
f"{episode_seed_string}: {obj_name} is not visible despite needing to be rearranged."
)
failed = True
break
if obj_name in obj_name_to_avoid_positions:
if cp["pickupable"]:
threshold = 0.15
start_position = cp["position"]
pos_array = np.array(
[[start_position[k] for k in ["x", "y", "z"]]]
)
elif cp["openness"] is not None:
threshold = 0.05
pos_array = np.array([[cp["openness"]]])
else:
continue
dist = np.sqrt(
(
(obj_name_to_avoid_positions[obj_name] - pos_array)
** 2
).sum(-1)
).min()
if dist <= threshold:
print(
f"{episode_seed_string}: {obj_name} is within the threshold ({dist} <= {threshold})."
)
failed = True
break
if failed:
continue
npos_diff = int(
sum(
pd["iou"] is not None and pd["iou"] < IOU_THRESHOLD
for pd in pose_diffs
)
)
nopen_diff = int(
sum(
pd["openness_diff"] is not None
and pd["openness_diff"] >= OPENNESS_THRESHOLD
for pd in pose_diffs
)
)
if npos_diff != num_objs_to_move:
position_count_offset += (npos_diff < num_objs_to_move) - (
npos_diff > num_objs_to_move
)
position_count_offset = max(position_count_offset, 0)
print(
f"{episode_seed_string}: Incorrect amount of objects have moved expected != actual ({num_objs_to_move} != {npos_diff})"
)
continue
if nopen_diff != num_objs_to_open:
print(
f"{episode_seed_string}: Incorrect amount of objects have opened expected != actual ({num_objs_to_open} != {nopen_diff})"
)
continue
task_spec_dict["position_diff_count"] = npos_diff
task_spec_dict["open_diff_count"] = nopen_diff
task_spec_dict["pose_diff_energy"] = float(
env.pose_difference_energy(goal_pose=gps, cur_pose=cps).sum()
)
if (npos_diff == 0 and nopen_diff == 0) or task_spec_dict[
"pose_diff_energy"
] == 0.0:
print(
f"Not enough has moved in {scene}, {pos}, {int(rot['y'])} {start_kwargs}, {target_kwargs}!"
)
continue
if npos_diff > max_obj_rearrangements_per_scene or nopen_diff > 1:
print(
f"{episode_seed_string}: Final check failed ({npos_diff} [{max_obj_rearrangements_per_scene} max] pos. diffs,"
f" {nopen_diff} [1 max] opened)"
)
continue
out[scene].append(task_spec_dict)
print(f"{episode_seed_string} SUCCESS")
break
return out
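# A rough sketch of the structure returned above (keys are those assigned to
# task_spec_dict in the loop; the values shown are placeholders):
#   {
#       "FloorPlan1": [
#           {"agent_position": {...}, "agent_rotation": 90,
#            "object_rearrangement_count": 3, "openable_data": [...],
#            "starting_poses": [...], "target_poses": [...],
#            "position_diff_count": 2, "open_diff_count": 1,
#            "pose_diff_energy": 1.7},
#           ...
#       ],
#       ...
#   }
# with one entry per successfully generated episode specification for each scene.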
def rearrangement_datagen_worker(
input_queue: mp.Queue,
output_queue: mp.Queue,
scene_to_obj_name_to_avoid_positions: Optional[
Dict[str, Dict[str, np.ndarray]]
] = None,
):
ptitle("Rearrange Datagen Worker")
env = RearrangeTHOREnvironment(
force_cache_reset=True, controller_kwargs={"commit_id": THOR_COMMIT_ID}
)
while True:
try:
scene, stage, seed = input_queue.get(timeout=2)
except queue.Empty:
break
data = generate_rearrangements_for_scenes(
stage_seed=seed,
stage_scenes=[scene],
env=env,
object_types_to_not_move=OBJECT_TYPES_TO_NOT_MOVE,
obj_name_to_avoid_positions=None
if scene_to_obj_name_to_avoid_positions is None
else scene_to_obj_name_to_avoid_positions[scene],
)
output_queue.put((scene, stage, data[scene]))
def get_scene_to_obj_name_to_seen_positions():
scene_to_task_spec_dicts = compress_pickle.load(
os.path.join(STARTER_DATA_DIR, f"train.pkl.gz")
)
assert len(scene_to_task_spec_dicts) == 80 and all(
len(v) == 50 for v in scene_to_task_spec_dicts.values()
)
scene_to_obj_name_to_positions = {}
for scene in tqdm.tqdm(scene_to_task_spec_dicts):
obj_name_to_positions = defaultdict(lambda: [])
for task_spec_dict in scene_to_task_spec_dicts[scene]:
for od in task_spec_dict["openable_data"]:
obj_name_to_positions[od["name"]].extend(
(od["start_openness"], od["target_openness"])
)
for sp, tp in zip(
task_spec_dict["starting_poses"], task_spec_dict["target_poses"]
):
assert sp["name"] == tp["name"]
position_dist = IThorEnvironment.position_dist(
sp["position"], tp["position"]
)
rotation_dist = IThorEnvironment.angle_between_rotations(
sp["rotation"], tp["rotation"]
)
if position_dist >= 1e-2 or rotation_dist >= 5:
obj_name_to_positions[sp["name"]].append(
[sp["position"][k] for k in ["x", "y", "z"]]
)
obj_name_to_positions[sp["name"]].append(
[tp["position"][k] for k in ["x", "y", "z"]]
)
scene_to_obj_name_to_positions[scene] = {
k: np.array(v) for k, v in obj_name_to_positions.items()
}
return scene_to_obj_name_to_positions
def main():
ptitle("Rearrange Datagen Manager")
parser = argparse.ArgumentParser()
parser.add_argument("--debug", "-d", action="store_true", default=False)
parser.add_argument("--train_unseen", "-t", action="store_true", default=False)
args = parser.parse_args()
nprocesses = (
max((3 * mp.cpu_count()) // 4, 1) if platform.system() == "Linux" else 1
)
stage_seeds = get_random_seeds()
scene_to_obj_name_to_avoid_positions = None
if args.debug:
stage_to_scenes = {"debug": ["FloorPlan428"]}
elif args.train_unseen:
stage_to_scenes = {"train_unseen": get_scenes("train")}
scene_to_obj_name_to_avoid_positions = get_scene_to_obj_name_to_seen_positions()
else:
stage_to_scenes = {
stage: get_scenes(stage) for stage in ("train", "val", "test")
}
os.makedirs(STARTER_DATA_DIR, exist_ok=True)
stage_to_scene_to_rearrangements = {stage: {} for stage in stage_to_scenes}
for stage in stage_to_scenes:
path = os.path.join(STARTER_DATA_DIR, f"{stage}.json")
if os.path.exists(path):
with open(path, "r") as f:
stage_to_scene_to_rearrangements[stage] = json.load(f)
send_queue = mp.Queue()
num_scenes_to_run = 0
for stage in stage_to_scenes:
for scene in stage_to_scenes[stage]:
if scene not in stage_to_scene_to_rearrangements[stage]:
num_scenes_to_run += 1
send_queue.put((scene, stage, stage_seeds[stage]))
receive_queue = mp.Queue()
processes = []
for i in range(nprocesses):
p = mp.Process(
target=rearrangement_datagen_worker,
kwargs=dict(
input_queue=send_queue,
output_queue=receive_queue,
scene_to_obj_name_to_avoid_positions=scene_to_obj_name_to_avoid_positions,
),
)
p.start()
processes.append(p)
time.sleep(0.5)
num_received = 0
while num_scenes_to_run > num_received:
try:
scene, stage, data = receive_queue.get(timeout=1)
num_received += 1
except queue.Empty:
continue
print(f"Saving {scene}")
scene_to_rearrangements = stage_to_scene_to_rearrangements[stage]
if scene not in scene_to_rearrangements:
scene_to_rearrangements[scene] = []
scene_to_rearrangements[scene].extend(data)
with open(os.path.join(STARTER_DATA_DIR, f"{stage}.json"), "w") as f:
json.dump(scene_to_rearrangements, f)
compress_pickle.dump(
obj=scene_to_rearrangements,
path=os.path.join(STARTER_DATA_DIR, f"{stage}.pkl.gz"),
pickler_kwargs={"protocol": 4,}, # Backwards compatible with python 3.6
)
for p in processes:
try:
p.join(timeout=1)
except:
pass
if __name__ == "__main__":
main()
| ai2thor-rearrangement-main | datagen/datagen_runner.py |
 | ai2thor-rearrangement-main | datagen/__init__.py |
OBJECT_TYPES_TO_NOT_MOVE = {
"Apple",
"Bread",
"Cloth",
"HandTowel",
"HandTowelHolder",
"Towel",
"TowelHolder",
"KeyChain",
"Lettuce",
"Pillow",
"Potato",
"Tomato",
}
OBJECT_TYPES_THAT_CAN_HAVE_IDENTICAL_MESHES = [
"AluminumFoil",
"CD",
"Dumbbell",
"Ladle",
"Vase",
]
| ai2thor-rearrangement-main | datagen/datagen_constants.py |
import json
import os
from collections import defaultdict
import compress_pickle
from allenact.utils.misc_utils import partition_sequence
from rearrange.constants import STARTER_DATA_DIR
def combine(task_limit_for_train: int = 10000):
stages = ("train", "val", "test")
all_data = defaultdict(lambda: [])
for stage in stages:
print(stage)
data_path = os.path.join(STARTER_DATA_DIR, f"{stage}.pkl.gz")
if not os.path.exists(data_path):
raise RuntimeError(f"No data at path {data_path}")
data = compress_pickle.load(path=data_path)
max_per_scene = task_limit_for_train if "train" in stage else 10000
count = 0
for scene in data:
assert len(data[scene]) == 50
for index, task_spec_dict in enumerate(data[scene]):
task_spec_dict["scene"] = scene
task_spec_dict["index"] = index
task_spec_dict["stage"] = stage
pieces_per_part = max_per_scene // 5 # 5 hardnesses
parts = partition_sequence(data[scene], 5)
all_together = sum([part[:pieces_per_part] for part in parts], [])
count += len(all_together)
all_data[scene].extend(all_together)
print(count)
all_data = dict(all_data)
with open(os.path.join(STARTER_DATA_DIR, f"combined.json"), "w") as f:
json.dump(all_data, f)
compress_pickle.dump(
obj=all_data,
path=os.path.join(STARTER_DATA_DIR, f"combined.pkl.gz"),
pickler_kwargs={"protocol": 4,}, # Backwards compatible with python 3.6
)
if __name__ == "__main__":
combine(10)
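# Worked example of the selection above, assuming partition_sequence splits the
# asserted 50 episodes per scene into 5 equal parts (the "5 hardnesses" noted in
# the comment): with task_limit_for_train = 10, max_per_scene = 10 for train, so
# pieces_per_part = 10 // 5 = 2 and the first 2 episodes of each of the 5 parts
# are kept, i.e. 10 episodes per training scene. For val/test, max_per_scene is
# 10000, so all 50 episodes per scene are kept.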
| ai2thor-rearrangement-main | datagen/create_combined_dataset.py |
import random
from collections import defaultdict
from typing import List, Dict, Set, Optional, Any
from ai2thor.controller import Controller
from datagen.datagen_constants import OBJECT_TYPES_THAT_CAN_HAVE_IDENTICAL_MESHES
from rearrange_constants import OPENNESS_THRESHOLD
def get_scenes(stage: str) -> List[str]:
"""Returns a list of iTHOR scene names for each stage."""
assert stage in {"debug", "train", "train_unseen", "val", "valid", "test", "all"}
if stage == "debug":
return ["FloorPlan1"]
# [1-20] for train, [21-25] for val, [26-30] for test
if stage in ["train", "train_unseen"]:
scene_nums = range(1, 21)
elif stage in ["val", "valid"]:
scene_nums = range(21, 26)
elif stage == "test":
scene_nums = range(26, 31)
elif stage == "all":
scene_nums = range(1, 31)
else:
raise NotImplementedError
kitchens = [f"FloorPlan{i}" for i in scene_nums]
living_rooms = [f"FloorPlan{200+i}" for i in scene_nums]
bedrooms = [f"FloorPlan{300+i}" for i in scene_nums]
bathrooms = [f"FloorPlan{400+i}" for i in scene_nums]
return kitchens + living_rooms + bedrooms + bathrooms
def filter_pickupable(
objects: List[Dict], object_types_to_not_move: Set[str]
) -> List[Dict]:
"""Filters object data only for pickupable objects."""
return [
obj
for obj in objects
if obj["pickupable"] and not obj["objectType"] in object_types_to_not_move
]
def get_random_seeds(max_seed: int = int(1e8)) -> Dict[str, int]:
# Generate random seeds for each stage
# Train seed
random.seed(1329328939)
train_seed = random.randint(0, max_seed - 1)
# Train unseen seed
random.seed(709384928)
train_unseen_seed = random.randint(0, max_seed - 1)
# val seed
random.seed(3348958620)
val_seed = random.randint(0, max_seed - 1)
# test seed
random.seed(289123396)
test_seed = random.randint(0, max_seed - 1)
# Debug seed
random.seed(239084231)
debug_seed = random.randint(0, max_seed - 1)
return {
"train": train_seed,
"train_unseen": train_unseen_seed,
"val": val_seed,
"valid": val_seed,
"test": test_seed,
"debug": debug_seed,
}
def check_object_opens(obj: Dict[str, Any], controller: Controller):
controller.step(
"OpenObject", objectId=obj["objectId"], openness=1.0, forceAction=True,
)
obj_opened_fully = controller.last_event.metadata["lastActionSuccess"]
controller.step(
"CloseObject", objectId=obj["objectId"], forceAction=True,
)
obj_closed_fully = controller.last_event.metadata["lastActionSuccess"]
return obj_opened_fully and obj_closed_fully
def get_object_by_name(name: str, controller: Controller):
return next(
o for o in controller.last_event.metadata["objects"] if o["name"] == name
)
def open_objs(
object_names_to_open: List[str], controller: Controller
) -> Dict[str, Optional[float]]:
"""Opens up the chosen pickupable objects if they're openable."""
out: Dict[str, Optional[float]] = defaultdict(lambda: None)
for obj_name in object_names_to_open:
obj = get_object_by_name(obj_name, controller)
last_openness = obj["openness"]
new_openness = last_openness
while abs(last_openness - new_openness) <= OPENNESS_THRESHOLD:
new_openness = random.random()
event = controller.step(
"OpenObject",
objectId=obj["objectId"],
openness=new_openness,
forceAction=True,
)
obj_after_open = get_object_by_name(obj_name, controller)
if abs(obj_after_open["openness"] - new_openness) > 0.001:
raise RuntimeError(
f"In scene {event.metadata['sceneName']}, {obj['name']} was supposed to open to {new_openness}"
f" from {last_openness} but instead reached {obj_after_open['openness']}. Last action success was:"
f" {event.metadata['lastActionSuccess']}"
)
out[obj["name"]] = obj_after_open["openness"]
return out
def get_object_ids_to_not_move_from_object_types(
controller: Controller, object_types: Set[str]
) -> List[str]:
object_types = set(object_types)
return [
o["objectId"]
for o in controller.last_event.metadata["objects"]
if o["objectType"] in object_types
]
def remove_objects_until_all_have_identical_meshes(controller: Controller):
obj_type_to_obj_list = defaultdict(lambda: [])
for obj in controller.last_event.metadata["objects"]:
obj_type_to_obj_list[obj["objectType"]].append(obj)
for obj_type in OBJECT_TYPES_THAT_CAN_HAVE_IDENTICAL_MESHES:
objs_of_type = list(
sorted(obj_type_to_obj_list[obj_type], key=lambda x: x["name"])
)
random.shuffle(objs_of_type)
objs_to_remove = objs_of_type[:-1]
for obj_to_remove in objs_to_remove:
obj_to_remove_name = obj_to_remove["name"]
obj_id_to_remove = next(
obj["objectId"]
for obj in controller.last_event.metadata["objects"]
if obj["name"] == obj_to_remove_name
)
controller.step("RemoveFromScene", objectId=obj_id_to_remove)
if not controller.last_event.metadata["lastActionSuccess"]:
return False
return True
| ai2thor-rearrangement-main | datagen/datagen_utils.py |
""" Modified from the official evaluation script for v1.0 of the ROPES dataset to add consistency metric"""
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix((remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
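# A small illustrative helper (not used by the evaluator): it shows, on a made-up
# prediction/gold pair, how the token-overlap F1 above behaves.
def _f1_worked_example():
    # normalize_answer lowercases and strips punctuation, so the token lists are
    # ["in", "the", "garden"] vs ["the", "garden"]: overlap = 2, precision = 2/3,
    # recall = 2/2, and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.
    return f1_score("In the garden.", "the garden")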
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if str(qa['id']) not in predictions:
message = 'Unanswered question ' + str(qa['id']) + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = exact_match / total
f1 = f1 / total
return {'global_em': exact_match, 'global_f1': f1}
def evaluate_contrast(original_dataset, original_predictions, contrast_dataset, contrast_predictions):
original_f1 = contrast_f1 = original_exact_match = contrast_exact_match = total = consistency = 0
for original_article, contrast_article in zip(original_dataset, contrast_dataset):
for original_paragraph, contrast_paragraph in zip(original_article['paragraphs'], contrast_article['paragraphs']):
for original_qa, contrast_qa in zip(original_paragraph['qas'], contrast_paragraph['qas']):
total += 1
if str(original_qa['id']) not in original_predictions:
                    message = 'Unanswered question ' + str(original_qa['id']) + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
original_ground_truths = list(map(lambda x: x['text'], original_qa['answers']))
original_prediction = original_predictions[original_qa['id']]
contrast_ground_truths = list(map(lambda x: x['text'], contrast_qa['answers']))
contrast_prediction = contrast_predictions[contrast_qa['id']]
original_exact_match += metric_max_over_ground_truths(
exact_match_score, original_prediction, original_ground_truths)
contrast_exact_match += metric_max_over_ground_truths(
exact_match_score, contrast_prediction, contrast_ground_truths)
original_f1 += metric_max_over_ground_truths(
f1_score, original_prediction, original_ground_truths)
contrast_f1 += metric_max_over_ground_truths(
f1_score, contrast_prediction, contrast_ground_truths)
consistency += metric_max_over_ground_truths(
exact_match_score, original_prediction, original_ground_truths) and metric_max_over_ground_truths(
exact_match_score, contrast_prediction, contrast_ground_truths)
original_exact_match = original_exact_match / total
original_f1 = original_f1 / total
contrast_exact_match = contrast_exact_match / total
contrast_f1 = contrast_f1 / total
consistency = consistency / total
return {'original_exact_match': original_exact_match,
'original_f1': original_f1,
'contrast_exact_match': contrast_exact_match,
'contrast_f1': contrast_f1,
'consistency': consistency}
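# Note on the consistency metric above (illustrative): a question pair counts
# toward consistency only when the prediction is an exact match (after
# normalization) on *both* the original and the contrast version. A model that
# is right on the original but wrong on the contrast raises original_exact_match
# but adds nothing to consistency.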
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluation for ROPES contrast set')
parser.add_argument('--original_dataset_path', help='Dataset path')
parser.add_argument('--original_prediction_path', help='Prediction path')
parser.add_argument('--contrast_dataset_path', help='Dataset path')
parser.add_argument('--contrast_prediction_path', help='Prediction path')
parser.add_argument("--output_path", help='Metrics path')
args = parser.parse_args()
with open(args.original_dataset_path) as original_dataset_path:
original_dataset_json = json.load(original_dataset_path)
original_dataset = original_dataset_json['data']
with open(args.original_prediction_path) as original_prediction_path:
original_predictions = json.load(original_prediction_path)
with open(args.contrast_dataset_path) as contrast_dataset_path:
contrast_dataset_json = json.load(contrast_dataset_path)
contrast_dataset = contrast_dataset_json['data']
with open(args.contrast_prediction_path) as contrast_prediction_path:
contrast_predictions = json.load(contrast_prediction_path)
metrics = evaluate_contrast(original_dataset, original_predictions, contrast_dataset, contrast_predictions)
with open(args.output_path, "w", encoding="utf8") as outpath:
json.dump(metrics, outpath)
| contrast-sets-main | ropes/evaluate_contrast_set.py |
import argparse
import csv
import json
import os
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from perspectrum_model import PerspectrumTransformerModel
def evaluate(model_dir, data_path, result_path, cuda=False, **kwargs):
result = _evaluate_stance(model_dir, data_path, cuda)
for key, val in result.items():
print("{} = {}".format(key, val))
result_dir = os.path.dirname(result_path)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
with open(result_path, 'w') as fout:
json.dump(result, fout)
print("Results written to {}.".format(result_path))
def _evaluate_stance(model_dir, data_path, cuda):
model = PerspectrumTransformerModel('roberta', model_dir, cuda=cuda)
instances = _load_instances(data_path)
# Skip instances where perspectives are not relevant to the claim at all
relevant_instances = [ins for ins in instances if ins["original_stance_label"] != 'unk']
print("Evaluating on {} examples...".format(len(relevant_instances)))
original_claim_persp_pairs = [(ins['original_claim'], ins['perspective']) for ins in relevant_instances]
contrast_claim_persp_pairs = [(ins['contrast_claim'], ins['perspective']) for ins in relevant_instances]
original_logits = model.predict_batch(original_claim_persp_pairs)
contrast_logits = model.predict_batch(contrast_claim_persp_pairs)
# We trained the model with {0, 1} labels, so we use 0.5 as logits cutoff
original_pred = [1 if logit > 0.5 else 0 for logit in original_logits]
contrast_pred = [1 if logit > 0.5 else 0 for logit in contrast_logits]
original_gold = [1 if ins['original_stance_label'] == 'pos' else 0 for ins in relevant_instances]
contrast_gold = [1 if ins['contrast_stance_label'] == 'pos' else 0 for ins in relevant_instances]
original_acc = accuracy_score(original_gold, original_pred)
contrast_acc = accuracy_score(contrast_gold, contrast_pred)
original_correct = np.equal(np.array(original_gold), np.array(original_pred))
contrast_correct = np.equal(np.array(contrast_gold), np.array(contrast_pred))
consistency_array = np.bitwise_and(original_correct, contrast_correct).astype(int)
consistency = np.sum(consistency_array) / len(consistency_array)
return {
'original_f1': original_acc,
'contrast_f1': contrast_acc,
'consistency': consistency,
'set_size': len(relevant_instances)
}
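# Illustrative note on the values returned above: despite the 'original_f1' /
# 'contrast_f1' key names, the stored values are accuracy scores (accuracy_score
# is used above). Consistency is the per-instance AND of correctness on the
# original and contrast claims; e.g. original_correct = [1, 1, 0] and
# contrast_correct = [1, 0, 0] (toy values) give consistency_array = [1, 0, 0],
# so consistency = 1 / 3.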
def _load_instances(data_path):
print("Loading examples from {}...".format(data_path))
instances = []
with open(data_path) as fin:
reader = csv.DictReader(fin)
for row in reader:
instances.append(row)
return instances
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", action='store_true',
help="Whether to use gpu for this")
parser.add_argument("--task", default="stance", type=str,
help="task to evaluate on, either \'stance\' or \'relevance\'. ")
parser.add_argument("--model_dir", default="model/stance_roberta", type=str,
help="directory containing pretrained model weights.")
parser.add_argument("--data_path", default="perspectrum_contrast_sets.csv", type=str,
help="path to perspectrum_minimal_pairs.csv")
parser.add_argument("--result_path", default="log/result.json", type=str,
help="path to log/write results to.")
args = parser.parse_args()
evaluate(**vars(args))
| contrast-sets-main | perspectrum/run_evaluation.py |
import torch
from typing import List
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
InputExample)
from transformers import glue_convert_examples_to_features as convert_examples_to_features
class PerspectrumTransformerModel:
def __init__(self, model_type, model_path, model_name="roberta-large", cuda=True, **kwargs):
"""
Load pretrained model
"""
self.device = "cuda" if cuda else "cpu"
if model_type == "roberta":
self.model_type = "roberta"
self.tokenizer = RobertaTokenizer.from_pretrained(model_name)
self.model = RobertaForSequenceClassification.from_pretrained(model_path)
else:
self.model_type = "bert"
self.tokenizer = BertTokenizer.from_pretrained(model_name)
self.model = BertForSequenceClassification.from_pretrained(model_path)
self.model.to(self.device)
    def predict_batch(self, sent_pairs, label_set=(0, 1), max_sequence_length=128, batch_size=20) -> List:
        """
        Run prediction.
        :param sent_pairs: a list of (claim, perspective) sentence pairs to predict on
        :param label_set: set of labels
        :param max_sequence_length: maximum token length of each encoded pair
        :param batch_size: number of sentence pairs per forward pass
        :return: a list of positive-class logits, one per sentence pair
        """
predictions = []
for i in range(0, len(sent_pairs), batch_size):
examples = []
sent_pair_batch = sent_pairs[i:i+batch_size]
for sent_pair in sent_pair_batch:
examples.append(
InputExample(guid="dummy", text_a=sent_pair[0], text_b=sent_pair[1], label=label_set[0]))
            features = convert_examples_to_features(
                examples,
                self.tokenizer,
                label_list=label_set,
                max_length=max_sequence_length,
                output_mode="classification",
            )
batch_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
batch_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
batch_labels = torch.tensor([f.label for f in features], dtype=torch.long)
batch_input_ids = batch_input_ids.to(self.device)
batch_attention_mask = batch_attention_mask.to(self.device)
batch_labels = batch_labels.to(self.device)
if self.model_type == "bert":
batch_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
batch_token_type_ids = batch_token_type_ids.to(self.device)
else:
batch_token_type_ids = None
inputs = {
"input_ids": batch_input_ids,
"attention_mask": batch_attention_mask,
"labels": batch_labels,
"token_type_ids": batch_token_type_ids
}
with torch.no_grad():
output = self.model(**inputs)
tmp_eval_loss, logits = output[:2]
logits = logits.detach().cpu().numpy()
predictions.extend(logits[:, 1])
return predictions
if __name__ == '__main__':
import sys
model = PerspectrumTransformerModel("roberta", sys.argv[1], cuda=True)
print(model.predict([("123", "123"), ("asd", "asd")])) | contrast-sets-main | perspectrum/perspectrum_model.py |
from typing import List, Dict, Tuple
from collections import defaultdict
import json
import argparse
"""Script to measure consistency among MTMSN predictions."""
def read_json(input_json):
with open(input_json, "r") as f:
json_data = json.load(f)
return json_data
def make_qid2f1_map(predictions_json):
"""Make a map from {query_id: F1} from MTMSN prediction json.
The json is query_id: List[prediction_dict]; The complete list forms the predicted answer where each element is a
span that is predicted as answer. *DROP allows multiple span answers*
Each prediction_dict contains the key "f1" and "em" which is the score for the complete example; for multi-span
answers, the f1 and em in each dict is the same and is computed considering the full prediction. It is copied
to each dict individually for convenience.
"""
qid2f1, qid2em = defaultdict(float), defaultdict(float)
for qid, pred_list in predictions_json.items():
f1 = pred_list[0]["f1"]
em = pred_list[0]["em"]
qid2f1[qid] = f1
qid2em[qid] = em
return qid2f1, qid2em
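# Assumed shape of the MTMSN predictions json consumed above (only the "f1" and
# "em" keys are relied on here; the other keys are shown purely as placeholders):
#   {"<query_id>": [{"text": "<span 1>", "f1": 0.67, "em": 0.0},
#                   {"text": "<span 2>", "f1": 0.67, "em": 0.0}, ...], ...}
# where f1/em are duplicated across the spans of a multi-span answer.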
def original_qids(minimal_pairs_preds, full_data_preds):
"""Generate dict from original query_id to its perturbed ids for which example we perturbed.
The perturbed query_id is original_query_id appended with _NUM where NUM=1,2,...
"""
origqid2perturbedids = defaultdict(list)
for qid in minimal_pairs_preds:
orig_id = qid.split("_")[0]
assert orig_id in full_data_preds, f"Orig id: {orig_id} does not exist in full data predictions"
origqid2perturbedids[orig_id].append(qid)
return origqid2perturbedids
def compute_consistency_score(origqid2perturbedids,
fulldata_qid2f1, fulldata_qid2em, minimal_pairs_qid2f1, minimal_pairs_qid2em,
f1_consistency: bool = False, f1thresh: float = None):
"""Compute consistency score for the given model predictions.
Consistency for an example is 1.0 iff the prediction on the original question and all its perturbations is correct.
Args:
f1_consistency: `bool`
If True, a prediction is judged correct if the F1 is above a predefined threshold, else EM=1.0 is used
f1thresh: `float`
If f1_consistency is True, then this threshold is used to judge if a prediction is correct.
"""
if f1_consistency:
print("--- Computing consistency based on F1 threshold of {} ---".format(f1thresh))
assert f1thresh is not None, "F1-threshold is not provided"
else:
print("--- Computing consistency based on EM of 1.0 ---")
def is_prediction_correct(f1, em):
if f1_consistency:
return f1 >= f1thresh
else:
return em == 1.0
origqid2consistency = defaultdict(float)
num_orig_ques = 0
num_partially_correct = 0
num_all_incorrect = 0
num_all_correct = 0
for orig_id, perturbed_ids in origqid2perturbedids.items():
orig_correct = is_prediction_correct(fulldata_qid2f1[orig_id], fulldata_qid2em[orig_id])
perturbations_correct = [is_prediction_correct(minimal_pairs_qid2f1[qid], minimal_pairs_qid2em[qid])
for qid in perturbed_ids]
perturbations_correct.append(orig_correct)
consistency = float(all(perturbations_correct))
origqid2consistency[orig_id] = consistency
num_orig_ques += 1
num_all_correct += float(all(perturbations_correct))
num_all_incorrect += float(not any(perturbations_correct))
num_partially_correct += float(any(perturbations_correct) and not all(perturbations_correct))
avg_consistency = (num_all_correct/num_orig_ques) * 100.0
avg_partial_correct = (num_partially_correct/num_orig_ques) * 100.0
avg_all_incorrect= (num_all_incorrect/num_orig_ques) * 100.0
print("Perc examples w/ all in-correct: {}".format(avg_all_incorrect))
print("Perc examples w/ partially correct: {}".format(avg_partial_correct))
print("Avg Consistency : {}".format(avg_consistency))
return avg_consistency
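# Worked example of the consistency definition above (made-up ids and scores):
# suppose original question "q1" has perturbations "q1_1" and "q1_2". Under the
# default EM criterion, consistency for "q1" is 1.0 only if all three predictions
# have em == 1.0; being correct on "q1" and "q1_1" but not on "q1_2" makes the
# example "partially correct" and contributes 0 to Avg Consistency.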
def avg_f1(qid2f1):
total_f1 = sum(qid2f1.values())
avg_f1 = total_f1/len(qid2f1)
return avg_f1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--full_data_preds", type=str, default="predictions/drop_dataset_test_preds.json")
parser.add_argument("--minimal_pairs_preds", type=str, default="predictions/minimal_pairs_test_preds.json")
args = parser.parse_args()
full_data_preds = read_json(args.full_data_preds)
minimal_pairs_preds = read_json(args.minimal_pairs_preds)
fulldata_qid2f1, fulldata_qid2em = make_qid2f1_map(full_data_preds)
minimal_pairs_qid2f1, minimal_pairs_qid2em = make_qid2f1_map(minimal_pairs_preds)
origqid2perturbedids = original_qids(minimal_pairs_preds, full_data_preds)
origques_qid2f1 = {qid: fulldata_qid2f1[qid] for qid in origqid2perturbedids}
origques_qid2em = {qid: fulldata_qid2em[qid] for qid in origqid2perturbedids}
print()
print(f"Size of full data: {len(fulldata_qid2f1)}. Avg F1: {avg_f1(fulldata_qid2f1)}")
print(f"Size of questions that were perturbed: {len(origques_qid2f1)}. Avg F1: {avg_f1(origques_qid2f1)}")
print(f"Size of perturbed questions: {len(minimal_pairs_qid2f1)}. Avg F1: {avg_f1(minimal_pairs_qid2f1)}")
print()
avg_consistency_f1 = compute_consistency_score(origqid2perturbedids, fulldata_qid2f1, fulldata_qid2em,
minimal_pairs_qid2f1, minimal_pairs_qid2em, f1_consistency=True,
f1thresh=0.8)
avg_consistency_em = compute_consistency_score(origqid2perturbedids, fulldata_qid2f1, fulldata_qid2em,
minimal_pairs_qid2f1, minimal_pairs_qid2em, f1_consistency=False) | contrast-sets-main | DROP/consistency.py |
import json
import sys
import hashlib
from collections import defaultdict
import argparse
def merge_data(args):
all_data = defaultdict(lambda: defaultdict(lambda: {'qas': []})) # {(title, url) -> {context_id -> {}}}
for filename in args.files_to_merge:
file_data = json.load(open(filename))["data"]
for article_info in file_data:
title = article_info["title"]
url = article_info["url"]
for paragraph_info in article_info["paragraphs"]:
context_id = paragraph_info['context_id']
context = paragraph_info['context']
paragraph_has_perturbations = False
perturbed_qa_info = []
for qa_info in paragraph_info["qas"]:
if "original_id" in qa_info:
paragraph_has_perturbations = True
elif "_" in qa_info['id']:
# This was Dheeru's perturbation. The original id is the string before the underscore.
qa_info["original_id"] = qa_info['id'].split('_')[0]
paragraph_has_perturbations = True
else:
continue
# Some of the perturbations were done manually. So recomputing id just to be sure.
updated_id = hashlib.sha1(
f"{paragraph_info['context_id']} {qa_info['question']}".encode()).hexdigest()
# Also recomputing answer starts
for answer_info in qa_info['answers']:
try:
answer_info['answer_start'] = context.index(answer_info['text'])
except ValueError as error:
print("Could not find answer!")
print(f"Context was {context}")
print(f"Answer was {answer_info['text']}")
raise error
qa_info['id'] = updated_id
perturbed_qa_info.append(qa_info)
if paragraph_has_perturbations:
perturbed_paragraph_info = all_data[(title, url)][context_id]
perturbed_paragraph_info['context'] = context
perturbed_paragraph_info['context_id'] = context_id
for qa_info in perturbed_qa_info:
perturbed_paragraph_info['qas'].append(qa_info)
perturbed_data = {"data": []}
for (title, url), paragraphs_info in all_data.items():
article_info = {"title": title,
"url": url,
"paragraphs": list(paragraphs_info.values())}
perturbed_data['data'].append(article_info)
return perturbed_data
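# Sketch of the merged output structure built above (Quoref/SQuAD-style; all
# values are placeholders):
#   {"data": [{"title": ..., "url": ...,
#              "paragraphs": [{"context": ..., "context_id": ...,
#                              "qas": [{"question": ..., "id": ..., "original_id": ...,
#                                       "answers": [{"text": ..., "answer_start": ...}]}]}]}]}
# Only paragraphs containing at least one perturbed question are kept.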
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--output-file', type=str, help='Location of output file', required=True, dest='output_file')
parser.add_argument('--files-to-merge', type=str, help='All individual pertrubation outputs', required=True,
dest='files_to_merge', nargs='+')
args = parser.parse_args()
perturbed_data = merge_data(args)
with open(args.output_file, "w") as out_ptr:
json.dump(perturbed_data, out_ptr, indent=2)
| contrast-sets-main | quoref/merge_perturbed_files.py |
import re
import json
import random
import hashlib
import argparse
import datetime
from collections import defaultdict
def get_answers(context):
print("Enter answer spans below. You can copy text from the context and paste here.")
print("Hit enter if you are done inputting all answer spans")
new_answers = []
current_span = None
span_index = 0
while current_span != '':
current_span = input(f"Span {span_index + 1}: ")
if current_span != '':
# Important note: This will only find the first index of the answer.
try:
answer_start = context.index(current_span)
new_answers.append({"text": current_span,
"answer_start": answer_start})
except ValueError:
print("Could not find answer span in the context! Please try again.")
continue
span_index += 1
return new_answers
def get_new_passage(context):
while True:
string_to_replace = input("Enter a unique string from the passage that you want to change: ")
num_occurrences = len(re.findall(string_to_replace, context))
if num_occurrences == 0:
print("The string you entered does not occur in the passage. Please try again!")
elif num_occurrences > 1:
print("The string you entered is not unique. Please try again!")
else:
replacement = input("Enter a replacement: ")
new_context = context.replace(string_to_replace, replacement)
return new_context
def get_perturbed_info_for_article(datum):
"""
Given paragraphs and question-answer pairs for an article, we can either get perturbed question-answer pairs,
or perturbed contexts with new question-answer pairs. This method returns two lists for the two kinds of
perturbations.
"""
new_qas = []
new_paragraphs = []
end_session = False
num_new_instances = 0
for paragraph_index, paragraph_info in enumerate(datum["paragraphs"]):
if end_session:
break
question_ids = {qa_info["id"] for qa_info in paragraph_info["qas"]}
print("\nContext:")
context = paragraph_info["context"]
context_id = paragraph_info["context_id"]
print(context)
qa_indices = list(range(len(paragraph_info["qas"])))
random.shuffle(qa_indices)
for qa_index in qa_indices:
qa_info = paragraph_info["qas"][qa_index]
if "original_id" in qa_info:
# This is a perturbed instance. Let's not perturb it further.
continue
original_id = qa_info["id"]
question = qa_info['question']
print(f"\nNEW QUESTION FOR THE ABOVE CONTEXT: {question}")
print(f"Answers: {[a['text'] for a in qa_info['answers']]}")
print("You can modify the question or the passage, skip to the next question, or exit.")
response = input("Type a new question, hit enter to skip, type 'p' to edit passsage or 'exit' to end session: ")
while len(response) > 0 and response.lower() != 'exit':
if len(response) > 1:
perturbed_question = response.strip()
new_id = hashlib.sha1(f"{context} {perturbed_question}".encode()).hexdigest()
if new_id not in question_ids:
new_answers = get_answers(context)
if new_answers:
new_qa_info = {"question": perturbed_question,
"id": new_id,
"answers": new_answers,
"original_id": original_id}
new_qas.append((paragraph_index, new_qa_info))
num_new_instances += 1
else:
print("This question exists in the dataset! Please try again.\n")
elif response.lower() == "p":
perturbed_context = get_new_passage(context)
new_context_id = hashlib.sha1(perturbed_context.encode()).hexdigest()
print(f"New context: {perturbed_context}\n")
new_id = hashlib.sha1(f"{perturbed_context} {question}".encode()).hexdigest()
new_answers = get_answers(perturbed_context)
if new_answers:
new_qa_info = {"question": question,
"id": new_id,
"answers": new_answers,
"original_id": original_id}
new_paragraph_info = {"context": perturbed_context,
"qas": [new_qa_info],
"context_id": new_context_id,
"original_context_id": context_id}
new_paragraphs.append(new_paragraph_info)
num_new_instances += 1
else:
print(f"Unrecognized input: {response}")
print(f"\nSTILL MODIFYING THE SAME QUESTION: {question}")
print(f"Answers: {[a['text'] for a in qa_info['answers']]}")
print("You can modify the question or the passage, move on to the next question, or exit.")
response = input("Type a new question, hit enter to move on, type 'p' to edit passsage or 'exit' to end session: ")
if response.lower() == 'exit':
end_session = True
break
return new_qas, new_paragraphs, end_session, num_new_instances
def add_perturbations(data):
"""
Takes a dataset, queries user for perturbations, and adds them to the original dataset.
"""
data_indices = list(range(len(data["data"])))
random.shuffle(data_indices)
num_new_instances = 0
for datum_index in data_indices:
datum = data["data"][datum_index]
new_qa_info, new_paragraphs, end_session, num_instances = get_perturbed_info_for_article(datum)
num_new_instances += num_instances
for paragraph_index, qa_info in new_qa_info:
datum["paragraphs"][paragraph_index]["qas"].append(qa_info)
for paragraph_info in new_paragraphs:
datum["paragraphs"].append(paragraph_info)
if end_session:
print(f"\nEnding session. You generated {num_new_instances} new instance(s). Thank you!")
break
def get_perturbations(data):
"""
Takes a dataset, queries user for perturbations, and returns a smaller dataset with just the perturbations.
"""
perturbed_data = {"data": []}
data_indices = list(range(len(data["data"])))
random.shuffle(data_indices)
num_new_instances = 0
for datum_index in data_indices:
datum = data["data"][datum_index]
new_qa_info, new_paragraphs, end_session, num_instances = get_perturbed_info_for_article(datum)
num_new_instances += num_instances
if new_qa_info or new_paragraphs:
paragraphs_info = defaultdict(lambda: {'qas': []})
for paragraph_index, qa_info in new_qa_info:
original_paragraph_info = datum['paragraphs'][paragraph_index]
context_id = original_paragraph_info['context_id']
paragraphs_info[context_id]['context'] = original_paragraph_info['context']
paragraphs_info[context_id]['context_id'] = original_paragraph_info['context_id']
paragraphs_info[context_id]['qas'].append(qa_info)
for paragraph_info in new_paragraphs:
paragraphs_info[paragraph_info['context_id']] = paragraph_info
perturbed_data['data'].append({'title': datum['title'],
'url': datum['url'],
'paragraphs': list(paragraphs_info.values())})
if end_session:
print(f"\nEnding session. You generated {num_new_instances} new instance(s). Thank you!")
break
return perturbed_data
def main(args):
input_filename = args.input
data = json.load(open(input_filename))
if args.output_perturbations_only:
perturbed_data = get_perturbations(data)
else:
add_perturbations(data)
perturbed_data = data
filename_prefix = input_filename.split("/")[-1].split(".")[0]
# Removing previous timestamp if any
filename_prefix = re.sub('_2019[0-9]*$', '', filename_prefix)
output_name_suffix = ''
if '_perturbed' not in filename_prefix:
output_name_suffix = '_perturbed'
timestamp = re.sub('[^0-9]', '', str(datetime.datetime.now()).split('.')[0])
# Will be written in current directory
output_filename = f"{filename_prefix}{output_name_suffix}_{timestamp}.json"
json.dump(perturbed_data, open(output_filename, "w"), indent=2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, help='''Location of input file. If you want to continue your work from a
previous session, provide the output from that session as the input here. If
'--output-perturbations-only' is not set, the resulting dataset from this session will contain a
union of your perturbations from both sessions.''')
parser.add_argument('--output-perturbations-only', dest='output_perturbations_only', action='store_true',
help='''If this flag is set, the output will not contain instances from the input file.
It will only have perturbations, but they will still have references to the original instances.''')
args = parser.parse_args()
main(args)
| contrast-sets-main | quoref/interface.py |
"""
This evaluation script modifies code for the official Quoref evaluator (``allennlp/tools/quoref_eval.py``) to deal
with evaluating on contrast sets.
"""
import json
from typing import Dict, Tuple, List, Any, Set
import argparse
from collections import defaultdict
import numpy as np
from allennlp.tools import drop_eval
CONSISTENCY_F1_THRESHOLD = 0.8
def _get_contrast_sets(perturbed_gold_annotations: Dict[str, Any]) -> List[Set[str]]:
grouped_instance_ids = defaultdict(set)
for article_info in perturbed_gold_annotations["data"]:
for paragraph_info in article_info["paragraphs"]:
for qa_pair in paragraph_info["qas"]:
query_id = qa_pair["id"]
original_query_id = qa_pair["original_id"]
grouped_instance_ids[original_query_id].add(original_query_id)
grouped_instance_ids[original_query_id].add(query_id)
return list(grouped_instance_ids.values())
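# Illustrative shape of the contrast sets returned above (the ids are
# placeholders): each set holds one original query id plus the ids of all its
# perturbations, e.g.
#   [{"orig_qid_a", "pert_qid_a1", "pert_qid_a2"}, {"orig_qid_b", "pert_qid_b1"}]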
def _get_questions_and_answers_from_data(annotations: Dict[str, Any]) -> Dict[str, List[str]]:
"""
If the annotations file is in the same format as the original data files, this method can be used to extract a
dict of query ids and answers.
"""
answers_dict: Dict[str, List[str]] = {}
questions_dict: Dict[str, str] = {}
for article_info in annotations["data"]:
for paragraph_info in article_info["paragraphs"]:
for qa_pair in paragraph_info["qas"]:
query_id = qa_pair["id"]
candidate_answers = [answer["text"] for answer in qa_pair["answers"]]
answers_dict[query_id] = candidate_answers
questions_dict[query_id] = qa_pair["question"]
return answers_dict, questions_dict
def get_instance_metrics(annotations: Dict[str, Any],
predicted_answers: Dict[str, Any]) -> Dict[str, Tuple[float, float]]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a
list of strings (or just one string) that is the answer.
The ``annotations`` are assumed to have either the format of the dev set in the Quoref data release, or the
same format as the predicted answers file.
"""
instance_metrics: Dict[str, Tuple[float, float]] = {}
if "data" in annotations:
# We're looking at annotations in the original data format. Let's extract the answers.
annotated_answers, questions_dict = _get_questions_and_answers_from_data(annotations)
else:
questions_dict = None
annotated_answers = annotations
for query_id, candidate_answers in annotated_answers.items():
max_em_score = 0.0
max_f1_score = 0.0
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
gold_answer = tuple(candidate_answers)
em_score, f1_score = drop_eval.get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
else:
print("Missing prediction for question: {}".format(query_id))
max_em_score = 0.0
max_f1_score = 0.0
instance_metrics[query_id] = max_em_score, max_f1_score
return instance_metrics, questions_dict
def evaluate_contrast_sets(original_prediction_path: str,
original_gold_path: str,
perturbed_prediction_path: str,
perturbed_gold_path: str,
verbose: bool = False) -> None:
"""
Takes a prediction files and gold files of original and perturbed sets, evaluates the predictions in both
files, and computes individual metrics and consistency over contrast sets. All
files must be json formatted and must have query_id keys, which are used to match predictions to gold
annotations. Writes metrics to standard output.
"""
# pylint: disable=too-many-locals,too-many-statements
original_predicted_answers = json.load(open(original_prediction_path, encoding="utf-8"))
original_annotations = json.load(open(original_gold_path, encoding="utf-8"))
perturbed_predicted_answers = json.load(open(perturbed_prediction_path, encoding="utf-8"))
perturbed_annotations = json.load(open(perturbed_gold_path, encoding="utf-8"))
original_instance_metrics, original_questions = get_instance_metrics(original_annotations,
original_predicted_answers)
perturbed_instance_metrics, perturbed_questions = get_instance_metrics(perturbed_annotations,
perturbed_predicted_answers)
original_em_scores = [x[0] for x in original_instance_metrics.values()]
original_f1_scores = [x[1] for x in original_instance_metrics.values()]
global_original_em = np.mean(original_em_scores)
global_original_f1 = np.mean(original_f1_scores)
perturbed_em_scores = [x[0] for x in perturbed_instance_metrics.values()]
perturbed_f1_scores = [x[1] for x in perturbed_instance_metrics.values()]
global_perturbed_em = np.mean(perturbed_em_scores)
global_perturbed_f1 = np.mean(perturbed_f1_scores)
global_combined_em = np.mean(original_em_scores + perturbed_em_scores)
global_combined_f1 = np.mean(original_f1_scores + perturbed_f1_scores)
print("\nMetrics on original dataset")
print("Exact-match accuracy {0:.2f}".format(global_original_em * 100))
print("F1 score {0:.2f}".format(global_original_f1 * 100))
print("\nMetrics on perturbed dataset")
print("Exact-match accuracy {0:.2f}".format(global_perturbed_em * 100))
print("F1 score {0:.2f}".format(global_perturbed_f1 * 100))
print("\nMetrics on combined dataset")
print("Exact-match accuracy {0:.2f}".format(global_combined_em * 100))
print("F1 score {0:.2f}".format(global_combined_f1 * 100))
contrast_sets = _get_contrast_sets(perturbed_annotations)
set_sizes = [len(set_) for set_ in contrast_sets]
mean_size = np.mean(set_sizes)
std_sizes = np.std(set_sizes)
all_instance_metrics = {key: value for key, value in list(original_instance_metrics.items()) +
list(perturbed_instance_metrics.items())}
consistency_scores = []
if original_questions is not None and perturbed_questions is not None:
all_questions = {key: (value, "original") for key, value in original_questions.items()}
all_questions.update({key: (value, "perturbed") for key, value in perturbed_questions.items()})
elif verbose:
print("Warning: verbose flag is set, but original data does not contain questions! Ignoring the flag.")
verbose = False
num_changed_questions = 0
for set_ in contrast_sets:
consistency = 1.0 if all([all_instance_metrics[query_id][1] > CONSISTENCY_F1_THRESHOLD
for query_id in set_]) else 0.0
consistency_scores.append(consistency)
perturbed_set_questions = []
if original_questions is not None:
for query_id in set_:
question_text, question_type = all_questions[query_id]
if question_type == 'original':
original_set_question = question_text
else:
perturbed_set_questions.append(question_text)
num_changed_questions += sum([text != original_set_question for text in perturbed_set_questions])
if verbose:
print("===================")
for query_id in set_:
print(f"Question: {all_questions[query_id]}")
print(f"Metrics: {all_instance_metrics[query_id]}")
print(f"Consistency: {consistency}")
    global_consistency = np.mean(consistency_scores)
    if perturbed_questions is not None:
        percent_changed_questions = num_changed_questions / len(perturbed_questions) * 100
    else:
        # Question texts are unavailable, so no change statistics can be computed.
        percent_changed_questions = 0.0
print("\nMetrics on contrast sets:")
print(f"Number of contrast sets: {len(contrast_sets)}")
print(f"Max contrast set size: {max(set_sizes)}")
print(f"Mean set size: {mean_size} (+/- {std_sizes})")
print(f"Number of questions changed: {num_changed_questions} ({percent_changed_questions}%)")
print("Consistency: {0:.2f}".format(global_consistency * 100))
if __name__ == "__main__":
# pylint: disable=invalid-name
parser = argparse.ArgumentParser(description="Evaluate Quoref predictions given contrast sets")
parser.add_argument(
"--original_gold_path",
type=str,
        required=True,
        help="location of the original test set with answers (e.g., quoref-test-v0.1.json)",
)
parser.add_argument(
"--original_prediction_path",
type=str,
required=True,
help="location of the file with predictions over the original test set",
)
parser.add_argument(
"--perturbed_gold_path",
type=str,
required=True,
help="location of the perturbed test set with answers",
)
parser.add_argument(
"--perturbed_prediction_path",
type=str,
required=True,
help="location of the file with predictions over the perturbed test set",
)
parser.add_argument(
"--verbose",
action='store_true',
help="will show details of instances if set",
)
args = parser.parse_args()
evaluate_contrast_sets(args.original_prediction_path,
args.original_gold_path,
args.perturbed_prediction_path,
args.perturbed_gold_path,
args.verbose)
| contrast-sets-main | quoref/compute_metrics.py |
import argparse
import json
from collections import defaultdict
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluation for NLVR2 contrast set')
parser.add_argument('--prediction-path', help='Prediction path')
args = parser.parse_args()
# prediction file is expected to be a text file with separate line per prediction,
# with identifier and prediction (as 0/1) separated by a comma.
# e.g.
# test1-769-1-0-1,1
# test1-769-1-0-2,0
# test1-256-2-0-1,0
# test1-256-2-0-2,1
lines = list(open(args.prediction_path, 'rt'))
predictions = [line.split(',') for line in lines]
pred_per_identifier = {identifier: pred.strip() for identifier, pred in predictions}
n_total = 0
n_correct = 0
correct_per_group = defaultdict(list)
for line in open('contrast_set_nlvr2.jsonl', 'rt'):
ex = json.loads(line)
identifier = ex['identifier']
group = ex['identifier'][:-2]
gold_label = ex['label']
correct = False
n_total += 1
if identifier in pred_per_identifier:
            # pred_per_identifier values are "0"/"1" strings; bool() of any non-empty
            # string is True, so cast through int to get the intended boolean.
            pred = bool(int(pred_per_identifier[identifier]))
correct = pred == gold_label
if correct:
n_correct += 1
else:
# prediction not found
pass
correct_per_group[group].append(correct)
acc = n_correct / n_total
consistency = sum([all(g) for g in correct_per_group.values()]) / len(correct_per_group)
print(f"Accuracy: {acc}")
print(f"Consistency: {consistency}")
| contrast-sets-main | nlvr2/eval.py |
import sys
import conllu
import json
from collections import defaultdict
import IPython as ipy
def read_data(filename):
data = open(filename).read()
texts = [t for t in data.split("\n\n") if t.strip() != ""]
trees = []
for text in texts:
trees += conllu.parse(text)
return trees
def count_attachments(trees):
labels = defaultdict(int)
head_pos = defaultdict(int)
total = 0
for tree in trees:
for token in tree:
if token['upostag'] != 'ADP':
continue
total += 1
head = tree[int(token['head'])-1]
grand_head = tree[int(head['head'])-1]
labels[head['deprel']] += 1
head_pos[grand_head['upostag']] += 1
print(f"total: {total}")
return labels, head_pos
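# Under UD conventions an adposition attaches to its complement via `case`, and that
# complement attaches (e.g., via `obl`/`nmod`) to the word the prepositional phrase
# modifies. The "grand head" (the head of the adposition's head) used throughout this
# file is therefore the effective attachment site whose changes are being tracked.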
def compare_attachments(orig, edit):
attachments = defaultdict(int)
alterations = {}
total = 0
for i,otree in enumerate(orig):
total += 1
print(" ".join([f['form'] for f in otree]))
etree = edit[i]
print(" ".join([f['form'] for f in etree]))
alt = False
alt_adp = False
for j,otoken in enumerate(otree):
etoken = etree[j]
if etoken['form'] != otoken['form']:
if not alt:
alt = True
print("First altered token: {}-->{}".format(otoken['form'],etoken['form']))
if otoken['upostag'] != 'ADP':
continue
ohead = otree[int(otoken['head'])-1]
ehead = etree[int(etoken['head'])-1]
ograndhead = otree[int(ohead['head'])-1]
egrandhead = etree[int(ehead['head'])-1]
if ohead['form'] == ehead['form'] and ograndhead['form'] == egrandhead['form']:
continue
else:
print("First altered adposition: {0} {2}-->{1} {2}".format(ograndhead['form'],egrandhead['form'],otoken['form']))
alterations[i] = (j, ograndhead['id']-1, egrandhead['id']-1)
attachments[(ograndhead['upostag'],egrandhead['upostag'])] += 1
break
print(f"Total altered adpositions: {total}")
return alterations,attachments
def compare_all_attachments(trees):
alterations = defaultdict(list)
for i,tree in enumerate(trees):
print(" ".join([f['form'] for f in tree]))
for j,token in enumerate(tree):
if token['upostag'] != 'ADP':
continue
head = tree[int(token['head'])-1]
grandhead = tree[int(head['head'])-1]
alterations[i].append((j, grandhead['id']-1))
return alterations
if __name__=="__main__":
trees_orig = read_data(sys.argv[1])
trees_edit = read_data(sys.argv[2])
alter_file = sys.argv[3]
label_distribution_orig, pos_distribution_orig = count_attachments(trees_orig)
print("Original Stats:")
print(", ".join([f"{key}:{value}" for key,value in sorted(label_distribution_orig.items())]))
print(", ".join([f"{key}:{value}" for key,value in sorted(pos_distribution_orig.items())]))
label_distribution_edit, pos_distribution_edit = count_attachments(trees_edit)
print("Altered Stats:")
print(", ".join([f"{key}:{value}" for key,value in sorted(label_distribution_edit.items())]))
print(", ".join([f"{key}:{value}" for key,value in sorted(pos_distribution_edit.items())]))
alterations,attachments = compare_attachments(trees_orig,trees_edit)
print(alterations)
print("")
print("\n".join([f"{key}:{value}" for key,value in sorted(attachments.items())]))
with open("alter_dict.json",'w') as f:
f.write(json.dumps(alterations))
orig_full_gold = compare_all_attachments(trees_orig)
with open("orig_gold_dict.json",'w') as f:
f.write(json.dumps(orig_full_gold))
edit_full_gold = compare_all_attachments(trees_edit)
with open("edit_gold_dict.json",'w') as f:
f.write(json.dumps(edit_full_gold))
| contrast-sets-main | UD_English/stats.py |
import sys
import json
from collections import defaultdict
import IPython as ipy
def eval_target_predictions(predict_originals_file, predict_altered_file, gold_file):
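    # The gold file is assumed to be the alter_dict.json written by stats.py:
    # {sentence_index: [adposition_token_index, original_attachment_index, edited_attachment_index]}
    # where all indices are 0-based positions into the token list.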
with open(gold_file,'r') as f:
true_attachments = json.loads(f.read())
predictions_orig = []
with open(predict_originals_file,'r') as f:
for line in f:
predictions_orig.append(json.loads(line))
predictions_alt = []
with open(predict_altered_file,'r') as f:
for line in f:
predictions_alt.append(json.loads(line))
comparison = defaultdict(list)
for variant in [predictions_orig, predictions_alt]:
for i,prediction in enumerate(variant):
gold = true_attachments[str(i)]
target_adp = prediction['words'][gold[0]]
pred_head_idx = prediction['predicted_heads'][gold[0]]
pred_head = prediction['words'][pred_head_idx-1]
pred_grandhead_idx = prediction['predicted_heads'][pred_head_idx-1]
pred_grandhead = prediction['words'][pred_grandhead_idx-1]
if variant == predictions_orig:
# parser predictions are for the original version of the sentences
gold_grandhead = prediction['words'][gold[1]]
else:
# parser predictions are on the altered version of the sentences
gold_grandhead = prediction['words'][gold[2]]
comparison[i].append((pred_grandhead,
gold_grandhead,
pred_grandhead==gold_grandhead))
return comparison
def eval_all_predictions(predict_file, gold_file):
with open(gold_file,'r') as f:
true_attachments = json.loads(f.read())
predictions = []
with open(predict_file,'r') as f:
for line in f:
predictions.append(json.loads(line))
comparisons = defaultdict(list)
for i,prediction in enumerate(predictions):
gold_list = true_attachments[str(i)]
for gold_annotation in gold_list:
target_word = prediction['words'][gold_annotation[0]]
pred_head_idx = prediction['predicted_heads'][gold_annotation[0]]
pred_head = prediction['words'][pred_head_idx-1]
pred_grandhead_idx = prediction['predicted_heads'][pred_head_idx-1]
pred_grandhead = prediction['words'][pred_grandhead_idx-1]
gold_grandhead = prediction['words'][gold_annotation[1]]
comparisons[i].append((pred_grandhead,
gold_grandhead,
pred_grandhead==gold_grandhead))
return comparisons
if __name__=="__main__":
if len(sys.argv) > 3:
comparison = eval_target_predictions(sys.argv[1],sys.argv[2],sys.argv[3])
osum = sum([comparison[i][0][2] for i in comparison])
print("Correct attachments when predicting original sentences: {}/150".format(osum))
esum = sum([comparison[i][1][2] for i in comparison])
print("Correct attachments when predicting edited sentences: {}/150".format(esum))
consistency = sum([1 if (comparison[i][0][2] and comparison[i][1][2]) else 0 for i in comparison])
print("Correct attachments when predicting both versions of a sentence: {}/150".format(consistency))
else:
full_results = eval_all_predictions(sys.argv[1],sys.argv[2])
correct_count = 0
total_count = 0
for i in full_results:
for j in range(len(full_results[i])):
correct_count += full_results[i][j][2]
total_count += 1
print("Correct attachments for all words: {}/{} ({:.2f}%)".format(correct_count, total_count, 100*correct_count/total_count ))
ipy.embed()
| contrast-sets-main | UD_English/eval_json_predictions.py |
def helper(arr):
corr = 0
for a in arr:
if a == 1:
corr += 1
return 1.0*corr/len(arr)
if __name__ == "__main__":
output_labels = {"BEFORE": 0, "AFTER": 1, "EQUAL": 2, "VAGUE": 3}
with open('proposed_elmo_lr0.001.merged.output','r') as f:
content = f.readlines()
uid2result = {}
perturb2result = {"order":[],"tense":[],"indicator":[],"other":[]}
tmp_cnt = 0
for line in content:
line = line.strip()
tmp = line.split(",")
unit_id = tmp[0]
gold_label = output_labels[tmp[1]]
pred_label = int(tmp[2])
if unit_id not in uid2result:
uid2result[unit_id] = []
if pred_label == gold_label:
uid2result[unit_id].append(1)
else:
uid2result[unit_id].append(0)
if "order" in line.lower() \
or "tense" in line.lower()\
or "indicator" in line.lower() or "connective" in line.lower()\
or "timex" in line.lower() or "word" in line.lower() or "verb" in line.lower():
tmp_cnt += 1
if "order" in line.lower():
if pred_label == gold_label:
perturb2result["order"].append(1)
else:
perturb2result["order"].append(0)
if "tense" in line.lower():
if pred_label == gold_label:
perturb2result["tense"].append(1)
else:
perturb2result["tense"].append(0)
if "indicator" in line.lower() or "connective" in line.lower():
if pred_label == gold_label:
perturb2result["indicator"].append(1)
else:
perturb2result["indicator"].append(0)
if "timex" in line.lower() or "word" in line.lower() or "verb" in line.lower():
if pred_label == gold_label:
perturb2result["other"].append(1)
else:
perturb2result["other"].append(0)
corr_cnt = 0
cnt = 0
for uid, result in uid2result.items():
corr = True
for r in result:
if r == 0:
corr = False
break
if corr:
corr_cnt += 1
cnt += 1
    print('Percentage of instances whose perturbations are all correctly predicted: %.2f%%' % (100.0*corr_cnt/cnt))
print('Accuracy of appearance order change (#instance=%d/%d=%.2f): %.2f%%' % (len(perturb2result['order']), tmp_cnt, 1.0*len(perturb2result['order'])/tmp_cnt, 100.0*helper(perturb2result['order'])))
print('Accuracy of tense change (#instance=%d/%d=%.2f): %.2f%%' % (len(perturb2result['tense']), tmp_cnt, 1.0*len(perturb2result['tense'])/tmp_cnt, 100.0*helper(perturb2result['tense'])))
print('Accuracy of indicator change (#instance=%d/%d=%.2f): %.2f%%' % (len(perturb2result['indicator']), tmp_cnt, 1.0*len(perturb2result['indicator'])/tmp_cnt, 100.0 * helper(perturb2result['indicator'])))
print('Accuracy of other changes (#instance=%d/%d=%.2f): %.2f%%' % (len(perturb2result['other']), tmp_cnt, 1.0*len(perturb2result['other'])/tmp_cnt, 100.0 * helper(perturb2result['other']))) | contrast-sets-main | MATRES/consistency_analysis.py |
import pandas as pd
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
lemmatizer = WordNetLemmatizer()
# function to convert nltk tag to wordnet tag
def nltk_tag_to_wordnet_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return None
def tokenlist2PosAndLemma(toklist):
    try:
        pos = nltk.pos_tag(toklist)
    except Exception as e:
        # Without a successful tagging pass, `pos` would be undefined below,
        # so report the failure and re-raise instead of silently continuing.
        print(f"POS tagging failed for {toklist}: {e}")
        raise
wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), pos)
lemma = []
for word, tag in wordnet_tagged:
if tag is None:
# if there is no available tag, append the token as is
lemma.append(word)
else:
# else use the tag to lemmatize the token
lemma.append(lemmatizer.lemmatize(word, tag).lower())
assert len(pos)==len(lemma)==len(toklist)
return pos, lemma
def bodygraph2xml(bodygraph):
splitter = "///"
sentences_all = bodygraph.split("<p>")
sentences = []
xml_str = ''
# filter sentences out of context
for s in sentences_all:
if "<span" not in s:
continue
s = s.replace('</p>', '') \
.replace("<span style='color:red;'>", '') \
.replace("<span style='color:blue;'>", '') \
.replace('</span>', '')
sentences.append(s)
assert len(sentences)<3
if len(sentences) == 1:
target_sentid = 0
tokens = sentences[0].split()
event_tok_id = []
for i, tok in enumerate(tokens):
if "<strong>" in tok:
event_tok_id.append(i)
tokens[i] = tokens[i].replace("<strong>",'').replace("</strong>",'')
tokens[i] = tokens[i].strip()
tokens = [x for x in tokens if x]
assert len(event_tok_id)==2
pos, lemma = tokenlist2PosAndLemma(tokens)
for i, tok in enumerate(tokens):
xml_str += tok + splitter + lemma[i] + splitter + pos[i][1] + splitter
if i < event_tok_id[0]:
xml_str += "B"
elif i == event_tok_id[0]:
xml_str += "E1"
elif event_tok_id[0]<i<event_tok_id[1]:
xml_str += "M"
elif i == event_tok_id[1]:
xml_str += "E2"
else:
xml_str += "A"
xml_str += " "
else:
target_sentid = 1
tokens1 = sentences[0].split()
event1_tok_id = []
for i, tok in enumerate(tokens1):
if "<strong>" in tok:
event1_tok_id.append(i)
tokens1[i] = tokens1[i].replace("<strong>", '').replace("</strong>", '')
tokens1[i] = tokens1[i].strip()
tokens1 = [x for x in tokens1 if x]
assert len(event1_tok_id) == 1
pos1, lemma1 = tokenlist2PosAndLemma(tokens1)
tokens2 = sentences[1].split()
event2_tok_id = []
for i, tok in enumerate(tokens2):
if "<strong>" in tok:
event2_tok_id.append(i)
tokens2[i] = tokens2[i].replace("<strong>", '').replace("</strong>", '')
tokens2[i] = tokens2[i].strip()
tokens2 = [x for x in tokens2 if x]
assert len(event2_tok_id) == 1
pos2, lemma2 = tokenlist2PosAndLemma(tokens2)
for i, tok in enumerate(tokens1):
xml_str += tok + splitter + lemma1[i] + splitter + pos1[i][1] + splitter
if i < event1_tok_id[0]:
xml_str += "B"
elif i == event1_tok_id[0]:
xml_str += "E1"
else:
xml_str += "M"
xml_str += " "
for i, tok in enumerate(tokens2):
xml_str += tok + splitter + lemma2[i] + splitter + pos2[i][1] + splitter
if i < event2_tok_id[0]:
xml_str += "M"
elif i == event2_tok_id[0]:
xml_str += "E2"
else:
xml_str += "A"
xml_str += " "
return target_sentid, xml_str
def row2xml_original(row):
ret = '<SENTENCE UNITID="%s" DOCID="%s" SOURCE="E1" TARGET="E2" SOURCESENTID="%d" TARGETSENTID="%d"' \
' LABEL="%s" SENTDIFF="%d" PERTURB="">%s </SENTENCE>'
unit_id = row['_unit_id']
bodygraph = row['bodygraph']
docid = row['docid']
decision = row['decision'].strip()
label = decision2label(decision)
source_sentid = 0
target_sentid, bodyxml = bodygraph2xml(bodygraph)
sentdiff = target_sentid-source_sentid
return ret % (unit_id, docid, source_sentid, target_sentid, label, sentdiff, bodyxml)
def row2xml_perturbed(row):
ret = '<SENTENCE UNITID="%s" DOCID="%s" SOURCE="E1" TARGET="E2" SOURCESENTID="%d" TARGETSENTID="%d"' \
' LABEL="%s" SENTDIFF="%d" PERTURB="%s">%s </SENTENCE>'
unit_id = row['_unit_id']
bodygraph = row['modified bodygraph']
docid = row['docid']
decision = row['new decision'].strip()
perturb = row['reason']
label = decision2label(decision)
source_sentid = 0
target_sentid, bodyxml = bodygraph2xml(bodygraph)
sentdiff = target_sentid-source_sentid
return ret % (unit_id, docid, source_sentid, target_sentid, label, sentdiff, perturb, bodyxml)
def decision2label(decision):
if decision == 'before':
return "BEFORE"
if decision == 'after':
return "AFTER"
if decision == 'simultaneous':
return "EQUAL"
if decision == 'vague':
return "VAGUE"
return "UNDEF" # shouldn't happen
if __name__ == "__main__":
csv_fname = "Platinum_subset_minimal_pairs.csv"
xml_fname_original = "Platinum_subset_original.xml"
xml_fname_perturbed = "Platinum_subset_perturbed.xml"
data = pd.read_csv(csv_fname)
n = len(data)
# get original annotations
with open(xml_fname_original,'w') as f:
f.write("<DATA>\n")
prev_unit_id = -1
for i, row in data.iterrows():
if type(row['modified bodygraph']) is float:
continue
unit_id = row['_unit_id']
if unit_id == prev_unit_id:
continue
f.write(row2xml_original(row))
f.write("\n")
prev_unit_id = unit_id
f.write("</DATA>")
# get perturbed annotations
with open(xml_fname_perturbed,'w') as f:
f.write("<DATA>\n")
for i, row in data.iterrows():
if type(row['modified bodygraph']) is float:
continue
f.write(row2xml_perturbed(row))
f.write("\n")
f.write("</DATA>")
| contrast-sets-main | MATRES/AnnotationCSV2XML.py |
import torch
import argparse
from Code.utils.constants import GAIN, BIAS
def get_args():
parser = argparse.ArgumentParser(description='RL')
# dataset
parser.add_argument(
'--output-dir', type=str, default='outputs')
parser.add_argument(
'--dataset-train', type=str, default='data/train_social_chem_with_prefix_t5.jsonl',
help='JSONL file containing train prompts. Each row must contain a prompt at `row["prompt"]["text"]`.')
parser.add_argument(
'--dataset-val', type=str, default='data/dev_social_chem_with_prefix_t5.jsonl',
help='JSONL file containing dev prompts. Each row must contain a prompt at `row["prompt"]["text"]`.')
parser.add_argument(
        '--perspective-rate-limit', type=int, default=15, help='number of Perspective API calls per second')
parser.add_argument(
'--init-model', type=str, default='output_t5_large_qgen/',
help='language model used for policy.')
parser.add_argument(
'--ref-model', type=str, default='output_t5_large_qgen/',
help='language model used for reference policy.')
parser.add_argument(
'--response-length', type=int, default=16, help='number of tokens to generate for each prompt.')
parser.add_argument(
'--temperature', type=float, default=0.7, help='temperature for sampling policy.')
# ppo
parser.add_argument(
'--total-episodes', type=int, default=1000000, help='total number of episodes')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument(
'--nminibatches', type=int, default=1, help='number of ppo minibatch per batch')
parser.add_argument(
'--noptepochs', type=int, default=4, help='number of ppo epochs reusing rollouts')
parser.add_argument(
'--lr', type=float, default=1e-5, help='learning rate')
parser.add_argument(
'--vf_coef', type=float, default=1.0, help='value loss coefficient')
parser.add_argument(
'--cliprange', type=float, default=.2, help='clip parameter for policy gradient')
parser.add_argument(
'--cliprange_value', type=float, default=.2, help='clip parameter for value function')
parser.add_argument(
'--gamma', type=float, default=1.0, help='discount factor for rewards')
parser.add_argument(
'--lam', type=float, default=0.95, help='lambda parameter for generalized advantage estimation')
parser.add_argument(
'--whiten_rewards', action='store_false', default=True, help='whether to normalize reward in each minibatch')
parser.add_argument(
'--clip_grad', action='store_true', default=False, help='whether to clip gradient')
parser.add_argument(
'--max-grad-norm', type=float, default=0.5, help='maximum norm of gradients ')
# reward
parser.add_argument(
'--kl_coef', type=float, default=0.15, help='coefficient for KL term in reward')
parser.add_argument(
'--adaptive_kl', action='store_false', default=True, help='whether to use adaptive KL controller')
parser.add_argument(
'--target', type=float, default=6.0, help='target value in adaptive KL controller')
parser.add_argument(
'--horizon', type=float, default=10000, help='horizon value in adaptive KL controller')
parser.add_argument(
'--gain', type=float, default=GAIN, help='normalization factor for reward')
parser.add_argument(
'--bias', type=float, default=BIAS, help='normalization factor for reward')
# generation
parser.add_argument(
'--num-samples', type=int, default=25, help='number of samples to generate for each prompt.')
parser.add_argument(
'--top-p', type=float, default=1.0, help='hyperparameter for nucleus sampling')
# other
parser.add_argument(
'--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument(
'--log-interval', type=int, default=100, help='step interval to print out logs')
parser.add_argument(
'--save-interval', type=int, default=1000, help='step interval to save model checkpoints')
parser.add_argument('--eval-interval', type=int, default=1000, help='step interval to do evaluation')
parser.add_argument(
'--cuda-deterministic', action='store_false', default=True,
help="sets flags for determinism when using CUDA (potentially slow!)")
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
return args
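# Example invocation (hypothetical; adjust the module path and file locations to your setup):
#   python -m Code.main --dataset-train data/train_social_chem_with_prefix_t5.jsonl \
#       --dataset-val data/dev_social_chem_with_prefix_t5.jsonl \
#       --init-model output_t5_large_qgen/ --batch_size 64 --lr 1e-5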
| clarifydelphi-main | Code/arguments.py |
import os
os.environ['TRANSFORMERS_CACHE'] = 'cache/'
import torch
import torch.nn.functional as F
from typing import Union, List, Dict
from transformers import T5ForConditionalGeneration, T5Tokenizer
from Code.utils.constants import NEGATIVE_INF
from Code.utils.utils import logits_to_entropy, mask_pad
class Policy:
def __init__(self, model_name, device):
self.model = T5ForConditionalGeneration.from_pretrained(model_name)
self.tokenizer = T5Tokenizer.from_pretrained(model_name)
self.max_length = 50
self.device = device
self.model = self.model.to(self.device)
device_map = None
if torch.cuda.device_count() == 8:
device_map = {
4: [0],
5: [1, 2, 3, 4, 5, 6, 7],
6: [8, 9, 10, 11, 12, 13, 14, 15],
7: [16, 17, 18, 19, 20, 21, 22, 23],
}
if torch.cuda.device_count() == 6:
device_map = {
0: [0],
1: [1, 2, 3],
2: [4, 5, 6, 7, 8],
3: [9, 10, 11, 12, 13],
4: [14, 15, 16, 17, 18],
5: [19, 20, 21, 22, 23],
}
elif torch.cuda.device_count() == 4:
device_map = {
0: [0],
1: [1, 2, 3, 4, 5, 6, 7],
2: [8, 9, 10, 11, 12, 13, 14, 15],
3: [16, 17, 18, 19, 20, 21, 22, 23],
}
self.model.parallelize(device_map=device_map)
def sample(self,
prompts: Union[str, List[str]] = None,
input_ids: torch.Tensor = None,
attention_mask: torch.Tensor = None,
max_len: int = 30,
min_len: int = 3,
sample: bool = True,
top_k: int = None,
top_p: float = None,
temperature: float = None) -> Dict[str, Union[torch.Tensor, List[str]]]:
if prompts is not None:
assert input_ids is None and attention_mask is None, 'repeated input'
if isinstance(prompts, str):
prompts = [prompts]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding='max_length', truncation='longest_first', max_length=self.max_length)
input_ids = encodings_dict['input_ids'].to(self.device)
attention_mask = encodings_dict['attention_mask'].to(self.device)
else:
assert input_ids is not None, 'no input'
prompts = self.tokenizer.batch_decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
input_ids = input_ids.to(self.device)
attention_mask = attention_mask.to(self.device)
response_ids = self.model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_len,
min_length=min_len,
do_sample=sample,
top_k=top_k,
top_p=top_p,
temperature=temperature,
) # begins with 0 ([BOS]); ends with 1 ([EOS])
response_ids = response_ids[:, 1:].contiguous() # no beginning; ends with 1 ([EOS])
response_mask = (response_ids != self.model.config.pad_token_id).int()
response_text = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
with torch.no_grad():
# It seems impossible to merge this into generate() -- the "scores" returned by generate() is not correct, it contains mostly -inf
outputs = self.forward_pass(input_ids, attention_mask, response_ids, response_mask)
response_logits = outputs['response/logits']
response_logprobs = outputs['response/log_prob']
response_entropy = outputs['response/entropy']
return {
'query/text': prompts,
'query/input_ids': input_ids,
'query/mask': attention_mask,
'response/text': response_text,
'response/input_ids': response_ids,
'response/mask': response_mask,
'response/logits': response_logits,
'response/log_prob': response_logprobs,
'response/entropy': response_entropy,
}
def forward_pass(self,
query_input_ids: torch.Tensor,
query_mask: torch.Tensor,
response_input_ids: torch.Tensor,
response_mask: torch.Tensor):
outputs = self.model(
input_ids=query_input_ids,
attention_mask=query_mask,
labels=mask_pad(response_input_ids, response_mask, -100),
return_dict=True,
output_attentions=False,
output_hidden_states=False,
)
response_logits = outputs.logits # (B, RL-1, V)
logprobs = F.log_softmax(response_logits, dim=-1)
response_logprobs = torch.gather(logprobs, 2, response_input_ids[:, :, None]).squeeze(2) # (B, RL-1)
response_entropy = logits_to_entropy(response_logits) # (B, RL-1)
return {
'response/logits': response_logits,
'response/log_prob': mask_pad(response_logprobs, response_mask),
'response/entropy': mask_pad(response_entropy, response_mask),
}
if __name__ == "__main__":
    # Policy takes only a model name and a device.
    test = Policy(model_name='t5-large', device='cuda:0')
output = test.sample(prompts=['I like dogs.', 'a boy'], sample=False)
test.forward_pass(output['query/input_ids'], output['query/mask'], output['response/input_ids'], output['response/mask'])
from IPython import embed
embed()
| clarifydelphi-main | Code/policy.py |
import torch
import numpy as np
import csv
import pandas as pd
from tqdm import tqdm
from Code.policy import Policy
from torch.utils.data import DataLoader
from Code.lean_main import PromptDataset, PromptCollator
def expand(tensor, num_repeat):
return torch.reshape(tensor[:, None].expand(-1, num_repeat, -1), [batch_size * num_repeat, -1])
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def distinctness(responses, num_sample):
generations_batch = list(chunks(responses, num_sample))
dist1, dist2, dist3 = [], [], []
# calculate dist1, dist2, dist3 across generations for every prompt
for generations in tqdm(generations_batch, total=len(generations_batch), desc='Evaluating diversity'):
unigrams, bigrams, trigrams = set(), set(), set()
total_words = 0
for gen in generations:
o = gen.split(' ')
total_words += len(o)
unigrams.update(o)
for i in range(len(o) - 1):
bigrams.add(o[i] + '_' + o[i + 1])
for i in range(len(o) - 2):
trigrams.add(o[i] + '_' + o[i + 1] + '_' + o[i + 2])
dist1.append(len(unigrams) / total_words)
dist2.append(len(bigrams) / total_words)
dist3.append(len(trigrams) / total_words)
# take the mean across prompts
return np.nanmean(dist1), np.nanmean(dist2), np.nanmean(dist3)
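# Worked example for distinctness(): with generations ["a b a", "a c"] for one prompt,
# there are 5 tokens, 3 unique unigrams, 3 unique bigrams ("a_b", "b_a", "a_c") and
# 1 unique trigram, giving dist1 = 0.6, dist2 = 0.6 and dist3 = 0.2 for that prompt.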
if __name__ == "__main__":
model = 'PATH_TO_MODEL_DIR'
batch_size = 4
num_samples = 1
checkpoint_path = 'CHECKPOINT_PATH'
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
policy = Policy(model_name=model, device=device)
prompt_collator = PromptCollator(tokenizer=policy.tokenizer)
if checkpoint_path is not None:
checkpoint = torch.load(checkpoint_path, map_location='cpu')
policy.model.load_state_dict(checkpoint['policy_model'])
print('model initialization done!')
val_dataset = PromptDataset(path='data/dev_with_prefix.jsonl')
dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=prompt_collator, drop_last=True)
perplexities, prompts, responses = [], [], []
for i, batch in enumerate(tqdm(dataloader, total=len(dataloader))):
input_ids, attention_mask = batch
outputs = policy.sample(input_ids=expand(input_ids, num_samples), attention_mask=expand(attention_mask, num_samples), top_p=0.6, sample=True)
prompt, response = outputs['query/text'], outputs['response/text']
prompts.extend([x for n, x in enumerate(prompt) if not n % num_samples])
responses.extend(response)
data = pd.DataFrame.from_dict({'prompt': prompts})
    # use a tab delimiter so the output actually matches the .tsv extension
    outfile = csv.writer(open('predictions.tsv', 'w'), delimiter='\t')
for d, r in zip(data["prompt"], responses):
print(d)
print(r)
outfile.writerow([d, r])
| clarifydelphi-main | Code/sample_clarifyd.py |
import os
import sys
import torch
import json
import time
import logging
import random
import argparse
import numpy as np
import itertools
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam, Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from transformers import get_linear_schedule_with_warmup
from Code.arguments import get_args
from Code.policy import Policy
from Code.value import Value
from Code.utils.utils import ensure_dir, ceil_div, exact_div, whiten, reduce_mean, reduce_sum, reduce_std, clamp, flatten_dict
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path):
self.prompts = [json.loads(s.strip())["prompt"]["text"].strip() for s in open(path, 'r').readlines()]
def __len__(self):
return len(self.prompts)
def __getitem__(self, idx):
return {'prompt': self.prompts[idx]}
class PromptDatasetForDebug(Dataset):
def __init__(self, situation):
self.prompts = [situation]
def __len__(self):
return len(self.prompts)
def __getitem__(self, idx):
return {'prompt': self.prompts[idx]}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
prompts = [sequence['prompt'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask
class FixedKLController:
def __init__(self, kl_coef):
self.value = kl_coef
def update(self, current, n_steps):
pass
class AdaptiveKLController:
def __init__(self, init_kl_coef, params):
self.value = init_kl_coef
self.params = params
def update(self, current, n_steps):
target = self.params.target
proportional_error = np.clip(current / target - 1, -0.2, 0.2)
mult = 1 + proportional_error * n_steps / self.params.horizon
self.value *= mult
| clarifydelphi-main | Code/lean_main.py |
import torch
from transformers import T5Tokenizer
from Code.model.t5 import T5ForTokenRegression
from Code.utils.utils import mask_pad
from IPython import embed
class Value:
def __init__(self, model_type, device):
self.model = T5ForTokenRegression.from_pretrained(model_type)
self.device = device
self.model = self.model.to(self.device)
device_map = None
if torch.cuda.device_count() == 8:
device_map = {
4: [0],
5: [1, 2, 3, 4, 5, 6, 7],
6: [8, 9, 10, 11, 12, 13, 14, 15],
7: [16, 17, 18, 19, 20, 21, 22, 23],
}
if torch.cuda.device_count() == 6:
device_map = {
0: [0],
1: [1, 2, 3],
2: [4, 5, 6, 7, 8],
3: [9, 10, 11, 12, 13],
4: [14, 15, 16, 17, 18],
5: [19, 20, 21, 22, 23],
}
elif torch.cuda.device_count() == 4:
device_map = {
0: [0],
1: [1, 2, 3, 4, 5, 6, 7],
2: [8, 9, 10, 11, 12, 13, 14, 15],
3: [16, 17, 18, 19, 20, 21, 22, 23],
}
self.model.encoder.parallelize(device_map=device_map)
self.model.decoder.parallelize(device_map=device_map)
self.model.model_parallel = True
self.tokenizer = T5Tokenizer.from_pretrained(model_type)
self.model.config.pad_token_id = self.tokenizer.pad_token_id
def forward_pass(self,
query_input_ids: torch.Tensor,
query_mask: torch.Tensor,
response_input_ids: torch.Tensor,
response_mask: torch.Tensor):
query_input_ids = query_input_ids.to(self.device)
query_mask = query_mask.to(self.device)
response_input_ids = response_input_ids.to(self.device)
response_mask = response_mask.to(self.device)
outputs = self.model(
input_ids=query_input_ids,
attention_mask=query_mask,
labels=mask_pad(response_input_ids, response_mask, -100),
return_dict=True,
output_attentions=False,
output_hidden_states=False,
)
return {
'response/value': mask_pad(outputs.logits, response_mask)
}
| clarifydelphi-main | Code/value.py |
import os
import sys
import torch
import json
import time
import logging
import random
import argparse
import numpy as np
import itertools
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam, Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from transformers import get_linear_schedule_with_warmup
from Code.arguments import get_args
from Code.policy import Policy
from Code.value import Value
from Code.reward import Reward, reward_to_toxicity
from Code.utils.utils import ensure_dir, ceil_div, exact_div, whiten, reduce_mean, reduce_sum, reduce_std, clamp, flatten_dict
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path):
self.prompts = [json.loads(s.strip())["prompt"]["text"].strip() for s in open(path, 'r').readlines()]
def __len__(self):
return len(self.prompts)
def __getitem__(self, idx):
return {'prompt': self.prompts[idx]}
class PromptDatasetForDebug(Dataset):
def __init__(self, situation):
self.prompts = [situation]
def __len__(self):
return len(self.prompts)
def __getitem__(self, idx):
return {'prompt': self.prompts[idx]}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
prompts = [sequence['prompt'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask
class FixedKLController:
def __init__(self, kl_coef):
self.value = kl_coef
def update(self, current, n_steps):
pass
class AdaptiveKLController:
def __init__(self, init_kl_coef, params):
self.value = init_kl_coef
self.params = params
def update(self, current, n_steps):
target = self.params.target
proportional_error = np.clip(current / target - 1, -0.2, 0.2)
mult = 1 + proportional_error * n_steps / self.params.horizon
self.value *= mult
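# AdaptiveKLController above follows a multiplicative schedule in the style of
# lm-human-preferences-style PPO: the error clip(KL/target - 1, -0.2, 0.2) nudges the
# coefficient each update. For example, with target=6.0, horizon=10000 and n_steps=64
# (one batch), an observed KL of 9.0 clips to +0.2 and scales the coefficient by
# 1 + 0.2 * 64 / 10000, i.e. roughly 1.0013.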
class PPOTrainer:
def __init__(self,
params: argparse.Namespace,
policy: Policy,
ref_policy: Policy,
value_model: Value,
score_model: Reward,
train_dataloader: DataLoader,
val_dataloader: DataLoader,
optimizer: Optimizer,
scheduler: LambdaLR):
self.params = params
self.policy = policy
self.ref_policy = ref_policy
self.value_model = value_model
self.score_model = score_model
self.optimizer = optimizer
self.scheduler = scheduler
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.train_sampler = iter(self.train_dataloader)
self.writer = SummaryWriter()
        # Use the adaptive controller when adaptive KL is enabled; otherwise keep the coefficient fixed.
        if self.params.adaptive_kl:
            self.kl_ctl = AdaptiveKLController(self.params.kl_coef, params=self.params)
        else:
            self.kl_ctl = FixedKLController(self.params.kl_coef)
self.params.minibatch_size = exact_div(self.params.batch_size, self.params.nminibatches)
def compute_rewards(self, scores, logprobs, ref_logprobs, masks):
kl = logprobs - ref_logprobs
non_score_reward = -self.kl_ctl.value * kl
response_length = logprobs.size(1)
score_reward = torch.tensor([[0.] * (l-1) + [s] + [0.] * (response_length - l) for s, l
in zip(scores, torch.sum(masks, dim=1).tolist())], device=logprobs.device)
rewards = non_score_reward + score_reward
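        # Per-token reward is the KL penalty -kl_coef * (log pi - log pi_ref); the scalar
        # score from the reward model is added only at the final non-pad response token.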
return rewards, non_score_reward, self.kl_ctl.value
def train_minibatch(self, rollouts):
"""One step of PPO training."""
self.optimizer.zero_grad()
ppo_loss, stats = self.loss(rollouts)
ppo_loss.backward()
if self.params.clip_grad:
torch.nn.utils.clip_grad_norm_(itertools.chain(self.policy.model.parameters(),
self.value_model.model.parameters()),
self.params.max_grad_norm)
self.optimizer.step()
return stats
def train(self, rollouts):
stat_list = []
# Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
for ppo_epoch_idx in range(self.params.noptepochs):
order = np.random.permutation(self.params.batch_size)
for mb_start in range(0, self.params.batch_size, self.params.minibatch_size):
mb_data = {k: v[order[mb_start:mb_start + self.params.minibatch_size]] if type(v) == torch.Tensor else
[v[i] for i in order[mb_start:mb_start + self.params.minibatch_size].tolist()]
for k, v in rollouts.items()}
stats = self.train_minibatch(mb_data)
stat_list.append(stats)
# Collect the stats. (They will be averaged later.)
return {k: [s[k] for s in stat_list] for k in stat_list[0].keys()}
def step(self, step_num):
step_started_at = time.time()
try:
input_ids, attention_mask = next(self.train_sampler)
assert len(input_ids) == self.params.batch_size, 'insufficient batch'
except (StopIteration, AssertionError):
self.train_sampler = iter(self.train_dataloader)
input_ids, attention_mask = next(self.train_sampler)
with torch.no_grad():
rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask)
forward_inputs = {'query_input_ids': rollouts['query/input_ids'],
'query_mask': rollouts['query/mask'],
'response_input_ids': rollouts['response/input_ids'],
'response_mask': rollouts['response/mask']}
rollouts['response/value'] = self.value_model.forward_pass(**forward_inputs)['response/value']
rollouts['response/value'] *= rollouts['response/mask']
ref_logprobs = self.ref_policy.forward_pass(**forward_inputs)['response/log_prob']
logprobs, masks = rollouts['response/log_prob'], rollouts['response/mask']
scores = self.score_model.get_reward(rollouts['query/text'], rollouts['response/text'])
rewards, non_score_reward, kl_coef = self.compute_rewards(scores, logprobs, ref_logprobs, masks)
rollouts['rewards'] = rewards
train_stats = self.train(rollouts=rollouts)
data = {'scores': scores, 'logprobs': logprobs, 'ref_logprobs': ref_logprobs, 'masks': masks,
'non_score_reward': non_score_reward, 'train_stats': train_stats, 'kl_coef': kl_coef}
stats = self.record_step_stats(data, step_num)
for metric in ['kl', 'entropy', 'reward', 'reward_total']:
self.writer.add_scalar(f'Objective/{metric}', stats[f'objective/{metric}'], step_num)
for metric in ['policy', 'value', 'total']:
self.writer.add_scalar(f'Loss/{metric}', stats[f'ppo/loss/{metric}'], step_num)
self.kl_ctl.update(stats['objective/kl'], self.params.batch_size)
self.print_samples(queries=rollouts['query/text'], responses=rollouts['response/text'], scores=scores,
logprobs=logprobs, ref_logprobs=ref_logprobs, masks=masks, step=step_num)
step_time = time.time() - step_started_at
eps_per_second = float(self.params.batch_size) / step_time
log.info(f"[ppo_step {step_num}] step_time={step_time:.2f}s, eps/s={eps_per_second:.2f}")
self.save(step=step_num)
self.eval(step=step_num)
def record_step_stats(self, data, step):
masks = data['masks']
kl = data['logprobs'] - data['ref_logprobs']
mean_kl = torch.mean(reduce_sum(kl, masks, axis=1))
mean_entropy = torch.mean(reduce_sum(-data['logprobs'], masks, axis=1))
mean_non_score_reward = torch.mean(reduce_sum(data['non_score_reward'], masks, axis=1))
stats = {
'objective/kl': mean_kl.item(),
'objective/kl_coef': self.params.kl_coef,
'objective/entropy': mean_entropy.item(),
}
for k, v in data['train_stats'].items():
stats[f'ppo/{k}'] = np.mean([x.item() for x in v])
stats['objective/reward'] = np.mean(data['scores'])
stats['objective/reward_total'] = np.mean(data['scores']) + mean_non_score_reward.item()
stats['ppo/val/var_explained'] = 1 - stats['ppo/val/error'] / stats['ppo/returns/var']
steps = step + 1
stats.update({
'elapsed/updates': steps,
'elapsed/steps/serial': steps * self.params.response_length,
'elapsed/steps/total': steps * self.params.batch_size * self.params.response_length,
'elapsed/episodes': steps * self.params.batch_size,
})
return stats
def print_samples(self, queries, responses, scores, logprobs, ref_logprobs, masks, step):
if step % self.params.log_interval != 0:
return
# Log samples
for i in range(min(10, len(queries))):
sample_kl = torch.sum((logprobs[i] - ref_logprobs[i]) * masks[i]).item()
print(queries[i] + responses[i])
print(f" score = {scores[i]:+.2f}")
print(f" kl = {sample_kl:+.2f}")
print(f" total = {scores[i] - self.params.kl_coef * sample_kl:+.2f}")
def save(self, step):
if step % self.params.save_interval != 0:
return
torch.save({
'policy_model': self.policy.model.state_dict(),
'value_model': self.value_model.model.state_dict(),
'optimizer': self.optimizer.state_dict()
}, f'{self.params.model_dir}/ckp_{step}.pth')
log.info(f"[ppo_step {step}] model checkpoint saved")
def eval(self, step):
if step % self.params.eval_interval != 0:
return
log.info(f"[ppo_step {step}] evaluating ...")
perplexities, divergences = [], []
for i, (input_ids, attention_mask) in enumerate(tqdm(self.val_dataloader)):
with torch.no_grad():
rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask)
forward_inputs = {'query_input_ids': rollouts['query/input_ids'],
'query_mask': rollouts['query/mask'],
'response_input_ids': rollouts['response/input_ids'],
'response_mask': rollouts['response/mask']}
ref_logprobs = self.ref_policy.forward_pass(**forward_inputs)['response/log_prob']
perplexity = -1. * reduce_sum(ref_logprobs, rollouts['response/mask'].float(), axis=1)
perplexities.extend(perplexity.cpu().detach().numpy().tolist())
score = self.score_model.get_reward(rollouts['query/text'], rollouts['response/text'])
divergences.extend(score)
ppl_score, divergence_score = np.mean(perplexities), np.mean(divergences)
print(f" perplexity = {ppl_score:+.2f}")
print(f" divergence = {divergence_score:+.2f}")
self.writer.add_scalar('Evaluation/perplexity', ppl_score, step)
self.writer.add_scalar('Evaluation/divergence', divergence_score, step)
def loss(self, rollouts):
values = rollouts['response/value']
old_logprob = rollouts['response/log_prob']
rewards = rollouts['rewards']
masks = rollouts['response/mask']
with torch.no_grad():
if self.params.whiten_rewards:
rewards = whiten(rewards, masks, shift_mean=False)
lastgaelam = 0
advantages_reversed = []
#gen_length = self.params.response_length
gen_length = rewards.size(1)
for t in reversed(range(gen_length)):
nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0
delta = rewards[:, t] + self.params.gamma * nextvalues - values[:, t]
lastgaelam = delta + self.params.gamma * self.params.lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1], dim=1)
returns = advantages + values
advantages = whiten(advantages, masks).detach()
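        # The loop above is generalized advantage estimation (GAE):
        #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        #   A_t     = delta_t + gamma * lam * A_{t+1}
        # and the value-function targets are the returns R_t = A_t + V(s_t).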
forward_inputs = {'query_input_ids': rollouts['query/input_ids'],
'query_mask': rollouts['query/mask'],
'response_input_ids': rollouts['response/input_ids'],
'response_mask': rollouts['response/mask']}
outputs = self.policy.forward_pass(**forward_inputs)
outputs['response/value'] = self.value_model.forward_pass(**forward_inputs)['response/value']
outputs['response/value'] *= rollouts['response/mask']
vpred = outputs['response/value']
vpredclipped = clamp(vpred, values - self.params.cliprange_value, values + self.params.cliprange_value)
vf_losses1 = torch.square(vpred - returns)
vf_losses2 = torch.square(vpredclipped - returns)
vf_loss = .5 * reduce_mean(torch.max(vf_losses1, vf_losses2), masks)
vf_clipfrac = reduce_mean(torch.gt(vf_losses2, vf_losses1).float(), masks)
logprob = outputs['response/log_prob']
ratio = torch.exp(logprob - old_logprob)
pg_losses = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, min=1.0 - self.params.cliprange, max=1.0 + self.params.cliprange)
pg_loss = reduce_mean(torch.max(pg_losses, pg_losses2), masks)
pg_clipfrac = reduce_mean(torch.gt(pg_losses2, pg_losses).float(), masks)
loss = pg_loss + self.params.vf_coef * vf_loss
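        # pg_loss is the PPO clipped surrogate, i.e. the negative of
        # min(ratio * A, clip(ratio, 1 - cliprange, 1 + cliprange) * A) averaged over tokens;
        # vf_loss is the analogously clipped squared value error, and the two are
        # combined with weight vf_coef.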
entropy = reduce_mean(outputs['response/entropy'], masks)
approxkl = .5 * reduce_mean(torch.square(logprob - old_logprob), masks)
return_mean, return_var = reduce_mean(returns, masks), reduce_std(returns, masks)
value_mean, value_var = reduce_mean(values, masks), reduce_std(values, masks)
stats = dict(
loss=dict(policy=pg_loss, value=vf_loss, total=loss),
policy=dict(entropy=entropy, approxkl=approxkl, clipfrac=pg_clipfrac),
returns=dict(mean=return_mean, var=return_var),
val=dict(vpred=reduce_mean(vpred, masks), error=reduce_mean((vpred - returns) ** 2, masks),
clipfrac=vf_clipfrac, mean=value_mean, var=value_var)
)
return loss, flatten_dict(stats, sep='/')
def main():
args = get_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
num_gpus = torch.cuda.device_count()
log.info(f'Detect {num_gpus} GPUS')
device = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu')
time = datetime.now()
date_time = time.strftime("%m-%d-%Y_%H:%M:%S")
args.save_dir = os.path.join(args.output_dir, date_time)
args.reward_dir = os.path.join(args.save_dir, 'reward')
args.model_dir = os.path.join(args.save_dir, 'model')
args.tensorboard_dir = os.path.join(args.save_dir, 'tensorboard')
for d in [args.output_dir, args.save_dir, args.reward_dir, args.model_dir, args.tensorboard_dir]:
ensure_dir(d)
log.info(f'Write to output directory: {args.save_dir}')
with open(os.path.join(args.save_dir, 'args.json'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
log.info(f'Initializing models ...')
ref_policy = Policy(model_name=args.init_model, device=device)
policy = Policy(model_name=args.init_model, device=device)
value = Value(model_type=args.init_model, device=device)
reward = Reward(save_path=args.reward_dir,
batch_size=args.batch_size, gain=args.gain, bias=args.bias, device=2)
log.info(f'Initialization done!')
prompt_collator = PromptCollator(tokenizer=policy.tokenizer)
train_dataset = PromptDataset(path=args.dataset_train)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, collate_fn=prompt_collator)
log.info(f'Load train set with {len(train_dataset)} examples')
val_dataset = PromptDataset(path=args.dataset_val)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, collate_fn=prompt_collator)
log.info(f'Load val set with {len(val_dataset)} examples')
# normalize the rewards to have mean 0, var 1
reward.set_reward_norm(dataloader=train_dataloader, policy=policy)
# set up optimizer and scheduler
optimizer = Adam(itertools.chain(policy.model.parameters(), value.model.parameters()), lr=args.lr, eps=1e-5)
args.total_steps = ceil_div(args.total_episodes, args.batch_size)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=args.total_steps)
trainer = PPOTrainer(params=args, policy=policy, ref_policy=ref_policy, value_model=value, score_model=reward,
train_dataloader=train_dataloader, val_dataloader=val_dataloader,
optimizer=optimizer, scheduler=scheduler)
for step_num in range(args.total_steps):
print(step_num)
try:
trainer.step(step_num)
except RuntimeError:
torch.cuda.empty_cache()
continue
if __name__ == "__main__":
main()
| clarifydelphi-main | Code/main.py |
import json
import math
import os
import re
import numpy as np
from tqdm import tqdm
import logging
from torch.utils.data import DataLoader
from typing import Optional, List, Iterable, Dict, Any
from Code.policy import Policy
from Code.model.delphi import DelphiScorer
from Code.utils.utils import batchify, load_jsonl
from scipy.special import rel_entr
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import RobertaTokenizer, RobertaForSequenceClassification
import torch
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
def my_jensenshannon(p, q, base=None, *, axis=0, keepdims=False):
"""
Compute the Jensen-Shannon distance (metric) between
two probability arrays. This is the square root
of the Jensen-Shannon divergence.
The Jensen-Shannon distance between two probability
vectors `p` and `q` is defined as,
.. math::
\\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}
where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
and :math:`D` is the Kullback-Leibler divergence.
This routine will normalize `p` and `q` if they don't sum to 1.0.
Parameters
----------
p : (N,) array_like
left probability vector
q : (N,) array_like
right probability vector
base : double, optional
the base of the logarithm used to compute the output
if not given, then the routine uses the default base of
scipy.stats.entropy.
axis : int, optional
Axis along which the Jensen-Shannon distances are computed. The default
is 0.
.. versionadded:: 1.7.0
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Default is False.
.. versionadded:: 1.7.0
    Returns
    -------
    js : double or ndarray
        The Jensen-Shannon divergences between `p` and `q` along the `axis`.
    """
p = np.asarray(p)
q = np.asarray(q)
p = p / np.sum(p, axis=axis, keepdims=True)
q = q / np.sum(q, axis=axis, keepdims=True)
m = (p + q) / 2.0
left = rel_entr(p, m)
right = rel_entr(q, m)
left_sum = np.sum(left, axis=axis, keepdims=keepdims)
right_sum = np.sum(right, axis=axis, keepdims=keepdims)
js = left_sum + right_sum
if base is not None:
js /= np.log(base)
return js / 2.0
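# Note: this returns the JS divergence, not the JS distance. For example,
# my_jensenshannon([1.0, 0.0], [0.5, 0.5], base=2) is roughly 0.311, and the value
# reaches 1.0 (with base=2) for disjoint distributions such as [1, 0] vs. [0, 1].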
def load_model_t5(model_name_or_path, cuda_devices = None):
cuda_devices = cuda_devices or []
if len(cuda_devices) > 0:
device = f"cuda:{cuda_devices[0]}"
else:
device = "cpu"
model = T5ForConditionalGeneration.from_pretrained(model_name_or_path, local_files_only=True)
tokenizer = T5Tokenizer.from_pretrained('t5-large')
model.to(device)
return {"model": model, "tokenizer": tokenizer, "model_name": model_name_or_path, "cuda_device": device}
def predict_merge(situations, questions, answers):
tokenizer = model_dict_fusion["tokenizer"]
model = model_dict_fusion["model"]
task_prefix = 'merge: '
inputs = []
for situation, question, answer in zip(situations, questions, answers):
input = task_prefix+'SITUATION: ' + situation.strip() + ' QUESTION: ' + question.strip() + ' ANSWER: ' + answer.strip()
inputs.append(input)
inputs = tokenizer(inputs, return_tensors="pt", padding=True).to(model_dict_fusion['cuda_device'])
output_sequences = model.generate(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
do_sample=False, # disable sampling to test if batching affects output
max_length=100,
)
predicted_merge= tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
return predicted_merge
model_nli = RobertaForSequenceClassification.from_pretrained('alisawuffles/roberta-large-wanli').to("cuda:5")
tokenizer_nli = RobertaTokenizer.from_pretrained('alisawuffles/roberta-large-wanli')
model_dict_answer = load_model_t5('out_dir_t5_large_answergen', cuda_devices=[2])
model_dict_fusion = load_model_t5('checkpoint-11000', cuda_devices=[2])
def get_answers_from_model_batch_t5(situations, questions, judgements):
# generate question
tokenizer = model_dict_answer["tokenizer"]
model = model_dict_answer["model"]
bad = []
good = []
model_inputs = []
uts = []
all_situations = []
counter = 0
qs = []
for jud, sit, q in zip(judgements, situations, questions):
for ut in ['weakener', 'strengthener']:
try:
if not sit[-1] == '.':
sit = sit + '.'
except IndexError:
print('oh no')
print(situations)
print(judgements)
print(questions)
sit = re.sub('question: ', '', sit)
input = "answer: "+ jud + ' ' + sit + ' TYPE: ' + ut + ' QUESTION: ' + q
model_inputs.append(input)
uts.append(ut)
all_situations.append(sit)
qs.append(q)
counter += 1
inputs = tokenizer(model_inputs, return_tensors="pt", padding=True).to(model_dict_answer['cuda_device'])
response_ids = model.generate(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
do_sample=True,
max_length=200,
top_p=0.6,
top_k=None,
eos_token_id=tokenizer.eos_token_id,
num_beams=None,
early_stopping=True,
pad_token_id=tokenizer.pad_token_id,
num_return_sequences=1 # max(1, args.beams)
)
pred = tokenizer.batch_decode(response_ids, skip_special_tokens=True)
for sit, answer, ut, question in zip(all_situations, pred, uts, qs):
x = tokenizer_nli(sit, answer, return_tensors='pt', max_length=128, truncation=True).to("cuda:5")
logits = model_nli(**x).logits
probs = logits.softmax(dim=1).squeeze(0)
label_id = torch.argmax(probs).item()
prediction_sit_ans = model_nli.config.id2label[label_id]
if prediction_sit_ans in ['contradiction', 'entailment']:
answer = ' '
if ut == 'weakener':
bad.append(answer)
else:
good.append(answer)
return bad, good
class Reward:
def __init__(self, save_path: str, device: str, batch_size: int,
gain: float = None, bias: float = None):
self.gain, self.bias = gain, bias
self.path = save_path
self.batch_size = batch_size
self.delphi_scorer = DelphiScorer(device_id=device)
def set_reward_norm(self, dataloader: DataLoader, policy: Policy,
new_mean: int = 0., new_std: int = 1.):
if self.gain is None and self.bias is None:
log.info('compute reward statistics before normalization ...')
else:
log.info(f'reward after normalization: mean={new_mean}, std={new_std}')
log.info(f'normalization factor: gain={self.gain}, bias={self.bias}')
return
good_sentences = []
bad_sentences = []
for i, batch in enumerate(tqdm(dataloader, total=len(dataloader), desc='sampling from policy')):
input_ids, attention_mask = batch
outputs = policy.sample(input_ids=input_ids, attention_mask=attention_mask)
# only generate one question
prompts, responses = outputs['query/text'], outputs['response/text']
class_labels, judgements = self.delphi_scorer.generate_batch(prompts)
bad_answers, good_answers = get_answers_from_model_batch_t5(prompts, responses, judgements)
good_answers = predict_merge(prompts, responses, good_answers)
bad_answers = predict_merge(prompts, responses, bad_answers)
good_sentences.extend(good_answers)
bad_sentences.extend(bad_answers)
divergences = []
for good_sents, bad_sents in tqdm(zip(batchify(good_sentences, self.batch_size), batchify(bad_sentences, self.batch_size)), total=math.ceil(len(good_sentences) / self.batch_size), desc='computing rewards'):
good_scores = self.delphi_scorer.score_batch(good_sents)
bad_scores = self.delphi_scorer.score_batch(bad_sents)
div = my_jensenshannon(good_scores, bad_scores, base=2, axis=1)
divergences.extend(div)
rewards = divergences
old_mean, old_std = np.mean(rewards), np.std(rewards)
log.info('statistics:')
log.info(f'reward before normalization: mean={old_mean}, std={old_std}')
self.gain = new_std / old_std
self.bias = new_mean - self.gain * old_mean
log.info(f'reward after normalization: mean={new_mean}, std={new_std}')
log.info(f'normalization factor: gain={self.gain}, bias={self.bias}')
json.dump({'old_mean': old_mean, 'old_std': old_std,
'new_mean': new_mean, 'new_std': new_std,
'gain': self.gain, 'bias': self.bias,
}, open(os.path.join(self.path, 'reward_normalization.json'), 'w'), indent=4)
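        # Worked example: if the raw divergences have old_mean=0.5 and old_std=0.2, then
        # gain = 1 / 0.2 = 5 and bias = 0 - 5 * 0.5 = -2.5, so a raw reward of 0.7 maps
        # to 5 * 0.7 - 2.5 = 1.0 (one standard deviation above the mean).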
def get_reward(self, prompts: List[str], responses: List[str]) -> List[float]:
assert len(prompts) == len(responses), f'prompts({len(prompts)}) and responses({len(responses)}) mismatch'
class_labels, judgements = self.delphi_scorer.generate_batch(prompts)
bad_answers, good_answers = get_answers_from_model_batch_t5(prompts, responses, judgements)
good_sentences = predict_merge(prompts, responses, good_answers)
bad_sentences = predict_merge(prompts, responses, bad_answers)
divergences = []
for good_sents, bad_sents in tqdm(zip(batchify(good_sentences, self.batch_size), batchify(bad_sentences, self.batch_size)), total=math.ceil(len(good_sentences) / self.batch_size), desc='computing rewards'):
good_scores = self.delphi_scorer.score_batch(good_sents)
bad_scores = self.delphi_scorer.score_batch(bad_sents)
div = my_jensenshannon(good_scores, bad_scores, base=2, axis=1)
divergences.extend(div)
rewards = divergences
return [self.gain * x + self.bias for x in rewards]
def toxicity_to_reward(score):
return - score
def reward_to_toxicity(score):
return - score
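
# --- Illustrative usage (not part of the original file) -------------------
# A minimal sketch of how the Reward class above can be queried. The device
# string, save path, gain/bias values, and the prompt/response pair are all
# assumptions for illustration; DelphiScorer additionally needs its fine-tuned
# T5 checkpoint available locally, so this only runs in a fully set-up repo.
if __name__ == '__main__':
    example_reward = Reward(save_path='runs/example', device='cuda:0', batch_size=4,
                            gain=1.0, bias=0.0)  # real gain/bias come from set_reward_norm
    example_prompts = ['killing a bear']
    example_responses = ['Why did you kill the bear?']
    print(example_reward.get_reward(example_prompts, example_responses))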
| clarifydelphi-main | Code/reward.py |
from pathlib import Path
import yaml
NEGATIVE_INF = -100000.0
# Config
CONFIG_FILE = Path('config.yml')
#reward
GAIN = 4.072529137302586
BIAS = -0.45725615025178 | clarifydelphi-main | Code/utils/constants.py |
import json
from pathlib import Path
from typing import TypeVar, Iterable, List, Union, Any
import numpy as np
import torch
from tqdm.auto import tqdm
import os
import collections.abc
from utils.constants import NEGATIVE_INF
T = TypeVar('T')
def reduce_sum(value, mask, axis=None):
if axis is None:
return torch.sum(value * mask)
return torch.sum(value * mask, axis)
def reduce_mean(value, mask, axis=None):
if axis is None:
return torch.sum(value * mask) / torch.sum(mask)
return reduce_sum(value, mask, axis) / torch.sum(mask, axis)
def reduce_std(value, mask):
return torch.sqrt(reduce_mean(torch.square(value), mask) - torch.square(reduce_mean(value, mask)))
def logits_to_entropy(logits):
distribution = torch.distributions.Categorical(logits=logits)
return distribution.entropy()
def mask_pad(value, mask, pad_value=None):
if pad_value is None:
pad_value = NEGATIVE_INF
return value * mask + pad_value * (1 - mask)
def clamp(value, min_value, max_value):
return torch.max(torch.min(value, max_value), min_value)
def ceil_div(a, b):
return (a - 1) // b + 1
def exact_div(a, b):
q = a // b
if a != q * b:
raise ValueError('Inexact division: %s / %s = %s' % (a, b, a / b))
return q
def whiten(values, masks, shift_mean=True):
mean, var = reduce_mean(values, masks), reduce_std(values, masks)
whitened = (values - mean) * torch.rsqrt(var + 1e-8)
if not shift_mean:
whitened += mean
return whitened
def flatten_dict(nested, sep='.'):
def rec(nest, prefix, into):
for k, v in nest.items():
if sep in k:
raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'")
            if isinstance(v, collections.abc.Mapping):  # collections.Mapping was removed in Python 3.10
rec(v, prefix + k + sep, into)
else:
into[prefix + k] = v
flat = {}
rec(nested, '', flat)
return flat
def ensure_dir(d):
if not os.path.exists(d):
os.makedirs(d)
def batchify(data: Iterable[T], batch_size: int) -> Iterable[List[T]]:
assert batch_size > 0
batch = []
for item in data:
# Yield next batch
if len(batch) == batch_size:
yield batch
batch = []
batch.append(item)
# Yield last un-filled batch
if len(batch) != 0:
yield batch
def set_seed(seed, n_gpu):
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
def load_jsonl(file: Union[str, Path]) -> Iterable[Any]:
with open(file) as f:
for line in f:
yield json.loads(line)
def load_cache(file: Path):
if file.exists():
with file.open() as f:
for line in tqdm(f, desc=f'Loading cache from {file}'):
yield json.loads(line)
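
# --- Illustrative usage (not part of the original file) -------------------
# Small, self-contained sanity check for the masked-reduction helpers and
# batchify; the tensors below are made-up example values.
if __name__ == '__main__':
    example_values = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    example_mask = torch.tensor([[1.0, 1.0, 0.0], [1.0, 1.0, 1.0]])
    print(reduce_sum(example_values, example_mask))           # 18.0 (masked total)
    print(reduce_mean(example_values, example_mask))          # 3.6  (18 / 5 unmasked entries)
    print(reduce_mean(example_values, example_mask, axis=1))  # tensor([1.5, 5.0])
    print(list(batchify(range(5), batch_size=2)))             # [[0, 1], [2, 3], [4]]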
| clarifydelphi-main | Code/utils/utils.py |
import copy
import warnings

import torch
from torch import nn
from torch.nn import MSELoss
from transformers.models.t5.modeling_t5 import T5PreTrainedModel, T5Stack
from transformers.modeling_outputs import BaseModelOutput, TokenClassifierOutput
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from Code.utils.utils import reduce_mean
from IPython import embed

# Warning emitted when `head_mask` is passed without `decoder_head_mask`
# (mirrors the behaviour of Hugging Face's T5 models).
_HEAD_MASK_WARNING_MSG = (
    "The input argument `head_mask` was split into two arguments `head_mask` and "
    "`decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`."
)
class T5ForTokenRegression(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.classifier = nn.Linear(config.d_model, config.num_labels)
# Initialize weights and apply final processing
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(_HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.classifier = self.classifier.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.classifier.weight.device)
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
logits = self.classifier(sequence_output).squeeze(-1)
loss = None
if labels is not None:
loss_fct = MSELoss(reduction='none')
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
) | clarifydelphi-main | Code/model/t5.py |
import sys
sys.path.append(".")
import torch
from scipy.special import softmax
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
class DelphiScorer:
def __init__(self, device_id="cuda:0", model="t5-11b", parallel=False):
CUDA_DEVICE = device_id if torch.cuda.is_available() else 'cpu'
self.device = torch.device(CUDA_DEVICE)
print(f"DelphiScorer device: {self.device}")
if model == "t5-large":
MODEL_BASE = "t5-large"
MODEL_LOCATION = "large_commonsense_morality_hf"
self.class_token_pos = 4
self.sep_tokens = ["<unk> /class> <unk> text>", " class>", "<unk> /text>"]
elif model == "t5-11b":
MODEL_BASE = "t5-11b"
MODEL_LOCATION = "11b_commonsense_morality_hf"
self.class_token_pos = 4
self.sep_tokens = ["<unk> /class> <unk> text>", " class>", "<unk> /text>"]
        else:
            raise ValueError(f"Unknown DelphiScorer model {model!r}; expected 't5-large' or 't5-11b'.")
self.model = T5ForConditionalGeneration.from_pretrained(MODEL_LOCATION)
self.model.to(self.device)
if parallel:
self.model.parallelize()
self.tokenizer = T5Tokenizer.from_pretrained(MODEL_BASE)
self.class1_pos = 0
self.class0_pos = 1
self.classminus1_pos = 2
def score(self, input_string, normalize=None):
input_string = f"[moral_single]: {input_string}"
input_ids = self.tokenizer(input_string, return_tensors='pt').to(self.device).input_ids
outputs = self.model.generate(input_ids, max_length=200, output_scores=True, return_dict_in_generate=True)
probs = [(self.tokenizer.decode(i), x) for (i, x) in enumerate(outputs['scores'][self.class_token_pos][0].softmax(0))]
class1_prob = sum([v[1].item() for v in probs if v[0] == "1"])
class0_prob = sum([v[1].item() for v in probs if v[0] == "0"])
classminus1_prob = sum([v[1].item() for v in probs if v[0] == "-1"])
probs = [class1_prob, class0_prob, classminus1_prob]
probs_sum = sum(probs)
if normalize == "regular":
probs = [p / probs_sum for p in probs]
elif normalize == "softmax":
probs = softmax(probs)
return probs
def score_batch(self, input_strings, normalize=None):
input_strings = [f"[moral_single]: {x}" for x in input_strings]
inputs = {k: v.to(self.device) for k, v in self.tokenizer(input_strings, return_tensors='pt', padding=True).items()}
outputs = self.model.generate(**inputs, max_length=200, output_scores=True, return_dict_in_generate=True)
probs = outputs['scores'][self.class_token_pos].softmax(-1)
class1_prob = probs[:, self.tokenizer.convert_tokens_to_ids("1")]
class0_prob = probs[:, self.tokenizer.convert_tokens_to_ids("0")]
classminus1_prob = probs[:, self.tokenizer.convert_tokens_to_ids("-1")]
probs = torch.stack([class1_prob, class0_prob, classminus1_prob], dim=-1)
        probs_sum = torch.sum(probs, dim=1, keepdim=True)
        if normalize == "regular":
            # keepdim=True lets the per-example sums broadcast over the three class columns
            probs = probs / probs_sum
elif normalize == "softmax":
probs = probs.softmax(-1)
return probs.tolist()
def compute_toxicity(self, input_string, normalize=None):
score = self.score(input_string, normalize)
return score[self.classminus1_pos] - score[self.class1_pos]
def compute_toxicity_batch(self, input_strings, normalize=None):
scores = self.score_batch(input_strings, normalize)
toxicities = [s[self.classminus1_pos] - s[self.class1_pos] for s in scores]
return toxicities
def generate(self, input_string):
input_string = f"[moral_single]: {input_string}"
input_ids = self.tokenizer(input_string, return_tensors='pt').to(self.device).input_ids
outputs = self.model.generate(input_ids, max_length=200, output_scores=True, return_dict_in_generate=True)
decoded_sequence = self.tokenizer.decode(outputs["sequences"][0])
class_label = int(decoded_sequence.split(self.sep_tokens[0])[0].split(self.sep_tokens[1])[-1])
text_label = decoded_sequence.split(self.sep_tokens[0])[-1].split(self.sep_tokens[2])[0]
return class_label, text_label
def generate_batch(self, input_strings):
input_strings = [f"[moral_single]: {input_string}" for input_string in input_strings]
input_ids = self.tokenizer(input_strings, return_tensors='pt', padding=True, truncation=True).to(self.device).input_ids
outputs = self.model.generate(input_ids, max_length=200, output_scores=True, return_dict_in_generate=True)
decoded_sequences = [self.tokenizer.decode(output) for output in outputs["sequences"]]
class_labels = [int(decoded_sequence.split(self.sep_tokens[0])[0].split(self.sep_tokens[1])[-1]) for decoded_sequence in decoded_sequences]
text_labels = [decoded_sequence.split(self.sep_tokens[0])[-1].split(self.sep_tokens[2])[0] for decoded_sequence in decoded_sequences]
return class_labels, text_labels
def generate_beam(self,
input_string,
num_beams=5,
max_length=50,
num_return_sequences=5,):
input_string = f"[moral_single]: {input_string}"
input_ids = self.tokenizer(input_string,
max_length=16,
truncation=True,
return_tensors='pt').to(self.device).input_ids
outputs = self.model.generate(input_ids,
# output_scores=True,
# return_dict_in_generate=True,
num_beams=num_beams,
max_length=max_length,
num_return_sequences=num_return_sequences,)
decoded_sequences = self.tokenizer.batch_decode(outputs)
class_labels = [ds.split(self.sep_tokens[0])[0].split(self.sep_tokens[1])[-1] for ds in decoded_sequences]
text_labels = [ds.split(self.sep_tokens[0])[-1].split(self.sep_tokens[2])[0] for ds in decoded_sequences]
return class_labels, text_labels
def generate_with_score(self, input_string):
input_string = f"[moral_single]: {input_string}"
input_ids = self.tokenizer(input_string, return_tensors='pt').to(self.device).input_ids
outputs = self.model.generate(input_ids, max_length=200, output_scores=True, return_dict_in_generate=True)
probs = [(self.tokenizer.decode(i), x) for (i, x) in enumerate(outputs['scores'][self.class_token_pos][0].softmax(0))]
class1_prob = sum([v[1].item() for v in probs if v[0] == "1"])
class0_prob = sum([v[1].item() for v in probs if v[0] == "0"])
classminus1_prob = sum([v[1].item() for v in probs if v[0] == "-1"])
probs = [class1_prob, class0_prob, classminus1_prob]
# probs_sum = sum(probs)
decoded_sequence = self.tokenizer.decode(outputs["sequences"][0])
class_label = int(decoded_sequence.split(self.sep_tokens[0])[0].split(self.sep_tokens[1])[-1])
text_label = decoded_sequence.split(self.sep_tokens[0])[-1].split(self.sep_tokens[2])[0]
return class_label, probs, text_label
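
# --- Illustrative usage (not part of the original file) -------------------
# Minimal sketch of querying DelphiScorer. It assumes the fine-tuned Delphi
# checkpoint directory referenced above ("large_commonsense_morality_hf" for
# the t5-large variant) is present locally, so treat this as illustrative
# rather than something that runs out of the box.
if __name__ == '__main__':
    delphi = DelphiScorer(device_id='cuda:0', model='t5-large')
    situation = 'helping a friend move'
    print(delphi.score(situation, normalize='regular'))  # [P(good), P(neutral), P(bad)]
    print(delphi.generate(situation))                    # (class_label, text_label)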
| clarifydelphi-main | Code/model/delphi.py |
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('bert-for-qa')
class BertQAPredictor(Predictor):
"""
Predictor for the :class:`~allennlp.models.reading_comprehension.BertForQuestionAnswering` model.
"""
def predict(self, question: str, passage: str) -> JsonDict:
"""
Make a machine comprehension prediction on the supplied input.
See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.
Parameters
----------
question : ``str``
A question about the content in the supplied paragraph. The question must be answerable by a
span in the paragraph.
passage : ``str``
A paragraph of information relevant to the question.
Returns
-------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json({"passage" : passage, "question" : question})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like ``{"question": "...", "passage": "..."}``.
"""
question_text = json_dict["question"]
passage_text = json_dict["passage"]
return self._dataset_reader.text_to_instance(question_text, passage_text)
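
# --- Illustrative usage (not part of the original file) -------------------
# A sketch of loading the predictor through AllenNLP's Predictor.from_path.
# "model.tar.gz" is an assumed archive path: any archive trained with the
# BertForQuestionAnswering model in this package should work.
if __name__ == '__main__':
    predictor = Predictor.from_path('model.tar.gz', predictor_name='bert-for-qa')
    result = predictor.predict(
        question='Who wrote the paper?',
        passage='The paper was written by the research team at AI2.',
    )
    print(result)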
| allennlp-bert-qa-wrapper-master | pretrained_bert/predictor.py |
from pretrained_bert.model import BertForQuestionAnswering
from pretrained_bert.dataset_reader import SquadReaderForPretrainedBert
from pretrained_bert.predictor import BertQAPredictor
| allennlp-bert-qa-wrapper-master | pretrained_bert/__init__.py |
from typing import Dict, List
import collections
import logging
import math
import torch
from overrides import overrides
from pytorch_pretrained_bert import BertForQuestionAnswering as HuggingFaceBertQA
from pytorch_pretrained_bert import BertConfig
from pytorch_pretrained_bert.tokenization import BasicTokenizer
from allennlp.common import JsonDict
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
BERT_LARGE_CONFIG = {"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"type_vocab_size": 2,
"vocab_size": 30522
}
BERT_BASE_CONFIG = {"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 30522
}
@Model.register('bert_for_qa')
class BertForQuestionAnswering(Model):
def __init__(self,
vocab: Vocabulary,
bert_model_type: str,
pretrained_archive_path: str,
null_score_difference_threshold: float = 0.0,
model_is_for_squad1: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30) -> None:
super().__init__(vocab)
if bert_model_type == "bert_base":
config_to_use = BERT_BASE_CONFIG
elif bert_model_type == "bert_large":
config_to_use = BERT_LARGE_CONFIG
else:
raise RuntimeError(f"`bert_model_type` should either be \"bert_large\" or \"bert_base\"")
config = BertConfig(vocab_size_or_config_json_file=config_to_use["vocab_size"],
hidden_size=config_to_use["hidden_size"],
num_hidden_layers=config_to_use["num_hidden_layers"],
num_attention_heads=config_to_use["num_attention_heads"],
intermediate_size=config_to_use["intermediate_size"],
hidden_act=config_to_use["hidden_act"],
hidden_dropout_prob=config_to_use["hidden_dropout_prob"],
attention_probs_dropout_prob=config_to_use["attention_probs_dropout_prob"],
max_position_embeddings=config_to_use["max_position_embeddings"],
type_vocab_size=config_to_use["type_vocab_size"],
initializer_range=config_to_use["initializer_range"])
self.bert_qa_model = HuggingFaceBertQA(config)
self._loaded_qa_weights = False
self._pretrained_archive_path = pretrained_archive_path
self._null_score_difference_threshold = null_score_difference_threshold
self._model_is_for_squad1 = model_is_for_squad1
self._n_best_size = n_best_size
self._max_answer_length = max_answer_length
@overrides
def forward(self, # type: ignore
input_ids: torch.Tensor,
token_type_ids: torch.Tensor,
attention_mask: torch.Tensor,
tokens: List[str],
document_tokens: List[str],
token_to_original_map: Dict[int, int],
token_is_max_context: Dict[int, bool]) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
if not self._loaded_qa_weights and self.training:
self.bert_qa_model = HuggingFaceBertQA.from_pretrained(self._pretrained_archive_path)
self._loaded_qa_weights = True
start_logits, end_logits = self.bert_qa_model(torch.stack(input_ids),
torch.stack(token_type_ids),
torch.stack(attention_mask))
output_dict = {"start_logits": start_logits,
"end_logits": end_logits,
"tokens": tokens,
"document_tokens": document_tokens,
"token_to_original_map": token_to_original_map,
"token_is_max_context": token_is_max_context}
if self.training:
loss = torch.sum(start_logits) * 0.0
output_dict["loss"] = loss
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
batch_start_logits = output_dict["start_logits"]
batch_end_logits = output_dict["end_logits"]
batch_tokens = output_dict["tokens"]
batch_document_tokens = output_dict["document_tokens"]
batch_token_map = output_dict["token_to_original_map"]
batch_token_is_max_context = output_dict["token_is_max_context"]
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
predictions: List[str] = []
nbest_info: JsonDict = []
for start_logits, end_logits, tokens, document_tokens, token_map, token_is_max_context in zip(
batch_start_logits,
batch_end_logits,
batch_tokens,
batch_document_tokens,
batch_token_map,
batch_token_is_max_context):
prelim_predictions = []
score_null = 1000000 # large and positive
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
start_indexes = self._get_best_indexes(start_logits, self._n_best_size)
end_indexes = self._get_best_indexes(end_logits, self._n_best_size)
if not self._model_is_for_squad1:
feature_null_score = start_logits[0] + end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
null_start_logit = start_logits[0]
null_end_logit = end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(tokens):
continue
if end_index >= len(tokens):
continue
if start_index not in token_map:
continue
if end_index not in token_map:
continue
if not token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > self._max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=start_logits[start_index],
end_logit=end_logits[end_index]))
if not self._model_is_for_squad1:
prelim_predictions.append(
_PrelimPrediction(
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= self._n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = token_map[pred.start_index]
orig_doc_end = token_map[pred.end_index]
orig_tokens = document_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = self._get_final_text(tok_text,
orig_text,
do_lower_case=True)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if not self._model_is_for_squad1:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="!!NO ANSWER!!",
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_valid_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_valid_entry:
if entry.text:
best_valid_entry = entry
probs = self._compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if self._model_is_for_squad1:
predictions.append(nbest_json[0]["text"])
else:
if best_valid_entry is None:
predictions.append("!!NO ANSWER!!")
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_valid_entry.start_logit - (
best_valid_entry.end_logit)
if score_diff > self._null_score_difference_threshold:
predictions.append("!!NO ANSWER!!")
else:
predictions.append(best_valid_entry.text)
nbest_info.append(nbest_json)
output_dict["predictions"] = predictions
output_dict["nbest_info"] = nbest_info
return output_dict
@staticmethod
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = [index_score_pair[0] for index_score_pair in index_and_score[:n_best_size]]
return best_indexes
@staticmethod
def _get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
        # Therefore, we have to apply a semi-complicated alignment heuristic between
        # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, char) in enumerate(text):
if char == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(char)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'")
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
@staticmethod
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
exp_score = math.exp(score - max_score)
exp_scores.append(exp_score)
total_sum += exp_score
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
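
# --- Illustrative usage (not part of the original file) -------------------
# Quick self-contained check of the static helpers above: _get_best_indexes
# ranks raw logits and _compute_softmax turns them into probabilities. The
# logits are made-up example values.
if __name__ == '__main__':
    demo_logits = [0.1, 2.3, -1.0, 1.7]
    print(BertForQuestionAnswering._get_best_indexes(demo_logits, n_best_size=2))  # [1, 3]
    print(BertForQuestionAnswering._compute_softmax(demo_logits))                  # sums to 1.0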
| allennlp-bert-qa-wrapper-master | pretrained_bert/model.py |
import json
import logging
import collections
from typing import List
import torch
from overrides import overrides
from pytorch_pretrained_bert import BertTokenizer
from allennlp.common.file_utils import cached_path
from allennlp.data.fields import MetadataField
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("squad_for_pretrained_bert")
class SquadReaderForPretrainedBert(DatasetReader):
def __init__(self,
pretrained_bert_model_file: str,
lazy: bool = False,
max_query_length: int = 64,
max_sequence_length: int = 384,
document_stride: int = 128) -> None:
super().__init__(lazy)
self._tokenizer = BertTokenizer.from_pretrained(pretrained_bert_model_file)
self._max_query_length = max_query_length
self._max_sequence_length = max_sequence_length
self._document_stride = document_stride
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json['data']
for entry in dataset:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
for question_answer in paragraph["qas"]:
question_text = question_answer["question"]
instance = self.text_to_instance(question_text=question_text,
paragraph_text=paragraph_text)
if instance is not None:
yield instance
@staticmethod
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
@overrides
def text_to_instance(self, # type: ignore
question_text: str,
paragraph_text: str) -> Instance:
# pylint: disable=arguments-differ
def is_whitespace(char):
if char == " " or char == "\t" or char == "\r" or char == "\n" or ord(char) == 0x202F:
return True
return False
doc_tokens: List[str] = []
prev_is_whitespace = True
for char in paragraph_text:
if is_whitespace(char):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(char)
else:
doc_tokens[-1] += char
prev_is_whitespace = False
query_tokens = self._tokenizer.tokenize(question_text)
if len(query_tokens) > self._max_query_length:
query_tokens = query_tokens[0:self._max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = self._tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = self._max_sequence_length - len(query_tokens) - 3
# Different from original pytorch-pretrained-bert,
# we don't use a sliding window approach here.
# We just truncate the original doc to defined max_sequence_length.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
if start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
# We only select the first index of doc_spans here.
doc_span_index = 0
doc_span = doc_spans[0]
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = self._check_is_max_context(doc_spans,
doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = self._tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self._max_sequence_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self._max_sequence_length
assert len(input_mask) == self._max_sequence_length
assert len(segment_ids) == self._max_sequence_length
input_ids_tensor = torch.tensor(input_ids, dtype=torch.long)
input_mask_tensor = torch.tensor(input_mask, dtype=torch.long)
segment_ids_tensor = torch.tensor(segment_ids, dtype=torch.long)
instance = Instance({"input_ids": MetadataField(input_ids_tensor),
"token_type_ids": MetadataField(segment_ids_tensor),
"attention_mask": MetadataField(input_mask_tensor),
"tokens": MetadataField(tokens),
"document_tokens": MetadataField(doc_tokens),
"token_to_original_map": MetadataField(token_to_orig_map),
"token_is_max_context": MetadataField(token_is_max_context)})
# We truncate the original doc to defined max_sequence_length.
# Here we only process the first part of doc_spans and return the result.
return instance
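
# --- Illustrative usage (not part of the original file) -------------------
# A sketch of building a single instance from raw text. "bert-base-uncased"
# is an assumed vocabulary name that pytorch_pretrained_bert can resolve; a
# path to a local vocab file also works.
if __name__ == '__main__':
    reader = SquadReaderForPretrainedBert(pretrained_bert_model_file='bert-base-uncased')
    instance = reader.text_to_instance(
        question_text='Where was the milk bought?',
        paragraph_text='The man went to the store and bought a gallon of milk.',
    )
    print(instance.fields['tokens'].metadata)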
| allennlp-bert-qa-wrapper-master | pretrained_bert/dataset_reader.py |
import setuptools
setuptools.setup(
name="bart_score",
version="0.1.0",
description="BARTScore: Evaluating Generated Text as Text Generation",
author="John Giorgi",
url="https://github.com/allenai/BARTScore",
python_requires=">=3.6",
packages=setuptools.find_packages(),
install_requires=[
"torch>=1.6.0",
"transformers>=4.6.1",
"pytorch_pretrained_bert>=0.6.2",
"fairseq>=0.9.0,<=0.11.0",
"nltk>=3.7.0",
"jsonlines>=3.0.0",
"sentencepiece>=0.1.96",
"mosestokenizer>=1.2.1",
"pyrouge>=0.1.3",
"bert-score>=0.3.11",
"tabulate>=0.8.10",
],
)
| BARTScore-main | setup.py |
#!/usr/bin/env python3
import argparse
import hashlib
import logging
import os
import sys
from typing import List, Dict, Iterator, Any, Tuple
import numpy as np
import sentencepiece as spm
import torch
from fairseq import checkpoint_utils, utils
from fairseq.data import LanguagePairDataset
from sacrebleu import get_source_file, get_reference_files, DATASETS, get_langpairs_for_testset
logger = logging.getLogger('prism')
logger.setLevel(logging.INFO)
MODELS = {
'8412b2044da4b9b2c0a8ce87b305d0d1': {
'name': 'm39v1',
'path': 'todo',
'date': '2020-04-30',
'description': 'model released with arXiv paper April 2020',
'langs': ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eo', 'fi', 'fr', 'he',
'hr', 'hu', 'id', 'it', 'ja', 'kk', 'lt', 'lv', 'mk', 'nl', 'no', 'pl', 'pt', 'ro', 'ru',
'sk', 'sl', 'sq', 'sr', 'sv', 'tr', 'uk', 'vi', 'zh'],
}
}
def hash_model(model_dir):
md5 = hashlib.md5()
block_size = 2 ** 20
for fname in ('checkpoint.pt', 'spm.model', 'dict.src.txt', 'dict.tgt.txt'):
with open(os.path.join(model_dir, fname), "rb") as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
md5.digest()
return md5.hexdigest()
class Prism:
def __init__(self, model_dir, lang, temperature=1.0):
'''
model_dir should contain:
1) checkpoint.pt: the fairseq model
2) spm.model: the sentencepiece model
3) dict.src.txt: the fairseq source dictionary
4) dict.tgt.txt: the fairseq target dictionary (likely a copy of the source)
lang: ISO 639-1 Code (e.g. "en"). Must be a language compatable with the model.
'''
self.sp = spm.SentencePieceProcessor()
self.sp.Load(model_dir + '/spm.model')
self.lang = lang
self.temperature = temperature
# this prints things and I can't figure out how to disable it
sys.stdout = open(os.devnull, 'w')
self.models, self.args, self.task = checkpoint_utils.load_model_ensemble_and_task(
[model_dir + '/checkpoint.pt', ],
arg_overrides=dict(data=model_dir + '/'),
)
sys.stdout = sys.__stdout__
self.use_cuda = torch.cuda.is_available()
self.generator = SequenceScorer(self.task.target_dictionary, temperature=temperature)
for model in self.models:
if self.use_cuda:
model.cuda()
model.make_generation_fast_(
beamable_mm_beam_size=None,
need_attn=False,
)
# if model.args.fp16:
# model.half()
# hash model
self.model_hash = hash_model(model_dir)
if self.model_hash in MODELS:
model_langs = MODELS[self.model_hash]['langs']
if lang not in model_langs:
model_name = MODELS[self.model_hash]['name']
logger.warning(f'Language "{lang}" is unsupported for model "{model_name}"')
logger.warning(f'Supported languages for {model_name}: {", ".join(model_langs)}')
sys.exit(1)
else:
logger.warning('unrecognized model, so cannot check language')
def identifier(self):
if self.model_hash in MODELS:
model_name = MODELS[self.model_hash]['name']
else:
logger.warning('unrecognized model, using hash to identify')
model_name = self.model_hash
return dict(version='0.1', model=model_name, seg_scores='avg_log_prob',
sys_scores='avg_log_prob', log_base=2, temperature=self.temperature)
def _binarize(self, sentence: str) -> torch.LongTensor:
return self.task.source_dictionary.encode_line(sentence, add_if_not_exist=False).long()
def _encode(self, sent, prepend=True):
sent = ' '.join(self.sp.EncodeAsPieces(sent))
if prepend:
sent = f'<{self.lang}> ' + sent
return self._binarize(sent)
def _build_batches(self,
source_tokens: List[List[int]],
target_tokens: List[List[int]],
skip_invalid_size_inputs: bool) -> Iterator[Dict[str, Any]]:
# Prune token
source_tokens = [src_token[:1800] for src_token in source_tokens]
target_tokens = [tgt_token[:2000] for tgt_token in target_tokens]
source_lengths = torch.LongTensor([t.numel() for t in source_tokens])
target_lengths = torch.LongTensor([t.numel() for t in target_tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=LanguagePairDataset(source_tokens, source_lengths, self.task.source_dictionary,
tgt=target_tokens, tgt_sizes=target_lengths,
tgt_dict=self.task.target_dictionary),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=(2000, 2000), # ???
ignore_invalid_inputs=skip_invalid_size_inputs,
).next_epoch_itr(shuffle=False)
return batch_iterator
def _score_forward(self, tok_sents_in, tok_sents_out):
assert len(tok_sents_in) == len(tok_sents_out)
tok_level_scores = [None, ] * len(tok_sents_in) # for debug
results = [None, ] * len(tok_sents_in)
for batch in self._build_batches(tok_sents_in, tok_sents_out, skip_invalid_size_inputs=False):
if self.use_cuda: # must be a better way
batch['id'] = batch['id'].cuda()
batch['net_input']['src_tokens'] = batch['net_input']['src_tokens'].cuda()
batch['net_input']['src_lengths'] = batch['net_input']['src_lengths'].cuda()
batch['net_input']['prev_output_tokens'] = batch['net_input']['prev_output_tokens'].cuda()
batch['target'] = batch['target'].cuda()
translations = self.task.inference_step(self.generator, self.models, batch)
ids = batch['id'].cpu().numpy()
tok_scores = [x[0]['positional_scores'].cpu().numpy() for x in translations]
# [1:] to skip language tag log prob
sent_scores = [np.mean(x[1:]) for x in tok_scores]
for _id, sent_score, _tok_score in zip(ids, sent_scores, tok_scores):
results[_id] = sent_score
tok_level_scores[_id] = _tok_score
if logger.level == logging.DEBUG:
for ii, (sent_in, scores_out, sent_out) in enumerate(zip(tok_sents_in, tok_level_scores, tok_sents_out)):
sent_in_str = ' '.join([self.task.source_dictionary[x] for x in sent_in])
logger.debug(f'Input[{ii}] = ' + sent_in_str)
sent_out_tok = [self.task.source_dictionary[x] for x in sent_out]
logger.debug(f'Output[{ii}] = ' + \
f' '.join([f'{a}[{b:.02f}]' for a, b in zip(sent_out_tok, scores_out)]))
if None in results:
raise Exception('Missing one or more sentence scores')
return np.array(results)
def score(self, cand, ref=None, src=None, segment_scores=False):
if not (ref is None) ^ (src is None):
raise Exception('Must provide exactly one of "ref" or "src"')
tokenized_cand = [self._encode(sentence, prepend=False) for sentence in cand]
tokenized_cand_prep = [self._encode(sentence, prepend=True) for sentence in cand]
if src is not None:
# Prism-src: score candidate given on source
if len(cand) != len(src):
raise Exception(f'Length of cand ({len(cand)}) does not match length of src ({len(src)})')
tokenized_src = [self._encode(sentence, prepend=False) for sentence in src]
scores = self._score_forward(tokenized_src, tokenized_cand_prep)
if not segment_scores:
scores = np.mean(scores)
return scores
else:
# Prism-ref: average candidate given reference and reference given candidate
if len(cand) != len(ref):
raise Exception(f'Length of cand ({len(cand)}) does not match length of ref ({len(ref)})')
tokenized_ref = [self._encode(sentence, prepend=False) for sentence in ref]
tokenized_ref_prep = [self._encode(sentence, prepend=True) for sentence in ref]
forward_scores = self._score_forward(tok_sents_in=tokenized_ref, tok_sents_out=tokenized_cand_prep)
reverse_scores = self._score_forward(tok_sents_in=tokenized_cand, tok_sents_out=tokenized_ref_prep)
scores = 0.5 * forward_scores + 0.5 * reverse_scores
            if not segment_scores:
                forward_scores = np.mean(forward_scores)
                reverse_scores = np.mean(reverse_scores)
                scores = np.mean(scores)
            # Return all three so callers (e.g. the WMT Scorer) can unpack the
            # forward, reverse, and combined Prism-ref scores.
            return forward_scores, reverse_scores, scores
def parse_sacrebleu_uri(uri: str) -> Tuple[str]:
"""
Parses the test set and language pair from a URI of the form
sacrebleu:wmt19:de-en
sacrebleu:wmt19/google/ar:de-en
"""
try:
_, testset, langpair = uri.split(":")
except ValueError:
logger.error('sacrebleu:* flags must take the form "sacrebleu:testset:langpair"')
sys.exit(1)
testsets = sorted(DATASETS, reverse=True)
if testset not in testsets:
logger.error(f"Test set '{testset}' was not found. Available sacrebleu test sets are:")
for key in testsets:
logger.error(f" {key:20s}: {DATASETS[key].get('description', '')}")
sys.exit(1)
lang_pairs = get_langpairs_for_testset(testset)
if langpair not in lang_pairs:
logger.error(f"Language pair '{langpair}' not available for testset '{testset}'.\n"
f" Language pairs available for {testset}: {', '.join(lang_pairs)}")
sys.exit(1)
return testset, langpair
def main():
parser = argparse.ArgumentParser(description='Prism: MT metric based on multilingual NMT',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cand', required=False, type=argparse.FileType('rt'), default=sys.stdin,
help='Candidate text file. If not provided, candidates are read from stdin.')
parser.add_argument('--ref', required=False, type=str,
help='Reference text file. If provided, reference-based Prism-ref scores are returned. '
'A value of "sacrebleu:{testset}:{langpair}" will use sacrebleu datasets. '
'You must provide exactly one of --ref or --src. ')
parser.add_argument('--src', required=False, type=str,
help='Source text file. If provided, source-based Prism-src scores are returned. '
'A value of "sacrebleu:{testset}:{langpair}" will use sacrebleu datasets. '
'You must provide exactly one of --ref or --src.')
parser.add_argument('--model-dir', required=True, type=str, help='Model Directory')
parser.add_argument('--lang', type=str, help='2-character language code (ISO 639-1)')
parser.add_argument('--temperature', type=float, default=1.0, help='Softmax temperature: '
'values >1.0 produce more uniform samples and values <1.0 produce sharper samples')
parser.add_argument('--segment-scores', action='store_true',
help='Print per-sentence scores instead of corpus level score')
parser.add_argument('--debug', action='store_true', help='Print debug info')
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
if not (args.ref is None) ^ (args.src is None):
logger.error('You must provide exactly one of --ref or --src')
sys.exit(1)
if args.ref is not None:
if args.ref.startswith('sacrebleu:'):
testset, langpair = parse_sacrebleu_uri(args.ref)
path = get_reference_files(testset, langpair)[0]
args.ref = open(path).readlines()
args.lang = langpair.split("-")[1]
logger.info(f"Scoring against {len(args.ref)}-line {args.lang} reference"
f" from sacrebleu dataset {testset}/{langpair}")
else:
args.ref = open(args.ref, 'rt').readlines()
if args.src is not None:
if args.src.startswith('sacrebleu:'):
testset, langpair = parse_sacrebleu_uri(args.src)
path = get_source_file(testset, langpair)
args.src = open(path).readlines()
args.lang = langpair.split("-")[0]
logger.info(f"Scoring against {len(args.src)}-line {args.lang} source"
f" from sacrebleu dataset {testset}/{langpair}")
else:
args.src = open(args.src, 'rt').readlines()
if args.lang is None:
logger.error("The language must be specified (--lang XX), XX the ISO 639-1 code")
sys.exit(1)
if args.temperature <= 0:
raise Exception('temperature must be > 0')
args.cand = args.cand.readlines()
n_gpus = torch.cuda.device_count()
logging.debug(f'Running on {"GPU" if n_gpus else "CPU"}')
if len(args.cand) > 50 and n_gpus == 0:
logging.warning('Running on CPU is slow...')
prism = Prism(model_dir=args.model_dir, lang=args.lang, temperature=args.temperature)
scores = prism.score(cand=args.cand, ref=args.ref, src=args.src, segment_scores=args.segment_scores)
logger.info(f'Prism identifier: {prism.identifier()}')
if args.segment_scores:
for ss in scores:
print(ss)
else:
print(scores)
class SequenceScorer(object):
"""
Copy of https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_scorer.py
with softmax temperature control added
MIT License
Copyright (c) Facebook, Inc. and its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, tgt_dict, softmax_batch=None, temperature=1.0):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos()
self.softmax_batch = softmax_batch or sys.maxsize
self.temperature = temperature
assert self.softmax_batch > 0
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample['net_input']
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample['target']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample['target'] = tgt
# divide the logits by temperature prior to softmax
# for example, see https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_generator.py:
# decoder_out[0][:, -1:, :].div_(temperature)
bd[0].div_(self.temperature)
curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample['target'] = orig_target
probs = probs.view(sample['target'].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None and torch.is_tensor(attn):
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
if sample['target'] is not None else None
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i],
sample['target'][i], self.pad, self.eos)
else:
avg_attn_i = alignment = None
hypos.append([{
'tokens': ref,
'score': score_i,
'attention': avg_attn_i,
'alignment': alignment,
'positional_scores': avg_probs_i,
}])
return hypos
if __name__ == '__main__':
main()
| BARTScore-main | WMT/prism.py |
import os
import pickle
import sys
import nltk
from mosestokenizer import *
from nltk import word_tokenize
from nltk.tokenize import sent_tokenize
nltk.download('stopwords')
detokenizer = MosesDetokenizer('en')
def read_file_to_list(file_name):
lines = []
with open(file_name, 'r', encoding='utf8') as f:
for line in f.readlines():
lines.append(line.strip())
return lines
def write_list_to_file(list_to_write, filename):
out_file = open(filename, 'w')
for line in list_to_write:
print(line, file=out_file)
out_file.flush()
out_file.close()
print(f'Saved to {filename}.')
def read_pickle(file):
with open(file, 'rb') as f:
data = pickle.load(f)
return data
def save_pickle(data, file):
with open(file, 'wb') as f:
pickle.dump(data, f)
print(f'Saved to {file}.')
def capitalize_sents(text: str):
""" Given a string, capitalize the initial letter of each sentence. """
sentences = sent_tokenize(text)
sentences = [sent.strip() for sent in sentences]
sentences = [sent.capitalize() for sent in sentences]
sentences = " ".join(sentences)
return sentences
def is_capitalized(text: str):
""" Given a string (system output etc.) , check whether it is lowercased,
or normally capitalized.
"""
return not text.islower()
def tokenize(text: str):
words = word_tokenize(text)
return " ".join(words)
def detokenize(text: str):
words = text.split(" ")
return detokenizer(words)
def use_original_bracket(text: str):
    return (text.replace('-lrb-', '(').replace('-rrb-', ')')
                .replace('-LRB-', '(').replace('-RRB-', ')')
                .replace('-lsb-', '[').replace('-rsb-', ']')
                .replace('-LSB-', '[').replace('-RSB-', ']'))
# Disable print
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore print
def enablePrint():
sys.stdout = sys.__stdout__
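
# --- Illustrative usage (not part of the original file) -------------------
# Demo of the pure-string helpers above; tokenize/detokenize and
# capitalize_sents additionally require the NLTK "punkt" models, so only the
# dependency-free helpers are exercised here.
if __name__ == '__main__':
    print(is_capitalized('this is all lowercase.'))                  # False
    print(is_capitalized('This sentence is Capitalized.'))           # True
    print(use_original_bracket('the value -lrb- in percent -rrb-'))  # "the value ( in percent )"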
| BARTScore-main | WMT/utils.py |
import torch
import torch.nn as nn
import traceback
from transformers import BartTokenizer, BartForConditionalGeneration
class BARTScorer:
def __init__(self, device='cuda:0', max_length=1024, checkpoint='facebook/bart-large-cnn'):
# Set up model
self.device = device
self.max_length = max_length
self.tokenizer = BartTokenizer.from_pretrained(checkpoint)
self.model = BartForConditionalGeneration.from_pretrained(checkpoint)
self.model.eval()
self.model.to(device)
# Set up loss
self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id)
self.lsm = nn.LogSoftmax(dim=1)
def load(self):
""" Load model from paraphrase finetuning """
self.model.load_state_dict(torch.load('models/bart.pth', map_location=self.device))
def score(self, srcs, tgts, batch_size):
""" Score a batch of examples """
score_list = []
for i in range(0, len(srcs), batch_size):
src_list = srcs[i: i + batch_size]
tgt_list = tgts[i: i + batch_size]
try:
with torch.no_grad():
encoded_src = self.tokenizer(
src_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
encoded_tgt = self.tokenizer(
tgt_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
src_tokens = encoded_src['input_ids'].to(self.device)
src_mask = encoded_src['attention_mask'].to(self.device)
tgt_tokens = encoded_tgt['input_ids'].to(self.device)
tgt_mask = encoded_tgt['attention_mask']
tgt_len = tgt_mask.sum(dim=1).to(self.device)
output = self.model(
input_ids=src_tokens,
attention_mask=src_mask,
labels=tgt_tokens
)
logits = output.logits.view(-1, self.model.config.vocab_size)
loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1))
loss = loss.view(tgt_tokens.shape[0], -1)
loss = loss.sum(dim=1) / tgt_len
curr_score_list = [-x.item() for x in loss]
score_list += curr_score_list
except RuntimeError:
traceback.print_exc()
print(f'source: {src_list}')
print(f'target: {tgt_list}')
exit(0)
return score_list
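
# --- Illustrative usage (not part of the original file) -------------------
# A sketch of scoring hypothesis/reference pairs with BARTScorer. It downloads
# the facebook/bart-large-cnn checkpoint on first use; the device string and
# example texts are assumptions for illustration.
if __name__ == '__main__':
    bart_scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn')
    srcs = ['This is a very good idea. Although simple, it is very insightful.']
    tgts = ['This is a good idea. It is simple but insightful.']
    print(bart_scorer.score(srcs, tgts, batch_size=4))  # avg log-likelihoods (higher is better)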
| BARTScore-main | WMT/bart_score.py |
import argparse
import os
import time
import numpy as np
from utils import *
from tqdm import tqdm
REF_HYPO = read_file_to_list('files/tiny_ref_hypo_prompt.txt')
class Scorer:
""" Support BLEU, CHRF, BLEURT, PRISM, COMET, BERTScore, BARTScore """
def __init__(self, file_path, device='cuda:0'):
""" file_path: path to the pickle file
All the data are normal capitalized, not tokenied, including src, ref, sys
"""
self.device = device
self.data = read_pickle(file_path)
print(f'Data loaded from {file_path}.')
self.refs, self.betters, self.worses = [], [], []
for doc_id in self.data:
self.refs.append(self.data[doc_id]['ref'])
self.betters.append(self.data[doc_id]['better']['sys'])
self.worses.append(self.data[doc_id]['worse']['sys'])
def save_data(self, path):
save_pickle(self.data, path)
def record(self, scores_better, scores_worse, name):
""" Record the scores from a metric """
for doc_id in self.data:
self.data[doc_id]['better']['scores'][name] = str(scores_better[doc_id])
self.data[doc_id]['worse']['scores'][name] = str(scores_worse[doc_id])
def score(self, metrics):
for metric_name in metrics:
if metric_name == 'bleu':
from sacrebleu import corpus_bleu
from sacremoses import MosesTokenizer
def run_sentence_bleu(candidates: list, references: list) -> list:
""" Runs sentence BLEU from Sacrebleu. """
tokenizer = MosesTokenizer(lang='en')
candidates = [tokenizer.tokenize(mt, return_str=True) for mt in candidates]
references = [tokenizer.tokenize(ref, return_str=True) for ref in references]
assert len(candidates) == len(references)
bleu_scores = []
for i in range(len(candidates)):
bleu_scores.append(corpus_bleu([candidates[i], ], [[references[i], ]]).score)
return bleu_scores
start = time.time()
print(f'Begin calculating BLEU.')
scores_better = run_sentence_bleu(self.betters, self.refs)
scores_worse = run_sentence_bleu(self.worses, self.refs)
print(f'Finished calculating BLEU, time passed {time.time() - start}s.')
self.record(scores_better, scores_worse, 'bleu')
elif metric_name == 'chrf':
from sacrebleu import sentence_chrf
def run_sentence_chrf(candidates: list, references: list) -> list:
""" Runs sentence chrF from Sacrebleu. """
assert len(candidates) == len(references)
chrf_scores = []
for i in range(len(candidates)):
chrf_scores.append(
sentence_chrf(hypothesis=candidates[i], references=[references[i]]).score
)
return chrf_scores
start = time.time()
print(f'Begin calculating CHRF.')
scores_better = run_sentence_chrf(self.betters, self.refs)
scores_worse = run_sentence_chrf(self.worses, self.refs)
print(f'Finished calculating CHRF, time passed {time.time() - start}s.')
self.record(scores_better, scores_worse, 'chrf')
elif metric_name == 'bleurt':
from bleurt import score
def run_bleurt(
candidates: list, references: list, checkpoint: str = "models/bleurt-large-512"
):
scorer = score.BleurtScorer(checkpoint)
scores = scorer.score(references=references, candidates=candidates)
return scores
start = time.time()
print(f'Begin calculating BLEURT.')
scores_better = run_bleurt(self.betters, self.refs)
scores_worse = run_bleurt(self.worses, self.refs)
print(f'Finished calculating BLEURT, time passed {time.time() - start}s.')
self.record(scores_better, scores_worse, 'bleurt')
elif metric_name == 'prism':
from prism import Prism
def run_prism(mt: list, ref: list) -> list:
prism = Prism(model_dir="./models/m39v1", lang='en', temperature=1.0)
_, _, scores = prism.score(cand=mt, ref=ref, segment_scores=True)
return list(scores)
start = time.time()
print(f'Begin calculating PRISM.')
scores_better = run_prism(self.betters, self.refs)
scores_worse = run_prism(self.worses, self.refs)
print(f'Finished calculating PRISM, time passed {time.time() - start}s.')
self.record(scores_better, scores_worse, 'prism')
elif metric_name == 'comet':
from comet.models import load_checkpoint
def create_samples():
""" Dataframe to dictionary. """
hyp1_samples, hyp2_samples = [], []
for doc_id in self.data:
hyp1_samples.append(
{
'src': str(self.data[doc_id]['src']),
'ref': str(self.data[doc_id]['ref']),
'mt': str(self.data[doc_id]['better']['sys'])
}
)
hyp2_samples.append(
{
'src': str(self.data[doc_id]['src']),
'ref': str(self.data[doc_id]['ref']),
'mt': str(self.data[doc_id]['worse']['sys'])
}
)
return hyp1_samples, hyp2_samples
checkpoint = './models/wmt-large-da-estimator-1718/_ckpt_epoch_1.ckpt'
model = load_checkpoint(checkpoint)
hyp1_samples, hyp2_samples = create_samples()
start = time.time()
print(f'Begin calculating COMET.')
_, scores_better = model.predict(
hyp1_samples, cuda=True, show_progress=False
)
_, scores_worse = model.predict(
hyp2_samples, cuda=True, show_progress=False
)
print(f'Finished calculating COMET, time passed {time.time() - start}s.')
self.record(scores_better, scores_worse, 'comet')
elif metric_name == 'bert_score':
import bert_score
def run_bertscore(mt: list, ref: list):
""" Runs BERTScores and returns precision, recall and F1 BERTScores ."""
_, _, f1 = bert_score.score(
cands=mt,
refs=ref,
idf=False,
batch_size=32,
lang='en',
rescale_with_baseline=False,
verbose=True,
nthreads=4,
)
return f1.numpy()
start = time.time()
print(f'Begin calculating BERTScore.')
scores_better = run_bertscore(self.betters, self.refs)
scores_worse = run_bertscore(self.worses, self.refs)
print(f'Finished calculating BERTScore, time passed {time.time() - start}s.')
self.record(scores_better, scores_worse, 'bert_score')
            elif metric_name in ('bart_score', 'bart_score_cnn', 'bart_score_para'):
from bart_score import BARTScorer
def run_bartscore(scorer, mt: list, ref: list):
hypo_ref = np.array(scorer.score(mt, ref, batch_size=4))
ref_hypo = np.array(scorer.score(ref, mt, batch_size=4))
avg_f = 0.5 * (ref_hypo + hypo_ref)
return avg_f
# Set up BARTScore
if 'cnn' in metric_name:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
elif 'para' in metric_name:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
bart_scorer.load()
else:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large')
start = time.time()
print(f'Begin calculating BARTScore.')
scores_better = run_bartscore(bart_scorer, self.betters, self.refs)
scores_worse = run_bartscore(bart_scorer, self.worses, self.refs)
print(f'Finished calculating BARTScore, time passed {time.time() - start}s.')
self.record(scores_better, scores_worse, metric_name)
elif metric_name.startswith('prompt'):
""" BARTScore adding prompts """
from bart_score import BARTScorer
def prefix_prompt(l, p):
new_l = []
for x in l:
new_l.append(p + ', ' + x)
return new_l
def suffix_prompt(l, p):
new_l = []
for x in l:
new_l.append(x + ' ' + p + ',')
return new_l
if 'cnn' in metric_name:
name = 'bart_score_cnn'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
elif 'para' in metric_name:
name = 'bart_score_para'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
bart_scorer.load()
else:
name = 'bart_score'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large')
start = time.time()
print(f'BARTScore-P setup finished. Begin calculating BARTScore-P.')
for prompt in tqdm(REF_HYPO, total=len(REF_HYPO), desc='Calculating prompt.'):
ref_better_en = np.array(bart_scorer.score(suffix_prompt(self.refs, prompt), self.betters,
batch_size=4))
better_ref_en = np.array(bart_scorer.score(suffix_prompt(self.betters, prompt), self.refs,
batch_size=4))
better_scores = 0.5 * (ref_better_en + better_ref_en)
ref_worse_en = np.array(bart_scorer.score(suffix_prompt(self.refs, prompt), self.worses,
batch_size=5))
worse_ref_en = np.array(bart_scorer.score(suffix_prompt(self.worses, prompt), self.refs,
batch_size=5))
worse_scores = 0.5 * (ref_worse_en + worse_ref_en)
self.record(better_scores, worse_scores, f'{name}_en_{prompt}')
ref_better_de = np.array(bart_scorer.score(self.refs, prefix_prompt(self.betters, prompt),
batch_size=5))
better_ref_de = np.array(bart_scorer.score(self.betters, prefix_prompt(self.refs, prompt),
batch_size=5))
better_scores = 0.5 * (ref_better_de + better_ref_de)
ref_worse_de = np.array(bart_scorer.score(self.refs, prefix_prompt(self.worses, prompt),
batch_size=5))
worse_ref_de = np.array(bart_scorer.score(self.worses, prefix_prompt(self.refs, prompt),
batch_size=5))
worse_scores = 0.5 * (ref_worse_de + worse_ref_de)
self.record(better_scores, worse_scores, f'{name}_de_{prompt}')
print(f'Finished calculating BARTScore, time passed {time.time() - start}s.')
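# Illustrative sketch: besides the CLI in main() below, the Scorer can be driven directly.
# The paths follow the example commands at the bottom of this file and are placeholders:
#
#     scorer = Scorer('kk-en/data.pkl', device='cuda:0')
#     scorer.score(['bleu', 'bart_score'])
#     scorer.save_data('kk-en/scores.pkl')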
def main():
parser = argparse.ArgumentParser(description='Scorer parameters')
parser.add_argument('--file', type=str, required=True,
help='The data to load from.')
parser.add_argument('--device', type=str, default='cuda:0',
help='The device to run on.')
parser.add_argument('--output', type=str, required=True,
help='The output path to save the calculated scores.')
parser.add_argument('--bleu', action='store_true', default=False,
help='Whether to calculate BLEU')
parser.add_argument('--chrf', action='store_true', default=False,
help='Whether to calculate CHRF')
parser.add_argument('--bleurt', action='store_true', default=False,
help='Whether to calculate BLEURT')
parser.add_argument('--prism', action='store_true', default=False,
help='Whether to calculate PRISM')
parser.add_argument('--comet', action='store_true', default=False,
help='Whether to calculate COMET')
parser.add_argument('--bert_score', action='store_true', default=False,
help='Whether to calculate BERTScore')
parser.add_argument('--bart_score', action='store_true', default=False,
help='Whether to calculate BARTScore')
parser.add_argument('--bart_score_cnn', action='store_true', default=False,
help='Whether to calculate BARTScore-CNN')
parser.add_argument('--bart_score_para', action='store_true', default=False,
help='Whether to calculate BARTScore-Para')
parser.add_argument('--prompt', type=str, default=None,
help='Whether to calculate BARTScore-P, can be bart_ref, bart_cnn_ref, bart_para_ref')
args = parser.parse_args()
scorer = Scorer(args.file, args.device)
METRICS = []
if args.bleu:
METRICS.append('bleu')
if args.chrf:
METRICS.append('chrf')
if args.bleurt:
METRICS.append('bleurt')
if args.prism:
METRICS.append('prism')
if args.comet:
METRICS.append('comet')
if args.bert_score:
METRICS.append('bert_score')
if args.bart_score:
METRICS.append('bart_score')
if args.bart_score_cnn:
METRICS.append('bart_score_cnn')
if args.bart_score_para:
METRICS.append('bart_score_para')
if args.prompt is not None:
prompt = args.prompt
assert prompt in ['bart_ref', 'bart_cnn_ref', 'bart_para_ref']
METRICS.append(f'prompt_{prompt}')
scorer.score(METRICS)
scorer.save_data(args.output)
if __name__ == '__main__':
main()
"""
python score.py --file kk-en/data.pkl --device cuda:0 --output kk-en/scores.pkl --bleu --chrf --bleurt --prism --comet --bert_score --bart_score --bart_score_cnn --bart_score_para
python score.py --file lt-en/scores.pkl --device cuda:3 --output lt-en/scores.pkl --bart_score --bart_score_cnn --bart_score_para
"""
| BARTScore-main | WMT/score.py |
from bart_score.utils import *
from copy import deepcopy
from tqdm import trange
from tqdm import tqdm
from typing import Optional, List
class SUMStat:
""" A class used to get stats of SUM trained data """
def __init__(self, path):
self.path = path
self.data = read_pickle(path)
self.sample_id = list(self.data.keys())[0]
self.sample_sys = list(self.data[self.sample_id]['sys_summs'].keys())[0]
self._metrics = list(self.data[self.sample_id]['sys_summs'][self.sample_sys]['scores'].keys())
self._auto_metrics = [x for x in self.metrics if x not in self.human_metrics]
def save_data(self, path=None):
if path is None:
path = self.path
save_pickle(self.data, path)
def evaluate_summary(
self,
human_metrics: Optional[List[str]] = None,
auto_metrics: Optional[List[str]] = None,
benchmark=None,
table=None
):
""" Evaluate summaries. Conduct summary-level correlations w.r.t each document """
if human_metrics is None:
human_metrics = self.human_metrics
if auto_metrics is None:
auto_metrics = self.auto_metrics
if benchmark is None:
benchmark = self.benchmark
assert all(human_metric in self.human_metrics for human_metric in human_metrics)
assert all(auto_metric in self.auto_metrics for auto_metric in auto_metrics)
metric_with_corr = []
for human_metric in human_metrics:
print(f'Human metric: {human_metric}')
headers = ['auto metric', 'human metric', 'sys_spearmanr', 'sys_kendalltau', 'sum_spearmanr', 'sum_kendalltau']
for auto_metric in auto_metrics:
# Wherever possible, we are using notation from section 2 in https://aclanthology.org/2021.tacl-1.67/
X = []
Z = []
for doc_id in self.data:
X.append([])
Z.append([])
# All system generated summaries for this particular document
sys_summs = self.data[doc_id]['sys_summs']
for sys_name in sys_summs:
x_i_j = sys_summs[sys_name]['scores'][auto_metric]
z_i_j = sys_summs[sys_name]['scores'][human_metric]
X[-1].append(x_i_j)
Z[-1].append(z_i_j)
                    # Drop documents where either metric is constant across systems: rank
                    # correlation is undefined (NaN) there. This check follows the original BARTScore code.
if len(set(X[-1])) == 1 or len(set(Z[-1])) == 1:
del X[-1]
del Z[-1]
continue
# System-level correlations
X_sys, Z_sys = np.mean(X, axis=0), np.mean(Z, axis=0)
r_sys_spearmanr = spearmanr(Z_sys, X_sys).correlation
r_sys_kendalltau = kendalltau(Z_sys, X_sys).correlation
# Summary-level correlations
r_sum_spearmanr = np.mean([spearmanr(Z[i], X[i]).correlation for i in range(len(X))])
r_sum_kendalltau = np.mean([kendalltau(Z[i], X[i]).correlation for i in range(len(X))])
metric_with_corr.append(
[
auto_metric,
human_metric,
r_sys_spearmanr,
r_sys_kendalltau,
r_sum_spearmanr,
r_sum_kendalltau
]
)
if table is not None:
with open(table, "w") as f:
f.write("auto_metric\thuman_metric\tbenchmark\tspearmanr\tkendalltau\tcorrelation_level\tmulti_ref_aggregation\n")
for auto, human, sys_spr, sys_kt, sum_spr, sum_kt in metric_with_corr:
if "_max" in auto:
multi_ref = "max"
elif "_mean" in auto:
multi_ref = "mean"
else:
multi_ref = "none"
f.write(f'{auto}\t{human}\t{benchmark}\t{sys_spr}\t{sys_kt}\tsystem\t{multi_ref}\n')
f.write(f'{auto}\t{human}\t{benchmark}\t{sum_spr}\t{sum_kt}\tsummary\t{multi_ref}\n')
print(tabulate(metric_with_corr, headers=headers, tablefmt='simple'))
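    # Worked toy example of the two aggregation levels above (comments only, values invented):
    # with 2 documents and 3 systems, auto scores X = [[0.1, 0.2, 0.3], [0.2, 0.1, 0.4]] and
    # human scores Z = [[1, 2, 3], [2, 1, 3]].
    # System level: average over documents first, X_sys = [0.15, 0.15, 0.35],
    # Z_sys = [1.5, 1.5, 3.0], then correlate once across systems.
    # Summary level: correlate within each document, spearman(Z[0], X[0]) = 1.0 and
    # spearman(Z[1], X[1]) = 1.0, then average the per-document correlations (here 1.0).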
def get_fact_pearson(self, auto_metrics=None):
assert 'QAGS' in self.path
headers = ['metric', 'pearson']
metric_with_corr = []
if auto_metrics is None:
auto_metrics = self.auto_metrics
for metric in auto_metrics:
human_scores = []
metric_scores = []
for doc_id in self.data:
human_scores.append(self.data[doc_id]['sys_summs'][0]['scores']['fact'])
metric_scores.append(self.data[doc_id]['sys_summs'][0]['scores'][metric])
pearson, _ = pearsonr(human_scores, metric_scores)
metric_with_corr.append([metric, pearson])
metric_with_corr = sorted(metric_with_corr, key=lambda x: x[1], reverse=True)
print(tabulate(metric_with_corr, headers=headers, tablefmt='simple'))
def fact_pearson_sig_test(self, metric_list):
for m in metric_list:
assert m in self.auto_metrics
comp_tab = np.zeros((len(metric_list), len(metric_list)), dtype=int)
for i in range(len(metric_list)): # row
for j in range(i + 1, len(metric_list)): # col
m1 = metric_list[i]
m2 = metric_list[j]
# Test if m1 is significant better than m2
out = self.fact_pearson_sig_test_two(m1, m2)
if out == 1:
comp_tab[j][i] = 1
elif out == -1:
comp_tab[i][j] = 1
else:
pass
result = comp_tab.sum(axis=1)
best_metrics = []
for i in range(len(result)):
if result[i] == 0:
best_metrics.append(metric_list[i])
print(f'Best metrics are: {best_metrics}')
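    # Worked toy example of the comparison table above (comments only): with
    # metric_list = ['m_a', 'm_b', 'm_c'], suppose m_a significantly beats m_b and no other
    # pair is decided. Then comp_tab[1][0] = 1 (row of the loser, column of the winner),
    # all other cells stay 0, and result = comp_tab.sum(axis=1) = [0, 1, 0]; the metrics
    # that are never significantly beaten -- m_a and m_c -- are reported as best.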
def fact_pearson_sig_test_two(self, m1, m2):
assert 'QAGS' in self.path
random.seed(666)
doc_ids = list(self.data.keys())
better = 0
for i in trange(1000):
random.shuffle(doc_ids)
sub_ids = doc_ids[:int(0.8 * len(doc_ids))]
m1_scores, m2_scores, human_scores = [], [], []
for doc_id in sub_ids:
human_scores.append(self.data[doc_id]['sys_summs'][0]['scores']['fact'])
m1_scores.append(self.data[doc_id]['sys_summs'][0]['scores'][m1])
m2_scores.append(self.data[doc_id]['sys_summs'][0]['scores'][m2])
pearson_m1, _ = pearsonr(human_scores, m1_scores)
pearson_m2, _ = pearsonr(human_scores, m2_scores)
if pearson_m1 > pearson_m2:
better += 1
if better > 950:
return 1
elif better < 50:
return -1
else:
return 0
def get_fact_acc(self, auto_metrics=None):
""" Used for the Rank19 dataset. """
assert 'Rank' in self.path
headers = ['metric', 'acc']
metric_with_acc = []
if auto_metrics is None:
auto_metrics = self.auto_metrics
for metric in auto_metrics:
correct = 0
for doc_id in self.data:
if self.data[doc_id]['sys_summs']['correct']['scores'][metric] > \
self.data[doc_id]['sys_summs']['incorrect']['scores'][metric]:
correct += 1
metric_with_acc.append([metric, correct / len(self.data)])
metric_with_acc = sorted(metric_with_acc, key=lambda x: x[1], reverse=True)
print(tabulate(metric_with_acc, headers=headers, tablefmt='simple'))
def fact_acc_sig_test(self, metric_list):
for m in metric_list:
assert m in self.auto_metrics
comp_tab = np.zeros((len(metric_list), len(metric_list)), dtype=int)
for i in range(len(metric_list)): # row
for j in range(i + 1, len(metric_list)): # col
m1 = metric_list[i]
m2 = metric_list[j]
# Test if m1 is significant better than m2
out = self.fact_acc_sig_test_two(m1, m2)
if out == 1:
comp_tab[j][i] = 1
elif out == -1:
comp_tab[i][j] = 1
else:
pass
result = comp_tab.sum(axis=1)
best_metrics = []
for i in range(len(result)):
if result[i] == 0:
best_metrics.append(metric_list[i])
print(f'Best metrics are: {best_metrics}')
def fact_acc_sig_test_two(self, m1, m2):
""" Return 1 if m1 significant better than m2, or -1 if m1 significant worse than m2
or 0 if cannot decide.
"""
assert 'Rank' in self.path
random.seed(666)
doc_ids = list(self.data.keys())
better = 0
for i in trange(1000):
random.shuffle(doc_ids)
sub_ids = doc_ids[:int(0.8 * len(doc_ids))]
m1_correct = 0
m2_correct = 0
for doc_id in sub_ids:
if self.data[doc_id]['sys_summs']['correct']['scores'][m1] > \
self.data[doc_id]['sys_summs']['incorrect']['scores'][m1]:
m1_correct += 1
if self.data[doc_id]['sys_summs']['correct']['scores'][m2] > \
self.data[doc_id]['sys_summs']['incorrect']['scores'][m2]:
m2_correct += 1
if m1_correct > m2_correct:
better += 1
if better > 950:
return 1
elif better < 50:
return -1
else:
return 0
def sig_test(self, metric_list, human_metric):
""" Comparisons between all pairs of metrics. Using Spearman correlation. """
for m in metric_list:
assert m in self.auto_metrics
comp_tab = np.zeros((len(metric_list), len(metric_list)), dtype=int)
for i in range(len(metric_list)): # row
for j in range(i + 1, len(metric_list)): # col
m1 = metric_list[i]
m2 = metric_list[j]
# Test if m1 is significant better than m2
out = self.sig_test_two(m1, m2, human_metric)
if out == 1:
comp_tab[j][i] = 1
elif out == -1:
comp_tab[i][j] = 1
else:
pass
result = comp_tab.sum(axis=1)
best_metrics = []
for i in range(len(result)):
if result[i] == 0:
best_metrics.append(metric_list[i])
print(f'Best metrics are: {best_metrics}')
def sig_test_two(self, m1, m2, human_metric):
""" Comparisons between a pair of metrics. Using Spearman correlation.
Test if m1 is significant better than m2. return 1 if m1 is better,
return -1 if m2 is better, otherwise return 0
"""
        assert ('Rank' not in self.path) and ('QAGS' not in self.path)
random.seed(666)
doc_ids = list(self.data.keys())
better = 0
for i in trange(1000):
random.shuffle(doc_ids)
sub_ids = doc_ids[:int(0.8 * len(doc_ids))]
corr1, corr2 = [], []
for doc_id in sub_ids:
target, pred1, pred2 = [], [], []
sys_summs = self.data[doc_id]['sys_summs']
for sys_name in sys_summs:
pred1.append(sys_summs[sys_name]['scores'][m1])
pred2.append(sys_summs[sys_name]['scores'][m2])
target.append(sys_summs[sys_name]['scores'][human_metric])
if len(set(pred1)) == 1 or len(set(pred2)) == 1 or len(set(target)) == 1:
continue
corr1.append(spearmanr(target, pred1)[0])
corr2.append(spearmanr(target, pred2)[0])
corr1 = np.mean(corr1)
corr2 = np.mean(corr2)
if corr1 > corr2:
better += 1
if better > 950:
return 1
elif better < 50:
return -1
else:
return 0
def combine_prompt(self):
""" Take the average of all prompted results for a single prediction.
We consider encoder-based prompts and decoder-based prompts separately.
"""
def get_keys(s):
""" Get the first key and second key in MAP """
k1, k2 = None, None
if s.startswith('bart_score_cnn'):
k1 = 'bart_score_cnn'
elif s.startswith('bart_score_para'):
k1 = 'bart_score_para'
else:
k1 = 'bart_score'
if 'src' in s:
if '_en_' in s:
k2 = 'src_hypo_en'
else:
k2 = 'src_hypo_de'
if 'hypo_ref' in s:
if '_en_' in s:
k2 = 'hypo_ref_en'
else:
k2 = 'hypo_ref_de'
if 'ref_hypo' in s:
if '_en_' in s:
k2 = 'ref_hypo_en'
else:
k2 = 'ref_hypo_de'
if 'avg_f' in s:
if '_en_' in s:
k2 = 'avg_f_en'
else:
k2 = 'avg_f_de'
if 'harm_f' in s:
if '_en_' in s:
k2 = 'harm_f_en'
else:
k2 = 'harm_f_de'
return k1, k2
for doc_id in self.data:
sys_summs = self.data[doc_id]['sys_summs']
for sys_name in sys_summs:
types = {
'src_hypo_en': [],
'src_hypo_de': [],
'ref_hypo_en': [],
'ref_hypo_de': [],
'hypo_ref_en': [],
'hypo_ref_de': [],
'avg_f_en': [],
'avg_f_de': [],
'harm_f_en': [],
'harm_f_de': []
}
MAP = {
'bart_score': deepcopy(types),
'bart_score_cnn': deepcopy(types),
'bart_score_para': deepcopy(types)
}
scores = sys_summs[sys_name]['scores']
for k in scores:
if '_en_' in k or '_de_' in k:
k1, k2 = get_keys(k)
MAP[k1][k2].append(scores[k])
for k, v in MAP.items():
for kk, vv in v.items():
if len(vv) == 0:
continue
new_m = k + '_' + kk
if new_m not in self.auto_metrics:
print(f'new_metric: {new_m}')
self._metrics.append(new_m)
self._auto_metrics.append(new_m)
self.data[doc_id]['sys_summs'][sys_name]['scores'][new_m] = sum(vv) / len(vv)
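    # Example of the naming convention handled above (comments only, key shown is schematic):
    # a prompted key of the form 'bart_score_cnn_src_hypo_en_<prompt>' is split by get_keys
    # into k1='bart_score_cnn' and k2='src_hypo_en'; all prompt variants mapping to the same
    # (k1, k2) pair are then averaged into one combined metric, 'bart_score_cnn_src_hypo_en'.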
@property
def auto_metrics(self):
return self._auto_metrics
@property
def metrics(self):
return self._metrics
@property
def human_metrics(self):
""" All available human metrics. """
if 'REALSumm' in self.path:
return ['litepyramid_recall']
if 'SummEval' in self.path:
return ['coherence', 'consistency', 'fluency', 'relevance']
if 'Newsroom' in self.path:
return ['coherence', 'fluency', 'informativeness', 'relevance']
if 'Rank19' in self.path or 'QAGS' in self.path:
return ['fact']
@property
def benchmark(self):
""" All available benchmarks. """
if 'REALSumm' in self.path:
return 'REALSumm'
elif 'SummEval' in self.path:
return 'SummEval'
elif 'Newsroom' in self.path:
return 'Newsroom'
elif 'Rank19' in self.path:
return 'Rank19'
elif 'QAGS' in self.path:
return 'QAGS'
else:
raise ValueError(
"Unable to determine benchmark from path. Please provide it via the benchmark arg."
)
class D2TStat:
""" A class used to get stats of D2T trained data """
def __init__(self, path):
self.path = path
self.data = read_pickle(path)
self.sample_id = list(self.data.keys())[0]
self._metrics = list(self.data[self.sample_id]['scores'].keys())
self._auto_metrics = [x for x in self.metrics if x not in self.human_metrics]
def evaluate_text(self, human_metric, auto_metrics=None, table=None):
print(f'Human metric: {human_metric}')
headers = ['metric', 'spearman', 'kendalltau']
metric_with_corr = []
if auto_metrics is None:
auto_metrics = self.auto_metrics
for metric in auto_metrics:
human_scores = []
metric_scores = []
for doc_id in self.data:
human_scores.append(self.data[doc_id]['scores'][human_metric])
metric_scores.append(self.data[doc_id]['scores'][metric])
spearman = spearmanr(human_scores, metric_scores)[0]
ktau = kendalltau(human_scores, metric_scores)[0]
metric_with_corr.append([metric, spearman, ktau])
sorted_metric_with_corr = sorted(metric_with_corr, key=lambda x: x[1], reverse=True)
if table is not None:
file = open(table, 'w')
for each in sorted_metric_with_corr:
print(f'{each[0]}\t{each[1]}\t{each[2]}', file=file)
file.flush()
print(tabulate(sorted_metric_with_corr, headers=headers, tablefmt='simple'))
def sig_test_two(self, m1, m2, human_metric):
        doc_ids = list(self.data.keys())
        better = 0
        random.seed(666)
        for i in trange(1000):
            random.shuffle(doc_ids)
            sub_ids = doc_ids[:int(0.8 * len(doc_ids))]
            # Reset the score lists for every bootstrap sample so each correlation
            # is computed on the current resample only.
            human_scores = []
            m1_scores = []
            m2_scores = []
            for doc_id in sub_ids:
human_scores.append(self.data[doc_id]['scores'][human_metric])
m1_scores.append(self.data[doc_id]['scores'][m1])
m2_scores.append(self.data[doc_id]['scores'][m2])
spearman1, _ = spearmanr(human_scores, m1_scores)
spearman2, _ = spearmanr(human_scores, m2_scores)
if spearman1 > spearman2:
better += 1
if better > 950:
return 1
elif better < 50:
return -1
else:
return 0
def combine_prompt(self):
def get_keys(s):
""" Get the first key and second key in MAP """
k1, k2 = None, None
if s.startswith('bart_score_cnn'):
k1 = 'bart_score_cnn'
elif s.startswith('bart_score_para'):
k1 = 'bart_score_para'
else:
k1 = 'bart_score'
if 'src' in s:
if '_en_' in s:
k2 = 'src_hypo_en'
else:
k2 = 'src_hypo_de'
if 'hypo_ref' in s:
if '_en_' in s:
k2 = 'hypo_ref_en'
else:
k2 = 'hypo_ref_de'
if 'ref_hypo' in s:
if '_en_' in s:
k2 = 'ref_hypo_en'
else:
k2 = 'ref_hypo_de'
if 'avg_f' in s:
if '_en_' in s:
k2 = 'avg_f_en'
else:
k2 = 'avg_f_de'
if 'harm_f' in s:
if '_en_' in s:
k2 = 'harm_f_en'
else:
k2 = 'harm_f_de'
return k1, k2
for doc_id in self.data:
types = {
'src_hypo_en': [],
'src_hypo_de': [],
'ref_hypo_en': [],
'ref_hypo_de': [],
'hypo_ref_en': [],
'hypo_ref_de': [],
'avg_f_en': [],
'avg_f_de': [],
'harm_f_en': [],
'harm_f_de': []
}
MAP = {
'bart_score': deepcopy(types),
'bart_score_cnn': deepcopy(types),
'bart_score_para': deepcopy(types)
}
scores = self.data[doc_id]['scores']
for k in scores:
if '_en_' in k or '_de_' in k:
k1, k2 = get_keys(k)
MAP[k1][k2].append(scores[k])
for k, v in MAP.items():
for kk, vv in v.items():
if len(vv) == 0:
continue
new_m = k + '_' + kk
if new_m not in self.auto_metrics:
print(f'new_metric: {new_m}')
self._metrics.append(new_m)
self._auto_metrics.append(new_m)
self.data[doc_id]['scores'][new_m] = sum(vv) / len(vv)
def save_data(self, path=None):
if path is None:
path = self.path
save_pickle(self.data, path)
@property
def auto_metrics(self):
return self._auto_metrics
@property
def metrics(self):
return self._metrics
@property
def human_metrics(self):
return ['informativeness', 'naturalness', 'quality']
class WMTStat:
""" A class used to get stats of WMT trained data """
def __init__(self, path):
self.path = path
self.data = read_pickle(path)
self._metrics = list(self.data[0]['better']['scores'].keys())
pos = path.find('-en')
self.lp = path[pos - 2: pos + 3]
# systems ranked by their DA score
self._systems = {
'de-en': ['Facebook_FAIR.6750', 'RWTH_Aachen_System.6818', 'MSRA.MADL.6910', 'online-B.0', 'JHU.6809',
'MLLP-UPV.6899', 'dfki-nmt.6478', 'UCAM.6461', 'online-A.0', 'NEU.6801', 'uedin.6749',
'online-Y.0', 'TartuNLP-c.6502', 'online-G.0', 'PROMT_NMT_DE-EN.6683', 'online-X.0'],
'fi-en': ['MSRA.NAO.6983', 'online-Y.0', 'GTCOM-Primary.6946', 'USYD.6995', 'online-B.0',
'Helsinki_NLP.6889', 'online-A.0', 'online-G.0', 'TartuNLP-c.6905', 'online-X.0', 'parfda.6526',
'apertium-fin-eng-unconstrained-fien.6449'],
'gu-en': ['NEU.6756', 'UEDIN.6534', 'GTCOM-Primary.6969', 'CUNI-T2T-transfer-guen.6431',
'aylien_mt_gu-en_multilingual.6826', 'NICT.6603', 'online-G.0', 'IITP-MT.6824', 'UdS-DFKI.6861',
'IIITH-MT.6688', 'Ju_Saarland.6525'],
'kk-en': ['online-B.0', 'NEU.6753', 'rug_kken_morfessor.6677', 'online-G.0', 'talp_upc_2019_kken.6657',
'NRC-CNRC.6895', 'Frank_s_MT.6127', 'NICT.6770', 'CUNI-T2T-transfer-kken.6436', 'UMD.6736',
'DBMS-KU_KKEN.6726'],
'lt-en': ['GTCOM-Primary.6998', 'tilde-nc-nmt.6881', 'NEU.6759', 'MSRA.MASS.6945', 'tilde-c-nmt.6876',
'online-B.0', 'online-A.0', 'TartuNLP-c.6908', 'online-G.0', 'JUMT.6616', 'online-X.0'],
'ru-en': ['Facebook_FAIR.6937', 'online-G.0', 'eTranslation.6598', 'online-B.0', 'NEU.6803',
'MSRA.SCA.6976', 'rerank-re.6540', 'online-Y.0', 'online-A.0', 'afrl-syscomb19.6782',
'afrl-ewc.6659', 'TartuNLP-u.6650', 'online-X.0', 'NICT.6561'],
'zh-en': ['Baidu-system.6940', 'KSAI-system.6927', 'MSRA.MASS.6996', 'MSRA.MASS.6942', 'NEU.6832',
'BTRANS.6825', 'online-B.0', 'BTRANS-ensemble.6992', 'UEDIN.6530', 'online-Y.0', 'NICT.6814',
'online-A.0', 'online-G.0', 'online-X.0', 'Apprentice-c.6706']
}
def save_data(self, path=None):
if path is None:
path = self.path
save_pickle(self.data, path)
def retrieve_scores(self, metric, doc_ids):
""" retrieve better, worse scores """
better, worse = [], []
for doc_id in doc_ids:
better.append(float(self.data[doc_id]['better']['scores'][metric]))
worse.append(float(self.data[doc_id]['worse']['scores'][metric]))
return better, worse
def kendall(self, hyp1_scores: list, hyp2_scores: list):
""" Computes the official WMT19 shared task Kendall correlation score. """
assert len(hyp1_scores) == len(hyp2_scores)
conc, disc = 0, 0
for x1, x2 in zip(hyp1_scores, hyp2_scores):
if x1 > x2:
conc += 1
else:
disc += 1
return (conc - disc) / (conc + disc)
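    # Worked toy example for the formula above (comments only): with better = [0.9, 0.4, 0.7]
    # and worse = [0.5, 0.6, 0.7] the pairs give conc = 1 (0.9 > 0.5) and disc = 2 (0.4 < 0.6,
    # plus the tie 0.7 vs 0.7, which falls into the else branch), so
    # k-tau = (1 - 2) / (1 + 2) = -1/3.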
def print_ktau(self, metrics=None):
headers = ['metric', 'k-tau']
metric_with_ktau = []
doc_ids = list(self.data.keys())
if metrics is None:
metrics = self.metrics
for metric in tqdm(metrics):
better, worse = self.retrieve_scores(metric, doc_ids)
ktau = self.kendall(better, worse)
metric_with_ktau.append([metric, ktau])
sorted_metric_with_ktau = sorted(metric_with_ktau, key=lambda x: x[1], reverse=True)
print(tabulate(sorted_metric_with_ktau, headers=headers, tablefmt='simple'))
def print_ref_len(self):
""" Get the length of reference texts """
ref_lens = []
for doc_id in self.data:
ref = self.data[doc_id]['ref']
ref_len = len(ref.split(' '))
ref_lens.append(ref_len)
print(f'Mean reference length: {np.mean(ref_lens)}')
print(f'Max reference length: {np.max(ref_lens)}')
print(f'Min reference length: {np.min(ref_lens)}')
print(f'20% percentile: {np.percentile(ref_lens, 20)}')
print(f'80% percentile: {np.percentile(ref_lens, 80)}')
print(f'90% percentile: {np.percentile(ref_lens, 90)}')
def print_len_ktau(self, min_len, max_len, metrics=None):
headers = ['metric', 'k-tau']
metric_with_ktau = []
sub_ids = []
for doc_id in tqdm(self.data):
ref_len = len(self.data[doc_id]['ref'].split(' '))
if min_len <= ref_len <= max_len:
sub_ids.append(doc_id)
print(f'Considered samples: {len(sub_ids)}')
if metrics is None:
metrics = self.metrics
for metric in tqdm(metrics):
better, worse = self.retrieve_scores(metric, sub_ids)
ktau = self.kendall(better, worse)
metric_with_ktau.append([metric, ktau])
sorted_metric_with_ktau = sorted(metric_with_ktau, key=lambda x: x[1], reverse=True)
print(tabulate(sorted_metric_with_ktau, headers=headers, tablefmt='simple'))
def sig_test_two(self, m1, m2):
random.seed(666)
doc_ids = list(self.data.keys())
better = 0
for _ in trange(1000):
random.shuffle(doc_ids)
sub_ids = doc_ids[:int(0.8 * len(doc_ids))]
better_m1, worse_m1, better_m2, worse_m2 = [], [], [], []
for doc_id in sub_ids:
better_m1.append(float(self.data[doc_id]['better']['scores'][m1]))
worse_m1.append(float(self.data[doc_id]['worse']['scores'][m1]))
better_m2.append(float(self.data[doc_id]['better']['scores'][m2]))
worse_m2.append(float(self.data[doc_id]['worse']['scores'][m2]))
m1_ktau = self.kendall(better_m1, worse_m1)
m2_ktau = self.kendall(better_m2, worse_m2)
if m1_ktau > m2_ktau:
better += 1
if better > 950:
return 1
elif better < 50:
return -1
else:
return 0
@property
def metrics(self):
return self._metrics
@property
def systems(self):
return self._systems[self.lp]
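# Illustrative usage sketch (comments only; the pickle path is a placeholder that follows the
# naming used by WMT/score.py):
#
#     stat = WMTStat('kk-en/scores.pkl')
#     stat.print_ktau(['bleu', 'bart_score_cnn'])          # Kendall tau per metric
#     stat.print_len_ktau(10, 30, ['bart_score_cnn'])      # restricted to 10-30 word references
#     print(stat.sig_test_two('bart_score_cnn', 'bleu'))   # 1, -1, or 0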
| BARTScore-main | bart_score/analysis.py |
from bart_score.scorer import BARTScorer | BARTScore-main | bart_score/__init__.py |
import pickle
import jsonlines
import nltk
from nltk.tokenize import sent_tokenize
from nltk import word_tokenize
import numpy as np
from tabulate import tabulate
from mosestokenizer import *
import random
from random import choices
import os
import sys
import re
from collections import defaultdict as ddict
from scipy.stats import pearsonr, spearmanr, kendalltau
# nltk.download('stopwords')
detokenizer = MosesDetokenizer('en')
def read_pickle(file):
with open(file, 'rb') as f:
data = pickle.load(f)
return data
def save_pickle(data, file):
with open(file, 'wb') as f:
pickle.dump(data, f)
print(f'Saved to {file}.')
def read_file_to_list(file_name):
lines = []
with open(file_name, 'r', encoding='utf8') as f:
for line in f.readlines():
lines.append(line.strip())
return lines
def write_list_to_file(list_to_write, filename):
out_file = open(filename, 'w')
for line in list_to_write:
print(line, file=out_file)
out_file.flush()
out_file.close()
print(f'Saved to {filename}.')
def read_jsonlines_to_list(file_name):
lines = []
with jsonlines.open(file_name, 'r') as reader:
for obj in reader:
lines.append(obj)
return lines
def write_list_to_jsonline(list_to_write, filename):
with jsonlines.open(filename, 'w') as writer:
writer.write_all(list_to_write)
print(f'Saved to {filename}.')
def capitalize_sents(text: str):
""" Given a string, capitalize the initial letter of each sentence. """
sentences = sent_tokenize(text)
sentences = [sent.strip() for sent in sentences]
sentences = [sent.capitalize() for sent in sentences]
sentences = " ".join(sentences)
return sentences
def is_capitalized(text: str):
""" Given a string (system output etc.) , check whether it is lowercased,
or normally capitalized.
"""
return not text.islower()
def tokenize(text: str):
words = word_tokenize(text)
return " ".join(words)
def detokenize(text: str):
words = text.split(" ")
return detokenizer(words)
def use_original_bracket(text: str):
    return (text.replace('-lrb-', '(').replace('-rrb-', ')')
            .replace('-LRB-', '(').replace('-RRB-', ')')
            .replace('-lsb-', '[').replace('-rsb-', ']')
            .replace('-LSB-', '[').replace('-RSB-', ']'))
# Disable print
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore print
def enablePrint():
sys.stdout = sys.__stdout__
def retrieve_scores(saveto):
def get_r_p_f(line):
line = line.split(" ")
r = float(line[-3][-7:])
p = float(line[-2][-7:])
f = float(line[-1][-7:])
return r, p, f
lines = read_file_to_list(saveto)
rouge1_list, rouge2_list, rougel_list = [], [], []
for line in lines:
if line.startswith('1 ROUGE-1 Eval'):
rouge1_list.append(get_r_p_f(line))
if line.startswith('1 ROUGE-2 Eval'):
rouge2_list.append(get_r_p_f(line))
if line.startswith('1 ROUGE-L Eval'):
rougel_list.append(get_r_p_f(line))
return rouge1_list, rouge2_list, rougel_list
def get_rank(data, metric):
""" Rank all systems based on a metric (avg score) """
scores = {} # {sysname: [scores]}
for doc_id in data:
sys_summs = data[doc_id]['sys_summs']
for sys_name in sys_summs:
score = sys_summs[sys_name]['scores'][metric]
scores.setdefault(sys_name, []).append(score)
scores = {k: sum(v) / len(v) for k, v in scores.items()}
new_scores = dict(sorted(scores.items(), key=lambda x: x[1], reverse=True))
# for k in new_scores:
# print(k)
return new_scores.keys()
def get_sents_from_tags(text, sent_start_tag, sent_end_tag):
sents = re.findall(r'%s (.+?) %s' % (sent_start_tag, sent_end_tag), text)
sents = [sent for sent in sents if len(sent) > 0] # remove empty sents
return sents
def get_metrics_list(sd):
"""
Does each system summary dict have same all_metrics?
:param sd: scores dict
:return: list of all_metrics in the scores dict
"""
metrics_tuple_set = set(
[tuple(sorted(list(x['scores'].keys())))
for d in sd.values() for x in d['sys_summs'].values()])
assert len(metrics_tuple_set) == 1, (
metrics_tuple_set, "all system summary score dicts should have the same set of all_metrics")
metrics_list = list(list(metrics_tuple_set)[0])
return metrics_list
def print_score_ranges(sd):
metrics_list = get_metrics_list(sd)
print_list = []
headers = ["min", "25-perc", "median", "75-perc", "max", "mean"]
for m in metrics_list:
scores = [s['scores'][m] for d in sd.values() for s in d['sys_summs'].values() if s['sys_summ'] != 'EMPTY']
print_list.append([m,
np.min(scores),
np.percentile(scores, 25),
np.median(scores),
np.percentile(scores, 75),
np.max(scores),
np.mean(scores)])
print(tabulate(print_list, headers=headers, floatfmt=".6f", tablefmt="simple"))
def get_system_level_scores(sd, metrics, agg='mean', nas=False):
"""
systems[system_name][metric] = average_score or list of scores
"""
systems = ddict(lambda: ddict(list))
for isd in sd.values():
for system_name, scores in isd['sys_summs'].items():
for m in metrics:
# Empty summary
if scores['sys_summ'] == 'EMPTY':
systems[system_name][m].append(None)
else:
systems[system_name][m].append(scores['scores'][m])
for system_name, scores in systems.items():
for m in scores:
all_scores = systems[system_name][m]
if agg == 'mean':
all_scores = [x for x in all_scores if x is not None]
systems[system_name][m] = np.mean(all_scores)
if nas:
min_scores = {}
max_scores = {}
for m in metrics:
min_scores[m] = np.min([systems[sys][m] for sys in systems.keys()])
max_scores[m] = np.max([systems[sys][m] for sys in systems.keys()])
for sys in systems:
systems[sys]['nas'] = np.mean([
(systems[sys][m] - min_scores[m]) / (max_scores[m] - min_scores[m]) for m in metrics
])
return systems
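# Worked toy example of the 'nas' aggregate above (comments only): with two metrics whose
# per-system averages are m1: A=0.2, B=0.4, C=0.6 and m2: A=10, B=30, C=20, min-max
# normalization gives m1: A=0.0, B=0.5, C=1.0 and m2: A=0.0, B=1.0, C=0.5, so
# nas(A)=0.0, nas(B)=0.75, nas(C)=0.75, i.e. every metric contributes on a comparable scale.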
def get_topk(systems, k, metric='rouge2_f'):
systems_l = [(name, score[metric]) for name, score in systems.items()]
systems_l = sorted(systems_l, key=lambda x: x[1], reverse=True)
topk_system_names = [tup[0] for tup in systems_l[:k]]
return {name: systems[name] for name in topk_system_names}
def print_correlation(topk_systems, metric_pairs):
# disagreement between every pair of metrics for the topk
headers = ['metric_pair', 'pearson', 'spearman', 'kendalltau']
print_list = []
for pair in metric_pairs:
if 'bart_en_sim' in pair[1] or 'bart_sim' in pair[1]:
continue
m1_scores = []
m2_scores = []
for scores in topk_systems.values():
m1_scores.append(scores[pair[0]]) # Human metric
m2_scores.append(scores[pair[1]])
pearson, _ = pearsonr(m1_scores, m2_scores)
spearman, _ = spearmanr(m1_scores, m2_scores)
ktau, _ = kendalltau(m1_scores, m2_scores)
print_list.append([f'{pair[1]}', pearson, spearman, ktau])
# sort based on pearson
print_list = sorted(print_list, key=lambda x: x[2], reverse=True)
print(tabulate(print_list, headers=headers, tablefmt='simple'))
def get_predictions_br(system_pairs, systems, metric):
random.seed(666)
preds = {}
for pair in system_pairs:
sys1 = systems[pair[0]][metric]
sys2 = systems[pair[1]][metric]
n = len(sys1)
points = [i for i in range(0, n)]
is_better = 0
N = 1000
for i in range(N):
sample = choices(points, k=n)
sys1_, sys2_ = [], []
# Due to EMPTY summary, we have to ensure sys1_, sys2_ not empty
while len(sys1_) == 0:
for p in sample:
if sys1[p] is None or sys2[p] is None:
continue
else:
sys1_.append(sys1[p])
sys2_.append(sys2[p])
sample = choices(points, k=n)
if np.mean(sys1_) > np.mean(sys2_):
is_better += 1
if is_better / N >= 0.95:
preds[pair] = 0 # pair[0] is better
elif is_better / N <= 0.05:
preds[pair] = 1 # pair[1] is better
else:
preds[pair] = 2 # can't say
return preds
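# Illustrative sketch of how get_predictions_br is meant to be called (comments only; the
# system names are placeholders). 'systems' should keep per-document score lists, i.e. it is
# presumably built with get_system_level_scores(..., agg=None) rather than the default 'mean':
#
#     pairs = [('sys_A', 'sys_B')]
#     preds = get_predictions_br(pairs, systems, metric='rouge2_f')
#     # preds[('sys_A', 'sys_B')] is 0 if sys_A wins the bootstrap test, 1 if sys_B wins, 2 if inconclusive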
| BARTScore-main | bart_score/utils.py |
import torch
import torch.nn as nn
import traceback
from transformers import BartTokenizer, BartForConditionalGeneration
from typing import List
import numpy as np
class BARTScorer:
def __init__(self, device='cuda:0', max_length=1024, checkpoint='facebook/bart-large-cnn'):
# Set up model
self.device = device
self.max_length = max_length
self.tokenizer = BartTokenizer.from_pretrained(checkpoint)
self.model = BartForConditionalGeneration.from_pretrained(checkpoint)
self.model.eval()
self.model.to(device)
# Set up loss
self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id)
self.lsm = nn.LogSoftmax(dim=1)
def load(self, path=None):
""" Load model from paraphrase finetuning """
if path is None:
path = 'models/bart.pth'
self.model.load_state_dict(torch.load(path, map_location=self.device))
def score(self, srcs, tgts, batch_size=4):
""" Score a batch of examples """
score_list = []
for i in range(0, len(srcs), batch_size):
src_list = srcs[i: i + batch_size]
tgt_list = tgts[i: i + batch_size]
try:
with torch.no_grad():
encoded_src = self.tokenizer(
src_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
encoded_tgt = self.tokenizer(
tgt_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
src_tokens = encoded_src['input_ids'].to(self.device)
src_mask = encoded_src['attention_mask'].to(self.device)
tgt_tokens = encoded_tgt['input_ids'].to(self.device)
tgt_mask = encoded_tgt['attention_mask']
tgt_len = tgt_mask.sum(dim=1).to(self.device)
output = self.model(
input_ids=src_tokens,
attention_mask=src_mask,
labels=tgt_tokens
)
logits = output.logits.view(-1, self.model.config.vocab_size)
loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1))
loss = loss.view(tgt_tokens.shape[0], -1)
loss = loss.sum(dim=1) / tgt_len
curr_score_list = [-x.item() for x in loss]
score_list += curr_score_list
except RuntimeError:
traceback.print_exc()
print(f'source: {src_list}')
print(f'target: {tgt_list}')
exit(0)
return score_list
def multi_ref_score(self, srcs, tgts: List[List[str]], agg="mean", batch_size=4):
# Assert we have the same number of references
ref_nums = [len(x) for x in tgts]
if len(set(ref_nums)) > 1:
raise Exception("You have different number of references per test sample.")
ref_num = len(tgts[0])
score_matrix = []
for i in range(ref_num):
curr_tgts = [x[i] for x in tgts]
scores = self.score(srcs, curr_tgts, batch_size)
score_matrix.append(scores)
if agg == "mean":
score_list = np.mean(score_matrix, axis=0)
elif agg == "max":
score_list = np.max(score_matrix, axis=0)
else:
raise NotImplementedError
return list(score_list)
def test(self, batch_size=3):
""" Test """
src_list = [
'This is a very good idea. Although simple, but very insightful.',
'Can I take a look?',
'Do not trust him, he is a liar.'
]
tgt_list = [
"That's stupid.",
"What's the problem?",
'He is trustworthy.'
]
print(self.score(src_list, tgt_list, batch_size))
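# Illustrative sketch for multi_ref_score (comments only): every source must come with the same
# number of references; the source is scored against each reference column and the per-reference
# scores are aggregated with 'mean' or 'max':
#
#     scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn')
#     srcs = ['The cat sat on the mat.']
#     refs = [['A cat is sitting on the mat.', 'There is a cat on the mat.']]
#     print(scorer.multi_ref_score(srcs, refs, agg='max', batch_size=4))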
| BARTScore-main | bart_score/scorer.py |
#!/usr/bin/env python3
import argparse
import hashlib
import logging
import os
import sys
from typing import List, Dict, Iterator, Any, Tuple
import numpy as np
import sentencepiece as spm
import torch
from fairseq import checkpoint_utils, utils
from fairseq.data import LanguagePairDataset
from sacrebleu import get_source_file, get_reference_files, DATASETS, get_langpairs_for_testset
logger = logging.getLogger('prism')
logger.setLevel(logging.INFO)
MODELS = {
'8412b2044da4b9b2c0a8ce87b305d0d1': {
'name': 'm39v1',
'path': 'todo',
'date': '2020-04-30',
'description': 'model released with arXiv paper April 2020',
'langs': ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eo', 'fi', 'fr', 'he',
'hr', 'hu', 'id', 'it', 'ja', 'kk', 'lt', 'lv', 'mk', 'nl', 'no', 'pl', 'pt', 'ro', 'ru',
'sk', 'sl', 'sq', 'sr', 'sv', 'tr', 'uk', 'vi', 'zh'],
}
}
def hash_model(model_dir):
md5 = hashlib.md5()
block_size = 2 ** 20
for fname in ('checkpoint.pt', 'spm.model', 'dict.src.txt', 'dict.tgt.txt'):
with open(os.path.join(model_dir, fname), "rb") as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
md5.digest()
return md5.hexdigest()
class Prism:
def __init__(self, model_dir, lang, temperature=1.0):
'''
model_dir should contain:
1) checkpoint.pt: the fairseq model
2) spm.model: the sentencepiece model
3) dict.src.txt: the fairseq source dictionary
4) dict.tgt.txt: the fairseq target dictionary (likely a copy of the source)
        lang: ISO 639-1 Code (e.g. "en"). Must be a language compatible with the model.
'''
self.sp = spm.SentencePieceProcessor()
self.sp.Load(model_dir + '/spm.model')
self.lang = lang
self.temperature = temperature
# this prints things and I can't figure out how to disable it
sys.stdout = open(os.devnull, 'w')
self.models, self.args, self.task = checkpoint_utils.load_model_ensemble_and_task(
[model_dir + '/checkpoint.pt', ],
arg_overrides=dict(data=model_dir + '/'),
)
sys.stdout = sys.__stdout__
self.use_cuda = torch.cuda.is_available()
self.generator = SequenceScorer(self.task.target_dictionary, temperature=temperature)
for model in self.models:
if self.use_cuda:
model.cuda()
model.make_generation_fast_(
beamable_mm_beam_size=None,
need_attn=False,
)
# if model.args.fp16:
# model.half()
# hash model
self.model_hash = hash_model(model_dir)
if self.model_hash in MODELS:
model_langs = MODELS[self.model_hash]['langs']
if lang not in model_langs:
model_name = MODELS[self.model_hash]['name']
logger.warning(f'Language "{lang}" is unsupported for model "{model_name}"')
logger.warning(f'Supported languages for {model_name}: {", ".join(model_langs)}')
sys.exit(1)
else:
logger.warning('unrecognized model, so cannot check language')
def identifier(self):
if self.model_hash in MODELS:
model_name = MODELS[self.model_hash]['name']
else:
logger.warning('unrecognized model, using hash to identify')
model_name = self.model_hash
return dict(version='0.1', model=model_name, seg_scores='avg_log_prob',
sys_scores='avg_log_prob', log_base=2, temperature=self.temperature)
def _binarize(self, sentence: str) -> torch.LongTensor:
return self.task.source_dictionary.encode_line(sentence, add_if_not_exist=False).long()
def _encode(self, sent, prepend=True):
sent = ' '.join(self.sp.EncodeAsPieces(sent))
if prepend:
sent = f'<{self.lang}> ' + sent
return self._binarize(sent)
def _build_batches(self,
source_tokens: List[List[int]],
target_tokens: List[List[int]],
skip_invalid_size_inputs: bool) -> Iterator[Dict[str, Any]]:
# Prune token
source_tokens = [src_token[:1800] for src_token in source_tokens]
target_tokens = [tgt_token[:2000] for tgt_token in target_tokens]
source_lengths = torch.LongTensor([t.numel() for t in source_tokens])
target_lengths = torch.LongTensor([t.numel() for t in target_tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=LanguagePairDataset(source_tokens, source_lengths, self.task.source_dictionary,
tgt=target_tokens, tgt_sizes=target_lengths,
tgt_dict=self.task.target_dictionary),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=(2000, 2000), # ???
ignore_invalid_inputs=skip_invalid_size_inputs,
).next_epoch_itr(shuffle=False)
return batch_iterator
def _score_forward(self, tok_sents_in, tok_sents_out):
assert len(tok_sents_in) == len(tok_sents_out)
tok_level_scores = [None, ] * len(tok_sents_in) # for debug
results = [None, ] * len(tok_sents_in)
for batch in self._build_batches(tok_sents_in, tok_sents_out, skip_invalid_size_inputs=False):
if self.use_cuda: # must be a better way
batch['id'] = batch['id'].cuda()
batch['net_input']['src_tokens'] = batch['net_input']['src_tokens'].cuda()
batch['net_input']['src_lengths'] = batch['net_input']['src_lengths'].cuda()
batch['net_input']['prev_output_tokens'] = batch['net_input']['prev_output_tokens'].cuda()
batch['target'] = batch['target'].cuda()
translations = self.task.inference_step(self.generator, self.models, batch)
ids = batch['id'].cpu().numpy()
tok_scores = [x[0]['positional_scores'].cpu().numpy() for x in translations]
# [1:] to skip language tag log prob
sent_scores = [np.mean(x[1:]) for x in tok_scores]
for _id, sent_score, _tok_score in zip(ids, sent_scores, tok_scores):
results[_id] = sent_score
tok_level_scores[_id] = _tok_score
if logger.level == logging.DEBUG:
for ii, (sent_in, scores_out, sent_out) in enumerate(zip(tok_sents_in, tok_level_scores, tok_sents_out)):
sent_in_str = ' '.join([self.task.source_dictionary[x] for x in sent_in])
logger.debug(f'Input[{ii}] = ' + sent_in_str)
sent_out_tok = [self.task.source_dictionary[x] for x in sent_out]
                logger.debug(f'Output[{ii}] = ' +
                             ' '.join([f'{a}[{b:.02f}]' for a, b in zip(sent_out_tok, scores_out)]))
if None in results:
raise Exception('Missing one or more sentence scores')
return np.array(results)
def score(self, cand, ref=None, src=None, segment_scores=False):
if not (ref is None) ^ (src is None):
raise Exception('Must provide exactly one of "ref" or "src"')
tokenized_cand = [self._encode(sentence, prepend=False) for sentence in cand]
tokenized_cand_prep = [self._encode(sentence, prepend=True) for sentence in cand]
if src is not None:
# Prism-src: score candidate given on source
if len(cand) != len(src):
raise Exception(f'Length of cand ({len(cand)}) does not match length of src ({len(src)})')
tokenized_src = [self._encode(sentence, prepend=False) for sentence in src]
scores = self._score_forward(tokenized_src, tokenized_cand_prep)
if not segment_scores:
scores = np.mean(scores)
return scores
else:
# Prism-ref: average candidate given reference and reference given candidate
if len(cand) != len(ref):
raise Exception(f'Length of cand ({len(cand)}) does not match length of ref ({len(ref)})')
tokenized_ref = [self._encode(sentence, prepend=False) for sentence in ref]
tokenized_ref_prep = [self._encode(sentence, prepend=True) for sentence in ref]
forward_scores = self._score_forward(tok_sents_in=tokenized_ref, tok_sents_out=tokenized_cand_prep)
reverse_scores = self._score_forward(tok_sents_in=tokenized_cand, tok_sents_out=tokenized_ref_prep)
scores = 0.5 * forward_scores + 0.5 * reverse_scores
            if not segment_scores:
                return np.mean(scores)
            # Unlike the source-based branch, the reference-based branch also returns
            # the two per-direction score arrays alongside their average.
            return forward_scores, reverse_scores, scores
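# Illustrative usage sketch (comments only; './models/m39v1' mirrors the model directory used
# elsewhere in this repo and must contain checkpoint.pt, spm.model and the two fairseq dicts):
#
#     prism = Prism(model_dir='./models/m39v1', lang='en')
#     # Prism-ref: candidate and reference are scored in both directions and averaged
#     fwd, rev, avg = prism.score(cand=['Hi, world.'], ref=['Hello, world.'], segment_scores=True)
#     # Prism-src: candidate is scored given the source only
#     src_scores = prism.score(cand=['Hi, world.'], src=['Bonjour le monde.'], segment_scores=True)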
def parse_sacrebleu_uri(uri: str) -> Tuple[str, str]:
"""
Parses the test set and language pair from a URI of the form
sacrebleu:wmt19:de-en
sacrebleu:wmt19/google/ar:de-en
"""
try:
_, testset, langpair = uri.split(":")
except ValueError:
logger.error('sacrebleu:* flags must take the form "sacrebleu:testset:langpair"')
sys.exit(1)
testsets = sorted(DATASETS, reverse=True)
if testset not in testsets:
logger.error(f"Test set '{testset}' was not found. Available sacrebleu test sets are:")
for key in testsets:
logger.error(f" {key:20s}: {DATASETS[key].get('description', '')}")
sys.exit(1)
lang_pairs = get_langpairs_for_testset(testset)
if langpair not in lang_pairs:
logger.error(f"Language pair '{langpair}' not available for testset '{testset}'.\n"
f" Language pairs available for {testset}: {', '.join(lang_pairs)}")
sys.exit(1)
return testset, langpair
def main():
parser = argparse.ArgumentParser(description='Prism: MT metric based on multilingual NMT',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cand', required=False, type=argparse.FileType('rt'), default=sys.stdin,
help='Candidate text file. If not provided, candidates are read from stdin.')
parser.add_argument('--ref', required=False, type=str,
help='Reference text file. If provided, reference-based Prism-ref scores are returned. '
'A value of "sacrebleu:{testset}:{langpair}" will use sacrebleu datasets. '
'You must provide exactly one of --ref or --src. ')
parser.add_argument('--src', required=False, type=str,
help='Source text file. If provided, source-based Prism-src scores are returned. '
'A value of "sacrebleu:{testset}:{langpair}" will use sacrebleu datasets. '
'You must provide exactly one of --ref or --src.')
parser.add_argument('--model-dir', required=True, type=str, help='Model Directory')
parser.add_argument('--lang', type=str, help='2-character language code (ISO 639-1)')
parser.add_argument('--temperature', type=float, default=1.0, help='Softmax temperature: '
'values >1.0 produce more uniform samples and values <1.0 produce sharper samples')
parser.add_argument('--segment-scores', action='store_true',
help='Print per-sentence scores instead of corpus level score')
parser.add_argument('--debug', action='store_true', help='Print debug info')
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
if not (args.ref is None) ^ (args.src is None):
logger.error('You must provide exactly one of --ref or --src')
sys.exit(1)
if args.ref is not None:
if args.ref.startswith('sacrebleu:'):
testset, langpair = parse_sacrebleu_uri(args.ref)
path = get_reference_files(testset, langpair)[0]
args.ref = open(path).readlines()
args.lang = langpair.split("-")[1]
logger.info(f"Scoring against {len(args.ref)}-line {args.lang} reference"
f" from sacrebleu dataset {testset}/{langpair}")
else:
args.ref = open(args.ref, 'rt').readlines()
if args.src is not None:
if args.src.startswith('sacrebleu:'):
testset, langpair = parse_sacrebleu_uri(args.src)
path = get_source_file(testset, langpair)
args.src = open(path).readlines()
args.lang = langpair.split("-")[0]
logger.info(f"Scoring against {len(args.src)}-line {args.lang} source"
f" from sacrebleu dataset {testset}/{langpair}")
else:
args.src = open(args.src, 'rt').readlines()
if args.lang is None:
logger.error("The language must be specified (--lang XX), XX the ISO 639-1 code")
sys.exit(1)
if args.temperature <= 0:
raise Exception('temperature must be > 0')
args.cand = args.cand.readlines()
n_gpus = torch.cuda.device_count()
logging.debug(f'Running on {"GPU" if n_gpus else "CPU"}')
if len(args.cand) > 50 and n_gpus == 0:
logging.warning('Running on CPU is slow...')
prism = Prism(model_dir=args.model_dir, lang=args.lang, temperature=args.temperature)
scores = prism.score(cand=args.cand, ref=args.ref, src=args.src, segment_scores=args.segment_scores)
logger.info(f'Prism identifier: {prism.identifier()}')
if args.segment_scores:
for ss in scores:
print(ss)
else:
print(scores)
class SequenceScorer(object):
"""
Copy of https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_scorer.py
with softmax temperature control added
MIT License
Copyright (c) Facebook, Inc. and its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, tgt_dict, softmax_batch=None, temperature=1.0):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos()
self.softmax_batch = softmax_batch or sys.maxsize
self.temperature = temperature
assert self.softmax_batch > 0
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample['net_input']
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample['target']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample['target'] = tgt
# divide the logits by temperature prior to softmax
# for example, see https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_generator.py:
# decoder_out[0][:, -1:, :].div_(temperature)
bd[0].div_(self.temperature)
curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample['target'] = orig_target
probs = probs.view(sample['target'].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None and torch.is_tensor(attn):
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
if sample['target'] is not None else None
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i],
sample['target'][i], self.pad, self.eos)
else:
avg_attn_i = alignment = None
hypos.append([{
'tokens': ref,
'score': score_i,
'attention': avg_attn_i,
'alignment': alignment,
'positional_scores': avg_probs_i,
}])
return hypos
if __name__ == '__main__':
main()
| BARTScore-main | D2T/prism.py |
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import string
from pyemd import emd
from torch import nn
from math import log
from itertools import chain
from pytorch_pretrained_bert import BertTokenizer, BertModel
from pytorch_pretrained_bert.modeling import BertPreTrainedModel
from collections import defaultdict, Counter
from multiprocessing import Pool
from functools import partial
import os
import sys
import requests
import zipfile
USERHOME = os.path.expanduser("~")
MOVERSCORE_DIR = os.environ.get('MOVERSCORE', os.path.join(USERHOME, '.moverscore'))
MNLI_BERT = 'https://github.com/AIPHES/emnlp19-moverscore/releases/download/0.6/MNLI_BERT.zip'
output_dir = os.path.join(MOVERSCORE_DIR)
def download_MNLI_BERT(url, filename):
with open(filename, 'wb') as f:
response = requests.get(url, stream=True)
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=max(int(total / 1000), 1024 * 1024)):
downloaded += len(data)
f.write(data)
done = int(50 * downloaded / total)
sys.stdout.write('\r[{}{}]'.format('-' * done, '.' * (50 - done)))
sys.stdout.flush()
sys.stdout.write('\n')
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
tarball = os.path.join(output_dir, os.path.basename(MNLI_BERT))
rawdir = os.path.join(output_dir, 'raw')
if not os.path.exists(tarball):
print("Downloading %s to %s" % (MNLI_BERT, tarball))
download_MNLI_BERT(MNLI_BERT, tarball)
if tarball.endswith('.zip'):
z = zipfile.ZipFile(tarball, 'r')
# z.printdir()
z.extractall(output_dir)
z.close()
# device = 'cuda'
# output_dir = "./uncased_L-12_H-768_A-12/mnli/"
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=None):
encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=True)
return encoded_layers, pooled_output
tokenizer = BertTokenizer.from_pretrained(output_dir, do_lower_case=True)
model = BertForSequenceClassification.from_pretrained(output_dir, 3)
model.eval()
# model.to(device)
def truncate(tokens):
if len(tokens) > 510:
tokens = tokens[0:510]
return tokens
def process(a):
a = ["[CLS]"] + truncate(tokenizer.tokenize(a)) + ["[SEP]"]
a = tokenizer.convert_tokens_to_ids(a)
return set(a)
def get_idf_dict(arr, nthreads=4):
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda: log((num_docs + 1) / (1)))
idf_dict.update({idx: log((num_docs + 1) / (c + 1)) for (idx, c) in idf_count.items()})
return idf_dict
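# (Added note) get_idf_dict returns a defaultdict mapping BERT token ids to
# smoothed IDF weights; ids never seen in `arr` fall back to
# log((num_docs + 1) / 1).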
def padding(arr, pad_token, dtype=torch.long):
lens = torch.LongTensor([len(a) for a in arr])
max_len = lens.max().item()
padded = torch.ones(len(arr), max_len, dtype=dtype) * pad_token
mask = torch.zeros(len(arr), max_len, dtype=torch.long)
for i, a in enumerate(arr):
padded[i, :lens[i]] = torch.tensor(a, dtype=dtype)
mask[i, :lens[i]] = 1
return padded, lens, mask
def bert_encode(model, x, attention_mask, device='cuda:0'):
model.eval()
model.to(device)
x_seg = torch.zeros_like(x, dtype=torch.long)
with torch.no_grad():
x_encoded_layers, pooled_output = model(x, x_seg, attention_mask=attention_mask, output_all_encoded_layers=True)
return x_encoded_layers
def collate_idf(arr, tokenize, numericalize, idf_dict,
pad="[PAD]", device='cuda:0'):
tokens = [["[CLS]"] + truncate(tokenize(a)) + ["[SEP]"] for a in arr]
arr = [numericalize(a) for a in tokens]
idf_weights = [[idf_dict[i] for i in a] for a in arr]
pad_token = numericalize([pad])[0]
padded, lens, mask = padding(arr, pad_token, dtype=torch.long)
padded_idf, _, _ = padding(idf_weights, pad_token, dtype=torch.float)
padded = padded.to(device=device)
mask = mask.to(device=device)
lens = lens.to(device=device)
return padded, padded_idf, lens, mask, tokens
def get_bert_embedding(all_sens, model, tokenizer, idf_dict,
batch_size=-1, device='cuda:0'):
padded_sens, padded_idf, lens, mask, tokens = collate_idf(all_sens,
tokenizer.tokenize, tokenizer.convert_tokens_to_ids,
idf_dict,
device=device)
if batch_size == -1: batch_size = len(all_sens)
embeddings = []
with torch.no_grad():
for i in range(0, len(all_sens), batch_size):
batch_embedding = bert_encode(model, padded_sens[i:i + batch_size],
attention_mask=mask[i:i + batch_size],
device=device)
batch_embedding = torch.stack(batch_embedding)
embeddings.append(batch_embedding)
del batch_embedding
total_embedding = torch.cat(embeddings, dim=-3)
return total_embedding, lens, mask, padded_idf, tokens
plus_mask = lambda x, m: x + (1.0 - m).unsqueeze(-1) * 1e30
minus_mask = lambda x, m: x - (1.0 - m).unsqueeze(-1) * 1e30
mul_mask = lambda x, m: x * m.unsqueeze(-1)
masked_reduce_min = lambda x, m: torch.min(plus_mask(x, m), dim=1, out=None)
masked_reduce_max = lambda x, m: torch.max(minus_mask(x, m), dim=1, out=None)
masked_reduce_mean = lambda x, m: mul_mask(x, m).sum(1) / (m.sum(1, keepdim=True) + 1e-10)
masked_reduce_geomean = lambda x, m: np.exp(mul_mask(np.log(x), m).sum(1) / (m.sum(1, keepdim=True) + 1e-10))
idf_reduce_mean = lambda x, m: mul_mask(x, m).sum(1)
idf_reduce_max = lambda x, m, idf: torch.max(mul_mask(minus_mask(x, m), idf), dim=1, out=None)
idf_reduce_min = lambda x, m, idf: torch.min(mul_mask(plus_mask(x, m), idf), dim=1, out=None)
def pairwise_distances(x, y=None):
x_norm = (x ** 2).sum(1).view(-1, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
y_t = torch.transpose(y, 0, 1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
return torch.clamp(dist, 0.0, np.inf)
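# (Added note) pairwise_distances returns *squared* Euclidean distances,
# clamped at zero to absorb small negative values from floating-point error.
# The y=None default is never exercised in this file; both arguments are
# always provided.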
def slide_window(a, w=3, o=2):
if a.size - w + 1 <= 0:
w = a.size
sh = (a.size - w + 1, w)
st = a.strides * 2
view = np.lib.stride_tricks.as_strided(a, strides=st, shape=sh)[0::o]
return view.copy().tolist()
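# Illustrative example (added comment), using the default window w=3, offset o=2:
#   slide_window(np.array([1, 2, 3, 4, 5]), w=3, o=2)
#   -> [[1, 2, 3], [3, 4, 5]]
# If the sequence is shorter than w, w is clamped to a.size and the whole
# sequence is returned as a single window.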
def _safe_divide(numerator, denominator):
return numerator / (denominator + 0.00001)
def load_ngram(ids, embedding, idf, n, o, device='cuda:0'):
new_a = []
new_idf = []
slide_wins = slide_window(np.array(ids), w=n, o=o)
for slide_win in slide_wins:
new_idf.append(idf[slide_win].sum().item())
scale = _safe_divide(idf[slide_win], idf[slide_win].sum(0)).unsqueeze(-1).to(device)
tmp = (scale * embedding[slide_win]).sum(0)
new_a.append(tmp)
new_a = torch.stack(new_a, 0).to(device)
return new_a, new_idf
def word_mover_score(refs, hyps, idf_dict_ref, idf_dict_hyp, stop_words=[], n_gram=1, remove_subwords=True,
batch_size=256, device='cuda:0'):
preds = []
for batch_start in range(0, len(refs), batch_size):
batch_refs = refs[batch_start:batch_start + batch_size]
batch_hyps = hyps[batch_start:batch_start + batch_size]
ref_embedding, ref_lens, ref_masks, ref_idf, ref_tokens = get_bert_embedding(batch_refs, model, tokenizer,
idf_dict_ref,
device=device)
hyp_embedding, hyp_lens, hyp_masks, hyp_idf, hyp_tokens = get_bert_embedding(batch_hyps, model, tokenizer,
idf_dict_hyp,
device=device)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
ref_embedding_max, _ = torch.max(ref_embedding[-5:], dim=0, out=None)
hyp_embedding_max, _ = torch.max(hyp_embedding[-5:], dim=0, out=None)
ref_embedding_min, _ = torch.min(ref_embedding[-5:], dim=0, out=None)
hyp_embedding_min, _ = torch.min(hyp_embedding[-5:], dim=0, out=None)
ref_embedding_avg = ref_embedding[-5:].mean(0)
hyp_embedding_avg = hyp_embedding[-5:].mean(0)
ref_embedding = torch.cat([ref_embedding_min, ref_embedding_avg, ref_embedding_max], -1)
hyp_embedding = torch.cat([hyp_embedding_min, hyp_embedding_avg, hyp_embedding_max], -1)
for i in range(len(ref_tokens)):
if remove_subwords:
ref_ids = [k for k, w in enumerate(ref_tokens[i]) if
w not in set(string.punctuation) and '##' not in w and w not in stop_words]
hyp_ids = [k for k, w in enumerate(hyp_tokens[i]) if
w not in set(string.punctuation) and '##' not in w and w not in stop_words]
else:
ref_ids = [k for k, w in enumerate(ref_tokens[i]) if
w not in set(string.punctuation) and w not in stop_words]
hyp_ids = [k for k, w in enumerate(hyp_tokens[i]) if
w not in set(string.punctuation) and w not in stop_words]
ref_embedding_i, ref_idf_i = load_ngram(ref_ids, ref_embedding[i], ref_idf[i], n_gram, 1, device=device)
hyp_embedding_i, hyp_idf_i = load_ngram(hyp_ids, hyp_embedding[i], hyp_idf[i], n_gram, 1, device=device)
raw = torch.cat([ref_embedding_i, hyp_embedding_i], 0)
raw.div_(torch.norm(raw, dim=-1).unsqueeze(-1) + 0.000001)
distance_matrix = pairwise_distances(raw, raw)
c1 = np.zeros(len(ref_idf_i) + len(hyp_idf_i), dtype=np.double)
c2 = np.zeros(len(ref_idf_i) + len(hyp_idf_i), dtype=np.double)
c1[:len(ref_idf_i)] = ref_idf_i
c2[-len(hyp_idf_i):] = hyp_idf_i
c1 = _safe_divide(c1, np.sum(c1))
c2 = _safe_divide(c2, np.sum(c2))
score = 1 - emd(c1, c2, distance_matrix.double().cpu().numpy())
preds.append(score)
return preds
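# Minimal usage sketch (illustrative, not part of the original file). It
# assumes the MNLI-finetuned BERT checkpoint above has already been downloaded
# into MOVERSCORE_DIR and that a CUDA device is available:
#
#   refs = ['The cat sat on the mat .']
#   hyps = ['A cat is sitting on the mat .']
#   idf_ref = get_idf_dict(refs)
#   idf_hyp = get_idf_dict(hyps)
#   scores = word_mover_score(refs, hyps, idf_ref, idf_hyp,
#                             stop_words=[], n_gram=1, device='cuda:0')
#   # scores[i] = 1 - EMD between the i-th reference and hypothesis,
#   # so higher means more similar.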
| BARTScore-main | D2T/moverscore.py |
import os
import pickle
import sys
import nltk
from mosestokenizer import *
from nltk import word_tokenize
from nltk.tokenize import sent_tokenize
nltk.download('stopwords')
detokenizer = MosesDetokenizer('en')
def read_file_to_list(file_name):
lines = []
with open(file_name, 'r', encoding='utf8') as f:
for line in f.readlines():
lines.append(line.strip())
return lines
def write_list_to_file(list_to_write, filename):
out_file = open(filename, 'w')
for line in list_to_write:
print(line, file=out_file)
out_file.flush()
out_file.close()
print(f'Saved to {filename}.')
def read_pickle(file):
with open(file, 'rb') as f:
data = pickle.load(f)
return data
def save_pickle(data, file):
with open(file, 'wb') as f:
pickle.dump(data, f)
print(f'Saved to {file}.')
def capitalize_sents(text: str):
""" Given a string, capitalize the initial letter of each sentence. """
sentences = sent_tokenize(text)
sentences = [sent.strip() for sent in sentences]
sentences = [sent.capitalize() for sent in sentences]
sentences = " ".join(sentences)
return sentences
def is_capitalized(text: str):
""" Given a string (system output etc.) , check whether it is lowercased,
or normally capitalized.
"""
return not text.islower()
def tokenize(text: str):
words = word_tokenize(text)
return " ".join(words)
def detokenize(text: str):
words = text.split(" ")
return detokenizer(words)
def use_original_bracket(text: str):
return text.replace('-lrb-', '(').replace('-rrb-', ')').replace('-LRB-', '(').replace('-RRB-', ')').replace('-lsb-',
'[').replace(
'-rsb-', ']').replace('-LSB-', '[').replace('-RSB-', ']')
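# Illustrative example (added comment):
#   use_original_bracket('-lrb- 2020 -rrb-') -> '( 2020 )'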
# Disable print
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore print
def enablePrint():
sys.stdout = sys.__stdout__
| BARTScore-main | D2T/utils.py |
import torch
import torch.nn as nn
import traceback
from transformers import BartTokenizer, BartForConditionalGeneration
class BARTScorer:
def __init__(self, device='cuda:0', max_length=1024, checkpoint='facebook/bart-large-cnn'):
# Set up model
self.device = device
self.max_length = max_length
self.tokenizer = BartTokenizer.from_pretrained(checkpoint)
self.model = BartForConditionalGeneration.from_pretrained(checkpoint)
self.model.eval()
self.model.to(device)
# Set up loss
self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id)
self.lsm = nn.LogSoftmax(dim=1)
def load(self):
""" Load model from paraphrase finetuning """
self.model.load_state_dict(torch.load('models/bart.pth', map_location=self.device))
def score(self, srcs, tgts, batch_size):
""" Score a batch of examples """
score_list = []
for i in range(0, len(srcs), batch_size):
src_list = srcs[i: i + batch_size]
tgt_list = tgts[i: i + batch_size]
try:
with torch.no_grad():
encoded_src = self.tokenizer(
src_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
encoded_tgt = self.tokenizer(
tgt_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
src_tokens = encoded_src['input_ids'].to(self.device)
src_mask = encoded_src['attention_mask'].to(self.device)
tgt_tokens = encoded_tgt['input_ids'].to(self.device)
tgt_mask = encoded_tgt['attention_mask']
tgt_len = tgt_mask.sum(dim=1).to(self.device)
output = self.model(
input_ids=src_tokens,
attention_mask=src_mask,
labels=tgt_tokens
)
logits = output.logits.view(-1, self.model.config.vocab_size)
loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1))
loss = loss.view(tgt_tokens.shape[0], -1)
loss = loss.sum(dim=1) / tgt_len
curr_score_list = [-x.item() for x in loss]
score_list += curr_score_list
except RuntimeError:
traceback.print_exc()
print(f'source: {src_list}')
print(f'target: {tgt_list}')
exit(0)
return score_list
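# Minimal usage sketch (illustrative, not part of the original file); the
# checkpoint and inputs below are placeholders:
#
#   scorer = BARTScorer(device='cuda:0', checkpoint='facebook/bart-large-cnn')
#   # scorer.load()  # optionally load the paraphrase-finetuned weights (models/bart.pth)
#   scores = scorer.score(srcs=['This is a source text.'],
#                         tgts=['This is a target text.'],
#                         batch_size=4)
#   # Each score is the average per-token log-likelihood of the target given
#   # the source (a negative number; closer to zero is better).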
| BARTScore-main | D2T/bart_score.py |
import argparse
import os
import time
import numpy as np
from utils import *
SRC_HYPO = read_file_to_list('files/src_hypo_prompt.txt')
REF_HYPO = read_file_to_list('files/ref_hypo_prompt.txt')
class Scorer:
""" Support ROUGE-1,2,L, BERTScore, MoverScore, PRISM, BARTScore """
def __init__(self, file_path, device='cuda:0'):
""" file_path: path to the pickle file
All the data are normally capitalized and tokenized, including ref_summs and sys_summ.
"""
self.device = device
self.data = read_pickle(file_path)
print(f'Data loaded from {file_path}.')
def save_data(self, path):
save_pickle(self.data, path)
def score(self, metrics):
""" metrics: list of metrics """
for metric_name in metrics:
if metric_name == 'bert_score':
from bert_score import BERTScorer
# Set up BERTScore
bert_scorer = BERTScorer(
lang='en',
idf=False,
rescale_with_baseline=True,
device=self.device
)
print(f'BERTScore setup finished. Begin calculating BERTScore.')
start = time.time()
for doc_id in self.data:
ref_summs = self.data[doc_id]['ref_summs']
sys_summ = self.data[doc_id]['sys_summ']
P, R, F = bert_scorer.score([sys_summ] * len(ref_summs), ref_summs)
P = P.max().item()
R = R.max().item()
F = F.max().item()
self.data[doc_id]['scores']['bert_score_p'] = P
self.data[doc_id]['scores']['bert_score_r'] = R
self.data[doc_id]['scores']['bert_score_f'] = F
print(f'Finished calculating BERTScore, time passed {time.time() - start}s.')
elif metric_name == 'mover_score':
from moverscore import word_mover_score, get_idf_dict
# Set up MoverScore
self.stop_words = []
ref_lines = []
for doc_id in self.data:
ref_lines.extend(self.data[doc_id]['ref_summs'])
ref_lines = list(set(ref_lines))
self.idf_refs = get_idf_dict(ref_lines)
# IDF for all system hypos, used for MoverScore
sys_lines = []
for doc_id in self.data:
sys_summ = self.data[doc_id]['sys_summ']
sys_lines.append(sys_summ)
self.idf_hyps = get_idf_dict(sys_lines)
print(f'MoverScore setup finished. Begin calculating MoverScore.')
start = time.time()
for doc_id in self.data:
ref_summs = self.data[doc_id]['ref_summs']
sys_summ = self.data[doc_id]['sys_summ']
scores = word_mover_score(ref_summs, [sys_summ] * len(ref_summs), self.idf_refs, self.idf_hyps,
self.stop_words,
n_gram=1, remove_subwords=True, batch_size=48, device=self.device)
score = max(scores)
self.data[doc_id]['scores']['mover_score'] = score
print(f'Finished calculating MoverScore, time passed {time.time() - start}s.')
elif metric_name == 'rouge':
from gehrmann_rouge_opennmt.rouge_baselines.baseline import baseline_main
def rouge(dic):
""" Get r, p, f scores """
r1_, r2_, rl_ = [], [], []
for k in dic:
r1_.append([dic[k]['rouge_1_recall'], dic[k]['rouge_1_precision'], dic[k]['rouge_1_f_score']])
r2_.append([dic[k]['rouge_2_recall'], dic[k]['rouge_2_precision'], dic[k]['rouge_2_f_score']])
rl_.append([dic[k]['rouge_l_recall'], dic[k]['rouge_l_precision'], dic[k]['rouge_l_f_score']])
return r1_, r2_, rl_
print(f'Begin calculating ROUGE.')
start = time.time()
blockPrint()
for doc_id in self.data:
ref_summs = self.data[doc_id]['ref_summs']
sys_summ = self.data[doc_id]['sys_summ']
sys_summ = sys_summ.lower()
write_list_to_file([sys_summ] * len(ref_summs), 'hypo.txt')
write_list_to_file(ref_summs, 'ref.txt')
args = argparse.Namespace(check_repeats=True, delete=True, get_each_score=True, stemming=True,
method='sent_no_tag', n_bootstrap=1000, run_google_rouge=False,
run_rouge=True, source='./hypo.txt', target='./ref.txt',
ref_sep='||NEVER||', num_ref=1, temp_dir='./temp/')
scores = baseline_main(args, return_pyrouge_scores=True)['individual_score_results']
r1, r2, rl = rouge(scores)
r1 = np.max(r1, axis=0)
r2 = np.max(r2, axis=0)
rl = np.max(rl, axis=0)
self.data[doc_id]['scores']['rouge1_r'] = r1[0]
self.data[doc_id]['scores']['rouge1_p'] = r1[1]
self.data[doc_id]['scores']['rouge1_f'] = r1[2]
self.data[doc_id]['scores']['rouge2_r'] = r2[0]
self.data[doc_id]['scores']['rouge2_p'] = r2[1]
self.data[doc_id]['scores']['rouge2_f'] = r2[2]
self.data[doc_id]['scores']['rougel_r'] = rl[0]
self.data[doc_id]['scores']['rougel_p'] = rl[1]
self.data[doc_id]['scores']['rougel_f'] = rl[2]
enablePrint()
os.system('rm -rf hypo.txt ref.txt saved_out.txt')
print(f'Finished calculating ROUGE, time passed {time.time() - start}s.')
elif metric_name == 'prism':
from prism import Prism
# Set up Prism
self.prism = Prism(model_dir='./models/m39v1/', lang='en')
print(f'PRISM setup finished. Begin calculating PRISM.')
start = time.time()
for doc_id in self.data:
ref_summs = self.data[doc_id]['ref_summs']
ref_summs = [detokenize(line) for line in ref_summs]
sys_summ = detokenize(self.data[doc_id]['sys_summ'])
ref_hypo_scores, hypo_ref_scores, scores = self.prism.score(cand=[sys_summ] * len(ref_summs),
ref=ref_summs,
segment_scores=True)
ref_hypo, hypo_ref, score = max(ref_hypo_scores), max(hypo_ref_scores), max(scores)
self.data[doc_id]['scores']['prism_ref_hypo'] = ref_hypo
self.data[doc_id]['scores']['prism_hypo_ref'] = hypo_ref
self.data[doc_id]['scores']['prism_avg'] = score
print(f'Finished calculating PRISM, time passed {time.time() - start}s.')
elif metric_name == 'bart_score' or metric_name == 'bart_score_cnn' or metric_name == 'bart_score_para':
""" Vanilla BARTScore, BARTScore-CNN, BARTScore-CNN-Para """
from bart_score import BARTScorer
# Set up BARTScore
if 'cnn' in metric_name:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
elif 'para' in metric_name:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
bart_scorer.load()
else:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large')
print(f'BARTScore setup finished. Begin calculating BARTScore.')
start = time.time()
for doc_id in self.data:
ref_summs = self.data[doc_id]['ref_summs']
ref_summs = [detokenize(line) for line in ref_summs]
sys_summ = detokenize(self.data[doc_id]['sys_summ'])
ref_hypo_scores = np.array(bart_scorer.score(ref_summs, [sys_summ] * len(ref_summs), batch_size=4))
hypo_ref_scores = np.array(bart_scorer.score([sys_summ] * len(ref_summs), ref_summs, batch_size=4))
ref_hypo = ref_hypo_scores.max()
hypo_ref = hypo_ref_scores.max()
avg_f = (0.5 * (ref_hypo_scores + hypo_ref_scores)).max()
harm_f = (ref_hypo_scores * hypo_ref_scores / (ref_hypo_scores + hypo_ref_scores)).max()
self.data[doc_id]['scores'][f'{metric_name}_ref_hypo'] = ref_hypo
self.data[doc_id]['scores'][f'{metric_name}_hypo_ref'] = hypo_ref
self.data[doc_id]['scores'][f'{metric_name}_avg_f'] = avg_f
self.data[doc_id]['scores'][f'{metric_name}_harm_f'] = harm_f
print(f'Finished calculating BARTScore, time passed {time.time() - start}s.')
elif metric_name.startswith('prompt'):
""" BARTScore adding prompts """
from bart_score import BARTScorer
def prefix_prompt(l, p):
new_l = []
for x in l:
new_l.append(p + ', ' + x)
return new_l
def suffix_prompt(l, p):
new_l = []
for x in l:
new_l.append(x + ' ' + p + ',')
return new_l
if 'cnn' in metric_name:
name = 'bart_score_cnn'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
elif 'para' in metric_name:
name = 'bart_score_para'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
bart_scorer.load()
else:
name = 'bart_score'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large')
print(f'BARTScore-P setup finished. Begin calculating BARTScore-P.')
start = time.time()
for doc_id in self.data:
ref_summs = self.data[doc_id]['ref_summs']
ref_summs = [detokenize(line) for line in ref_summs]
sys_summ = detokenize(self.data[doc_id]['sys_summ'])
sys_summs = [sys_summ] * len(ref_summs)
for prompt in REF_HYPO:
ref_hypo_scores_en = np.array(
bart_scorer.score(suffix_prompt(ref_summs, prompt), sys_summs, batch_size=4))
hypo_ref_scores_en = np.array(
bart_scorer.score(suffix_prompt(sys_summs, prompt), ref_summs, batch_size=4))
ref_hypo_scores_de = np.array(
bart_scorer.score(ref_summs, prefix_prompt(sys_summs, prompt), batch_size=4))
hypo_ref_scores_de = np.array(
bart_scorer.score(sys_summs, prefix_prompt(ref_summs, prompt), batch_size=4))
ref_hypo_en = ref_hypo_scores_en.max()
hypo_ref_en = hypo_ref_scores_en.max()
avg_f_en = (0.5 * (ref_hypo_scores_en + hypo_ref_scores_en)).max()
harm_f_en = (ref_hypo_scores_en * hypo_ref_scores_en / (
ref_hypo_scores_en + hypo_ref_scores_en)).max()
ref_hypo_de = ref_hypo_scores_de.max()
hypo_ref_de = hypo_ref_scores_de.max()
avg_f_de = (0.5 * (ref_hypo_scores_de + hypo_ref_scores_de)).max()
harm_f_de = (ref_hypo_scores_de * hypo_ref_scores_de / (
ref_hypo_scores_de + hypo_ref_scores_de)).max()
self.data[doc_id]['scores'][f'{name}_ref_hypo_en_{prompt}'] = ref_hypo_en
self.data[doc_id]['scores'][f'{name}_hypo_ref_en_{prompt}'] = hypo_ref_en
self.data[doc_id]['scores'][f'{name}_avg_f_en_{prompt}'] = avg_f_en
self.data[doc_id]['scores'][f'{name}_harm_f_en_{prompt}'] = harm_f_en
self.data[doc_id]['scores'][f'{name}_ref_hypo_de_{prompt}'] = ref_hypo_de
self.data[doc_id]['scores'][f'{name}_hypo_ref_de_{prompt}'] = hypo_ref_de
self.data[doc_id]['scores'][f'{name}_avg_f_de_{prompt}'] = avg_f_de
self.data[doc_id]['scores'][f'{name}_harm_f_de_{prompt}'] = harm_f_de
print(f'Finished calculating BARTScore, time passed {time.time() - start}s.')
else:
raise NotImplementedError
def main():
parser = argparse.ArgumentParser(description='Scorer parameters')
parser.add_argument('--file', type=str, required=True,
help='The data to load from.')
parser.add_argument('--device', type=str, default='cuda:0',
help='The device to run on.')
parser.add_argument('--output', type=str, required=True,
help='The output path to save the calculated scores.')
parser.add_argument('--bert_score', action='store_true', default=False,
help='Whether to calculate BERTScore')
parser.add_argument('--mover_score', action='store_true', default=False,
help='Whether to calculate MoverScore')
parser.add_argument('--rouge', action='store_true', default=False,
help='Whether to calculate ROUGE')
parser.add_argument('--bart_score', action='store_true', default=False,
help='Whether to calculate BARTScore')
parser.add_argument('--bart_score_cnn', action='store_true', default=False,
help='Whether to calculate BARTScore-CNN')
parser.add_argument('--bart_score_para', action='store_true', default=False,
help='Whether to calculate BARTScore-Para')
parser.add_argument('--prism', action='store_true', default=False,
help='Whether to calculate PRISM')
parser.add_argument('--prompt', type=str, default=None,
help='Whether to calculate BARTScore-P. Can be bart_ref, '
'bart_cnn_ref, bart_para_ref')
args = parser.parse_args()
scorer = Scorer(args.file, args.device)
METRICS = []
if args.bert_score:
METRICS.append('bert_score')
if args.mover_score:
METRICS.append('mover_score')
if args.rouge:
METRICS.append('rouge')
if args.bart_score:
METRICS.append('bart_score')
if args.bart_score_cnn:
METRICS.append('bart_score_cnn')
if args.bart_score_para:
METRICS.append('bart_score_para')
if args.prism:
METRICS.append('prism')
if args.prompt is not None:
prompt = args.prompt
assert prompt in ['bart_ref', 'bart_cnn_ref', 'bart_para_ref']
METRICS.append(f'prompt_{prompt}')
scorer.score(METRICS)
scorer.save_data(args.output)
if __name__ == '__main__':
main()
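# Example invocation (illustrative; the file names are placeholders):
#   python score.py --file data.pkl --output data_scored.pkl \
#       --bart_score_cnn --rouge --device cuda:0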
| BARTScore-main | D2T/score.py |
| BARTScore-main | D2T/gehrmann_rouge_opennmt/__init__.py |
#!/usr/bin/env python
from __future__ import print_function, division
import argparse, os, re, time
import pdb
from gehrmann_rouge_opennmt.rouge_baselines.g_rouge import rouge
from gehrmann_rouge_opennmt.rouge_baselines.util import has_repeat, n_grams
from functools import reduce
import numpy as np
def split_sentences(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
bare_sents = re.findall(r'%s (.+?) %s' % (sentence_start_tag, sentence_end_tag), article)
return bare_sents
# convenient decorator
def register_to_registry(registry):
def _register(func):
registry[func.__name__] = func
return func
return _register
baseline_registry = {}
register = register_to_registry(baseline_registry)
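# (Added note) every @register-decorated function below is stored in
# baseline_registry under its function name and becomes selectable from the
# command line via `--method <function_name>`, e.g. `--method sent_no_tag`.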
# baseline methods
@register
def first_sentence(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
''' use sentence tags to output the first sentence of an article as its summary. '''
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[:1]
@register
def first_three_sentences(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[:3]
@register
def first_two_sentences(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[:2]
@register
def verbatim(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents
@register
def pre_sent_tag_verbatim(article):
sents = article.split('<t>')
good_sents = []
for sent in sents:
sent = sent.strip()
if len(sent.split()) > 0:
good_sents.append(sent)
# print(good_sents)
return good_sents
@register
def sent_tag_verbatim(article):
sents = split_sentences(article, '<t>', '</t>')
# print(sents)
return sents
@register
def sent_no_tag(article, eos='.'):
sents = article.split(" %s " % eos)
sents = [sent + " ." for sent in sents]
return sents
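# Illustrative example (added comment), with the default eos='.':
#   sent_no_tag('the cat sat . the dog ran')
#   -> ['the cat sat .', 'the dog ran .']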
@register
def sent_tag_p_verbatim(article):
bare_article = article.strip()
bare_article += ' </t>'
sents = split_sentences(bare_article, '<t>', '</t>')
# print(sents)
return sents
@register
def adhoc_old0(article):
sents = split_sentences(article, '<t>', '</t>')
good_sents = []
for sent in sents:
# Remove <unk>
tokens = [x for x in sent.split() if x != '<unk>']
# Ignore length-1 sentences
if len(tokens) > 1:
good_sents.append(' '.join(tokens))
return good_sents
@register
def full(article):
return [article]
@register
def adhoc_base(article):
article += ' </t> </t>'
first_end = article.index(' </t> </t>')
article = article[:first_end] + ' </t>'
sents = split_sentences(article)
good_sents = []
for sent in sents:
# Remove <unk>
tokens = [x for x in sent.split() if x != '<unk>']
# Ignore length-1 sentences
if len(tokens) > 1:
good_sents.append(' '.join(tokens))
return good_sents
@register
def no_sent_tag(article):
article = article.strip()
# Ensure the article ends with a period (also covers the empty-article case)
if not article.endswith('.'):
article += ' .'
good_sents = list(re.findall(r'.+?\.', article))
return good_sents
@register
def second_sentence(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[1:2]
def baseline_main(args, return_pyrouge_scores=False):
# Check the presence of target file
if args.run_rouge or args.run_google_rouge:
assert args.target is not None, 'Need the path to target file `--target` for ROUGE evaluations.'
process = baseline_registry[args.method]
# Read and preprocess generated summary
n_source = 0
references = []
summaries = []
with open(args.source, 'r') as f:
for i, article in enumerate(f):
summary = process(article)
summaries.append(summary)
n_source += 1
mean_num_sent_per_summ = np.mean([len(summ) for summ in summaries])
assert mean_num_sent_per_summ > 0, "Expect to read > 0 sentences per summary!"
# Read and preprocess a single candidate reference summary for each example
if args.run_rouge or args.run_google_rouge:
n_target = 0
with open(args.target, 'r') as f:
for i, article in enumerate(f):
# For us, method is 'sent_tag_verbatim'
if args.ref_sep: # pgour added this to handle multiple reference texts
# pdb.set_trace()
raw_candidates_l = article.split(args.ref_sep)
candidates_l = []
for raw_candidate in raw_candidates_l:
if args.method == "full":
candidate = [raw_candidate]
else:
candidate = sent_no_tag(raw_candidate)
candidates_l.append(candidate)
assert len(candidates_l) == args.num_ref, f"len(candidates_l) {len(candidates_l)} mismatches " \
f"args.num_ref {args.num_ref}"
references.append(candidates_l)
n_target += 1
else:
if args.method == "full":
candidate = [article]
else:
candidate = sent_no_tag(article)
references.append([candidate])
n_target += 1
# pdb.set_trace()
mean_num_sent_per_ref = np.mean([len(candidate[0]) for candidate in references])
assert mean_num_sent_per_ref > 0, "Expect to read > 0 sentences per reference summary!"
# logger.info(f"read {mean_num_sent_per_summ:.2f} and {mean_num_sent_per_ref:.2f} sentences on average per "
# f"generated and system summary.")
assert n_source == n_target, 'Source and target must have the same number of samples.'
# Run official ROUGE evaluation
if args.run_rouge:
# logger.info("getting rouge")
from gehrmann_rouge_opennmt.rouge_baselines.util import evaluate_rouge
rouge_args = [
'-c', 95, # 95% confidence intervals, necessary for the dictionary conversion routine
'-n', 2, # up to bigram
'-a',
'-r', args.n_bootstrap, # the number of bootstrap samples for confidence bounds
]
# if args.stemming:
# # add the stemming flag
# rouge_args += ['-m']
if args.get_each_score:
# add the 'per-evaluation scores' flag
rouge_args += ['-d']
# evaluate with official ROUGE script v1.5.5
scores = evaluate_rouge(summaries, references, remove_temp=args.delete, rouge_args=rouge_args,
get_each_score=args.get_each_score, temp_dir=args.temp_dir)
if return_pyrouge_scores:
# We always return from here, below this line is not important
return scores
# Run Google's ROUGE evaluation. Not used by us.
if args.run_google_rouge:
# Based on https://github.com/google/seq2seq, modified to support multi-sentence summaries
t0 = time.time()
g_scores = rouge(summaries, [candidates[0] for candidates in references])
dt = time.time() - t0
g_headers = ['rouge_1/r_score', 'rouge_1/p_score', 'rouge_1/f_score', 'rouge_2/r_score', 'rouge_2/p_score',
'rouge_2/f_score', 'rouge_l/r_score', 'rouge_l/p_score', 'rouge_l/f_score']
print('* evaluated {} samples, took {:.3f}s, averaging {:.3f}s/sample'.format(n_target, dt, dt / n_target))
# Evaluate self-repetitions
if args.check_repeats:
t0 = time.time()
# Counts
n_sent_repeats = 0
ngram_repeats = {2: 0, 4: 0, 8: 0, 16: 0, 32: 0}
for summary in summaries:
# Sentence-level repeats
# Count of samples containing self-repetitions of a full sentence
n_sent_repeats += has_repeat(summary)
# N-gram repeats
for n in ngram_repeats.keys():
# Respect sentence boundary
grams = reduce(lambda x, y: x + y, [n_grams(sent.split(), n) for sent in summary], [])
ngram_repeats[n] += has_repeat(grams)
dt = time.time() - t0
print('* portion of samples that contains self-repetitions')
# Sort the statistics by importance
str_keys = ['full-sent'] + list(map(lambda n: '%d-gram' % n, sorted(ngram_repeats.keys(), reverse=True)))
print(','.join(str_keys))
print("{:.2f}%".format(n_sent_repeats / n_source * 100), end=',\t')
for n in sorted(ngram_repeats.keys(), reverse=True):
print("{:.2f}%".format(ngram_repeats[n] / n_source * 100), end=',\t')
print()
print('* evaluated {} samples, took {:.3f}s, averaging {:.3f}s/sample'.format(n_source, dt, dt / n_source))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source', required=True,
help='Path to the tokenized source file. One sample per line with sentence tags.')
parser.add_argument('-t', '--target', required=False,
help='Path to the tokenized target file. One sample per line with sentence tags.')
parser.add_argument('-m', '--method', default='first_sentence', choices=baseline_registry.keys(),
help='Baseline method to use.')
parser.add_argument('-d', '--delete', action='store_true',
help='Delete the temporary files created during evaluation.')
parser.add_argument('-g', '--google', dest='run_google_rouge', action='store_true',
help='Evaluate with the ROUGE implementation from google/seq2seq.')
parser.add_argument('--no-rouge', dest='run_rouge', action='store_false', help='Skip ROUGE evaluation.')
parser.add_argument('-r', '--check-repeats', action='store_true', help='Evaluate self repeats.')
parser.add_argument('--ref_sep', type=str, default=None, help='if there are multiple references per '
'line in ref file, they are separated by this separator.') # pgour added
parser.add_argument('--num_ref', type=int, default=1,
help='number of ref summaries for each doc (per line in file)')
# ROUGE arguments
parser.add_argument('--no-stemming', dest='stemming', action='store_false', help='Turn off stemming in ROUGE.')
parser.add_argument('--n-bootstrap', type=int, default=1000, help='The number of bootstrap samples used in ROUGE.')
parser.add_argument('--get_each_score', action='store_true', help='produce separate score of each document-summary')
args = parser.parse_args()
# pgour: sanity check
if args.num_ref != 1:
assert (args.ref_sep is not None), "if more than 1 ref per summary, expected a --ref_sep"
baseline_main(args)
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/baseline.py |
from __future__ import print_function
import pdb
from six.moves import xrange
# from pyrouge import Rouge155
from gehrmann_rouge_opennmt.rouge_baselines.Rouge155 import Rouge155
import tempfile, os, glob, shutil
import numpy as np
import random
def evaluate_rouge(summaries, references, remove_temp=False, rouge_args=[], get_each_score=False, temp_dir=None):
'''
Args:
summaries: [[sentence]]. Each summary is a list of strings (sentences)
references: [[[sentence]]]. Each reference is a list of candidate summaries.
remove_temp: bool. Whether to remove the temporary files created during evaluation.
rouge_args: [string]. A list of arguments to pass to the ROUGE CLI.
'''
# temp_dir = tempfile.mkdtemp()
rand_dir_name = str(random.randint(0, 1000000))
while os.path.exists(os.path.join(temp_dir, rand_dir_name)):
rand_dir_name = str(random.randint(0, 1000000))
temp_dir = os.path.join(temp_dir, rand_dir_name)
system_dir = os.path.join(temp_dir, 'system')
model_dir = os.path.join(temp_dir, 'model')
# directory for generated summaries
os.makedirs(system_dir)
# directory for reference summaries
os.makedirs(model_dir)
print(temp_dir, system_dir, model_dir)
# pdb.set_trace()
assert len(summaries) == len(references)
for i, (summary, candidates) in enumerate(zip(summaries, references)):
summary_fn = '%i.txt' % i
for j, candidate in enumerate(candidates):
candidate_fn = '%i.%i.txt' % (i, j)
with open(os.path.join(model_dir, candidate_fn), 'w') as f:
f.write('\n'.join(candidate))
with open(os.path.join(system_dir, summary_fn), 'w') as f:
f.write('\n'.join(summary))
args_str = ' '.join(map(str, rouge_args))
rouge = Rouge155(rouge_args=args_str)
rouge.system_dir = system_dir
rouge.model_dir = model_dir
rouge.system_filename_pattern = r'(\d+).txt'
rouge.model_filename_pattern = r'#ID#.\d+.txt'
output = rouge.convert_and_evaluate()
r = rouge.output_to_dict(output, get_each_score=get_each_score)
# remove the created temporary files
if remove_temp:
shutil.rmtree(temp_dir)
return r
def n_grams(tokens, n):
l = len(tokens)
return [tuple(tokens[i:i + n]) for i in xrange(l) if i + n <= l]  # <= so the final n-gram is included
def has_repeat(elements):
d = set(elements)
return len(d) < len(elements)
if __name__ == '__main__':
article = [
u"marseille prosecutor says `` so far no videos were used in the crash investigation '' despite media reports .",
u"journalists at bild and paris match are `` very confident '' the video clip is real , an editor says .",
u'andreas lubitz had informed his lufthansa training school of an episode of severe depression , airline says .',
]
candidates = [article]
references = [candidates]
summaries = [article]
rouge_args = [
'-c', 95,
'-U',
'-r', 1,
'-n', 2,
'-a',
]
print(evaluate_rouge(summaries, references, True, rouge_args))
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/util.py |
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/__init__.py |
from __future__ import print_function, unicode_literals, division
import os
import pdb
import re
import codecs
import platform
from subprocess import check_output
from tempfile import mkdtemp
from functools import partial
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from gehrmann_rouge_opennmt.rouge_baselines.pyrouge.pyrouge.utils import log
from gehrmann_rouge_opennmt.rouge_baselines.pyrouge.pyrouge.utils.file_utils import DirectoryProcessor
from gehrmann_rouge_opennmt.rouge_baselines.pyrouge.pyrouge.utils.file_utils import verify_dir
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames, key=lambda x: int(x.split('.')[0])):
# pdb.set_trace()
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
# self.log.info(
# "Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = os.environ.copy()
if hasattr(self, "_home_dir") and self._home_dir:
env['ROUGE_EVAL_HOME'] = self._home_dir
# self.log.info(
# "Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output, get_each_score):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
individual_score_pattern = re.compile(
r"(\d+) (ROUGE-\S+) Eval (\d+).1 R:(\d.\d+) P:(\d.\d+) F:(\d.\d+)"
)
results = {}
if get_each_score:
results['individual_score_results'] = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
if get_each_score:
individual_score_match = individual_score_pattern.match(line)
if individual_score_match:
sys_id, rouge_type, eval_id, recall, precision, f_score = individual_score_match.groups()
eval_id = int(eval_id)
eval_id = eval_id - 1 # IMPORTANT: pyrouge returns doc_ids starting from 1, we want them to start at 0
rouge_type = rouge_type.lower().replace("-", '_')
if eval_id not in results['individual_score_results'].keys():
results['individual_score_results'][eval_id] = {}
results['individual_score_results'][eval_id][f'{rouge_type}_recall'] = float(recall)
results['individual_score_results'][eval_id][f'{rouge_type}_precision'] = float(precision)
results['individual_score_results'][eval_id][f'{rouge_type}_f_score'] = float(f_score)
return results
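# (Added note) the returned dict holds aggregate keys such as
# 'rouge_1_f_score' plus '_cb'/'_ce' confidence bounds, and, when
# get_each_score is True, per-document scores under
# results['individual_score_results'][doc_id] with doc_id starting at 0.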
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
# self.log.info(
# "Processing summaries. Saving system files to {} and "
# "model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
# self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
from utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/Rouge155.py |
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROUGe metric implementation.
This is a modified and slightly extended verison of
https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import numpy as np
#pylint: disable=C0103
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _split_into_words(sentences):
"""Splits multiple sentences into words and flattens the result"""
return list(itertools.chain(*[_.split() for _ in sentences]))
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
words = _split_into_words(sentences)
return _get_ngrams(n, words)
def _len_lcs(x, y):
"""
Returns the length of the Longest Common Subsequence between sequences x
and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""
Computes the length of the longest common subsequence (lcs) between two
strings. The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
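# Illustrative example (not part of the original source): _lcs fills the full
# (len(x)+1) x (len(y)+1) dynamic-programming table and _len_lcs reads the
# bottom-right cell, e.g.
#   _len_lcs(["a", "b", "c", "d"], ["a", "c", "d"])  -> 3   (the LCS is "a c d")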
def _recon_lcs(x, y):
"""
Returns the Longest Common Subsequence between x and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
sequence: LCS of x and y
"""
i, j = len(x), len(y)
table = _lcs(x, y)
def _recon(i, j):
"""private recon calculation"""
if i == 0 or j == 0:
return []
elif x[i - 1] == y[j - 1]:
return _recon(i - 1, j - 1) + [(x[i - 1], i)]
elif table[i - 1, j] > table[i, j - 1]:
return _recon(i - 1, j)
else:
return _recon(i, j - 1)
recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))
return recon_tuple
def rouge_n(evaluated_sentences, reference_sentences, n=2):
"""
Computes ROUGE-N of two text collections of sentences.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentences: The sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
A tuple (f1, precision, recall) for ROUGE-N
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
reference_ngrams = _get_word_ngrams(n, reference_sentences)
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if evaluated_count == 0:
precision = 0.0
else:
precision = overlapping_count / evaluated_count
if reference_count == 0:
recall = 0.0
else:
recall = overlapping_count / reference_count
f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
# return overlapping_count / reference_count
return f1_score, precision, recall
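# Illustrative example (not part of the original source): with single-sentence
# inputs and n=1,
#   rouge_n(["the cat sat"], ["the cat was sad"], n=1)
# has 2 overlapping unigrams ({"the", "cat"}) out of 3 candidate and 4
# reference unigrams, so precision = 2/3, recall = 1/2 and F1 ~= 0.571.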
def _f_p_r_lcs(llcs, m, n):
"""
Computes the LCS-based F-measure score
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta**2)) * r_lcs * p_lcs
denom = r_lcs + ((beta**2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs, p_lcs, r_lcs
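# Illustrative example (not part of the original source): because beta is set
# to p_lcs / r_lcs, the F-measure weights recall and precision by this
# data-dependent ratio. For instance, _f_p_r_lcs(3, 5, 4) gives
# r_lcs = 0.6, p_lcs = 0.75, beta = 1.25 and f_lcs ~= 0.651.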
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (sentence level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentences: The sentences from the reference set
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
reference_words = _split_into_words(reference_sentences)
evaluated_words = _split_into_words(evaluated_sentences)
m = len(reference_words)
n = len(evaluated_words)
lcs = _len_lcs(evaluated_words, reference_words)
return _f_p_r_lcs(lcs, m, n)
def _union_lcs(evaluated_sentences, reference_sentence):
"""
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C. For example
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and
c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is
“w1 w2” and the longest common subsequence of r_i and c2 is “w1 w3 w5”. The
union longest common subsequence of r_i, c1, and c2 is “w1 w2 w3 w5” and
LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
lcs_union = set()
reference_words = _split_into_words([reference_sentence])
combined_lcs_length = 0
for eval_s in evaluated_sentences:
evaluated_words = _split_into_words([eval_s])
lcs = set(_recon_lcs(reference_words, evaluated_words))
combined_lcs_length += len(lcs)
lcs_union = lcs_union.union(lcs)
union_lcs_count = len(lcs_union)
union_lcs_value = union_lcs_count / combined_lcs_length
return union_lcs_value
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentences: The sentences from the reference set
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
# total number of words in reference sentences
m = len(_split_into_words(reference_sentences))
# total number of words in evaluated sentences
n = len(_split_into_words(evaluated_sentences))
union_lcs_sum_across_all_references = 0
for ref_s in reference_sentences:
union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,
ref_s)
return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)
def rouge(hypotheses, references):
"""Calculates average rouge scores for a list of hypotheses and
references"""
# Filter out hyps that are of 0 length
# hyps_and_refs = zip(hypotheses, references)
# hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]
# hypotheses, references = zip(*hyps_and_refs)
# Calculate ROUGE-1 F1, precision, recall scores
rouge_1 = [
rouge_n(hyp, ref, 1) for hyp, ref in zip(hypotheses, references)
]
rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))
# Calculate ROUGE-2 F1, precision, recall scores
rouge_2 = [
rouge_n(hyp, ref, 2) for hyp, ref in zip(hypotheses, references)
]
rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))
# Calculate ROUGE-L F1, precision, recall scores
rouge_l = [
rouge_l_sentence_level(hyp, ref)
for hyp, ref in zip(hypotheses, references)
]
rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))
return {
"rouge_1/f_score": rouge_1_f,
"rouge_1/r_score": rouge_1_r,
"rouge_1/p_score": rouge_1_p,
"rouge_2/f_score": rouge_2_f,
"rouge_2/r_score": rouge_2_r,
"rouge_2/p_score": rouge_2_p,
"rouge_l/f_score": rouge_l_f,
"rouge_l/r_score": rouge_l_r,
"rouge_l/p_score": rouge_l_p,
}
if __name__ == '__main__':
from baseline import split_sentences
article = r'''<s> marseille prosecutor says `` so far no videos were used in the crash investigation '' despite media reports . </s> <s> journalists at bild and paris match are `` very confident '' the video clip is real , an editor says . </s> <s> andreas lubitz had informed his lufthansa training school of an episode of severe depression , airline says . </s>'''
sents = split_sentences(article)
print(sents)
print(rouge([sents], [sents]))
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/g_rouge.py |
BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/__init__.py |
|
from setuptools import setup
import os
from pyrouge.utils.file_utils import list_files
data_files = list_files('pyrouge/tests/data')
data_files = [p.replace('pyrouge/tests/', '') for p in data_files]
script_files = [os.path.join('bin', s) for s in os.listdir('bin')]
setup(
name='pyrouge',
version='0.1.3',
author='Benjamin Heinzerling, Anders Johannsen',
author_email='[email protected]',
packages=['pyrouge', 'pyrouge.utils', 'pyrouge.tests'],
scripts=script_files,
#test_suite='pyrouge.test.suite',
package_data={'pyrouge.tests': data_files},
url='https://github.com/noutenki/pyrouge',
license='LICENSE.txt',
description='A Python wrapper for the ROUGE summarization evaluation'
' package.',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Text Processing :: Linguistic'],
long_description=open('README.rst').read(),
)
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/setup.py |
BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/bin/__init__.py |
|
# from pyrouge.Rouge155 import Rouge155
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/__init__.py |
import unittest
from pyrouge.tests.Rouge155_test import PyrougeTest
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(PyrougeTest))
unittest.TextTestRunner().run(suite)
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/test.py |
from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import platform
from subprocess import check_output
from tempfile import mkdtemp
from functools import partial
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from pyrouge.utils import log
from pyrouge.utils.file_utils import DirectoryProcessor
from pyrouge.utils.file_utils import verify_dir
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames, given as a regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
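# Illustrative example (not part of the original source): each input line
# becomes one numbered anchor element in the generated HTML body, e.g. the
# first line of "First sentence.\nSecond sentence." is rendered as
#   <a name="1">[1]</a> <a href="#1" id=1>First sentence.</a>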
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = Rouge155.__get_model_filenames_for_id(
id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
# return rouge_output
return "" #Pgour DEBUG
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
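# Illustrative example (not part of the original source): a ROUGE output line
# such as "1 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)"
# is mapped to the keys "rouge_1_recall", "rouge_1_recall_cb" and
# "rouge_1_recall_ce".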
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verify presence of ROUGE-1.5.5.pl and the data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-m',
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-e', self._data_dir] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
from utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/Rouge155.py |
from __future__ import print_function, unicode_literals, division
import unittest
import os
import re
from subprocess import check_output
from tempfile import mkdtemp
from pyrouge import Rouge155
from pyrouge.utils.file_utils import str_from_file, xml_equal
module_path = os.path.dirname(__file__)
os.chdir(module_path)
add_data_path = lambda p: os.path.join('data', p)
check_output_clean = lambda c: check_output(c).decode("UTF-8").strip()
class PyrougeTest(unittest.TestCase):
def test_paths(self):
rouge = Rouge155()
def get_home_from_settings():
with open(rouge.settings_file) as f:
for line in f.readlines():
if line.startswith("home_dir"):
rouge_home = line.split("=")[1].strip()
return rouge_home
self.assertEqual(rouge.home_dir, get_home_from_settings())
self.assertTrue(os.path.exists(rouge.bin_path))
self.assertTrue(os.path.exists(rouge.data_dir))
wrong_path = "/nonexisting/path/rewafafkljaerearjafankwe3"
with self.assertRaises(Exception) as context:
rouge.system_dir = wrong_path
self.assertEqual(
str(context.exception),
"Cannot set {} directory because the path {} does not "
"exist.".format("system", wrong_path))
right_path = add_data_path("systems")
rouge.system_dir = right_path
self.assertEqual(rouge.system_dir, right_path)
with self.assertRaises(Exception) as context:
rouge.model_dir = wrong_path
self.assertEqual(
str(context.exception),
"Cannot set {} directory because the path {} does not "
"exist.".format("model", wrong_path))
right_path = add_data_path("models")
rouge.model_dir = right_path
self.assertEqual(rouge.model_dir, right_path)
def test_wrong_system_pattern(self):
wrong_regexp = "adfdas454fd"
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models")
#rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.system_filename_pattern = wrong_regexp
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
with self.assertRaises(Exception) as context:
rouge.evaluate()
self.assertEqual(
str(context.exception),
"Did not find any files matching the pattern {} in the system "
"summaries directory {}.".format(wrong_regexp, rouge.system_dir))
def test_wrong_model_pattern(self):
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models_plain")
rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
with self.assertRaises(Exception) as context:
rouge.evaluate()
match_string = (
r"Could not find any model summaries for the system "
r"summary with ID " + "(\d+)" + r". Specified model filename "
r"pattern was: " + re.escape(rouge.model_filename_pattern))
try:
assert_regex = self.assertRegex
except AttributeError:
assert_regex = self.assertRegexpMatches
assert_regex(str(context.exception), re.compile(match_string))
def test_text_conversion(self):
rouge = Rouge155()
text = str_from_file(add_data_path("spl_test_doc"))
html = rouge.convert_text_to_rouge_format(text, "D00000.M.100.A.C")
target = str_from_file(add_data_path("spl_test_doc.html"))
self.assertEqual(html, target)
# only run this test if BeautifulSoup is installed
try:
from bs4 import BeautifulSoup
def test_get_plain_text(self):
input_dir = add_data_path("SL2003_models_rouge_format")
output_dir = mkdtemp()
target_dir = add_data_path("SL2003_models_plain_text")
command = (
"pyrouge_convert_rouge_format_to_plain_text "
"-i {} -o {}".format(input_dir, output_dir))
check_output(command.split())
filenames = os.listdir(input_dir)
for filename in filenames:
output_file = os.path.join(output_dir, filename)
output = str_from_file(output_file)
target_file = os.path.join(target_dir, filename)
target = str_from_file(target_file)
self.assertEqual(output, target)
except ImportError:
pass
def test_convert_summaries(self):
input_dir = add_data_path("SL2003_models_plain_text")
output_dir = mkdtemp()
target_dir = add_data_path("SL2003_models_rouge_format")
command = (
"pyrouge_convert_plain_text_to_rouge_format -i {} -o {}".format(
input_dir, output_dir))
check_output(command.split())
filenames = os.listdir(input_dir)
for filename in filenames:
output_file = os.path.join(output_dir, filename)
output = str_from_file(output_file)
target_file = os.path.join(target_dir, filename)
target = str_from_file(target_file)
filename = filename.replace(".html", "")
target = target.replace(filename, "dummy title")
self.assertEqual(output, target, filename)
def test_config_file(self):
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models")
rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
rouge.config_file = add_data_path("config_test.xml")
rouge.write_config(system_id=11)
self.assertTrue(xml_equal(
rouge.config_file,
add_data_path("ROUGE-test_11.xml")))
os.remove(rouge.config_file)
def test_evaluation(self):
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models")
rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
pyrouge_output = rouge.evaluate(system_id=11).strip()
rouge_command = (
"{bin} -e {data} -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 "
"-a -m {xml}".format(
bin=rouge.bin_path,
data=rouge.data_dir,
xml=add_data_path("ROUGE-test_11.xml")))
orig_rouge_output = check_output_clean(rouge_command.split())
self.assertEqual(pyrouge_output, orig_rouge_output)
def test_rouge_for_plain_text(self):
model_dir = add_data_path("models_plain")
system_dir = add_data_path("systems_plain")
pyrouge_command = (
"pyrouge_evaluate_plain_text_files -m {} -s {} -sfp "
"D(\d+).M.100.T.A -mfp D#ID#.M.100.T.[A-Z] -id 1".format(
model_dir, system_dir))
pyrouge_output = check_output_clean(pyrouge_command.split())
rouge = Rouge155()
config_file = add_data_path("config_test2.xml")
rouge_command = (
"{bin} -e {data} -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 "
"-a -m {xml}".format(
bin=rouge.bin_path,
data=rouge.data_dir,
xml=config_file))
orig_rouge_output = check_output_clean(rouge_command.split())
self.assertEqual(pyrouge_output, orig_rouge_output)
def test_write_config(self):
system_dir = add_data_path("systems")
model_dir = add_data_path("models")
system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
config_file = os.path.join(mkdtemp(), "config_test.xml")
command = (
"pyrouge_write_config_file -m {m} -s {s} "
"-mfp {mfp} -sfp {sfp} -c {c}".format(
m=model_dir, s=system_dir,
mfp=model_filename_pattern, sfp=system_filename_pattern,
c=config_file))
check_output(command.split())
target_xml = add_data_path("config_test.xml")
print(config_file, target_xml)
self.assertTrue(xml_equal(config_file, target_xml))
def test_options(self):
rouge = Rouge155()
model_dir = add_data_path("models_plain")
system_dir = add_data_path("systems_plain")
config_file = add_data_path("config_test2.xml")
command_part1 = (
"pyrouge_evaluate_plain_text_files -m {} -s {} -sfp "
"D(\d+).M.100.T.A -mfp D#ID#.M.100.T.[A-Z] -id 1 -rargs".format(
model_dir, system_dir))
command_part2 = [
"\"-e {data} -c 90 -2 -1 -U -r 1000 -n 2 -w 1.2 "
"-a -m {xml}\"".format(
data=rouge.data_dir, xml=config_file)]
pyrouge_command = command_part1.split() + command_part2
pyrouge_output = check_output_clean(pyrouge_command)
rouge_command = (
"{bin} -e {data} -c 90 -2 -1 -U -r 1000 -n 2 -w 1.2 "
"-a -m {xml}".format(
bin=rouge.bin_path, data=rouge.data_dir, xml=config_file))
orig_rouge_output = check_output_clean(rouge_command.split())
self.assertEqual(pyrouge_output, orig_rouge_output)
def main():
unittest.main()
if __name__ == "__main__":
main()
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/tests/Rouge155_test.py |
BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/tests/__init__.py |
|
import unittest
import pyrouge.test
from pyrouge.test.Rouge155_test import PyrougeTest
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(PyrougeTest))
unittest.TextTestRunner().run(suite)
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/tests/__main__.py |
from __future__ import print_function, unicode_literals, division
from pyrouge.utils import log
from pyrouge.utils.string_utils import cleanup
from pyrouge.utils.file_utils import DirectoryProcessor
class PunktSentenceSplitter:
"""
Splits sentences using the NLTK Punkt sentence tokenizer. If installed,
PunktSentenceSplitter can use the default NLTK data for English, otherwise
custom trained data has to be provided.
"""
def __init__(self, language="en", punkt_data_path=None):
self.lang2datapath = {"en": "tokenizers/punkt/english.pickle"}
self.log = log.get_global_console_logger()
try:
import nltk.data
except ImportError:
self.log.error(
"Cannot import NLTK data for the sentence splitter. Please "
"check if the 'punkt' NLTK-package is installed correctly.")
try:
if not punkt_data_path:
punkt_data_path = self.lang2datapath[language]
self.sent_detector = nltk.data.load(punkt_data_path)
except KeyError:
self.log.error(
"No sentence splitter data for language {}.".format(language))
except:
self.log.error(
"Could not load sentence splitter data: {}".format(
self.lang2datapath[language]))
def split(self, text):
"""Splits text and returns a list of the resulting sentences."""
text = cleanup(text)
return self.sent_detector.tokenize(text.strip())
@staticmethod
def split_files(input_dir, output_dir, lang="en", punkt_data_path=None):
ss = PunktSentenceSplitter(lang, punkt_data_path)
DirectoryProcessor.process(input_dir, output_dir, ss.split)
if __name__ == '__main__':
text = "Punkt knows that the periods in Mr. Smith and Johann S. Bach do "
"not mark sentence boundaries. And sometimes sentences can start with "
"non-capitalized words. i is a good variable name."
ss = PunktSentenceSplitter()
print(ss.split(text))
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/utils/sentence_splitter.py |
import logging
def get_console_logger(name, level=logging.INFO):
logFormatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger(name)
if not logger.handlers:
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setFormatter(logFormatter)
logger.addHandler(ch)
return logger
def get_global_console_logger(level=logging.INFO):
return get_console_logger('global', level)
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/utils/log.py |
BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/utils/__init__.py |
|
import argparse
io_parser = argparse.ArgumentParser(add_help=False)
io_parser.add_argument(
'-i', '--input-files-dir',
help="Path of the directory containing the files to be converted.",
type=str, action="store", dest="input_dir",
required=True
)
io_parser.add_argument(
'-o', '--output-files-dir',
help="Path of the directory in which the converted files will be saved.",
type=str, action="store", dest="output_dir",
required=True
)
ss_parser = argparse.ArgumentParser(add_help=False)
ss_parser.add_argument(
'-ss', '--split-sentences',
help="ROUGE assumes one sentence per line as default summary format. Use "
"this flag to split sentences using NLTK if the summary texts have "
"another format.",
action="store_true", dest="split_sents"
)
rouge_path_parser = argparse.ArgumentParser(add_help=False)
rouge_path_parser.add_argument(
'-hd', '--home-dir',
help="Path of the directory containing ROUGE-1.5.5.pl.",
type=str, action="store", dest="rouge_home",
required=True
)
model_sys_parser = argparse.ArgumentParser(add_help=False)
model_sys_parser.add_argument(
'-mfp', '--model-fn-pattern',
help="Regexp matching model filenames.",
type=str, action="store", dest="model_filename_pattern",
required=True
)
model_sys_parser.add_argument(
'-sfp', '--system-fn-pattern',
help="Regexp matching system filenames.",
type=str, action="store", dest="system_filename_pattern",
required=True
)
model_sys_parser.add_argument(
'-m', '--model-dir',
help="Path of the directory containing model summaries.",
type=str, action="store", dest="model_dir",
required=True
)
model_sys_parser.add_argument(
'-s', '--system-dir',
help="Path of the directory containing system summaries.",
type=str, action="store", dest="system_dir",
required=True
)
model_sys_parser.add_argument(
'-id', '--system-id',
help="Optional system ID. This is useful when comparing several systems.",
action="store", dest="system_id"
)
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument(
'-c', '--config-file-path',
help="Path of configfile to be written, including file name.",
type=str, action="store", dest="config_file_path",
required=True
)
main_parser = argparse.ArgumentParser(
parents=[model_sys_parser], add_help=False)
main_parser.add_argument(
'-hd', '--home-dir',
help="Path of the directory containing ROUGE-1.5.5.pl.",
type=str, action="store", dest="rouge_home",
)
main_parser.add_argument(
'-rargs', '--rouge-args',
help="Override pyrouge default ROUGE command line options with the "
"ROUGE_ARGS string, enclosed in qoutation marks.",
type=str, action="store", dest="rouge_args"
)
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/utils/argparsers.py |
from __future__ import print_function, unicode_literals, division
import re
def remove_newlines(s):
p = re.compile("[\n|\r\n|\n\r]")
s = re.sub(p, " ", s)
s = remove_extraneous_whitespace(s)
return s
def remove_extraneous_whitespace(s):
p = re.compile("(\s+)")
s = re.sub(p, " ", s)
return s
def cleanup(s):
return remove_newlines(s)
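# Illustrative example (not part of the original source): the character class
# "[\n|\r\n|\n\r]" matches any of '\n', '\r' and the literal '|', so pipe
# characters are also replaced by spaces before whitespace is collapsed, e.g.
#   cleanup("a\nb  c")  -> "a b c"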
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/utils/string_utils.py |
from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import logging
import xml.etree.ElementTree as et
from gehrmann_rouge_opennmt.rouge_baselines.pyrouge.pyrouge.utils import log
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
Apply function to all files in input_dir and save the resulting output
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger(level=logging.INFO)
# logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
# logger.info("Processing {}.".format(input_file_name))
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(output_string)
# logger.info("Saved processed files to {}.".format(output_dir))
def str_from_file(path):
"""
Return file contents as string.
"""
with open(path) as f:
s = f.read().strip()
return s
def xml_equal(xml_file1, xml_file2):
"""
Parse xml and convert to a canonical string representation so we don't
have to worry about semantically meaningless differences
"""
def canonical(xml_file):
# poor man's canonicalization, since we don't want to install
# external packages just for unittesting
s = et.tostring(et.parse(xml_file).getroot()).decode("UTF-8")
s = re.sub("[\n|\t]*", "", s)
s = re.sub("\s+", " ", s)
s = "".join(sorted(s)).strip()
return s
return canonical(xml_file1) == canonical(xml_file2)
def list_files(dir_path, recursive=True):
"""
Return a list of files in dir_path.
"""
for root, dirs, files in os.walk(dir_path):
file_list = [os.path.join(root, f) for f in files]
if recursive:
for dir in dirs:
dir = os.path.join(root, dir)
file_list.extend(list_files(dir, recursive=True))
return file_list
def verify_dir(path, name=None):
if name:
name_str = "Cannot set {} directory because t".format(name)
else:
name_str = "T"
msg = "{}he path {} does not exist.".format(name_str, path)
if not os.path.exists(path):
raise Exception(msg)
| BARTScore-main | D2T/gehrmann_rouge_opennmt/rouge_baselines/pyrouge/pyrouge/utils/file_utils.py |
#!/usr/bin/env python3
import argparse
import hashlib
import logging
import os
import sys
from typing import List, Dict, Iterator, Any, Tuple
import numpy as np
import sentencepiece as spm
import torch
from fairseq import checkpoint_utils, utils
from fairseq.data import LanguagePairDataset
from sacrebleu import get_source_file, get_reference_files, DATASETS, get_langpairs_for_testset
logger = logging.getLogger('prism')
logger.setLevel(logging.INFO)
MODELS = {
'8412b2044da4b9b2c0a8ce87b305d0d1': {
'name': 'm39v1',
'path': 'todo',
'date': '2020-04-30',
'description': 'model released with arXiv paper April 2020',
'langs': ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eo', 'fi', 'fr', 'he',
'hr', 'hu', 'id', 'it', 'ja', 'kk', 'lt', 'lv', 'mk', 'nl', 'no', 'pl', 'pt', 'ro', 'ru',
'sk', 'sl', 'sq', 'sr', 'sv', 'tr', 'uk', 'vi', 'zh'],
}
}
def hash_model(model_dir):
md5 = hashlib.md5()
block_size = 2 ** 20
for fname in ('checkpoint.pt', 'spm.model', 'dict.src.txt', 'dict.tgt.txt'):
with open(os.path.join(model_dir, fname), "rb") as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
md5.digest()
return md5.hexdigest()
class Prism:
def __init__(self, model_dir, lang, temperature=1.0):
'''
model_dir should contain:
1) checkpoint.pt: the fairseq model
2) spm.model: the sentencepiece model
3) dict.src.txt: the fairseq source dictionary
4) dict.tgt.txt: the fairseq target dictionary (likely a copy of the source)
lang: ISO 639-1 Code (e.g. "en"). Must be a language compatable with the model.
'''
self.sp = spm.SentencePieceProcessor()
self.sp.Load(model_dir + '/spm.model')
self.lang = lang
self.temperature = temperature
# this prints things and I can't figure out how to disable it
sys.stdout = open(os.devnull, 'w')
self.models, self.args, self.task = checkpoint_utils.load_model_ensemble_and_task(
[model_dir + '/checkpoint.pt', ],
arg_overrides=dict(data=model_dir + '/'),
)
sys.stdout = sys.__stdout__
self.use_cuda = torch.cuda.is_available()
self.generator = SequenceScorer(self.task.target_dictionary, temperature=temperature)
for model in self.models:
if self.use_cuda:
model.cuda()
model.make_generation_fast_(
beamable_mm_beam_size=None,
need_attn=False,
)
# if model.args.fp16:
# model.half()
# hash model
self.model_hash = hash_model(model_dir)
if self.model_hash in MODELS:
model_langs = MODELS[self.model_hash]['langs']
if lang not in model_langs:
model_name = MODELS[self.model_hash]['name']
logger.warning(f'Language "{lang}" is unsupported for model "{model_name}"')
logger.warning(f'Supported languages for {model_name}: {", ".join(model_langs)}')
sys.exit(1)
else:
logger.warning('unrecognized model, so cannot check language')
def identifier(self):
if self.model_hash in MODELS:
model_name = MODELS[self.model_hash]['name']
else:
logger.warning('unrecognized model, using hash to identify')
model_name = self.model_hash
return dict(version='0.1', model=model_name, seg_scores='avg_log_prob',
sys_scores='avg_log_prob', log_base=2, temperature=self.temperature)
def _binarize(self, sentence: str) -> torch.LongTensor:
return self.task.source_dictionary.encode_line(sentence, add_if_not_exist=False).long()
def _encode(self, sent, prepend=True):
sent = ' '.join(self.sp.EncodeAsPieces(sent))
if prepend:
sent = f'<{self.lang}> ' + sent
return self._binarize(sent)
def _build_batches(self,
source_tokens: List[List[int]],
target_tokens: List[List[int]],
skip_invalid_size_inputs: bool) -> Iterator[Dict[str, Any]]:
# Prune token
source_tokens = [src_token[:1800] for src_token in source_tokens]
target_tokens = [tgt_token[:2000] for tgt_token in target_tokens]
source_lengths = torch.LongTensor([t.numel() for t in source_tokens])
target_lengths = torch.LongTensor([t.numel() for t in target_tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=LanguagePairDataset(source_tokens, source_lengths, self.task.source_dictionary,
tgt=target_tokens, tgt_sizes=target_lengths,
tgt_dict=self.task.target_dictionary),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=(2000, 2000), # ???
ignore_invalid_inputs=skip_invalid_size_inputs,
).next_epoch_itr(shuffle=False)
return batch_iterator
def _score_forward(self, tok_sents_in, tok_sents_out):
assert len(tok_sents_in) == len(tok_sents_out)
tok_level_scores = [None, ] * len(tok_sents_in) # for debug
results = [None, ] * len(tok_sents_in)
for batch in self._build_batches(tok_sents_in, tok_sents_out, skip_invalid_size_inputs=False):
if self.use_cuda: # must be a better way
batch['id'] = batch['id'].cuda()
batch['net_input']['src_tokens'] = batch['net_input']['src_tokens'].cuda()
batch['net_input']['src_lengths'] = batch['net_input']['src_lengths'].cuda()
batch['net_input']['prev_output_tokens'] = batch['net_input']['prev_output_tokens'].cuda()
batch['target'] = batch['target'].cuda()
translations = self.task.inference_step(self.generator, self.models, batch)
ids = batch['id'].cpu().numpy()
tok_scores = [x[0]['positional_scores'].cpu().numpy() for x in translations]
# [1:] to skip language tag log prob
sent_scores = [np.mean(x[1:]) for x in tok_scores]
for _id, sent_score, _tok_score in zip(ids, sent_scores, tok_scores):
results[_id] = sent_score
tok_level_scores[_id] = _tok_score
if logger.level == logging.DEBUG:
for ii, (sent_in, scores_out, sent_out) in enumerate(zip(tok_sents_in, tok_level_scores, tok_sents_out)):
sent_in_str = ' '.join([self.task.source_dictionary[x] for x in sent_in])
logger.debug(f'Input[{ii}] = ' + sent_in_str)
sent_out_tok = [self.task.source_dictionary[x] for x in sent_out]
logger.debug(f'Output[{ii}] = ' + \
f' '.join([f'{a}[{b:.02f}]' for a, b in zip(sent_out_tok, scores_out)]))
if None in results:
raise Exception('Missing one or more sentence scores')
return np.array(results)
def score(self, cand, ref=None, src=None, segment_scores=False):
if not (ref is None) ^ (src is None):
raise Exception('Must provide exactly one of "ref" or "src"')
tokenized_cand = [self._encode(sentence, prepend=False) for sentence in cand]
tokenized_cand_prep = [self._encode(sentence, prepend=True) for sentence in cand]
if src is not None:
# Prism-src: score the candidate given the source
if len(cand) != len(src):
raise Exception(f'Length of cand ({len(cand)}) does not match length of src ({len(src)})')
tokenized_src = [self._encode(sentence, prepend=False) for sentence in src]
scores = self._score_forward(tokenized_src, tokenized_cand_prep)
if not segment_scores:
scores = np.mean(scores)
return scores
else:
# Prism-ref: average candidate given reference and reference given candidate
if len(cand) != len(ref):
raise Exception(f'Length of cand ({len(cand)}) does not match length of ref ({len(ref)})')
tokenized_ref = [self._encode(sentence, prepend=False) for sentence in ref]
tokenized_ref_prep = [self._encode(sentence, prepend=True) for sentence in ref]
forward_scores = self._score_forward(tok_sents_in=tokenized_ref, tok_sents_out=tokenized_cand_prep)
reverse_scores = self._score_forward(tok_sents_in=tokenized_cand, tok_sents_out=tokenized_ref_prep)
scores = 0.5 * forward_scores + 0.5 * reverse_scores
if not segment_scores:
scores = np.mean(scores)
return scores
return forward_scores, reverse_scores, scores
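# Illustrative usage sketch (not part of the original source); the model
# directory below is a placeholder for a downloaded m39v1 model:
#   prism = Prism(model_dir='/path/to/m39v1', lang='en')
#   prism.score(cand=['Hello there.'], ref=['Hi there.'], segment_scores=True)
# returns one average token log-probability (typically negative) per candidate;
# passing src=... instead of ref=... gives source-conditioned Prism-src scores.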
def parse_sacrebleu_uri(uri: str) -> Tuple[str]:
"""
Parses the test set and language pair from a URI of the form
sacrebleu:wmt19:de-en
sacrebleu:wmt19/google/ar:de-en
"""
try:
_, testset, langpair = uri.split(":")
except ValueError:
logger.error('sacrebleu:* flags must take the form "sacrebleu:testset:langpair"')
sys.exit(1)
testsets = sorted(DATASETS, reverse=True)
if testset not in testsets:
logger.error(f"Test set '{testset}' was not found. Available sacrebleu test sets are:")
for key in testsets:
logger.error(f" {key:20s}: {DATASETS[key].get('description', '')}")
sys.exit(1)
lang_pairs = get_langpairs_for_testset(testset)
if langpair not in lang_pairs:
logger.error(f"Language pair '{langpair}' not available for testset '{testset}'.\n"
f" Language pairs available for {testset}: {', '.join(lang_pairs)}")
sys.exit(1)
return testset, langpair
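# Illustrative example (not part of the original source): assuming "wmt19" and
# "de-en" are available in sacrebleu, parse_sacrebleu_uri("sacrebleu:wmt19:de-en")
# returns ("wmt19", "de-en"); malformed or unknown URIs log an error and exit.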
def main():
parser = argparse.ArgumentParser(description='Prism: MT metric based on multilingual NMT',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cand', required=False, type=argparse.FileType('rt'), default=sys.stdin,
help='Candidate text file. If not provided, candidates are read from stdin.')
parser.add_argument('--ref', required=False, type=str,
help='Reference text file. If provided, reference-based Prism-ref scores are returned. '
'A value of "sacrebleu:{testset}:{langpair}" will use sacrebleu datasets. '
'You must provide exactly one of --ref or --src. ')
parser.add_argument('--src', required=False, type=str,
help='Source text file. If provided, source-based Prism-src scores are returned. '
'A value of "sacrebleu:{testset}:{langpair}" will use sacrebleu datasets. '
'You must provide exactly one of --ref or --src.')
parser.add_argument('--model-dir', required=True, type=str, help='Model Directory')
parser.add_argument('--lang', type=str, help='2-character language code (ISO 639-1)')
parser.add_argument('--temperature', type=float, default=1.0, help='Softmax temperature: '
'values >1.0 produce more uniform samples and values <1.0 produce sharper samples')
parser.add_argument('--segment-scores', action='store_true',
help='Print per-sentence scores instead of corpus level score')
parser.add_argument('--debug', action='store_true', help='Print debug info')
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
if not (args.ref is None) ^ (args.src is None):
logger.error('You must provide exactly one of --ref or --src')
sys.exit(1)
if args.ref is not None:
if args.ref.startswith('sacrebleu:'):
testset, langpair = parse_sacrebleu_uri(args.ref)
path = get_reference_files(testset, langpair)[0]
args.ref = open(path).readlines()
args.lang = langpair.split("-")[1]
logger.info(f"Scoring against {len(args.ref)}-line {args.lang} reference"
f" from sacrebleu dataset {testset}/{langpair}")
else:
args.ref = open(args.ref, 'rt').readlines()
if args.src is not None:
if args.src.startswith('sacrebleu:'):
testset, langpair = parse_sacrebleu_uri(args.src)
path = get_source_file(testset, langpair)
args.src = open(path).readlines()
args.lang = langpair.split("-")[0]
logger.info(f"Scoring against {len(args.src)}-line {args.lang} source"
f" from sacrebleu dataset {testset}/{langpair}")
else:
args.src = open(args.src, 'rt').readlines()
if args.lang is None:
logger.error("The language must be specified (--lang XX), XX the ISO 639-1 code")
sys.exit(1)
if args.temperature <= 0:
raise Exception('temperature must be > 0')
args.cand = args.cand.readlines()
n_gpus = torch.cuda.device_count()
logging.debug(f'Running on {"GPU" if n_gpus else "CPU"}')
if len(args.cand) > 50 and n_gpus == 0:
logging.warning('Running on CPU is slow...')
prism = Prism(model_dir=args.model_dir, lang=args.lang, temperature=args.temperature)
scores = prism.score(cand=args.cand, ref=args.ref, src=args.src, segment_scores=args.segment_scores)
logger.info(f'Prism identifier: {prism.identifier()}')
if args.segment_scores:
for ss in scores:
print(ss)
else:
print(scores)
class SequenceScorer(object):
"""
Copy of https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_scorer.py
with softmax temperature control added
MIT License
Copyright (c) Facebook, Inc. and its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, tgt_dict, softmax_batch=None, temperature=1.0):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos()
self.softmax_batch = softmax_batch or sys.maxsize
self.temperature = temperature
assert self.softmax_batch > 0
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample['net_input']
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample['target']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample['target'] = tgt
# divide the logits by temperature prior to softmax
# for example, see https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_generator.py:
# decoder_out[0][:, -1:, :].div_(temperature)
bd[0].div_(self.temperature)
curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample['target'] = orig_target
probs = probs.view(sample['target'].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None and torch.is_tensor(attn):
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
if sample['target'] is not None else None
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i],
sample['target'][i], self.pad, self.eos)
else:
avg_attn_i = alignment = None
hypos.append([{
'tokens': ref,
'score': score_i,
'attention': avg_attn_i,
'alignment': alignment,
'positional_scores': avg_probs_i,
}])
return hypos
if __name__ == '__main__':
main()
| BARTScore-main | SUM/prism.py |
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import string
from pyemd import emd
from torch import nn
from math import log
from itertools import chain
from pytorch_pretrained_bert import BertTokenizer, BertModel
from pytorch_pretrained_bert.modeling import BertPreTrainedModel
from collections import defaultdict, Counter
from multiprocessing import Pool
from functools import partial
import os
import sys
import requests
import zipfile
USERHOME = os.path.expanduser("~")
MOVERSCORE_DIR = os.environ.get('MOVERSCORE', os.path.join(USERHOME, '.moverscore'))
MNLI_BERT = 'https://github.com/AIPHES/emnlp19-moverscore/releases/download/0.6/MNLI_BERT.zip'
output_dir = os.path.join(MOVERSCORE_DIR)
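# Download the MNLI-finetuned BERT checkpoint used by this MoverScore variant, printing a simple 50-character progress bar to stdout.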
def download_MNLI_BERT(url, filename):
with open(filename, 'wb') as f:
response = requests.get(url, stream=True)
total = response.headers.get('content-length')
if total is None:
f.write(response.content)
else:
downloaded = 0
total = int(total)
for data in response.iter_content(chunk_size=max(int(total / 1000), 1024 * 1024)):
downloaded += len(data)
f.write(data)
done = int(50 * downloaded / total)
sys.stdout.write('\r[{}{}]'.format('-' * done, '.' * (50 - done)))
sys.stdout.flush()
sys.stdout.write('\n')
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
tarball = os.path.join(output_dir, os.path.basename(MNLI_BERT))
rawdir = os.path.join(output_dir, 'raw')
if not os.path.exists(tarball):
print("Downloading %s to %s" % (MNLI_BERT, tarball))
download_MNLI_BERT(MNLI_BERT, tarball)
if tarball.endswith('.zip'):
z = zipfile.ZipFile(tarball, 'r')
# z.printdir()
z.extractall(output_dir)
z.close()
# device = 'cuda'
# output_dir = "./uncased_L-12_H-768_A-12/mnli/"
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=None):
encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=True)
return encoded_layers, pooled_output
tokenizer = BertTokenizer.from_pretrained(output_dir, do_lower_case=True)
model = BertForSequenceClassification.from_pretrained(output_dir, 3)
model.eval()
# model.to(device)
def truncate(tokens):
if len(tokens) > 510:
tokens = tokens[0:510]
return tokens
def process(a):
a = ["[CLS]"] + truncate(tokenizer.tokenize(a)) + ["[SEP]"]
a = tokenizer.convert_tokens_to_ids(a)
return set(a)
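# Build an IDF dictionary over BERT token ids; ids never seen in the corpus default to log((num_docs + 1) / 1).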
def get_idf_dict(arr, nthreads=4):
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda: log((num_docs + 1) / (1)))
idf_dict.update({idx: log((num_docs + 1) / (c + 1)) for (idx, c) in idf_count.items()})
return idf_dict
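# Pad a batch of token-id sequences to the batch maximum and return the padded tensor, the sequence lengths, and the attention mask.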
def padding(arr, pad_token, dtype=torch.long):
lens = torch.LongTensor([len(a) for a in arr])
max_len = lens.max().item()
padded = torch.ones(len(arr), max_len, dtype=dtype) * pad_token
mask = torch.zeros(len(arr), max_len, dtype=torch.long)
for i, a in enumerate(arr):
padded[i, :lens[i]] = torch.tensor(a, dtype=dtype)
mask[i, :lens[i]] = 1
return padded, lens, mask
def bert_encode(model, x, attention_mask, device='cuda:0'):
model.eval()
model.to(device)
x_seg = torch.zeros_like(x, dtype=torch.long)
with torch.no_grad():
x_encoded_layers, pooled_output = model(x, x_seg, attention_mask=attention_mask, output_all_encoded_layers=True)
return x_encoded_layers
def collate_idf(arr, tokenize, numericalize, idf_dict,
pad="[PAD]", device='cuda:0'):
tokens = [["[CLS]"] + truncate(tokenize(a)) + ["[SEP]"] for a in arr]
arr = [numericalize(a) for a in tokens]
idf_weights = [[idf_dict[i] for i in a] for a in arr]
pad_token = numericalize([pad])[0]
padded, lens, mask = padding(arr, pad_token, dtype=torch.long)
padded_idf, _, _ = padding(idf_weights, pad_token, dtype=torch.float)
padded = padded.to(device=device)
mask = mask.to(device=device)
lens = lens.to(device=device)
return padded, padded_idf, lens, mask, tokens
def get_bert_embedding(all_sens, model, tokenizer, idf_dict,
batch_size=-1, device='cuda:0'):
padded_sens, padded_idf, lens, mask, tokens = collate_idf(all_sens,
tokenizer.tokenize, tokenizer.convert_tokens_to_ids,
idf_dict,
device=device)
if batch_size == -1: batch_size = len(all_sens)
embeddings = []
with torch.no_grad():
for i in range(0, len(all_sens), batch_size):
batch_embedding = bert_encode(model, padded_sens[i:i + batch_size],
attention_mask=mask[i:i + batch_size],
device=device)
batch_embedding = torch.stack(batch_embedding)
embeddings.append(batch_embedding)
del batch_embedding
total_embedding = torch.cat(embeddings, dim=-3)
return total_embedding, lens, mask, padded_idf, tokens
plus_mask = lambda x, m: x + (1.0 - m).unsqueeze(-1) * 1e30
minus_mask = lambda x, m: x - (1.0 - m).unsqueeze(-1) * 1e30
mul_mask = lambda x, m: x * m.unsqueeze(-1)
masked_reduce_min = lambda x, m: torch.min(plus_mask(x, m), dim=1, out=None)
masked_reduce_max = lambda x, m: torch.max(minus_mask(x, m), dim=1, out=None)
masked_reduce_mean = lambda x, m: mul_mask(x, m).sum(1) / (m.sum(1, keepdim=True) + 1e-10)
masked_reduce_geomean = lambda x, m: np.exp(mul_mask(np.log(x), m).sum(1) / (m.sum(1, keepdim=True) + 1e-10))
idf_reduce_mean = lambda x, m: mul_mask(x, m).sum(1)
idf_reduce_max = lambda x, m, idf: torch.max(mul_mask(minus_mask(x, m), idf), dim=1, out=None)
idf_reduce_min = lambda x, m, idf: torch.min(mul_mask(plus_mask(x, m), idf), dim=1, out=None)
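# Squared Euclidean distance between every row of x and every row of y, clamped at zero to absorb numerical noise.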
def pairwise_distances(x, y=None):
x_norm = (x ** 2).sum(1).view(-1, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
y_t = torch.transpose(y, 0, 1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
return torch.clamp(dist, 0.0, np.inf)
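# Slide a window of width w with stride o over a 1-D index array; the window shrinks to the whole array when it is shorter than w.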
def slide_window(a, w=3, o=2):
if a.size - w + 1 <= 0:
w = a.size
sh = (a.size - w + 1, w)
st = a.strides * 2
view = np.lib.stride_tricks.as_strided(a, strides=st, shape=sh)[0::o]
return view.copy().tolist()
def _safe_divide(numerator, denominator):
return numerator / (denominator + 0.00001)
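# Collapse each sliding window of token embeddings into a single IDF-weighted n-gram embedding and record the window's total IDF mass.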
def load_ngram(ids, embedding, idf, n, o, device='cuda:0'):
new_a = []
new_idf = []
slide_wins = slide_window(np.array(ids), w=n, o=o)
for slide_win in slide_wins:
new_idf.append(idf[slide_win].sum().item())
scale = _safe_divide(idf[slide_win], idf[slide_win].sum(0)).unsqueeze(-1).to(device)
tmp = (scale * embedding[slide_win]).sum(0)
new_a.append(tmp)
new_a = torch.stack(new_a, 0).to(device)
return new_a, new_idf
def word_mover_score(refs, hyps, idf_dict_ref, idf_dict_hyp, stop_words=[], n_gram=1, remove_subwords=True,
batch_size=256, device='cuda:0'):
preds = []
for batch_start in range(0, len(refs), batch_size):
batch_refs = refs[batch_start:batch_start + batch_size]
batch_hyps = hyps[batch_start:batch_start + batch_size]
ref_embedding, ref_lens, ref_masks, ref_idf, ref_tokens = get_bert_embedding(batch_refs, model, tokenizer,
idf_dict_ref,
device=device)
hyp_embedding, hyp_lens, hyp_masks, hyp_idf, hyp_tokens = get_bert_embedding(batch_hyps, model, tokenizer,
idf_dict_hyp,
device=device)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
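# Aggregate the last five BERT layers with element-wise max / min / mean and concatenate the three summaries along the feature dimension.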
ref_embedding_max, _ = torch.max(ref_embedding[-5:], dim=0, out=None)
hyp_embedding_max, _ = torch.max(hyp_embedding[-5:], dim=0, out=None)
ref_embedding_min, _ = torch.min(ref_embedding[-5:], dim=0, out=None)
hyp_embedding_min, _ = torch.min(hyp_embedding[-5:], dim=0, out=None)
ref_embedding_avg = ref_embedding[-5:].mean(0)
hyp_embedding_avg = hyp_embedding[-5:].mean(0)
ref_embedding = torch.cat([ref_embedding_min, ref_embedding_avg, ref_embedding_max], -1)
hyp_embedding = torch.cat([hyp_embedding_min, hyp_embedding_avg, hyp_embedding_max], -1)
for i in range(len(ref_tokens)):
if remove_subwords:
ref_ids = [k for k, w in enumerate(ref_tokens[i]) if
w not in set(string.punctuation) and '##' not in w and w not in stop_words]
hyp_ids = [k for k, w in enumerate(hyp_tokens[i]) if
w not in set(string.punctuation) and '##' not in w and w not in stop_words]
else:
ref_ids = [k for k, w in enumerate(ref_tokens[i]) if
w not in set(string.punctuation) and w not in stop_words]
hyp_ids = [k for k, w in enumerate(hyp_tokens[i]) if
w not in set(string.punctuation) and w not in stop_words]
ref_embedding_i, ref_idf_i = load_ngram(ref_ids, ref_embedding[i], ref_idf[i], n_gram, 1, device=device)
hyp_embedding_i, hyp_idf_i = load_ngram(hyp_ids, hyp_embedding[i], hyp_idf[i], n_gram, 1, device=device)
raw = torch.cat([ref_embedding_i, hyp_embedding_i], 0)
raw.div_(torch.norm(raw, dim=-1).unsqueeze(-1) + 0.000001)
distance_matrix = pairwise_distances(raw, raw)
c1 = np.zeros(len(ref_idf_i) + len(hyp_idf_i), dtype=np.double)
c2 = np.zeros(len(ref_idf_i) + len(hyp_idf_i), dtype=np.double)
c1[:len(ref_idf_i)] = ref_idf_i
c2[-len(hyp_idf_i):] = hyp_idf_i
c1 = _safe_divide(c1, np.sum(c1))
c2 = _safe_divide(c2, np.sum(c2))
score = 1 - emd(c1, c2, distance_matrix.double().cpu().numpy())
preds.append(score)
return preds
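# Illustrative usage sketch (not part of the original file); the example sentences are placeholders:
# refs = ['The cat sat on the mat.']
# hyps = ['A cat is sitting on the mat.']
# scores = word_mover_score(refs, hyps, get_idf_dict(refs), get_idf_dict(hyps), stop_words=[], n_gram=1, device='cuda:0')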
| BARTScore-main | SUM/moverscore.py |
import os
import pickle
import sys
import nltk
from mosestokenizer import *
from nltk import word_tokenize
from nltk.tokenize import sent_tokenize
nltk.download('stopwords')
detokenizer = MosesDetokenizer('en')
def read_file_to_list(file_name):
lines = []
with open(file_name, 'r', encoding='utf8') as f:
for line in f.readlines():
lines.append(line.strip())
return lines
def write_list_to_file(list_to_write, filename):
out_file = open(filename, 'w')
for line in list_to_write:
print(line, file=out_file)
out_file.flush()
out_file.close()
print(f'Saved to {filename}.')
def read_pickle(file):
with open(file, 'rb') as f:
data = pickle.load(f)
return data
def save_pickle(data, file):
with open(file, 'wb') as f:
pickle.dump(data, f)
print(f'Saved to {file}.')
def capitalize_sents(text: str):
""" Given a string, capitalize the initial letter of each sentence. """
sentences = sent_tokenize(text)
sentences = [sent.strip() for sent in sentences]
sentences = [sent.capitalize() for sent in sentences]
sentences = " ".join(sentences)
return sentences
def is_capitalized(text: str):
""" Given a string (system output etc.) , check whether it is lowercased,
or normally capitalized.
"""
return not text.islower()
def tokenize(text: str):
words = word_tokenize(text)
return " ".join(words)
def detokenize(text: str):
words = text.split(" ")
return detokenizer(words)
def use_original_bracket(text: str):
return text.replace('-lrb-', '(').replace('-rrb-', ')').replace('-LRB-', '(').replace('-RRB-', ')').replace('-lsb-',
'[').replace(
'-rsb-', ']').replace('-LSB-', '[').replace('-RSB-', ']')
# Disable print
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore print
def enablePrint():
sys.stdout = sys.__stdout__
| BARTScore-main | SUM/utils.py |
import torch
import torch.nn as nn
import traceback
from transformers import BartTokenizer, BartForConditionalGeneration
class BARTScorer:
def __init__(self, device='cuda:0', max_length=1024, checkpoint='facebook/bart-large-cnn'):
# Set up model
self.device = device
self.max_length = max_length
self.tokenizer = BartTokenizer.from_pretrained(checkpoint)
self.model = BartForConditionalGeneration.from_pretrained(checkpoint)
self.model.eval()
self.model.to(device)
# Set up loss
self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id)
self.lsm = nn.LogSoftmax(dim=1)
def load(self):
""" Load model from paraphrase finetuning """
self.model.load_state_dict(torch.load('models/bart.pth', map_location=self.device))
def score(self, srcs, tgts, batch_size):
""" Score a batch of examples """
score_list = []
for i in range(0, len(srcs), batch_size):
src_list = srcs[i: i + batch_size]
tgt_list = tgts[i: i + batch_size]
try:
with torch.no_grad():
encoded_src = self.tokenizer(
src_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
encoded_tgt = self.tokenizer(
tgt_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
src_tokens = encoded_src['input_ids'].to(self.device)
src_mask = encoded_src['attention_mask'].to(self.device)
tgt_tokens = encoded_tgt['input_ids'].to(self.device)
tgt_mask = encoded_tgt['attention_mask']
tgt_len = tgt_mask.sum(dim=1).to(self.device)
output = self.model(
input_ids=src_tokens,
attention_mask=src_mask,
labels=tgt_tokens
)
logits = output.logits.view(-1, self.model.config.vocab_size)
loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1))
loss = loss.view(tgt_tokens.shape[0], -1)
loss = loss.sum(dim=1) / tgt_len
curr_score_list = [-x.item() for x in loss]
score_list += curr_score_list
except RuntimeError:
traceback.print_exc()
print(f'source: {src_list}')
print(f'target: {tgt_list}')
exit(0)
return score_list
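# Illustrative usage sketch (not part of the original file); the texts are placeholders and the checkpoint must be available locally or downloadable:
# scorer = BARTScorer(device='cpu', checkpoint='facebook/bart-large-cnn')
# scores = scorer.score(['The source document goes here.'], ['The candidate summary goes here.'], batch_size=1)
# Each score is the length-normalized log-likelihood of the target given the source, so higher is better.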
| BARTScore-main | SUM/bart_score.py |
import argparse
import os
import time
import numpy as np
from utils import *
from tqdm import tqdm
SRC_HYPO = read_file_to_list('files/src_hypo_prompt.txt')
REF_HYPO = read_file_to_list('files/ref_hypo_prompt.txt')
class Scorer:
""" Support ROUGE-1,2,L, BERTScore, MoverScore, PRISM, BARTScore """
def __init__(self, file_path, device='cuda:0', multi_ref=False):
""" file_path: path to the pickle file
All the data are normally capitalized and tokenized, including src, ref_summ, ref_summs, and sys_summ.
"""
self.multi_ref = multi_ref
self.device = device
self.data = read_pickle(file_path)
print(f'Data loaded from {file_path}.')
self.sys_names = self.get_sys_names()
if not multi_ref:
self.single_ref_lines = self.get_single_ref_lines()
print(f'In a single-reference setting.')
else:
self.multi_ref_lines = self.get_multi_ref_lines()
self.ref_num = len(self.multi_ref_lines[0])
print(f'In a multi-reference setting.')
def get_sys_names(self):
first_id = list(self.data.keys())[0]
return list(self.data[first_id]['sys_summs'].keys())
def get_single_ref_lines(self):
ref_lines = []
for doc_id in self.data:
ref_lines.append(self.data[doc_id]['ref_summ'])
return ref_lines
def get_multi_ref_lines(self):
ref_lines = []
for doc_id in self.data:
ref_lines.append(self.data[doc_id]['ref_summs'])
return ref_lines
def get_sys_lines(self, sys_name):
sys_lines = []
for doc_id in self.data:
sys_lines.append(self.data[doc_id]['sys_summs'][sys_name]['sys_summ'])
return sys_lines
def get_src_lines(self):
src_lines = []
for doc_id in self.data:
src_lines.append(self.data[doc_id]['src'])
return src_lines
def save_data(self, path):
save_pickle(self.data, path)
def score(self, metrics):
""" metrics: list of metrics """
for metric_name in metrics:
if metric_name == 'bert_score':
from bert_score import BERTScorer
# Set up BERTScore
bert_scorer = BERTScorer(
lang='en',
idf=False,
rescale_with_baseline=True,
device=self.device
)
print(f"BERTScore setup finished (hash='{bert_scorer.hash}'). Begin calculating BERTScore.")
start = time.time()
# Keep capitalization, detokenize everything
src_lines = self.get_src_lines()
src_lines = [detokenize(line) for line in src_lines]
ref_lines = self.single_ref_lines if not self.multi_ref else self.multi_ref_lines
for sys_name in tqdm(self.sys_names):
sys_lines = self.get_sys_lines(sys_name)
P_src_hypo, R_src_hypo, F_src_hypo = bert_scorer.score(sys_lines, src_lines)
if not self.multi_ref:
P_hypo_ref, R_hypo_ref, F_hypo_ref = bert_scorer.score(sys_lines, ref_lines)
else:
total_num = len(sys_lines)
P_hypo_ref, R_hypo_ref, F_hypo_ref = [], [], []
for i in range(self.ref_num):
ref_list = [x[i] for x in ref_lines]
curr_P, curr_R, curr_F = bert_scorer.score(sys_lines, ref_list)
P_hypo_ref.append(curr_P.numpy())
R_hypo_ref.append(curr_R.numpy())
F_hypo_ref.append(curr_F.numpy())
counter = 0
for doc_id in self.data:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
'bert_score_p_src_hypo': P_src_hypo[counter],
'bert_score_r_src_hypo': R_src_hypo[counter],
'bert_score_f_src_hypo': F_src_hypo[counter]
})
if not self.multi_ref:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
'bert_score_p_hypo_ref': P_hypo_ref[counter],
'bert_score_r_hypo_ref': R_hypo_ref[counter],
'bert_score_f_hypo_ref': F_hypo_ref[counter],
})
else:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
'bert_score_p_hypo_ref_mean': np.mean(P_hypo_ref, axis=0)[counter],
'bert_score_r_hypo_ref_mean': np.mean(R_hypo_ref, axis=0)[counter],
'bert_score_f_hypo_ref_mean': np.mean(F_hypo_ref, axis=0)[counter],
'bert_score_p_hypo_ref_max': np.max(P_hypo_ref, axis=0)[counter],
'bert_score_r_hypo_ref_max': np.max(R_hypo_ref, axis=0)[counter],
'bert_score_f_hypo_ref_max': np.max(F_hypo_ref, axis=0)[counter],
})
counter += 1
print(f'Finished calculating BERTScore, time passed {time.time() - start}s.')
elif metric_name == 'mover_score':
from moverscore import word_mover_score, get_idf_dict
# Set up MoverScore
with open('files/stopwords.txt', 'r', encoding='utf-8') as f:
self.stop_words = set(f.read().strip().split(' '))
# IDF for all system hypos, used for MoverScore
self.sys_lines = []
for name in self.sys_names:
sys_lines = self.get_sys_lines(name)
self.sys_lines.extend(sys_lines)
self.idf_hyps = get_idf_dict(self.sys_lines)
print(f'MoverScore setup finished. Begin calculating MoverScore.')
start = time.time()
# Keep capitalization, detokenize everything
src_lines = self.get_src_lines()
idf_srcs = get_idf_dict(src_lines)
if not self.multi_ref:
ref_lines = self.single_ref_lines
idf_refs = get_idf_dict(ref_lines)
else:
ref_lines = self.multi_ref_lines
idf_refs = get_idf_dict(sum(ref_lines, []))
for sys_name in tqdm(self.sys_names):
sys_lines = self.get_sys_lines(sys_name)
scores_src_hypo = word_mover_score(src_lines, sys_lines, idf_srcs, self.idf_hyps, self.stop_words,
n_gram=1, remove_subwords=True, batch_size=48, device=self.device)
if not self.multi_ref:
scores_hypo_ref = word_mover_score(ref_lines, sys_lines, idf_refs, self.idf_hyps, self.stop_words,
n_gram=1, remove_subwords=True, batch_size=48, device=self.device)
else:
scores_hypo_ref = []
for i in range(self.ref_num):
ref_list = [x[i] for x in ref_lines]
curr_scores = word_mover_score(ref_list, sys_lines, idf_refs, self.idf_hyps,
self.stop_words, n_gram=1, remove_subwords=True,
batch_size=48, device=self.device)
scores_hypo_ref.append(np.array(curr_scores))
counter = 0
for doc_id in self.data:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
"mover_score_src_hypo": scores_src_hypo[counter]
})
if not self.multi_ref:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
"mover_score_hypo_ref": scores_hypo_ref[counter]
})
else:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
"mover_score_hypo_ref_mean": np.mean(scores_hypo_ref, axis=0)[counter],
"mover_score_hypo_ref_max": np.max(scores_hypo_ref, axis=0)[counter],
})
counter += 1
print(f'Finished calculating MoverScore, time passed {time.time() - start}s.')
elif metric_name == 'rouge':
from gehrmann_rouge_opennmt.rouge_baselines.baseline import baseline_main
def rouge(dic):
""" Get r, p, f scores """
r1_, r2_, rl_ = [], [], []
for k in dic:
r1_.append([dic[k]['rouge_1_recall'], dic[k]['rouge_1_precision'], dic[k]['rouge_1_f_score']])
r2_.append([dic[k]['rouge_2_recall'], dic[k]['rouge_2_precision'], dic[k]['rouge_2_f_score']])
rl_.append([dic[k]['rouge_l_recall'], dic[k]['rouge_l_precision'], dic[k]['rouge_l_f_score']])
return r1_, r2_, rl_
print(f'Begin calculating ROUGE.')
start = time.time()
blockPrint()
src_lines = self.get_src_lines()
src_lines = [line.lower() for line in src_lines]
write_list_to_file(src_lines, 'src.txt')
if not self.multi_ref:
ref_lines = [line.lower() for line in self.single_ref_lines]
else:
ref_lines = [[text.lower() for text in line] for line in self.multi_ref_lines]
for sys_name in tqdm(self.sys_names):
sys_lines = self.get_sys_lines(sys_name)
sys_lines = [line.lower() for line in sys_lines]
rouge1_hypo_ref_scores, rouge2_hypo_ref_scores, rougel_hypo_ref_scores = [], [], []
write_list_to_file(sys_lines, 'hypo.txt')
args = argparse.Namespace(check_repeats=True, delete=True, get_each_score=True, stemming=True,
method='sent_no_tag', n_bootstrap=1000, run_google_rouge=False,
run_rouge=True, source='./hypo.txt', target='./src.txt',
ref_sep='||NEVER||', num_ref=1, temp_dir='./temp/')
scores = baseline_main(args, return_pyrouge_scores=True)['individual_score_results']
rouge1_src_hypo_scores, rouge2_src_hypo_scores, rougel_src_hypo_scores = rouge(scores)
if not self.multi_ref:
write_list_to_file(ref_lines, 'ref.txt')
args = argparse.Namespace(check_repeats=True, delete=True, get_each_score=True, stemming=True,
method='sent_no_tag', n_bootstrap=1000, run_google_rouge=False,
run_rouge=True, source='./hypo.txt', target='./ref.txt',
ref_sep='||NEVER||', num_ref=1, temp_dir='./temp/')
scores = baseline_main(args, return_pyrouge_scores=True)['individual_score_results']
rouge1_hypo_ref_scores, rouge2_hypo_ref_scores, rougel_hypo_ref_scores = rouge(scores)
else:
for i in range(self.ref_num):
ref_list = [x[i] for x in ref_lines]
write_list_to_file(ref_list, 'ref.txt')
args = argparse.Namespace(check_repeats=True, delete=True, get_each_score=True,
stemming=True,
method='sent_no_tag', n_bootstrap=1000, run_google_rouge=False,
run_rouge=True, source='./hypo.txt', target='./ref.txt',
ref_sep='||NEVER||', num_ref=1, temp_dir='./temp/')
scores = baseline_main(args, return_pyrouge_scores=True)['individual_score_results']
r1, r2, rl = rouge(scores)
rouge1_hypo_ref_scores.append(r1)
rouge2_hypo_ref_scores.append(r2)
rougel_hypo_ref_scores.append(rl)
counter = 0
for doc_id in self.data:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
'rouge1_r_src_hypo': rouge1_src_hypo_scores[counter][0],
'rouge1_p_src_hypo': rouge1_src_hypo_scores[counter][1],
'rouge1_f_src_hypo': rouge1_src_hypo_scores[counter][2],
'rouge2_r_src_hypo': rouge2_src_hypo_scores[counter][0],
'rouge2_p_src_hypo': rouge2_src_hypo_scores[counter][1],
'rouge2_f_src_hypo': rouge2_src_hypo_scores[counter][2],
'rougel_r_src_hypo': rougel_src_hypo_scores[counter][0],
'rougel_p_src_hypo': rougel_src_hypo_scores[counter][1],
'rougel_f_src_hypo': rougel_src_hypo_scores[counter][2]
})
if not self.multi_ref:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
'rouge1_r_hypo_ref': rouge1_hypo_ref_scores[counter][0],
'rouge1_p_hypo_ref': rouge1_hypo_ref_scores[counter][1],
'rouge1_f_hypo_ref': rouge1_hypo_ref_scores[counter][2],
'rouge2_r_hypo_ref': rouge2_hypo_ref_scores[counter][0],
'rouge2_p_hypo_ref': rouge2_hypo_ref_scores[counter][1],
'rouge2_f_hypo_ref': rouge2_hypo_ref_scores[counter][2],
'rougel_r_hypo_ref': rougel_hypo_ref_scores[counter][0],
'rougel_p_hypo_ref': rougel_hypo_ref_scores[counter][1],
'rougel_f_hypo_ref': rougel_hypo_ref_scores[counter][2],
})
else:
rouge1_hypo_ref_scores_mean = np.mean(rouge1_hypo_ref_scores, axis=0)
rouge2_hypo_ref_scores_mean = np.mean(rouge2_hypo_ref_scores, axis=0)
rougel_hypo_ref_scores_mean = np.mean(rougel_hypo_ref_scores, axis=0)
rouge1_hypo_ref_scores_max = np.max(rouge1_hypo_ref_scores, axis=0)
rouge2_hypo_ref_scores_max = np.max(rouge2_hypo_ref_scores, axis=0)
rougel_hypo_ref_scores_max = np.max(rougel_hypo_ref_scores, axis=0)
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
'rouge1_r_hypo_ref_mean': rouge1_hypo_ref_scores_mean[counter][0],
'rouge1_p_hypo_ref_mean': rouge1_hypo_ref_scores_mean[counter][1],
'rouge1_f_hypo_ref_mean': rouge1_hypo_ref_scores_mean[counter][2],
'rouge2_r_hypo_ref_mean': rouge2_hypo_ref_scores_mean[counter][0],
'rouge2_p_hypo_ref_mean': rouge2_hypo_ref_scores_mean[counter][1],
'rouge2_f_hypo_ref_mean': rouge2_hypo_ref_scores_mean[counter][2],
'rougel_r_hypo_ref_mean': rougel_hypo_ref_scores_mean[counter][0],
'rougel_p_hypo_ref_mean': rougel_hypo_ref_scores_mean[counter][1],
'rougel_f_hypo_ref_mean': rougel_hypo_ref_scores_mean[counter][2],
'rouge1_r_hypo_ref_max': rouge1_hypo_ref_scores_max[counter][0],
'rouge1_p_hypo_ref_max': rouge1_hypo_ref_scores_max[counter][1],
'rouge1_f_hypo_ref_max': rouge1_hypo_ref_scores_max[counter][2],
'rouge2_r_hypo_ref_max': rouge2_hypo_ref_scores_max[counter][0],
'rouge2_p_hypo_ref_max': rouge2_hypo_ref_scores_max[counter][1],
'rouge2_f_hypo_ref_max': rouge2_hypo_ref_scores_max[counter][2],
'rougel_r_hypo_ref_max': rougel_hypo_ref_scores_max[counter][0],
'rougel_p_hypo_ref_max': rougel_hypo_ref_scores_max[counter][1],
'rougel_f_hypo_ref_max': rougel_hypo_ref_scores_max[counter][2],
})
counter += 1
enablePrint()
os.system('rm -rf hypo.txt ref.txt src.txt')
print(f'Finished calculating ROUGE, time passed {time.time() - start}s.')
elif metric_name == 'prism':
from prism import Prism
# Set up Prism
self.prism = Prism(model_dir='./models/m39v1/', lang='en')
print(f'PRISM setup finished. Begin calculating PRISM.')
start = time.time()
# Keep capitalization, detokenize everything
src_lines = self.get_src_lines()
src_lines = [detokenize(line) for line in src_lines]
if not self.multi_ref:
ref_lines = [detokenize(line) for line in self.single_ref_lines]
else:
ref_lines = [[detokenize(text) for text in line] for line in self.multi_ref_lines]
for sys_name in tqdm(self.sys_names):
sys_lines = self.get_sys_lines(sys_name)
sys_lines = [detokenize(line) for line in sys_lines]
# Calculate Both src-based and ref-based
src_hypo_scores = self.prism.score(cand=sys_lines, src=src_lines, segment_scores=True)
if not self.multi_ref:
ref_hypo_scores, hypo_ref_scores, scores = self.prism.score(cand=sys_lines, ref=ref_lines,
segment_scores=True)
else:
ref_hypo_scores, hypo_ref_scores, scores = [], [], []
for i in range(self.ref_num):
ref_list = [x[i] for x in ref_lines]
curr_ref_hypo_scores, curr_hypo_ref_scores, curr_scores = self.prism.score(cand=sys_lines,
ref=ref_list,
segment_scores=True)
ref_hypo_scores.append(curr_ref_hypo_scores)
hypo_ref_scores.append(curr_hypo_ref_scores)
scores.append(curr_scores)
counter = 0
for doc_id in self.data:
self.data[doc_id]['sys_summs'][sys_name]['scores']["prism_src_hypo"] = src_hypo_scores[counter]
if not self.multi_ref:
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_ref_hypo'] = ref_hypo_scores[counter]
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_hypo_ref'] = hypo_ref_scores[counter]
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_avg'] = scores[counter]
else:
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_ref_hypo_mean'] = np.mean(ref_hypo_scores, axis=0)[counter]
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_hypo_ref_mean'] = np.mean(hypo_ref_scores, axis=0)[counter]
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_avg_mean'] = np.mean(scores, axis=0)[counter]
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_ref_hypo_max'] = np.max(ref_hypo_scores, axis=0)[counter]
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_hypo_ref_max'] = np.max(hypo_ref_scores, axis=0)[counter]
self.data[doc_id]['sys_summs'][sys_name]['scores']['prism_avg_max'] = np.max(scores, axis=0)[counter]
counter += 1
print(f'Finished calculating PRISM, time passed {time.time() - start}s.')
elif metric_name == 'bart_score' or metric_name == 'bart_score_cnn' or metric_name == 'bart_score_para':
""" Vanilla BARTScore, BARTScore-CNN, BARTScore-CNN-Para """
from bart_score import BARTScorer
# Set up BARTScore
if 'cnn' in metric_name:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
elif 'para' in metric_name:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
bart_scorer.load()
else:
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large')
print(f'BARTScore setup finished. Begin calculating BARTScore.')
start = time.time()
# Keep capitalization, detokenize everything
src_lines = self.get_src_lines()
src_lines = [detokenize(line) for line in src_lines]
if not self.multi_ref:
ref_lines = [detokenize(line) for line in self.single_ref_lines]
else:
ref_lines = [[detokenize(text) for text in line] for line in self.multi_ref_lines]
for sys_name in tqdm(self.sys_names):
sys_lines = self.get_sys_lines(sys_name)
sys_lines = [detokenize(line) for line in sys_lines]
src_hypo = bart_scorer.score(src_lines, sys_lines, batch_size=4)
if not self.multi_ref:
ref_hypo = np.array(bart_scorer.score(ref_lines, sys_lines, batch_size=4))
hypo_ref = np.array(bart_scorer.score(sys_lines, ref_lines, batch_size=4))
else:
ref_hypo, hypo_ref = [], []
for i in range(self.ref_num):
ref_list = [x[i] for x in ref_lines]
curr_ref_hypo = np.array(bart_scorer.score(ref_list, sys_lines, batch_size=4))
curr_hypo_ref = np.array(bart_scorer.score(sys_lines, ref_list, batch_size=4))
ref_hypo.append(curr_ref_hypo)
hypo_ref.append(curr_hypo_ref)
if not self.multi_ref:
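# Combine the two scoring directions (ref->hypo and hypo->ref) into an arithmetic-mean score and a harmonic-mean-style score.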
avg_f = (ref_hypo + hypo_ref) / 2
harm_f = (ref_hypo * hypo_ref) / (ref_hypo + hypo_ref)
else:
ref_hypo_mean = np.mean(ref_hypo, axis=0)
hypo_ref_mean = np.mean(hypo_ref, axis=0)
ref_hypo_max = np.max(ref_hypo, axis=0)
hypo_ref_max = np.max(hypo_ref, axis=0)
avg_f_mean = (ref_hypo_mean + hypo_ref_mean) / 2
harm_f_mean = (ref_hypo_mean * hypo_ref_mean) / (ref_hypo_mean + hypo_ref_mean)
avg_f_max = (ref_hypo_max + hypo_ref_max) / 2
harm_f_max = (ref_hypo_max * hypo_ref_max) / (ref_hypo_max + hypo_ref_max)
counter = 0
for doc_id in self.data:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
f'{metric_name}_src_hypo': src_hypo[counter],
})
if not self.multi_ref:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
f'{metric_name}_hypo_ref': hypo_ref[counter],
f'{metric_name}_ref_hypo': ref_hypo[counter],
f'{metric_name}_avg_f': avg_f[counter],
f'{metric_name}_harm_f': harm_f[counter]
})
else:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
f'{metric_name}_ref_hypo_mean': ref_hypo_mean[counter],
f'{metric_name}_hypo_ref_mean': hypo_ref_mean[counter],
f'{metric_name}_avg_f_mean': avg_f_mean[counter],
f'{metric_name}_harm_f_mean': harm_f_mean[counter],
f'{metric_name}_ref_hypo_max': ref_hypo_max[counter],
f'{metric_name}_hypo_ref_max': hypo_ref_max[counter],
f'{metric_name}_avg_f_max': avg_f_max[counter],
f'{metric_name}_harm_f_max': harm_f_max[counter]
})
counter += 1
print(f'Finished calculating BARTScore, time passed {time.time() - start}s.')
elif metric_name.startswith('prompt'):
""" BARTScore adding prompts """
from bart_score import BARTScorer
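# prefix_prompt prepends the prompt to each line (used on the decoder/target side); suffix_prompt appends it (used on the encoder/source side).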
def prefix_prompt(l, p):
new_l = []
for x in l:
new_l.append(p + ', ' + x)
return new_l
def suffix_prompt(l, p):
new_l = []
for x in l:
new_l.append(x + ' ' + p + ',')
return new_l
if 'cnn' in metric_name:
name = 'bart_score_cnn'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
elif 'para' in metric_name:
name = 'bart_score_para'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large-cnn')
bart_scorer.load()
else:
name = 'bart_score'
bart_scorer = BARTScorer(device=self.device, checkpoint='facebook/bart-large')
print(f'BARTScore-P setup finished. Begin calculating BARTScore-P.')
start = time.time()
# Keep capitalization, detokenize everything
src_lines = self.get_src_lines()
src_lines = [detokenize(line) for line in src_lines]
if not self.multi_ref:
ref_lines = [detokenize(line) for line in self.single_ref_lines]
else:
ref_lines = [[detokenize(text) for text in line] for line in self.multi_ref_lines]
# SRC -> HYPO prompt
if 'src' in metric_name:
for prompt in SRC_HYPO:
for sys_name in tqdm(self.sys_names):
sys_lines = self.get_sys_lines(sys_name)
sys_lines = [detokenize(line) for line in sys_lines]
src_hypo_en = bart_scorer.score(suffix_prompt(src_lines, prompt), sys_lines, batch_size=4)
src_hypo_de = bart_scorer.score(src_lines, prefix_prompt(sys_lines, prompt), batch_size=4)
counter = 0
for doc_id in self.data:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
f'{name}_src_hypo_en_{prompt}': src_hypo_en[counter],
f'{name}_src_hypo_de_{prompt}': src_hypo_de[counter]
})
counter += 1
# REF <-> HYPO prompt
if 'ref' in metric_name:
for prompt in REF_HYPO:
for sys_name in tqdm(self.sys_names):
sys_lines = self.get_sys_lines(sys_name)
sys_lines = [detokenize(line) for line in sys_lines]
if not self.multi_ref:
ref_hypo_en = np.array(
bart_scorer.score(suffix_prompt(ref_lines, prompt), sys_lines, batch_size=4))
hypo_ref_en = np.array(
bart_scorer.score(suffix_prompt(sys_lines, prompt), ref_lines, batch_size=4))
ref_hypo_de = np.array(
bart_scorer.score(ref_lines, prefix_prompt(sys_lines, prompt), batch_size=4))
hypo_ref_de = np.array(
bart_scorer.score(sys_lines, prefix_prompt(ref_lines, prompt), batch_size=4))
else:
ref_hypo_en = np.zeros(len(sys_lines))
hypo_ref_en = np.zeros(len(sys_lines))
ref_hypo_de = np.zeros(len(sys_lines))
hypo_ref_de = np.zeros(len(sys_lines))
for i in range(self.ref_num):
ref_list = [x[i] for x in ref_lines]
curr_ref_hypo_en = np.array(
bart_scorer.score(suffix_prompt(ref_list, prompt), sys_lines, batch_size=4))
curr_hypo_ref_en = np.array(
bart_scorer.score(suffix_prompt(sys_lines, prompt), ref_list, batch_size=4))
curr_ref_hypo_de = np.array(
bart_scorer.score(ref_list, prefix_prompt(sys_lines, prompt), batch_size=4))
curr_hypo_ref_de = np.array(
bart_scorer.score(sys_lines, prefix_prompt(ref_list, prompt), batch_size=4))
ref_hypo_en += curr_ref_hypo_en
hypo_ref_en += curr_hypo_ref_en
ref_hypo_de += curr_ref_hypo_de
hypo_ref_de += curr_hypo_ref_de
ref_hypo_en = ref_hypo_en / self.ref_num
hypo_ref_en = hypo_ref_en / self.ref_num
ref_hypo_de = ref_hypo_de / self.ref_num
hypo_ref_de = hypo_ref_de / self.ref_num
avg_f_en = (ref_hypo_en + hypo_ref_en) / 2
avg_f_de = (ref_hypo_de + hypo_ref_de) / 2
harm_f_en = (ref_hypo_en * hypo_ref_en) / (ref_hypo_en + hypo_ref_en)
harm_f_de = (ref_hypo_de * hypo_ref_de) / (ref_hypo_de + hypo_ref_de)
counter = 0
for doc_id in self.data:
self.data[doc_id]['sys_summs'][sys_name]['scores'].update({
f'{name}_hypo_ref_en_{prompt}': hypo_ref_en[counter],
f'{name}_ref_hypo_en_{prompt}': ref_hypo_en[counter],
f'{name}_avg_f_en_{prompt}': avg_f_en[counter],
f'{name}_harm_f_en_{prompt}': harm_f_en[counter],
f'{name}_hypo_ref_de_{prompt}': hypo_ref_de[counter],
f'{name}_ref_hypo_de_{prompt}': ref_hypo_de[counter],
f'{name}_avg_f_de_{prompt}': avg_f_de[counter],
f'{name}_harm_f_de_{prompt}': harm_f_de[counter]
})
counter += 1
print(f'Finished calculating BARTScore-P, time passed {time.time() - start}s.')
else:
raise NotImplementedError
def main():
parser = argparse.ArgumentParser(description='Scorer parameters')
parser.add_argument('--file', type=str, required=True,
help='The data to load from.')
parser.add_argument('--device', type=str, default='cuda:0',
help='The device to run on.')
parser.add_argument('--multi_ref', action='store_true', default=False,
help='Whether we are using multiple references to calculate scores.')
parser.add_argument('--output', type=str, required=True,
help='The output path to save the calculated scores.')
parser.add_argument('--bert_score', action='store_true', default=False,
help='Whether to calculate BERTScore')
parser.add_argument('--mover_score', action='store_true', default=False,
help='Whether to calculate MoverScore')
parser.add_argument('--rouge', action='store_true', default=False,
help='Whether to calculate ROUGE')
parser.add_argument('--bart_score', action='store_true', default=False,
help='Whether to calculate BARTScore')
parser.add_argument('--bart_score_cnn', action='store_true', default=False,
help='Whether to calculate BARTScore-CNN')
parser.add_argument('--bart_score_para', action='store_true', default=False,
help='Whether to calculate BARTScore-Para')
parser.add_argument('--prism', action='store_true', default=False,
help='Whether to calculate PRISM')
parser.add_argument('--prompt', type=str, default=None,
help='Whether to calculate BARTScore-P. Can be bart_src, bart_ref, bart_cnn_src, '
'bart_cnn_ref, bart_para_src, bart_para_ref')
args = parser.parse_args()
scorer = Scorer(args.file, args.device, args.multi_ref)
METRICS = []
if args.bert_score:
METRICS.append('bert_score')
if args.mover_score:
METRICS.append('mover_score')
if args.rouge:
METRICS.append('rouge')
if args.bart_score:
METRICS.append('bart_score')
if args.bart_score_cnn:
METRICS.append('bart_score_cnn')
if args.bart_score_para:
METRICS.append('bart_score_para')
if args.prism:
METRICS.append('prism')
if args.prompt is not None:
prompt = args.prompt
assert prompt in ['bart_src', 'bart_ref', 'bart_cnn_src',
'bart_cnn_ref', 'bart_para_src', 'bart_para_ref']
METRICS.append(f'prompt_{prompt}')
scorer.score(METRICS)
scorer.save_data(args.output)
if __name__ == '__main__':
main()
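# Illustrative invocation (file paths are placeholders):
#   python score.py --file data.pkl --output scored.pkl --device cuda:0 --rouge --bart_score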
| BARTScore-main | SUM/score.py |
BARTScore-main | SUM/gehrmann_rouge_opennmt/__init__.py |
|
#!/usr/bin/env python
from __future__ import print_function, division
import argparse, os, re, time
import pdb
from gehrmann_rouge_opennmt.rouge_baselines.g_rouge import rouge
from gehrmann_rouge_opennmt.rouge_baselines.util import has_repeat, n_grams
from functools import reduce
import numpy as np
def split_sentences(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
bare_sents = re.findall(r'%s (.+?) %s' % (sentence_start_tag, sentence_end_tag), article)
return bare_sents
# convenient decorator
def register_to_registry(registry):
def _register(func):
registry[func.__name__] = func
return func
return _register
baseline_registry = {}
register = register_to_registry(baseline_registry)
# baseline methods
@register
def first_sentence(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
''' use sentence tags to output the first sentence of an article as its summary. '''
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[:1]
@register
def first_three_sentences(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[:3]
@register
def first_two_sentences(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[:2]
@register
def verbatim(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents
@register
def pre_sent_tag_verbatim(article):
sents = article.split('<t>')
good_sents = []
for sent in sents:
sent = sent.strip()
if len(sent.split()) > 0:
good_sents.append(sent)
# print(good_sents)
return good_sents
@register
def sent_tag_verbatim(article):
sents = split_sentences(article, '<t>', '</t>')
# print(sents)
return sents
@register
def sent_no_tag(article, eos='.'):
sents = article.split(" %s " % eos)
sents = [sent + " ." for sent in sents]
return sents
@register
def sent_tag_p_verbatim(article):
bare_article = article.strip()
bare_article += ' </t>'
sents = split_sentences(bare_article, '<t>', '</t>')
# print(sents)
return sents
@register
def adhoc_old0(article):
sents = split_sentences(article, '<t>', '</t>')
good_sents = []
for sent in sents:
# Remove <unk>
tokens = [x for x in sent.split() if x != '<unk>']
# Ignore length-1 sentences
if len(tokens) > 1:
good_sents.append(' '.join(tokens))
return good_sents
@register
def full(article):
return [article]
@register
def adhoc_base(article):
article += ' </t> </t>'
first_end = article.index(' </t> </t>')
article = article[:first_end] + ' </t>'
sents = split_sentences(article)
good_sents = []
for sent in sents:
# Remove <unk>
tokens = [x for x in sent.split() if x != '<unk>']
# Ignore length-1 sentences
if len(tokens) > 1:
good_sents.append(' '.join(tokens))
return good_sents
@register
def no_sent_tag(article):
article = article.strip()
try:
if article[-1] != '.':
article += ' .'
except:
article += ' .'
good_sents = list(re.findall(r'.+?\.', article))
return good_sents
@register
def second_sentence(article, sentence_start_tag='<t>', sentence_end_tag='</t>'):
sents = split_sentences(article, sentence_start_tag, sentence_end_tag)
return sents[1:2]
def baseline_main(args, return_pyrouge_scores=False):
# Check the presence of target file
if args.run_rouge or args.run_google_rouge:
assert args.target is not None, 'Need the path to target file `--target` for ROUGE evaluations.'
process = baseline_registry[args.method]
# Read and preprocess generated summary
n_source = 0
references = []
summaries = []
with open(args.source, 'r') as f:
for i, article in enumerate(f):
summary = process(article)
summaries.append(summary)
n_source += 1
mean_num_sent_per_summ = np.mean([len(summ) for summ in summaries])
assert mean_num_sent_per_summ > 0, "Expect to read > 0 sentences per summary!"
# Read and preprocess a single candidate reference summary for each example
if args.run_rouge or args.run_google_rouge:
n_target = 0
with open(args.target, 'r') as f:
for i, article in enumerate(f):
# For us, method is 'sent_tag_verbatim'
if args.ref_sep: # pgour added this to handle multiple reference texts
# pdb.set_trace()
raw_candidates_l = article.split(args.ref_sep)
candidates_l = []
for raw_candidate in raw_candidates_l:
if args.method == "full":
candidate = [raw_candidate]
else:
candidate = sent_no_tag(raw_candidate)
candidates_l.append(candidate)
assert len(candidates_l) == args.num_ref, f"len(candidates_l) {len(candidates_l)} mismatches " \
f"args.num_ref {args.num_ref}"
references.append(candidates_l)
n_target += 1
else:
if args.method == "full":
candidate = [article]
else:
candidate = sent_no_tag(article)
references.append([candidate])
n_target += 1
# pdb.set_trace()
mean_num_sent_per_ref = np.mean([len(candidate[0]) for candidate in references])
assert mean_num_sent_per_ref > 0, "Expect to read > 0 sentences per reference summary!"
# logger.info(f"read {mean_num_sent_per_summ:.2f} and {mean_num_sent_per_ref:.2f} sentences on average per "
# f"generated and system summary.")
assert n_source == n_target, 'Source and target must have the same number of samples.'
# Run official ROUGE evaluation
if args.run_rouge:
# logger.info("getting rouge")
from gehrmann_rouge_opennmt.rouge_baselines.util import evaluate_rouge
rouge_args = [
'-c', 95, # 95% confidence intervals, necessary for the dictionary conversion routine
'-n', 2, # up to bigram
'-a',
'-r', args.n_bootstrap, # the number of bootstrap samples for confidence bounds
]
# if args.stemming:
# # add the stemming flag
# rouge_args += ['-m']
if args.get_each_score:
# add the 'per-evaluation scores' flag
rouge_args += ['-d']
# evaluate with official ROUGE script v1.5.5
scores = evaluate_rouge(summaries, references, remove_temp=args.delete, rouge_args=rouge_args,
get_each_score=args.get_each_score, temp_dir=args.temp_dir)
if return_pyrouge_scores:
# We always return from here, below this line is not important
return scores
# Run Google's ROUGE evaluation. Not used by us.
if args.run_google_rouge:
# Based on https://github.com/google/seq2seq, modified to support multi-sentence summaries
t0 = time.time()
g_scores = rouge(summaries, [candidates[0] for candidates in references])
dt = time.time() - t0
g_headers = ['rouge_1/r_score', 'rouge_1/p_score', 'rouge_1/f_score', 'rouge_2/r_score', 'rouge_2/p_score',
'rouge_2/f_score', 'rouge_l/r_score', 'rouge_l/p_score', 'rouge_l/f_score']
print('* evaluated {} samples, took {:.3f}s, averaging {:.3f}s/sample'.format(n_target, dt, dt / n_target))
# Evaluate self-repetitions
if args.check_repeats:
t0 = time.time()
# Counts
n_sent_repeats = 0
ngram_repeats = {2: 0, 4: 0, 8: 0, 16: 0, 32: 0}
for summary in summaries:
# Sentence-level repeats
# Count of samples containing self-repetitions of a full sentence
n_sent_repeats += has_repeat(summary)
# N-gram repeats
for n in ngram_repeats.keys():
# Respect sentence boundary
grams = reduce(lambda x, y: x + y, [n_grams(sent.split(), n) for sent in summary], [])
ngram_repeats[n] += has_repeat(grams)
dt = time.time() - t0
print('* portion of samples that contains self-repetitions')
# Sort the statistics by importance
str_keys = ['full-sent'] + list(map(lambda n: '%d-gram' % n, sorted(ngram_repeats.keys(), reverse=True)))
print(','.join(str_keys))
print("{:.2f}%".format(n_sent_repeats / n_source * 100), end=',\t')
for n in sorted(ngram_repeats.keys(), reverse=True):
print("{:.2f}%".format(ngram_repeats[n] / n_source * 100), end=',\t')
print()
print('* evaluated {} samples, took {:.3f}s, averaging {:.3f}s/sample'.format(n_source, dt, dt / n_source))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source', required=True,
help='Path to the tokenized source file. One sample per line with sentence tags.')
parser.add_argument('-t', '--target', required=False,
help='Path to the tokenized target file. One sample per line with sentence tags.')
parser.add_argument('-m', '--method', default='first_sentence', choices=baseline_registry.keys(),
help='Baseline method to use.')
parser.add_argument('-d', '--delete', action='store_true',
help='Delete the temporary files created during evaluation.')
parser.add_argument('-g', '--google', dest='run_google_rouge', action='store_true',
help='Evaluate with the ROUGE implementation from google/seq2seq.')
parser.add_argument('--no-rouge', dest='run_rouge', action='store_false', help='Skip ROUGE evaluation.')
parser.add_argument('-r', '--check-repeats', action='store_true', help='Evaluate self repeats.')
parser.add_argument('--ref_sep', type=str, default=None, help='if there are multiple references per '
'line in ref file, they are separated by this separator.') # pgour added
parser.add_argument('--num_ref', type=int, default=1,
help='number of ref summaries for each doc (per line in file)')
# ROUGE arguments
parser.add_argument('--no-stemming', dest='stemming', action='store_false', help='Turn off stemming in ROUGE.')
parser.add_argument('--n-bootstrap', type=int, default=1000, help='The number of bootstrap samples used in ROUGE.')
parser.add_argument('--get_each_score', action='store_true', help='produce separate score of each document-summary')
args = parser.parse_args()
# pgour: sanity check
if args.num_ref != 1:
assert (args.ref_sep is not None), "if more than 1 ref per summary, expected a --ref_sep"
baseline_main(args)
| BARTScore-main | SUM/gehrmann_rouge_opennmt/rouge_baselines/baseline.py |
from __future__ import print_function
import pdb
from six.moves import xrange
# from pyrouge import Rouge155
from gehrmann_rouge_opennmt.rouge_baselines.Rouge155 import Rouge155
import tempfile, os, glob, shutil
import numpy as np
import random
def evaluate_rouge(summaries, references, remove_temp=False, rouge_args=[], get_each_score=False, temp_dir=None):
'''
Args:
summaries: [[sentence]]. Each summary is a list of strings (sentences)
references: [[[sentence]]]. Each reference is a list of candidate summaries.
remove_temp: bool. Whether to remove the temporary files created during evaluation.
rouge_args: [string]. A list of arguments to pass to the ROUGE CLI.
get_each_score: bool. Whether to also return per-document scores.
temp_dir: string. Directory in which the temporary evaluation directories are created.
'''
# temp_dir = tempfile.mkdtemp()
rand_dir_name = str(random.randint(0, 1000000))
while os.path.exists(os.path.join(temp_dir, rand_dir_name)):
rand_dir_name = str(random.randint(0, 1000000))
temp_dir = os.path.join(temp_dir, rand_dir_name)
system_dir = os.path.join(temp_dir, 'system')
model_dir = os.path.join(temp_dir, 'model')
# directory for generated summaries
os.makedirs(system_dir)
# directory for reference summaries
os.makedirs(model_dir)
print(temp_dir, system_dir, model_dir)
# pdb.set_trace()
assert len(summaries) == len(references)
for i, (summary, candidates) in enumerate(zip(summaries, references)):
summary_fn = '%i.txt' % i
for j, candidate in enumerate(candidates):
candidate_fn = '%i.%i.txt' % (i, j)
with open(os.path.join(model_dir, candidate_fn), 'w') as f:
f.write('\n'.join(candidate))
with open(os.path.join(system_dir, summary_fn), 'w') as f:
f.write('\n'.join(summary))
args_str = ' '.join(map(str, rouge_args))
rouge = Rouge155(rouge_args=args_str)
rouge.system_dir = system_dir
rouge.model_dir = model_dir
rouge.system_filename_pattern = '(\d+).txt'
rouge.model_filename_pattern = '#ID#.\d+.txt'
output = rouge.convert_and_evaluate()
r = rouge.output_to_dict(output, get_each_score=get_each_score)
# remove the created temporary files
if remove_temp:
shutil.rmtree(temp_dir)
return r
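# Enumerate n-grams by start position; note that the strict inequality skips the final n-gram of the sequence.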
def n_grams(tokens, n):
l = len(tokens)
return [tuple(tokens[i:i + n]) for i in xrange(l) if i + n < l]
def has_repeat(elements):
d = set(elements)
return len(d) < len(elements)
if __name__ == '__main__':
article = [
u"marseille prosecutor says `` so far no videos were used in the crash investigation '' despite media reports .",
u"journalists at bild and paris match are `` very confident '' the video clip is real , an editor says .",
u'andreas lubitz had informed his lufthansa training school of an episode of severe depression , airline says .',
]
candidates = [article]
references = [candidates]
summaries = [article]
rouge_args = [
'-c', 95,
'-U',
'-r', 1,
'-n', 2,
'-a',
]
print(evaluate_rouge(summaries, references, True, rouge_args))
| BARTScore-main | SUM/gehrmann_rouge_opennmt/rouge_baselines/util.py |