python_code | repo_name | file_path
---|---|---|
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_marginal_3agents_config import (
FurnMoveVision3AgentUncoordinatedExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveVision3AgentUncoordinatedExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_marginal_3agents_100000_2020-03-08_22-23-13.dat",
)
def simple_name(self):
return "vision_marginal_3agents"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__3agents__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_marginal_3agents_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_mixture_nocl_norot_config import (
FurnMoveMixtureNoRotationsNoCLExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveMixtureNoRotationsNoCLExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_mixture_nocl_norot_1000000_2019-11-19_07-38-29.dat",
)
def simple_name(self):
return "grid_mixture_nocl_norot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_mixture_nocl_norot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginal_cl_norot_config import (
FurnMoveGridExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveGridExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginal_cl_norot_1000000_2019-11-19_07-29-18.dat",
)
def simple_name(self):
return "grid_marginal_cl_norot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginal_cl_norot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_bigcentral_nocl_norot_config import (
FurnMoveNoRotationsExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveNoRotationsExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_bigcentral_nocl_norot_1000000_2019-11-19_07-42-15.dat",
)
def simple_name(self):
return "grid_bigcentral_nocl_norot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_bigcentral_nocl_norot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_marginalnocomm_nocl_rot_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_marginalnocomm_nocl_rot_500000_2020-02-21_15-49-01.dat",
)
def simple_name(self):
return "vision_marginalnocomm_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_marginalnocomm_nocl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_mixture_3agents_config import (
FurnMove3AgentMixtureExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMove3AgentMixtureExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_mixture_3agents_1000000_2020-02-28_18-25-19.dat",
)
def simple_name(self):
return "grid_mixture_3agents"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__3agents__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_mixture_3agents_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_bigcentral_nocl_rot_config import (
FurnMoveBigCentralVisionExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveBigCentralVisionExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_bigcentral_nocl_rot_500000_2019-11-19_13-48-44.dat",
)
def simple_name(self):
return "vision_bigcentral_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_bigcentral_nocl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_bigcentral_nocl_rot_config import (
FurnMoveBigCentralNoCLExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveBigCentralNoCLExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_bigcentral_nocl_rot_1000000_2019-11-12_09-47-18.dat",
)
def simple_name(self):
return "grid_bigcentral_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_bigcentral_nocl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_mixture_cl_rot_config import (
FurnMoveGridMixtureExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveGridMixtureExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_mixture_cl_rot_1000000_2019-11-12_07-34-02.dat",
)
def simple_name(self):
return "grid_mixture_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_mixture_cl_rot_config.py |
import os
from typing import Optional, List, Dict
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
from rl_base import Episode
from rl_multi_agent.experiments.furnmove_vision_mixture_cl_rot_config import (
FurnMoveMixtureVisionExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
def add_tv_and_dresser_info_to_info(info, env, agent_id):
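"""Add Dresser and Television visibility flags (and, for agent 0, their positions and rotations) to the given info dict."""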
dresser = [
o
for o in env.all_objects_with_properties(
{"objectType": "Dresser"}, agent_id=agent_id
)
if len(o["objectId"].split("|")) == 2
][0]
info["dresser_visible"] = dresser["visible"]
if agent_id == 0:
info["dresser_location"] = {
"position": dresser["position"],
"rotation": dresser["rotation"],
}
television = [
o
for o in env.all_objects_with_properties(
{"objectType": "Television"}, agent_id=agent_id
)
if len(o["objectId"].split("|")) == 2
][0]
info["tv_visible"] = television["visible"]
if agent_id == 0:
info["tv_location"] = {
"position": television["position"],
"rotation": television["rotation"],
}
def before_step(episode: Episode):
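"""Collect per-agent Dresser/Television info immediately before an episode step."""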
env: AI2ThorEnvironment = episode.environment
extra_infos = []
for agent_id in range(env.num_agents):
info = {}
add_tv_and_dresser_info_to_info(info=info, env=env, agent_id=agent_id)
extra_infos.append(info)
return extra_infos
def after_step(
step_results: List[Dict], before_info: Optional[List[Dict]], episode: Episode
):
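"""Attach the pre- and post-step Dresser/Television info to each agent's step result."""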
after_info = before_step(episode=episode)
for sr, bi, ai in zip(step_results, before_info, after_info):
sr["extra_before_info"] = bi
sr["extra_after_info"] = ai
class EvalConfig(SaveFurnMoveMixin, FurnMoveMixtureVisionExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_mixture_cl_rot_500000_2019-11-09_19-24-52.dat",
)
@classmethod
def get_init_train_params(cls):
init_train_params = super(EvalConfig, cls).get_init_train_params()
init_train_params["episode_args"]["before_step_function"] = before_step
init_train_params["episode_args"]["after_step_function"] = after_step
return init_train_params
def simple_name(self):
return "vision_mixture_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_mixture_cl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_mixture_nocl_rot_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_mixture_nocl_rot_500000_2019-11-19_13-45-52.dat",
)
def simple_name(self):
return "vision_mixture_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_mixture_nocl_rot_config_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_marginal_nocl_rot_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_eval_experiments.furnmove_vision_mixture_cl_rot_config import (
before_step,
after_step,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_marginal_nocl_rot_500000_2020-02-21_11-11-10.dat",
)
@classmethod
def get_init_train_params(cls):
init_train_params = super(EvalConfig, cls).get_init_train_params()
init_train_params["episode_args"]["before_step_function"] = before_step
init_train_params["episode_args"]["after_step_function"] = after_step
return init_train_params
def simple_name(self):
return "vision_marginal_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_marginal_nocl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_mixture_nocl_rot_config import (
FurnMoveGridExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveGridExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_mixture_nocl_rot_1000000_2019-11-19_07-36-26.dat",
)
def simple_name(self):
return "grid_mixture_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_mixture_nocl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_mixture4mix_cl_rot_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_mixture4mix_cl_rot_500000_2019-11-10_22-24-13.dat",
)
def simple_name(self):
return "vision_mixture4mix_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_mixture4mix_cl_rot_pass_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginal_nocl_norot_config import (
FurnMoveMarginalNoRotationsExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveMarginalNoRotationsExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginal_nocl_norot_1000000_2020-02-22_05-52-57.dat",
)
def simple_name(self):
return "grid_marginal_nocl_norot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginal_nocl_norot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginal_nocl_rot_config import (
FurnMoveGridExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveGridExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginal_nocl_rot_1000000_2020-02-22_00-38-14.dat",
)
def simple_name(self):
return "grid_marginal_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginal_nocl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_central_3agents_config import (
FurnMove3AgentCentralExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMove3AgentCentralExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_central_3agents_1000000_2020-03-02_19-28-34.dat",
)
def simple_name(self):
return "grid_central_3agents"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__3agents__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_central_3agents_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_bigcentral_cl_rot_config import (
FurnMoveBigCentralExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveBigCentralExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_bigcentral_cl_rot_1000000_2019-11-11_17-06-52.dat",
)
def simple_name(self):
return "grid_bigcentral_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_bigcentral_cl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_marginal_cl_rot_config import (
FurnMoveVisionMarginalWithCLExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveVisionMarginalWithCLExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_marginal_cl_rot_500000_2020-02-27_01-07-07.dat",
)
def simple_name(self):
return "vision_marginal_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_marginal_cl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_bigcentral_cl_norot_config import (
FurnMoveNoRotationsExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveNoRotationsExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_bigcentral_cl_norot_1000000_2019-11-12_16-36-02.dat",
)
def simple_name(self):
return "grid_bigcentral_cl_norot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_bigcentral_cl_norot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_mixture2mix_cl_rot_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_mixture2mix_cl_rot_500000_2019-11-12_17-53-52.dat",
)
def simple_name(self):
return "vision_mixture2mix_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_mixture2mix_cl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_bigcentral_cl_rot_config import (
FurnMoveBigCentralVisionExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveBigCentralVisionExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_bigcentral_cl_rot_500000_2019-11-11_20-34-22.dat",
)
def simple_name(self):
return "vision_bigcentral_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_bigcentral_cl_rot_config.py |
from typing import Dict, Sequence, Tuple
import numpy as np
import constants
def manhattan_dists_between_positions(
positions: Sequence[Dict[str, float]], grid_size: float
):
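"""Return, for each position, the Manhattan distances (in grid steps) to every other position."""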
dists_in_steps = [[] for _ in range(len(positions))]
for i in range(len(positions) - 1):
p0 = positions[i]
for j in range(i + 1, len(positions)):
p1 = positions[j]
dist = int(
round((abs(p0["x"] - p1["x"]) + abs(p0["z"] - p1["z"])) / grid_size)
)
dists_in_steps[i].append(dist)
dists_in_steps[j].append(dist)
return dists_in_steps
def pad_matrix_to_size_center(
matrix: np.ndarray,
desired_output_shape: Tuple[int, int],
point_to_element_map: Dict[Tuple[float, float], Tuple[int, int]],
):
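"""Pad matrix with NO_INFO_SYM so its contents are centered in an array of desired_output_shape, shifting point_to_element_map accordingly."""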
assert matrix.shape[0] <= desired_output_shape[0]
assert matrix.shape[1] <= desired_output_shape[1]
pad_row = desired_output_shape[0] - matrix.shape[0]
pad_col = desired_output_shape[1] - matrix.shape[1]
pad_top = pad_row // 2
pad_bottom = pad_row - pad_top
pad_left = pad_col // 2
pad_right = pad_col - pad_left
pad_matrix = np.full(desired_output_shape, fill_value=constants.NO_INFO_SYM)
assert pad_top + pad_bottom + matrix.shape[0] == desired_output_shape[0]
assert pad_left + pad_right + matrix.shape[1] == desired_output_shape[1]
pad_matrix[
pad_top : pad_top + matrix.shape[0], pad_left : pad_left + matrix.shape[1]
] = matrix
# update point to element map as per the padding
point_to_pad_element_map = dict()
for key, value in point_to_element_map.items():
point_to_pad_element_map[key] = (value[0] + pad_top, value[1] + pad_left)
return pad_matrix, point_to_pad_element_map
def pad_matrix(
matrix: np.ndarray,
pad: int,
point_to_element_map: Dict[Tuple[float, float], Tuple[int, int]],
):
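"""Pad matrix with pad rows/columns of NO_INFO_SYM on every side, shifting point_to_element_map accordingly."""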
pad_matrix = np.full(
[matrix.shape[0] + 2 * pad, matrix.shape[1] + 2 * pad],
fill_value=constants.NO_INFO_SYM,
)
pad_matrix[pad : pad + matrix.shape[0], pad : pad + matrix.shape[1]] = matrix
# update point to element map as per the padding
point_to_pad_element_map = dict()
for key, value in point_to_element_map.items():
point_to_pad_element_map[key] = (value[0] + pad, value[1] + pad)
return pad_matrix, point_to_pad_element_map
| cordial-sync-master | rl_ai2thor/ai2thor_utils.py |
| cordial-sync-master | rl_ai2thor/__init__.py |
"""A wrapper for engaging with the THOR environment."""
import copy
import math
import os
import random
import sys
import warnings
from collections import defaultdict
from typing import Tuple, Dict, List, Set, Union, Any, Optional, Mapping
import ai2thor.server
import networkx as nx
import numpy as np
from ai2thor.controller import Controller
import constants
from rl_ai2thor.ai2thor_utils import pad_matrix_to_size_center, pad_matrix
from utils.misc_util import round_to_factor
class AI2ThorEnvironment(object):
def __init__(
self,
docker_enabled: bool = False,
x_display: str = None,
local_thor_build: str = None,
time_scale: float = 1.0,
visibility_distance: float = constants.VISIBILITY_DISTANCE,
fov: float = constants.FOV,
restrict_to_initially_reachable_points: bool = False,
num_agents: int = 1,
visible_agents: bool = True,
render_depth_image: bool = False,
headless: bool = False,
always_return_visible_range: bool = False,
allow_agents_to_intersect: bool = False,
) -> None:
self.num_agents = num_agents
self.controller = Controller(headless=headless)
self.controller.local_executable_path = local_thor_build
self.controller.docker_enabled = docker_enabled
self.x_display = x_display
self._initially_reachable_points: Optional[List[Dict]] = None
self._initially_reachable_points_set: Optional[Set[Dict]] = None
self._started = False
self.move_mag: Optional[float] = None
self.grid_size: Optional[float] = None
self._grid_size_digits: Optional[float] = None
self.time_scale = time_scale
self.visibility_distance = visibility_distance
self.fov = fov
self.restrict_to_initially_reachable_points = (
restrict_to_initially_reachable_points
)
self.visible_agents = visible_agents
self.render_depth_image = render_depth_image
self.headless = headless
self.always_return_visible_range = always_return_visible_range
self.allow_agents_to_intersect = allow_agents_to_intersect
@property
def scene_name(self) -> str:
return self.controller.last_event.metadata["sceneName"]
@property
def current_frame(self) -> np.ndarray:
return self.controller.last_event.frame
@property
def current_frames(self) -> Tuple[np.ndarray, ...]:
return tuple(
self.controller.last_event.events[i].frame for i in range(self.num_agents)
)
@property
def current_depth_frames(self) -> Tuple[np.ndarray, ...]:
if not self.render_depth_image:
raise Exception(
"Depth frames are not available, "
"must set render_depth_image to true before initializing."
)
return tuple(
self.controller.last_event.events[i].depth_frame
for i in range(self.num_agents)
)
@property
def last_event(self) -> ai2thor.server.Event:
return self.controller.last_event
@property
def started(self) -> bool:
return self._started
def start(
self,
scene_name: Optional[str],
move_mag: float = 0.25,
player_screen_width=300,
player_screen_height=300,
quality="Very Low",
) -> None:
if self.headless and (
player_screen_width != 300 or player_screen_height != 300
):
warnings.warn(
"In headless mode but choosing non-default player screen width/height, will be ignored."
)
if player_screen_width < 300 or player_screen_height < 300:
self.controller.start(
x_display=self.x_display,
player_screen_width=300,
player_screen_height=300,
)
if not self.headless:
self.controller.step(
{
"action": "ChangeResolution",
"x": player_screen_width,
"y": player_screen_height,
}
)
else:
self.controller.start(
x_display=self.x_display,
player_screen_width=player_screen_width,
player_screen_height=player_screen_height,
)
self.controller.step({"action": "ChangeQuality", "quality": quality})
if not self.controller.last_event.metadata["lastActionSuccess"]:
raise Exception("Failed to change quality to: {}.".format(quality))
self._started = True
self.reset(scene_name=scene_name, move_mag=move_mag)
def stop(self) -> None:
try:
self.controller.stop_unity()
except Exception as e:
warnings.warn(str(e))
finally:
self._started = False
def reset(
self, scene_name: Optional[str], move_mag: float = 0.25,
):
self.move_mag = move_mag
self.grid_size = self.move_mag
self._grid_size_digits = [
i
for i in range(2, 10)
if abs(round(self.grid_size, i) - self.grid_size) < 1e-9
][0]
assert self._grid_size_digits != 9, (
"Bad grid size chosen. " "Should have a finite decimal expansion."
)
if scene_name is None:
scene_name = self.controller.last_event.metadata["sceneName"]
self.controller.reset(scene_name)
tmp_stderr = sys.stderr
sys.stderr = open(
os.devnull, "w"
) # TODO: HACKILY BLOCKING sequenceId print errors
self.controller.step(
{
"action": "Initialize",
"gridSize": self.grid_size,
"visibilityDistance": self.visibility_distance,
"fov": self.fov,
"timeScale": self.time_scale,
# "sequenceId": 0, # TODO: WHY IS THIS NECESSARY?
"agentCount": self.num_agents,
"makeAgentsVisible": self.visible_agents,
"renderDepthImage": self.render_depth_image,
"alwaysReturnVisibleRange": self.always_return_visible_range,
}
)
sys.stderr.close()
sys.stderr = tmp_stderr
self._initially_reachable_points = None
self._initially_reachable_points_set = None
self.controller.step({"action": "GetReachablePositions"})
if not self.controller.last_event.metadata["lastActionSuccess"]:
warnings.warn(
"Error when getting reachable points: {}".format(
self.controller.last_event.metadata["errorMessage"]
)
)
self._initially_reachable_points = self.controller.last_event.metadata[
"reachablePositions"
]
def teleport_agent_to(
self,
x: float,
y: float,
z: float,
rotation: float,
horizon: float,
standing: Optional[bool] = None,
force_action: bool = False,
only_initially_reachable: bool = False,
agent_id: int = None,
render_image=True,
) -> None:
if self.num_agents == 1 and agent_id == -1:
agent_id = 0
if standing is None:
standing = self.last_event.metadata["isStanding"]
if only_initially_reachable:
reachable_points = self.initially_reachable_points
target = {"x": x, "y": y, "z": z}
reachable = False
for p in reachable_points:
if self.position_dist(target, p) < 0.01:
reachable = True
break
if not reachable:
self.last_event.metadata["lastAction"] = "TeleportFull"
self.last_event.metadata[
"errorMessage"
] = "Target position was not initially reachable."
self.last_event.metadata["lastActionSuccess"] = False
return
self.controller.step(
dict(
action="TeleportFull",
x=x,
y=y,
z=z,
rotation={"x": 0.0, "y": rotation, "z": 0.0},
horizon=horizon,
standing=standing,
forceAction=force_action,
agentId=agent_id,
render_image=render_image,
)
)
def random_reachable_state(
self,
seed: int = None,
specific_rotations=(0, 90, 180, 270),
specific_horizons=(0, 30, 60, 330),
only_initially_reachable: bool = False,
) -> Dict:
if seed is not None:
random.seed(seed)
if only_initially_reachable:
xyz = random.choice(self.initially_reachable_points)
else:
xyz = random.choice(self.currently_reachable_points)
rotation = random.choice(specific_rotations)
horizon = random.choice(specific_horizons)
state = copy.copy(xyz)
state["rotation"] = rotation
state["horizon"] = horizon
return state
def randomize_agent_location(
self,
seed: int = None,
partial_position: Optional[Dict[str, float]] = None,
agent_id: int = None,
only_initially_reachable: bool = False,
) -> Dict:
if partial_position is None:
partial_position = {}
k = 0
while k == 0 or (not self.last_event.metadata["lastActionSuccess"] and k < 10):
state = self.random_reachable_state(
seed=seed, only_initially_reachable=only_initially_reachable
)
self.teleport_agent_to(**{**state, **partial_position}, agent_id=agent_id)
k += 1
if not self.last_event.metadata["lastActionSuccess"]:
warnings.warn(
(
"Randomize agent location in scene {}"
" with seed {} and partial position {} failed in "
"10 attempts. Forcing the action."
).format(self.scene_name, seed, partial_position)
)
self.teleport_agent_to(
**{**state, **partial_position}, force_action=True, agent_id=agent_id
)
assert self.last_event.metadata["lastActionSuccess"]
return state
@property
def initially_reachable_points(self) -> List[Dict[str, float]]:
assert self._initially_reachable_points is not None
return copy.deepcopy(self._initially_reachable_points) # type:ignore
@property
def initially_reachable_points_set(self) -> Set[Tuple[float, float]]:
if self._initially_reachable_points_set is None:
self._initially_reachable_points_set = set()
for p in self.initially_reachable_points:
self._initially_reachable_points_set.add(
self._agent_location_to_tuple(p)
)
return self._initially_reachable_points_set
@property
def currently_reachable_points(self) -> List[Dict[str, float]]:
self.step({"action": "GetReachablePositions", "agentId": 0})
return self.last_event.metadata["reachablePositions"] # type:ignore
def refresh_initially_reachable(self):
self._initially_reachable_points = self.currently_reachable_points
self._initially_reachable_points_set = None
def currently_reachable_points_set(self) -> Set[Tuple[float, float]]:
currently_reachable_points = self.currently_reachable_points
currently_reachable_points_set = set()
for p in currently_reachable_points:
currently_reachable_points_set.add(self._agent_location_to_tuple(p))
return currently_reachable_points_set
def get_initially_unreachable_points(self) -> Set[Tuple[float, float]]:
unreachable_points = set([])
# Membership must be tested against (x, z) tuples, not the list of position dicts.
reachable_xz = set((p["x"], p["z"]) for p in self.initially_reachable_points)
x_vals = set([p["x"] for p in self.initially_reachable_points])
z_vals = set([p["z"] for p in self.initially_reachable_points])
for x in x_vals:
for z in z_vals:
if (x, z) not in reachable_xz:
unreachable_points.add((x, z))
return unreachable_points
def _points_to_matrix(
self, points: List[Dict[str, float]], padding: float = 0.0
) -> Tuple[List[List[bool]], Dict[Tuple, Tuple]]:
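"""Rasterize reachable x/z positions into a boolean matrix (top row corresponds to the largest z) and return a map from (x, z) to (row, col)."""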
xz_set = set(
(
round(p["x"], self._grid_size_digits),
round(p["z"], self._grid_size_digits),
)
for p in points
)
xs = [p["x"] for p in points]
zs = [p["z"] for p in points]
n = 1.0 / self.grid_size
x_min = math.floor(n * (min(*xs) - padding)) / n
x_max = math.ceil(n * (max(*xs) + padding)) / n
z_min = math.floor(n * (min(*zs) - padding)) / n
z_max = math.ceil(n * (max(*zs) + padding)) / n
# Use n = 1 / grid_size so the number of grid points is correct for any grid size.
x_vals = list(np.linspace(x_min, x_max, round(1 + (x_max - x_min) * n)))
z_vals = list(
reversed(np.linspace(z_min, z_max, round(1 + (z_max - z_min) * n)))
)
point_to_element_map = dict()
matrix = [[False for _ in range(len(x_vals))] for _ in range(len(z_vals))]
for i, x in enumerate(x_vals):
for j, z in enumerate(z_vals):
matrix[j][i] = (x, z) in xz_set
point_to_element_map[(x, z)] = (j, i)
return matrix, point_to_element_map
def get_currently_reachable_points_matrix(
self, padding: float = 0.0
) -> Tuple[List[List[bool]], Dict[Tuple, Tuple]]:
return self._points_to_matrix(self.currently_reachable_points, padding=padding)
def get_initially_reachable_points_matrix(
self, padding: float = 0.0
) -> Tuple[List[List[bool]], Dict[Tuple, Tuple]]:
return self._points_to_matrix(self.initially_reachable_points, padding=padding)
def get_current_occupancy_matrix(
self, padding: float = 0.0, use_initially_reachable_points_matrix: bool = False
) -> Tuple[np.ndarray, Dict[Tuple, Tuple]]:
if use_initially_reachable_points_matrix:
(
matrix_bool,
point_to_element_map,
) = self.get_initially_reachable_points_matrix(padding=padding)
else:
(
matrix_bool,
point_to_element_map,
) = self.get_currently_reachable_points_matrix(padding=padding)
# 0/1 reachable point matrix
matrix_bool = np.array(matrix_bool, dtype=bool)
matrix = np.full(matrix_bool.shape, fill_value=constants.UNREACHABLE_SYM)
matrix[matrix_bool] = constants.REACHABLE_SYM
for i in range(self.num_agents):
agent_location = self.get_agent_location(agent_id=i)
xz_val = (
round(agent_location["x"], self._grid_size_digits),
round(agent_location["z"], self._grid_size_digits),
)
if xz_val in point_to_element_map:
# TODO: FIX THIS?
rowcol_val = point_to_element_map[xz_val]
matrix[rowcol_val[0], rowcol_val[1]] = constants.AGENT_SYM
return matrix, point_to_element_map
def get_current_occupancy_matrices_two_agents(
self, padding: float = 0.0, use_initially_reachable_points_matrix: bool = False
) -> Tuple[List[np.ndarray], Dict[Tuple, Tuple]]:
if use_initially_reachable_points_matrix:
(
matrix_bool,
point_to_element_map,
) = self.get_initially_reachable_points_matrix(padding=padding)
else:
(
matrix_bool,
point_to_element_map,
) = self.get_currently_reachable_points_matrix(padding=padding)
# 0/1 reachable point matrix
matrix_bool = np.array(matrix_bool, dtype=bool)
matrix = np.full(matrix_bool.shape, fill_value=constants.UNREACHABLE_SYM)
matrix[matrix_bool] = constants.REACHABLE_SYM
matrix_all_agents = [copy.deepcopy(matrix) for _ in range(self.num_agents)]
assert self.num_agents == 2
my_symbols = [
constants.AGENT_SELF_0,
constants.AGENT_SELF_90,
constants.AGENT_SELF_180,
constants.AGENT_SELF_270,
]
your_symbols = [
constants.AGENT_OTHER_0,
constants.AGENT_OTHER_90,
constants.AGENT_OTHER_180,
constants.AGENT_OTHER_270,
]
for i in range(2):
agent_location = self.get_agent_location(agent_id=i)
xz_val = (
round(agent_location["x"], self._grid_size_digits),
round(agent_location["z"], self._grid_size_digits),
)
clock_90 = (int(agent_location["rotation"]) % 360) // 90
if xz_val in point_to_element_map:
# TODO: FIX THIS?
rowcol_val = point_to_element_map[xz_val]
matrix_all_agents[i][rowcol_val[0], rowcol_val[1]] = my_symbols[
clock_90
]
matrix_all_agents[1 - i][rowcol_val[0], rowcol_val[1]] = your_symbols[
clock_90
]
return matrix_all_agents, point_to_element_map
def get_current_multi_agent_occupancy_tensors(
self, padding: float = 0.0, use_initially_reachable_points_matrix: bool = False
) -> Tuple[List[np.ndarray], Dict[Tuple, Tuple]]:
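"""For each agent, return an occupancy tensor whose first channel marks reachable points and whose remaining 4 * num_agents channels one-hot encode every agent's position and 90-degree rotation, with agents ordered relative to the receiving agent."""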
if use_initially_reachable_points_matrix:
(
matrix_bool,
point_to_element_map,
) = self.get_initially_reachable_points_matrix(padding=padding)
else:
(
matrix_bool,
point_to_element_map,
) = self.get_currently_reachable_points_matrix(padding=padding)
# 0/1 reachable point matrix
reachable_tensor = np.array([matrix_bool], dtype=bool)
positions_tensors = [
np.zeros((4 * self.num_agents, *reachable_tensor.shape[-2:]), dtype=float)
for _ in range(self.num_agents)
]
for i in range(self.num_agents):
agent_location = self.get_agent_location(agent_id=i)
xz_val = (
round(agent_location["x"], self._grid_size_digits),
round(agent_location["z"], self._grid_size_digits),
)
clock_90 = (int(agent_location["rotation"]) % 360) // 90
for j in range(self.num_agents):
if xz_val in point_to_element_map:
# TODO: FIX THIS?
rowcol_val = point_to_element_map[xz_val]
positions_tensors[j][
clock_90 + 4 * ((i - j) % self.num_agents),
rowcol_val[0],
rowcol_val[1],
] = 1.0
return (
[
np.concatenate((reachable_tensor, pt), axis=0)
for pt in positions_tensors
],
point_to_element_map,
)
def get_current_multi_agent_occupancy_tensors_no_rot(
self, padding: float = 0.0, use_initially_reachable_points_matrix: bool = False
) -> Tuple[List[np.ndarray], Dict[Tuple, Tuple]]:
if use_initially_reachable_points_matrix:
(
matrix_bool,
point_to_element_map,
) = self.get_initially_reachable_points_matrix(padding=padding)
else:
(
matrix_bool,
point_to_element_map,
) = self.get_currently_reachable_points_matrix(padding=padding)
# 0/1 reachable point matrix
reachable_tensor = np.array([matrix_bool], dtype=bool)
positions_tensors = [
np.zeros((self.num_agents, *reachable_tensor.shape[-2:]), dtype=float)
for _ in range(self.num_agents)
]
for i in range(self.num_agents):
agent_location = self.get_agent_location(agent_id=i)
xz_val = (
round(agent_location["x"], self._grid_size_digits),
round(agent_location["z"], self._grid_size_digits),
)
for j in range(self.num_agents):
if xz_val in point_to_element_map:
rowcol_val = point_to_element_map[xz_val]
positions_tensors[j][
(i - j) % self.num_agents, rowcol_val[0], rowcol_val[1]
] = 1.0
# TODO: Simplify with `positions_tensors[i] = np.roll(positions_tensors[0], 4*i, axis=0)`?
return (
[
np.concatenate((reachable_tensor, pt), axis=0)
for pt in positions_tensors
],
point_to_element_map,
)
def get_agent_location(self, agent_id: int = None) -> Dict[str, float]:
if self.num_agents == 1:
metadata = self.controller.last_event.metadata
else:
metadata = self.controller.last_event.events[agent_id].metadata
location = {
"x": metadata["agent"]["position"]["x"],
"y": metadata["agent"]["position"]["y"],
"z": metadata["agent"]["position"]["z"],
"rotation": metadata["agent"]["rotation"]["y"],
"horizon": metadata["agent"]["cameraHorizon"],
}
return location
def _agent_location_to_tuple(self, p):
return (
round(p["x"], self._grid_size_digits),
round(p["z"], self._grid_size_digits),
)
def get_agent_locations(self) -> Tuple[Dict[str, float], ...]:
"""Gets all agents' locations."""
return tuple(self.get_agent_location(i) for i in range(self.num_agents))
def get_agent_metadata(self, agent_id: int = 0) -> Dict[str, Any]:
"""Gets agent's metadata."""
return self.controller.last_event.events[agent_id].metadata["agent"]
def get_all_agent_metadata(self) -> Tuple[Dict[str, Any], ...]:
"""Gets all agents' locations."""
return tuple(self.get_agent_metadata(i) for i in range(self.num_agents))
def step(
self, action_dict: Dict[str, Union[str, int, float]]
) -> ai2thor.server.Event:
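"""Step the THOR controller with action_dict, special-casing object-moving, agent-moving, and hide-and-seek randomization actions and, when restrict_to_initially_reachable_points is set, undoing moves that leave the initially reachable area."""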
action = action_dict["action"]
agent_id = action_dict.get("agentId")
if agent_id is not None:
assert type(agent_id) == int and 0 <= agent_id < self.num_agents
elif action == "RandomlyCreateLiftedFurniture":
pass
else:
assert self.num_agents == 1
agent_id = 0
if (
self.allow_agents_to_intersect
and "allowAgentsToIntersect" not in action_dict
):
action_dict["allowAgentsToIntersect"] = True
if "MoveAgents" in action:
assert "Object" in action
action_dict = {
**action_dict,
"moveMagnitude": self.move_mag,
} # type: ignore
start_agent_locations = [
self.get_agent_location(agent_id=i) for i in range(self.num_agents)
]
object_info = self.get_object_by_id(action_dict["objectId"], agent_id=0)
self.controller.step(action_dict)
if self.restrict_to_initially_reachable_points:
end_locations = [
self._agent_location_to_tuple(self.get_agent_location(agent_id=i))
for i in range(self.num_agents)
]
if any(
t not in self.initially_reachable_points_set for t in end_locations
):
for i in range(self.num_agents):
self.teleport_agent_to(
**start_agent_locations[i], agent_id=i, force_action=True
)
self.controller.step(
{
"action": "TeleportObject",
"objectId": action_dict["objectId"],
**object_info["position"],
"rotation": object_info["rotation"],
"forceAction": True,
}
)
self.last_event.events[agent_id].metadata["lastAction"] = action
self.last_event.events[agent_id].metadata[
"lastActionSuccess"
] = False
self.last_event.events[agent_id].metadata[
"errorMessage"
] = "Moved to location outside of initially reachable points."
self.last_event.metadata = self.last_event.events[agent_id].metadata
elif "Move" in action and "Hand" not in action: # type: ignore
action_dict = {
**action_dict,
"moveMagnitude": self.move_mag,
} # type: ignore
start_location = self.get_agent_location(agent_id=agent_id)
self.controller.step(action_dict)
if self.restrict_to_initially_reachable_points:
end_location_tuple = self._agent_location_to_tuple(
self.get_agent_location(agent_id=agent_id)
)
if end_location_tuple not in self.initially_reachable_points_set:
self.teleport_agent_to(
**start_location, agent_id=agent_id, force_action=True
)
self.last_event.metadata["lastAction"] = action
self.last_event.metadata["lastActionSuccess"] = False
self.last_event.metadata[
"errorMessage"
] = "Moved to location outside of initially reachable points."
elif "RandomizeHideSeekObjects" in action:
last_positions = [
self.get_agent_location(agent_id=i) for i in range(self.num_agents)
]
self.controller.step(action_dict)
metadata = self.last_event.metadata
for i, lp in enumerate(last_positions):
if self.position_dist(lp, self.get_agent_location(agent_id=i)) > 0.001:
self.teleport_agent_to(**lp, agent_id=agent_id, force_action=True)
warnings.warn(
"In scene {}, after randomization of hide and seek objects, agent {} moved.".format(
self.scene_name, i
)
)
self.controller.step({"action": "GetReachablePositions"})
self._initially_reachable_points = self.controller.last_event.metadata[
"reachablePositions"
]
self._initially_reachable_points_set = None
self.controller.last_event.metadata["lastAction"] = action
self.controller.last_event.metadata["lastActionSuccess"] = metadata[
"lastActionSuccess"
]
self.controller.last_event.metadata["reachablePositions"] = []
else:
return self.controller.step(action_dict)
@staticmethod
def position_dist(
p0: Mapping[str, Any], p1: Mapping[str, Any], use_l1: bool = False
) -> float:
if use_l1:
return (
abs(p0["x"] - p1["x"]) + abs(p0["y"] - p1["y"]) + abs(p0["z"] - p1["z"])
)
else:
return math.sqrt(
(p0["x"] - p1["x"]) ** 2
+ (p0["y"] - p1["y"]) ** 2
+ (p0["z"] - p1["z"]) ** 2
)
def closest_object_with_properties(
self, properties: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
agent_pos = self.controller.last_event.metadata["agent"]["position"]
min_dist = float("inf")
closest = None
for o in self.all_objects():
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
d = self.position_dist(agent_pos, o["position"])
if d < min_dist:
min_dist = d
closest = o
return closest
def closest_visible_object_of_type(self, type: str) -> Optional[Dict[str, Any]]:
properties = {"visible": True, "objectType": type}
return self.closest_object_with_properties(properties)
def closest_object_of_type(self, type: str) -> Optional[Dict[str, Any]]:
properties = {"objectType": type}
return self.closest_object_with_properties(properties)
def closest_reachable_point_to_position(
self, position: Dict[str, float]
) -> Tuple[Dict[str, float], float]:
target = np.array([position["x"], position["z"]])
min_dist = float("inf")
closest_point = None
for pt in self.initially_reachable_points:
dist = np.linalg.norm(target - np.array([pt["x"], pt["z"]]))
if dist < min_dist:
closest_point = pt
min_dist = dist
if min_dist < 1e-3:
break
assert closest_point is not None
return closest_point, min_dist
@staticmethod
def _angle_from_to(a_from: float, a_to: float) -> float:
a_from = a_from % 360
a_to = a_to % 360
min_rot = min(a_from, a_to)
max_rot = max(a_from, a_to)
rot_across_0 = (360 - max_rot) + min_rot
rot_not_across_0 = max_rot - min_rot
rot_err = min(rot_across_0, rot_not_across_0)
if rot_across_0 == rot_err:
rot_err *= -1 if a_to > a_from else 1
else:
rot_err *= 1 if a_to > a_from else -1
return rot_err
def agent_xz_to_scene_xz(self, agent_xz: Dict[str, float]) -> Dict[str, float]:
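"""Convert an offset in the agent's egocentric x/z frame to scene coordinates (agent rotation must be a multiple of 90 degrees)."""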
agent_pos = self.get_agent_location()
x_rel_agent = agent_xz["x"]
z_rel_agent = agent_xz["z"]
scene_x = agent_pos["x"]
scene_z = agent_pos["z"]
rotation = agent_pos["rotation"]
if abs(rotation) < 1e-5:
scene_x += x_rel_agent
scene_z += z_rel_agent
elif abs(rotation - 90) < 1e-5:
scene_x += z_rel_agent
scene_z += -x_rel_agent
elif abs(rotation - 180) < 1e-5:
scene_x += -x_rel_agent
scene_z += -z_rel_agent
elif abs(rotation - 270) < 1e-5:
scene_x += -z_rel_agent
scene_z += x_rel_agent
else:
raise Exception("Rotation must be one of 0, 90, 180, or 270.")
return {"x": scene_x, "z": scene_z}
def scene_xz_to_agent_xz(self, scene_xz: Dict[str, float]) -> Dict[str, float]:
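"""Convert a scene x/z position into the agent's egocentric frame (inverse of agent_xz_to_scene_xz)."""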
agent_pos = self.get_agent_location()
x_err = scene_xz["x"] - agent_pos["x"]
z_err = scene_xz["z"] - agent_pos["z"]
rotation = agent_pos["rotation"]
if abs(rotation) < 1e-5:
agent_x = x_err
agent_z = z_err
elif abs(rotation - 90) < 1e-5:
agent_x = -z_err
agent_z = x_err
elif abs(rotation - 180) < 1e-5:
agent_x = -x_err
agent_z = -z_err
elif abs(rotation - 270) < 1e-5:
agent_x = z_err
agent_z = -x_err
else:
raise Exception("Rotation must be one of 0, 90, 180, or 270.")
return {"x": agent_x, "z": agent_z}
def current_matrix_frame(
self,
agent_id: int,
matrix: np.ndarray,
point_to_element_map: Dict[Tuple[float, float], Tuple[int, int]],
d_ahead: int,
d_side: int,
) -> np.ndarray:
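"""Return an egocentric crop of matrix with shape (d_ahead, 2 * d_side + 1), rotated so the direction the agent faces points toward the top row."""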
padded_matrix, point_to_pad_element_map = pad_matrix(
matrix, pad=max(d_ahead, d_side), point_to_element_map=point_to_element_map
)
agent_pos = self.get_agent_location(agent_id=agent_id)
agent_x = round(agent_pos["x"], self._grid_size_digits)
agent_z = round(agent_pos["z"], self._grid_size_digits)
(agent_row, agent_col) = point_to_pad_element_map[(agent_x, agent_z)]
rotation = int(agent_pos["rotation"]) % 360
if rotation == 0:
local_ego_matrix = padded_matrix[
agent_row - d_ahead : agent_row,
agent_col - d_side : agent_col + d_side + 1,
]
elif rotation == 90:
local_matrix = padded_matrix[
agent_row - d_side : agent_row + d_side + 1,
agent_col + 1 : agent_col + d_ahead + 1,
]
local_ego_matrix = np.rot90(local_matrix, 1)
elif rotation == 180:
local_matrix = padded_matrix[
agent_row + 1 : agent_row + d_ahead + 1,
agent_col - d_side : agent_col + d_side + 1,
]
local_ego_matrix = np.rot90(local_matrix, 2)
elif rotation == 270:
local_matrix = padded_matrix[
agent_row - d_side : agent_row + d_side + 1,
agent_col - d_ahead : agent_col,
]
local_ego_matrix = np.rot90(local_matrix, 3)
else:
raise Exception("Rotation must be one of 0, 90, 180, or 270.")
assert local_ego_matrix.shape == (d_ahead, 2 * d_side + 1)
return local_ego_matrix
def current_allocentric_matrix_frame(
self,
agent_id: int,
matrix: np.ndarray,
point_to_element_map: Dict[Tuple[float, float], Tuple[int, int]],
d_each_side: int,
) -> np.ndarray:
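"""Return an allocentric (unrotated) crop of matrix with shape (2 * d_each_side + 1, 2 * d_each_side + 1) centered on the agent."""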
padded_matrix, point_to_pad_element_map = pad_matrix(
matrix, pad=d_each_side, point_to_element_map=point_to_element_map
)
agent_pos = self.get_agent_location(agent_id=agent_id)
agent_x = round(agent_pos["x"], self._grid_size_digits)
agent_z = round(agent_pos["z"], self._grid_size_digits)
(agent_row, agent_col) = point_to_pad_element_map[(agent_x, agent_z)]
local_allo_matrix = padded_matrix[
agent_row - d_each_side : agent_row + d_each_side + 1,
agent_col - d_each_side : agent_col + d_each_side + 1,
]
assert local_allo_matrix.shape == (2 * d_each_side + 1, 2 * d_each_side + 1)
return local_allo_matrix
def current_allocentric_matrix_frame_full_range_center(
self,
matrix: np.ndarray,
point_to_element_map: Dict[Tuple[float, float], Tuple[int, int]],
desired_output_shape: Tuple[int, int],
) -> np.ndarray:
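"""Pad matrix to desired_output_shape with its contents centered and return the resulting allocentric map."""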
global_allo_matrix, point_to_pad_element_map = pad_matrix_to_size_center(
matrix,
desired_output_shape=desired_output_shape,
point_to_element_map=point_to_element_map,
)
assert global_allo_matrix.shape == desired_output_shape
return global_allo_matrix
def all_objects(self, agent_id: int = None) -> List[Dict[str, Any]]:
if self.num_agents == 1:
agent_id = 0
return self.controller.last_event.events[agent_id].metadata["objects"]
def all_objects_with_properties(
self, properties: Dict[str, Any], agent_id: int = None
) -> List[Dict[str, Any]]:
if self.num_agents == 1:
agent_id = 0
objects = []
for o in self.all_objects(agent_id=agent_id):
satisfies_all = True
for k, v in properties.items():
if o[k] != v:
satisfies_all = False
break
if satisfies_all:
objects.append(o)
return objects
def visible_objects(self, agent_id: int = None) -> List[Dict[str, Any]]:
if self.num_agents == 1:
agent_id = 0
return self.all_objects_with_properties({"visible": True}, agent_id=agent_id)
def get_object_by_id(
self, object_id: str, agent_id: Optional[int] = None
) -> Dict[str, Any]:
if self.num_agents == 1:
agent_id = 0
return [
o
for o in self.last_event.events[agent_id].metadata["objects"]
if o["objectId"] == object_id
][0]
class AI2ThorEnvironmentWithGraph(AI2ThorEnvironment):
_cached_graphs: Dict[str, nx.DiGraph] = {}
def __init__(
self,
docker_enabled: bool = False,
x_display: str = None,
local_thor_build: str = None,
time_scale: float = 1.0,
visibility_distance: float = constants.VISIBILITY_DISTANCE,
fov: float = constants.FOV,
restrict_to_initially_reachable_points: bool = False,
num_agents: int = 1,
visible_agents: bool = True,
render_depth_image: bool = False,
override_graph: Union[
nx.classes.digraph.DiGraph, nx.classes.graph.Graph
] = None,
**kwargs,
):
super(AI2ThorEnvironmentWithGraph, self).__init__(
docker_enabled=docker_enabled,
x_display=x_display,
local_thor_build=local_thor_build,
time_scale=time_scale,
visibility_distance=visibility_distance,
fov=fov,
restrict_to_initially_reachable_points=restrict_to_initially_reachable_points,
num_agents=num_agents,
visible_agents=visible_agents,
render_depth_image=render_depth_image,
**kwargs,
)
if override_graph:
self._cached_graphs[self.scene_name] = override_graph
def initially_reachable_points_with_rotations(self, horizon):
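"""Return the initially reachable points replicated for each of the four 90-degree rotations at the given camera horizon."""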
points_slim = self.initially_reachable_points
points = []
for r in [0, 90, 180, 270]:
for p in points_slim:
p = copy.copy(p)
p["rotation"] = r
p["horizon"] = horizon
points.append(p)
return points
def refresh_initially_reachable(self):
self._initially_reachable_points = self.currently_reachable_points
self._initially_reachable_points_set = None
if self.scene_name in self._cached_graphs:
g = self._cached_graphs[self.scene_name]
initially_reachable_keys_set = set(
self.get_key(p)
for p in self.initially_reachable_points_with_rotations(horizon=30)
)
for n in list(g.nodes()):
if n not in initially_reachable_keys_set:
g.remove_node(n)
for n in initially_reachable_keys_set:
if n not in g:
self._add_node_to_graph(g, n)
def update_graph_with_failed_action(self, failed, agent_id):
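"""Remove the edge out of the agent's current state whose action matches the failed action."""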
source_key = self.get_key(self.last_event.events[agent_id].metadata["agent"])
e_dict = self.graph[source_key]
to_remove_key = None
for t_key in self.graph[source_key]:
if e_dict[t_key]["action"] == failed:
to_remove_key = t_key
break
if to_remove_key is not None:
self.graph.remove_edge(source_key, to_remove_key)
def _add_from_to_edge(self, g, s, t):
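"""Add a directed edge from state s to state t labeled with the single action (rotate or MoveAhead) that transitions between them, if such an action exists."""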
def ae(x, y):
return abs(x - y) < 0.001
s_x, s_z, s_rot, s_hor = s
t_x, t_z, t_rot, t_hor = t
dist = round(math.sqrt((s_x - t_x) ** 2 + (s_z - t_z) ** 2), 5)
# Rotation edges: s and t share a position and differ by a single 90-degree turn.
angle_dist = (t_rot - s_rot) % 360
if dist == 0 and angle_dist in (90, 270):
if angle_dist == 90:
action = "RotateRight"
else:
action = "RotateLeft"
g.add_edge(s, t, action=action)
elif dist == 0.25 and s_rot == t_rot:
if (
(s_rot == 0 and ae(t_z - s_z, 0.25))
or (s_rot == 90 and ae(t_x - s_x, 0.25))
or (s_rot == 180 and ae(t_z - s_z, -0.25))
or (s_rot == 270 and ae(t_x - s_x, -0.25))
):
g.add_edge(s, t, action="MoveAhead")
def _add_node_to_graph(self, graph: nx.DiGraph, s: Tuple[float, float, int, int]):
if s in graph:
return
existing_nodes = list(graph.nodes())
graph.add_node(s)
for t in existing_nodes:
self._add_from_to_edge(graph, s, t)
self._add_from_to_edge(graph, t, s)
@property
def graph(self):
if self.scene_name not in self._cached_graphs:
g = nx.DiGraph()
points = self.initially_reachable_points_with_rotations(horizon=30)
for p in points:
self._add_node_to_graph(g, self.get_key(p))
self._cached_graphs[self.scene_name] = g
return self._cached_graphs[self.scene_name]
@graph.setter
def graph(self, g):
self._cached_graphs[self.scene_name] = g
def location_for_key(self, key, y_value=0.0):
x, z, rot, hor = key
loc = dict(x=x, y=y_value, z=z, rotation=rot, horizon=hor)
return loc
def get_key(self, input) -> Tuple[float, float, int, int]:
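"""Convert an agent location or metadata dict into a hashable (x, z, rotation, horizon) key, rounding rotation to a multiple of 90 and horizon to a multiple of 30."""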
if "x" in input:
x = input["x"]
z = input["z"]
rot = input["rotation"]
hor = input["horizon"]
else:
x = input["position"]["x"]
z = input["position"]["z"]
rot = input["rotation"]["y"]
hor = input["cameraHorizon"]
return (
round(x, 2),
round(z, 2),
round_to_factor(rot, 90),
round_to_factor(hor, 30),
)
def is_neighbor_and_facing(self, p, possible_neighbor) -> bool:
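"""Return True if possible_neighbor is one grid step (0.25m) from p and rotated to face p."""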
def ae(x, y):
return abs(x - y) < 0.001
p0 = p
p1 = possible_neighbor
return (
(ae(p1["x"] - p0["x"], 0.25) and ae(p1["rotation"], 270))
or (ae(p1["x"] - p0["x"], -0.25) and ae(p1["rotation"], 90))
or (ae(p1["z"] - p0["z"], 0.25) and ae(p1["rotation"], 180))
or (ae(p1["z"] - p0["z"], -0.25) and ae(p1["rotation"], 0))
)
def _check_contains_key(self, key: Tuple[float, float, int, int], add_if_not=True):
if key not in self.graph:
warnings.warn(
"{} was not in the graph for scene {}.".format(key, self.scene_name)
)
self._add_node_to_graph(self.graph, key)
def shortest_state_path(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
path = nx.shortest_path(self.graph, source_state_key, goal_state_key)
return path
def shortest_path_next_state(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
if source_state_key == goal_state_key:
raise Exception("called next state on the same source and goal state")
state_path = self.shortest_state_path(source_state_key, goal_state_key)
return state_path[1]
def shortest_path_next_action(self, source_state_key, goal_state_key):
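"""Return the single basic action (MoveAhead, RotateLeft/Right, LookUp/Down) that moves from the source state to the next state on a shortest path to the goal."""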
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
# FIXME: Make this generic for any action_space, currently hardcoded to "MoveAhead", "RotateRight", "RotateLeft", "LookUp", "LookDown"
next_state_key = self.shortest_path_next_state(source_state_key, goal_state_key)
if not self.graph.has_edge(source_state_key, next_state_key):
print(
"source_state_key: "
+ str(source_state_key)
+ "\ngoal_state_key: "
+ str(goal_state_key)
+ "\nnext_state_key: "
+ str(next_state_key)
)
raise Exception(
"calculated next state is not reachable from source state, check!"
)
source_loc = self.location_for_key(source_state_key)
next_loc = self.location_for_key(next_state_key)
diff = defaultdict(lambda: None)
        # Not-so-clean way to check that exactly one of x, z, rotation, or horizon changes.
diff_detected = False
for key in source_loc.keys():
if source_loc[key] != next_loc[key]:
if diff_detected:
raise Exception(
"More than one basic action required to move to next node state, check!"
)
diff[key] = next_loc[key] - source_loc[key]
diff_detected = key
if diff_detected == "x" or diff_detected == "z":
return "MoveAhead"
elif diff_detected == "rotation":
if (source_loc["rotation"] + 90) % 360 == next_loc["rotation"]:
return "RotateRight"
elif (source_loc["rotation"] - 90) % 360 == next_loc["rotation"]:
return "RotateLeft"
else:
raise Exception("Cannot reach next state in one rotate action")
elif diff_detected == "horizon":
source_horizon = round(source_loc["horizon"] / 30) * 30.0
next_horizon = round(next_loc["horizon"] / 30) * 30.0
if source_horizon + 30 == next_horizon:
return "LookDown"
elif source_horizon - 30 == next_horizon:
return "LookUp"
else:
raise Exception("Cannot reach next state in one look up/down action")
else:
raise Exception("no change in x, z, rotation or camera, check!")
def shortest_path_length(self, source_state_key, goal_state_key):
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
try:
return nx.shortest_path_length(self.graph, source_state_key, goal_state_key)
except nx.NetworkXNoPath as _:
return float("inf")
| cordial-sync-master | rl_ai2thor/ai2thor_environment.py |
from abc import abstractmethod, ABC
from typing import Dict, Any, Optional, Sequence, Tuple, List
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
from rl_base import Episode
from rl_base.episode import MultiAgentEpisode
class AI2ThorEpisode(Episode[AI2ThorEnvironment]):
def __init__(
self,
env: AI2ThorEnvironment,
task_data: Dict[str, Any],
max_steps: int,
**kwargs,
) -> None:
super(AI2ThorEpisode, self).__init__(
env=env, task_data=task_data, max_steps=max_steps, **kwargs
)
self._last_action = None
self._last_action_success = None
def last_action(self):
return self._last_action
def last_action_success(self):
return self._last_action_success
def step(self, action_as_int: int) -> Dict[str, Any]:
step_result = super(AI2ThorEpisode, self).step(action_as_int=action_as_int)
self._last_action = action_as_int
self._last_action_success = self.environment.last_event.metadata[
"lastActionSuccess"
]
step_result["action"] = self._last_action
step_result["action_success"] = self._last_action_success
return step_result
def state_for_agent(self):
state = {
"frame": self.environment.current_frame,
"last_action": self._last_action,
"last_action_success": self._last_action_success,
}
return state
@abstractmethod
def _step(self, action_as_int: int) -> Dict[str, Any]:
raise NotImplementedError()
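# Illustrative sketch only (hedged, not used elsewhere in this repository): a
# minimal concrete episode showing one way a subclass might implement the
# abstract _step. The action names and the constant reward are hypothetical.
class _ExampleAI2ThorEpisode(AI2ThorEpisode):
    _ACTIONS = ("MoveAhead", "RotateLeft", "RotateRight")
    def _step(self, action_as_int: int) -> Dict[str, Any]:
        # Forward the chosen discrete action to the underlying environment.
        self.environment.step({"action": self._ACTIONS[action_as_int]})
        return {"reward": 0.0}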
class MultiAgentAI2ThorEpisode(MultiAgentEpisode[AI2ThorEnvironment], ABC):
def __init__(
self,
env: AI2ThorEnvironment,
task_data: Dict[str, Any],
max_steps: int,
**kwargs,
) -> None:
super(MultiAgentAI2ThorEpisode, self).__init__(
env=env, task_data=task_data, max_steps=max_steps, **kwargs
)
self._last_actions: Optional[Sequence[int]] = None
self._last_actions_success: Optional[Sequence[bool]] = None
self.include_depth_frame = kwargs.get("include_depth_frame", False)
def last_actions(self):
return self._last_actions
def last_actions_success(self):
return self._last_actions_success
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
step_results = []
before_info = (
None
if self.before_step_function is None
else self.before_step_function(episode=self)
)
for agent_id in range(self._env.num_agents):
self._increment_num_steps_taken_in_episode()
step_result = self._step(actions_as_ints[agent_id], agent_id=agent_id)
step_result["action"] = actions_as_ints[agent_id]
step_result["action_success"] = self.environment.last_event.metadata[
"lastActionSuccess"
]
step_results.append(step_result)
if self.after_step_function is not None:
self.after_step_function(
step_results=step_results, before_info=before_info, episode=self
)
return step_results
def states_for_agents(self):
states = []
for agent_id in range(self.environment.num_agents):
e = self.environment.last_event.events[agent_id]
last_action = (
None if self._last_actions is None else self._last_actions[agent_id]
)
last_action_success = (
None
if self._last_actions_success is None
else self._last_actions_success[agent_id]
)
states.append(
{
"frame": e.frame,
"last_action": last_action,
"last_action_success": last_action_success,
}
)
if self.include_depth_frame:
states[-1]["depth_frame"] = e.depth_frame
return states
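    # Hedged usage sketch (not part of the class API): a concrete subclass of
    # this multi-agent episode would typically be driven as below; the action
    # indices are hypothetical.
    #
    #   while not (episode.is_complete() or episode.is_paused()):
    #       step_results = episode.multi_step(actions_as_ints=(0, 0))
    #       states = episode.states_for_agents()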
| cordial-sync-master | rl_ai2thor/ai2thor_episodes.py |
import copy
import itertools
import math
# noinspection PyUnresolvedReferences
import random
import re
import warnings
from typing import List, Dict, Optional, Any, Set, Tuple, Union
import ai2thor.server
import cv2
import numpy as np
from ai2thor.server import Event, MultiAgentEvent
from scipy.ndimage import label  # scipy.ndimage.measurements is deprecated in recent SciPy
import constants
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
MOVE_MAP = {
0: dict(row=-1, col=0),
90: dict(row=0, col=1),
180: dict(row=1, col=0),
270: dict(row=0, col=-1),
}
SMALL_TELEVISION_TEMPLATE_STRING = """
0 0 2 0 0
0 2 1 2 0
0 2 1 1 2
0 2 1 2 0
0 0 2 0 0
"""
class GridWorldController(object):
def __init__(
self,
agent_initially_reachable_pos: List[Dict[str, float]],
rotation_to_lifted_object_reachable_pos: Dict[int, List[Dict[str, float]]],
lifted_object_id: str,
lifted_object_type: str,
object_template_string: str,
scene_name,
min_steps_between_agents: int = 1,
grid_size=0.25,
remove_unconnected_positions=False,
):
        # Initially reachable positions and masks don't change over the course of episodes
self.agent_initially_reachable_pos_tuple = tuple(agent_initially_reachable_pos)
self.agent_initially_reachable_positions_mask = None
self.rotation_to_lifted_object_reachable_pos = {
int(k): rotation_to_lifted_object_reachable_pos[k]
for k in rotation_to_lifted_object_reachable_pos
}
self.grid_size = grid_size
self.remove_unconnected_positions = remove_unconnected_positions
self.lifted_object_id = lifted_object_id
self.lifted_object_type = lifted_object_type
self.lifted_object: Optional[TrackedObject] = None
self.lifted_object_template = None
self.scene_name = scene_name
self.agents: List[AgentObject] = []
self.agent_count = 0
self.lifted_object_template = self.parse_template_to_mask(
object_template_string
)
self.min_steps_between_agents = min_steps_between_agents
# Run only once, in the initialization
self._build_grid_world(
padding_units=max(3, *self.lifted_object_template.shape)
if not remove_unconnected_positions
else max(
2,
(self.lifted_object_template == 1).sum(1).max() // 2,
(self.lifted_object_template == 1).sum(0).max() // 2,
)
)
self.last_event = None
self.steps_taken = 0
self.tracked_objects: Dict[str, TrackedObject] = dict()
self._error_message = ""
def start(self):
pass
def reset(self, scene_name):
assert self.scene_name is None or scene_name == self.scene_name
self.scene_name = scene_name
self.last_event = None
self.agent_count = 1
self.agents = []
self.steps_taken = 0
self._error_message = ""
def parse_template_to_mask(self, template):
tv_tmpl = []
for line in template.strip().split("\n"):
row = map(lambda x: int(x.strip()), line.split())
tv_tmpl.append(list(row))
return np.array(tv_tmpl, dtype=np.uint8)
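    # Hedged illustration: by the convention used throughout this class, 1 in a
    # parsed template marks cells occupied by the lifted object and 2 marks
    # cells from which agents may interact with it. For example:
    #
    #   mask = controller.parse_template_to_mask(SMALL_TELEVISION_TEMPLATE_STRING)
    #   assert mask.shape == (5, 5) and mask[2, 2] == 1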
def empty_mask(self):
        return np.zeros((self._nrows, self._ncols), dtype=bool)  # np.bool is deprecated; plain bool is equivalent
def _build_grid_world(self, padding_units):
        # Initializes much of the basic chassis.
        # Doesn't set the locations of the agents, the lifted object, or the target.
self._min_x = 2 ** 32
self._max_x = -1 * 2 ** 32
self._min_z = 2 ** 32
self._max_z = -1 * 2 ** 32
for point in self.agent_initially_reachable_pos_tuple:
if point["x"] < self._min_x:
self._min_x = point["x"]
if point["z"] < self._min_z:
self._min_z = point["z"]
if point["z"] > self._max_z:
self._max_z = point["z"]
if point["x"] > self._max_x:
self._max_x = point["x"]
for point in sum(self.rotation_to_lifted_object_reachable_pos.values(), []):
if point["x"] < self._min_x:
self._min_x = point["x"]
if point["z"] < self._min_z:
self._min_z = point["z"]
if point["z"] > self._max_z:
self._max_z = point["z"]
if point["x"] > self._max_x:
self._max_x = point["x"]
        # add a buffer of `padding_units` grid points on every side so that the
        # origin of the object can sit at the edge of the reachable area
self._max_z += padding_units * self.grid_size
self._max_x += padding_units * self.grid_size
self._min_z -= padding_units * self.grid_size
self._min_x -= padding_units * self.grid_size
self._ncols = int((self._max_x - self._min_x) / self.grid_size) + 1
self._nrows = int((self._max_z - self._min_z) / self.grid_size) + 1
self.agent_reachable_positions_mask = self.empty_mask()
self.rotation_to_lifted_object_reachable_position_masks = {
rot: self.empty_mask()
for rot in self.rotation_to_lifted_object_reachable_pos
}
for rot in self.rotation_to_lifted_object_reachable_pos:
self._build_points_mask(
self.rotation_to_lifted_object_reachable_pos[rot],
self.rotation_to_lifted_object_reachable_position_masks[rot],
)
self._build_points_mask(
self.agent_initially_reachable_pos_tuple,
self.agent_reachable_positions_mask,
)
if self.remove_unconnected_positions:
flat_masks = np.stack(
self.agent_reachable_positions_mask
+ list(
self.rotation_to_lifted_object_reachable_position_masks.values()
),
axis=0,
).any(0)
labels, ncomponents = label(
flat_masks, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
)
if ncomponents > 1:
reachable_point = np.argwhere(self.agent_reachable_positions_mask)[0]
good_label = labels[tuple(reachable_point)]
connected_mask = labels == good_label
row_span = np.argwhere(connected_mask.any(axis=1))
row_min, row_max = (
max(row_span.min() - padding_units, 0),
min(row_span.max() + padding_units, connected_mask.shape[0]),
)
row_slice = slice(row_min, row_max + 1)
col_span = np.argwhere(connected_mask.any(axis=0))
col_min, col_max = (
max(col_span.min() - padding_units, 0),
min(col_span.max() + padding_units, connected_mask.shape[1]),
)
col_slice = slice(col_min, col_max + 1)
for (k, v) in list(
self.rotation_to_lifted_object_reachable_position_masks.items()
):
self.rotation_to_lifted_object_reachable_position_masks[
k
] = np.logical_and(v, connected_mask)[row_slice, col_slice]
self.agent_reachable_positions_mask = self.agent_reachable_positions_mask[
row_slice, col_slice
]
new_xz_mins = self.rowcol_to_xz((row_max, col_min))
new_xz_maxes = self.rowcol_to_xz((row_min, col_max))
self._max_x, self._max_z = tuple(float(w) for w in new_xz_maxes)
self._min_x, self._min_z = tuple(float(w) for w in new_xz_mins)
(self._nrows, self._ncols,) = self.agent_reachable_positions_mask.shape
self.agent_initially_reachable_positions_mask = copy.deepcopy(
self.agent_reachable_positions_mask
)
@property
def min_x(self):
return self._min_x
@property
def max_x(self):
return self._max_x
@property
def min_z(self):
return self._min_z
@property
def max_z(self):
return self._max_z
def rowcol_to_xz(self, rowcol):
row, col = rowcol
x = (col * self.grid_size) + self._min_x
z = (-row * self.grid_size) + self._max_z
return x, z
def xz_to_rowcol(self, xz):
x, z = xz
row = round((self._max_z - z) / self.grid_size)
col = round((x - self._min_x) / self.grid_size)
return int(row), int(col)
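    # Hedged illustration: xz_to_rowcol and rowcol_to_xz are inverse grid
    # transformations (up to grid_size rounding); rows grow as z decreases and
    # columns grow with x. For on-grid points:
    #
    #   row, col = controller.xz_to_rowcol((x, z))
    #   x2, z2 = controller.rowcol_to_xz((row, col))  # x2 ≈ x, z2 ≈ z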
def _build_points_mask(self, points, mask):
for point in points:
row, col = self.xz_to_rowcol((point["x"], point["z"]))
mask[row, col] = True
def viz_mask(self, mask):
viz_scale = 20
viz_image = (
np.ones(
(self._nrows * viz_scale, self._ncols * viz_scale, 3), dtype=np.uint8
)
* 255
)
for point in np.argwhere(mask):
cv2.circle(
viz_image,
(point[1] * viz_scale, point[0] * viz_scale),
4,
(255, 0, 0),
-1,
)
cv2.imshow("aoeu", viz_image)
cv2.waitKey(2000)
def get_boundary_of_mask(self, mask):
mask_points = np.argwhere(mask)
if len(mask_points) <= 1:
raise Exception("Too few valid mask points")
leftmost_ind = np.argmin(mask_points[:, 1])
leftmost_point = mask_points[leftmost_ind, :]
path = [leftmost_point]
up = np.array([-1, 0])
right = np.array([0, 1])
down = np.array([1, 0])
left = np.array([0, -1])
dirs = [up, right, down, left]
def inbounds(p):
return (
0 <= p[0] < mask.shape[0]
and 0 <= p[1] < mask.shape[1]
and mask[p[0], p[1]]
)
dir_ind = 0
while dir_ind < 6:
move_success = False
dir = dirs[dir_ind % 4]
p = path[-1] + dir
if inbounds(p):
move_success = True
path.append(p)
if not move_success:
p = path[-1] + dirs[(dir_ind + 1) % 4]
if inbounds(p):
move_success = True
path.append(p)
if not move_success:
dir_ind += 1
if len(path) > 1 and np.all(path[0] == path[-1]):
break
        if dir_ind == 6:
            raise Exception("Failed to trace the boundary of the mask.")
return np.array(path)
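    # Note (descriptive): get_boundary_of_mask traces the outer boundary of a
    # connected mask starting from its leftmost point, preferring to continue
    # in the current direction and otherwise turning through up/right/down/left
    # in order; it returns the traced points as an (N, 2) array of (row, col)
    # coordinates and stops once the path closes on its starting point.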
def draw_path_from_row_cols(
self,
points,
viz_image,
viz_scale,
color,
thickness,
expand_from_center: Optional[np.ndarray] = None,
):
if expand_from_center is not None:
points = points - 0.5 * (points < expand_from_center.reshape((1, -1)))
points = points + 0.5 * (points > expand_from_center.reshape((1, -1)))
for ind in range(len(points) - 1):
p0 = points[ind]
p1 = points[ind + 1]
cv2.line(
viz_image,
(int(round(p0[1] * viz_scale)), int(round(p0[0] * viz_scale))),
(int(round(p1[1] * viz_scale)), int(round(p1[0] * viz_scale))),
color,
thickness,
)
def viz_world(self, viz_scale=20, do_wait=True, wait_key=0, array_only=False):
viz_image = (
np.ones(
(self._nrows * viz_scale, self._ncols * viz_scale, 3), dtype=np.uint8
)
* 255
)
for p in np.argwhere(self.agent_reachable_positions_mask):
tl = (p[1] * viz_scale - viz_scale // 4, p[0] * viz_scale - viz_scale // 4)
br = (p[1] * viz_scale + viz_scale // 4, p[0] * viz_scale + viz_scale // 4)
cv2.rectangle(viz_image, tl, br, (210, 210, 210), -1)
masks = [
self.rotation_to_lifted_object_reachable_position_masks[rot]
for rot in sorted(
list(self.rotation_to_lifted_object_reachable_position_masks.keys())
)
]
for p in np.argwhere((np.stack(masks, axis=0)).any(0) != 0):
color = np.array([0, 0, 0])
for i, mask in enumerate(masks):
if mask[p[0], p[1]] and i < 3:
color[i] = 255
elif mask[p[0], p[1]]:
color = color // 2
offset = viz_scale // 10 + viz_scale // 4
tl = (p[1] * viz_scale - offset, p[0] * viz_scale - offset)
br = (p[1] * viz_scale + offset, p[0] * viz_scale + offset)
cv2.rectangle(
viz_image,
tl,
br,
tuple(int(i) for i in color),
thickness=viz_scale // 10,
)
for object_id, tracked_object in self.tracked_objects.items():
if object_id == self.lifted_object_id:
continue
else:
if hasattr(tracked_object, "object_mask"):
object_mask = tracked_object.object_mask
row = tracked_object.row
col = tracked_object.col
row_rad, col_rad = (
object_mask.shape[0] // 2,
object_mask.shape[1] // 2,
)
obj_mask = self.empty_mask()
obj_mask[
(row - row_rad) : (row + row_rad + 1),
(col - col_rad) : (col + col_rad + 1),
] = object_mask
boundary_points = self.get_boundary_of_mask(obj_mask)
self.draw_path_from_row_cols(
points=boundary_points,
viz_image=viz_image,
viz_scale=viz_scale,
color=(255, 165, 0),
thickness=max(viz_scale // 10, 1),
expand_from_center=np.array([row, col]),
)
if self.lifted_object is not None:
self.draw_path_from_row_cols(
points=self.get_boundary_of_mask(self.current_lifted_object_mask()),
viz_image=viz_image,
viz_scale=viz_scale,
color=(255, 0, 255),
thickness=2,
expand_from_center=np.array(
[self.lifted_object.row, self.lifted_object.col]
),
)
self.draw_path_from_row_cols(
points=self.get_boundary_of_mask(
self.current_near_lifted_object_mask()
),
viz_image=viz_image,
viz_scale=viz_scale,
color=(180, 0, 180, 180),
thickness=max(viz_scale // 10, 1),
expand_from_center=np.array(
[self.lifted_object.row, self.lifted_object.col]
),
)
agent_colors = [ # Red + blue + tableau 10 colors
(255, 0, 0),
(0, 0, 255),
(31, 119, 180),
(255, 127, 14),
(44, 160, 44),
(214, 39, 40),
(148, 103, 189),
(140, 86, 75),
(227, 119, 194),
(127, 127, 127),
(188, 189, 34),
(23, 190, 207),
]
for i, a in enumerate(self.agents):
forward_dir = MOVE_MAP[a.rot]
right_dir = MOVE_MAP[(a.rot + 90) % 360]
cv2.drawContours(
viz_image,
[
np.array(
[
(
a.col * viz_scale
+ forward_dir["col"] * (viz_scale // 3),
a.row * viz_scale
+ forward_dir["row"] * (viz_scale // 3),
),
(
a.col * viz_scale
- (
right_dir["col"] * viz_scale // 4
+ forward_dir["col"] * viz_scale // 3
),
a.row * viz_scale
- (
right_dir["row"] * viz_scale // 4
+ forward_dir["row"] * viz_scale // 3
),
),
(
a.col * viz_scale
- (
-right_dir["col"] * viz_scale // 4
+ forward_dir["col"] * viz_scale // 3
),
a.row * viz_scale
- (
-right_dir["row"] * viz_scale // 4
+ forward_dir["row"] * viz_scale // 3
),
),
]
)
],
0,
agent_colors[i],
-1,
)
if not array_only:
cv2.imshow("aoeu", viz_image)
if do_wait:
return str(chr(cv2.waitKey(wait_key) & 255))
else:
cv2.waitKey(100)
return viz_image
def viz_ego_agent_views(
self,
viz_scale=20,
view_shape=(15, 15),
do_wait=True,
wait_key=0,
array_only=False,
):
world = self.viz_world(viz_scale=viz_scale, array_only=True)
assert view_shape[0] == view_shape[1]
pad = viz_scale * view_shape[0]
world = np.pad(
world,
((pad, pad), (pad, pad), (0, 0)),
mode="constant",
constant_values=255,
)
def to_pix(a):
return int(round(a * viz_scale + viz_scale * view_shape[0]))
forward_width, side_width = view_shape
ego_views = []
for agent in self.agents:
row, col, rot = agent.row, agent.col, int(agent.rot)
if rot == 0:
row -= 1 / 2
row_pix_slice = slice(to_pix(row - forward_width), to_pix(row) + 1)
col_pix_slice = slice(
to_pix(col - side_width / 2), to_pix(col + side_width / 2) + 1
)
elif rot == 180:
row += 1 / 2
row_pix_slice = slice(to_pix(row), to_pix(row + forward_width) + 1)
col_pix_slice = slice(
to_pix(col - side_width / 2), to_pix(col + side_width / 2) + 1
)
elif rot == 90:
col += 1 / 2
row_pix_slice = slice(
to_pix(row - side_width / 2), to_pix(row + side_width / 2) + 1
)
col_pix_slice = slice(to_pix(col), to_pix(col + forward_width) + 1)
elif rot == 270:
col -= 1 / 2
row_pix_slice = slice(
to_pix(row - side_width / 2), to_pix(row + side_width / 2) + 1
)
col_pix_slice = slice(to_pix(col - forward_width), to_pix(col) + 1)
else:
raise NotImplementedError
ego_views.append(np.rot90(world[row_pix_slice, col_pix_slice], k=rot // 90))
if not array_only:
cv2.imshow("aoeu", np.concatenate(ego_views, axis=0))
if do_wait:
return str(chr(cv2.waitKey(wait_key) & 255))
else:
cv2.waitKey(100)
return ego_views
def Initialize(self, action):
self.agent_count = action["agentCount"]
for i in range(self.agent_count):
self.agents.append(
AgentObject(
self,
len(self.agents),
min_steps_between_agents=self.min_steps_between_agents,
)
)
return (True, None)
def GetReachablePositions(self, action):
# We don't need this for now
        # If we have to use this:
        # The result corresponds to "currently reachable" agent positions.
        # Initially reachable points can be accessed via the instance attribute.
# current_reachable_pos: List[Dict[str, float]] = []
# for p in np.argwhere(self.agent_reachable_positions_mask):
# x, z = self._rowcol_to_xz((p[0], p[1]))
# current_reachable_pos.append(
# {
# "x": x,
# "z": z
# }
# )
# return (True, current_reachable_pos)
raise NotImplementedError
def GetCurrentReachablePositionsSet(self, action):
        # More efficient than GetReachablePositions as it directly returns a
        # set instead of a list of dicts that must then be converted to a set.
        # The result corresponds to "currently reachable" agent positions;
        # initially reachable points can be accessed via the instance attribute.
current_reachable_pos_set: Set[Tuple[float, float]] = set()
for p in np.argwhere(self.agent_reachable_positions_mask):
x, z = self.rowcol_to_xz((p[0], p[1]))
current_reachable_pos_set.add((x, z))
return (True, current_reachable_pos_set)
def TeleportFull(self, action):
agent = self.agents[action["agentId"]]
row, col = self.xz_to_rowcol((action["x"], action["z"]))
if action.get("makeReachable"):
self.agent_reachable_positions_mask[row, col] = True
if (not action.get("forceAction")) and (
not agent.is_valid_new_position(
new_row=row,
new_col=col,
additional_mask=self.current_near_lifted_object_mask()
if self.lifted_object is not None
else None,
allow_agent_intersection=False
if "allowAgentIntersection" not in action
else action["allowAgentIntersection"],
)
):
return False, None
agent.row, agent.col = row, col
agent.rot = action["rotation"]["y"]
return True, None
def TeleportObject(self, action):
# Updates and sets the lifted object position and agents' location too
objectId = action["objectId"]
obj = self.tracked_objects[objectId]
to_row, to_col = self.xz_to_rowcol((action["x"], action["z"]))
to_rot = 90 * (round(action["rotation"]["y"] / 90) % 4)
old_rot = obj.rot
obj.rot = to_rot
# TODO: Assertion is only necessary as objects do not currently
# store their own masks
assert objectId == self.lifted_object_id
if self._move_object(
obj=obj,
delta={"row": to_row - obj.row, "col": to_col - obj.col},
valid_mask=self.rotation_to_lifted_object_reachable_position_masks[
int(self.lifted_object.rot)
],
skip_valid_check=action["forceAction"]
if "forceAction" in action
else False,
):
return True, None
else:
obj.rot = old_rot
return False, None
def CreateLiftedFurnitureAtLocation(self, action):
# Updates and sets the lifted object position and agents' location too
assert action["objectType"] == self.lifted_object_type
rowcol = self.xz_to_rowcol((action["x"], action["z"]))
rot = 90 * (round(action["rotation"]["y"] / 90) % 4)
valid_position = self.rotation_to_lifted_object_reachable_position_masks[rot][
rowcol[0], rowcol[1]
]
if not valid_position:
if action.get("forceAction"):
self.rotation_to_lifted_object_reachable_position_masks[rot][
rowcol[0], rowcol[1]
] = True
else:
return False, None
self.lifted_object = TrackedObject(
self, self.lifted_object_id, self.lifted_object_type
)
row, col = rowcol
self.lifted_object.row = row
self.lifted_object.col = col
self.lifted_object.rot = rot
self.tracked_objects[self.lifted_object_id] = self.lifted_object
if not all(
agent.is_valid_new_position(
new_row=agent.row,
new_col=agent.col,
additional_mask=self.current_near_lifted_object_mask(),
allow_agent_intersection=True,
)
for agent in self.agents
):
self.lifted_object = None
del self.tracked_objects[self.lifted_object_id]
return False, None
return (True, self.lifted_object_id)
def RandomlyCreateLiftedFurniture(self, action):
# Updates and sets the lifted object position and agents' location too
assert action["objectType"] == self.lifted_object_type
# pick random reachable spot in object_reachable_pos
# random.seed(0)
for i in range(10):
agent_points = []
point = random.choice(
# np.argwhere(self.rotation_to_lifted_object_reachable_position_masks[rotation])
np.argwhere(self.agent_reachable_positions_mask)
)
possible_rotations = [
rot
for rot in self.rotation_to_lifted_object_reachable_position_masks
if self.rotation_to_lifted_object_reachable_position_masks[rot][
point[0], point[1]
]
]
if len(possible_rotations) == 0:
continue
rotation = random.choice(possible_rotations)
self.lifted_object = TrackedObject(
self, self.lifted_object_id, self.lifted_object_type
)
row, col = point
self.lifted_object.row = row
self.lifted_object.col = col
self.lifted_object.rot = rotation
self.tracked_objects[self.lifted_object_id] = self.lifted_object
current_state = self.empty_mask()
object_mask = self.lifted_object_template == 1
interactable_positions = self.lifted_object_template == 2
rotations = int((360 - rotation) / 90)
if rotations < 4:
object_mask = np.rot90(object_mask, k=rotations)
interactable_positions = np.rot90(interactable_positions, k=rotations)
mask_buffer_row, mask_buffer_col = (
object_mask.shape[0] // 2,
object_mask.shape[1] // 2,
)
rlow, rhigh = row - mask_buffer_row, row + mask_buffer_row + 1
clow, chigh = col - mask_buffer_col, col + mask_buffer_col + 1
rlowdelta, rhighdelta = (
max(-rlow, 0),
max(rhigh - current_state.shape[0], 0),
)
clowdelta, chighdelta = (
max(-clow, 0),
max(chigh - current_state.shape[1], 0),
)
current_state[
rlow + rlowdelta : rhigh - rhighdelta,
clow + clowdelta : chigh - chighdelta,
] = interactable_positions[
rlowdelta : interactable_positions.shape[0] - rhighdelta,
clowdelta : interactable_positions.shape[1] - chighdelta,
]
current_state &= self.agent_reachable_positions_mask
agent_points = []
if self.min_steps_between_agents == 1:
agent_points = random.sample(
list(np.argwhere(current_state)), k=self.agent_count
)
# XXX need to retry if we can't put the agent in a location
if len(agent_points) != self.agent_count:
continue
else:
rows_and_cols = np.argwhere(current_state)
if len(rows_and_cols) < self.agent_count:
continue
np.random.shuffle(rows_and_cols)
for count, items in enumerate(
itertools.combinations(
list(range(rows_and_cols.shape[0])), self.agent_count
)
):
if count > 100:
break
subset: np.ndarray = rows_and_cols[items, :]
diag = np.expand_dims(
np.diag([self.min_steps_between_agents] * self.agent_count), -1
)
if (
diag
+ np.abs(subset.reshape(-1, 1, 2) - subset.reshape(1, -1, 2))
).max(-1).min() >= self.min_steps_between_agents:
np.random.shuffle(subset)
agent_points = subset
break
if len(agent_points) != self.agent_count:
break
if len(agent_points) != self.agent_count:
return (False, None)
for i, agent in enumerate(self.agents):
agent.row = agent_points[i][0]
agent.col = agent_points[i][1]
if random.random() < 0.5:
if agent.row > self.lifted_object.row:
agent.rot = 0
elif agent.row < self.lifted_object.row:
agent.rot = 180
else:
agent.rot = random.choice([0, 180])
else:
if agent.col < self.lifted_object.col:
agent.rot = 90
elif agent.col > self.lifted_object.col:
agent.rot = 270
else:
agent.rot = random.choice([90, 270])
return (True, self.lifted_object_id)
def GetReachablePositionsForObject(self, action):
assert action["objectId"] == self.lifted_object_id
return (True, self.rotation_to_lifted_object_reachable_pos)
def MoveAhead(self, action):
return self._move_agent(action, 0)
def _move_object(self, obj, delta, valid_mask, skip_valid_check=False):
if skip_valid_check or obj.is_valid_new_position(
obj.row + delta["row"], obj.col + delta["col"], valid_mask
):
obj.row += delta["row"]
obj.col += delta["col"]
return True
else:
return False
def _move_agents_with_lifted(self, action, r):
assert action["objectId"] == self.lifted_object_id
agent = self.agents[action["agentId"]]
delta = MOVE_MAP[int((agent.rot + r) % 360)]
obj = self.lifted_object
next_obj_z = obj.row + delta["row"]
next_obj_x = obj.col + delta["col"]
success = True
if obj.is_valid_new_position(
next_obj_z,
next_obj_x,
self.rotation_to_lifted_object_reachable_position_masks[
int(self.lifted_object.rot)
],
):
imask = self.current_near_lifted_object_mask(row=next_obj_z, col=next_obj_x)
for a in self.agents:
if not a.is_valid_new_position(
a.row + delta["row"],
a.col + delta["col"],
imask,
allow_agent_intersection=True,
):
success = False
break
else:
success = False
if success:
assert self._move_object(
self.lifted_object,
delta,
self.rotation_to_lifted_object_reachable_position_masks[
int(self.lifted_object.rot)
],
skip_valid_check=True,
)
for a in self.agents:
assert self._move_object(
a,
delta,
self.current_near_lifted_object_mask(),
skip_valid_check=True,
)
return (success, None)
def _move_lifted(self, action, r):
assert action["objectId"] == self.lifted_object_id
agent = self.agents[action["agentId"]]
delta = MOVE_MAP[int((agent.rot + r) % 360)]
obj = self.lifted_object
next_obj_z = obj.row + delta["row"]
next_obj_x = obj.col + delta["col"]
success = True
if obj.is_valid_new_position(
next_obj_z,
next_obj_x,
self.rotation_to_lifted_object_reachable_position_masks[
int(self.lifted_object.rot)
],
):
imask = self.current_near_lifted_object_mask(row=next_obj_z, col=next_obj_x)
for a in self.agents:
if not imask[a.row, a.col]:
success = False
else:
success = False
if success:
self._move_object(
self.lifted_object,
delta,
self.rotation_to_lifted_object_reachable_position_masks[
int(self.lifted_object.rot)
],
)
return (success, None)
def _move_agent(self, action, r):
agent = self.agents[action["agentId"]]
delta = MOVE_MAP[int((agent.rot + r) % 360)]
success = self._move_object(
agent, delta, self.current_near_lifted_object_mask()
)
return (success, None)
def MoveLeft(self, action):
return self._move_agent(action, -90)
def MoveRight(self, action):
return self._move_agent(action, 90)
def MoveBack(self, action):
return self._move_agent(action, 180)
def RotateRight(self, action):
agent = self.agents[action["agentId"]]
agent.rot = (agent.rot + 90) % 360
return (True, None)
def RotateLeft(self, action):
agent = self.agents[action["agentId"]]
agent.rot = (agent.rot - 90) % 360
return (True, None)
def current_near_lifted_object_mask(self, row=None, col=None, rotation=None):
if rotation is None:
rotation = self.lifted_object.rot
rotations = int((360 - rotation) / 90)
interactable_mask = self.lifted_object_template == 2
interactable_mask = np.rot90(interactable_mask, k=rotations)
if col is None or row is None:
row = self.lifted_object.row
col = self.lifted_object.col
mask_buffer_row, mask_buffer_col = (
interactable_mask.shape[0] // 2,
interactable_mask.shape[1] // 2,
)
current_state = self.empty_mask()
rlow, rhigh = row - mask_buffer_row, row + mask_buffer_row + 1
clow, chigh = col - mask_buffer_col, col + mask_buffer_col + 1
rlowdelta, rhighdelta = (
max(-rlow, 0),
max(rhigh - current_state.shape[0], 0),
)
clowdelta, chighdelta = max(-clow, 0), max(chigh - current_state.shape[1], 0)
current_state[
rlow + rlowdelta : rhigh - rhighdelta,
clow + clowdelta : chigh - chighdelta,
] = interactable_mask[
rlowdelta : interactable_mask.shape[0] - rhighdelta,
clowdelta : interactable_mask.shape[1] - chighdelta,
]
return current_state
def current_lifted_object_mask(self):
rotation = self.lifted_object.rot
rotations = int((360 - rotation) / 90)
object_mask = self.lifted_object_template == 1
object_mask = np.rot90(object_mask, k=rotations)
row = self.lifted_object.row
col = self.lifted_object.col
mask_buffer_row, mask_buffer_col = (
object_mask.shape[0] // 2,
object_mask.shape[1] // 2,
)
current_state = self.empty_mask()
rlow, rhigh = row - mask_buffer_row, row + mask_buffer_row + 1
clow, chigh = col - mask_buffer_col, col + mask_buffer_col + 1
rlowdelta, rhighdelta = (
max(-rlow, 0),
max(rhigh - current_state.shape[0], 0),
)
clowdelta, chighdelta = max(-clow, 0), max(chigh - current_state.shape[1], 0)
current_state[
rlow + rlowdelta : rhigh - rhighdelta,
clow + clowdelta : chigh - chighdelta,
] = object_mask[
rlowdelta : object_mask.shape[0] - rhighdelta,
clowdelta : object_mask.shape[1] - chighdelta,
]
return current_state
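    # Note (descriptive): current_lifted_object_mask marks the grid cells
    # covered by the lifted object itself (template value 1), whereas
    # current_near_lifted_object_mask marks the cells from which agents may
    # stand while manipulating it (template value 2), both rotated to the
    # object's current orientation.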
def _rotate_lifted(self, new_rotation):
if not self.rotation_to_lifted_object_reachable_position_masks[new_rotation][
self.lifted_object.row, self.lifted_object.col
]:
self._error_message = (
"Lifted object colliding with non-agent after rotation."
)
return False
imask = self.current_near_lifted_object_mask(rotation=new_rotation)
for a in self.agents:
if not imask[a.row, a.col]:
self._error_message = (
"Lifted object colliding with agent after rotation."
)
return False
self.lifted_object.rot = new_rotation
return True
def agent_inside_range(
self, agent_id, top_row, bottom_row, left_column, right_column
):
row = self.agents[agent_id].row
col = self.agents[agent_id].col
return top_row <= row <= bottom_row and left_column <= col <= right_column
def CreateAndPlaceObjectOnFloorAtLocation(self, action):
# Places object on the floor, but also updates the
# agent_reachable_positions_mask
object_mask = action["object_mask"]
object_type = action["objectType"]
force_action = False if "forceAction" not in action else action["forceAction"]
rot = 90 * (round(action["rotation"]["y"] / 90) % 4)
mask = np.rot90(object_mask, k=-(rot // 90))
row, col = self.xz_to_rowcol((action["x"], action["z"]))
row_rad, col_rad = mask.shape[0] // 2, mask.shape[1] // 2
reachable_subset = self.agent_reachable_positions_mask[
row - row_rad : row + row_rad + 1, col - col_rad : col + col_rad + 1
]
if force_action or (np.logical_and(reachable_subset, mask) == mask).all():
if (not force_action) and np.any(
[
self.agent_inside_range(
agent_id,
row - row_rad,
row + row_rad,
col - col_rad,
col + col_rad,
)
for agent_id in range(len(self.agents))
]
):
# TODO: THIS CURRENTLY ONLY WORKS FOR RECTANGULARLY SHAPED OBJECTS
return False, None
# update the agent_reachable_positions_mask
self.agent_reachable_positions_mask[
row - row_rad : row + row_rad + 1, col - col_rad : col + col_rad + 1
] &= np.logical_not(mask)
xz = self.rowcol_to_xz((row, col))
object_id = object_type + "|{}".format(len(self.tracked_objects) + 1)
floor_object = TrackedObject(self, object_id, object_type)
floor_object.row = row
floor_object.col = col
floor_object.rot = rot
floor_object.object_mask = mask
self.tracked_objects[object_id] = floor_object
return (
True,
{
"position": {"x": xz[0], "y": math.nan, "z": xz[1]},
"row": floor_object.row,
"col": floor_object.col,
"rotation": floor_object.rot,
"objectId": object_id,
},
)
return False, None
def RandomlyCreateAndPlaceObjectOnFloor(self, action):
# Places object on the floor, but also updates the
# agent_reachable_positions_mask
object_mask = action["object_mask"]
object_type = action["objectType"]
object_masks = [(k, np.rot90(object_mask, k=k)) for k in range(4)]
positions = np.argwhere(self.agent_reachable_positions_mask)
for i in np.random.permutation(positions.shape[0]):
row, col = positions[i]
random.shuffle(object_masks)
for k, mask in object_masks:
row_rad, col_rad = mask.shape[0] // 2, mask.shape[1] // 2
reachable_subset = self.agent_reachable_positions_mask[
row - row_rad : row + row_rad + 1, col - col_rad : col + col_rad + 1
]
if (np.logical_and(reachable_subset, mask) == mask).all():
if np.any(
[
self.agent_inside_range(
agent_id,
row - row_rad,
row + row_rad,
col - col_rad,
col + col_rad,
)
for agent_id in range(len(self.agents))
]
):
continue
# update the agent_reachable_positions_mask
self.agent_reachable_positions_mask[
row - row_rad : row + row_rad + 1,
col - col_rad : col + col_rad + 1,
] &= np.logical_not(mask)
xz = self.rowcol_to_xz((row, col))
object_id = object_type + "|{}".format(
len(self.tracked_objects) + 1
)
floor_object = TrackedObject(self, object_id, object_type)
floor_object.row = row
floor_object.col = col
floor_object.rot = 90 * k
floor_object.object_mask = mask
self.tracked_objects[object_id] = floor_object
return (
True,
{
"position": {"x": xz[0], "y": math.nan, "z": xz[1]},
"row": row,
"col": col,
"rotation": 90 * k,
"objectId": object_id,
},
)
return False, None
def RotateLiftedObjectLeft(self, action):
new_rotation = (self.lifted_object.rot - 90) % 360
return (self._rotate_lifted(new_rotation), None)
def RotateLiftedObjectRight(self, action):
new_rotation = (self.lifted_object.rot + 90) % 360
return (self._rotate_lifted(new_rotation), None)
def MoveAgentsRightWithObject(self, action):
return self._move_agents_with_lifted(action, 90)
def MoveAgentsAheadWithObject(self, action):
return self._move_agents_with_lifted(action, 0)
def MoveAgentsBackWithObject(self, action):
return self._move_agents_with_lifted(action, 180)
def MoveAgentsLeftWithObject(self, action):
return self._move_agents_with_lifted(action, -90)
def MoveLiftedObjectRight(self, action):
return self._move_lifted(action, 90)
def MoveLiftedObjectAhead(self, action):
return self._move_lifted(action, 0)
def MoveLiftedObjectBack(self, action):
return self._move_lifted(action, 180)
def MoveLiftedObjectLeft(self, action):
return self._move_lifted(action, -90)
def Pass(self, action):
return (True, None)
def step(self, action, raise_for_failure=False):
self.steps_taken += 1
# XXX should have invalid action
# print("running method %s" % action)
method = getattr(self, action["action"])
success, result = method(action)
events = []
for a in self.agents:
events.append(
Event(
self._generate_metadata(
a, self.lifted_object, result, action, success
)
)
)
self.last_event = MultiAgentEvent(
action.get("agentId") if "agentId" in action else 0, events
)
self.last_event.metadata["errorMessage"] = self._error_message
self._error_message = ""
return self.last_event
def _generate_metadata(
self,
agent: "AgentObject",
lifted_object: "GridObject",
result: Any,
action: str,
success: bool,
):
metadata = dict()
metadata["agent"] = dict(position=agent.position, rotation=agent.rotation)
metadata["objects"] = []
if len(self.tracked_objects) > 0:
for object_id, tracked_object in self.tracked_objects.items():
metadata["objects"].append(
dict(
position=tracked_object.position,
rotation=tracked_object.rotation,
objectType=tracked_object.object_type,
objectId=tracked_object.object_id,
)
)
metadata["actionReturn"] = result
metadata["lastAction"] = action["action"]
metadata["lastActionSuccess"] = success
metadata["sceneName"] = self.scene_name
metadata["screenHeight"] = 300
metadata["screenWidth"] = 300
metadata["colors"] = []
return metadata
class GridObject(object):
def __init__(self, controller):
self.controller = controller
self.col = 0
self.row = 0
self.rot = 0.0
@property
def position(self):
cx = (self.col * self.controller.grid_size) + self.controller.min_x
cz = (-self.row * self.controller.grid_size) + self.controller.max_z
return dict(x=cx, y=1.0, z=cz)
@property
def x(self):
return (self.col * self.controller.grid_size) + self.controller.min_x
@property
def z(self):
return (-self.row * self.controller.grid_size) + self.controller.max_z
@property
def rotation(self):
return dict(x=0.0, y=self.rot, z=0.0)
class AgentObject(GridObject):
def __init__(
self,
controller: GridWorldController,
agent_id,
min_steps_between_agents: int = 1,
):
super().__init__(controller)
self.agent_id = agent_id
self.min_steps_between_agents = min_steps_between_agents
def is_valid_new_position(
self, new_row, new_col, additional_mask, allow_agent_intersection=False
):
if additional_mask is not None:
additional_mask &= self.controller.agent_reachable_positions_mask
else:
additional_mask = copy.deepcopy(
self.controller.agent_reachable_positions_mask
)
# mark spots occupied by agents as False
if not allow_agent_intersection:
for a in self.controller.agents:
if a is not self:
d = self.min_steps_between_agents
additional_mask[
(a.row - d + 1) : (a.row + d), (a.col - d + 1) : (a.col + d)
] = False
return additional_mask[new_row, new_col]
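    # Note (descriptive): when allow_agent_intersection is False, a square of
    # side (2 * min_steps_between_agents - 1) centered on every other agent is
    # masked out above, which enforces an L-infinity separation of at least
    # min_steps_between_agents grid cells between agents.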
class TrackedObject(GridObject):
def __init__(self, controller, object_id, object_type):
super().__init__(controller)
self.object_id = object_id
self.object_type = object_type
def is_valid_new_position(self, new_row, new_col, additional_mask):
assert additional_mask is not None
return additional_mask[new_row, new_col]
def run_demo(controller: GridWorldController):
key_to_action = {
"w": "MoveAhead",
"a": "MoveLeft",
"s": "MoveBack",
"d": "MoveRight",
"z": "RotateLeft",
"x": "RotateRight",
"i": "MoveAgentsAheadWithObject",
"j": "MoveAgentsLeftWithObject",
"k": "MoveAgentsBackWithObject",
"l": "MoveAgentsRightWithObject",
"m": "RotateLiftedObjectLeft",
",": "RotateLiftedObjectRight",
"t": "MoveLiftedObjectAhead",
"f": "MoveLiftedObjectLeft",
"g": "MoveLiftedObjectBack",
"h": "MoveLiftedObjectRight",
}
print("Controls:")
print("'q':\tQuit")
print("'0' ('1', '2', ...):\tChange to controlling agent 0.")
for k in "wasdzxijklm,tfgh":
print(
"'{}':\t{}".format(
k,
" ".join(
[
word.lower() if i != 0 else word
for i, word in enumerate(
re.split("([A-Z][^A-Z]*)", key_to_action[k])
)
if word != ""
]
),
)
)
controlling_agent_id = 0
trying_to_quit = False
while True:
c = controller.viz_world()
# controller.viz_ego_agent_views()
if c in ["0", "1", "2", "3", "4", "5", "6"]:
trying_to_quit = False
controlling_agent_id = int(c)
print("Switched to agent {}".format(c))
elif c == "q":
print("Are you sure you wish to exit the demo? (y/n)")
trying_to_quit = True
elif trying_to_quit and c == "y":
return
elif c in key_to_action:
trying_to_quit = False
controller.step(
{
"action": key_to_action[c],
"agentId": controlling_agent_id,
"objectId": "Television|1",
}
)
print(
"Taking action {}\nAction {}\n".format(
key_to_action[c],
"success"
if controller.last_event.metadata["lastActionSuccess"]
else "failure",
)
)
for agent_id, agent in enumerate(controller.agents):
print("Agent {} position".format(agent_id), agent.position)
print("Object position", controller.lifted_object.position)
print("")
else:
trying_to_quit = False
print('Invalid key "{}"'.format(c))
class AI2ThorLiftedObjectGridEnvironment(object):
def __init__(
self,
lifted_object_height: float,
max_dist_to_lifted_object: float,
object_type: str,
min_steps_between_agents: int = 1,
docker_enabled: bool = False,
x_display: str = None,
local_thor_build: str = None,
time_scale: float = 1.0,
visibility_distance: float = constants.VISIBILITY_DISTANCE,
fov: float = constants.FOV,
num_agents: int = 1,
visible_agents: bool = True,
headless: bool = True,
remove_unconnected_positions: bool = False,
) -> None:
assert object_type == "Television"
self.lifted_object_height = lifted_object_height
self.max_dist_to_lifted_object = max_dist_to_lifted_object
self.object_type = object_type
self.num_agents = num_agents
self._thor_env = AI2ThorEnvironment(
docker_enabled=docker_enabled,
x_display=x_display,
local_thor_build=local_thor_build,
time_scale=time_scale,
visibility_distance=visibility_distance,
fov=fov,
restrict_to_initially_reachable_points=True,
num_agents=1,
visible_agents=True,
headless=headless,
)
self.controller: Optional[GridWorldController] = None
self.x_display = x_display
self._started = False
self.move_mag: Optional[float] = None
self.grid_size: Optional[float] = None
self.time_scale = time_scale
self.visibility_distance = visibility_distance
self.visible_agents = visible_agents
self.headless = headless
self.min_steps_between_agents = min_steps_between_agents
self.remove_unconnected_positions = remove_unconnected_positions
self.cached_initially_reachable_positions = {}
self.cached_rotation_to_lifted_object_reachable_positions = {}
self.cached_controller = {}
self.object_unreachable_silhouette_template_string = None
@property
def scene_name(self) -> str:
return self.controller.last_event.metadata["sceneName"]
@property
def current_frame(self) -> np.ndarray:
return self.controller.last_event.frame
@property
def current_frames(self) -> Tuple[np.ndarray, ...]:
return tuple(
self.controller.last_event.events[i].frame for i in range(self.num_agents)
)
@property
def last_event(self) -> ai2thor.server.Event:
return self.controller.last_event
@property
def started(self) -> bool:
return self._started
def start(
self,
scene_name: Optional[str],
move_mag: float = 0.25,
player_screen_width=128,
player_screen_height=128,
quality="Very Low",
) -> None:
self._thor_env.start(
scene_name=scene_name,
move_mag=move_mag,
player_screen_width=player_screen_width,
player_screen_height=player_screen_height,
quality=quality,
)
self._started = True
self.reset(scene_name=scene_name, move_mag=move_mag)
def stop(self) -> None:
try:
self._thor_env.stop()
except Exception as e:
warnings.warn(str(e))
finally:
self._started = False
def reset(self, scene_name: Optional[str], move_mag: float = 0.25):
        # While calls to "RandomlyCreateLiftedFurniture" are made via the
        # thor_env, they are only used to obtain the reachable locations. For
        # the gridworld environment, neither the lifted object nor the agents
        # are placed here. That is done by executing:
        # env.controller.step({"action": "RandomlyCreateLiftedFurniture"})
assert move_mag == 0.25
self.move_mag = move_mag
self.grid_size = self.move_mag
if scene_name is None:
scene_name = self.controller.last_event.metadata["sceneName"]
if scene_name not in self.cached_initially_reachable_positions:
self._thor_env.reset(scene_name, move_mag=move_mag)
self.cached_initially_reachable_positions[scene_name] = copy.deepcopy(
self._thor_env.initially_reachable_points
)
self._thor_env.step(
dict(
action="RandomlyCreateLiftedFurniture",
objectType=self.object_type,
objectVariation=1,
y=self.lifted_object_height,
z=self.max_dist_to_lifted_object,
)
)
assert self._thor_env.last_event.metadata["lastActionSuccess"]
object_id = self._thor_env.last_event.metadata["actionReturn"]
self._thor_env.step(
{
"action": "GetReachablePositionsForObject",
"objectId": object_id,
"positions": self.cached_initially_reachable_positions[scene_name],
}
)
assert self._thor_env.last_event.metadata["lastActionSuccess"]
self.cached_rotation_to_lifted_object_reachable_positions[
scene_name
] = self._thor_env.last_event.metadata["actionReturn"]
if self.object_unreachable_silhouette_template_string is None:
self._thor_env.step(
{
"action": "GetUnreachableSilhouetteForObject",
"objectId": object_id,
"z": self.max_dist_to_lifted_object,
}
)
assert self._thor_env.last_event.metadata["lastActionSuccess"]
self.object_unreachable_silhouette_template_string = self._thor_env.last_event.metadata[
"actionReturn"
]
            # Remove rows/cols where the object is not present and the agents cannot go
mat = [
l.strip().split(" ")
for l in self.object_unreachable_silhouette_template_string.strip().split(
"\n"
)
]
any_removed = True
while any_removed:
any_removed = False
if all(mat[0][i] == "0" for i in range(len(mat[0]))) and all(
mat[-1][i] == "0" for i in range(len(mat[0]))
):
any_removed = True
mat.pop(0)
mat.pop(-1)
if all(mat[i][0] == "0" for i in range(len(mat))) and all(
mat[i][-1] == "0" for i in range(len(mat))
):
any_removed = True
for l in mat:
l.pop(0)
l.pop(-1)
assert len(mat) % 2 == 1 and len(mat[0]) % 2 == 1
self.object_unreachable_silhouette_template_string = "\n".join(
[" ".join(l) for l in mat]
)
self.cached_controller[scene_name] = copy.deepcopy(
GridWorldController(
agent_initially_reachable_pos=self.cached_initially_reachable_positions[
scene_name
],
rotation_to_lifted_object_reachable_pos=self.cached_rotation_to_lifted_object_reachable_positions[
scene_name
],
lifted_object_id=self.object_type + "|1",
lifted_object_type=self.object_type,
min_steps_between_agents=self.min_steps_between_agents,
grid_size=self.grid_size,
object_template_string=self.object_unreachable_silhouette_template_string,
scene_name=scene_name,
remove_unconnected_positions=self.remove_unconnected_positions,
)
)
self.controller = copy.deepcopy(self.cached_controller[scene_name])
self.controller.step({"action": "Initialize", "agentCount": self.num_agents})
@property
def initially_reachable_points(self) -> Tuple[Dict[str, float]]:
return self.controller.agent_initially_reachable_pos_tuple # type:ignore
def get_current_multi_agent_occupancy_tensors(
self, use_initially_reachable_points_matrix: bool = False
) -> Tuple[List[np.ndarray], Dict[Tuple, Tuple]]:
        # Padding is already incorporated at the controller level.
        # Check whether that needs to be improved.
        # The reachable tensor is at index 0.
point_to_element_map = dict()
if use_initially_reachable_points_matrix:
reachable_tensor = np.expand_dims(
copy.deepcopy(self.controller.agent_initially_reachable_positions_mask),
axis=0,
)
for point in self.controller.agent_initially_reachable_pos_tuple:
xz = (point["x"], point["z"])
point_to_element_map[xz] = self.controller.xz_to_rowcol(xz)
else:
reachable_tensor = np.expand_dims(
copy.deepcopy(self.controller.agent_reachable_positions_mask), axis=0
)
for point in self.controller.agent_initially_reachable_pos_tuple:
xz = (point["x"], point["z"])
point_to_element_map[xz] = self.controller.xz_to_rowcol(xz)
for point in sum(
self.controller.rotation_to_lifted_object_reachable_pos.values(), []
):
xz = (point["x"], point["z"])
point_to_element_map[xz] = self.controller.xz_to_rowcol(xz)
# 0/1 reachable point matrix
positions_tensors = [
np.zeros((4 * self.num_agents, *reachable_tensor.shape[-2:]), dtype=float)
for _ in range(self.num_agents)
]
for i in range(self.num_agents):
agent_location_in_grid = self.get_agent_location_in_mask(agent_id=i)
# This is now in sync with the quantization done in visual hull processing
clock_90 = round(agent_location_in_grid["rot"] / 90) % 4
rowcol_val = (agent_location_in_grid["row"], agent_location_in_grid["col"])
for j in range(self.num_agents):
positions_tensors[j][
clock_90 + 4 * ((i - j) % self.num_agents),
rowcol_val[0],
rowcol_val[1],
] = 1.0
return (
[
np.concatenate((reachable_tensor, pt), axis=0)
for pt in positions_tensors
],
point_to_element_map,
)
def get_agent_location(self, agent_id: int = None) -> Dict[str, float]:
if self.num_agents == 1:
metadata = self.controller.last_event.metadata
else:
metadata = self.controller.last_event.events[agent_id].metadata
location = {
"x": metadata["agent"]["position"]["x"],
"y": metadata["agent"]["position"]["y"],
"z": metadata["agent"]["position"]["z"],
"rotation": metadata["agent"]["rotation"]["y"],
}
return location
def get_agent_location_in_mask(self, agent_id: int = None) -> Dict[str, float]:
if self.num_agents <= 1:
agent_id = 0
a = self.controller.agents[agent_id]
return {"row": a.row, "col": a.col, "rot": a.rot}
def _agent_location_to_tuple(self, p: Dict[str, float]):
return (round(p["x"], 2), round(p["z"], 2))
def get_agent_locations(self) -> Tuple[Dict[str, float], ...]:
"""Gets all agents' locations."""
return tuple(self.get_agent_location(i) for i in range(self.num_agents))
def get_agent_metadata(self, agent_id: int = 0) -> Dict[str, Any]:
"""Gets agent's metadata."""
return self.controller.last_event.events[agent_id].metadata["agent"]
def get_all_agent_metadata(self) -> Tuple[Dict[str, Any], ...]:
"""Gets all agents' locations."""
return tuple(self.get_agent_metadata(i) for i in range(self.num_agents))
def get_object_by_id(
self, object_id: str, agent_id: Optional[int] = None
) -> Dict[str, Any]:
        if self.num_agents <= 1:  # default agent_id when only a single agent exists
agent_id = 0
return [
o
for o in self.last_event.events[agent_id].metadata["objects"]
if o["objectId"] == object_id
][0]
def step(
self, action_dict: Dict[str, Union[str, int, float]]
) -> ai2thor.server.Event:
action = action_dict["action"]
agent_id = action_dict.get("agentId")
if agent_id is not None:
            assert type(agent_id) == int and 0 <= agent_id < self.num_agents
else:
assert self.num_agents == 1
action_dict["agentId"] = 0
return self.controller.step(action_dict)
def visualize(self, wait_key):
self.controller.viz_world(wait_key)
if __name__ == "__main__":
import random
import constants
env = AI2ThorLiftedObjectGridEnvironment(
lifted_object_height=1.3,
max_dist_to_lifted_object=1,
min_steps_between_agents=2,
object_type="Television",
num_agents=2,
local_thor_build=constants.ABS_PATH_TO_LOCAL_THOR_BUILD,
headless=False,
remove_unconnected_positions=True,
)
scenes = constants.TRAIN_SCENE_NAMES[20:40]
env.start(scenes[0], player_screen_height=500, player_screen_width=500)
dresser_silhouette_string = """1 1 1 1 1
1 1 1 1 1
1 1 1 1 1
"""
dresser_mask = env.controller.parse_template_to_mask(dresser_silhouette_string)
# Run demo
while True:
env.reset(random.choice(scenes))
env.step(
{
"action": "RandomlyCreateAndPlaceObjectOnFloor",
"agentId": 0,
"object_mask": dresser_mask,
"objectType": "Dresser",
}
)
assert env.last_event.metadata["lastActionSuccess"]
# print(env.last_event.metadata["actionReturn"])
env.step(
{
"action": "RandomlyCreateLiftedFurniture",
"objectType": "Television",
"agentId": 0,
}
)
for i in range(10):
if not env.last_event.metadata["lastActionSuccess"]:
env.step(
{
"action": "RandomlyCreateLiftedFurniture",
"objectType": "Television",
"agentId": 0,
}
)
run_demo(env.controller)
# print(env.controller.steps_taken)
env.cached_controller.clear()
env.cached_initially_reachable_positions.clear()
env.cached_rotation_to_lifted_object_reachable_positions.clear()
| cordial-sync-master | rl_ai2thor/ai2thor_gridworld_environment.py |
from __future__ import print_function, division
import math
from typing import Optional, List
import matplotlib
matplotlib.use("TkAgg", force=False)
import matplotlib.pyplot as plt
from matplotlib import animation
import pylab
from PIL import Image, ImageDraw
import copy
import numpy as np
import textwrap
import re
import itertools
from utils import misc_util
def outline_square(frame, row, col, num_grids=7, stroke=2):
f = copy.copy(frame)
row_inds = [0] + np.cumsum(misc_util.partition(f.shape[0], num_grids)).tolist()
col_inds = [0] + np.cumsum(misc_util.partition(f.shape[1], num_grids)).tolist()
row_start = row_inds[row]
row_end = row_inds[row + 1]
col_start = col_inds[col]
col_end = col_inds[col + 1]
    color = np.array([[[255, 0, 0]]])  # red; renamed to avoid shadowing the `col` argument
    offset = stroke
    f[row_start : (row_start + offset), col_start:col_end, :] = color
    f[(row_end - offset) : row_end, col_start:col_end, :] = color
    f[row_start:row_end, col_start : (col_start + offset), :] = color
    f[row_start:row_end, (col_end - offset) : col_end, :] = color
return f
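# Hedged usage sketch: outline_square draws a red border around one cell of a
# frame that is conceptually divided into num_grids x num_grids cells, e.g.
#
#   highlighted = outline_square(frame, row=3, col=4, num_grids=7)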
def PIL2array(img):
return np.array(img.getdata(), np.uint8).reshape(img.size[1], img.size[0], 3)
def save_frames_to_mp4(frames, file_name, fps=3):
h, w, _ = frames[0].shape
aspect_ratio = w / h
fig = plt.figure(figsize=(5 * aspect_ratio, 5))
ax = fig.add_subplot(111)
ax.set_frame_on(False)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
ax.set_aspect("equal")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
im = ax.imshow(frames[0], cmap="gray", interpolation="nearest")
im.set_clim([0, 255])
pylab.tight_layout()
def update_img(n):
if n >= len(frames):
im.set_data(frames[-1])
else:
im.set_data(frames[n])
# k += 1
return im
ani = animation.FuncAnimation(fig, update_img, len(frames) - 1, interval=200)
writer = animation.writers["ffmpeg"](fps=fps)
ani.save(file_name, writer=writer, dpi=300)
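# Hedged usage sketch: save_frames_to_mp4 expects a sequence of HxWx3 uint8
# frames and requires ffmpeg to be available to matplotlib's animation writers.
#
#   save_frames_to_mp4(frames, "trajectory.mp4", fps=5)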
class ThorPositionTo2DFrameTranslator(object):
def __init__(self, frame_shape, cam_position, orth_size):
self.frame_shape = frame_shape
self.lower_left = np.array((cam_position[0], cam_position[2])) - orth_size
self.span = 2 * orth_size
def __call__(self, position):
if len(position) == 3:
x, _, z = position
else:
x, z = position
camera_position = (np.array((x, z)) - self.lower_left) / self.span
return np.array(
(
round(self.frame_shape[0] * (1.0 - camera_position[1])),
round(self.frame_shape[1] * camera_position[0]),
),
dtype=int,
)
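# Hedged usage sketch: the translator maps world-space (x, z) (or (x, y, z))
# positions to (row, col) pixel coordinates of a top-down frame captured with
# an orthographic camera; cam_position and cam_orth_size are hypothetical
# values taken from the environment's metadata.
#
#   pos_translator = ThorPositionTo2DFrameTranslator(
#       frame.shape, position_to_tuple(cam_position), cam_orth_size
#   )
#   row, col = pos_translator((x, z))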
def position_to_tuple(position):
if "position" in position:
position = position["position"]
return (position["x"], position["y"], position["z"])
def get_agent_map_data(env):
env.step({"action": "ToggleMapView", "agentId": 0})
cam_position = env.last_event.metadata["cameraPosition"]
cam_orth_size = env.last_event.metadata["cameraOrthSize"]
pos_translator = ThorPositionTo2DFrameTranslator(
env.last_event.events[0].frame.shape,
position_to_tuple(cam_position),
cam_orth_size,
)
to_return = {
"frame": env.last_event.events[0].frame,
"cam_position": cam_position,
"cam_orth_size": cam_orth_size,
"pos_translator": pos_translator,
}
env.step({"action": "ToggleMapView", "agentId": 0})
return to_return
def add_agent_view_triangle(
position, rotation, frame, pos_translator, scale=1.0, opacity=0.1
):
p0 = np.array((position[0], position[2]))
p1 = copy.copy(p0)
p2 = copy.copy(p0)
theta = -2 * math.pi * (rotation / 360.0)
rotation_mat = np.array(
[[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]]
)
offset1 = scale * np.array([-1, 1]) * math.sqrt(2) / 2
offset2 = scale * np.array([1, 1]) * math.sqrt(2) / 2
p1 += np.matmul(rotation_mat, offset1)
p2 += np.matmul(rotation_mat, offset2)
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
    img2 = Image.new("RGBA", (frame.shape[1], frame.shape[0]))  # use RGBA; PIL size is (width, height)
    opacity = int(round(255 * opacity))  # Define transparency for the triangle.
points = [tuple(reversed(pos_translator(p))) for p in [p0, p1, p2]]
draw = ImageDraw.Draw(img2)
draw.polygon(points, fill=(255, 255, 255, opacity))
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
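# Hedged usage sketch combining get_agent_map_data and add_agent_view_triangle:
# capture a top-down map and overlay an agent's viewing direction as a
# translucent triangle. agent_position and agent_rotation_deg are hypothetical
# values obtained from the environment's metadata.
#
#   map_data = get_agent_map_data(env)
#   overlaid = add_agent_view_triangle(
#       agent_position,  # an (x, y, z) tuple
#       rotation=agent_rotation_deg,
#       frame=map_data["frame"],
#       pos_translator=map_data["pos_translator"],
#   )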
def draw_line_with_rounded_ends(draw, xy, fill, width):
draw.line(xy, fill=fill, width=width)
for c in [xy[:2], xy[2:]]:
draw.ellipse(
(
c[0] - width / 2 + 1,
c[1] - width / 2 + 1,
c[0] + width / 2 - 1,
c[1] + width / 2 - 1,
),
fill=fill,
outline=None,
)
def add_line_to_map(p0, p1, frame, pos_translator, opacity, color=None):
if p0 == p1:
return frame
if color is None:
color = (255, 0, 0)
input_was_rgba = frame.shape[-1] == 4
if input_was_rgba:
img1 = Image.fromarray(frame.astype("uint8"), "RGBA")
else:
img1 = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
    img2 = Image.new("RGBA", (frame.shape[1], frame.shape[0]))  # use RGBA; PIL size is (width, height)
    opacity = int(round(255 * opacity))  # Define transparency for the line.
draw = ImageDraw.Draw(img2)
# draw.line(
draw_line_with_rounded_ends(
draw,
tuple(reversed(pos_translator(p0))) + tuple(reversed(pos_translator(p1))),
fill=color + (opacity,),
width=int(frame.shape[0] / 100),
)
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB" if not input_was_rgba else "RGBA"))
def overlay_rgba_onto_rgb(rgb, rgba):
img1 = Image.fromarray(rgb.astype("uint8"), "RGB").convert("RGBA")
img2 = Image.fromarray(rgba.astype("uint8"), "RGBA")
img = Image.alpha_composite(img1, img2)
return np.array(img.convert("RGB"))
def visualize_agent_path(
positions,
frame,
pos_translator,
color_pair_ind: Optional[int] = None,
colors: Optional[List] = None,
show_vis_cone=True,
show_visibility_cone_marked_points=True,
only_show_last_visibility_cone=False,
position_mark_colors: Optional[List[Optional[str]]] = None,
opacity: float = 1.0,
):
import colour as col
if colors is None:
c0, c1 = [("red", "#ffc8c8"), ("green", "#c8ffc8"), ("blue", "#c8c8ff")][
(color_pair_ind % 3)
]
colors = list(col.Color(c0).range_to(col.Color(c1), len(positions) - 1))
if opacity != 0:
lines_frame = np.zeros((frame.shape[0], frame.shape[1], 4), dtype=np.uint8)
else:
lines_frame = frame
for i in range(len(positions) - 1):
lines_frame = add_line_to_map(
position_to_tuple(positions[i]),
position_to_tuple(positions[i + 1]),
lines_frame,
pos_translator,
opacity=1.0,
color=tuple(map(lambda x: int(round(255 * x)), colors[i].rgb)),
)
if opacity != 0:
lines_frame[:, :, 3] = np.array(
(lines_frame[:, :, 3] * opacity).round(), dtype=np.uint8
)
frame = overlay_rgba_onto_rgb(rgb=frame, rgba=lines_frame)
else:
frame = lines_frame
mark_positions = []
if position_mark_colors is not None:
assert len(position_mark_colors) == len(positions)
mark_positions = [
p
for p, mark_col in zip(positions, position_mark_colors)
if mark_col is not None
]
offsets = [(0.1, 0), (0, -0.1), (-0.1, 0), (0, 0.1)]
offset_mark_positions = []
mark_colors = []
for i in range(len(positions)):
if position_mark_colors[i] is not None:
offset_ind = (int(positions[i]["rotation"]) % 360) // 90
offset = offsets[offset_ind]
mp = copy.copy(positions[i])
mp["x"] = offset[0] + mp["x"]
mp["z"] = offset[1] + mp["z"]
offset_mark_positions.append(mp)
mark_colors.append(position_mark_colors[i])
frame = mark_positions_with_color(
offset_mark_positions,
frame,
pos_translator,
mark_colors,
radius_frame_percent=0.02,
)
agent_view_triangle_positions = positions
if only_show_last_visibility_cone:
agent_view_triangle_positions = [positions[-1]]
elif show_visibility_cone_marked_points:
agent_view_triangle_positions = copy.copy(mark_positions)
if show_vis_cone:
for i, position in enumerate(agent_view_triangle_positions):
frame = add_agent_view_triangle(
position_to_tuple(position),
rotation=position["rotation"],
frame=frame,
pos_translator=pos_translator,
scale=1.5,
opacity=0.15,
)
return frame
def visualize_agent_path_video(
positions,
frame,
pos_translator,
color_pair_ind: int,
show_visibility_cone_marked_points=True,
only_show_last_visibility_cone=False,
position_mark_colors: Optional[List[Optional[str]]] = None,
):
import colour as col
c0, c1 = [("red", "red"), ("green", "green"), ("orange", "black")][
(color_pair_ind % 3)
]
if len(positions) > 1:
colors = list(col.Color(c0).range_to(col.Color(c1), len(positions) - 1))
else:
colors = list(col.Color(c0).range_to(col.Color(c1), len(positions)))
for i in range(len(positions) - 1):
frame = add_line_to_map(
position_to_tuple(positions[i]),
position_to_tuple(positions[i + 1]),
frame,
pos_translator,
opacity=1.0, # 0.5,
color=tuple(map(lambda x: int(round(255 * x)), colors[i].rgb)),
)
mark_positions = []
if position_mark_colors is not None:
if not len(position_mark_colors) == len(positions):
raise RuntimeError
mark_positions = [
p
for p, mark_col in zip(positions, position_mark_colors)
if mark_col is not None
]
offsets = [(0.1, 0), (0, -0.1), (-0.1, 0), (0, 0.1)]
offset_mark_positions = []
mark_colors = []
for i in range(len(positions)):
if position_mark_colors[i] is not None:
offset_ind = (int(positions[i]["rotation"]) % 360) // 90
offset = offsets[offset_ind]
mp = copy.copy(positions[i])
mp["x"] = offset[0] + mp["x"]
mp["z"] = offset[1] + mp["z"]
offset_mark_positions.append(mp)
mark_colors.append(position_mark_colors[i])
frame = mark_positions_with_color(
offset_mark_positions,
frame,
pos_translator,
mark_colors,
radius_frame_percent=0.02,
)
agent_view_triangle_positions = positions
if only_show_last_visibility_cone:
agent_view_triangle_positions = [positions[-1]]
elif show_visibility_cone_marked_points:
agent_view_triangle_positions = copy.copy(mark_positions)
for i, position in enumerate(agent_view_triangle_positions):
frame = add_agent_view_triangle(
position_to_tuple(position),
rotation=position["rotation"],
frame=frame,
pos_translator=pos_translator,
scale=1.5,
opacity=0.15,
)
return frame
def mark_positions_with_color(
positions, frame, pos_translator, color, radius_frame_percent: float = 0.01
):
if len(positions) == 0:
return frame
if type(color) == list:
assert len(positions) % len(color) == 0
colors = color * (len(positions) // len(color))
else:
colors = [color] * len(positions)
radius = int(frame.shape[0] * radius_frame_percent)
img = Image.fromarray(frame.astype("uint8"), "RGB").convert("RGBA")
draw = ImageDraw.Draw(img)
for i, p in enumerate(positions):
ptuple = tuple(reversed(pos_translator(position_to_tuple(p))))
draw.ellipse(
(
ptuple[0] - radius / 2 + 1,
ptuple[1] - radius / 2 + 1,
ptuple[0] + radius / 2 - 1,
ptuple[1] + radius / 2 - 1,
),
fill=colors[i],
outline=None,
)
return np.array(img.convert("RGB"))
def plot_confusion_matrix(cm, labels):
"""
    :param cm: A confusion matrix: a square ``numpy array`` with one row/column per label
    :param labels: Class labels corresponding to the rows/columns of ``cm``
:return: A ``matplotlib.figure.Figure`` object with a numerical and graphical representation of the cm array
"""
numClasses = len(labels)
fig = matplotlib.figure.Figure(
figsize=(numClasses, numClasses), dpi=100, facecolor="w", edgecolor="k"
)
ax = fig.add_subplot(1, 1, 1)
ax.imshow(cm, cmap="Oranges")
classes = [
re.sub(r"([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))", r"\1 ", x) for x in labels
]
classes = ["\n".join(textwrap.wrap(l, 20)) for l in classes]
tick_marks = np.arange(len(classes))
ax.set_xlabel("Agent 1")
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes, rotation=-90, ha="center")
ax.xaxis.set_label_position("bottom")
ax.xaxis.tick_bottom()
ax.set_ylabel("Agent 0")
ax.set_yticks(tick_marks)
ax.set_ylim(-0.5, len(classes) - 0.5)
ax.set_yticklabels(classes, va="center")
ax.yaxis.set_label_position("left")
ax.yaxis.tick_left()
for i, j in itertools.product(range(numClasses), range(numClasses)):
ax.text(
j,
i,
round(cm[i, j] * 100, 2) if cm[i, j] != 0 else ".",
horizontalalignment="center",
verticalalignment="center",
color="black",
)
fig.set_tight_layout(True)
return fig
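# Illustrative sketch added for demonstration (not part of the original file):
# renders a tiny 2x2 confusion matrix of action-pair frequencies. The labels
# and values are made up; an Agg canvas is attached so the figure can be saved.
def _example_plot_confusion_matrix():
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    cm = np.array([[0.4, 0.1], [0.2, 0.3]])
    fig = plot_confusion_matrix(cm, labels=["MoveAhead", "RotateRight"])
    FigureCanvasAgg(fig)
    fig.savefig("confusion_matrix_example.png")
    return fig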
| cordial-sync-master | utils/visualization_utils.py |
from __future__ import division
import glob
import itertools
import json
import logging
import math
import os
import re
import shutil
import subprocess
from typing import Optional, Tuple, Sequence, Dict, Union
import numpy as np
import torch
import constants
try:
from reprlib import repr
except ImportError:
pass
from threading import Thread
from queue import Queue, Empty
def pad_matrix_to_size_topleft(
matrix: np.ndarray,
desired_output_shape: Tuple[int, int],
point_to_element_map: Dict[Tuple[float, float], Tuple[int, int]],
fill_value: Union[float, int] = constants.NO_INFO_SYM,
):
assert matrix.shape[0] <= desired_output_shape[0]
assert matrix.shape[1] <= desired_output_shape[1]
pad_matrix = np.full(desired_output_shape, fill_value=fill_value)
pad_matrix[0 : matrix.shape[0], 0 : matrix.shape[1]] = matrix
return pad_matrix, point_to_element_map
def _joint_probability_tensor_from_mixture_slow(
mixture_weights: torch.FloatTensor, marginal_prob_matrices: torch.FloatTensor
):
"""Used to error check joint_probability_tensor_from_mixture."""
return sum(
[
mixture_weights[j]
* outer_product(
[
marginal_prob_matrices[i][j]
for i in range(marginal_prob_matrices.shape[0])
]
)
for j in range(marginal_prob_matrices.shape[1])
]
)
def joint_probability_tensor_from_mixture(
mixture_weights: torch.FloatTensor, marginal_prob_matrices: torch.FloatTensor
):
assert len(mixture_weights.shape) == 1
if mixture_weights.shape[0] == 2:
v0 = marginal_prob_matrices[0].permute(1, 0)
u0 = marginal_prob_matrices[1] * mixture_weights.view(-1, 1)
return torch.matmul(v0, u0)
product: Optional[torch.Tensor] = None
new_shape = [mixture_weights.shape[0]] + [1] * marginal_prob_matrices.shape[0]
for i, matrix in enumerate(marginal_prob_matrices):
assert len(matrix.shape) == 2
if i == 0:
product = mixture_weights.reshape(*new_shape)
else:
new_shape[i] = 1
new_shape[i + 1] = -1
product = product * matrix.view(*new_shape)
return product.sum(0) # type: ignore
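# Illustrative sketch added for demonstration (not part of the original file):
# the vectorized mixture-to-joint computation should agree with the slow
# reference implementation above. The sizes used here (2 agents, 3 mixture
# components, 4 actions per agent) are arbitrary.
def _example_joint_probability_from_mixture():
    torch.manual_seed(0)
    mixture_weights = torch.softmax(torch.randn(3), dim=0)
    marginal_prob_matrices = torch.softmax(torch.randn(2, 3, 4), dim=-1)
    fast = joint_probability_tensor_from_mixture(mixture_weights, marginal_prob_matrices)
    slow = _joint_probability_tensor_from_mixture_slow(mixture_weights, marginal_prob_matrices)
    assert fast.shape == (4, 4)
    assert torch.allclose(fast, slow, atol=1e-6)
    return fast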
def joint_log_probability_tensor_from_mixture(
log_mixture_weights: torch.FloatTensor,
marginal_log_prob_matrices: torch.FloatTensor,
):
assert len(log_mixture_weights.shape) == 1
log_sum_tensor: Optional[torch.Tensor] = None
new_shape = [log_mixture_weights.shape[0]] + [1] * marginal_log_prob_matrices.shape[
0
]
for i, matrix in enumerate(marginal_log_prob_matrices):
assert len(matrix.shape) == 2
if i == 0:
log_sum_tensor = log_mixture_weights.reshape(*new_shape)
else:
new_shape[i] = 1
new_shape[i + 1] = -1
log_sum_tensor = log_sum_tensor + matrix.view(*new_shape)
return log_sum_exp(log_sum_tensor, dim=0) # type: ignore
def outer_product(vectors: Sequence[torch.FloatTensor]) -> torch.FloatTensor:
assert len(vectors) > 1
product: Optional[torch.Tensor] = None
new_shape = [1] * len(vectors)
for i, vector in enumerate(vectors):
new_shape[i] = -1
if i > 0:
new_shape[i - 1] = 1
product = product * vector.view(*new_shape)
else:
product = vector.view(*new_shape)
return product # type: ignore
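# Illustrative sketch added for demonstration (not part of the original file):
# the outer product of two probability vectors gives the joint table of two
# independent distributions.
def _example_outer_product():
    a = torch.tensor([0.25, 0.75])
    b = torch.tensor([0.1, 0.2, 0.7])
    table = outer_product([a, b])
    assert torch.allclose(table, a.unsqueeze(1) * b.unsqueeze(0))
    return table  # shape (2, 3)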
def outer_sum(vectors: Sequence[torch.FloatTensor]) -> torch.FloatTensor:
assert len(vectors) > 1
sum_tensor: Optional[torch.Tensor] = None
new_shape = [1] * len(vectors)
for i, vector in enumerate(vectors):
new_shape[i] = -1
if i > 0:
new_shape[i - 1] = 1
sum_tensor = sum_tensor + vector.view(*new_shape)
else:
sum_tensor = vector.view(*new_shape)
return sum_tensor # type: ignore
def huber_loss(diff, delta):
sq = diff.pow(2)
abs = diff.abs()
where_abs = (abs - delta >= 0).float()
return (sq * (1.0 - where_abs) + (2 * delta * abs - delta ** 2) * where_abs).sum()
def log_sum_exp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
Taken from https://github.com/pytorch/pytorch/issues/2591
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))
from numbers import Number
if isinstance(sum_exp, Number):
return m + math.log(sum_exp)
else:
return m + torch.log(sum_exp)
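# Illustrative sketch added for demonstration (not part of the original file):
# on well-scaled inputs the stable log-sum-exp matches the naive computation.
def _example_log_sum_exp():
    values = torch.tensor([[0.5, -1.0, 2.0], [3.0, 0.0, -2.0]])
    stable = log_sum_exp(values, dim=1)
    naive = values.exp().sum(dim=1).log()
    assert torch.allclose(stable, naive, atol=1e-6)
    return stable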
def fibonacci_sphere(samples=1):
rnd = 1.0
offset = 2.0 / samples
increment = math.pi * (3.0 - math.sqrt(5.0))
xs = []
ys = []
zs = []
for i in range(samples):
y = ((i * offset) - 1) + (offset / 2)
r = math.sqrt(1 - pow(y, 2))
phi = ((i + rnd) % samples) * increment
x = math.cos(phi) * r
z = math.sin(phi) * r
xs.append(x)
ys.append(y)
zs.append(z)
return {"xs": xs, "ys": ys, "zs": zs}
def save_project_state_in_log(
call,
task,
local_start_time_str,
dependent_data_paths: Optional[Tuple[str, ...]] = None,
log_dir: str = "./logs/",
):
short_sha = (
subprocess.check_output(["git", "describe", "--always"]).strip().decode("utf-8")
)
log_file_path = os.path.join(log_dir, task, local_start_time_str)
diff_path = os.path.join(log_file_path, "git-diff.txt")
sha_path = os.path.join(log_file_path, "sha.txt")
if not os.path.exists(log_file_path):
os.makedirs(log_file_path)
if os.path.exists(diff_path):
raise Exception("Diff should not already exist.")
with open(diff_path, "w") as f:
f.write(subprocess.check_output(["git", "diff"]).strip().decode("utf-8"))
with open(sha_path, "w") as f:
f.write(short_sha)
# Save data that we are dependent on (e.g. previously trained models)
if dependent_data_paths is not None:
for path in dependent_data_paths:
if path is not None:
hash = get_hash_of_file(path)
new_path = os.path.join(log_dir, "saved_data", hash + ".dat")
if not os.path.exists(new_path):
shutil.copyfile(path, new_path)
with open(
os.path.join(log_file_path, "saved_files_to_hashes.txt"), "a"
) as f:
f.write("{}\t{}\n".format(path, hash))
# Finally save the call made to main
with open(os.path.join(log_file_path, "call.json"), "w") as f:
json.dump(call, f)
return log_file_path
def random_orthogonal_matrix(dim=3):
random_state = np.random
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim - n + 1,))
D[n - 1] = np.sign(x[0])
x[0] -= D[n - 1] * np.sqrt((x * x).sum())
# Householder transformation
Hx = np.eye(dim - n + 1) - 2.0 * np.outer(x, x) / (x * x).sum()
mat = np.eye(dim)
mat[n - 1 :, n - 1 :] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1) ** (1 - (dim % 2)) * D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D * H.T).T
return H
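# Illustrative sketch added for demonstration (not part of the original file):
# the sampled matrix should be orthogonal (Q @ Q.T == I) with determinant 1.
def _example_random_orthogonal_matrix():
    q = random_orthogonal_matrix(dim=3)
    assert np.allclose(q @ q.T, np.eye(3))
    assert np.isclose(np.linalg.det(q), 1.0)
    return q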
def partition(n, num_parts):
m = n // num_parts
parts = [m] * num_parts
num_extra = n % num_parts
for i in range(num_extra):
parts[i] += 1
return parts
def expand_to_shape(shape, grid):
new = np.zeros(shape=(shape[0], shape[1]))
row_parts = np.cumsum(partition(shape[0], grid.shape[0]))
col_parts = np.cumsum(partition(shape[1], grid.shape[1]))
for i in range(grid.shape[0]):
if i == 0:
r0, r1 = (0, row_parts[i])
else:
r0, r1 = (row_parts[i - 1], row_parts[i])
for j in range(grid.shape[1]):
if j == 0:
c0, c1 = (0, col_parts[j])
else:
c0, c1 = (col_parts[j - 1], col_parts[j])
new[r0:r1, c0:c1] = grid[i, j]
return new
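# Illustrative sketch added for demonstration (not part of the original file):
# upsample a 2x2 grid of values to a 4x4 array, each cell covering its share
# of rows and columns (uses `partition` above to split the rows/columns).
def _example_expand_to_shape():
    grid = np.array([[1.0, 2.0], [3.0, 4.0]])
    expanded = expand_to_shape((4, 4), grid)
    assert expanded.shape == (4, 4)
    assert expanded[0, 0] == 1.0 and expanded[3, 3] == 4.0
    return expanded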
def setup_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter("%(asctime)s : %(message)s")
dir = "/".join(log_file.split("/")[:-1])
if not os.path.exists(dir):
os.makedirs(dir)
fileHandler = logging.FileHandler(log_file, mode="w")
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
l.addHandler(streamHandler)
def read_config(file_path):
"""Read JSON config."""
json_object = json.load(open(file_path, "r"))
return json_object
def norm_col_init(weights, std=1.0):
x = torch.randn(weights.size())
x *= std / torch.sqrt((x ** 2).sum(1, keepdim=True))
return x
def ensure_shared_grads(model, shared_model, gpu=False):
sentinal = object()
for param, shared_param in itertools.zip_longest(
model.parameters(), shared_model.parameters(), fillvalue=sentinal
):
assert sentinal is not param and sentinal is not shared_param
if shared_param.requires_grad:
assert param.requires_grad
if not gpu or param.grad is None:
shared_param._grad = param.grad
else:
shared_param._grad = param.grad.cpu()
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find("Linear") != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
def get_hash_of_file(file_path):
import hashlib, os
sha_hash = hashlib.sha1()
if not os.path.exists(file_path):
raise IOError("File " + file_path + " not found.")
with open(file_path, "rb") as f:
while True:
# Read file in as little chunks
buf = f.read(4096)
if not buf:
break
sha_hash.update(hashlib.sha1(buf).hexdigest().encode("utf-8"))
return sha_hash.hexdigest()
def get_hash_of_dirs(directory, verbose=0):
# http://akiscode.com/articles/sha-1directoryhash.shtml
# Copyright (c) 2009 Stephen Akiki
# MIT License (Means you can do whatever you want with this)
# See http://www.opensource.org/licenses/mit-license.php
# Error Codes:
# -1 -> Directory does not exist
# -2 -> General error (see stack traceback)
import hashlib, os
sha_hash = hashlib.sha1()
if not os.path.exists(directory):
return -1
try:
for root, dirs, files in os.walk(directory):
for names in files:
if verbose == 1:
print("Hashing", names)
filepath = os.path.join(root, names)
                try:
                    f1 = open(filepath, "rb")
                except Exception:
                    # You can't open the file for some reason; skip it.
                    continue
while True:
# Read file in as little chunks
buf = f1.read(4096)
if not buf:
break
sha_hash.update(hashlib.sha1(buf).hexdigest().encode("utf-8"))
f1.close()
except Exception as e:
raise e
return sha_hash.hexdigest()
def round_to_factor(num, base) -> int:
return int((num / base)) * base
def key_for_point(x, z):
return "%0.1f %0.1f" % (x, z)
def point_for_key(key):
    # Inverse of key_for_point above, which joins the coordinates with a space.
    x, z = key.split(" ")
    return dict(x=float(x), z=float(z))
def location_to_metadata(loc):
assert "x" in loc.keys()
assert "y" in loc.keys()
assert "z" in loc.keys()
assert "rotation" in loc.keys()
assert "horizon" in loc.keys()
meta = dict()
meta["position"] = dict(x=loc["x"], y=loc["y"], z=loc["z"])
meta["rotation"] = dict(y=round(loc["rotation"]))
meta["cameraHorizon"] = round(loc["horizon"])
return meta
def models_with_log_name(log_name, model_folder):
exp_name, date_time = log_name.split("/")
model_names_all = glob.glob(
os.path.join(model_folder, exp_name + "_*_" + date_time + ".dat")
)
model_names = []
model_iterations = []
for name in model_names_all:
search_string = exp_name + "_" + "(.*)" + "_" + date_time + ".dat"
iter = int(re.search(search_string, name).group(1))
if iter % 10000 == 0:
model_names.append(name)
model_iterations.append(iter)
sorted_model_names = [x for _, x in sorted(zip(model_iterations, model_names))]
sorted_model_iterations = [y for y, _ in sorted(zip(model_iterations, model_names))]
return sorted_model_names, sorted_model_iterations
def last_model_with_log_name(log_name, model_folder):
sorted_model_names, sorted_model_iterations = models_with_log_name(
log_name, model_folder
)
assert len(sorted_model_names) >= 1
return sorted_model_names[-1], sorted_model_iterations[-1]
def first_model_with_log_name(log_name, model_folder):
sorted_model_names, sorted_model_iterations = models_with_log_name(
log_name, model_folder
)
assert len(sorted_model_names) >= 1
return sorted_model_names[0], sorted_model_iterations[0]
def manhattan_dist_between_two_positions(p0, p1):
dist = int(round((abs(p0["x"] - p1["x"]) + abs(p0["z"] - p1["z"])) / 0.25))
return dist
class NonBlockingStreamReader:
# Taken from http://eyalarubas.com/python-subproc-nonblock.html
def __init__(self, stream):
"""
stream: the stream to read from.
Usually a process' stdout or stderr.
"""
self._s = stream
self._q = Queue()
def _populateQueue(stream, queue):
"""
            Collect lines from 'stream' and put them in 'queue'.
"""
while True:
line = stream.readline()
if line:
queue.put(line)
else:
break
# raise UnexpectedEndOfStream
self._t = Thread(target=_populateQueue, args=(self._s, self._q))
self._t.daemon = True
self._t.start() # start collecting lines from the stream
def readline(self, timeout=None):
try:
return self._q.get(block=timeout is not None, timeout=timeout)
except Empty:
return None
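# Illustrative usage sketch added for demonstration (not part of the original
# file): read a subprocess' stdout without blocking the main thread. The
# command below is an arbitrary placeholder.
def _example_non_blocking_stream_reader():
    proc = subprocess.Popen(
        ["echo", "hello"], stdout=subprocess.PIPE, universal_newlines=True
    )
    reader = NonBlockingStreamReader(proc.stdout)
    line = reader.readline(timeout=1.0)  # None if no output arrived in time
    proc.wait()
    return line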
def all_equal(seq: Sequence):
if len(seq) <= 1:
return True
return all(seq[0] == s for s in seq[1:])
def unzip(xs):
a = None
n = None
for x in xs:
if n is None:
n = len(x)
a = [[] for _ in range(n)]
for i, y in enumerate(x):
a[i].append(y)
return a
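# Illustrative sketch added for demonstration (not part of the original file):
# `unzip` is the inverse of zip for an iterable of equal-length tuples.
def _example_unzip():
    numbers, letters = unzip([(1, "a"), (2, "b"), (3, "c")])
    assert numbers == [1, 2, 3] and letters == ["a", "b", "c"]
    return numbers, letters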
| cordial-sync-master | utils/misc_util.py |
cordial-sync-master | utils/__init__.py |
|
"""Contains a bunch of utilities useful during network training in PyTorch."""
import math
from collections import deque
from typing import Dict, Union, List, Tuple, Any, Callable
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from torchvision import transforms
def init_orthogonal(tensor, gain=1):
r"""
Taken from a future torch version
"""
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor.numel() // rows
flattened = tensor.new(rows, cols).normal_(0, 1)
if rows < cols:
flattened.t_()
# Compute the qr factorization
q, r = torch.qr(flattened)
# Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
d = torch.diag(r, 0)
ph = d.sign()
q *= ph
if rows < cols:
q.t_()
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
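# Illustrative sketch added for demonstration (not part of the original file):
# after orthogonal initialization the rows of a wide weight matrix are
# orthonormal (up to the gain factor).
def _example_init_orthogonal():
    w = torch.empty(4, 6)
    init_orthogonal(w, gain=1.0)
    assert torch.allclose(w @ w.t(), torch.eye(4), atol=1e-5)
    return w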
def get_gpu_memory_map():
    """Get the current gpu usage.
    Returns
    -------
    usage: dict
        Keys are device ids as integers.
        Values are memory usage as integers in MB.
    """
    # From https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/3
    import subprocess
result = subprocess.check_output(
["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"],
encoding="utf-8",
)
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.strip().split("\n")]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
def load_model_from_state_dict(model, state_dict):
model_state_dict = model.state_dict()
model_keys = set(model_state_dict.keys())
dict_keys = set(state_dict.keys())
in_model_not_in_dict = []
in_dict_not_in_model = []
wrong_parameter_sizes = []
for key in model_keys | dict_keys:
if key in model_keys and key not in dict_keys:
in_model_not_in_dict.append(key)
elif key not in model_keys and key in dict_keys:
in_dict_not_in_model.append(key)
elif model_state_dict[key].shape != state_dict[key].shape:
wrong_parameter_sizes.append(key)
if (
len(in_model_not_in_dict) == 0
and len(in_dict_not_in_model) == 0
and len(wrong_parameter_sizes) == 0
):
return model.load_state_dict(state_dict)
else:
print(
(
"WARNING: Loading model from state dictionary but:\n"
"* The following parameters are present in the state"
" dictionary and not in the model and will be ignored: {}\n"
"* The following parameters are present in the model but "
"not in the state and will remain in their initial state: {}\n"
"* The following parameters are present in both the model and "
"saved state but are of incompatible sizes, they will remain as in the model: {}\n"
).format(
"\n\t- None"
if len(in_dict_not_in_model) == 0
else "\n\t- " + "\n\t- ".join(in_dict_not_in_model),
"\n\t- None"
if len(in_model_not_in_dict) == 0
else "\n\t- " + "\n\t- ".join(in_model_not_in_dict),
"\n\t- None"
if len(wrong_parameter_sizes) == 0
else "\n\t- " + "\n\t- ".join(wrong_parameter_sizes),
)
)
yn = input("Continue? (y/n)").lower().strip()
if yn not in ["y", "yes"]:
print("Aborting...")
quit()
return model.load_state_dict(
{
**model.state_dict(),
**{
k: state_dict[k]
for k in ((dict_keys - set(wrong_parameter_sizes)) & model_keys)
},
}
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def binary_search_for_model_with_least_upper_bound_parameters(
target_parameter_count,
create_model_func: Callable[[int], nn.Module],
lower: int,
upper: int,
):
assert lower <= upper
lower_count = count_parameters(create_model_func(lower))
upper_count = count_parameters(create_model_func(upper))
assert lower_count <= target_parameter_count <= upper_count, "Invalid range"
def run_search(
target_parameter_count,
create_model_func: Callable[[int], nn.Module],
lower: int,
upper: int,
):
if lower == upper:
return lower
mid = int(math.floor((lower + upper) / 2))
mid_count = count_parameters(create_model_func(mid))
if mid_count == target_parameter_count:
return mid
elif mid_count > target_parameter_count:
return run_search(
target_parameter_count=target_parameter_count,
create_model_func=create_model_func,
lower=lower,
upper=mid,
)
else:
return run_search(
target_parameter_count=target_parameter_count,
create_model_func=create_model_func,
lower=mid + 1,
upper=upper,
)
return run_search(
target_parameter_count=target_parameter_count,
create_model_func=create_model_func,
lower=lower,
upper=upper,
)
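# Illustrative sketch added for demonstration (not part of the original file):
# find the smallest hidden size whose linear layer has at least ~10k trainable
# parameters. The layer sizes are made up.
def _example_binary_search_for_parameters():
    def create(hidden):
        return nn.Linear(100, hidden)
    best_hidden = binary_search_for_model_with_least_upper_bound_parameters(
        target_parameter_count=10000,
        create_model_func=create,
        lower=1,
        upper=200,
    )
    assert count_parameters(create(best_hidden)) >= 10000
    return best_hidden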
def logit_offsets_for_conditional_probabilities(
action_group_dims: Tuple[int, ...]
) -> List[float]:
consts = [0.0]
for i in range(1, len(action_group_dims)):
consts.append(math.log(action_group_dims[0] / action_group_dims[i]))
return consts
class ScalarMeanTracker(object):
def __init__(self) -> None:
self._sums: Dict[str, float] = {}
self._counts: Dict[str, int] = {}
def add_scalars(self, scalars: Dict[str, Union[float, int]]) -> None:
for k in scalars:
if np.isscalar(scalars[k]):
if k not in self._sums:
self._sums[k] = float(scalars[k])
self._counts[k] = 1
else:
self._sums[k] += float(scalars[k])
self._counts[k] += 1
def means(self):
means = {k: self._sums[k] / self._counts[k] for k in self._sums}
return means
def counts(self):
return {**self._counts}
def pop_and_reset_for_key(self, k):
s = self._sums[k]
c = self._counts[k]
del self._sums[k]
del self._counts[k]
return s / c
def pop_and_reset(self):
means = {k: self._sums[k] / self._counts[k] for k in self._sums}
self._sums = {}
self._counts = {}
return means
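# Illustrative usage sketch added for demonstration (not part of the original
# file): accumulate per-episode scalars and read back their running means.
def _example_scalar_mean_tracker():
    tracker = ScalarMeanTracker()
    tracker.add_scalars({"reward": 1.0, "ep_length": 50})
    tracker.add_scalars({"reward": 3.0, "ep_length": 70})
    means = tracker.pop_and_reset()
    assert means == {"reward": 2.0, "ep_length": 60.0}
    return means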
class TensorConcatTracker(object):
def __init__(self) -> None:
self._tensors: Dict[str, torch.FloatTensor] = {}
def add_tensors(self, tensors: Dict[str, Union[torch.FloatTensor, Any]]) -> None:
for k in tensors:
if type(tensors[k]) == torch.FloatTensor:
if k not in self._tensors:
self._tensors[k] = tensors[k]
else:
self._tensors[k] = torch.cat((self._tensors[k], tensors[k]), dim=0)
def pop_and_reset(self):
t = self._tensors
self._tensors = {}
return t
class RollingAverage(object):
"""Computes and stores the running average as well
as the average within a recent window"""
def __init__(self, window_size):
assert window_size > 0
self.window_size = window_size
self.rolling_sum = 0
self.sum = 0
self.count = 0
self.rolling_deque = deque()
def add(self, val):
"""Add one value."""
self.sum += val
self.rolling_sum += val
self.count += 1
self.rolling_deque.append(val)
if len(self.rolling_deque) > self.window_size:
self.rolling_sum -= self.rolling_deque.popleft()
def rolling_average(self):
assert self.count > 0
return self.rolling_sum / (1.0 * len(self.rolling_deque))
def full_average(self):
assert self.count > 0
return self.sum / self.count
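# Illustrative usage sketch added for demonstration (not part of the original
# file): the rolling average reflects only the most recent `window_size`
# values, while the full average uses every value seen.
def _example_rolling_average():
    avg = RollingAverage(window_size=2)
    for v in [1.0, 2.0, 3.0]:
        avg.add(v)
    assert avg.rolling_average() == 2.5
    assert avg.full_average() == 2.0
    return avg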
class TrainTestInfoStore(object):
def __init__(self, train_window_size, test_window_size):
self.train_window_size = train_window_size
self.test_window_size = test_window_size
self.train_recent_save = []
self.train_averages = []
self.train_num_frames = []
self.test_recent_save = []
self.test_averages = []
self.test_num_frames = []
def add_train_result(self, episode_reward, num_frames):
self.train_recent_save.append(episode_reward)
if len(self.train_recent_save) == self.train_window_size:
self.train_averages.append(np.mean(self.train_recent_save))
self.train_num_frames.append(num_frames)
self.train_recent_save = []
def add_test_result(self, episode_reward, num_frames):
self.test_recent_save.append(episode_reward)
if len(self.test_recent_save) == self.test_window_size:
self.test_averages.append(np.mean(self.test_recent_save))
self.test_num_frames.append(num_frames)
self.test_recent_save = []
def train_results(self):
return self.train_averages, self.train_num_frames
def test_results(self):
return self.test_averages, self.test_num_frames
def train_full_average(self):
sum = (
np.sum(self.train_recent_save)
+ np.sum(self.train_averages) * self.train_window_size
)
return sum / (
len(self.train_averages) * self.train_window_size
+ len(self.train_recent_save)
)
def test_full_average(self):
sum = (
np.sum(self.test_recent_save)
+ np.sum(self.test_averages) * self.test_window_size
)
return sum / (
len(self.test_averages) * self.test_window_size + len(self.test_recent_save)
)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
"""Resets counters."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""Updates counters."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_gpu_model(model, optim, epoch, ckpt_fname):
state_dict = model.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].cpu()
optimizer = optim.state_dict()
for key in optimizer.keys():
optimizer[key] = optimizer[key].cpu()
torch.save(
{"epoch": epoch, "state_dict": state_dict, "optimizer": optimizer}, ckpt_fname
)
def save_model(model, optim, epoch, ckpt_fname):
state_dict = model.state_dict()
optimizer = optim.state_dict()
torch.save(
{"epoch": epoch, "state_dict": state_dict, "optimizer": optimizer}, ckpt_fname
)
def show_image_stack(image_stack):
"""Displays the stack of images
If the image_stack is of type torch.Tensor, then expected size is (1, N, H, W)
If the image_stack is of type np.ndarray, then expected size is (H, W, N)
"""
import matplotlib
matplotlib.use("TkAgg", force=False)
import matplotlib.pyplot as plt # Keeping this outside causes issues in multiprocessing.
if isinstance(image_stack, torch.Tensor):
image_stack = image_stack.squeeze().cpu().numpy()
image_stack = np.transpose(image_stack, (1, 2, 0))
num_images = image_stack.shape[2]
length = np.ceil(np.sqrt(num_images)).item()
plt.figure()
for idx in range(num_images):
plt.subplot(length, length, idx + 1)
img = image_stack[:, :, idx]
plt.imshow(img, cmap="gray")
plt.show()
def recursively_detach(to_detach: Any):
"""Recursively detach tensors in nested structure."""
if to_detach is None:
return to_detach
elif isinstance(to_detach, tuple):
return tuple(recursively_detach(x) for x in to_detach)
elif isinstance(to_detach, list):
return [recursively_detach(x) for x in to_detach]
elif isinstance(to_detach, dict):
return {k: recursively_detach(to_detach[k]) for k in to_detach}
elif isinstance(to_detach, set):
return set(recursively_detach(x) for x in to_detach)
elif (
isinstance(to_detach, np.ndarray)
or np.isscalar(to_detach)
or isinstance(to_detach, str)
):
return to_detach
elif isinstance(to_detach, torch.Tensor):
return to_detach.detach()
elif hasattr(to_detach, "repackage_hidden"):
return to_detach.repackage_hidden()
elif hasattr(to_detach, "detach"):
return to_detach.detach()
else:
raise NotImplementedError(
"Sorry, hidden state of type {} is not supported.".format(type(to_detach))
)
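# Illustrative sketch added for demonstration (not part of the original file):
# detach every tensor inside a nested hidden-state structure, e.g. before
# carrying it across a truncated backpropagation boundary.
def _example_recursively_detach():
    hidden = {"lstm": (torch.zeros(1, 4, requires_grad=True), [torch.ones(2)])}
    detached = recursively_detach(hidden)
    assert not detached["lstm"][0].requires_grad
    return detached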
def put_tensor_onto_gpu_of_template(tensor, template):
if template.is_cuda:
with torch.cuda.device(template.get_device()):
tensor = tensor.cuda()
return tensor
else:
return tensor.cpu()
def resnet_input_transform(input_image, im_size):
"""Takes in numpy ndarray of size (H, W, 3) and transforms into tensor for
resnet input.
"""
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
all_transforms = transforms.Compose(
[
transforms.ToPILImage(),
ScaleBothSides(im_size),
transforms.ToTensor(),
normalize,
]
)
transformed_image = all_transforms(input_image)
return transformed_image
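# Illustrative sketch added for demonstration (not part of the original file):
# convert an HxWx3 uint8 frame into a normalized 3 x im_size x im_size tensor
# ready for a ResNet backbone.
def _example_resnet_input_transform():
    frame = np.random.randint(0, 255, size=(300, 300, 3), dtype=np.uint8)
    tensor = resnet_input_transform(frame, im_size=224)
    assert tuple(tensor.shape) == (3, 224, 224)
    return tensor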
def resize_image(input_image, im_size):
all_transforms = transforms.Compose(
[transforms.ToPILImage(), ScaleBothSides(im_size), transforms.ToTensor()]
)
return all_transforms(input_image)
def norm_col_init(weights, std=1.0):
x = torch.randn(weights.size())
x *= std / torch.sqrt((x ** 2).sum(1, keepdim=True))
return x
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find("Linear") != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
class ScaleBothSides(object):
"""Rescales the input PIL.Image to the given 'size'.
'size' will be the size of both edges, and this can change aspect ratio.
size: output size of both edges
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img):
return img.resize((self.size, self.size), self.interpolation)
| cordial-sync-master | utils/net_util.py |
#!/usr/bin/env python
import subprocess
import shlex
import re
import platform
import tempfile
import os
import sys
def pci_records():
records = []
command = shlex.split('lspci -vmm')
output = subprocess.check_output(command).decode()
for devices in output.strip().split("\n\n"):
record = {}
records.append(record)
for row in devices.split("\n"):
key, value = row.split("\t")
record[key.split(':')[0]] = value
return records
def generate_xorg_conf(devices):
xorg_conf = []
device_section = """
Section "Device"
Identifier "Device{device_id}"
Driver "nvidia"
VendorName "NVIDIA Corporation"
BusID "{bus_id}"
EndSection
"""
server_layout_section = """
Section "ServerLayout"
Identifier "Layout0"
{screen_records}
EndSection
"""
screen_section = """
Section "Screen"
Identifier "Screen{screen_id}"
Device "Device{device_id}"
DefaultDepth 24
Option "AllowEmptyInitialConfiguration" "True"
SubSection "Display"
Depth 24
Virtual 1024 768
EndSubSection
EndSection
"""
screen_records = []
for i, bus_id in enumerate(devices):
xorg_conf.append(device_section.format(device_id=i, bus_id=bus_id))
xorg_conf.append(screen_section.format(device_id=i, screen_id=i))
screen_records.append('Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=i))
xorg_conf.append(server_layout_section.format(screen_records="\n ".join(screen_records)))
output = "\n".join(xorg_conf)
print(output)
return output
def startx(display):
if platform.system() != 'Linux':
raise Exception("Can only run startx on linux")
devices = []
for r in pci_records():
if r.get('Vendor', '') == 'NVIDIA Corporation'\
and r['Class'] in ['VGA compatible controller', '3D controller']:
bus_id = 'PCI:' + ':'.join(map(lambda x: str(int(x, 16)), re.split(r'[:\.]', r['Slot'])))
devices.append(bus_id)
if not devices:
raise Exception("no nvidia cards found")
try:
fd, path = tempfile.mkstemp()
with open(path, "w") as f:
f.write(generate_xorg_conf(devices))
command = shlex.split("Xorg -noreset -logfile xorg.log -logverbose -config %s :%s" % (path, display))
subprocess.call(command)
finally:
os.close(fd)
os.unlink(path)
if __name__ == '__main__':
display = 0
if len(sys.argv) > 1:
display = int(sys.argv[1])
print("Starting X on DISPLAY=:%s" % display)
startx(display)
| cordial-sync-master | utils/startx.py |
import argparse
from constants import ABS_PATH_TO_LOCAL_THOR_BUILD
def str2bool(v):
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def parse_arguments():
parser = argparse.ArgumentParser(description="A3C")
parser.add_argument(
"--task",
type=str,
help="The experiment config to run (e.g. furnmove_grid_bigcentral_cl_rot, furnmove_grid_mixture_3agents, etc.)."
"`--experiment_dir` should be used to specify the directory where this config resides.",
)
parser.add_argument(
"--experiment_dir",
type=str,
default="rl_multi_agent/experiments",
help="The directory to look in to find the config file for this run. (default `rl_multi_agent/experiments`)",
)
parser.add_argument(
"--tag",
type=str,
default="",
help="A tag for the run (e.g. lstm-not-gru, trying-new-thing). If not-empty, this tag is used as a subdirectory"
"along the tensorboard path. (default: '')",
)
parser.add_argument(
"--lr",
type=float,
default=0.0001,
metavar="LR",
help="Learning rate (default: 0.0001).",
)
parser.add_argument(
"--seed",
type=int,
default=1,
metavar="S",
help="Random seed. As A3C is asynchronous, setting this seed has "
"does not guarantee any exact reproducibility from run to run. (default: 1)",
)
parser.add_argument(
"--workers",
type=int,
default=32,
metavar="W",
help="How many training processes to use. (default: 32)",
)
parser.add_argument(
"--num_steps",
type=int,
default=50,
metavar="NS",
help="Number of forward steps in A3C before computing the loss and backproping. (default: 50)",
)
parser.add_argument(
"--shared_optimizer",
default=True,
metavar="SO",
type=str2bool,
help="use an optimizer with shared statistics. (default: True)",
)
parser.add_argument(
"--save_freq",
type=int,
default=1e6,
help="Save model after this # of training episodes. (default: 1e+6)",
)
parser.add_argument(
"--optimizer",
default="Adam",
metavar="OPT",
help="Optimizer choice (must be Adam or RMSprop). (default: Adam)",
)
parser.add_argument(
"--save_model_dir",
default="trained_models/",
metavar="SMD",
help="Folder to save trained model checkpoints. (default: trained_models)",
)
parser.add_argument(
"--log_dir",
default="logs/",
metavar="LG",
help="Folder in which to save (tensorboard) logs. (default: logs)",
)
parser.add_argument(
"--gpu_ids",
type=int,
default=-1,
nargs="+",
help="GPUs to use [-1 CPU only] (default: -1)",
)
parser.add_argument(
"--amsgrad",
type=str2bool,
default=True,
metavar="AM",
help="Adam optimizer amsgrad parameter. (default: True)",
)
parser.add_argument(
"--docker_enabled",
action="store_true",
help="Whether or not to use docker."
" This flag should not be used unless you know what you're doing.",
)
parser.add_argument(
"--x_display",
type=str,
default=None,
help=(
"The X display to target, if any. If targeting a multiple displays"
"please use the x_displays argument."
),
)
parser.add_argument(
"--x_displays",
type=str,
default=None,
nargs="+",
help="The x-displays to target, if any.",
)
parser.add_argument(
"--val_timeout",
type=float,
default=10,
help="The length of time to wait in between validation episodes. (default: 10)",
)
parser.add_argument(
"--enable_val_agent",
type=str2bool,
default=True,
help="Whether or not to use an agent to validate results while training. (default: True)",
)
parser.add_argument(
"--enable_logging",
type=str2bool,
default=True,
help="Whether or not to record logging information (e.g. tensorboard logs). (default: True)",
)
parser.add_argument(
"--verbose",
type=str2bool,
default=False,
help="If true, validation agent will print more information. (default: False)",
)
parser.add_argument(
"--skip_backprop",
type=str2bool,
default=False,
help="If true, will not backprop during training. Useful when debugging. (default: False)",
)
parser.add_argument(
"--max_ep",
type=float,
default=float("inf"),
help="Maximum # of episodes to run when training. (default: 'inf')",
)
parser.add_argument(
"--local_thor_build",
type=str,
default=ABS_PATH_TO_LOCAL_THOR_BUILD,
help="A path to a local thor build to use if desired. (default: {})".format(
ABS_PATH_TO_LOCAL_THOR_BUILD
),
)
parser.add_argument(
"--visualize_test_agent",
type=str2bool,
default=False,
help="Whether or not to create plots and graphics for test agent runs. (default: False)",
)
parser.add_argument(
"--test_gpu_ids",
type=int,
default=None,
nargs="+",
help="GPUs to use for test agents [-1 CPU only]. (default: -1)",
)
parser.add_argument(
"--use_episode_init_queue",
type=str2bool,
default=False,
help="If True, attempts to use the episode init queue. This is necessary when evaluating models on fixed"
"datasets. Search this codebase for the `create_episode_init_queue` for more information."
" (default: False)",
)
parser.add_argument(
"--save_extra_data",
type=str2bool,
default=False,
help="If true, attempt to save extra data from train processes. (default: False)",
)
return parser.parse_args()
| cordial-sync-master | utils/flag_parser.py |
import numpy as np
class ReservoirSampler(object):
"""Finds a random subset k elements from a stream of data in O(k) space.
See https://en.wikipedia.org/wiki/Reservoir_sampling.
"""
def __init__(self, k):
self.samples = []
self.num_seen = 0
self.k = k
def add(self, item):
self.num_seen += 1
if self.num_seen <= self.k:
self.samples.append(item)
elif np.random.rand(1)[0] <= self.k / (1.0 * self.num_seen):
self.samples[np.random.choice(range(self.k))] = item
def get_sample(self):
return self.samples[:]
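# Illustrative usage sketch added for demonstration (not part of the original
# file): keep a uniform random subset of 3 items from a stream of 100 items
# without storing the full stream.
def _example_reservoir_sampler():
    np.random.seed(0)
    sampler = ReservoirSampler(k=3)
    for item in range(100):
        sampler.add(item)
    sample = sampler.get_sample()
    assert len(sample) == 3
    return sample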
| cordial-sync-master | utils/debug_util.py |
aspire-main | examples/__init__.py |
|
"""
Script to demo example usage of the Aspire Multi-Vector encoder which
represents documents via contextual sentence embeddings and uses an
optimal transport based Wasserstein distance to compute document similarity:
allenai/aspire-contextualsentence-multim-biomed and
allenai/aspire-contextualsentence-multim-compsci
Models released at:
https://huggingface.co/allenai/aspire-contextualsentence-multim-biomed
https://huggingface.co/allenai/aspire-contextualsentence-multim-compsci
Requirements:
- transformers version: 4.5.1
- torch version: 1.8.1
- geomloss version: 0.2.4
Code here is used in the demo jupyter notebook: examples/demo-contextualsentence-multim.ipynb
"""
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import functional
import geomloss
from transformers import AutoModel, AutoTokenizer
# Define the Aspire contextual encoder:
class AspireConSent(nn.Module):
def __init__(self, hf_model_name):
"""
:param hf_model_name: dict; model hyperparams.
"""
torch.nn.Module.__init__(self)
self.bert_encoding_dim = 768
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(hf_model_name)
self.bert_encoder.config.output_hidden_states = True
def forward(self, bert_batch, abs_lens, sent_tok_idxs):
"""
Pass a batch of sentences through BERT and get sentence
reps based on averaging contextual token embeddings.
:return:
sent_reps: batch_size x num_sents x encoding_dim
"""
# batch_size x num_sents x encoding_dim
doc_cls_reps, sent_reps = self.consent_reps_bert(bert_batch=bert_batch, num_sents=abs_lens,
batch_senttok_idxs=sent_tok_idxs)
if len(sent_reps.size()) == 2:
sent_reps = sent_reps.unsqueeze(0)
if len(doc_cls_reps.size()) == 1:
doc_cls_reps = doc_cls_reps.unsqueeze(0)
return doc_cls_reps, sent_reps
def consent_reps_bert(self, bert_batch, batch_senttok_idxs, num_sents):
"""
Pass the concated abstract through BERT, and average token reps to get contextual sentence reps.
-- NO weighted combine across layers.
:param bert_batch: dict('tokid_tt', 'seg_tt', 'attnmask_tt', 'seq_lens'); items to use for getting BERT
representations. The sentence mapped to BERT vocab and appropriately padded.
:param batch_senttok_idxs: list(list(list(int))); batch_size([num_sents_per_abs[num_tokens_in_sent]])
:param num_sents: list(int); number of sentences in each example in the batch passed.
:return:
doc_cls_reps: FloatTensor [batch_size x bert_encoding_dim]
sent_reps: FloatTensor [batch_size x num_sents x bert_encoding_dim]
"""
seq_lens = bert_batch['seq_lens']
batch_size, max_seq_len = len(seq_lens), max(seq_lens)
max_sents = max(num_sents)
tokid_tt, seg_tt, attnmask_tt = bert_batch['tokid_tt'], bert_batch['seg_tt'], bert_batch['attnmask_tt']
# if torch.cuda.is_available():
# tokid_tt, seg_tt, attnmask_tt = tokid_tt.cuda(), seg_tt.cuda(), attnmask_tt.cuda()
# Pass input through BERT and return all layer hidden outputs.
model_outputs = self.bert_encoder(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
final_hidden_state = model_outputs.last_hidden_state
        # Read off the CLS token as the document representation.
doc_cls_reps = final_hidden_state[:, 0, :]
doc_cls_reps = doc_cls_reps.squeeze()
# Average token reps for every sentence to get sentence representations.
# Build the first sent for all batch examples, second sent ... and so on in each iteration below.
sent_reps = []
for sent_i in range(max_sents):
cur_sent_mask = np.zeros((batch_size, max_seq_len, self.bert_encoding_dim))
# Build a mask for the ith sentence for all the abstracts of the batch.
for batch_abs_i in range(batch_size):
abs_sent_idxs = batch_senttok_idxs[batch_abs_i]
try:
sent_i_tok_idxs = abs_sent_idxs[sent_i]
except IndexError: # This happens in the case where the abstract has fewer than max sents.
sent_i_tok_idxs = []
cur_sent_mask[batch_abs_i, sent_i_tok_idxs, :] = 1.0
sent_mask = Variable(torch.FloatTensor(cur_sent_mask))
# if torch.cuda.is_available():
# sent_mask = sent_mask.cuda()
# batch_size x seq_len x encoding_dim
sent_tokens = final_hidden_state * sent_mask
# The sent_masks non zero elements in one slice along embedding dim is the sentence length.
cur_sent_reps = torch.sum(sent_tokens, dim=1)/ \
torch.count_nonzero(sent_mask[:, :, 0], dim=1).clamp(min=1).unsqueeze(dim=1)
sent_reps.append(cur_sent_reps.unsqueeze(dim=1))
# batch_size x max_sents x encoding_dim
sent_reps = torch.cat(sent_reps, dim=1)
return doc_cls_reps, sent_reps
# Define the class for the distance function.
# Copied over from src.learning.facetid_models.pair_distances
class AllPairMaskedWasserstein:
def __init__(self, model_hparams):
self.geoml_blur = model_hparams.get('geoml_blur', 0.05)
self.geoml_scaling = model_hparams.get('geoml_scaling', 0.9)
self.geoml_reach = model_hparams.get('geoml_reach', None)
self.sent_sm_temp = model_hparams.get('sent_sm_temp', 1.0)
def compute_distance(self, query, cand, return_pair_sims=False):
"""
Given a set of query and candidate reps compute the wasserstein distance between
the query and candidates.
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:return:
batch_sims: ef_batch_size; pooled pairwise _distances_ between
input reps. (distances are just negated similarities here)
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens = cand.embed, cand.abs_lens
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
pad_mask = np.ones((qef_batch_size, qmax_sents, cmax_sents))*-10e8
for i in range(qef_batch_size):
ql, cl = query_abs_lens[i], cand_abs_lens[i]
pad_mask[i, :ql, :cl] = 0.0
pad_mask = Variable(torch.FloatTensor(pad_mask))
# if torch.cuda.is_available():
# pad_mask = pad_mask.cuda()
assert (qef_batch_size == cef_batch_size)
# (effective) batch_size x qmax_sents x cmax_sents
# inputs are: batch_size x encoding_dim x c/qmax_sents so permute them.
neg_pair_dists = -1*torch.cdist(query_reps.permute(0, 2, 1).contiguous(),
cand_reps.permute(0, 2, 1).contiguous())
if len(neg_pair_dists.size()) == 2:
neg_pair_dists = neg_pair_dists.unsqueeze(0)
assert (neg_pair_dists.size(1) == qmax_sents)
assert (neg_pair_dists.size(2) == cmax_sents)
# Add very large negative values in the pad positions which will be zero.
neg_pair_dists = neg_pair_dists + pad_mask
q_max_sent_sims, _ = torch.max(neg_pair_dists, dim=2)
c_max_sent_sims, _ = torch.max(neg_pair_dists, dim=1)
query_distr = functional.log_softmax(q_max_sent_sims/self.sent_sm_temp, dim=1).exp()
cand_distr = functional.log_softmax(c_max_sent_sims/self.sent_sm_temp, dim=1).exp()
if return_pair_sims:
# This is only used at test time -- change the way the pad mask is changed in place
# if you want to use at train time too.
pad_mask[pad_mask == 0] = 1.0
pad_mask[pad_mask == -10e8] = 0.0
neg_pair_dists = neg_pair_dists * pad_mask
# p=1 is the L2 distance oddly enough.
ot_solver = geomloss.SamplesLoss("sinkhorn", p=1, blur=self.geoml_blur, reach=self.geoml_reach,
scaling=self.geoml_scaling, debias=False, potentials=True)
# Input reps to solver need to be: batch_size x c/qmax_sents x encoding_dim
q_pot, c_pot = ot_solver(query_distr, query_reps.permute(0, 2, 1).contiguous(),
cand_distr, cand_reps.permute(0, 2, 1).contiguous())
# Implement the expression to compute the plan from the potentials:
# https://www.kernel-operations.io/geomloss/_auto_examples/optimal_transport/
# plot_optimal_transport_labels.html?highlight=plan#regularized-optimal-transport
outersum = q_pot.unsqueeze(dim=2).expand(-1, -1, cmax_sents) + \
c_pot.unsqueeze(dim=2).expand(-1, -1, qmax_sents).permute(0, 2, 1)
# Zero out the pad values because they seem to cause nans to occur.
outersum = outersum * pad_mask
exps = torch.exp(torch.div(outersum+neg_pair_dists, self.geoml_blur))
outerprod = torch.einsum('bi,bj->bij', query_distr, cand_distr)
transport_plan = exps*outerprod
pair_sims = neg_pair_dists
masked_sims = transport_plan*pair_sims
wasserstein_dists = torch.sum(torch.sum(masked_sims, dim=1), dim=1)
return wasserstein_dists, [query_distr, cand_distr, pair_sims, transport_plan, masked_sims]
else:
ot_solver_distance = geomloss.SamplesLoss("sinkhorn", p=1, blur=self.geoml_blur, reach=self.geoml_reach,
scaling=self.geoml_scaling, debias=False, potentials=False)
wasserstein_dists = ot_solver_distance(query_distr, query_reps.permute(0, 2, 1).contiguous(),
cand_distr, cand_reps.permute(0, 2, 1).contiguous())
return wasserstein_dists
# Both below functions copied over from src.learning.batchers
# Function to prepare tokenize, pad inputs, while maintaining token indices
# for getting contextual sentence eocndings.
def prepare_bert_sentences(batch_doc_sents, tokenizer):
"""
Given a batch of documents with sentences prepare a batch which can be passed through BERT.
And keep track of the token indices for every sentence so sentence reps can be aggregated
by averaging word embeddings.
:param batch_doc_sents: list(list(string)); [batch_size[title and abstract sentences]]
:param tokenizer: an instance of the appropriately initialized BERT tokenizer.
:return:
All truncated to max_num_toks by lopping off final sentence.
bert_batch: dict(); bert batch.
batch_tokenized_text: list(string); tokenized concated title and abstract.
batch_sent_token_idxs: list(list(list(int))); batch_size([num_sents_per_abs[num_tokens_in_sent]])
"""
max_num_toks = 500
# Construct the batch.
tokenized_batch = []
batch_tokenized_text = []
batch_sent_token_idxs = []
batch_seg_ids = []
batch_attn_mask = []
seq_lens = []
max_seq_len = -1
for abs_sents in batch_doc_sents:
abs_tokenized_text = []
abs_indexed_tokens = []
abs_sent_token_indices = [] # list of list for every abstract.
cur_len = 0
for sent_i, sent in enumerate(abs_sents):
tokenized_sent = tokenizer.tokenize(sent)
# Convert token to vocabulary indices
sent_indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_sent)
# Add 1 for accounting for the CLS token which will be added
# at the start of the sequence below.
cur_sent_tok_idxs = [cur_len+i+1 for i in range(len(tokenized_sent))]
# Store the token indices but account for the max_num_tokens
if cur_len + len(cur_sent_tok_idxs) <= max_num_toks:
abs_sent_token_indices.append(cur_sent_tok_idxs)
abs_tokenized_text.extend(tokenized_sent)
abs_indexed_tokens.extend(sent_indexed_tokens)
else:
len_exceded_by = cur_len + len(cur_sent_tok_idxs) - max_num_toks
reduced_len = len(cur_sent_tok_idxs) - len_exceded_by
# It can be that len_exceded_by is exactly len(cur_sent_tok_idxs)
# dont append a empty list then.
if reduced_len > 0:
abs_sent_token_indices.append(cur_sent_tok_idxs[:reduced_len])
abs_tokenized_text.extend(tokenized_sent[:reduced_len])
abs_indexed_tokens.extend(sent_indexed_tokens[:reduced_len])
break
cur_len += len(cur_sent_tok_idxs)
batch_tokenized_text.append(abs_tokenized_text)
# Exclude the titles token indices.
batch_sent_token_idxs.append(abs_sent_token_indices[1:])
# Append CLS and SEP tokens to the text..
abs_indexed_tokens = tokenizer.build_inputs_with_special_tokens(token_ids_0=abs_indexed_tokens)
if len(abs_indexed_tokens) > max_seq_len:
max_seq_len = len(abs_indexed_tokens)
seq_lens.append(len(abs_indexed_tokens))
tokenized_batch.append(abs_indexed_tokens)
batch_seg_ids.append([0] * len(abs_indexed_tokens))
batch_attn_mask.append([1] * len(abs_indexed_tokens))
# Pad the batch.
for ids_sent, seg_ids, attn_mask in zip(tokenized_batch, batch_seg_ids, batch_attn_mask):
pad_len = max_seq_len - len(ids_sent)
ids_sent.extend([tokenizer.pad_token_id] * pad_len)
seg_ids.extend([tokenizer.pad_token_id] * pad_len)
attn_mask.extend([tokenizer.pad_token_id] * pad_len)
# The batch which the BERT model will input.
bert_batch = {
'tokid_tt': torch.tensor(tokenized_batch),
'seg_tt': torch.tensor(batch_seg_ids),
'attnmask_tt': torch.tensor(batch_attn_mask),
'seq_lens': seq_lens
}
return bert_batch, batch_tokenized_text, batch_sent_token_idxs
# Prepare a batch of abstracts for passing through the model.
def prepare_abstracts(batch_abs, pt_lm_tokenizer):
"""
Given the abstracts sentences as a list of strings prep them to pass through model.
:param batch_abs: list(dict); list of example dicts with abstract sentences, and titles.
:return:
bert_batch: dict(); returned from prepare_bert_sentences.
abs_lens: list(int); number of sentences per abstract.
sent_token_idxs: list(list(list(int))); batch_size(num_abs_sents(num_sent_tokens(ints)))
"""
# Prepare bert batch.
batch_abs_seqs = []
    # Add the title and abstract concatenated with [SEP] because that's how SPECTER did it.
for ex_abs in batch_abs:
seqs = [ex_abs['TITLE'] + ' [SEP] ']
seqs.extend([s for s in ex_abs['ABSTRACT']])
batch_abs_seqs.append(seqs)
bert_batch, tokenized_abs, sent_token_idxs = prepare_bert_sentences(
batch_doc_sents=batch_abs_seqs, tokenizer=pt_lm_tokenizer)
# Get SEP indices from the sentences; some of the sentences may have been cut off
# at some max length.
abs_lens = []
for abs_sent_tok_idxs in sent_token_idxs:
num_sents = len(abs_sent_tok_idxs)
abs_lens.append(num_sents)
assert (num_sents > 0)
return bert_batch, abs_lens, sent_token_idxs
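# Illustrative end-to-end sketch added for demonstration (not part of the
# original file; it mirrors the demo notebook mentioned in the header). It
# embeds two made-up abstracts with the contextual sentence encoder and scores
# their similarity with the optimal transport distance defined above; smaller
# values indicate more similar documents. The helper and namedtuple names here
# are ours, and running this downloads the released model from HuggingFace.
def _example_aspire_ot_similarity():
    from collections import namedtuple
    hf_model_name = 'allenai/aspire-contextualsentence-multim-compsci'
    tokenizer = AutoTokenizer.from_pretrained(hf_model_name)
    model = AspireConSent(hf_model_name)
    model.eval()
    ex_abstracts = [
        {'TITLE': 'A toy paper about retrieval.',
         'ABSTRACT': ['We study document retrieval.', 'We report strong results.']},
        {'TITLE': 'Another toy paper about ranking.',
         'ABSTRACT': ['We study ranking models.', 'Our method improves recall.']},
    ]
    bert_batch, abs_lens, sent_token_idxs = prepare_abstracts(
        batch_abs=ex_abstracts, pt_lm_tokenizer=tokenizer)
    with torch.no_grad():
        _, sent_reps = model.forward(bert_batch=bert_batch, abs_lens=abs_lens,
                                     sent_tok_idxs=sent_token_idxs)
    RepLen = namedtuple('RepLen', ['embed', 'abs_lens'])
    # The distance function expects batch_size x encoding_dim x num_sents.
    query = RepLen(embed=sent_reps[0:1].permute(0, 2, 1), abs_lens=[abs_lens[0]])
    cand = RepLen(embed=sent_reps[1:2].permute(0, 2, 1), abs_lens=[abs_lens[1]])
    ot_distance = AllPairMaskedWasserstein(model_hparams={})
    wasserstein_dist = ot_distance.compute_distance(query=query, cand=cand)
    return wasserstein_dist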
| aspire-main | examples/ex_aspire_consent_multimatch.py |
"""
Script to demo example usage of the Aspire Multi-Vector encoder which
represents documents via contextual sentence embeddings, i.e the models:
allenai/aspire-contextualsentence-singlem-biomed and
allenai/aspire-contextualsentence-singlem-compsci
Models released at:
https://huggingface.co/allenai/aspire-contextualsentence-singlem-biomed
https://huggingface.co/allenai/aspire-contextualsentence-singlem-compsci
Requirements:
- transformers version: 4.5.1
- torch version: 1.8.1
Code here is used here: https://github.com/allenai/aspire#tsaspire
"""
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from transformers import AutoModel, AutoTokenizer
# Define the Aspire contextual encoder:
class AspireConSent(nn.Module):
def __init__(self, hf_model_name):
"""
:param hf_model_name: dict; model hyperparams.
"""
torch.nn.Module.__init__(self)
self.bert_encoding_dim = 768
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(hf_model_name)
self.bert_encoder.config.output_hidden_states = True
def forward(self, bert_batch, abs_lens, sent_tok_idxs):
"""
Pass a batch of sentences through BERT and get sentence
reps based on averaging contextual token embeddings.
:return:
sent_reps: batch_size x num_sents x encoding_dim
"""
# batch_size x num_sents x encoding_dim
doc_cls_reps, sent_reps = self.consent_reps_bert(bert_batch=bert_batch, num_sents=abs_lens,
batch_senttok_idxs=sent_tok_idxs)
if len(sent_reps.size()) == 2:
sent_reps = sent_reps.unsqueeze(0)
if len(doc_cls_reps.size()) == 1:
doc_cls_reps = doc_cls_reps.unsqueeze(0)
return doc_cls_reps, sent_reps
def consent_reps_bert(self, bert_batch, batch_senttok_idxs, num_sents):
"""
Pass the concated abstract through BERT, and average token reps to get contextual sentence reps.
-- NO weighted combine across layers.
:param bert_batch: dict('tokid_tt', 'seg_tt', 'attnmask_tt', 'seq_lens'); items to use for getting BERT
representations. The sentence mapped to BERT vocab and appropriately padded.
:param batch_senttok_idxs: list(list(list(int))); batch_size([num_sents_per_abs[num_tokens_in_sent]])
:param num_sents: list(int); number of sentences in each example in the batch passed.
:return:
doc_cls_reps: FloatTensor [batch_size x bert_encoding_dim]
sent_reps: FloatTensor [batch_size x num_sents x bert_encoding_dim]
"""
seq_lens = bert_batch['seq_lens']
batch_size, max_seq_len = len(seq_lens), max(seq_lens)
max_sents = max(num_sents)
tokid_tt, seg_tt, attnmask_tt = bert_batch['tokid_tt'], bert_batch['seg_tt'], bert_batch['attnmask_tt']
# if torch.cuda.is_available():
# tokid_tt, seg_tt, attnmask_tt = tokid_tt.cuda(), seg_tt.cuda(), attnmask_tt.cuda()
# Pass input through BERT and return all layer hidden outputs.
model_outputs = self.bert_encoder(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
final_hidden_state = model_outputs.last_hidden_state
        # Read off the CLS token as the document representation.
doc_cls_reps = final_hidden_state[:, 0, :]
doc_cls_reps = doc_cls_reps.squeeze()
# Average token reps for every sentence to get sentence representations.
# Build the first sent for all batch examples, second sent ... and so on in each iteration below.
sent_reps = []
for sent_i in range(max_sents):
cur_sent_mask = np.zeros((batch_size, max_seq_len, self.bert_encoding_dim))
# Build a mask for the ith sentence for all the abstracts of the batch.
for batch_abs_i in range(batch_size):
abs_sent_idxs = batch_senttok_idxs[batch_abs_i]
try:
sent_i_tok_idxs = abs_sent_idxs[sent_i]
except IndexError: # This happens in the case where the abstract has fewer than max sents.
sent_i_tok_idxs = []
cur_sent_mask[batch_abs_i, sent_i_tok_idxs, :] = 1.0
sent_mask = Variable(torch.FloatTensor(cur_sent_mask))
# if torch.cuda.is_available():
# sent_mask = sent_mask.cuda()
# batch_size x seq_len x encoding_dim
sent_tokens = final_hidden_state * sent_mask
# The sent_masks non zero elements in one slice along embedding dim is the sentence length.
cur_sent_reps = torch.sum(sent_tokens, dim=1)/ \
torch.count_nonzero(sent_mask[:, :, 0], dim=1).clamp(min=1).unsqueeze(dim=1)
sent_reps.append(cur_sent_reps.unsqueeze(dim=1))
# batch_size x max_sents x encoding_dim
sent_reps = torch.cat(sent_reps, dim=1)
return doc_cls_reps, sent_reps
# Both below functions copied over from src.learning.batchers
# Function to tokenize and pad inputs while maintaining the token indices
# needed for getting contextual sentence encodings.
def prepare_bert_sentences(batch_doc_sents, tokenizer):
"""
Given a batch of documents with sentences prepare a batch which can be passed through BERT.
And keep track of the token indices for every sentence so sentence reps can be aggregated
by averaging word embeddings.
:param batch_doc_sents: list(list(string)); [batch_size[title and abstract sentences]]
:param tokenizer: an instance of the appropriately initialized BERT tokenizer.
:return:
        All truncated to max_num_toks by lopping off the final sentence.
        bert_batch: dict(); bert batch.
        batch_tokenized_text: list(string); tokenized concatenated title and abstract.
batch_sent_token_idxs: list(list(list(int))); batch_size([num_sents_per_abs[num_tokens_in_sent]])
"""
max_num_toks = 500
# Construct the batch.
tokenized_batch = []
batch_tokenized_text = []
batch_sent_token_idxs = []
batch_seg_ids = []
batch_attn_mask = []
seq_lens = []
max_seq_len = -1
for abs_sents in batch_doc_sents:
abs_tokenized_text = []
abs_indexed_tokens = []
abs_sent_token_indices = [] # list of list for every abstract.
cur_len = 0
for sent_i, sent in enumerate(abs_sents):
tokenized_sent = tokenizer.tokenize(sent)
# Convert token to vocabulary indices
sent_indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_sent)
            # Add 1 to account for the CLS token which will be added
# at the start of the sequence below.
cur_sent_tok_idxs = [cur_len+i+1 for i in range(len(tokenized_sent))]
# Store the token indices but account for the max_num_tokens
if cur_len + len(cur_sent_tok_idxs) <= max_num_toks:
abs_sent_token_indices.append(cur_sent_tok_idxs)
abs_tokenized_text.extend(tokenized_sent)
abs_indexed_tokens.extend(sent_indexed_tokens)
else:
len_exceded_by = cur_len + len(cur_sent_tok_idxs) - max_num_toks
reduced_len = len(cur_sent_tok_idxs) - len_exceded_by
                # It can be that len_exceded_by is exactly len(cur_sent_tok_idxs);
                # don't append an empty list then.
if reduced_len > 0:
abs_sent_token_indices.append(cur_sent_tok_idxs[:reduced_len])
abs_tokenized_text.extend(tokenized_sent[:reduced_len])
abs_indexed_tokens.extend(sent_indexed_tokens[:reduced_len])
break
cur_len += len(cur_sent_tok_idxs)
batch_tokenized_text.append(abs_tokenized_text)
        # Exclude the title's token indices.
batch_sent_token_idxs.append(abs_sent_token_indices[1:])
# Append CLS and SEP tokens to the text..
abs_indexed_tokens = tokenizer.build_inputs_with_special_tokens(token_ids_0=abs_indexed_tokens)
if len(abs_indexed_tokens) > max_seq_len:
max_seq_len = len(abs_indexed_tokens)
seq_lens.append(len(abs_indexed_tokens))
tokenized_batch.append(abs_indexed_tokens)
batch_seg_ids.append([0] * len(abs_indexed_tokens))
batch_attn_mask.append([1] * len(abs_indexed_tokens))
    # Pad the batch; pad_token_id is 0 for the BERT-style tokenizers used here,
    # so it also serves as the segment-id and attention-mask padding value.
for ids_sent, seg_ids, attn_mask in zip(tokenized_batch, batch_seg_ids, batch_attn_mask):
pad_len = max_seq_len - len(ids_sent)
ids_sent.extend([tokenizer.pad_token_id] * pad_len)
seg_ids.extend([tokenizer.pad_token_id] * pad_len)
attn_mask.extend([tokenizer.pad_token_id] * pad_len)
# The batch which the BERT model will input.
bert_batch = {
'tokid_tt': torch.tensor(tokenized_batch),
'seg_tt': torch.tensor(batch_seg_ids),
'attnmask_tt': torch.tensor(batch_attn_mask),
'seq_lens': seq_lens
}
return bert_batch, batch_tokenized_text, batch_sent_token_idxs
# Prepare a batch of abstracts for passing through the model.
def prepare_abstracts(batch_abs, pt_lm_tokenizer):
"""
Given the abstracts sentences as a list of strings prep them to pass through model.
:param batch_abs: list(dict); list of example dicts with abstract sentences, and titles.
:return:
bert_batch: dict(); returned from prepare_bert_sentences.
abs_lens: list(int); number of sentences per abstract.
sent_token_idxs: list(list(list(int))); batch_size(num_abs_sents(num_sent_tokens(ints)))
"""
# Prepare bert batch.
batch_abs_seqs = []
    # Add the title and abstract concatenated with [SEP] tokens because that's how SPECTER did it.
for ex_abs in batch_abs:
seqs = [ex_abs['TITLE'] + ' [SEP] ']
seqs.extend([s for s in ex_abs['ABSTRACT']])
batch_abs_seqs.append(seqs)
bert_batch, tokenized_abs, sent_token_idxs = prepare_bert_sentences(
batch_doc_sents=batch_abs_seqs, tokenizer=pt_lm_tokenizer)
    # Count the sentences per abstract; some of the sentences may have been cut off
    # at the max sequence length.
abs_lens = []
for abs_sent_tok_idxs in sent_token_idxs:
num_sents = len(abs_sent_tok_idxs)
abs_lens.append(num_sents)
assert (num_sents > 0)
return bert_batch, abs_lens, sent_token_idxs
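# Minimal usage sketch: the Hugging Face model name below is an assumption --
# substitute whichever tS-Aspire checkpoint you intend to use.
if __name__ == '__main__':
    hf_model_name = 'allenai/aspire-contextualsentence-multim-compsci'
    aspire_tokenizer = AutoTokenizer.from_pretrained(hf_model_name)
    aspire_model = AspireConSent(hf_model_name)
    aspire_model.eval()
    ex_abstracts = [
        {'TITLE': "Multi-Vector Models with Textual Guidance for Fine-Grained Scientific Document Similarity",
         'ABSTRACT': ["We present a new scientific document similarity model based on matching fine-grained aspects.",
                      "Our model is trained using co-citation sentences as a source of supervision."]}
    ]
    # Tokenize, pad, and track the per-sentence token indices.
    bert_batch, abs_lens, sent_token_idxs = prepare_abstracts(batch_abs=ex_abstracts,
                                                              pt_lm_tokenizer=aspire_tokenizer)
    with torch.no_grad():
        doc_reps, sent_reps = aspire_model.forward(bert_batch=bert_batch, abs_lens=abs_lens,
                                                   sent_tok_idxs=sent_token_idxs)
    # doc_reps: 1 x 768; sent_reps: 1 x num_sents x 768.
    print(doc_reps.size(), sent_reps.size())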
| aspire-main | examples/ex_aspire_consent.py |
"""
Script to demo example usage of the Aspire Bi-encoder with linear mixing across
BERT layers, i.e the models: aspire-biencoder-biomed-scib-full,
aspire-biencoder-biomed-spec-full, and aspire-biencoder-compsci-spec-full.
The *-full models are released as zip folders alongside:
https://huggingface.co/allenai/aspire-biencoder-biomed-scib
https://huggingface.co/allenai/aspire-biencoder-biomed-spec
https://huggingface.co/allenai/aspire-biencoder-compsci-spec
Requirements:
- transformers version: 4.5.1
- torch version: 1.8.1
Code here is used here: https://github.com/allenai/aspire#specter-cocite
"""
import torch
from torch import nn as nn
from torch.nn import functional
from transformers import AutoModel, AutoTokenizer
# Define the linear mixing layer:
class SoftmaxMixLayers(torch.nn.Linear):
def forward(self, input):
# the weight vector is out_dim x in_dim.
# so we want to softmax along in_dim.
weight = functional.softmax(self.weight, dim=1)
return functional.linear(input, weight, self.bias)
# Define the Aspire biencoder:
class AspireBiEnc(nn.Module):
def __init__(self, model_hparams):
"""
:param model_hparams: dict; model hyperparams.
"""
torch.nn.Module.__init__(self)
self.bert_encoding_dim = 768
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(model_hparams['base-pt-layer'])
self.bert_encoder.config.output_hidden_states = True
self.bert_layer_weights = SoftmaxMixLayers(in_features=self.bert_layer_count, out_features=1, bias=False)
def forward(self, bert_batch):
"""
        Pass the title+abstract through BERT, combine the hidden states across layers
        with a learned softmax weighting, and read off the CLS rep.
"""
model_outputs = self.bert_encoder(**bert_batch)
# Weighted combine the hidden_states which is a list of [bs x max_seq_len x bert_encoding_dim]
# with as many tensors as layers + 1 input layer.
hs_stacked = torch.stack(model_outputs.hidden_states, dim=3)
weighted_sum_hs = self.bert_layer_weights(hs_stacked) # [bs x max_seq_len x bert_encoding_dim x 1]
weighted_sum_hs = torch.squeeze(weighted_sum_hs, dim=3)
        # Read off the CLS token as the document representation: (batch_size, sequence_length, hidden_size)
cls_doc_reps = weighted_sum_hs[:, 0, :]
return cls_doc_reps
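# Minimal usage sketch: the hyperparameters and checkpoint file name below are
# placeholders -- point them at the contents of the unzipped *-full folder for the
# bi-encoder you want to use ('allenai/specter' assumes the compsci-spec model).
if __name__ == '__main__':
    model_hparams = {'base-pt-layer': 'allenai/specter'}
    tokenizer = AutoTokenizer.from_pretrained(model_hparams['base-pt-layer'])
    model = AspireBiEnc(model_hparams)
    model.load_state_dict(torch.load('aspire-biencoder-compsci-spec-full/model.pt',
                                     map_location='cpu'))
    model.eval()
    title = "Multi-Vector Models with Textual Guidance for Fine-Grained Scientific Document Similarity"
    abstract = "We present a new scientific document similarity model based on matching fine-grained aspects."
    bert_batch = tokenizer(title + ' [SEP] ' + abstract, padding=True, truncation=True,
                           max_length=512, return_tensors='pt')
    with torch.no_grad():
        doc_rep = model(bert_batch)
    print(doc_rep.size())  # 1 x 768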
| aspire-main | examples/ex_aspire_bienc.py |
# For relative imports to work in Python 3.6
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__))) | aspire-main | src/__init__.py |
"""
For the faceted similarity models:
Call code from everywhere, read data, initialize the model, train it and make
sure training is doing something meaningful; predict with the trained model and
run evaluation.
"""
import argparse, os, sys
import logging
import codecs, pprint, json
import torch
from . import batchers, trainer
from .facetid_models import sentsim_models
def train_model(model_name, data_path, config_path, run_path, cl_args):
"""
Read the int training and dev data, initialize and train the model.
:param model_name: string; says which model to use.
:param data_path: string; path to the directory with unshuffled data
and the test and dev json files.
:param config_path: string; path to the directory json config for model
and trainer.
:param run_path: string; path for shuffled training data for run and
to which results and model gets saved.
:param cl_args: argparse command line object.
:return: None.
"""
run_name = os.path.basename(run_path)
# Load label maps and configs.
with codecs.open(config_path, 'r', 'utf-8') as fp:
all_hparams = json.load(fp)
# Unpack hyperparameter settings.
logging.info('All hyperparams:')
logging.info(pprint.pformat(all_hparams))
# Save hyperparams to disk.
run_info = {'all_hparams': all_hparams}
with codecs.open(os.path.join(run_path, 'run_info.json'), 'w', 'utf-8') as fp:
json.dump(run_info, fp)
# Initialize model.
if model_name == 'cosentbert':
model = sentsim_models.SentBERTWrapper(model_name=all_hparams['base-pt-layer'])
elif model_name == 'ictsentbert':
model = sentsim_models.ICTBERTWrapper(model_name=all_hparams['base-pt-layer'])
else:
logging.error('Unknown model: {:s}'.format(model_name))
sys.exit(1)
    # The model class's internal logic uses the model name at times, so set it here
    # for backward compatibility.
model.model_name = model_name
logging.info(model)
# Save an untrained model version.
trainer.sentbert_save_function(model=model, save_path=run_path, model_suffix='init')
# Move model to the GPU.
if torch.cuda.is_available():
model.cuda()
logging.info('Running on GPU.')
# Initialize the trainer.
if model_name in ['cosentbert', 'ictsentbert']:
batcher_cls = batchers.SentTripleBatcher
batcher_cls.bert_config_str = all_hparams['base-pt-layer']
else:
logging.error('Unknown model: {:s}'.format(model_name))
sys.exit(1)
if model_name in ['cosentbert']:
model_trainer = trainer.BasicRankingTrainer(model=model, batcher=batcher_cls, data_path=data_path, model_path=run_path,
early_stop=True, dev_score='loss', train_hparams=all_hparams)
model_trainer.save_function = trainer.sentbert_save_function
elif model_name in ['ictsentbert']:
model_trainer = trainer.BasicRankingTrainer(model=model, batcher=batcher_cls, data_path=data_path, model_path=run_path,
early_stop=True, dev_score='loss', train_hparams=all_hparams)
model_trainer.save_function = trainer.ictbert_save_function
# Train and save the best model to model_path.
model_trainer.train()
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand',
help='The action to perform.')
# Train the model.
train_args = subparsers.add_parser('train_model')
# Where to get what.
train_args.add_argument('--model_name', required=True,
choices=['cosentbert', 'ictsentbert'],
help='The name of the model to train.')
train_args.add_argument('--dataset', required=True,
choices=['s2orccompsci', 's2orcbiomed'],
help='The dataset to train and predict on.')
train_args.add_argument('--data_path', required=True,
help='Path to the jsonl dataset.')
train_args.add_argument('--run_path', required=True,
help='Path to directory to save all run items to.')
train_args.add_argument('--config_path', required=True,
                            help='Path to the json config file for the model.')
train_args.add_argument('--log_fname',
help='File name for the log file to which logs get'
' written.')
cl_args = parser.parse_args()
# If a log file was passed then write to it.
try:
logging.basicConfig(level='INFO', format='%(message)s',
filename=cl_args.log_fname)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
# Else just write to stdout.
except AttributeError:
logging.basicConfig(level='INFO', format='%(message)s',
stream=sys.stdout)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
if cl_args.subcommand == 'train_model':
train_model(model_name=cl_args.model_name, data_path=cl_args.data_path,
run_path=cl_args.run_path, config_path=cl_args.config_path, cl_args=cl_args)
if __name__ == '__main__':
main()
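# Example invocation (a sketch; the paths are placeholders for your own setup and
# the config json is expected to contain 'base-pt-layer' plus the trainer hparams):
# python -m src.learning.main_sentsim train_model \
#     --model_name cosentbert --dataset s2orccompsci \
#     --data_path <path-to-train-and-dev-data> \
#     --run_path <path-to-write-checkpoints-and-logs> \
#     --config_path <path-to-model-config.json> \
#     --log_fname <optional-log-file>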
| aspire-main | src/learning/main_sentsim.py |
"""
Classes to stream int-mapped data from file in batches, pad and sort them (as needed)
and return batch dicts for the models.
"""
import codecs
import sys
import re
import numpy as np
import torch
from transformers import AutoTokenizer
from . import data_utils as du
replace_sep = re.compile(r'\[SEP\]')
class GenericBatcher:
def __init__(self, num_examples, batch_size):
"""
Maintain batcher variables, state and such. Any batcher for a specific
model is a subclass of this and implements specific methods that it
needs.
- A batcher needs to know how to read from an int-mapped raw-file.
        - A batcher should yield a dict which your model class knows how to handle.
:param num_examples: the number of examples in total.
:param batch_size: the number of examples to have in a batch.
"""
# Batch sizes book-keeping; the 0 and -1 happen in the case of test time usage.
if num_examples > 0 and batch_size > -1:
self.full_len = num_examples
self.batch_size = batch_size
if self.full_len > self.batch_size:
self.num_batches = int(np.ceil(float(self.full_len) / self.batch_size))
else:
self.num_batches = 1
# Get batch indices.
self.batch_start = 0
self.batch_end = self.batch_size
def next_batch(self):
"""
This should yield the dict which your model knows how to make sense of.
:return:
"""
raise NotImplementedError
@staticmethod
def raw_batch_from_file(ex_file, to_read_count):
"""
Implement whatever you need for reading a raw batch of examples.
Read the next batch from the file.
:param ex_file: File-like with a next() method.
:param to_read_count: int; number of examples to read from the file.
:return:
"""
raise NotImplementedError
class SentTripleBatcher(GenericBatcher):
"""
    Feeds a model which inputs query and positive texts. Negatives are in-batch.
"""
bert_config_str = None
def __init__(self, ex_fnames, num_examples, batch_size):
"""
        Batcher class for the sentence similarity models trained on query-positive pairs.
        This batcher is also used at test time, at which point all the arguments here
        are meaningless; only make_batch and the functions beneath it will be used.
        The tokenizer is initialized from the class attribute bert_config_str.
        :param ex_fnames: dict('pos_ex_fname': str)
        :param num_examples: int.
        :param batch_size: int.
"""
GenericBatcher.__init__(self, num_examples=num_examples,
batch_size=batch_size)
        # Call it pos_ex_fname so that code elsewhere can be re-used.
if ex_fnames:
pos_ex_fname = ex_fnames['pos_ex_fname']
# Access the file with the sentence level examples.
self.pos_ex_file = codecs.open(pos_ex_fname, 'r', 'utf-8')
self.pt_lm_tokenizer = AutoTokenizer.from_pretrained(self.bert_config_str)
def next_batch(self):
"""
        Yield the next batch. Based on whether it's train mode or not, yield a
        different set of items.
:return:
batch_doc_ids: list; with the doc_ids corresponding to the
examples in the batch.
batch_dict: see make_batch.
"""
for nb in range(self.num_batches):
# Read the batch of data from the file.
if self.batch_end < self.full_len:
cur_batch_size = self.batch_size
else:
cur_batch_size = self.full_len - self.batch_start
batch_query_docids, batch_queries, batch_pos, batch_neg = \
next(SentTripleBatcher.raw_batch_from_file(self.pos_ex_file, cur_batch_size))
self.batch_start = self.batch_end
self.batch_end += self.batch_size
try:
if batch_neg and batch_pos:
feed = {'query_texts': batch_queries, 'pos_texts': batch_pos, 'neg_texts': batch_neg}
elif batch_pos:
feed = {'query_texts': batch_queries, 'pos_texts': batch_pos}
else:
feed = {'query_texts': batch_queries}
batch_dict = self.make_batch(raw_feed=feed, pt_lm_tokenizer=self.pt_lm_tokenizer)
except (IndexError, AssertionError) as error:
print(batch_query_docids)
print(batch_queries)
print(batch_pos)
sys.exit()
batch_dict = {
'batch_rank': batch_dict
}
yield batch_query_docids, batch_dict
@staticmethod
def raw_batch_from_file(ex_file, to_read_count):
"""
        Read the next batch of examples from the file.
        :param ex_file: File-like with a next() method.
        :param to_read_count: int; number of examples to read from the file.
        :return:
            ex_query_docids: list(int); running ids for the examples read.
            query_texts: list(str); query sentences.
            pos_texts: list(str); positive sentences (may be absent for SimCSE-like pretraining data).
            neg_texts: list(str); negative sentences (only present in dev files).
"""
# Initial values.
read_ex_count = 0
# These will be to_read_count long.
ex_query_docids = []
query_texts = []
pos_texts = []
neg_texts = []
# Read content from file until the file content is exhausted.
for ex in du.read_json(ex_file):
docids = read_ex_count
ex_query_docids.append(docids)
query_texts.append(ex['query'])
            # Don't assume even a positive is present -- this happens with
            # SimCSE-like pretraining.
try:
pos_texts.append(ex['pos_context'])
except KeyError:
pass
            # Only dev files have negative examples; training uses in-batch negatives.
try:
neg_texts.append(ex['neg_context'])
except KeyError:
pass
read_ex_count += 1
if read_ex_count == to_read_count:
yield ex_query_docids, query_texts, pos_texts, neg_texts
# Once execution is back here empty the lists and reset counters.
read_ex_count = 0
ex_query_docids = []
query_texts = []
pos_texts = []
neg_texts = []
@staticmethod
def make_batch(raw_feed, pt_lm_tokenizer):
"""
Creates positive and query batches. Only used for training. Test use happens
with embeddings generated in the pre_proc_buildreps scripts.
:param raw_feed: dict; a dict with the set of things you want to feed
the model.
:return:
batch_dict: dict of the form:
{
'query_bert_batch': dict(); The batch which BERT inputs with query sents;
Tokenized and int mapped sentences and other inputs to BERT.
'pos_bert_batch': dict(); The batch which BERT inputs with positive sents;
Tokenized and int mapped sentences and other inputs to BERT.
}
"""
# Unpack arguments.
query_texts = raw_feed['query_texts']
pos_texts = raw_feed['pos_texts']
# Get bert batches and prepare sep token indices.
qbert_batch, _, _ = SentTripleBatcher.prepare_bert_sentences(sents=query_texts, tokenizer=pt_lm_tokenizer)
pbert_batch, _, _ = SentTripleBatcher.prepare_bert_sentences(sents=pos_texts, tokenizer=pt_lm_tokenizer)
# Happens with the dev set in models using triple losses and in batch negs.
if 'neg_texts' in raw_feed:
neg_texts = raw_feed['neg_texts']
nbert_batch, _, _ = SentTripleBatcher.prepare_bert_sentences(sents=neg_texts, tokenizer=pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch,
'pos_bert_batch': pbert_batch,
'neg_bert_batch': nbert_batch
}
else:
batch_dict = {
'query_bert_batch': qbert_batch,
'pos_bert_batch': pbert_batch
}
return batch_dict
@staticmethod
def prepare_bert_sentences(sents, tokenizer):
"""
Given a batch of sentences prepare a batch which can be passed through BERT.
:param sents: list(string)
:param tokenizer: an instance of the appropriately initialized BERT tokenizer.
:return:
"""
max_num_toks = 500
# Construct the batch.
tokenized_batch = []
tokenized_text = []
batch_seg_ids = []
batch_attn_mask = []
seq_lens = []
max_seq_len = -1
for sent in sents:
bert_tokenized_text = tokenizer.tokenize(sent)
bert_tokenized_text = bert_tokenized_text[:max_num_toks]
tokenized_text.append(bert_tokenized_text)
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(bert_tokenized_text)
# Append CLS and SEP tokens to the text..
indexed_tokens = tokenizer.build_inputs_with_special_tokens(token_ids_0=indexed_tokens)
if len(indexed_tokens) > max_seq_len:
max_seq_len = len(indexed_tokens)
seq_lens.append(len(indexed_tokens))
tokenized_batch.append(indexed_tokens)
batch_seg_ids.append([0] * len(indexed_tokens))
batch_attn_mask.append([1] * len(indexed_tokens))
# Pad the batch.
for ids_sent, seg_ids, attn_mask in zip(tokenized_batch, batch_seg_ids, batch_attn_mask):
pad_len = max_seq_len - len(ids_sent)
ids_sent.extend([tokenizer.pad_token_id] * pad_len)
seg_ids.extend([tokenizer.pad_token_id] * pad_len)
attn_mask.extend([tokenizer.pad_token_id] * pad_len)
# The batch which the BERT model will input.
bert_batch = {
'tokid_tt': torch.tensor(tokenized_batch),
'seg_tt': torch.tensor(batch_seg_ids),
'attnmask_tt': torch.tensor(batch_attn_mask),
'seq_lens': seq_lens
}
return bert_batch, tokenized_text, tokenized_batch
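# Example (a sketch): building a training batch directly from raw text, assuming
# any BERT-style tokenizer accepted by AutoTokenizer.
# >>> tok = AutoTokenizer.from_pretrained('bert-base-uncased')
# >>> feed = {'query_texts': ['A query sentence.'], 'pos_texts': ['A paraphrase of it.']}
# >>> bd = SentTripleBatcher.make_batch(raw_feed=feed, pt_lm_tokenizer=tok)
# >>> sorted(bd.keys())
# ['pos_bert_batch', 'query_bert_batch']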
class AbsTripleBatcher(SentTripleBatcher):
@staticmethod
def make_batch(raw_feed, pt_lm_tokenizer):
"""
Creates positive and query batches. Only used for training. Test use happens
with embeddings generated in the pre_proc_buildreps scripts.
:param raw_feed: dict; a dict with the set of things you want to feed
the model.
:return:
batch_dict: dict of the form:
{
'query_bert_batch': dict(); The batch which BERT inputs with query sents;
Tokenized and int mapped sentences and other inputs to BERT.
'pos_bert_batch': dict(); The batch which BERT inputs with positive sents;
Tokenized and int mapped sentences and other inputs to BERT.
}
"""
# Unpack arguments.
query_texts = raw_feed['query_texts']
# Get bert batches and prepare sep token indices.
qbert_batch = AbsTripleBatcher.prepare_abstracts(batch_abs=query_texts, pt_lm_tokenizer=pt_lm_tokenizer)
# Happens with the dev set in models using triple losses and in batch negs.
if 'neg_texts' in raw_feed and 'pos_texts' in raw_feed:
neg_texts = raw_feed['neg_texts']
nbert_batch = AbsTripleBatcher.prepare_abstracts(batch_abs=neg_texts, pt_lm_tokenizer=pt_lm_tokenizer)
pos_texts = raw_feed['pos_texts']
pbert_batch = AbsTripleBatcher.prepare_abstracts(batch_abs=pos_texts, pt_lm_tokenizer=pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch,
'pos_bert_batch': pbert_batch,
'neg_bert_batch': nbert_batch
}
# Happens at train when using in batch negs.
elif 'pos_texts' in raw_feed:
pos_texts = raw_feed['pos_texts']
pbert_batch = AbsTripleBatcher.prepare_abstracts(batch_abs=pos_texts, pt_lm_tokenizer=pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch,
'pos_bert_batch': pbert_batch
}
# Happens when the function is called from other scripts to encode text.
else:
batch_dict = {
'bert_batch': qbert_batch,
}
return batch_dict
@staticmethod
def prepare_abstracts(batch_abs, pt_lm_tokenizer):
"""
Given the abstracts sentences as a list of strings prep them to pass through model.
:param batch_abs: list(dict); list of example dicts with sentences, facets, titles.
:return:
bert_batch: dict(); returned from prepare_bert_sentences.
"""
# Prepare bert batch.
batch_abs_seqs = []
        # Add the title and abstract concatenated with [SEP] tokens because that's how SPECTER did it.
for ex_abs in batch_abs:
seqs = [ex_abs['TITLE']]
seqs.extend([s for s in ex_abs['ABSTRACT']])
batch_abs_seqs.append(' [SEP] '.join([replace_sep.sub('', s) for s in seqs]))
bert_batch, tokenized_abs, tokenized_ids = SentTripleBatcher.prepare_bert_sentences(
sents=batch_abs_seqs, tokenizer=pt_lm_tokenizer)
return bert_batch
class AbsSentBatcher(SentTripleBatcher):
"""
Feeds a model which inputs query, positive and negative abstracts and sentence
idxs for the abstracts. Negatives only at dev time, else the model uses in-batch
negatives.
"""
@staticmethod
def make_batch(raw_feed, pt_lm_tokenizer):
"""
- Create [SEP] demarcated abs sents to feed bert.
- Generate indices to read off sentence reps from bert output.
(accounting for max sent len and special tokens inserted by BERT tokenizers)
- Generate indices for facet reps.
        If 'neg_texts' and 'pos_texts' are both in the raw_feed this is a dev-time call;
        if only 'pos_texts' is present this is a training call using in-batch negatives;
        otherwise the batch is only being encoded.
:param raw_feed: dict; a dict with the set of things you want to feed
the model.
:return:
batch_dict: dict of the form:
{
'query_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'query_abs_lens': list(int); Number of sentences in query abs.
'query_sep_idxs': LongTensor; Indices of the sep tokens to get sent reps,
flattened and indices adjusted to index the one dimensional token reps.
'pos_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from positive abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'pos_abs_lens': list(int);
'pos_sep_idxs': LongTensor; Indices of the sep tokens to get sent reps,
flattened and indices adjusted to index the one dimensional token reps.
'neg_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'neg_abs_lens': list(int);
'neg_sep_idxs': LongTensor; Indices of the sep tokens to get sent reps,
flattened and indices adjusted to index the one dimensional token reps.
}
"""
# Unpack arguments.
query_texts = raw_feed['query_texts']
# Get bert batches and prepare sep token indices.
qbert_batch, qabs_sep_idxs, qabs_len, qabs_flatsep_idxs = AbsSentBatcher.prepare_abstracts(
query_texts, pt_lm_tokenizer)
# Happens in the dev set.
if 'neg_texts' in raw_feed and 'pos_texts' in raw_feed:
neg_texts = raw_feed['neg_texts']
nbert_batch, nabs_sep_idxs, nabs_len, nabs_flatsep_idxs = AbsSentBatcher.prepare_abstracts(
neg_texts, pt_lm_tokenizer)
pos_texts = raw_feed['pos_texts']
pbert_batch, pabs_sep_idxs, pabs_len, pabs_flatsep_idxs = AbsSentBatcher.prepare_abstracts(
pos_texts, pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch, 'query_abs_lens': qabs_len, 'query_sep_idxs': qabs_flatsep_idxs,
'pos_bert_batch': pbert_batch, 'pos_abs_lens': pabs_len, 'pos_sep_idxs': pabs_flatsep_idxs,
'neg_bert_batch': nbert_batch, 'neg_abs_lens': nabs_len, 'neg_sep_idxs': nabs_flatsep_idxs
}
# Happens at train when using in batch negs.
elif 'pos_texts' in raw_feed:
pos_texts = raw_feed['pos_texts']
pbert_batch, pabs_sep_idxs, pabs_len, pabs_flatsep_idxs = AbsSentBatcher.prepare_abstracts(
pos_texts, pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch, 'query_abs_lens': qabs_len, 'query_sep_idxs': qabs_flatsep_idxs,
'pos_bert_batch': pbert_batch, 'pos_abs_lens': pabs_len, 'pos_sep_idxs': pabs_flatsep_idxs
}
# Happens when the function is called from other scripts to encode text.
else:
batch_dict = {
'bert_batch': qbert_batch, 'abs_lens': qabs_len, 'sep_idxs': qabs_flatsep_idxs
}
return batch_dict
@staticmethod
def prepare_abstracts(batch_abs, pt_lm_tokenizer):
"""
Given the abstracts sentences as a list of strings prep them to pass through model.
:param batch_abs: list(dict); list of example dicts with sentences, facets, titles.
:return:
            bert_batch: dict(); returned from prepare_bert_sentences.
            batch_sep_idxs: list(list(int)); per-abstract indices of the SEP tokens demarcating sentences.
            num_abs_sents: list(int); number of sentences per abstract.
            flat_sep_idxs: LongTensor; SEP indices adjusted to index a one-dimensional token array
                for the whole batch, padded per abstract to the maximum sentence count.
"""
# Prepare bert batch.
batch_abs_seqs = []
        # Add the title and abstract concatenated with [SEP] tokens because that's how SPECTER did it.
for ex_abs in batch_abs:
seqs = [ex_abs['TITLE']]
seqs.extend([s for s in ex_abs['ABSTRACT']])
batch_abs_seqs.append(' [SEP] '.join([replace_sep.sub('', s) for s in seqs]))
bert_batch, tokenized_abs, tokenized_ids = SentTripleBatcher.prepare_bert_sentences(
sents=batch_abs_seqs, tokenizer=pt_lm_tokenizer)
# Get SEP indices from the sentences; some of the sentences may have been cut off
# at some max length.
num_abs_sents = []
batch_sep_idxs = []
max_num_sents = -1
max_num_toks = -1
for tokid_seq in tokenized_ids:
sep_idxs = []
for i, tok_id in enumerate(tokid_seq):
if tok_id == pt_lm_tokenizer.sep_token_id:
sep_idxs.append(i)
# Skip the first sep token because it is intended for the title sentence.
sep_idxs = sep_idxs[1:]
assert(len(sep_idxs) > 0)
batch_sep_idxs.append(sep_idxs)
num_sents = len(sep_idxs)
num_abs_sents.append(num_sents)
if num_sents > max_num_sents:
max_num_sents = len(sep_idxs)
if len(tokid_seq) > max_num_toks:
max_num_toks = len(tokid_seq)
# Pad the sep indices to max_num_sents and adjust them for max_num_toks
flat_adjusted_sep_idxs = []
for i, sep_idxs in enumerate(batch_sep_idxs):
adjusted_idxs = [si+i*max_num_toks for si in sep_idxs]
pad_len = max_num_sents - len(sep_idxs)
# Pad so that indexing this gives the cls rep; which will be zeroed out eventually.
adjusted_idxs.extend([0]*pad_len)
flat_adjusted_sep_idxs.extend(adjusted_idxs)
flat_sep_idxs = torch.LongTensor(flat_adjusted_sep_idxs)
return bert_batch, batch_sep_idxs, num_abs_sents, flat_sep_idxs
class AbsSentTokBatcher(SentTripleBatcher):
"""
Feeds a model which inputs query, positive and negative abstracts and sentence
TOKEN indices for the abstracts. Negatives only at dev time, else the model uses in-batch
negatives.
"""
@staticmethod
def make_batch(raw_feed, pt_lm_tokenizer):
"""
:param raw_feed: dict; a dict with the set of things you want to feed
the model.
:return:
batch_dict: dict of the form:
{
'query_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'query_abs_lens': list(int); Number of sentences in query abs.
'query_senttok_idxs': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'pos_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from positive abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'pos_abs_lens': list(int);
'pos_senttok_idxs': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'neg_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'neg_abs_lens': list(int);
'neg_senttok_idxs': list(list(list(int))); batch_size(
num_abs_sents(num_sent_tokens(ints)))
}
"""
# Unpack arguments.
query_texts = raw_feed['query_texts']
# Get bert batches and prepare sep token indices.
qbert_batch, qabs_len, qabs_senttok_idxs = AbsSentTokBatcher.prepare_abstracts(
query_texts, pt_lm_tokenizer)
# Happens in the dev set.
if 'neg_texts' in raw_feed and 'pos_texts' in raw_feed:
neg_texts = raw_feed['neg_texts']
nbert_batch, nabs_len, nabs_senttok_idxs = AbsSentTokBatcher.prepare_abstracts(
neg_texts, pt_lm_tokenizer)
pos_texts = raw_feed['pos_texts']
pbert_batch, pabs_len, pabs_senttok_idxs = AbsSentTokBatcher.prepare_abstracts(
pos_texts, pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch, 'query_abs_lens': qabs_len, 'query_senttok_idxs': qabs_senttok_idxs,
'pos_bert_batch': pbert_batch, 'pos_abs_lens': pabs_len, 'pos_senttok_idxs': pabs_senttok_idxs,
'neg_bert_batch': nbert_batch, 'neg_abs_lens': nabs_len, 'neg_senttok_idxs': nabs_senttok_idxs
}
# Happens at train when using in batch negs.
elif 'pos_texts' in raw_feed:
pos_texts = raw_feed['pos_texts']
pbert_batch, pabs_len, pabs_senttok_idxs = AbsSentTokBatcher.prepare_abstracts(
pos_texts, pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch, 'query_abs_lens': qabs_len, 'query_senttok_idxs': qabs_senttok_idxs,
'pos_bert_batch': pbert_batch, 'pos_abs_lens': pabs_len, 'pos_senttok_idxs': pabs_senttok_idxs
}
# Happens when the function is called from other scripts to encode text.
else:
batch_dict = {
'bert_batch': qbert_batch, 'abs_lens': qabs_len, 'senttok_idxs': qabs_senttok_idxs
}
return batch_dict
@staticmethod
def prepare_abstracts(batch_abs, pt_lm_tokenizer):
"""
Given the abstracts sentences as a list of strings prep them to pass through model.
:param batch_abs: list(dict); list of example dicts with sentences, facets, titles.
:return:
bert_batch: dict(); returned from prepare_bert_sentences.
abs_lens: list(int); number of sentences per abstract.
sent_token_idxs: list(list(list(int))); batch_size(num_abs_sents(num_sent_tokens(ints)))
"""
# Prepare bert batch.
batch_abs_seqs = []
        # Add the title and abstract concatenated with [SEP] tokens because that's how SPECTER did it.
for ex_abs in batch_abs:
seqs = [ex_abs['TITLE'] + ' [SEP] ']
seqs.extend([s for s in ex_abs['ABSTRACT']])
batch_abs_seqs.append(seqs)
bert_batch, tokenized_abs, sent_token_idxs = AbsSentTokBatcher.prepare_bert_sentences(
sents=batch_abs_seqs, tokenizer=pt_lm_tokenizer)
# Get SEP indices from the sentences; some of the sentences may have been cut off
# at some max length.
abs_lens = []
for abs_sent_tok_idxs in sent_token_idxs:
num_sents = len(abs_sent_tok_idxs)
abs_lens.append(num_sents)
assert (num_sents > 0)
return bert_batch, abs_lens, sent_token_idxs
@staticmethod
def prepare_bert_sentences(sents, tokenizer):
"""
Given a batch of documents with sentences prepare a batch which can be passed through BERT.
Also keep track of the token indices for every sentence so sentence reps can be aggregated
by averaging word embeddings.
:param sents: list(list(string)); [batch_size[title and abstract sentences]]
:param tokenizer: an instance of the appropriately initialized BERT tokenizer.
:return:
            All truncated to max_num_toks by lopping off the final sentence.
            bert_batch: dict(); bert batch.
            batch_tokenized_text: list(string); tokenized concatenated title and abstract.
batch_sent_token_idxs: list(list(list(int))); batch_size([num_sents_per_abs[num_tokens_in_sent]])
"""
max_num_toks = 500
# Construct the batch.
tokenized_batch = []
batch_tokenized_text = []
batch_sent_token_idxs = []
batch_seg_ids = []
batch_attn_mask = []
seq_lens = []
max_seq_len = -1
for abs_sents in sents:
abs_tokenized_text = []
abs_indexed_tokens = []
abs_sent_token_indices = [] # list of list for every abstract.
cur_len = 0
for sent_i, sent in enumerate(abs_sents):
tokenized_sent = tokenizer.tokenize(sent)
# Convert token to vocabulary indices
sent_indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_sent)
                # Add 1 to account for the CLS token which will be added
# at the start of the sequence below.
cur_sent_tok_idxs = [cur_len+i+1 for i in range(len(tokenized_sent))]
# Store the token indices but account for the max_num_tokens
if cur_len + len(cur_sent_tok_idxs) <= max_num_toks:
abs_sent_token_indices.append(cur_sent_tok_idxs)
abs_tokenized_text.extend(tokenized_sent)
abs_indexed_tokens.extend(sent_indexed_tokens)
else:
len_exceded_by = cur_len + len(cur_sent_tok_idxs) - max_num_toks
reduced_len = len(cur_sent_tok_idxs) - len_exceded_by
                    # It can be that len_exceded_by is exactly len(cur_sent_tok_idxs);
                    # don't append an empty list then.
if reduced_len > 0:
abs_sent_token_indices.append(cur_sent_tok_idxs[:reduced_len])
abs_tokenized_text.extend(tokenized_sent[:reduced_len])
abs_indexed_tokens.extend(sent_indexed_tokens[:reduced_len])
break
cur_len += len(cur_sent_tok_idxs)
batch_tokenized_text.append(abs_tokenized_text)
            # Exclude the title's token indices.
batch_sent_token_idxs.append(abs_sent_token_indices[1:])
# Append CLS and SEP tokens to the text..
abs_indexed_tokens = tokenizer.build_inputs_with_special_tokens(token_ids_0=abs_indexed_tokens)
if len(abs_indexed_tokens) > max_seq_len:
max_seq_len = len(abs_indexed_tokens)
seq_lens.append(len(abs_indexed_tokens))
tokenized_batch.append(abs_indexed_tokens)
batch_seg_ids.append([0] * len(abs_indexed_tokens))
batch_attn_mask.append([1] * len(abs_indexed_tokens))
# Pad the batch.
for ids_sent, seg_ids, attn_mask in zip(tokenized_batch, batch_seg_ids, batch_attn_mask):
pad_len = max_seq_len - len(ids_sent)
ids_sent.extend([tokenizer.pad_token_id] * pad_len)
seg_ids.extend([tokenizer.pad_token_id] * pad_len)
attn_mask.extend([tokenizer.pad_token_id] * pad_len)
# The batch which the BERT model will input.
bert_batch = {
'tokid_tt': torch.tensor(tokenized_batch),
'seg_tt': torch.tensor(batch_seg_ids),
'attnmask_tt': torch.tensor(batch_attn_mask),
'seq_lens': seq_lens
}
return bert_batch, batch_tokenized_text, batch_sent_token_idxs
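# Example (a sketch): preparing one abstract for the token-index based batcher.
# The title's tokens are tracked but excluded from sent_token_idxs, so abs_lens
# counts only abstract sentences.
# >>> tok = AutoTokenizer.from_pretrained('allenai/specter')
# >>> ex = [{'TITLE': 'A title.', 'ABSTRACT': ['First sentence.', 'Second sentence.']}]
# >>> bert_batch, abs_lens, senttok_idxs = AbsSentTokBatcher.prepare_abstracts(ex, tok)
# >>> abs_lens
# [2]
# >>> len(senttok_idxs[0])  # one list of token indices per abstract sentence
# 2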
class AbsSentTokBatcherPreAlign(AbsSentTokBatcher):
"""
Feeds a model which inputs query, positive and negative abstracts and sentence
TOKEN indices for the abstracts. Negatives only at dev time, else the model uses in-batch
negatives.
"""
# Which pre-aligned index to read. Can be: {'cc_align', 'abs_align'}
align_type = 'cc_align'
@staticmethod
def make_batch(raw_feed, pt_lm_tokenizer):
"""
:param raw_feed: dict; a dict with the set of things you want to feed
the model.
:return:
batch_dict: dict of the form:
{
'query_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'query_abs_lens': list(int); Number of sentences in query abs.
'query_senttok_idxs': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'pos_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from positive abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'pos_abs_lens': list(int);
'pos_align_idxs': list([int int]); query align sent idx, cand align sent idx
'pos_senttok_idxs': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'neg_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'neg_abs_lens': list(int);
'neg_align_idxs': list([int int]); query align sent idx, cand align sent idx
'neg_senttok_idxs': list(list(list(int))); batch_size(
num_abs_sents(num_sent_tokens(ints)))
}
"""
# Unpack arguments.
query_texts = raw_feed['query_texts']
# Get bert batches and prepare sep token indices.
qbert_batch, qabs_len, qabs_senttok_idxs = AbsSentTokBatcher.prepare_abstracts(
query_texts, pt_lm_tokenizer)
# Happens in the dev set.
if 'neg_texts' in raw_feed and 'pos_texts' in raw_feed:
neg_texts = raw_feed['neg_texts']
nbert_batch, nabs_len, nabs_senttok_idxs, neg_align_idxs = AbsSentTokBatcherPreAlign.prepare_abstracts(
neg_texts, pt_lm_tokenizer)
pos_texts = raw_feed['pos_texts']
pbert_batch, pabs_len, pabs_senttok_idxs, pos_align_idxs = AbsSentTokBatcherPreAlign.prepare_abstracts(
pos_texts, pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch, 'query_abs_lens': qabs_len, 'query_senttok_idxs': qabs_senttok_idxs,
'pos_bert_batch': pbert_batch, 'pos_abs_lens': pabs_len, 'pos_senttok_idxs': pabs_senttok_idxs,
'neg_bert_batch': nbert_batch, 'neg_abs_lens': nabs_len, 'neg_senttok_idxs': nabs_senttok_idxs,
'pos_align_idxs': pos_align_idxs, 'neg_align_idxs': neg_align_idxs
}
# Happens at train when using in batch negs.
elif 'pos_texts' in raw_feed:
pos_texts = raw_feed['pos_texts']
pbert_batch, pabs_len, pabs_senttok_idxs, pos_align_idxs = AbsSentTokBatcherPreAlign.prepare_abstracts(
pos_texts, pt_lm_tokenizer)
batch_dict = {
'query_bert_batch': qbert_batch, 'query_abs_lens': qabs_len, 'query_senttok_idxs': qabs_senttok_idxs,
'pos_bert_batch': pbert_batch, 'pos_abs_lens': pabs_len, 'pos_senttok_idxs': pabs_senttok_idxs,
'pos_align_idxs': pos_align_idxs
}
# Happens when the function is called from other scripts to encode text.
else:
batch_dict = {
'bert_batch': qbert_batch, 'abs_lens': qabs_len, 'senttok_idxs': qabs_senttok_idxs
}
return batch_dict
@staticmethod
def prepare_abstracts(batch_abs, pt_lm_tokenizer):
"""
Given the abstracts sentences as a list of strings prep them to pass through model.
:param batch_abs: list(dict); list of example dicts with sentences, facets, titles.
:return:
bert_batch: dict(); returned from prepare_bert_sentences.
abs_lens: list(int); number of sentences per abstract.
sent_token_idxs: list(list(list(int))); batch_size(num_abs_sents(num_sent_tokens(ints)))
pre_computed_alignments: list(list(int)); batch_size([q_idx, c_idx])
"""
# Prepare bert batch.
batch_abs_seqs = []
pre_computed_alignments = []
        # Add the title and abstract concatenated with [SEP] tokens because that's how SPECTER did it.
for ex_abs in batch_abs:
seqs = [ex_abs['TITLE'] + ' [SEP] ']
seqs.extend([s for s in ex_abs['ABSTRACT']])
batch_abs_seqs.append(seqs)
if AbsSentTokBatcherPreAlign.align_type in ex_abs:
assert(len(ex_abs[AbsSentTokBatcherPreAlign.align_type]) == 2)
pre_computed_alignments.append(ex_abs[AbsSentTokBatcherPreAlign.align_type])
bert_batch, tokenized_abs, sent_token_idxs = AbsSentTokBatcher.prepare_bert_sentences(
sents=batch_abs_seqs, tokenizer=pt_lm_tokenizer)
# Get SEP indices from the sentences; some of the sentences may have been cut off
# at some max length.
abs_lens = []
for abs_sent_tok_idxs in sent_token_idxs:
num_sents = len(abs_sent_tok_idxs)
abs_lens.append(num_sents)
assert (num_sents > 0)
if pre_computed_alignments:
assert(len(pre_computed_alignments) == len(abs_lens))
return bert_batch, abs_lens, sent_token_idxs, pre_computed_alignments
else:
return bert_batch, abs_lens, sent_token_idxs
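# Example (a sketch): candidate abstracts fed to AbsSentTokBatcherPreAlign carry a
# pre-computed [query_sentence_idx, candidate_sentence_idx] pair under the key named
# by align_type ('cc_align' by default):
# >>> cand = {'TITLE': 'A title.',
# ...         'ABSTRACT': ['First sentence.', 'Second sentence.'],
# ...         'cc_align': [0, 1]}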
| aspire-main | src/learning/batchers.py |
aspire-main | src/learning/__init__.py |
|
"""
For the fine-grained similarity models:
Call code from everywhere, read data, initialize model, train model and make
sure training is doing something meaningful.
"""
import argparse, os, sys
import codecs, pprint, json
import datetime
import logging
import torch
import torch.multiprocessing as torch_mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from . import batchers, trainer
from .facetid_models import disent_models
# Copying from: https://discuss.pytorch.org/t/why-do-we-have-to-create-logger-in-process-for-correct-logging-in-ddp/102164/3
# Had double printing errors, solution finagled from:
# https://stackoverflow.com/q/6729268/3262406
def get_logger():
logger = logging.getLogger()
if logger.handlers:
logger.handlers.pop()
# Handlers.
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter())
logger.addHandler(
handler
)
logger.setLevel(logging.INFO)
return logger
def setup_ddp(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group(
backend='nccl',
world_size=world_size,
rank=rank,
timeout=datetime.timedelta(0, 3600)
)
def cleanup_ddp():
dist.destroy_process_group()
def ddp_train_model(process_rank, args):
"""
Read the int training and dev data, initialize and train the model.
:param model_name: string; says which model to use.
:param data_path: string; path to the directory with unshuffled data
and the test and dev json files.
:param config_path: string; path to the directory json config for model
and trainer.
:param run_path: string; path for shuffled training data for run and
to which results and model gets saved.
:param cl_args: argparse command line object.
:return: None.
"""
cl_args = args
model_name, data_path, config_path, run_path = \
cl_args.model_name, cl_args.data_path, cl_args.config_path, cl_args.run_path
run_name = os.path.basename(run_path)
# Load label maps and configs.
with codecs.open(config_path, 'r', 'utf-8') as fp:
all_hparams = json.load(fp)
setup_ddp(rank=process_rank, world_size=cl_args.num_gpus)
# Setup logging and experiment tracking.
if process_rank == 0:
# Print the called script and its args to the log.
logger = get_logger()
print(' '.join(sys.argv))
# Unpack hyperparameter settings.
print('All hyperparams:')
print(pprint.pformat(all_hparams))
# Save hyperparams to disk from a single process.
run_info = {'all_hparams': all_hparams}
with codecs.open(os.path.join(run_path, 'run_info.json'), 'w', 'utf-8') as fp:
json.dump(run_info, fp)
else:
logger = None
# Initialize model.
if model_name in {'cospecter'}:
model = disent_models.MySPECTER(model_hparams=all_hparams)
elif model_name in {'miswordbienc'}:
model = disent_models.WordSentAlignBiEnc(model_hparams=all_hparams)
elif model_name in {'sbalisentbienc'}:
model = disent_models.WordSentAbsSupAlignBiEnc(model_hparams=all_hparams)
elif model_name in {'miswordpolyenc'}:
model = disent_models.WordSentAlignPolyEnc(model_hparams=all_hparams)
else:
sys.exit(1)
    # The model class's internal logic uses the model name at times, so set it here
    # for backward compatibility.
model.model_name = model_name
if process_rank == 0:
# Save an untrained model version.
trainer.generic_save_function_ddp(model=model, save_path=run_path, model_suffix='init')
print(model)
# Move model to the GPU.
torch.cuda.set_device(process_rank)
if torch.cuda.is_available():
model.cuda(process_rank)
if process_rank == 0: print('Running on GPU.')
model = DistributedDataParallel(model, device_ids=[process_rank], find_unused_parameters=True)
# Initialize the trainer.
if model_name in ['cospecter']:
batcher_cls = batchers.AbsTripleBatcher
batcher_cls.bert_config_str = all_hparams['base-pt-layer']
elif model_name in ['miswordbienc', 'miswordpolyenc']:
batcher_cls = batchers.AbsSentTokBatcher
batcher_cls.bert_config_str = all_hparams['base-pt-layer']
elif model_name in ['sbalisentbienc']:
batcher_cls = batchers.AbsSentTokBatcherPreAlign
# Use the context based alignment by default.
batcher_cls.align_type = all_hparams.get('align_type', 'cc_align')
batcher_cls.bert_config_str = all_hparams['base-pt-layer']
else:
sys.exit(1)
if model_name in ['cospecter', 'miswordbienc',
'miswordpolyenc', 'sbalisentbienc']:
model_trainer = trainer.BasicRankingTrainerDDP(
logger=logger, process_rank=process_rank, num_gpus=cl_args.num_gpus,
model=model, batcher=batcher_cls, data_path=data_path, model_path=run_path,
early_stop=True, dev_score='loss', train_hparams=all_hparams)
model_trainer.save_function = trainer.generic_save_function_ddp
# Train and save the best model to model_path.
model_trainer.train()
cleanup_ddp()
def train_model(model_name, data_path, config_path, run_path, cl_args):
"""
Read the int training and dev data, initialize and train the model.
:param model_name: string; says which model to use.
:param data_path: string; path to the directory with unshuffled data
and the test and dev json files.
:param config_path: string; path to the directory json config for model
and trainer.
:param run_path: string; path for shuffled training data for run and
to which results and model gets saved.
:param cl_args: argparse command line object.
:return: None.
"""
run_name = os.path.basename(run_path)
# Load label maps and configs.
with codecs.open(config_path, 'r', 'utf-8') as fp:
all_hparams = json.load(fp)
# Unpack hyperparameter settings.
logging.info('All hyperparams:')
logging.info(pprint.pformat(all_hparams))
# Save hyperparams to disk.
run_info = {'all_hparams': all_hparams}
with codecs.open(os.path.join(run_path, 'run_info.json'), 'w', 'utf-8') as fp:
json.dump(run_info, fp)
# Initialize model.
if model_name in {'cospecter'}:
model = disent_models.MySPECTER(model_hparams=all_hparams)
# Save an untrained model version.
trainer.generic_save_function(model=model, save_path=run_path, model_suffix='init')
elif model_name in {'miswordbienc'}:
model = disent_models.WordSentAlignBiEnc(model_hparams=all_hparams)
# Save an untrained model version.
trainer.generic_save_function(model=model, save_path=run_path, model_suffix='init')
elif model_name in {'sbalisentbienc'}:
model = disent_models.WordSentAbsSupAlignBiEnc(model_hparams=all_hparams)
trainer.generic_save_function(model=model, save_path=run_path, model_suffix='init')
else:
logging.error('Unknown model: {:s}'.format(model_name))
sys.exit(1)
    # The model class's internal logic uses the model name at times, so set it here
    # for backward compatibility.
model.model_name = model_name
logging.info(model)
# Move model to the GPU.
if torch.cuda.is_available():
model.cuda()
logging.info('Running on GPU.')
# Initialize the trainer.
if model_name in ['cospecter']:
batcher_cls = batchers.AbsTripleBatcher
batcher_cls.bert_config_str = all_hparams['base-pt-layer']
elif model_name in ['miswordbienc']:
batcher_cls = batchers.AbsSentTokBatcher
batcher_cls.bert_config_str = all_hparams['base-pt-layer']
elif model_name in ['sbalisentbienc']:
batcher_cls = batchers.AbsSentTokBatcherPreAlign
# Use the context based alignment by default.
batcher_cls.align_type = all_hparams.get('align_type', 'cc_align')
batcher_cls.bert_config_str = all_hparams['base-pt-layer']
else:
logging.error('Unknown model: {:s}'.format(model_name))
sys.exit(1)
if model_name in ['cospecter', 'miswordbienc',
'sbalisentbienc']:
model_trainer = trainer.BasicRankingTrainer(model=model, batcher=batcher_cls, data_path=data_path, model_path=run_path,
early_stop=True, dev_score='loss', train_hparams=all_hparams)
model_trainer.save_function = trainer.generic_save_function
# Train and save the best model to model_path.
model_trainer.train()
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand',
help='The action to perform.')
# Train the model.
train_args = subparsers.add_parser('train_model')
# Where to get what.
train_args.add_argument('--model_name', required=True,
choices=['cospecter', 'miswordbienc',
'miswordpolyenc', 'sbalisentbienc'],
help='The name of the model to train.')
train_args.add_argument('--dataset', required=True,
choices=['s2orcscidocs', 's2orccompsci', 's2orcbiomed', 'relish', 'treccovid'],
help='The dataset to train and predict on.')
train_args.add_argument('--num_gpus', required=True, type=int,
help='Number of GPUs to train on/number of processes running parallel training.')
train_args.add_argument('--data_path', required=True,
help='Path to the jsonl dataset.')
train_args.add_argument('--run_path', required=True,
help='Path to directory to save all run items to.')
train_args.add_argument('--config_path', required=True,
                            help='Path to the json config file for the model.')
cl_args = parser.parse_args()
# If a log file was passed then write to it.
try:
logging.basicConfig(level='INFO', format='%(message)s',
filename=cl_args.log_fname)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
# Else just write to stdout.
except AttributeError:
logging.basicConfig(level='INFO', format='%(message)s',
stream=sys.stdout)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
if cl_args.subcommand == 'train_model':
if cl_args.num_gpus > 1:
torch_mp.spawn(ddp_train_model, nprocs=cl_args.num_gpus, args=(cl_args,))
else:
train_model(model_name=cl_args.model_name, data_path=cl_args.data_path,
run_path=cl_args.run_path, config_path=cl_args.config_path, cl_args=cl_args)
if __name__ == '__main__':
main()
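# Example invocation (a sketch; the paths are placeholders for your own setup and
# the config json is expected to contain 'base-pt-layer' plus the trainer hparams):
# python -m src.learning.main_fsim train_model \
#     --model_name miswordbienc --dataset s2orccompsci --num_gpus 1 \
#     --data_path <path-to-train-and-dev-data> \
#     --run_path <path-to-write-checkpoints-and-logs> \
#     --config_path <path-to-model-config.json>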
| aspire-main | src/learning/main_fsim.py |
"""
Utilities to run models over data in batches: batched loss computation, dev scoring, and batched prediction.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
from sklearn import metrics as skmetrics
import torch
def batched_loss_ddp(model, batcher, loss_helper, logger, batch_size, ex_fnames, num_examples):
"""
Make predictions batch by batch.
:param model: the model object with a predict method.
:param batcher: reference to model_utils.Batcher class.
:param loss_helper: function; models return dict with different loss components
which the loss helper knows how to handle.
:param batch_size: int; number of docs to consider in a batch.
:param ex_fnames: dict; which the batcher understands as having example
file names.
:param num_examples: int; number of examples in above files.
:return: loss: float; total loss for the data passed.
"""
    # Initialize batcher.
loss_batcher = batcher(ex_fnames=ex_fnames, num_examples=num_examples,
batch_size=batch_size)
with torch.no_grad():
loss = torch.FloatTensor([0])
if torch.cuda.is_available():
loss = loss.cuda()
iteration = 0
# No need for conditional logging here because this is only called from one process all the time.
print('Dev pass; Num batches: {:d}'.format(loss_batcher.num_batches))
for batch_ids, batch_dict in loss_batcher.next_batch():
with torch.no_grad():
ret_dict = model.forward(batch_dict=batch_dict)
batch_objective = loss_helper(ret_dict)
# Objective is a variable; Do your summation on the GPU.
loss += batch_objective.data
if iteration % 100 == 0:
print('\tDev pass; Iteration: {:d}/{:d}'.format(iteration, loss_batcher.num_batches))
iteration += 1
if torch.cuda.is_available():
loss = float(loss.cpu().numpy())
return loss
def batched_loss(model, batcher, loss_helper, batch_size, ex_fnames, num_examples):
"""
Make predictions batch by batch.
:param model: the model object with a predict method.
:param batcher: reference to model_utils.Batcher class.
:param loss_helper: function; models return dict with different loss components
which the loss helper knows how to handle.
:param batch_size: int; number of docs to consider in a batch.
:param ex_fnames: dict; which the batcher understands as having example
file names.
:param num_examples: int; number of examples in above files.
:return: loss: float; total loss for the data passed.
"""
    # Initialize batcher.
loss_batcher = batcher(ex_fnames=ex_fnames, num_examples=num_examples,
batch_size=batch_size)
with torch.no_grad():
loss = torch.FloatTensor([0])
if torch.cuda.is_available():
loss = loss.cuda()
iteration = 0
logging.info('Dev pass; Num batches: {:d}'.format(loss_batcher.num_batches))
for batch_ids, batch_dict in loss_batcher.next_batch():
with torch.no_grad():
ret_dict = model.forward(batch_dict=batch_dict)
batch_objective = loss_helper(ret_dict)
# Objective is a variable; Do your summation on the GPU.
loss += batch_objective.data
if iteration % 100 == 0:
logging.info('\tDev pass; Iteration: {:d}/{:d}'.
format(iteration, loss_batcher.num_batches))
iteration += 1
if torch.cuda.is_available():
loss = float(loss.cpu().numpy())
return loss
def batched_dev_scores(model, batcher, batch_size, ex_fnames, num_examples):
"""
Make predictions batch by batch.
:param model: the model object with a predict method.
:param batcher: reference to model_utils.Batcher class.
:param batch_size: int; number of docs to consider in a batch.
:param ex_fnames: dict; which the batcher understands as having example
file names.
:param num_examples: int; number of examples in above files.
:return: weightedf1: float; this is also the microaverage f1.
"""
batch_pred_generator = batched_predict(
model=model, batcher=batcher, batch_size=batch_size,
ex_fnames=ex_fnames, num_examples=num_examples)
target_labels = []
predicted_labels = []
for batch_doc_ids, batch_pred_dict in batch_pred_generator:
target_labels.extend(batch_pred_dict['targets'])
predicted_labels.extend(batch_pred_dict['preds'])
# Get classification report.
logging.info(skmetrics.classification_report(y_true=target_labels, y_pred=predicted_labels,
digits=4, output_dict=False))
metrics = skmetrics.classification_report(y_true=target_labels, y_pred=predicted_labels,
digits=4, output_dict=True)
return metrics['weighted avg']['f1-score']
def batched_predict(model, batcher, batch_size, ex_fnames, num_examples):
"""
    Make predictions batch by batch, without any shuffling.
:param model: the model object with a predict method.
:param batcher: reference to model_utils.Batcher class.
:param batch_size: int; number of docs to consider in a batch.
:param ex_fnames: dict; which the batcher understands as having example
file names.
:param num_examples: int; number of examples in above file.
:return:
"""
    # Initialize batcher.
predict_batcher = batcher(ex_fnames=ex_fnames, num_examples=num_examples,
batch_size=batch_size)
iteration = 0
logging.info('Predict pass; Num batches: {:d}'.format(predict_batcher.num_batches))
for batch_doc_ids, batch_dict in predict_batcher.next_batch():
        # Make a prediction. The model's predict() returns a dict whose contents
        # vary by model; returning a generic dict lets this function be reused.
with torch.no_grad():
ret_dict = model.predict(batch_dict=batch_dict)
if iteration % 100 == 0:
logging.info('\tPredict pass; Iteration: {:d}/{:d}'.
format(iteration, predict_batcher.num_batches))
iteration += 1
        # Yield the doc ids and the model's predictions for this batch.
yield batch_doc_ids, ret_dict
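# Example (a sketch): consuming the prediction generator for a dev file; `model`,
# `batchers`, `dev_fname`, and `num_dev` are assumed to be set up the way the
# trainer sets them up.
# >>> pred_gen = batched_predict(model=model, batcher=batchers.SentTripleBatcher, batch_size=32,
# ...                            ex_fnames={'pos_ex_fname': dev_fname}, num_examples=num_dev)
# >>> for batch_ids, ret_dict in pred_gen:
# ...     collect(batch_ids, ret_dict)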
| aspire-main | src/learning/predict_utils.py |
"""
Miscellaneous utilities to read and work with the json files and such.
Stuff multiple functions use.
"""
import sys
import os
import errno
import json
import logging
import numpy as np
# Use mpl on remote.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def print_sorted_dict(d, out_file):
for k in sorted(d, key=d.get, reverse=True):
try:
out_file.write("{}, {}\n".format(k, d[k]))
except UnicodeError:
pass
def create_dir(dir_name):
"""
Create the directory whose name is passed.
:param dir_name: String saying the name of directory to create.
:return: None.
"""
    # Create the output directory if it doesn't exist.
try:
os.makedirs(dir_name)
print('Created: {}.'.format(dir_name))
except OSError as ose:
        # For the case of a *file* with the name dir_name already existing.
if (not os.path.isdir(dir_name)) and (ose.errno == errno.EEXIST):
sys.stderr.write('IO ERROR: Could not create output directory\n')
sys.exit(1)
        # If it's something else unexpected, report it and exit.
if ose.errno != errno.EEXIST:
sys.stderr.write('OS ERROR: {:d}: {:s}: {:s}\n'.format(ose.errno,
ose.strerror,
dir_name))
sys.exit(1)
def read_json(json_file):
"""
Read per line JSON and yield.
:param json_file: File-like with a next() method.
:return: yield one json object.
"""
for json_line in json_file:
# Try to manually skip bad chars.
# https://stackoverflow.com/a/9295597/3262406
try:
# json.loads dropped its encoding argument in Python 3.9; the line is already text.
f_dict = json.loads(json_line.replace('\r\n', '\\r\\n'))
yield f_dict
# Lines with bad escape characters raise a ValueError; re-raise it to the caller.
except ValueError:
raise
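# Illustrative usage sketch for read_json (not part of the original module); the
# in-memory file and the two records below are made up for demonstration.
def _demo_read_json():
    import io
    fake_file = io.StringIO('{"paper_id": "p1"}\n{"paper_id": "p2"}\n')
    for record in read_json(fake_file):
        print(record['paper_id'])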
def plot_train_hist(y_vals, checked_iters, fig_path, ylabel, suffix=None):
"""
Plot y_vals against the number of iterations.
:param y_vals: list; values along the y-axis.
:param checked_iters: list; len(y_vals)==len(checked_iters); the iterations
the values in y_vals correspond to.
:param fig_path: string; the directory to write the plots to.
:param ylabel: string; the label for the y-axis.
:param suffix: string; string to add to the figure filename.
:return: None.
"""
# If there is nothing to plot just return.
if len(checked_iters) <= 3:
return
x_vals = np.array(checked_iters)
y_vals = np.vstack(y_vals)
plt.plot(x_vals, y_vals, '-', linewidth=2)
plt.xlabel('Training iteration')
plt.ylabel(ylabel)
plt.title('Evaluated every: {:d} iterations'.format(
checked_iters[1]-checked_iters[0]))
plt.tight_layout()
ylabel = '_'.join(ylabel.lower().split())
if suffix:
fig_file = os.path.join(fig_path, '{:s}_history-{:s}.eps'.format(ylabel, suffix))
else:
fig_file = os.path.join(fig_path, '{:s}_history.eps'.format(ylabel))
plt.savefig(fig_file)
if suffix:
plt.savefig(os.path.join(fig_path, '{:s}_history-{:s}.png'.format(ylabel, suffix)))
else:
plt.savefig(os.path.join(fig_path, '{:s}_history.png'.format(ylabel)))
plt.clf()
logging.info('Wrote: {:s}'.format(fig_file))
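# Illustrative usage sketch for plot_train_hist (not part of the original module); the
# loss values are made up and the plots are written to a throwaway temporary directory.
def _demo_plot_train_hist():
    import tempfile
    out_dir = tempfile.mkdtemp()
    checked_iters = [0, 100, 200, 300, 400]
    losses = [2.3, 1.7, 1.2, 0.9, 0.8]
    plot_train_hist(losses, checked_iters, fig_path=out_dir, ylabel='Training loss')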
| aspire-main | src/learning/data_utils.py |
"""
Train the passed model given the data and the batcher and save the best to disk.
"""
from __future__ import print_function
import os
import logging
import time, copy
from collections import defaultdict
import numpy as np
import torch
import torch.distributed as dist
import torch.optim as optim
import transformers
from . import predict_utils as pu
from . import data_utils as du
def consume_prefix_in_state_dict_if_present(state_dict, prefix):
r"""Strip the prefix in state_dict, if any.
..note::
Given a `state_dict` from a DP/DDP model, a local model can load it by applying
`consume_prefix_in_state_dict_if_present(state_dict, "module.")` before calling
:meth:`torch.nn.Module.load_state_dict`.
Args:
state_dict (OrderedDict): a state-dict to be loaded to the model.
prefix (str): prefix.
Copied from here because torch 1.8.1 is in use and this helper only ships with 1.9.0:
https://github.com/pytorch/pytorch/blob/1f2b96e7c447210072fe4d2ed1a39d6121031ba6/torch/nn/modules/utils.py
"""
keys = sorted(state_dict.keys())
for key in keys:
if key.startswith(prefix):
newkey = key[len(prefix) :]
state_dict[newkey] = state_dict.pop(key)
# also strip the prefix in metadata if any.
if "_metadata" in state_dict:
metadata = state_dict["_metadata"]
for key in list(metadata.keys()):
# for the metadata dict, the key can be:
# '': for the DDP module, which we want to remove.
# 'module': for the actual model.
# 'module.xx.xx': for the rest.
if len(key) == 0:
continue
newkey = key[len(prefix) :]
metadata[newkey] = metadata.pop(key)
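# Illustrative sketch (not part of the original module) of what the helper above does:
# DistributedDataParallel prefixes every parameter name with "module.", and stripping
# that prefix lets a plain (non-DDP) module load the checkpoint. The keys below are
# made up for demonstration.
def _demo_strip_ddp_prefix():
    from collections import OrderedDict
    ddp_state = OrderedDict([('module.encoder.weight', 1), ('module.encoder.bias', 2)])
    consume_prefix_in_state_dict_if_present(ddp_state, 'module.')
    # ddp_state now has the keys 'encoder.weight' and 'encoder.bias'.
    return ddp_state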
def generic_save_function_ddp(model, save_path, model_suffix):
"""
Model saving function used in the training loop.
This saves the state dict with the DDP "module." prefix stripped so that non-DDP models can load the checkpoint.
"""
model_fname = os.path.join(save_path, f'model_{model_suffix}.pt')
state_dict = copy.deepcopy(model.state_dict())
consume_prefix_in_state_dict_if_present(state_dict, "module.")
torch.save(state_dict, model_fname)
print('Wrote: {:s}'.format(model_fname))
def generic_save_function(model, save_path, model_suffix):
"""
Model saving function used in the training loop.
"""
model_fname = os.path.join(save_path, f'model_{model_suffix}.pt')
torch.save(model.state_dict(), model_fname)
logging.info('Wrote: {:s}'.format(model_fname))
def sentbert_save_function(model, save_path, model_suffix):
"""
Model saving function used in the training loop for sentence bert
"""
model_fname = os.path.join(save_path, f'sent_encoder_{model_suffix}.pt')
torch.save(model.sent_encoder.state_dict(), model_fname)
logging.info('Wrote: {:s}'.format(model_fname))
def ictbert_save_function(model, save_path, model_suffix):
"""
Model saving function used in the training loop for the ICT model (saves both the sentence and context encoders).
"""
model_fname = os.path.join(save_path, f'sent_encoder_{model_suffix}.pt')
torch.save(model.sent_encoder.state_dict(), model_fname)
logging.info('Wrote: {:s}'.format(model_fname))
model_fname = os.path.join(save_path, f'context_encoder_{model_suffix}.pt')
torch.save(model.context_encoder.state_dict(), model_fname)
logging.info('Wrote: {:s}'.format(model_fname))
class GenericTrainer:
# If this isn't set outside of here it crashes with: "got multiple values for argument"
# todo: Look into why this happens -- low priority.
save_function = generic_save_function
def __init__(self, model, batcher, model_path, train_hparams,
early_stop=True, verbose=True, dev_score='loss'):
"""
A generic trainer class that defines the training procedure. Trainers
for other models should subclass this and define the data that the models
being trained consume.
:param model: pytorch model.
:param batcher: a model_utils.Batcher class.
:param model_path: string; directory to which model should get saved.
:param early_stop: boolean;
:param verbose: boolean;
:param dev_score: string; {'loss'/'f1'} How dev set evaluation should be done.
# train_hparams dict elements.
:param train_size: int; number of training examples.
:param dev_size: int; number of dev examples.
:param batch_size: int; number of examples per batch.
:param accumulated_batch_size: int; number of examples to accumulate gradients
for in smaller batch size before computing the gradient. If this is not present
in the dictionary or is smaller than batch_size then assume no gradient
accumulation.
:param update_rule: string;
:param num_epochs: int; number of passes through the training data.
:param learning_rate: float;
:param es_check_every: int; check some metric on the dev set every check_every iterations.
:param lr_decay_method: string; {'exponential', 'warmuplin', 'warmupcosine'}
:param decay_lr_by: float; decay the learning rate exponentially by the following
factor.
:param num_warmup_steps: int; number of steps for which to do warm up.
:param decay_lr_every: int; decay learning rate every few iterations.
"""
# Book keeping
self.dev_score = dev_score
self.verbose = verbose
self.es_check_every = train_hparams['es_check_every']
self.num_train = train_hparams['train_size']
self.num_dev = train_hparams['dev_size']
self.batch_size = train_hparams['batch_size']
self.num_epochs = train_hparams['num_epochs']
try:
self.accumulated_batch_size = train_hparams['accumulated_batch_size']
# You can set accumulated_batch_size to 0 or -1 and it will assume no grad accumulation.
if self.accumulated_batch_size > 0:
# It should be bigger and an exact multiple of the batch size.
assert(self.accumulated_batch_size > self.batch_size
and self.accumulated_batch_size % self.batch_size == 0)
self.accumulate_gradients = True
self.update_params_every = self.accumulated_batch_size/self.batch_size
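# For example (illustrative numbers): batch_size=8 and accumulated_batch_size=32 give
# update_params_every=4, i.e. gradients from 4 forward/backward passes are summed
# before a single optimizer.step() call.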
logging.info('Accumulating gradients for: {:}; updating params every: {:}; with batch size: {:}'
.format(self.accumulated_batch_size, self.update_params_every, self.batch_size))
else:
self.accumulate_gradients = False
except KeyError:
self.accumulate_gradients = False
if self.num_train > self.batch_size:
self.num_batches = int(np.ceil(float(self.num_train)/self.batch_size))
else:
self.num_batches = 1
self.model_path = model_path # Save model and checkpoints.
self.total_iters = self.num_epochs*self.num_batches
self.iteration = 0
# Model, batcher and the data.
self.model = model
self.batcher = batcher
self.time_per_batch = 0
self.time_per_dev_pass = 0
# Different trainer classes can add this based on the data that the model
# they are training needs.
self.train_fnames = []
self.dev_fnames = {}
# Optimizer args.
self.early_stop = early_stop
self.update_rule = train_hparams['update_rule']
self.learning_rate = train_hparams['learning_rate']
# Initialize optimizer.
if self.update_rule == 'adam':
self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
elif self.update_rule == 'adagrad':
self.optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
else:
raise ValueError('Unknown update rule: {:s}'.format(self.update_rule))
# Reduce the learning rate every few iterations.
self.lr_decay_method = train_hparams['lr_decay_method']
self.decay_lr_every = train_hparams['decay_lr_every']
self.log_every = 5
# self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
# self.optimizer, mode='min', factor=0.1, patience=1,
# verbose=True)
if self.lr_decay_method == 'exponential':
self.decay_lr_by = train_hparams['decay_lr_by']
self.scheduler = optim.lr_scheduler.ExponentialLR(optimizer=self.optimizer,
gamma=self.decay_lr_by)
elif self.lr_decay_method == 'warmuplin':
self.num_warmup_steps = train_hparams['num_warmup_steps']
self.scheduler = transformers.get_linear_schedule_with_warmup(
optimizer=self.optimizer, num_warmup_steps=self.num_warmup_steps,
# Total number of training batches.
num_training_steps=self.num_epochs*self.num_batches)
elif self.lr_decay_method == 'warmupcosine':
self.num_warmup_steps = train_hparams['num_warmup_steps']
self.scheduler = transformers.get_cosine_schedule_with_warmup(
optimizer=self.optimizer, num_warmup_steps=self.num_warmup_steps,
num_training_steps=self.num_epochs*self.num_batches)
else:
raise ValueError('Unknown lr_decay_method: {:}'.format(train_hparams['lr_decay_method']))
# Train statistics.
self.loss_history = defaultdict(list)
self.loss_checked_iters = []
self.dev_score_history = []
self.dev_checked_iters = []
# Every subclass needs to set this.
self.loss_function_cal = GenericTrainer.compute_loss
def train(self):
"""
Make num_epoch passes through the training set and train the model.
:return:
"""
# Pick the model with the least loss.
best_params = self.model.state_dict()
best_epoch, best_iter = 0, 0
best_dev_score = -np.inf
total_time_per_batch = 0
total_time_per_dev = 0
train_start = time.time()
logging.info('num_train: {:d}; num_dev: {:d}'.format(
self.num_train, self.num_dev))
logging.info('Training {:d} epochs, {:d} iterations'.
format(self.num_epochs, self.total_iters))
for epoch, ex_fnames in zip(range(self.num_epochs), self.train_fnames):
# Initialize batcher. Shuffle one time before the start of every
# epoch.
epoch_batcher = self.batcher(ex_fnames=ex_fnames,
num_examples=self.num_train,
batch_size=self.batch_size)
# Get the next training batch.
iters_start = time.time()
for batch_doc_ids, batch_dict in epoch_batcher.next_batch():
self.model.train()
batch_start = time.time()
# Implemented according to:
# https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-
# manually-to-zero-in-pytorch/4903/20
# With this implementation the final partial accumulation may not trigger an optimizer
# step, which is accepted as a minor loss.
if self.accumulate_gradients:
# Compute objective.
ret_dict = self.model.forward(batch_dict=batch_dict)
objective = self.compute_loss(loss_components=ret_dict)
# Gradients wrt the parameters.
objective.backward()
if (self.iteration + 1) % self.update_params_every == 0:
self.optimizer.step()
self.optimizer.zero_grad()
else:
# Clear all gradient buffers.
self.optimizer.zero_grad()
# Compute objective.
ret_dict = self.model.forward(batch_dict=batch_dict)
objective = self.compute_loss(loss_components=ret_dict)
# Gradients wrt the parameters.
objective.backward()
# Step in the direction of the gradient.
self.optimizer.step()
if self.iteration % self.log_every == 0:
# Save every loss component separately.
loss_str = []
for key in ret_dict:
if torch.cuda.is_available():
loss_comp = float(ret_dict[key].data.cpu().numpy())
else:
loss_comp = float(ret_dict[key].data.numpy())
self.loss_history[key].append(loss_comp)
loss_str.append('{:s}: {:.4f}'.format(key, loss_comp))
self.loss_checked_iters.append(self.iteration)
if self.verbose:
log_str = 'Epoch: {:d}; Iteration: {:d}/{:d}; '.format(epoch, self.iteration, self.total_iters)
logging.info(log_str + '; '.join(loss_str))
elif self.verbose:
logging.info('Epoch: {:d}; Iteration: {:d}/{:d}'.
format(epoch, self.iteration, self.total_iters))
# The decay_lr_every doesn't need to be a multiple of self.log_every
if self.iteration > 0 and self.iteration % self.decay_lr_every == 0:
self.scheduler.step()
# logging.info('Decayed learning rates: {}'.
# format([g['lr'] for g in self.optimizer.param_groups]))
batch_end = time.time()
total_time_per_batch += batch_end-batch_start
# Check every few iterations how you're doing on the dev set.
if self.iteration % self.es_check_every == 0 and self.iteration != 0 and self.early_stop:
# Save the loss at this point too.
for key in ret_dict:
if torch.cuda.is_available():
loss_comp = float(ret_dict[key].data.cpu().numpy())
else:
loss_comp = float(ret_dict[key].data.numpy())
self.loss_history[key].append(loss_comp)
self.loss_checked_iters.append(self.iteration)
# Switch to eval model and check loss on dev set.
self.model.eval()
dev_start = time.time()
# Returns the dev F1.
if self.dev_score == 'f1':
dev_score = pu.batched_dev_scores(
model=self.model, batcher=self.batcher, batch_size=self.batch_size,
ex_fnames=self.dev_fnames, num_examples=self.num_dev)
elif self.dev_score == 'loss':
dev_score = -1.0 * pu.batched_loss(
model=self.model, batcher=self.batcher, batch_size=self.batch_size,
ex_fnames=self.dev_fnames, num_examples=self.num_dev,
loss_helper=self.loss_function_cal)
dev_end = time.time()
total_time_per_dev += dev_end-dev_start
self.dev_score_history.append(dev_score)
self.dev_checked_iters.append(self.iteration)
if dev_score > best_dev_score:
best_dev_score = dev_score
# Deep copy so you're not just getting a reference.
best_params = copy.deepcopy(self.model.state_dict())
best_epoch = epoch
best_iter = self.iteration
everything = (epoch, self.iteration, self.total_iters, dev_score)
if self.verbose:
logging.info('Current best model; Epoch {:d}; '
'Iteration {:d}/{:d}; Dev score: {:.4f}'.format(*everything))
self.save_function(model=self.model, save_path=self.model_path, model_suffix='cur_best')
else:
everything = (epoch, self.iteration, self.total_iters, dev_score)
if self.verbose:
logging.info('Epoch {:d}; Iteration {:d}/{:d}; Dev score: {:.4f}'.format(*everything))
self.iteration += 1
epoch_time = time.time()-iters_start
logging.info('Epoch {:d} time: {:.4f}s'.format(epoch, epoch_time))
logging.info('\n')
# Say how long things took.
train_time = time.time()-train_start
logging.info('Training time: {:.4f}s'.format(train_time))
if self.total_iters > 0:
self.time_per_batch = float(total_time_per_batch)/self.total_iters
else:
self.time_per_batch = 0.0
logging.info('Time per batch: {:.4f}s'.format(self.time_per_batch))
if self.early_stop and self.dev_score_history:
if len(self.dev_score_history) > 0:
self.time_per_dev_pass = float(total_time_per_dev) / len(self.dev_score_history)
else:
self.time_per_dev_pass = 0
logging.info('Time per dev pass: {:4f}s'.format(self.time_per_dev_pass))
# Save the learnt model: save both the final model and the best model.
# https://stackoverflow.com/a/43819235/3262406
self.save_function(model=self.model, save_path=self.model_path, model_suffix='final')
logging.info('Best model; Epoch {:d}; Iteration {:d}; Dev score: {:.4f}'
.format(best_epoch, best_iter, best_dev_score))
# self.model.load_state_dict(best_params)
# self.save_function(model=self.model, save_path=self.model_path, model_suffix='best')
# Plot training time stats.
for key in self.loss_history:
du.plot_train_hist(self.loss_history[key], self.loss_checked_iters,
fig_path=self.model_path, ylabel=key)
du.plot_train_hist(self.dev_score_history, self.dev_checked_iters,
fig_path=self.model_path, ylabel='Dev-set Score')
@staticmethod
def compute_loss(loss_components):
"""
Models will return dict with different loss components, use this and compute batch loss.
:param loss_components: dict('str': Variable)
:return:
"""
raise NotImplementedError
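# Illustrative example (values made up) of the train_hparams dict GenericTrainer expects;
# 'decay_lr_by' is only needed for the 'exponential' schedule and 'num_warmup_steps' only
# for the 'warmuplin'/'warmupcosine' schedules:
# train_hparams = {
#     'train_size': 100000, 'dev_size': 5000,
#     'batch_size': 16, 'accumulated_batch_size': 64,
#     'num_epochs': 2, 'update_rule': 'adam', 'learning_rate': 2e-5,
#     'es_check_every': 1000, 'lr_decay_method': 'warmuplin',
#     'num_warmup_steps': 1000, 'decay_lr_every': 1,
# }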
class BasicTrainer(GenericTrainer):
def __init__(self, model, data_path, batcher, train_size, dev_size,
batch_size, update_rule, num_epochs, learning_rate,
check_every, decay_lr_by, decay_lr_every, model_path, early_stop=True,
verbose=True, dev_score='f1'):
"""
Trainer for any model returning NLL. Uses everything from the
generic trainer but needs specification of how the loss components
should be put together.
:param data_path: string; directory with all the int mapped data.
"""
# Todo: Change trainer API.
raise NotImplementedError
GenericTrainer.__init__(self, model, batcher, train_size, dev_size,
batch_size, update_rule, num_epochs, learning_rate,
check_every, decay_lr_by, decay_lr_every, model_path,
early_stop, verbose, dev_score)
# Expect the presence of a directory with as many shuffled copies of the
# dataset as there are epochs and a negative examples file.
self.train_fnames = []
for i in range(self.num_epochs):
ex_fname = {
'pos_ex_fname': os.path.join(data_path, 'shuffled_data', 'train-{:d}.jsonl'.format(i)),
}
self.train_fnames.append(ex_fname)
self.dev_fnames = {
'pos_ex_fname': os.path.join(data_path, 'dev.jsonl'),
}
# Every subclass needs to set this.
self.loss_function_cal = BasicTrainer.compute_loss
@staticmethod
def compute_loss(loss_components):
"""
Simply add loss components.
:param loss_components: dict('nll': data likelihood)
:return: Variable.
"""
return loss_components['nll']
class BasicRankingTrainer(GenericTrainer):
def __init__(self, model, batcher, model_path, data_path,
train_hparams, early_stop=True, verbose=True, dev_score='loss'):
"""
Trainer for any model returning a ranking loss. Uses everything from the
generic trainer but needs specification of how the loss components
should be put together.
:param data_path: string; directory with all the int mapped data.
"""
GenericTrainer.__init__(self, model=model, batcher=batcher, model_path=model_path,
train_hparams=train_hparams, early_stop=early_stop, verbose=verbose,
dev_score=dev_score)
# Expect the presence of a directory with as many shuffled copies of the
# dataset as there are epochs and a negative examples file.
self.train_fnames = []
# Expect these to be there for the case of using diff kinds of training data for the same
# model; hard negatives models, different alignment models and so on.
if 'train_suffix' in train_hparams:
suffix = train_hparams['train_suffix']
train_basename = 'train-{:s}'.format(suffix)
dev_basename = 'dev-{:s}'.format(suffix)
else:
train_basename = 'train'
dev_basename = 'dev'
for i in range(self.num_epochs):
# Each run contains a copy of shuffled data for itself.
ex_fname = {
'pos_ex_fname': os.path.join(model_path, 'shuffled_data', '{:s}-{:d}.jsonl'.format(train_basename, i)),
}
self.train_fnames.append(ex_fname)
self.dev_fnames = {
'pos_ex_fname': os.path.join(data_path, '{:s}.jsonl'.format(dev_basename)),
}
# Every subclass needs to set this.
self.loss_function_cal = BasicRankingTrainer.compute_loss
@staticmethod
def compute_loss(loss_components):
"""
Simply add loss components.
:param loss_components: dict('rankl': rank loss value)
:return: Variable.
"""
return loss_components['rankl']
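# Illustrative sketch (hypothetical subclass, not part of this module) of how a trainer
# with more than one loss component would combine them in compute_loss; the 'absl' key
# and the 0.5 weight are made up:
# @staticmethod
# def compute_loss(loss_components):
#     return loss_components['rankl'] + 0.5 * loss_components['absl']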
def conditional_log(logger, process_rank, message):
"""
Helper to log only from one process when using DDP.
-- logger is entirely unused; it didn't seem to work when used in conjunction with cometml.
"""
if process_rank == 0:
print(message)
class GenericTrainerDDP:
# If this isn't set outside of here it crashes with: "got multiple values for argument"
# todo: Look into why this happens -- low priority.
save_function = generic_save_function_ddp
def __init__(self, logger, process_rank, num_gpus, model, batcher, model_path, train_hparams,
early_stop=True, verbose=True, dev_score='loss'):
"""
A generic trainer class that defines the training procedure. Trainers
for other models should subclass this and define the data that the models
being trained consume.
:param logger: a logger to write logs with.
:param process_rank: int; which process this is.
:param num_gpus: int; how many gpus are being used to train.
:param model: pytorch model.
:param batcher: a model_utils.Batcher class.
:param model_path: string; directory to which model should get saved.
:param early_stop: boolean;
:param verbose: boolean;
:param dev_score: string; {'loss'/'f1'} How dev set evaluation should be done.
# train_hparams dict elements.
:param train_size: int; number of training examples.
:param dev_size: int; number of dev examples.
:param batch_size: int; number of examples per batch.
:param accumulated_batch_size: int; number of examples to accumulate gradients
for in smaller batch size before computing the gradient. If this is not present
in the dictionary or is smaller than batch_size then assume no gradient
accumulation.
:param update_rule: string;
:param num_epochs: int; number of passes through the training data.
:param learning_rate: float;
:param es_check_every: int; check some metric on the dev set every check_every iterations.
:param lr_decay_method: string; {'exponential', 'warmuplin', 'warmupcosine'}
:param decay_lr_by: float; decay the learning rate exponentially by the following
factor.
:param num_warmup_steps: int; number of steps for which to do warm up.
:param decay_lr_every: int; decay learning rate every few iterations.
"""
# Book keeping
self.process_rank = process_rank
self.logger = logger
self.dev_score = dev_score
self.verbose = verbose
self.es_check_every = train_hparams['es_check_every']//num_gpus
self.num_dev = train_hparams['dev_size']
self.batch_size = train_hparams['batch_size']
self.num_epochs = train_hparams['num_epochs']
try:
self.accumulated_batch_size = train_hparams['accumulated_batch_size']
# You can set accumulated_batch_size to 0 or -1 and it will assume no grad accumulation.
if self.accumulated_batch_size > 0:
# It should be bigger and an exact multiple of the batch size.
assert(self.accumulated_batch_size > self.batch_size
and self.accumulated_batch_size % self.batch_size == 0)
self.accumulate_gradients = True
self.update_params_every = self.accumulated_batch_size/self.batch_size
conditional_log(logger, process_rank,
'Accumulating gradients for: {:}; updating params every: {:}; with batch size: {:}'
.format(self.accumulated_batch_size, self.update_params_every, self.batch_size))
else:
self.accumulate_gradients = False
except KeyError:
self.accumulate_gradients = False
self.model_path = model_path # Save model and checkpoints.
self.iteration = 0
# Model, batcher and the data.
self.model = model
self.batcher = batcher
self.time_per_batch = 0
self.time_per_dev_pass = 0
# Different trainer classes can add this based on the data that the model
# they are training needs.
self.train_fnames = []
self.dev_fnames = {}
# Optimizer args.
self.early_stop = early_stop
self.update_rule = train_hparams['update_rule']
self.learning_rate = train_hparams['learning_rate']
# Initialize optimizer.
if self.update_rule == 'adam':
self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
elif self.update_rule == 'adagrad':
self.optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
else:
raise ValueError('Unknown update rule: {:s}'.format(self.update_rule))
# Reduce the learning rate every few iterations.
self.lr_decay_method = train_hparams['lr_decay_method']
self.decay_lr_every = train_hparams['decay_lr_every']
self.log_every = 5
# Train statistics.
self.loss_history = defaultdict(list)
self.loss_checked_iters = []
self.dev_score_history = []
self.dev_checked_iters = []
# Every subclass needs to set this.
self.loss_function_cal = GenericTrainer.compute_loss
def train(self):
"""
Make num_epoch passes through the training set and train the model.
:return:
"""
# Pick the model with the least loss.
best_params = self.model.state_dict()
best_epoch, best_iter = 0, 0
best_dev_score = -np.inf
total_time_per_batch = 0
total_time_per_dev = 0
train_start = time.time()
conditional_log(self.logger, self.process_rank,
f'num_train: {self.num_train}; num_dev: {self.num_dev}')
conditional_log(self.logger, self.process_rank,
f'Training {self.num_epochs} epochs, {self.total_iters} iterations')
for epoch, ex_fnames in zip(range(self.num_epochs), self.train_fnames):
# Initialize batcher. Shuffle one time before the start of every
# epoch.
epoch_batcher = self.batcher(ex_fnames=ex_fnames,
num_examples=self.num_train,
batch_size=self.batch_size)
# Get the next training batch.
iters_start = time.time()
for batch_doc_ids, batch_dict in epoch_batcher.next_batch():
self.model.train()
batch_start = time.time()
# Implemented according to:
# https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-
# manually-to-zero-in-pytorch/4903/20
# With this implementation the final partial accumulation may not trigger an optimizer
# step, which is accepted as a minor loss.
if self.accumulate_gradients:
# Compute objective.
ret_dict = self.model.forward(batch_dict=batch_dict)
objective = self.compute_loss(loss_components=ret_dict)
# Gradients wrt the parameters.
objective.backward()
if (self.iteration + 1) % self.update_params_every == 0:
self.optimizer.step()
self.optimizer.zero_grad()
else:
# Clear all gradient buffers.
self.optimizer.zero_grad()
# Compute objective.
ret_dict = self.model.forward(batch_dict=batch_dict)
objective = self.compute_loss(loss_components=ret_dict)
# Gradients wrt the parameters.
objective.backward()
# Step in the direction of the gradient.
self.optimizer.step()
if self.iteration % self.log_every == 0:
# Save every loss component separately.
loss_str = []
for key in ret_dict:
if torch.cuda.is_available():
loss_comp = float(ret_dict[key].data.cpu().numpy())
else:
loss_comp = float(ret_dict[key].data.numpy())
self.loss_history[key].append(loss_comp)
loss_str.append('{:s}: {:.4f}'.format(key, loss_comp))
self.loss_checked_iters.append(self.iteration)
if self.verbose:
log_str = 'Epoch: {:d}; Iteration: {:d}/{:d}; '.format(epoch, self.iteration, self.total_iters)
conditional_log(self.logger, self.process_rank, log_str + '; '.join(loss_str))
elif self.verbose:
conditional_log(self.logger, self.process_rank,
f'Epoch: {epoch}; Iteration: {self.iteration}/{self.total_iters}')
# The decay_lr_every doesn't need to be a multiple of self.log_every
if self.iteration > 0 and self.iteration % self.decay_lr_every == 0:
self.scheduler.step()
batch_end = time.time()
total_time_per_batch += batch_end-batch_start
# Check every few iterations how you're doing on the dev set.
if self.iteration % self.es_check_every == 0 and self.iteration != 0 and self.early_stop\
and self.process_rank == 0:
# Save the loss at this point too.
for key in ret_dict:
if torch.cuda.is_available():
loss_comp = float(ret_dict[key].data.cpu().numpy())
else:
loss_comp = float(ret_dict[key].data.numpy())
self.loss_history[key].append(loss_comp)
self.loss_checked_iters.append(self.iteration)
# Switch to eval model and check loss on dev set.
self.model.eval()
dev_start = time.time()
# Using the module as it is for eval:
# https://discuss.pytorch.org/t/distributeddataparallel-
# barrier-doesnt-work-as-expected-during-evaluation/99867/11
dev_score = -1.0 * pu.batched_loss_ddp(
model=self.model.module, batcher=self.batcher, batch_size=self.batch_size,
ex_fnames=self.dev_fnames, num_examples=self.num_dev,
loss_helper=self.loss_function_cal, logger=self.logger)
dev_end = time.time()
total_time_per_dev += dev_end-dev_start
self.dev_score_history.append(dev_score)
self.dev_checked_iters.append(self.iteration)
if dev_score > best_dev_score:
best_dev_score = dev_score
# Deep copy so you're not just getting a reference.
best_params = copy.deepcopy(self.model.state_dict())
best_epoch = epoch
best_iter = self.iteration
everything = (epoch, self.iteration, self.total_iters, dev_score)
if self.verbose:
self.logger.info('Current best model; Epoch {:d}; '
'Iteration {:d}/{:d}; Dev score: {:.4f}'.format(*everything))
self.save_function(model=self.model, save_path=self.model_path, model_suffix='cur_best')
else:
everything = (epoch, self.iteration, self.total_iters, dev_score)
if self.verbose:
self.logger.info('Epoch {:d}; Iteration {:d}/{:d}; Dev score: {:.4f}'
.format(*everything))
dist.barrier()
self.iteration += 1
epoch_time = time.time()-iters_start
conditional_log(self.logger, self.process_rank, 'Epoch {:d} time: {:.4f}s'.format(epoch, epoch_time))
conditional_log(self.logger, self.process_rank, '\n')
# Say how long things took.
train_time = time.time()-train_start
conditional_log(self.logger, self.process_rank, 'Training time: {:.4f}s'.format(train_time))
if self.total_iters > 0:
self.time_per_batch = float(total_time_per_batch)/self.total_iters
else:
self.time_per_batch = 0.0
conditional_log(self.logger, self.process_rank, 'Time per batch: {:.4f}s'.format(self.time_per_batch))
if self.early_stop and self.dev_score_history:
if len(self.dev_score_history) > 0:
self.time_per_dev_pass = float(total_time_per_dev) / len(self.dev_score_history)
else:
self.time_per_dev_pass = 0
conditional_log(self.logger, self.process_rank, 'Time per dev pass: {:4f}s'.format(self.time_per_dev_pass))
# Save the learnt model: save both the final model and the best model.
# https://stackoverflow.com/a/43819235/3262406
if self.process_rank == 0:
self.save_function(model=self.model, save_path=self.model_path, model_suffix='final')
conditional_log(self.logger, self.process_rank, 'Best model; Epoch {:d}; Iteration {:d}; Dev score: {:.4f}'
.format(best_epoch, best_iter, best_dev_score))
# self.model.load_state_dict(best_params)
# self.save_function(model=self.model, save_path=self.model_path, model_suffix='best')
@staticmethod
def compute_loss(loss_components):
"""
Models will return dict with different loss components, use this and compute batch loss.
:param loss_components: dict('str': Variable)
:return:
"""
raise NotImplementedError
class BasicRankingTrainerDDP(GenericTrainerDDP):
def __init__(self, logger, process_rank, num_gpus, model, batcher, model_path, data_path,
train_hparams, early_stop=True, verbose=True, dev_score='loss'):
"""
Trainer for any model returning a ranking loss. Uses everything from the
generic trainer but needs specification of how the loss components
should be put together.
:param data_path: string; directory with all the int mapped data.
"""
GenericTrainerDDP.__init__(self, logger=logger, process_rank=process_rank, num_gpus=num_gpus,
model=model, batcher=batcher, model_path=model_path,
train_hparams=train_hparams, early_stop=early_stop, verbose=verbose,
dev_score=dev_score)
# Expect the presence of a directory with as many shuffled copies of the
# dataset as there are epochs and a negative examples file.
self.train_fnames = []
# Expect these to be there for the case of using diff kinds of training data for the same
# model; hard negatives models, different alignment models and so on.
if 'train_suffix' in train_hparams:
suffix = train_hparams['train_suffix']
train_basename = 'train-{:s}'.format(suffix)
dev_basename = 'dev-{:s}'.format(suffix)
else:
train_basename = 'train'
dev_basename = 'dev'
for i in range(self.num_epochs):
# Each run contains a copy of shuffled data for itself
# Each process gets a part of the data to consume in training.
ex_fname = {
'pos_ex_fname': os.path.join(model_path, 'shuffled_data', f'{train_basename}-{process_rank}-{i}.jsonl'),
}
self.train_fnames.append(ex_fname)
self.dev_fnames = {
'pos_ex_fname': os.path.join(data_path, '{:s}.jsonl'.format(dev_basename)),
}
# The split command in bash is asked to make exactly equal sized splits with
# the remainder in a final file which is unused
self.num_train = train_hparams['train_size']//num_gpus
if self.num_train > self.batch_size:
self.num_batches = int(np.ceil(float(self.num_train)/self.batch_size))
else:
self.num_batches = 1
self.total_iters = self.num_epochs*self.num_batches
if self.lr_decay_method == 'exponential':
self.decay_lr_by = train_hparams['decay_lr_by']
self.scheduler = optim.lr_scheduler.ExponentialLR(optimizer=self.optimizer,
gamma=self.decay_lr_by)
elif self.lr_decay_method == 'warmuplin':
self.num_warmup_steps = train_hparams['num_warmup_steps']//num_gpus
self.scheduler = transformers.get_linear_schedule_with_warmup(
optimizer=self.optimizer, num_warmup_steps=self.num_warmup_steps,
# Total number of training batches.
num_training_steps=self.num_epochs*self.num_batches)
elif self.lr_decay_method == 'warmupcosine':
self.num_warmup_steps = train_hparams['num_warmup_steps']//num_gpus
self.scheduler = transformers.get_cosine_schedule_with_warmup(
optimizer=self.optimizer, num_warmup_steps=self.num_warmup_steps,
num_training_steps=self.num_epochs*self.num_batches)
else:
raise ValueError('Unknown lr_decay_method: {:}'.format(train_hparams['lr_decay_method']))
# Every subclass needs to set this.
self.loss_function_cal = BasicRankingTrainer.compute_loss
@staticmethod
def compute_loss(loss_components):
"""
Simply add loss components.
:param loss_components: dict('rankl': rank loss value)
:return: Variable.
"""
return loss_components['rankl']
| aspire-main | src/learning/trainer.py |
| aspire-main | src/learning/facetid_models/__init__.py |
"""
Models which learn contextual sentence representations of paper abstracts.
"""
from collections import namedtuple
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import functional
from transformers import AutoModel
from . import pair_distances as pair_dist
from ..models_common import generic_layers as gl
rep_len_tup = namedtuple('RepLen', ['embed', 'abs_lens'])
cf_rep_len_tup = namedtuple('CFRepLen', ['embed', 'embed_cf', 'abs_lens'])
rep_len_ali_tup = namedtuple('RepLenAli', ['embed', 'abs_lens', 'align_idxs'])
rep_len_logits_tup = namedtuple('RepLenLogits', ['embed', 'abs_lens', 'sent_logits'])
rep_len_con_tup = namedtuple('RepLenAli', ['embed', 'abs_lens', 'align_reps', 'align_num'])
rep_len_distr_tup = namedtuple('RepLenDistr', ['embed', 'abs_lens', 'q2cc_sims', 'c2cc_sims'])
cf_rep_len_con_tup = namedtuple('CFRepLenAli', ['embed', 'embed_cf', 'abs_lens', 'align_reps', 'align_num'])
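# These lightweight containers bundle padded embedding tensors with per-example sentence
# counts (and, for some variants, alignment or distribution extras) so that the pairwise
# distance functions can mask out padding.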
class MySPECTER(nn.Module):
"""
Pass abstract through SciBERT all in one shot, read off cls token and use
it to compute similarities. This is an unfaceted model and is meant to
be similar to SPECTER in all aspects:
- triplet loss function
- only final layer cls bert representation
- no SEP tokens in between abstract sentences
"""
def __init__(self, model_hparams, bert_config=None):
"""
:param model_hparams: dict(string:int); model hyperparams.
num_code_vecs: int; number of code vectors to disentangle into.
The number of facets.
num_tf_heads: int; number of heads in the context transformer.
:param bert_config: transformers.configuration_bert.BertConfig; bert
hyperparam instance.
"""
torch.nn.Module.__init__(self)
self.bert_config = bert_config
self.bert_encoding_dim = 768 # bert_config.hidden_size or DistilBertConfig.dim
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(model_hparams['base-pt-layer'])
self.bert_encoder.config.output_hidden_states = True
# If fine tune is False then freeze the bert params.
if not model_hparams['fine_tune']:
for param in self.bert_encoder.base_model.parameters():
param.requires_grad = False
self.bert_layer_weights = gl.SoftmaxMixLayers(in_features=self.bert_layer_count, out_features=1, bias=False)
self.criterion = nn.TripletMarginLoss(margin=1, p=2, reduction='sum')
def caching_score(self, query_encode_ret_dict, cand_encode_ret_dicts):
"""
Called externally from a class using the trained model.
- Create as many repetitions of query_reps as cand_reps.
- Compute scores and return.
query_encode_ret_dict: {'sent_reps': numpy.array, 'doc_cls_reps': numpy.array}
cand_encode_ret_dict: list({'sent_reps': numpy.array, 'doc_cls_reps': numpy.array})
"""
# Pack representations as padded gpu tensors.
query_cls_rep = query_encode_ret_dict['doc_cls_reps']
cand_cls_reps = [d['doc_cls_reps'] for d in cand_encode_ret_dicts]
query_cls_reps = []
for bi in range(len(cand_cls_reps)):
query_cls_reps.append(query_cls_rep)
query_cls_reps, cand_cls_reps = Variable(torch.FloatTensor(np.vstack(query_cls_reps))), \
Variable(torch.FloatTensor(np.vstack(cand_cls_reps)))
if torch.cuda.is_available():
query_cls_reps = query_cls_reps.cuda()
cand_cls_reps = cand_cls_reps.cuda()
# Compute scores as at train time.
doc_sims = -1*functional.pairwise_distance(query_cls_reps, cand_cls_reps, p=2.0)
doc_sims = doc_sims.squeeze()
# Make numpy arrays and return.
if torch.cuda.is_available():
batch_scores = doc_sims.cpu().data.numpy()
else:
batch_scores = doc_sims.data.numpy()
# Return the same thing as batch_scores and pair_scores because the pp_gen_nearest class expects it.
ret_dict = {
'batch_scores': batch_scores,
'pair_scores': batch_scores
}
return ret_dict
def caching_encode(self, batch_dict):
"""
Function used at test time.
batch_dict: dict of the form accepted by forward_rank but without any of the
negative examples.
:return: ret_dict
"""
doc_bert_batch, batch_size = batch_dict['bert_batch'], len(batch_dict['bert_batch']['seq_lens'])
# Get the representations from the model; batch_size x encoding_dim
doc_cls_reps = self.partial_forward(bert_batch=doc_bert_batch)
# Make numpy arrays and return.
if torch.cuda.is_available():
doc_cls_reps = doc_cls_reps.cpu().data.numpy()
else:
doc_cls_reps = doc_cls_reps.data.numpy()
# Return a list of reps instead of reps collated as one np array.
batch_reps = []
for i in range(batch_size):
batch_reps.append({'doc_cls_reps': doc_cls_reps[i, :]})
return batch_reps
def encode(self, batch_dict):
"""
Function used at test time.
batch_dict: dict of the form accepted by forward_rank but without any of the
negative examples.
:return: ret_dict
"""
doc_bert_batch = batch_dict['bert_batch']
# Get the representations from the model.
doc_reps = self.partial_forward(bert_batch=doc_bert_batch)
# Make numpy arrays and return.
if torch.cuda.is_available():
doc_reps = doc_reps.cpu().data.numpy()
else:
doc_reps = doc_reps.data.numpy()
ret_dict = {
'doc_reps': doc_reps, # batch_size x encoding_dim
}
return ret_dict
def forward(self, batch_dict):
batch_loss = self.forward_rank(batch_dict['batch_rank'])
loss_dict = {
'rankl': batch_loss
}
return loss_dict
def forward_rank(self, batch_rank):
"""
Function used at training time.
batch_dict: dict of the form:
{
'query_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'pos_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from positive abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'neg_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
}
:return: loss_val; torch Variable.
"""
qbert_batch = batch_rank['query_bert_batch']
pbert_batch = batch_rank['pos_bert_batch']
# Get the representations from the model.
q_sent_reps = self.partial_forward(bert_batch=qbert_batch)
p_context_reps = self.partial_forward(bert_batch=pbert_batch)
# Happens when running on the dev set.
if 'neg_bert_batch' in batch_rank:
nbert_batch = batch_rank['neg_bert_batch']
n_context_reps = self.partial_forward(bert_batch=nbert_batch)
else:
# Use a shuffled set of positives as the negatives. -- in-batch negatives.
n_context_reps = p_context_reps[torch.randperm(p_context_reps.size()[0])]
loss_val = self.criterion(q_sent_reps, p_context_reps, n_context_reps)
return loss_val
def partial_forward(self, bert_batch):
"""
Function shared between the training and test time behaviour. Pass a batch
of sentences through BERT and return cls representations.
:return:
cls_doc_reps: batch_size x encoding_dim
"""
# batch_size x bert_encoding_dim
cls_doc_reps = self.doc_reps_bert(bert_batch=bert_batch)
if len(cls_doc_reps.size()) == 1:
cls_doc_reps = cls_doc_reps.unsqueeze(0)
return cls_doc_reps
def doc_reps_bert(self, bert_batch):
"""
Pass the concatenated abstract through BERT, weighted-combine the hidden states
across layers, and read off the [CLS] token rep as the document representation.
:param bert_batch: dict('tokid_tt', 'seg_tt', 'attnmask_tt', 'seq_lens'); items to use for getting BERT
representations. The sentence mapped to BERT vocab and appropriately padded.
:return:
doc_cls_reps: FloatTensor [batch_size x bert_encoding_dim]
"""
tokid_tt, seg_tt, attnmask_tt = bert_batch['tokid_tt'], bert_batch['seg_tt'], bert_batch['attnmask_tt']
if torch.cuda.is_available():
tokid_tt, seg_tt, attnmask_tt = tokid_tt.cuda(), seg_tt.cuda(), attnmask_tt.cuda()
# Pass input through BERT and return all layer hidden outputs.
model_outputs = self.bert_encoder(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
# Weighted combine the hidden_states which is a list of [bs x max_seq_len x bert_encoding_dim]
# with as many tensors as layers + 1 input layer.
hs_stacked = torch.stack(model_outputs.hidden_states, dim=3)
weighted_sum_hs = self.bert_layer_weights(hs_stacked) # [bs x max_seq_len x bert_encoding_dim x 1]
weighted_sum_hs = torch.squeeze(weighted_sum_hs, dim=3)
# Read off the CLS token as the document representation: (batch_size, sequence_length, hidden_size)
cls_doc_reps = weighted_sum_hs[:, 0, :]
cls_doc_reps = cls_doc_reps.squeeze()
return cls_doc_reps
class WordSentAlignBiEnc(MySPECTER):
"""
- Pass abstracts through Transformer LM, get contextualized sentence reps.
(sentence reps are obtained by averaging contextual word embeddings)
- Compute pairwise sentence similarities for query and candidate.
- Maximize maximum similarity of anchor and positive.
"""
def __init__(self, model_hparams, bert_config=None):
"""
:param model_hparams: dict(string:int); model hyperparams.
num_code_vecs: int; number of code vectors to disentangle into.
The number of facets.
num_tf_heads: int; number of heads in the context transformer.
:param bert_config: transformers.configuration_bert.BertConfig; bert
hyperparam instance.
"""
torch.nn.Module.__init__(self)
self.bert_config = bert_config
self.bert_encoding_dim = 768 # bert_config.hidden_size or DistilBertConfig.dim
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(model_hparams['base-pt-layer'])
self.bert_encoder.config.output_hidden_states = True
# If fine tune is False then freeze the bert params.
if not model_hparams['fine_tune']:
for param in self.bert_encoder.base_model.parameters():
param.requires_grad = False
self.score_agg_type = model_hparams['score_aggregation']
if self.score_agg_type == 'l2max':
self.dist_function = pair_dist.allpair_masked_dist_l2max
elif self.score_agg_type == 'l2top2':
self.dist_function = pair_dist.allpair_masked_dist_l2topk
elif self.score_agg_type == 'l2wasserstein':
ot_distance = pair_dist.AllPairMaskedWasserstein(model_hparams)
self.dist_function = ot_distance.compute_distance
elif self.score_agg_type == 'l2attention':
ot_distance = pair_dist.AllPairMaskedAttention(model_hparams)
self.dist_function = ot_distance.compute_distance
else:
raise ValueError(f'Unknown aggregation: {self.score_agg_type}')
# Not using the random weights because they'll spoil initial alignments.
# self.bert_layer_weights = gl.SoftmaxMixLayers(in_features=self.bert_layer_count, out_features=1, bias=False)
self.criterion = nn.TripletMarginWithDistanceLoss(distance_function=self.dist_function,
margin=1.0, reduction='sum')
self.cd_svalue_l1_prop = float(model_hparams.get('cd_svalue_l1_prop', 0.0))
self.sent_loss_prop = 1.0
self.abs_loss_prop = 0.0
def caching_score(self, query_encode_ret_dict, cand_encode_ret_dicts):
"""
Called externally from a class using the trained model.
- Create as many repetitions of query_reps as cand_reps.
- Pad candidate reps to max length.
- Compute scores and return.
query_encode_ret_dict: {'sent_reps': numpy.array, 'doc_cls_reps': numpy.array}
cand_encode_ret_dict: list({'sent_reps': numpy.array, 'doc_cls_reps': numpy.array})
"""
# Pack representations as padded gpu tensors.
query_cls_rep, query_sent_reps = query_encode_ret_dict['doc_cls_reps'], query_encode_ret_dict['sent_reps']
cand_cls_reps = [d['doc_cls_reps'] for d in cand_encode_ret_dicts]
cand_sent_reps = [d['sent_reps'] for d in cand_encode_ret_dicts]
batch_size = len(cand_sent_reps)
cand_lens = [r.shape[0] for r in cand_sent_reps]
cmax_sents = max(cand_lens)
qmax_sents, encoding_dim = query_sent_reps.shape[0], query_sent_reps.shape[1]
query_lens = [qmax_sents]*batch_size
padded_cand_sent_reps = np.zeros((batch_size, cmax_sents, encoding_dim))
padded_query_sent_reps = np.zeros((batch_size, qmax_sents, encoding_dim))
query_cls_reps = []
for bi, ex_reps in enumerate(cand_sent_reps):
padded_cand_sent_reps[bi, :cand_lens[bi], :] = ex_reps
# Just repeat the query sents for now.
padded_query_sent_reps[bi, :qmax_sents, :] = query_sent_reps
query_cls_reps.append(query_cls_rep)
padded_query_sent_reps = Variable(torch.FloatTensor(padded_query_sent_reps))
padded_cand_sent_reps = Variable(torch.FloatTensor(padded_cand_sent_reps))
query_cls_reps, cand_cls_reps = Variable(torch.FloatTensor(np.vstack(query_cls_reps))), \
Variable(torch.FloatTensor(np.vstack(cand_cls_reps)))
if torch.cuda.is_available():
padded_query_sent_reps = padded_query_sent_reps.cuda()
padded_cand_sent_reps = padded_cand_sent_reps.cuda()
query_cls_reps = query_cls_reps.cuda()
cand_cls_reps = cand_cls_reps.cuda()
# Compute scores as at train time.
qt = rep_len_tup(embed=padded_query_sent_reps.permute(0, 2, 1), abs_lens=query_lens)
ct = rep_len_tup(embed=padded_cand_sent_reps.permute(0, 2, 1), abs_lens=cand_lens)
if self.score_agg_type in {'l2lse'}:
batch_sent_sims, pair_sims = pair_dist.allpair_masked_dist_l2max(query=qt, cand=ct, return_pair_sims=True)
else:
batch_sent_sims, pair_sims = self.dist_function(query=qt, cand=ct, return_pair_sims=True)
# In the case of WordSentAbsSupAlignBiEnc which also uses this function if sent_loss_prop is zero
# use the supervised sent prop instead.
try:
sent_loss_prop = max(self.sent_loss_prop, self.sentsup_loss_prop)
except AttributeError:
sent_loss_prop = self.sent_loss_prop
batch_scores = sent_loss_prop*batch_sent_sims
if self.abs_loss_prop > 0.0:
batch_doc_sims = -1*functional.pairwise_distance(query_cls_reps, cand_cls_reps, p=2.0)
batch_scores += self.abs_loss_prop*batch_doc_sims
# Make numpy arrays and return.
if torch.cuda.is_available():
batch_scores = batch_scores.cpu().data.numpy()
if isinstance(pair_sims, list):
pair_sims = [t.cpu().data.numpy() for t in pair_sims]
else:
pair_sims = pair_sims.cpu().data.numpy()
else:
batch_scores = batch_scores.data.numpy()
if isinstance(pair_sims, list):
pair_sims = [t.data.numpy() for t in pair_sims]
else:
pair_sims = pair_sims.data.numpy()
unpadded_pair_sm = []
for i, (clen, qlen) in enumerate(zip(cand_lens, query_lens)):
# Happens in the case of wasserstein distance.
if len(pair_sims) == 5:
upsm = [pair_sims[0][i, :qlen], pair_sims[1][i, :clen],
pair_sims[2][i, :qlen, :clen], pair_sims[3][i, :qlen, :clen],
pair_sims[4][i, :qlen, :clen]]
# Happens in the case of attention distance.
elif len(pair_sims) == 3:
upsm = [pair_sims[0][i, :qlen, :clen], pair_sims[1][i, :qlen, :clen],
pair_sims[2][i, :qlen, :clen]]
else:
# encoding_dim x num_sents
upsm = pair_sims[i, :qlen, :clen]
# return: # num_sents x encoding_dim
unpadded_pair_sm.append(upsm)
ret_dict = {
'batch_scores': batch_scores,
'pair_scores': unpadded_pair_sm
}
return ret_dict
def caching_encode(self, batch_dict):
"""
Function used at test time.
batch_dict: dict of the form accepted by forward_rank but without any of the
negative examples.
:return: ret_dict
"""
doc_bert_batch, doc_abs_lens = batch_dict['bert_batch'], batch_dict['abs_lens']
doc_query_senttoki = batch_dict['senttok_idxs']
# Get the representations from the model; batch_size x encoding_dim x max_sents
doc_cls_reps, sent_reps = self.partial_forward(bert_batch=doc_bert_batch, abs_lens=doc_abs_lens,
sent_tok_idxs=doc_query_senttoki)
# Make numpy arrays and return.
if torch.cuda.is_available():
sent_reps = sent_reps.cpu().data.numpy()
doc_cls_reps = doc_cls_reps.cpu().data.numpy()
else:
sent_reps = sent_reps.data.numpy()
doc_cls_reps = doc_cls_reps.data.numpy()
# Return a list of reps instead of reps collated as one np array.
batch_reps = []
for i, num_sents in enumerate(doc_abs_lens):
# encoding_dim x num_sents
upsr = sent_reps[i, :, :num_sents]
# return: # num_sents x encoding_dim
batch_reps.append({'doc_cls_reps': doc_cls_reps[i, :],
'sent_reps': upsr.transpose(1, 0)})
return batch_reps
def encode(self, batch_dict):
"""
Function used at test time.
batch_dict: dict of the form accepted by forward_rank but without any of the
negative examples.
:return: ret_dict
"""
doc_bert_batch, doc_abs_lens = batch_dict['bert_batch'], batch_dict['abs_lens']
doc_query_senttoki = batch_dict['senttok_idxs']
# Get the representations from the model; batch_size x encoding_dim x max_sents
sent_reps = self.partial_forward(bert_batch=doc_bert_batch, abs_lens=doc_abs_lens,
sent_tok_idxs=doc_query_senttoki)
# Make numpy arrays and return.
if torch.cuda.is_available():
sent_reps = sent_reps.cpu().data.numpy()
else:
sent_reps = sent_reps.data.numpy()
unpadded_sent_reps = []
for i, num_sents in enumerate(doc_abs_lens):
# encoding_dim x num_sents
upsr = sent_reps[i, :, :num_sents]
# return: # num_sents x encoding_dim
unpadded_sent_reps.append(upsr.transpose(1, 0))
ret_dict = {
'sent_reps': unpadded_sent_reps,
}
return ret_dict
def forward_rank(self, batch_rank):
"""
Function used at training time.
batch_dict: dict of the form:
{
'query_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'query_abs_lens': list(int); Number of sentences in query abs.
'query_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'pos_abs_lens': list(int);
'pos_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from positive abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'pos_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'neg_bert_batch': dict(); The batch which BERT inputs with flattened and
concated sentences from query abstracts; Tokenized and int mapped
sentences and other inputs to BERT.
'neg_abs_lens': list(int);
'neg_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
}
:return: loss_val; torch Variable.
"""
qbert_batch, qabs_lens = batch_rank['query_bert_batch'], batch_rank['query_abs_lens']
pbert_batch, pabs_lens = batch_rank['pos_bert_batch'], batch_rank['pos_abs_lens']
query_senttoki, pos_senttoki = batch_rank['query_senttok_idxs'], batch_rank['pos_senttok_idxs']
# Get the representations from the model.
_, q_sent_reps = self.partial_forward(bert_batch=qbert_batch, abs_lens=qabs_lens, sent_tok_idxs=query_senttoki)
_, p_sent_reps = self.partial_forward(bert_batch=pbert_batch, abs_lens=pabs_lens, sent_tok_idxs=pos_senttoki)
# Happens when running on the dev set.
if 'neg_bert_batch' in batch_rank:
nbert_batch, nabs_lens = batch_rank['neg_bert_batch'], batch_rank['neg_abs_lens']
neg_senttoki = batch_rank['neg_senttok_idxs']
_, n_sent_reps = self.partial_forward(bert_batch=nbert_batch, abs_lens=nabs_lens, sent_tok_idxs=neg_senttoki)
# Bundle the lengths with the embeds so the similarity
# function can use the lens for masking.
query_sents = rep_len_tup(embed=q_sent_reps, abs_lens=qabs_lens)
pos_sents = rep_len_tup(embed=p_sent_reps, abs_lens=pabs_lens)
neg_sents = rep_len_tup(embed=n_sent_reps, abs_lens=nabs_lens)
loss_val = self.criterion(query_sents, pos_sents, neg_sents)
return loss_val
else:
# Use a shuffled set of positives as the negatives. -- in-batch negatives.
random_idxs = torch.randperm(p_sent_reps.size()[0])
n_sent_reps = p_sent_reps[random_idxs]
nabs_lens = [pabs_lens[i] for i in random_idxs.tolist()]
# Bundle the lengths with the embeds so the similarity
# function can use the lens for masking.
query_sents = rep_len_tup(embed=q_sent_reps, abs_lens=qabs_lens)
pos_sents = rep_len_tup(embed=p_sent_reps, abs_lens=pabs_lens)
neg_sents = rep_len_tup(embed=n_sent_reps, abs_lens=nabs_lens)
loss_val = self.criterion(query_sents, pos_sents, neg_sents)
# If asked to regularize the cross doc singular values, do so to make them more sparse.
if self.cd_svalue_l1_prop > 0:
# Pad values will be zeros.
pair_sims = -1*torch.cdist(q_sent_reps.permute(0, 2, 1), p_sent_reps.permute(0, 2, 1))
_, svalues, _ = torch.linalg.svd(pair_sims)
if len(svalues.size()) < 2:
svalues = svalues.unsqueeze(dim=0)
svalue_norm = torch.linalg.norm(svalues, ord=1, dim=1)
svalue_reg = torch.sum(svalue_norm)
loss_val += self.cd_svalue_l1_prop * svalue_reg
return loss_val
def partial_forward(self, bert_batch, abs_lens, sent_tok_idxs):
"""
Pass a batch of sentences through BERT and read off contextual sentence
representations by averaging each sentence's token embeddings.
:return:
sent_reps: batch_size x encoding_dim x num_sents
"""
# batch_size x num_sents x encoding_dim
doc_cls_reps, sent_reps = self.sent_reps_bert(bert_batch=bert_batch, num_sents=abs_lens,
batch_senttok_idxs=sent_tok_idxs)
if len(sent_reps.size()) == 2:
sent_reps = sent_reps.unsqueeze(0)
if len(doc_cls_reps.size()) == 1:
doc_cls_reps = doc_cls_reps.unsqueeze(0)
# Similarity function expects: batch_size x encoding_dim x q_max_sents;
return doc_cls_reps, sent_reps.permute(0, 2, 1)
def sent_reps_bert(self, bert_batch, batch_senttok_idxs, num_sents):
"""
Pass the concatenated abstract through BERT, and average token reps to get sentence reps.
-- NO weighted combine across layers.
:param bert_batch: dict('tokid_tt', 'seg_tt', 'attnmask_tt', 'seq_lens'); items to use for getting BERT
representations. The sentence mapped to BERT vocab and appropriately padded.
:param batch_senttok_idxs: list(list(list(int))); batch_size([num_sents_per_abs[num_tokens_in_sent]])
:param num_sents: list(int); number of sentences in each example in the batch passed.
:return:
doc_cls_reps: FloatTensor [batch_size x bert_encoding_dim]
sent_reps: FloatTensor [batch_size x num_sents x bert_encoding_dim]
"""
seq_lens = bert_batch['seq_lens']
batch_size, max_seq_len = len(seq_lens), max(seq_lens)
max_sents = max(num_sents)
tokid_tt, seg_tt, attnmask_tt = bert_batch['tokid_tt'], bert_batch['seg_tt'], bert_batch['attnmask_tt']
if torch.cuda.is_available():
tokid_tt, seg_tt, attnmask_tt = tokid_tt.cuda(), seg_tt.cuda(), attnmask_tt.cuda()
# Pass input through BERT and return all layer hidden outputs.
model_outputs = self.bert_encoder(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
final_hidden_state = model_outputs.last_hidden_state
# Read of CLS token as document representation.
doc_cls_reps = final_hidden_state[:, 0, :]
doc_cls_reps = doc_cls_reps.squeeze()
# Average token reps for every sentence to get sentence representations.
# Build the first sent for all batch examples, second sent ... and so on in each iteration below.
sent_reps = []
for sent_i in range(max_sents):
cur_sent_mask = np.zeros((batch_size, max_seq_len, self.bert_encoding_dim))
# Build a mask for the ith sentence for all the abstracts of the batch.
for batch_abs_i in range(batch_size):
abs_sent_idxs = batch_senttok_idxs[batch_abs_i]
try:
sent_i_tok_idxs = abs_sent_idxs[sent_i]
except IndexError: # This happens in the case where the abstract has fewer than max sents.
sent_i_tok_idxs = []
cur_sent_mask[batch_abs_i, sent_i_tok_idxs, :] = 1.0
sent_mask = Variable(torch.FloatTensor(cur_sent_mask))
if torch.cuda.is_available():
sent_mask = sent_mask.cuda()
# batch_size x seq_len x encoding_dim
sent_tokens = final_hidden_state * sent_mask
# The sent_masks non zero elements in one slice along embedding dim is the sentence length.
cur_sent_reps = torch.sum(sent_tokens, dim=1)/\
torch.count_nonzero(sent_mask[:, :, 0], dim=1).clamp(min=1).unsqueeze(dim=1)
sent_reps.append(cur_sent_reps.unsqueeze(dim=1))
# batch_size x max_sents x encoding_dim
sent_reps = torch.cat(sent_reps, dim=1)
return doc_cls_reps, sent_reps
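# Illustrative sketch (not used anywhere in this module) of the masked averaging trick
# sent_reps_bert uses above: multiply token embeddings by a 0/1 sentence mask, sum over
# the sequence, and divide by the token count. All shapes and values here are made up.
def _demo_masked_sentence_averaging():
    import torch
    batch_size, max_seq_len, dim = 2, 6, 4
    hidden = torch.randn(batch_size, max_seq_len, dim)
    mask = torch.zeros(batch_size, max_seq_len, dim)
    mask[0, 1:4, :] = 1.0  # tokens 1-3 form the sentence in example 0.
    mask[1, 1:3, :] = 1.0  # tokens 1-2 form the sentence in example 1.
    summed = torch.sum(hidden * mask, dim=1)
    counts = torch.count_nonzero(mask[:, :, 0], dim=1).clamp(min=1).unsqueeze(dim=1)
    return summed / counts  # batch_size x dim sentence representations.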
class WordSentAbsAlignBiEnc(WordSentAlignBiEnc):
"""
- Pass abstracts through Transformer LM, get contextualized sentence reps.
(sentence reps are obtained by averaging contextual word embeddings)
- Compute pairwise sentence similarities for query and candidate and whole doc rep.
- Maximize maximum similarity of anchor and positive.
- At test time caching encode and score are called externally on test data.
- Preferred class for all WordSentAlignBiEnc experiments too because
it can use the caching scorer.
"""
def __init__(self, model_hparams, bert_config=None):
"""
:param model_hparams: dict(string:int); model hyperparams.
num_code_vecs: int; number of code vectors to disentangle into.
The number of facets.
num_tf_heads: int; number of heads in the context transformer.
:param bert_config: transformers.configuration_bert.BertConfig; bert
hyperparam instance.
"""
torch.nn.Module.__init__(self)
self.bert_config = bert_config
self.bert_encoding_dim = 768 # bert_config.hidden_size or DistilBertConfig.dim
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(model_hparams['base-pt-layer'])
self.bert_encoder.config.output_hidden_states = True
# If fine tune is False then freeze the bert params.
if not model_hparams['fine_tune']:
for param in self.bert_encoder.base_model.parameters():
param.requires_grad = False
self.score_agg_type = model_hparams['score_aggregation']
if self.score_agg_type == 'l2max':
self.dist_function = pair_dist.allpair_masked_dist_l2max
elif self.score_agg_type == 'l2top2':
self.dist_function = pair_dist.allpair_masked_dist_l2topk
elif self.score_agg_type == 'l2wasserstein':
ot_distance = pair_dist.AllPairMaskedWasserstein(model_hparams)
self.dist_function = ot_distance.compute_distance
else:
raise ValueError(f'Unknown aggregation: {self.score_agg_type}')
# Not using the random weights because they'll spoil initial alignments.
# self.bert_layer_weights = gl.SoftmaxMixLayers(in_features=self.bert_layer_count, out_features=1, bias=False)
self.criterion_sent = nn.TripletMarginWithDistanceLoss(distance_function=self.dist_function,
margin=1.0, reduction='sum')
self.criterion_abs = nn.TripletMarginLoss(margin=1, p=2, reduction='sum')
self.abs_loss_prop = float(model_hparams['abs_loss_prop'])
self.sent_loss_prop = float(model_hparams['sent_loss_prop'])
self.cd_l1_prop = float(model_hparams.get('cd_l1_prop', 0.0))
def forward_rank(self, batch_rank):
"""
Function used at training time.
batch_dict: dict of the form:
{
                'query_bert_batch': dict(); The batch of BERT inputs with flattened and
                    concatenated sentences from query abstracts; tokenized and int-mapped
                    sentences and other inputs to BERT.
'query_abs_lens': list(int); Number of sentences in query abs.
'query_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'pos_abs_lens': list(int);
                'pos_bert_batch': dict(); The batch of BERT inputs with flattened and
                    concatenated sentences from positive abstracts; tokenized and int-mapped
                    sentences and other inputs to BERT.
'pos_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
                'neg_bert_batch': dict(); The batch of BERT inputs with flattened and
                    concatenated sentences from negative abstracts; tokenized and int-mapped
                    sentences and other inputs to BERT.
'neg_abs_lens': list(int);
'neg_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
}
:return: loss_val; torch Variable.
"""
qbert_batch, qabs_lens = batch_rank['query_bert_batch'], batch_rank['query_abs_lens']
pbert_batch, pabs_lens = batch_rank['pos_bert_batch'], batch_rank['pos_abs_lens']
query_senttoki, pos_senttoki = batch_rank['query_senttok_idxs'], batch_rank['pos_senttok_idxs']
# Get the representations from the model.
q_cls_rep, q_sent_reps = self.partial_forward(bert_batch=qbert_batch, abs_lens=qabs_lens,
sent_tok_idxs=query_senttoki)
p_cls_rep, p_sent_reps = self.partial_forward(bert_batch=pbert_batch, abs_lens=pabs_lens,
sent_tok_idxs=pos_senttoki)
        # Explicit negatives are provided when running on the dev set.
if 'neg_bert_batch' in batch_rank:
nbert_batch, nabs_lens = batch_rank['neg_bert_batch'], batch_rank['neg_abs_lens']
neg_senttoki = batch_rank['neg_senttok_idxs']
n_cls_reps, n_sent_reps = self.partial_forward(bert_batch=nbert_batch, abs_lens=nabs_lens,
sent_tok_idxs=neg_senttoki)
# Bundle the lengths with the embeds so the similarity
# function can use the lens for masking.
query_sents = rep_len_tup(embed=q_sent_reps, abs_lens=qabs_lens)
pos_sents = rep_len_tup(embed=p_sent_reps, abs_lens=pabs_lens)
neg_sents = rep_len_tup(embed=n_sent_reps, abs_lens=nabs_lens)
sent_loss_val = self.criterion_sent(query_sents, pos_sents, neg_sents)
abs_loss_val = self.criterion_abs(q_cls_rep, p_cls_rep, n_cls_reps)
loss_val = self.sent_loss_prop*sent_loss_val + self.abs_loss_prop*abs_loss_val
return loss_val
else:
# Use a shuffled set of positives as the negatives. -- in-batch negatives.
random_idxs = torch.randperm(p_sent_reps.size()[0])
n_sent_reps = p_sent_reps[random_idxs]
n_cls_reps = p_cls_rep[random_idxs]
nabs_lens = [pabs_lens[i] for i in random_idxs.tolist()]
# Bundle the lengths with the embeds so the similarity
# function can use the lens for masking.
query_sents = rep_len_tup(embed=q_sent_reps, abs_lens=qabs_lens)
pos_sents = rep_len_tup(embed=p_sent_reps, abs_lens=pabs_lens)
neg_sents = rep_len_tup(embed=n_sent_reps, abs_lens=nabs_lens)
sent_loss_val = self.criterion_sent(query_sents, pos_sents, neg_sents)
abs_loss_val = self.criterion_abs(q_cls_rep, p_cls_rep, n_cls_reps)
loss_val = self.sent_loss_prop*sent_loss_val + self.abs_loss_prop*abs_loss_val
# If asked to regularize the cross doc values, do so to make them more sparse.
if self.cd_l1_prop > 0:
# Pad values will be zeros.
pair_sims = -1*torch.cdist(q_sent_reps.permute(0, 2, 1), p_sent_reps.permute(0, 2, 1))
ef_batch_size, qmax_sents, cmax_sents = pair_sims.size()
sims_norm = torch.linalg.norm(pair_sims.view(ef_batch_size, qmax_sents*cmax_sents), ord=1, dim=1)
sims_reg = torch.sum(sims_norm)
loss_val += self.cd_l1_prop * sims_reg
return loss_val
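# Illustrative sketch added for exposition (not used by the model): how the in-batch
# negative shuffling in forward_rank above yields negatives, and how the optional
# cross-doc L1 term is formed. Argument shapes here are assumptions for the sketch only.
def _example_inbatch_negatives_and_l1(q_sent_reps, p_sent_reps):
    """q_sent_reps, p_sent_reps: batch_size x encoding_dim x max_sents tensors."""
    import torch
    # A random permutation of the positives serves as the negatives for each query.
    random_idxs = torch.randperm(p_sent_reps.size(0))
    n_sent_reps = p_sent_reps[random_idxs]
    # Cross-doc similarity matrix: batch_size x q_max_sents x c_max_sents.
    pair_sims = -1*torch.cdist(q_sent_reps.permute(0, 2, 1), p_sent_reps.permute(0, 2, 1))
    bsz, qmax, cmax = pair_sims.size()
    # L1 norm of the flattened similarity matrix encourages sparser alignments.
    l1_reg = torch.linalg.norm(pair_sims.view(bsz, qmax*cmax), ord=1, dim=1).sum()
    return n_sent_reps, l1_reg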
class WordSentAbsSupAlignBiEnc(WordSentAbsAlignBiEnc):
"""
- Pass abstracts through Transformer LM, get contextualized sentence reps.
(sentence reps are obtained by averaging contextual word embeddings)
- Compute pairwise sentence similarities for query and candidate.
- Maximize maximum similarity of anchor and positive:
using a sentence alignment loss, using whole abstract loss, and using
        pre-computed alignments (based on co-citation contexts)
"""
def __init__(self, model_hparams, bert_config=None):
"""
:param model_hparams: dict(string:int); model hyperparams.
num_code_vecs: int; number of code vectors to disentangle into.
The number of facets.
num_tf_heads: int; number of heads in the context transformer.
:param bert_config: transformers.configuration_bert.BertConfig; bert
hyperparam instance.
"""
torch.nn.Module.__init__(self)
self.bert_config = bert_config
self.bert_encoding_dim = 768 # bert_config.hidden_size or DistilBertConfig.dim
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(model_hparams['base-pt-layer'])
self.bert_encoder.config.output_hidden_states = True
# If fine tune is False then freeze the bert params.
if not model_hparams['fine_tune']:
for param in self.bert_encoder.base_model.parameters():
param.requires_grad = False
self.score_agg_type = model_hparams['score_aggregation']
if self.score_agg_type == 'l2max':
self.dist_function = pair_dist.allpair_masked_dist_l2max
elif self.score_agg_type == 'l2top2':
self.dist_function = pair_dist.allpair_masked_dist_l2topk
elif self.score_agg_type == 'l2wasserstein':
ot_distance = pair_dist.AllPairMaskedWasserstein(model_hparams)
self.dist_function = ot_distance.compute_distance
else:
raise ValueError(f'Unknown aggregation: {self.score_agg_type}')
# Use multi instance sentence alignment, supervised sentence alignment,
# and the abs similarity for supervision.
weighted_sup = model_hparams.get('weighted_sup', False)
if weighted_sup:
self.criterion_sentsup = nn.TripletMarginWithDistanceLoss(
distance_function=pair_dist.allpair_masked_dist_l2sup_weighted, margin=1.0, reduction='sum')
else:
self.criterion_sentsup = nn.TripletMarginWithDistanceLoss(
distance_function=pair_dist.allpair_masked_dist_l2sup, margin=1.0, reduction='sum')
self.criterion_sent = nn.TripletMarginWithDistanceLoss(distance_function=self.dist_function,
margin=1.0, reduction='sum')
self.criterion_abs = nn.TripletMarginLoss(margin=1, p=2, reduction='sum')
self.abs_loss_prop = float(model_hparams.get('abs_loss_prop', 0.0))
self.sent_loss_prop = float(model_hparams.get('sent_loss_prop', 0.0))
self.sentsup_loss_prop = float(model_hparams['sentsup_loss_prop'])
self.cd_svalue_l1_prop = float(model_hparams.get('cd_svalue_l1_prop', 0.0))
def encode(self, batch_dict):
"""
Function used at test time.
- This is used when using only the sentence embeddings for score computation.
        - When using more complex scoring use the caching_score and caching_encode methods
from the parent class.
batch_dict: dict of the form accepted by forward_rank but without any of the
negative examples.
:return: ret_dict
"""
doc_bert_batch, doc_abs_lens = batch_dict['bert_batch'], batch_dict['abs_lens']
doc_query_senttoki = batch_dict['senttok_idxs']
# Get the representations from the model; batch_size x encoding_dim x max_sents
cls_reps, sent_reps = self.partial_forward(bert_batch=doc_bert_batch, abs_lens=doc_abs_lens,
sent_tok_idxs=doc_query_senttoki)
# Make numpy arrays and return.
if torch.cuda.is_available():
sent_reps = sent_reps.cpu().data.numpy()
else:
sent_reps = sent_reps.data.numpy()
unpadded_sent_reps = []
for i, num_sents in enumerate(doc_abs_lens):
# encoding_dim x num_sents
upsr = sent_reps[i, :, :num_sents]
# return: # num_sents x encoding_dim
unpadded_sent_reps.append(upsr.transpose(1, 0))
ret_dict = {
'sent_reps': unpadded_sent_reps,
}
return ret_dict
def forward_rank(self, batch_rank):
"""
Function used at training time.
batch_dict: dict of the form:
{
                'query_bert_batch': dict(); The batch of BERT inputs with flattened and
                    concatenated sentences from query abstracts; tokenized and int-mapped
                    sentences and other inputs to BERT.
'query_abs_lens': list(int); Number of sentences in query abs.
'query_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
'pos_abs_lens': list(int);
                'pos_bert_batch': dict(); The batch of BERT inputs with flattened and
                    concatenated sentences from positive abstracts; tokenized and int-mapped
                    sentences and other inputs to BERT.
'pos_align_idxs': list([int int]); query align sent idx, cand align sent idx
'pos_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
                'neg_bert_batch': dict(); The batch of BERT inputs with flattened and
                    concatenated sentences from negative abstracts; tokenized and int-mapped
                    sentences and other inputs to BERT.
'neg_abs_lens': list(int);
'neg_align_idxs': list([int int]); query align sent idx, cand align sent idx
'neg_senttoki': list(list(list(int))); batch_size(num_abs_sents(
num_sent_tokens(ints)))
}
:return: loss_val; torch Variable.
"""
qbert_batch, qabs_lens = batch_rank['query_bert_batch'], batch_rank['query_abs_lens']
pbert_batch, pabs_lens = batch_rank['pos_bert_batch'], batch_rank['pos_abs_lens']
query_senttoki, pos_senttoki = batch_rank['query_senttok_idxs'], batch_rank['pos_senttok_idxs']
pos_align_idxs = batch_rank['pos_align_idxs']
# Get the representations from the model.
qu_cls_rep, qu_sent_reps = self.partial_forward(bert_batch=qbert_batch, abs_lens=qabs_lens,
sent_tok_idxs=query_senttoki)
pos_cls_rep, pos_sent_reps = self.partial_forward(bert_batch=pbert_batch, abs_lens=pabs_lens,
sent_tok_idxs=pos_senttoki)
        # Explicit negatives are provided when running on the dev set.
if 'neg_bert_batch' in batch_rank:
nbert_batch, nabs_lens = batch_rank['neg_bert_batch'], batch_rank['neg_abs_lens']
neg_senttoki = batch_rank['neg_senttok_idxs']
ne_cls_reps, ne_sent_reps = self.partial_forward(bert_batch=nbert_batch, abs_lens=nabs_lens,
sent_tok_idxs=neg_senttoki)
query_sents = rep_len_tup(embed=qu_sent_reps, abs_lens=qabs_lens)
pos_sents = rep_len_tup(embed=pos_sent_reps, abs_lens=pabs_lens)
neg_sents = rep_len_tup(embed=ne_sent_reps, abs_lens=nabs_lens)
            # The dev set is scored with model "predictions", not the pre-computed alignments (those can be noisy!).
loss_val = self.criterion_sent(query_sents, pos_sents, neg_sents)
if self.abs_loss_prop > 0:
abs_loss_val = self.criterion_abs(qu_cls_rep, pos_cls_rep, ne_cls_reps)
loss_val += self.abs_loss_prop*abs_loss_val
return loss_val
else:
# Use a shuffled set of positives as the negatives. -- in-batch negatives.
random_idxs = torch.randperm(pos_sent_reps.size()[0])
ne_sent_reps = pos_sent_reps[random_idxs]
ne_cls_reps = pos_cls_rep[random_idxs]
nabs_lens = [pabs_lens[i] for i in random_idxs.tolist()]
neg_align_idxs = [pos_align_idxs[i] for i in random_idxs.tolist()]
# Bundle the lengths with the embeds so the similarity
# function can use the lens for masking.
query_sents = rep_len_tup(embed=qu_sent_reps, abs_lens=qabs_lens)
pos_sents = rep_len_tup(embed=pos_sent_reps, abs_lens=pabs_lens)
neg_sents = rep_len_tup(embed=ne_sent_reps, abs_lens=nabs_lens)
pos_sents_ali = rep_len_ali_tup(embed=pos_sent_reps, abs_lens=pabs_lens, align_idxs=pos_align_idxs)
neg_sents_ali = rep_len_ali_tup(embed=ne_sent_reps, abs_lens=nabs_lens, align_idxs=neg_align_idxs)
loss_val = self.sentsup_loss_prop*self.criterion_sentsup(query_sents, pos_sents_ali, neg_sents_ali)
if self.sent_loss_prop > 0:
sent_loss_val = self.criterion_sent(query_sents, pos_sents, neg_sents)
loss_val += self.sent_loss_prop*sent_loss_val
if self.abs_loss_prop > 0:
abs_loss_val = self.criterion_abs(qu_cls_rep, pos_cls_rep, ne_cls_reps)
loss_val += self.abs_loss_prop*abs_loss_val
# If asked to regularize the cross doc singular values, do so to make them more sparse.
if self.cd_svalue_l1_prop > 0:
# Pad values will be zeros.
pair_sims = -1*torch.cdist(qu_sent_reps.permute(0, 2, 1), pos_sent_reps.permute(0, 2, 1))
_, svalues, _ = torch.linalg.svd(pair_sims)
if len(svalues.size()) < 2:
svalues = svalues.unsqueeze(dim=0)
svalue_norm = torch.linalg.norm(svalues, ord=1, dim=1)
svalue_reg = torch.sum(svalue_norm)
loss_val += self.cd_svalue_l1_prop * svalue_reg
return loss_val
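# Illustrative sketch added for exposition: the singular-value L1 penalty used above acts
# like a nuclear-norm regulariser on the query-candidate similarity matrix, pushing it
# towards low rank. The helper below is a minimal sketch under assumed shapes, not part
# of the trained model.
def _example_svalue_l1_penalty(q_sent_reps, p_sent_reps):
    """q_sent_reps, p_sent_reps: batch_size x encoding_dim x max_sents tensors."""
    import torch
    pair_sims = -1*torch.cdist(q_sent_reps.permute(0, 2, 1), p_sent_reps.permute(0, 2, 1))
    # Singular values of every batch element's similarity matrix.
    _, svalues, _ = torch.linalg.svd(pair_sims)
    if svalues.dim() < 2:
        svalues = svalues.unsqueeze(0)
    # Penalising the L1 norm of the singular values shrinks most of them towards zero.
    return torch.linalg.norm(svalues, ord=1, dim=1).sum()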
class WordSentAlignPolyEnc(WordSentAlignBiEnc):
"""
- Pass abstracts through Transformer LM, get contextualized sentence reps.
(sentence reps are obtained by averaging contextual word embeddings)
- Compute pairwise sentence similarities for query and candidate using a mechanism similar
to the polyencoder applied to a pair docs setting.
- Maximize maximum similarity of anchor and positive.
"""
def __init__(self, model_hparams, bert_config=None):
"""
:param model_hparams: dict(string:int); model hyperparams.
num_code_vecs: int; number of code vectors to disentangle into.
The number of facets.
num_tf_heads: int; number of heads in the context transformer.
:param bert_config: transformers.configuration_bert.BertConfig; bert
hyperparam instance.
"""
torch.nn.Module.__init__(self)
self.bert_config = bert_config
self.bert_encoding_dim = 768 # bert_config.hidden_size or DistilBertConfig.dim
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(model_hparams['base-pt-layer'])
self.bert_encoder.config.output_hidden_states = True
# If fine tune is False then freeze the bert params.
if not model_hparams['fine_tune']:
for param in self.bert_encoder.base_model.parameters():
param.requires_grad = False
if model_hparams['score_aggregation'] == 'jointsm':
self.dist_function = pair_dist.allpair_joint_sm_negscore
else:
raise ValueError(f'Unknown aggregation: {model_hparams["score_aggregation"]}')
# Not using the random weights because they'll spoil initial alignments.
# self.bert_layer_weights = gl.SoftmaxMixLayers(in_features=self.bert_layer_count, out_features=1, bias=False)
self.criterion = nn.TripletMarginWithDistanceLoss(distance_function=self.dist_function,
margin=1.0, reduction='sum')
@staticmethod
def score(query_reps, cand_reps):
"""
Called externally from a class using the trained model.
- Create as many repetitions of query_reps as cand_reps.
- Pad candidate reps to max length.
- Compute scores and return.
query_reps: numpy.array; num_sents x encoding_dim.
cand_reps: list(numpy.array); batch_size(num_sents x encoding_dim)
"""
batch_size = len(cand_reps)
cand_lens = [r.shape[0] for r in cand_reps]
cmax_sents = max(cand_lens)
qmax_sents, encoding_dim = query_reps.shape[0], query_reps.shape[1]
query_lens = [qmax_sents]*batch_size
padded_cand_reps = np.zeros((batch_size, cmax_sents, encoding_dim))
padded_query_reps = np.zeros((batch_size, qmax_sents, encoding_dim))
for bi, ex_reps in enumerate(cand_reps):
padded_cand_reps[bi, :cand_lens[bi], :] = ex_reps
# Just repeat the query sents for now.
padded_query_reps[bi, :qmax_sents, :] = query_reps
padded_query_reps = Variable(torch.FloatTensor(padded_query_reps))
padded_cand_reps = Variable(torch.FloatTensor(padded_cand_reps))
if torch.cuda.is_available():
padded_query_reps = padded_query_reps.cuda()
padded_cand_reps = padded_cand_reps.cuda()
qt = rep_len_tup(embed=padded_query_reps.permute(0, 2, 1), abs_lens=query_lens)
ct = rep_len_tup(embed=padded_cand_reps.permute(0, 2, 1), abs_lens=cand_lens)
batch_scores, pair_sm = pair_dist.allpair_joint_sm_negscore(query=qt, cand=ct, return_pair_sims=True)
batch_scores = -1.0*batch_scores
# Make numpy arrays and return.
if torch.cuda.is_available():
batch_scores = batch_scores.cpu().data.numpy()
pair_sm = pair_sm.cpu().data.numpy()
else:
batch_scores = batch_scores.data.numpy()
pair_sm = pair_sm.data.numpy()
unpadded_pair_sm = []
for i, (clen, qlen) in enumerate(zip(cand_lens, query_lens)):
            # num_query_sents x num_cand_sents pairwise alignment scores for this pair.
            upsm = pair_sm[i, :qlen, :clen]
unpadded_pair_sm.append(upsm)
ret_dict = {
'batch_scores': batch_scores,
'pair_scores': unpadded_pair_sm
}
return ret_dict
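# Hedged usage sketch (illustration only, not called anywhere in the repo): how the static
# score method above might be invoked on cached numpy sentence reps. The sizes are
# illustrative assumptions.
def _example_polyenc_scoring():
    import numpy as np
    query_reps = np.random.rand(3, 768).astype('float32')    # 3 query sentences.
    cand_reps = [np.random.rand(4, 768).astype('float32'),   # candidate with 4 sentences.
                 np.random.rand(2, 768).astype('float32')]   # candidate with 2 sentences.
    ret = WordSentAlignPolyEnc.score(query_reps=query_reps, cand_reps=cand_reps)
    # ret['batch_scores'] holds one score per candidate; ret['pair_scores'] holds the
    # per-pair softmax alignments, unpadded to num_query_sents x num_cand_sents.
    return ret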
| aspire-main | src/learning/facetid_models/disent_models.py |
"""
Models which learn sentence representations.
Mostly wrappers around raw BERT models which are fine-tuned.
"""
import torch
from torch import nn as nn
from torch.autograd import Variable
from transformers import AutoModel
class SentBERTWrapper(nn.Module):
"""
    Pass sentences through the encoder and minimize a triplet loss
    with in-batch negatives.
"""
def __init__(self, model_name):
"""
"""
torch.nn.Module.__init__(self)
self.bert_encoding_dim = 768
self.sent_encoder = AutoModel.from_pretrained(model_name)
self.criterion = nn.TripletMarginLoss(margin=1, p=2, reduction='sum')
def forward(self, batch_dict):
batch_bpr = self.forward_rank(batch_dict['batch_rank'])
loss_dict = {
'rankl': batch_bpr
}
return loss_dict
def forward_rank(self, batch_rank):
"""
Function used at training time.
batch_dict: dict of the form:
{
            'query_bert_batch': dict(); The batch of BERT inputs with flattened and
                concatenated sentences from query abstracts; tokenized and int-mapped
                sentences and other inputs to BERT.
            'pos_bert_batch': dict(); The batch of BERT inputs with flattened and
                concatenated sentences from positive abstracts; tokenized and int-mapped
                sentences and other inputs to BERT.
}
:return: loss_val; torch Variable.
"""
qbert_batch = batch_rank['query_bert_batch']
pbert_batch = batch_rank['pos_bert_batch']
# Get the representations from the model.
q_sent_reps = self.sent_reps_bert(bert_model=self.sent_encoder, bert_batch=qbert_batch)
p_context_reps = self.sent_reps_bert(bert_model=self.sent_encoder, bert_batch=pbert_batch)
        # Explicit negatives are provided when running on the dev set.
if 'neg_bert_batch' in batch_rank:
nbert_batch = batch_rank['neg_bert_batch']
n_context_reps = self.sent_reps_bert(bert_model=self.sent_encoder, bert_batch=nbert_batch)
else:
# Use a shuffled set of positives as the negatives. -- in-batch negatives.
n_context_reps = p_context_reps[torch.randperm(p_context_reps.size()[0])]
loss_val = self.criterion(q_sent_reps, p_context_reps, n_context_reps)
return loss_val
@staticmethod
def sent_reps_bert(bert_model, bert_batch):
"""
        Pass the tokenized input through BERT and read off the [CLS] token rep
        as the single sequence representation (no combination across layers).
:param bert_model: torch.nn.Module subclass. A bert model.
:param bert_batch: dict('tokid_tt', 'seg_tt', 'attnmask_tt', 'seq_lens'); items to use for getting BERT
representations. The sentence mapped to BERT vocab and appropriately padded.
:return:
doc_cls_reps: FloatTensor [batch_size x bert_encoding_dim]
"""
tokid_tt, seg_tt, attnmask_tt = bert_batch['tokid_tt'], bert_batch['seg_tt'], bert_batch['attnmask_tt']
if torch.cuda.is_available():
tokid_tt, seg_tt, attnmask_tt = tokid_tt.cuda(), seg_tt.cuda(), attnmask_tt.cuda()
# Pass input through BERT and return all layer hidden outputs.
model_outputs = bert_model(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
cls_doc_reps = model_outputs.last_hidden_state[:, 0, :]
return cls_doc_reps.squeeze()
class ICTBERTWrapper(SentBERTWrapper):
"""
    Pass sentences and their contexts through separate encoders and minimize a
    cross-entropy loss over in-batch negatives (inverse cloze task style training).
"""
def __init__(self, model_name):
"""
"""
torch.nn.Module.__init__(self)
self.bert_encoding_dim = 768
self.sent_encoder = AutoModel.from_pretrained(model_name)
self.context_encoder = AutoModel.from_pretrained(model_name)
self.criterion = nn.CrossEntropyLoss(reduction='sum')
def forward_rank(self, batch_rank):
"""
Function used at training time.
batch_dict: dict of the form:
{
            'query_bert_batch': dict(); The batch of BERT inputs with flattened and
                concatenated sentences from query abstracts; tokenized and int-mapped
                sentences and other inputs to BERT.
            'pos_bert_batch': dict(); The batch of BERT inputs with flattened and
                concatenated sentences from positive abstracts; tokenized and int-mapped
                sentences and other inputs to BERT.
}
:return: loss_val; torch Variable.
"""
qbert_batch = batch_rank['query_bert_batch']
pbert_batch = batch_rank['pos_bert_batch']
# Get the representations from the model.
q_sent_reps = self.sent_reps_bert(bert_model=self.sent_encoder, bert_batch=qbert_batch)
p_context_reps = self.sent_reps_bert(bert_model=self.context_encoder, bert_batch=pbert_batch)
batch_size = q_sent_reps.size(0)
assert(q_sent_reps.size(1) == p_context_reps.size(1) == self.bert_encoding_dim)
# Get similarities from query sent reps to all contexts (non pos ones are inbatch negs).
dot_sims = torch.matmul(q_sent_reps, p_context_reps.T)
assert(dot_sims.size(0) == dot_sims.size(1) == batch_size)
# Correct context targets are just the corresponding ids for every element.
targets = torch.arange(batch_size)
targets = Variable(targets)
if torch.cuda.is_available():
targets = targets.cuda()
loss_val = self.criterion(dot_sims, targets)
return loss_val
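# Illustrative sketch added for exposition: the ICT loss above treats each (sentence,
# context) pair in the batch as the positive and every other context in the batch as a
# negative. A minimal sketch with assumed toy sizes:
def _example_ict_inbatch_loss():
    import torch
    from torch import nn
    batch_size, dim = 4, 8
    q_sent_reps = torch.randn(batch_size, dim)
    p_context_reps = torch.randn(batch_size, dim)
    # batch_size x batch_size similarity matrix; the diagonal holds the true pairs.
    dot_sims = torch.matmul(q_sent_reps, p_context_reps.T)
    targets = torch.arange(batch_size)
    return nn.CrossEntropyLoss(reduction='sum')(dot_sims, targets)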
| aspire-main | src/learning/facetid_models/sentsim_models.py |
"""
Functions for computing distances between documents with fine-grained representations.
"""
import math
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import functional
import geomloss
from ..models_common import activations
class AllPairMaskedWasserstein:
def __init__(self, model_hparams):
self.geoml_blur = model_hparams.get('geoml_blur', 0.05)
self.geoml_scaling = model_hparams.get('geoml_scaling', 0.9)
self.geoml_reach = model_hparams.get('geoml_reach', None)
self.sent_sm_temp = model_hparams.get('sent_sm_temp', 1.0)
def compute_distance(self, query, cand, return_pair_sims=False):
"""
        Given a set of query and candidate reps compute the Wasserstein distance between
the query and candidates.
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
            embed: batch_size x encoding_dim x c_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:return:
batch_sims: ef_batch_size; pooled pairwise _distances_ between
input reps. (distances are just negated similarities here)
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens = cand.embed, cand.abs_lens
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
pad_mask = np.ones((qef_batch_size, qmax_sents, cmax_sents))*-10e8
for i in range(qef_batch_size):
ql, cl = query_abs_lens[i], cand_abs_lens[i]
pad_mask[i, :ql, :cl] = 0.0
pad_mask = Variable(torch.FloatTensor(pad_mask))
if torch.cuda.is_available():
pad_mask = pad_mask.cuda()
assert (qef_batch_size == cef_batch_size)
# (effective) batch_size x qmax_sents x cmax_sents
# inputs are: batch_size x encoding_dim x c/qmax_sents so permute them.
neg_pair_dists = -1*torch.cdist(query_reps.permute(0, 2, 1).contiguous(),
cand_reps.permute(0, 2, 1).contiguous())
if len(neg_pair_dists.size()) == 2:
neg_pair_dists = neg_pair_dists.unsqueeze(0)
assert (neg_pair_dists.size(1) == qmax_sents)
assert (neg_pair_dists.size(2) == cmax_sents)
# Add very large negative values in the pad positions which will be zero.
neg_pair_dists = neg_pair_dists + pad_mask
q_max_sent_sims, _ = torch.max(neg_pair_dists, dim=2)
c_max_sent_sims, _ = torch.max(neg_pair_dists, dim=1)
query_distr = functional.log_softmax(q_max_sent_sims/self.sent_sm_temp, dim=1).exp()
cand_distr = functional.log_softmax(c_max_sent_sims/self.sent_sm_temp, dim=1).exp()
if return_pair_sims:
# This is only used at test time -- change the way the pad mask is changed in place
# if you want to use at train time too.
pad_mask[pad_mask == 0] = 1.0
pad_mask[pad_mask == -10e8] = 0.0
neg_pair_dists = neg_pair_dists * pad_mask
            # In geomloss, p=1 uses the plain (unsquared) L2 distance as the ground cost.
ot_solver = geomloss.SamplesLoss("sinkhorn", p=1, blur=self.geoml_blur, reach=self.geoml_reach,
scaling=self.geoml_scaling, debias=False, potentials=True)
# Input reps to solver need to be: batch_size x c/qmax_sents x encoding_dim
q_pot, c_pot = ot_solver(query_distr, query_reps.permute(0, 2, 1).contiguous(),
cand_distr, cand_reps.permute(0, 2, 1).contiguous())
# Implement the expression to compute the plan from the potentials:
# https://www.kernel-operations.io/geomloss/_auto_examples/optimal_transport/
# plot_optimal_transport_labels.html?highlight=plan#regularized-optimal-transport
outersum = q_pot.unsqueeze(dim=2).expand(-1, -1, cmax_sents) + \
c_pot.unsqueeze(dim=2).expand(-1, -1, qmax_sents).permute(0, 2, 1)
# Zero out the pad values because they seem to cause nans to occur.
outersum = outersum * pad_mask
exps = torch.exp(torch.div(outersum+neg_pair_dists, self.geoml_blur))
outerprod = torch.einsum('bi,bj->bij', query_distr, cand_distr)
transport_plan = exps*outerprod
pair_sims = neg_pair_dists
masked_sims = transport_plan*pair_sims
wasserstein_dists = torch.sum(torch.sum(masked_sims, dim=1), dim=1)
return wasserstein_dists, [query_distr, cand_distr, pair_sims, transport_plan, masked_sims]
else:
ot_solver_distance = geomloss.SamplesLoss("sinkhorn", p=1, blur=self.geoml_blur, reach=self.geoml_reach,
scaling=self.geoml_scaling, debias=False, potentials=False)
wasserstein_dists = ot_solver_distance(query_distr, query_reps.permute(0, 2, 1).contiguous(),
cand_distr, cand_reps.permute(0, 2, 1).contiguous())
return wasserstein_dists
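# Hedged usage sketch (illustration only; assumes geomloss is installed): calling the
# optimal transport distance above on toy padded reps. The namedtuple defined locally
# here mirrors the rep_len_tup used by the training code but is an assumption of this sketch.
def _example_wasserstein_distance():
    import collections
    import torch
    rep_len = collections.namedtuple('rep_len', ['embed', 'abs_lens'])
    batch_size, dim, max_sents = 2, 16, 5
    query = rep_len(embed=torch.randn(batch_size, dim, max_sents), abs_lens=[5, 3])
    cand = rep_len(embed=torch.randn(batch_size, dim, max_sents), abs_lens=[4, 5])
    ot_dist = AllPairMaskedWasserstein(model_hparams={})
    # Returns one pooled (negated-similarity) distance per batch element.
    return ot_dist.compute_distance(query=query, cand=cand)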
class AllPairMaskedAttention:
def __init__(self, model_hparams):
self.cdatt_sm_temp = model_hparams.get('cdatt_sm_temp', 1.0)
def compute_distance(self, query, cand, return_pair_sims=False):
"""
        Given a set of query and candidate reps compute an attention-weighted distance between
        the query and candidates.
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
            embed: batch_size x encoding_dim x c_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:return:
batch_sims: ef_batch_size; pooled pairwise _distances_ between
input reps. (distances are just negated similarities here)
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens = cand.embed, cand.abs_lens
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
assert (qef_batch_size == cef_batch_size)
# (effective) batch_size x qmax_sents x cmax_sents
# inputs are: batch_size x encoding_dim x c/qmax_sents so permute them.
neg_pair_dists = -1*torch.cdist(query_reps.permute(0, 2, 1).contiguous(),
cand_reps.permute(0, 2, 1).contiguous())
pair_softmax = activations.masked_2d_softmax(neg_pair_dists/self.cdatt_sm_temp,
target_lens1=query_abs_lens,
target_lens2=cand_abs_lens)
if return_pair_sims:
pair_sims = neg_pair_dists
masked_sims = pair_softmax*pair_sims
doc_sims = torch.sum(torch.sum(masked_sims, dim=1), dim=1)
return doc_sims, [pair_sims, pair_softmax, masked_sims]
else:
# Happens at train time.
pair_dists = -1*neg_pair_dists
masked_dists = pair_softmax*pair_dists
doc_dists = torch.sum(torch.sum(masked_dists, dim=1), dim=1)
return doc_dists
def allpair_masked_dist_l2max(query, cand, return_pair_sims=False):
"""
    Given a set of query and candidate reps compute L2 distances
    between all the query sentence reps and all the candidate sentence reps,
    then aggregate by keeping only the single closest pair (max over negated distances).
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
        embed: batch_size x encoding_dim x c_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:return:
batch_sims: ef_batch_size; pooled pairwise _distances_ between
input reps. (distances are just negated similarities here)
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens = cand.embed, cand.abs_lens
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
pad_mask = np.ones((qef_batch_size, qmax_sents, cmax_sents))*-10e8
for i in range(qef_batch_size):
ql, cl = query_abs_lens[i], cand_abs_lens[i]
pad_mask[i, :ql, :cl] = 0.0
pad_mask = Variable(torch.FloatTensor(pad_mask))
if torch.cuda.is_available():
pad_mask = pad_mask.cuda()
assert (qef_batch_size == cef_batch_size)
# (effective) batch_size x qmax_sents x cmax_sents
# inputs are: batch_size x encoding_dim x c/qmax_sents so permute them.
neg_pair_dists = -1*torch.cdist(query_reps.permute(0, 2, 1), cand_reps.permute(0, 2, 1))
if len(neg_pair_dists.size()) == 2:
neg_pair_dists = neg_pair_dists.unsqueeze(0)
assert (neg_pair_dists.size(1) == qmax_sents)
assert (neg_pair_dists.size(2) == cmax_sents)
# Add very large negative values in the pad positions which will be zero.
neg_pair_dists = neg_pair_dists + pad_mask
# Max across all the pairwise distances
# - because these are negative distances the smallest distance will be picked.
batch_dists, indices = torch.max(neg_pair_dists.view(qef_batch_size, qmax_sents*cmax_sents), dim=1)
# At test time return similarities which can be used for ranking.
    # Negation of L2 distance isn't a similarity.
if return_pair_sims:
# L2 distance to similarity: https://stats.stackexchange.com/a/53070/55807
batch_sims = batch_dists
pair_sims = neg_pair_dists
return batch_sims, pair_sims
# Return a positive distance - the smallest distance is minimized even further.
else:
return -1*batch_dists
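# Illustrative sketch added for exposition: the pad mask above pushes padded positions to
# a huge negative value so the max over all sentence pairs can never select a pad slot.
# Toy sizes below are assumptions for the sketch.
def _example_l2max_aggregation():
    import torch
    # Negated distances for 1 example, 2 query sents x 3 cand sents (third cand sent is padding).
    neg_dists = torch.tensor([[[-0.5, -2.0, 0.0],
                               [-1.0, -0.2, 0.0]]])
    pad_mask = torch.tensor([[[0.0, 0.0, -10e8],
                              [0.0, 0.0, -10e8]]])
    masked = neg_dists + pad_mask
    # Max over the flattened pair matrix picks the closest real pair: here -0.2.
    best, _ = torch.max(masked.view(1, -1), dim=1)
    return best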
def allpair_masked_dist_l2sup(query, cand):
"""
    Given a set of query and candidate reps compute L2 distances
    between all the query reps and all the candidate reps and return the distance
    of the (pre-)aligned pair of sentences.
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
        embed: batch_size x encoding_dim x c_max_sents;
abs_lens: list(int); number of sentences in every batch element.
align_idxs: list(int, int); alignment from query to cand)
:return:
batch_sims: ef_batch_size; pooled pairwise _distances_ between
input reps. (distances are just negated similarities here)
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens, cand_align_idxs = cand.embed, cand.abs_lens, cand.align_idxs
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
# pad_mask = np.ones((qef_batch_size, qmax_sents, cmax_sents))*-10e8
for i in range(qef_batch_size):
ql, cl = query_abs_lens[i], cand_abs_lens[i]
# pad_mask[i, :ql, :cl] = 0.0
        # If the index is beyond what is present in the q or c because of truncation then clip it.
cand_align_idxs[i][0] = min(cand_align_idxs[i][0], ql-1)
cand_align_idxs[i][1] = min(cand_align_idxs[i][1], cl-1)
# pad_mask = Variable(torch.FloatTensor(pad_mask))
cand_align_idxs = Variable(torch.LongTensor(cand_align_idxs))
if torch.cuda.is_available():
# pad_mask = pad_mask.cuda()
cand_align_idxs = cand_align_idxs.cuda()
assert (qef_batch_size == cef_batch_size)
# (effective) batch_size x qmax_sents x cmax_sents
# inputs are: batch_size x encoding_dim x c/qmax_sents so permute them.
pair_sims = -1*torch.cdist(query_reps.permute(0, 2, 1), cand_reps.permute(0, 2, 1))
if len(pair_sims.size()) == 2:
pair_sims = pair_sims.unsqueeze(0)
assert (pair_sims.size(1) == qmax_sents)
assert (pair_sims.size(2) == cmax_sents)
# Add very large negative values in the pad positions which will be zero.
# pair_sims = pair_sims + pad_mask
    # Read off the distances to minimize.
batch_sims = pair_sims[torch.arange(qef_batch_size), cand_align_idxs[torch.arange(qef_batch_size), 0],
cand_align_idxs[torch.arange(qef_batch_size), 1]]
# Return a distance instead of a similarity - so the smallest distance is minimized even further.
return -1*batch_sims
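# Illustrative sketch added for exposition: the batched indexing above reads one
# pre-aligned (query_sent, cand_sent) distance per example. A minimal sketch of that
# indexing pattern with assumed toy sizes:
def _example_aligned_pair_readoff():
    import torch
    batch_size, qmax, cmax = 2, 3, 4
    pair_sims = torch.randn(batch_size, qmax, cmax)
    # One (query_idx, cand_idx) alignment per example.
    align_idxs = torch.tensor([[0, 2],
                               [1, 3]])
    rows = torch.arange(batch_size)
    # picked[i] == pair_sims[i, align_idxs[i, 0], align_idxs[i, 1]]
    picked = pair_sims[rows, align_idxs[rows, 0], align_idxs[rows, 1]]
    return picked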
def allpair_masked_dist_l2sup_weighted(query, cand):
"""
    Given a set of query and candidate reps compute L2 distances
    between all the query reps and all the candidate reps and return the distance
    of the (pre-)aligned pair of sentences.
- Also weight the distances by the number of values in the cross-doc sim matrix.
- This is for use in multi tasking with the OT loss.
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
        embed: batch_size x encoding_dim x c_max_sents;
abs_lens: list(int); number of sentences in every batch element.
align_idxs: list(int, int); alignment from query to cand)
:return:
batch_sims: ef_batch_size; pooled pairwise _distances_ between
input reps. (distances are just negated similarities here)
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens, cand_align_idxs = cand.embed, cand.abs_lens, cand.align_idxs
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
# pad_mask = np.ones((qef_batch_size, qmax_sents, cmax_sents))*-10e8
cd_sizes = []
for i in range(qef_batch_size):
ql, cl = query_abs_lens[i], cand_abs_lens[i]
cd_sizes.append(ql*cl)
# pad_mask[i, :ql, :cl] = 0.0
        # If the index is beyond what is present in the q or c because of truncation then clip it.
cand_align_idxs[i][0] = min(cand_align_idxs[i][0], ql-1)
cand_align_idxs[i][1] = min(cand_align_idxs[i][1], cl-1)
# pad_mask = Variable(torch.FloatTensor(pad_mask))
cand_align_idxs = Variable(torch.LongTensor(cand_align_idxs))
cd_sizes = Variable(torch.FloatTensor(cd_sizes))
if torch.cuda.is_available():
# pad_mask = pad_mask.cuda()
cand_align_idxs = cand_align_idxs.cuda()
cd_sizes = cd_sizes.cuda()
assert (qef_batch_size == cef_batch_size)
# (effective) batch_size x qmax_sents x cmax_sents
# inputs are: batch_size x encoding_dim x c/qmax_sents so permute them.
pair_sims = -1*torch.cdist(query_reps.permute(0, 2, 1), cand_reps.permute(0, 2, 1))
if len(pair_sims.size()) == 2:
pair_sims = pair_sims.unsqueeze(0)
assert (pair_sims.size(1) == qmax_sents)
assert (pair_sims.size(2) == cmax_sents)
# Add very large negative values in the pad positions which will be zero.
# pair_sims = pair_sims + pad_mask
    # Read off the distances to minimize.
batch_sims = pair_sims[torch.arange(qef_batch_size), cand_align_idxs[torch.arange(qef_batch_size), 0],
cand_align_idxs[torch.arange(qef_batch_size), 1]]
# divide by the number of elements in the cross-doc matrix.
batch_sims = batch_sims/cd_sizes
# Return a distance instead of a similarity - so the smallest distance is minimized even further.
return -1*batch_sims
def allpair_masked_dist_l2topk(query, cand, return_pair_sims=False):
"""
    Given a set of query and candidate reps compute L2 distances
    between all the query sentence reps and all the candidate sentence reps,
    then aggregate by summing the top-k (here k=2) closest pairs.
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
        embed: batch_size x encoding_dim x c_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:return:
batch_sims: ef_batch_size; pooled pairwise _distances_ between
input reps. (distances are just negated similarities here)
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens = cand.embed, cand.abs_lens
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
pad_mask = np.ones((qef_batch_size, qmax_sents, cmax_sents))*-10e8
for i in range(qef_batch_size):
ql, cl = query_abs_lens[i], cand_abs_lens[i]
pad_mask[i, :ql, :cl] = 0.0
pad_mask = Variable(torch.FloatTensor(pad_mask))
if torch.cuda.is_available():
pad_mask = pad_mask.cuda()
assert (qef_batch_size == cef_batch_size)
# (effective) batch_size x qmax_sents x cmax_sents
# inputs are: batch_size x encoding_dim x c/qmax_sents so permute them.
neg_pair_dists = -1*torch.cdist(query_reps.permute(0, 2, 1), cand_reps.permute(0, 2, 1))
if len(neg_pair_dists.size()) == 2:
neg_pair_dists = neg_pair_dists.unsqueeze(0)
assert (neg_pair_dists.size(1) == qmax_sents)
assert (neg_pair_dists.size(2) == cmax_sents)
# Add very large negative values in the pad positions which will be zero.
neg_pair_dists = neg_pair_dists + pad_mask
# Topk across all the pairwise similarities
# - because these are negative distances the smallest distances will be picked.
batch_dists, indices = torch.topk(neg_pair_dists.view(qef_batch_size, qmax_sents*cmax_sents), dim=1, k=2)
# At test time return similarities which can be used for ranking.
    # Negation of L2 distance isn't a similarity.
if return_pair_sims:
# L2 distance to similarity: https://stats.stackexchange.com/a/53070/55807
batch_sims = batch_dists
batch_sims = batch_sims.sum(dim=1)
pair_sims = neg_pair_dists
return batch_sims, pair_sims
# Return a positive distance - the smallest distances are minimized even further.
else:
batch_dists = batch_dists.sum(dim=1)
return -1*batch_dists
def allpair_joint_sm_negscore(query, cand, return_pair_sims=False):
"""
Given a set of query and candidate reps:
- compute pairwise dot product similarities and scaled softmax (sqrt(dim)) normalize it.
- aggregate candidate sentences aligned to the query sentences. (same as the number of q sents)
- aggregate query sentences aligned to the candidate sentences. (same as the number of c sents)
- compute dot similarity of the q sents to the c sents.
- compute dot similarity of the c sents to the q sents.
- sum all the similarities.
:param query: namedtuple(
embed: batch_size x encoding_dim x q_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:param cand: namedtuple(
        embed: batch_size x encoding_dim x c_max_sents;
abs_lens: list(int); number of sentences in every batch element.)
:return:
batch_sims: return a distance because the optimizer calls for it.
"""
query_reps, query_abs_lens = query.embed, query.abs_lens
cand_reps, cand_abs_lens = cand.embed, cand.abs_lens
qef_batch_size, _, qmax_sents = query_reps.size()
cef_batch_size, encoding_dim, cmax_sents = cand_reps.size()
# (effective) batch_size x qmax_sents x cmax_sents
pair_sims = torch.bmm(query_reps.permute(0, 2, 1), cand_reps)
scaled_pair_sims = torch.div(pair_sims, math.sqrt(encoding_dim))
pair_sm = activations.masked_2d_softmax(scaled_pair_sims, target_lens1=query_abs_lens, target_lens2=cand_abs_lens)
# Compute query/cand reps in terms of the aligned cand/query reps.
# This needs to be: batch_size x encoding_dim x qmax_sents
cand_aligned2query = torch.empty_like(query_reps)
for i in range(qmax_sents):
# batch_size x encoding_dim x qmax_sents
q_sent_align = cand_reps*pair_sm[:, i, :].unsqueeze(dim=1)
# batch_size x encoding_dim
q_sent_align = q_sent_align.sum(dim=2)
cand_aligned2query[:, :, i] = q_sent_align
# This needs to be: batch_size x encoding_dim x cmax_sents
query_aligned2cand = torch.empty_like(cand_reps)
for j in range(cmax_sents):
# batch_size x encoding_dim x cmax_sents
c_sent_align = query_reps*pair_sm[:, :, j].unsqueeze(dim=2).permute(0, 2, 1)
# batch_size x encoding_dim
c_sent_align = c_sent_align.sum(dim=2)
query_aligned2cand[:, :, j] = c_sent_align
# Compute scores for query and cands.
# batch_size x qmax_sents; this is dot products of the query and cand_aligned2query
# and then sentence similarities summed.
query_sent_scores = torch.sum(query_reps*cand_aligned2query, dim=1)
cand_sent_scores = torch.sum(cand_reps*query_aligned2cand, dim=1)
summed_scores = torch.sum(query_sent_scores, dim=1) + torch.sum(cand_sent_scores, dim=1)
if return_pair_sims:
return -1.0*summed_scores, pair_sm
else:
return -1.0*summed_scores
| aspire-main | src/learning/facetid_models/pair_distances.py |
"""
Functions used across models.
"""
import numpy as np
import torch
from torch.nn import functional
from torch.autograd import Variable
def masked_softmax(batch_scores, target_lens):
"""
Given the scores for the assignments for every example in the batch apply
a masked softmax for the variable number of assignments.
:param batch_scores: torch Tensor; batch_size x max_num_asgns; With non target
scores set to zero.
    :param target_lens: list(int) [batch_size]; number of elements over which to
compute softmax in each example of the batch.
:return: probs: torch Tensor; same size as batch_scores.
"""
batch_size, max_num_targets = batch_scores.size()
# Set all the logits beyond the targets to very large negative values
# so they contribute minimally to the softmax.
logit_mask = np.zeros((batch_size, max_num_targets))
    for i, tlen in enumerate(target_lens):
        logit_mask[i, tlen:] = -1e32
logit_mask = Variable(torch.FloatTensor(logit_mask))
if torch.cuda.is_available():
logit_mask = logit_mask.cuda()
# Work with log probabilities because its the numerically stable softmax.
batch_scores = batch_scores + logit_mask
log_probs = functional.log_softmax(batch_scores, dim=1)
return log_probs.exp()
def masked_2d_softmax(batch_scores, target_lens1, target_lens2):
"""
Given the scores for the assignments for every example in the batch apply
a masked softmax for the variable number of assignments.
:param batch_scores: torch Tensor; batch_size x dim1 x dim2; With non target
scores set to zero.
    :param target_lens1: list(int) [batch_size]; number of elements over which to
        compute softmax in each example of the batch along dim 1.
    :param target_lens2: list(int) [batch_size]; number of elements over which to
        compute softmax in each example of the batch along dim 2.
:return: probs: torch Tensor; same size as batch_scores.
"""
batch_size, q_max_size, c_max_size = batch_scores.size()
# Set all the logits beyond the targets to very large negative values
# so they contribute minimally to the softmax.
logit_mask = np.zeros((batch_size, q_max_size, c_max_size))
for i, (len1, len2) in enumerate(zip(target_lens1, target_lens2)):
logit_mask[i, len1:, :] = -1e32
logit_mask[i, :, len2:] = -1e32
logit_mask = Variable(torch.FloatTensor(logit_mask))
if torch.cuda.is_available():
logit_mask = logit_mask.cuda()
# Work with log probabilities because its the numerically stable softmax.
batch_scores = batch_scores + logit_mask
log_probs = functional.log_softmax(batch_scores.view(batch_size, q_max_size*c_max_size), dim=1)
log_probs = log_probs.view(batch_size, q_max_size, c_max_size)
return log_probs.exp()
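# Hedged usage sketch (illustration only): masked_softmax normalises scores over a variable
# number of valid targets per example; positions beyond the target length get (near) zero
# probability. The toy scores below are assumptions for the sketch.
def _example_masked_softmax():
    import torch
    scores = torch.tensor([[2.0, 1.0, 0.5],
                           [0.3, 0.7, 0.0]])  # the second row has only 2 valid targets.
    probs = masked_softmax(batch_scores=scores, target_lens=[3, 2])
    # probs[1, 2] is ~0 and every row still sums to ~1 over its valid targets.
    return probs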
| aspire-main | src/learning/models_common/activations.py |
aspire-main | src/learning/models_common/__init__.py |
|
"""
Generic layers used across models.
"""
import torch
from torch import nn as nn
from torch.nn import functional
import collections
from . import activations
non_linearities = {
'tanh': torch.nn.Tanh,
'relu': torch.nn.ReLU,
'sigmoid': torch.nn.Sigmoid,
'softplus': torch.nn.Softplus
}
class FeedForwardNet(nn.Module):
def __init__(self, in_dim, out_dim, non_linearity,
ffn_composition_dims=None, dropoutp=0.3, use_bias=True, score_ffn=False):
"""
:param in_dim: int; dimension of input to ffn.
:param ffn_composition_dims: tuple(int); hidden layers dimensions for the classifier.
:param out_dim: int; dimensions of output from ffn.
:param dropoutp: float; dropout probability.
:param non_linearity: string; non-lin after linear layer.
:param use_bias: bool; says if linear layer should have a bias.
:param score_ffn: bool; says if the final layer output is an attention score - if so,
doesnt apply a non-linearity on it.
"""
torch.nn.Module.__init__(self)
# Layers of the feed-forward network.
self.in_dim = in_dim
self.out_dim = out_dim
layers = collections.OrderedDict()
if ffn_composition_dims:
# Concat the dimensionality of the output layer
ffn_composition_dims = ffn_composition_dims + (out_dim,)
layers['lin_0'] = torch.nn.Linear(in_features=in_dim,
out_features=ffn_composition_dims[0], bias=use_bias)
layers['nonlin_0'] = non_linearities[non_linearity]()
layers['dropout_0'] = torch.nn.Dropout(p=dropoutp)
for layer_i in range(len(ffn_composition_dims) - 1):
layers['lin_{:d}'.format(layer_i + 1)] = \
torch.nn.Linear(in_features=ffn_composition_dims[layer_i],
out_features=ffn_composition_dims[layer_i + 1],
bias=use_bias)
                # If it's a score ffn then don't add a non-linearity at the final layer.
if layer_i == len(ffn_composition_dims) - 2 and score_ffn:
assert(ffn_composition_dims[layer_i + 1] == 1)
pass
else:
layers['nonlin_{:d}'.format(layer_i + 1)] = non_linearities[non_linearity]()
                # Don't add dropout at the final layer.
if layer_i != len(ffn_composition_dims) - 2:
layers['dropout_{:d}'.format(layer_i + 1)] = torch.nn.Dropout(p=dropoutp)
else:
layers['lin_0'] = torch.nn.Linear(in_features=self.in_dim,
out_features=out_dim, bias=use_bias)
layers['nonlin_0'] = non_linearities[non_linearity]()
self.ffn = nn.Sequential(layers)
def forward(self, in_feats):
"""
:param in_feats: torch.Tensor(batch_size, in_dim)
:return: out_feats: torch.Tensor(batch_size, out_dim)
"""
return self.ffn.forward(in_feats)
class SoftmaxMixLayers(torch.nn.Linear):
"""
Combine bert representations across layers with a weighted sum
where the weights are softmaxes over a set of learned parameters.
"""
def forward(self, input):
# the weight vector is out_dim x in_dim.
# so we want to softmax along in_dim.
weight = functional.softmax(self.weight, dim=1)
return functional.linear(input, weight, self.bias)
class GatedAttention(nn.Module):
"""
Implements the gated attention in:
Attention-based Deep Multiple Instance Learning
http://proceedings.mlr.press/v80/ilse18a/ilse18a.pdf
"""
def __init__(self, embed_dim):
torch.nn.Module.__init__(self)
self.embed_dim = embed_dim
self.internal_dim = embed_dim
self.lin_V = nn.Linear(in_features=embed_dim, out_features=self.internal_dim, bias=False)
self.V_nonlin = nn.Tanh()
self.lin_U = nn.Linear(in_features=embed_dim, out_features=self.internal_dim, bias=False)
self.gate_sigm = nn.Sigmoid()
self.score_weight = nn.Linear(in_features=embed_dim, out_features=1, bias=False)
def forward(self, in_seq, seq_lens):
"""
:param in_seq: torch.tensor; batch_size x max_seq_len x embed_dim
:param seq_lens: list(int); batch_size
:return attention_weights: batch_size x max_seq_len
"""
batch_size, max_seq_len = in_seq.size(0), in_seq.size(1)
in_seq = in_seq.view(batch_size*max_seq_len, self.embed_dim)
# batch_size*max_seq_len x internal_dim
hidden = self.V_nonlin(self.lin_V(in_seq))
gates = self.gate_sigm(self.lin_U(in_seq))
scores = self.score_weight(hidden*gates).squeeze()
scores = scores.view(batch_size, max_seq_len)
# This expects the padded elements to be zero.
attention_weights = activations.masked_softmax(batch_scores=scores, target_lens=seq_lens)
return attention_weights
# Straight-through estimator from: https://www.hassanaskary.com/python/pytorch/
# deep%20learning/2020/09/19/intuitive-explanation-of-straight-through-estimators.html
class STEFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return (input >= 0).float()
@staticmethod
def backward(ctx, grad_output):
return functional.hardtanh(grad_output)
class StraightThroughEstimator(nn.Module):
def __init__(self):
super(StraightThroughEstimator, self).__init__()
def forward(self, x):
x = STEFunction.apply(x)
return x
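# Hedged usage sketch (illustration only): the straight-through estimator binarises in the
# forward pass but passes a (hardtanh-clipped) gradient straight through in the backward pass.
def _example_straight_through():
    import torch
    ste = StraightThroughEstimator()
    x = torch.tensor([-0.5, 0.2, 1.5], requires_grad=True)
    y = ste(x)           # tensor([0., 1., 1.]) -- hard threshold at 0.
    y.sum().backward()   # gradients flow back through the threshold instead of being zero.
    return x.grad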
| aspire-main | src/learning/models_common/generic_layers.py |
"""
Build abstract or sentence embeddings from own trained models or a pretrained model
and save to disk to use for ranking.
"""
import os
import sys
import logging
import re
import time
import codecs, json
import argparse
import torch
from transformers import AutoModel, AutoTokenizer
import numpy as np
from sentence_transformers import SentenceTransformer, models
from . import data_utils as du
from ..learning.facetid_models import disent_models
from ..learning import batchers
np_random_ng = np.random.default_rng()
class BertMLM:
def __init__(self, model_name='specter'):
mapping = {
'specter': 'allenai/specter',
            # Using roberta here causes the tokenizers below to break because roberta inputs != bert inputs.
'supsimcse': 'princeton-nlp/sup-simcse-bert-base-uncased',
'unsupsimcse': 'princeton-nlp/unsup-simcse-bert-base-uncased'
}
full_name = mapping[model_name]
self.tokenizer = AutoTokenizer.from_pretrained(full_name)
        # Maximum number of wordpieces fed to BERT before truncation.
        self.bert_max_seq_len = 500
self.model = AutoModel.from_pretrained(full_name)
self.model.config.output_hidden_states = True
if torch.cuda.is_available():
self.model.cuda()
self.model.eval()
def _prepare_batch(self, batch):
"""
Prepare the batch for Bert.
:param batch: list(string); batch of strings.
:return:
"""
# Construct the batch.
tokenized_batch = []
batch_seg_ids = []
batch_attn_mask = []
seq_lens = []
max_seq_len = -1
for sent in batch:
bert_tokenized_text = self.tokenizer.tokenize(sent)
if len(bert_tokenized_text) > self.bert_max_seq_len:
bert_tokenized_text = bert_tokenized_text[:self.bert_max_seq_len]
# Convert token to vocabulary indices
indexed_tokens = self.tokenizer.convert_tokens_to_ids(bert_tokenized_text)
# Append CLS and SEP tokens to the text.
indexed_tokens = self.tokenizer.build_inputs_with_special_tokens(token_ids_0=indexed_tokens)
if len(indexed_tokens) > max_seq_len:
max_seq_len = len(indexed_tokens)
tokenized_batch.append(indexed_tokens)
batch_seg_ids.append([0] * len(indexed_tokens))
batch_attn_mask.append([1] * len(indexed_tokens))
# Pad the batch.
for ids_sent, seg_ids, attn_mask in \
zip(tokenized_batch, batch_seg_ids, batch_attn_mask):
pad_len = max_seq_len - len(ids_sent)
seq_lens.append(len(ids_sent))
ids_sent.extend([self.tokenizer.pad_token_id] * pad_len)
seg_ids.extend([self.tokenizer.pad_token_id] * pad_len)
attn_mask.extend([self.tokenizer.pad_token_id] * pad_len)
return torch.tensor(tokenized_batch), torch.tensor(batch_seg_ids), \
torch.tensor(batch_attn_mask), torch.FloatTensor(seq_lens)
def predict(self, batch):
"""
:param batch:
:return:
"""
tokid_tt, seg_tt, attnmask_tt, seq_lens_tt = self._prepare_batch(batch)
if torch.cuda.is_available():
tokid_tt = tokid_tt.cuda()
seg_tt = seg_tt.cuda()
attnmask_tt = attnmask_tt.cuda()
seq_lens_tt = seq_lens_tt.cuda()
with torch.no_grad():
model_out = self.model(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
# top_l is [bs x max_seq_len x bert_encoding_dim]
top_l = model_out.last_hidden_state
batch_reps_cls = top_l[:, 0, :]
batch_reps_av = torch.sum(top_l[:, 1:-1, :], dim=1)
batch_reps_av = batch_reps_av / seq_lens_tt.unsqueeze(dim=1)
if torch.cuda.is_available():
batch_reps_av = batch_reps_av.cpu().data.numpy()
batch_reps_cls = batch_reps_cls.cpu().data.numpy()
return batch_reps_av, batch_reps_cls
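# Hedged usage sketch (illustration only; downloads the pretrained weights on first use):
# encoding a small batch with the wrapper above. On CPU the outputs are torch tensors;
# they are converted to numpy arrays only when CUDA is available, mirroring predict above.
def _example_bertmlm_encoding():
    enc = BertMLM(model_name='specter')
    batch_reps_av, batch_reps_cls = enc.predict(['A short abstract about scientific document retrieval.'])
    # batch_reps_av / batch_reps_cls have shape (batch_size, 768).
    return batch_reps_cls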
class SimCSE(BertMLM):
def predict(self, batch):
"""
:param batch:
:return:
"""
tokid_tt, seg_tt, attnmask_tt, seq_lens_tt = self._prepare_batch(batch)
if torch.cuda.is_available():
tokid_tt = tokid_tt.cuda()
seg_tt = seg_tt.cuda()
attnmask_tt = attnmask_tt.cuda()
seq_lens_tt = seq_lens_tt.cuda()
with torch.no_grad():
model_out = self.model(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
# top_l is [bs x max_seq_len x bert_encoding_dim]
top_l = model_out.last_hidden_state
batch_reps_pooler = model_out.pooler_output
batch_reps_cls = top_l[:, 0, :]
if torch.cuda.is_available():
batch_reps_pooler = batch_reps_pooler.cpu().data.numpy()
batch_reps_cls = batch_reps_cls.cpu().data.numpy()
return batch_reps_cls, batch_reps_pooler
class TrainedModel:
"""
    A model trained by us, used to build up document embeddings.
"""
def __init__(self, model_name, trained_model_path, model_version='cur_best'):
# Load label maps and configs.
with codecs.open(os.path.join(trained_model_path, 'run_info.json'), 'r', 'utf-8') as fp:
run_info = json.load(fp)
all_hparams = run_info['all_hparams']
# Init model:
if model_name in {'cospecter'}:
model = disent_models.MySPECTER(model_hparams=all_hparams)
else:
raise ValueError(f'Unknown model: {model_name}')
model_fname = os.path.join(trained_model_path, 'model_{:s}.pt'.format(model_version))
model.load_state_dict(torch.load(model_fname))
# Move model to the GPU.
if torch.cuda.is_available():
model.cuda()
logging.info('Running on GPU.')
model.eval()
self.model_name = model_name
self.model = model
self.tokenizer = AutoTokenizer.from_pretrained(all_hparams['base-pt-layer'])
def predict(self, batch):
"""
:param batch:
:return:
"""
if self.model_name in {'cospecter'}:
bert_batch, _, _ = batchers.SentTripleBatcher.prepare_bert_sentences(sents=batch, tokenizer=self.tokenizer)
ret_dict = self.model.encode(batch_dict={'bert_batch': bert_batch})
return ret_dict, ret_dict['doc_reps']
class SplitStream:
"""
    Given a jsonl file, yield text from the corpus.
    Returns the title, the abstract, or both, based on what is asked for.
"""
def __init__(self, in_fname, num_to_read=None, attribute='title',
return_pid=False, insert_sep=False):
"""
        :param in_fname: string; input jsonl filename from which to read examples.
:param num_to_read: int; number of examples to read.
None if everything in the file should be read.
:param attribute: string; which attribute from the input example should be read.
:param return_pid: bool; Return PID if True else dont.
:param insert_sep: bool; Insert [SEP] tokens between sentences of the abstract. For use by bert.
"""
self.in_fname = in_fname
self.attr_to_read = attribute
self.num_to_read = num_to_read
self.return_pid = return_pid
self.insert_sep = insert_sep
self.read_count = 0
def __iter__(self):
# "Rewind" the input file at the start of the loop
self.in_file = codecs.open(self.in_fname, 'r', 'utf-8')
return self.next()
def next(self):
# In each loop iteration return one example.
for jsonline in self.in_file:
self.read_count += 1
if self.num_to_read and self.read_count == self.num_to_read:
break
if self.attr_to_read in {'sent'}:
                # Sentence mode: yield the abstract's sentences one at a time.
doc = self.get_gorc_sents(jsonline, return_pid=self.return_pid)
for sent in doc:
yield sent
elif self.attr_to_read in {'abstract'}:
doc = self.get_gorc(jsonline, attr_to_read=self.attr_to_read,
return_pid=self.return_pid, insert_sep=self.insert_sep)
# Check to make sure that the text is a non empty string.
ret_text = doc[1].strip() if self.return_pid else doc.strip()
if ret_text:
yield doc
elif self.attr_to_read in {'title-abstract'}:
doc = self.get_gorc_specter(jsonline, return_pid=self.return_pid)
yield doc
elif self.attr_to_read in {'title-abstract-dict'}:
doc = self.get_gorc_absdict(jsonline, return_pid=self.return_pid)
yield doc
else:
raise ValueError('Unknown attribute to read: {:s}'.format(self.attr_to_read))
@staticmethod
def get_gorc(in_line, attr_to_read, return_pid, insert_sep):
"""
        Read in a gorc doc line of text and return concatenated sentences.
Also replace all numbers with <NUM> to match processing in the "Ask the GRU" paper.
:param in_line: string; json string example.
:param attr_to_read: string; says what should be read from the json example.
:param return_pid: bool; Return PID if True else dont.
:param insert_sep: bool; Insert [SEP] tokens between sentences of the abstract. For use by bert.
:return:
            if 'abstract': all the sentences of the abstract concatenated into one string.
if 'title': the title sentence.
"""
in_ex = json.loads(in_line.strip())
pid = in_ex['paper_id']
if attr_to_read == 'abstract':
sents = in_ex['abstract']
if insert_sep:
ret_text = ' [SEP] '.join(sents)
else:
ret_text = ' '.join(sents)
else:
raise ValueError('Unknown attribute to read: {:}'.format(attr_to_read))
        # Replace numbers with a placeholder.
ret_text = re.sub(r"\d+", "<NUM>", ret_text)
if return_pid:
return pid, ret_text
else:
return ret_text
@staticmethod
def get_gorc_specter(in_line, return_pid):
"""
Read in a gorc doc line of text and return title and abstract concatenated.
:param in_line: string; json string example.
:param attr_to_read: string; says what should be read from the json example.
:param return_pid: bool; Return PID if True else dont.
:return:
            if 'abstract': all the sentences of the abstract concatenated into one string.
"""
in_ex = json.loads(in_line.strip())
pid = in_ex['paper_id']
sents = in_ex['abstract']
abs_text = ' '.join(sents)
ret_text = in_ex['title'] + '[SEP]' + abs_text
if return_pid:
return pid, ret_text
else:
return ret_text
@staticmethod
def get_gorc_absdict(in_line, return_pid):
"""
Read in a gorc doc line of text and return title and abstract in a dict as expected
by src.learning.batchers.*.prepare_abstracts and others
:param in_line: string; json string example.
:param return_pid: bool; Return the PID if True, else don't.
:return:
ret_dict: dict('TITLE': string, 'ABSTRACT': list(string))
"""
in_ex = json.loads(in_line.strip())
pid = in_ex['paper_id']
ret_dict = {'TITLE': in_ex['title'], 'ABSTRACT': in_ex['abstract']}
if return_pid:
return pid, ret_dict
else:
return ret_dict
@staticmethod
def get_gorc_sents(in_line, return_pid):
"""
Read in a gorc doc line of text and return sentences one at a time.
:param in_line: string; json string example.
:param return_pid: bool; Return the PID if True, else don't.
:return:
yields each abstract sentence as a string; if return_pid is True, yields
('<pid>-<sent_index>', sentence) tuples instead.
"""
in_ex = json.loads(in_line.strip())
pid = in_ex['paper_id']
sents = in_ex['abstract']
for i, sent in enumerate(sents):
if return_pid:
yield '{:s}-{:d}'.format(pid, i), sent
else:
yield sent
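# Illustrative usage sketch for SplitStream, kept as a comment so module behaviour
# is unchanged. The jsonl file is assumed to contain one json dict per line with
# 'paper_id', 'title', and 'abstract' (a list of sentence strings) fields, as read
# by the get_gorc_* methods above; the filename below is a hypothetical example.
#
#   stream = SplitStream(in_fname='abstracts-relish.jsonl', attribute='title-abstract',
#                        return_pid=True)
#   for pid, text in stream:
#       # text is '<title>[SEP]<abstract sentences joined with spaces>'
#       print(pid, text[:80])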
def build_sentbert_reps(data_path, run_path, data_to_read, dataset, sb_model_name, trained_model_path=None):
"""
Build per-sentence Sentence-BERT representations for the abstracts of a dataset.
:param data_path: string; path from which to read raw abstracts and vocab data.
:param run_path: string; path to save reps for documents.
:param data_to_read: string; {'sent'}
:param dataset: string; {'csfcube', 'relish'}
:param sb_model_name: string; The original sent bert model trained on NLI alone
or the one trained on citations+NLI+Paraphrases achieving SOTA
SciDOCS performance.
:param trained_model_path: string; directory where torch.save was used to store
a bert encoder fine tuned on my own data.
:return:
"""
if sb_model_name in {'sbtinybertsota', 'sbrobertanli', 'sbmpnet1B'}:
normname2model_names = {
'sbtinybertsota': 'paraphrase-TinyBERT-L6-v2',
'sbrobertanli': 'nli-roberta-base-v2',
'sbmpnet1B': 'sentence-transformers/all-mpnet-base-v2'
}
pt_model_name = normname2model_names[sb_model_name]
sentbert_model = SentenceTransformer(pt_model_name)
# The easy way to get sentence reps from any bert model.
elif sb_model_name in {'cosentbert', 'ictsentbert'} and trained_model_path:
word_embedding_model = models.Transformer('allenai/scibert_scivocab_uncased',
max_seq_length=512)
# Loading local model: https://github.com/huggingface/transformers/issues/2422#issuecomment-571496558
trained_model_fname = os.path.join(trained_model_path, 'sent_encoder_cur_best.pt')
word_embedding_model.auto_model.load_state_dict(torch.load(trained_model_fname))
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
sentbert_model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
embedding_dim = 768
if dataset in {'csfcube'}:
in_fname = os.path.join(data_path, 'abstracts-{:s}-preds.jsonl'.format(dataset))
elif dataset in {'relish', 'treccovid', 'gorcmatscicit', 'scidcite',
'scidcocite', 'scidcoread', 'scidcoview'}:
in_fname = os.path.join(data_path, 'abstracts-{:s}.jsonl'.format(dataset))
out_fname = os.path.join(run_path, '{:s}-{:s}.npy'.format(dataset, data_to_read))
out_map_fname = os.path.join(run_path, 'pid2idx-{:s}-{:s}.json'.format(dataset, data_to_read))
abs_sent_stream = SplitStream(in_fname=in_fname, attribute=data_to_read,
return_pid=True)
sent_docs = list(abs_sent_stream)
# Docs are returned in the same order.
pid2idx = {}
for absi, (pid, abs_sentence) in enumerate(sent_docs):
pid2idx[pid] = absi
logging.info('pid2idx: {:}'.format(len(pid2idx)))
del sent_docs
# Go over documents and form sb reps for documents.
abs_sent_stream = list(SplitStream(in_fname=in_fname, attribute=data_to_read))
start = time.time()
vectors = sentbert_model.encode(abs_sent_stream)
logging.info('Forming vectors took: {:.4f}s'.format(time.time() - start))
logging.info('Shape: {:}'.format(vectors.shape))
# Save vectors to disk.
with codecs.open(out_fname, 'wb') as fp:
np.save(fp, vectors)
logging.info('Wrote: {:s}'.format(fp.name))
with codecs.open(out_map_fname, 'w', 'utf-8') as fp:
json.dump(pid2idx, fp)
logging.info('Wrote: {:s}'.format(fp.name))
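# Illustrative sketch (comment only) of how the saved outputs can be read back;
# the run directory and dataset name below are hypothetical and depend on how
# build_sentbert_reps was called. With data_to_read='sent' the .npy holds one row
# per sentence and pid2idx maps '<pid>-<sent_index>' keys to row indices.
#
#   with codecs.open('run_dir/pid2idx-relish-sent.json', 'r', 'utf-8') as fp:
#       pid2idx = json.load(fp)
#   vectors = np.load('run_dir/relish-sent.npy')
#   sent_vec = vectors[pid2idx['12345678-0'], :]  # first sentence of paper 12345678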
def write_wholeabs_reps(data_path, run_path, dataset, model_name, trained_model_path=None):
"""
Given a corpus: read the abstract sentences and write out the bert representations of
the abstracts. The entire abstract is passed through bert as one string with [SEP] tokens
marking off sentences.
Also (mis)-using this function to get sentence reps for sup/unsupsimcse.
:param data_path: base directory with abstract jsonl docs.
:param run_path: directory to which bert reps, and maps of bert reps to strings will be written.
:param dataset: string; {'relish', 'treccovid', 'csfcube'}
:param model_name: string; {'specter', 'cospecter', 'supsimcse', 'unsupsimcse'}
:return: None. Writes to disk.
"""
sent_enc_dim = 768
in_fname = os.path.join(data_path, 'abstracts-{:s}.jsonl'.format(dataset))
cls_out_fname = os.path.join(run_path, '{:s}-abstracts.npy'.format(dataset))
out_map_fname = os.path.join(run_path, 'pid2idx-{:s}-abstract.json'.format(dataset))
num_docs = len(list(SplitStream(in_fname=in_fname, attribute='title-abstract', return_pid=True)))
if model_name in {'supsimcse', 'unsupsimcse'}:
in_fname = os.path.join(data_path, 'abstracts-{:s}.jsonl'.format(dataset))
# Overwrite the above values.
cls_out_fname = os.path.join(run_path, '{:s}-sent.npy'.format(dataset))
out_map_fname = os.path.join(run_path, 'pid2idx-{:s}-sent.json'.format(dataset))
num_docs = len(list(SplitStream(in_fname=in_fname, attribute='sent', return_pid=True)))
doc_stream = SplitStream(in_fname=in_fname, attribute='sent', return_pid=True)
model = SimCSE(model_name)
batch_size = 120
elif model_name in {'specter'}:
doc_stream = SplitStream(in_fname=in_fname, attribute='title-abstract', return_pid=True)
model = BertMLM(model_name)
batch_size = 90
elif model_name in {'cospecter'}:
doc_stream = SplitStream(in_fname=in_fname, attribute='title-abstract', return_pid=True)
model = TrainedModel(model_name=model_name, trained_model_path=trained_model_path)
batch_size = 32
start = time.time()
logging.info('Processing files in: {:s}'.format(in_fname))
logging.info('Num docs: {:d}'.format(num_docs))
# Write out sentence reps incrementally.
sent2idx = {}
doc_reps_cls = np.empty((num_docs, sent_enc_dim))
logging.info('Allocated space for reps: {:}'.format(doc_reps_cls.shape))
batch_docs = []
batch_start_idx = 0
for doci, (pid, abs_text) in enumerate(doc_stream):
if doci % 1000 == 0:
logging.info('Processing document: {:d}/{:d}'.format(doci, num_docs))
batch_docs.append(abs_text)
sent2idx[pid] = len(sent2idx)
if len(batch_docs) == batch_size:
batch_reps_av, batch_reps_cls = model.predict(batch_docs)
batch_docs = []
doc_reps_cls[batch_start_idx:batch_start_idx+batch_size, :] = batch_reps_cls
batch_start_idx = batch_start_idx+batch_size
# Handle left over sentences.
if len(batch_docs) > 0:
batch_reps_av, batch_reps_cls = model.predict(batch_docs)
final_bsize = batch_reps_cls.shape[0]
doc_reps_cls[batch_start_idx:batch_start_idx + final_bsize, :] = batch_reps_cls
logging.info('Doc reps shape: {:}; Map length: {:d}'.format(doc_reps_cls.shape, len(sent2idx)))
with codecs.open(out_map_fname, 'w', 'utf-8') as fp:
json.dump(sent2idx, fp)
logging.info('Wrote: {:s}'.format(fp.name))
with codecs.open(cls_out_fname, 'wb') as fp:
np.save(fp, doc_reps_cls)
logging.info('Wrote: {:s}'.format(fp.name))
logging.info('Took: {:.4f}s'.format(time.time() - start))
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand',
help='The action to perform.')
# Get tfidf reps.
build_vecs_args = subparsers.add_parser('build_reps')
build_vecs_args.add_argument('--model_name', required=True,
choices=['sbtinybertsota', 'sbrobertanli', 'specter',
'cosentbert', 'ictsentbert', 'cospecter',
'supsimcse', 'unsupsimcse', 'sbmpnet1B'],
help='The name of the model to run.')
build_vecs_args.add_argument('--dataset', required=True,
choices=['gorcmatscicit', 'csfcube', 'relish', 'treccovid',
'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'],
help='The dataset to train and predict on.')
build_vecs_args.add_argument('--data_path', required=True,
help='Path to directory with jsonl data.')
build_vecs_args.add_argument('--run_path', required=True,
help='Path to directory to save all run items to.')
build_vecs_args.add_argument('--model_path',
help='Path to directory with trained model to use for getting reps.')
build_vecs_args.add_argument('--run_name',
help='Basename for the trained model directory.')
build_vecs_args.add_argument('--log_fname',
help='File name for the log file to which logs get'
' written.')
cl_args = parser.parse_args()
# If a log file was passed then write to it.
try:
logging.basicConfig(level='INFO', format='%(message)s',
filename=cl_args.log_fname)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
# Else just write to stdout.
except AttributeError:
logging.basicConfig(level='INFO', format='%(message)s',
stream=sys.stdout)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
if cl_args.subcommand == 'build_reps':
if cl_args.model_name in {'sbtinybertsota', 'sbrobertanli', 'sbmpnet1B'}:
build_sentbert_reps(data_path=cl_args.data_path, run_path=cl_args.run_path,
data_to_read='sent', dataset=cl_args.dataset,
sb_model_name=cl_args.model_name)
elif cl_args.model_name in {'cosentbert', 'ictsentbert'}:
# Write reps to a different directory per run.
run_path = os.path.join(cl_args.run_path, cl_args.run_name)
du.create_dir(run_path)
build_sentbert_reps(data_path=cl_args.data_path, run_path=run_path,
data_to_read='sent', dataset=cl_args.dataset,
sb_model_name=cl_args.model_name,
trained_model_path=cl_args.model_path)
elif cl_args.model_name in {'specter', 'supsimcse', 'unsupsimcse'}:
write_wholeabs_reps(data_path=cl_args.data_path, run_path=cl_args.run_path,
dataset=cl_args.dataset, model_name=cl_args.model_name)
elif cl_args.model_name in {'cospecter'}:
# Write reps to a different directory per run.
run_path = os.path.join(cl_args.run_path, cl_args.run_name)
du.create_dir(run_path)
write_wholeabs_reps(data_path=cl_args.data_path, run_path=run_path,
dataset=cl_args.dataset,
model_name=cl_args.model_name,
trained_model_path=cl_args.model_path)
if __name__ == '__main__':
main()
| aspire-main | src/pre_process/pre_proc_buildreps.py |
"""
Process the RELISH dataset.
"""
import os
import codecs
import json
import csv
import pandas as pd
import random
import spacy
scispacy_model = spacy.load("en_core_sci_sm")
scispacy_model.add_pipe('sentencizer')
def annotation_pmids(in_path):
"""
Write out pmids of the RELISH documents.
:param in_path: directory with the RELISH_v1_ann.json annotations file.
:return:
"""
with codecs.open(os.path.join(in_path, 'RELISH_v1_ann.json'), 'r', 'utf-8') as fp:
ann_dicts = json.load(fp)
dataset_pmids = set()
dataset_pmids_rep = []
for ann_dict in ann_dicts:
dataset_pmids.add(ann_dict['pmid'])
dataset_pmids_rep.append(ann_dict['pmid'])
dataset_pmids.update(ann_dict['response']['relevant'])
dataset_pmids_rep.extend(ann_dict['response']['relevant'])
dataset_pmids.update(ann_dict['response']['partial'])
dataset_pmids_rep.extend(ann_dict['response']['partial'])
dataset_pmids.update(ann_dict['response']['irrelevant'])
dataset_pmids_rep.extend(ann_dict['response']['irrelevant'])
print('All PMIDs: {:d}; Unique PMIDs: {:d}'.format(len(dataset_pmids_rep), len(dataset_pmids)))
with codecs.open(os.path.join(in_path, 'RELISH_v1_pmids.txt'), 'w', 'utf-8') as fp:
for pmid in dataset_pmids:
fp.write('{:s}\n'.format(pmid))
print('Wrote: {:s}'.format(fp.name))
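# For reference, each entry of RELISH_v1_ann.json is assumed (based on the accesses
# above) to look roughly like the sketch below; the pmids shown are made up.
#
# {
#   "pmid": "10000001",
#   "response": {
#     "relevant": ["10000002", "10000003"],
#     "partial": ["10000004"],
#     "irrelevant": ["10000005"]
#   }
# }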
def ann_stats2json(in_abs_path, in_path, out_path):
"""
- Write out a jsonl file of abstracts and titles.
- Write out per-query graded relevance annotations and a query metadata csv.
:param in_abs_path: directory with title and abstracts for papers.
:param in_path: directory with annotations json.
:return:
"""
filenames = os.listdir(in_abs_path)
out_file = codecs.open(os.path.join(out_path, 'abstracts-relish.jsonl'), 'w', 'utf-8')
pid2abstract = {}
for fname in filenames:
with codecs.open(os.path.join(in_abs_path, fname), 'r', 'utf-8') as fp:
file_lines = fp.readlines()
title = file_lines[0].strip()
abs_text = ' '.join([s.strip() for s in file_lines[1:]])
abs_sentences = scispacy_model(abs_text,
disable=['tok2vec', 'tagger', 'attribute_ruler',
'lemmatizer', 'parser', 'ner'])
abs_sentences = [sent.text for sent in abs_sentences.sents]
if title and len(abs_sentences) > 0:
pmid = fname[7:-4] # filenames are like: PubMed-25010440.txt
doc_dict = {'title': title, 'abstract': abs_sentences, 'paper_id': pmid}
pid2abstract[pmid] = doc_dict
out_file.write(json.dumps(doc_dict)+'\n')
print('Docs with data: {:d}'.format(len(pid2abstract)))
print('Wrote: {:s}'.format(out_file.name))
out_file.close()
with codecs.open(os.path.join(in_path, 'RELISH_v1_ann.json'), 'r', 'utf-8') as fp:
ann_dicts = json.load(fp)
query_meta_file = codecs.open(os.path.join(out_path, 'relish-queries-release.csv'), 'w', 'utf-8')
query_meta_csv = csv.DictWriter(query_meta_file, extrasaction='ignore',
fieldnames=['paper_id', 'title'])
query_meta_csv.writeheader()
query_pmids = []
num_cands_perq = []
relevant_num_cands_perq = []
partial_num_cands_perq = []
irrelevant_num_cands_perq = []
qpmid2cands = {}
for ann_dict in ann_dicts:
qpid = ann_dict['pmid']
query_pmids.append(ann_dict['pmid'])
if qpid not in pid2abstract:
continue
cands = []
relevances = []
for cpid in ann_dict['response']['relevant']:
if cpid not in pid2abstract:
continue
cands.append(cpid)
relevances.append(2)
relevant_num_cands_perq.append(len(ann_dict['response']['relevant']))
for cpid in ann_dict['response']['partial']:
if cpid not in pid2abstract:
continue
cands.append(cpid)
relevances.append(1)
partial_num_cands_perq.append(len(ann_dict['response']['partial']))
for cpid in ann_dict['response']['irrelevant']:
if cpid not in pid2abstract:
continue
cands.append(cpid)
relevances.append(0)
irrelevant_num_cands_perq.append(len(ann_dict['response']['irrelevant']))
if cands:
qpmid2cands[qpid] = {'cands': cands, 'relevance_adju': relevances}
query_meta_csv.writerow({'title': pid2abstract[qpid]['title'], 'paper_id': qpid})
# Check that there aren't papers with multiple ratings.
assert(len(set(cands)) == len(cands))
num_cands_perq.append(len(cands))
print('Query PMIDs: {:d}'.format(len(query_pmids)))
cand_summ = pd.DataFrame(num_cands_perq).describe()
print('Candidates per query: {:}'.format(cand_summ))
cand_summ = pd.DataFrame(relevant_num_cands_perq).describe()
print('Relevant candidates per query: {:}'.format(cand_summ))
cand_summ = pd.DataFrame(partial_num_cands_perq).describe()
print('Partial candidates per query: {:}'.format(cand_summ))
cand_summ = pd.DataFrame(irrelevant_num_cands_perq).describe()
print('Irrelevant candidates per query: {:}'.format(cand_summ))
with codecs.open(os.path.join(out_path, 'test-pid2anns-relish.json'), 'w') as fp:
json.dump(qpmid2cands, fp)
print('Wrote: {:s}'.format(fp.name))
print('Wrote: {:}'.format(query_meta_file.name))
query_meta_file.close()
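# The test-pid2anns-relish.json written above maps each query pmid to its candidate
# pool and graded relevances (2=relevant, 1=partial, 0=irrelevant). A made-up entry
# looks like:
#
# {
#   "10000001": {
#     "cands": ["10000002", "10000004", "10000005"],
#     "relevance_adju": [2, 1, 0]
#   }
# }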
def pprint_graded_anns(data_path):
"""
Given the jsonl abstracts of the papers and the pid2anns-relish.json file, print out,
for each query, all the candidate papers grouped by relevance.
:param data_path:
:return:
"""
sim2str = {
0: 'Irrelevant (0)',
1: 'Partial (+1)',
2: 'Relevant (+2)'
}
pid2abstract = {}
with codecs.open(os.path.join(data_path, 'abstracts-relish.jsonl'), 'r', 'utf-8') as fp:
for line in fp:
jsond = json.loads(line.strip())
pid2abstract[jsond['paper_id']] = jsond
with codecs.open(os.path.join(data_path, 'test-pid2anns-relish.json'), 'r', 'utf-8') as fp:
pid2anns = json.load(fp)
for qpid in pid2anns.keys():
print('Processing: {:}'.format(qpid))
resfile = codecs.open(os.path.join(data_path, 'readable_annotations/{:}.txt'.format(qpid)), 'w', 'utf-8')
cand_pids = pid2anns[qpid]['cands']
relevances = pid2anns[qpid]['relevance_adju']
cand2rel = dict([(c, r) for c, r in zip(cand_pids, relevances)])
# Write query.
try:
qtitle = pid2abstract[qpid]['title']
qabs = '\n'.join(pid2abstract[qpid]['abstract'])
except KeyError:
print('Missing query: {:}'.format(qpid))
continue
resfile.write('======================================================================\n')
resfile.write('paper_id: {:s}\n'.format(qpid))
resfile.write('TITLE: {:s}\n'.format(qtitle))
resfile.write('ABSTRACT: {:s}\n'.format(qabs))
for cpid in sorted(cand2rel, key=cand2rel.get, reverse=True):
resfile.write('===================================\n')
try:
ntitle = pid2abstract[cpid]['title']
nabs = '\n'.join(pid2abstract[cpid]['abstract'])
except KeyError:
print('Missing candidate: {:s}'.format(cpid))
continue
resfile.write('paper_id: {:s}\n'.format(cpid))
resfile.write('relevance: {:}\n'.format(sim2str[cand2rel[cpid]]))
resfile.write('TITLE: {:s}\n'.format(ntitle))
resfile.write('ABSTRACT: {:s}\n\n'.format(nabs))
resfile.close()
def setup_splits(in_path, out_path):
"""
Read in queries release file and write out half the queries as
dev and the rest as test. Make the splits at the level of query papers.
"""
random.seed(582)
with codecs.open(os.path.join(in_path, 'relish-queries-release.csv'), 'r', 'utf-8') as fp:
csv_reader = csv.DictReader(fp)
query_pids = []
for row in csv_reader:
query_pids.append(row['paper_id'])
random.shuffle(query_pids)
dev = query_pids[:len(query_pids)//2]
test = query_pids[len(query_pids)//2:]
eval_splits = {'dev': dev, 'test': test}
print(f'dev_pids: {len(dev)}; test_pids: {len(test)}')
with codecs.open(os.path.join(out_path, 'relish-evaluation_splits.json'), 'w', 'utf-8') as fp:
json.dump(eval_splits, fp)
print('Wrote: {:s}'.format(fp.name))
if __name__ == '__main__':
# annotation_pmids(in_path='/iesl/canvas/smysore/facetid_apps/datasets_raw/relish_v1/')
# ann_stats2json(in_abs_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/relish/'
# 'neves_collected/RELISH-DB/texts',
# in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/relish',
# out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/relish')
# pprint_graded_anns(data_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/relish/')
setup_splits(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/relish',
out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/relish')
| aspire-main | src/pre_process/pre_proc_relish.py |
"""
Explore the GORC corpus for corpora included and such.
"""
import os
import ast
import argparse
import time
import gzip
import multiprocessing as mp
import collections
import pprint
import pickle
import codecs, json
import csv
import pandas as pd
import spacy
import data_utils as du
import pp_settings as pps
scispacy_model = spacy.load("en_core_sci_sm")
scispacy_model.add_pipe('sentencizer')
def filter_for_fulltext(args):
"""
Open the metadata file, and return papers which have grobid-parsed full text.
Not checking for hosting sites or fields of study, only the has_grobid_text flag.
:param in_fname: string.
:param filter_columns: will always be None here. Just preserving the same function
prototype to not change the filter_by_hostingservice function.
:return:
"""
in_fname, filter_columns = args
meta_csv = pd.read_csv(in_fname, delimiter='\t', error_bad_lines=False,
engine='python', quoting=csv.QUOTE_NONE)
total_row_count = meta_csv.shape[0]
valid_rows = meta_csv
valid_rows = valid_rows[valid_rows['has_grobid_text'] == True]
return total_row_count, valid_rows
def filter_metadata(raw_meta_path, filtered_meta_path, filter_nan_cols=None, filter_method=None):
"""
Look at the paper meta data and print out the metadata for the papers
from different hosting services: arxiv, pubmed, the ACL Anthology etc.
:param raw_meta_path:
:param filtered_meta_path:
:param filter_nan_cols: list(column names); column names based on which to exclude row
if it contains a nan value.
:param filter_method: string; {'Computer science', 'Materials science', 'full text'}
:return:
"""
if filter_method == 'full text':
filt_function = filter_for_fulltext
else:
raise ValueError("Don't know what filter function to pick.")
raw_metadata_files = os.listdir(raw_meta_path)
output_tsv = []
total_rows = 0
print('Filtering metadata in: {:s}'.format(raw_meta_path))
print('Filtering by columns: {:}'.format(filter_nan_cols))
di = du.DirIterator(root_path=raw_meta_path, yield_list=raw_metadata_files,
args=(filter_nan_cols,))
# Start a pool of worker processes.
process_pool = mp.Pool(processes=mp.cpu_count(), maxtasksperchild=10000)
start = time.time()
for total_row_count, valid_rows in process_pool.imap_unordered(filt_function, di,
chunksize=mp.cpu_count()):
total_rows += total_row_count
print('meta_csv: {:}; valid: {:}'.format(total_rows, valid_rows.shape))
if valid_rows.shape[0] > 0:
output_tsv.append(valid_rows)
# Close the pool.
process_pool.close()
process_pool.join()
output_tsv = pd.concat(output_tsv)
print('Total rows: {:d}; filtered rows: {:}'.format(total_rows, output_tsv.shape))
if filter_method == 'Computer science' and filter_nan_cols:
filt_file = os.path.join(filtered_meta_path, 'metadata-{:s}-cs.tsv'.format('-'.join(filter_nan_cols)))
elif filter_method == 'Materials science':
filt_file = os.path.join(filtered_meta_path, 'metadata-gorcmatsci.tsv')
elif filter_method == 'full text':
filt_file = os.path.join(filtered_meta_path, 'metadata-gorcfulltext.tsv')
else:
filt_file = os.path.join(filtered_meta_path, 'metadata-{:s}.tsv'.format('-'.join(filter_nan_cols)))
output_tsv.to_csv(filt_file, sep='\t')
print('Wrote: {:s}'.format(filt_file))
print('Took: {:.4f}s'.format(time.time()-start))
def write_batch_papers(args):
"""
Given a batch file, read the papers from it that are mentioned in the metadata dataframe
and write them to disk as a jsonl file.
:param jsonl_fname: string; filename for current batch.
:param filtered_data_path: directory to which outputs should be written.
:param pids: pids of the papers we want from the current batch file.
:return: wrote_count: int; how many jsonl rows were written to the batch output.
"""
jsonl_fname, pids, filtered_data_path = args
batch_num = int(os.path.basename(jsonl_fname)[:-9])
if len(pids) > 0:
data_file = gzip.open(jsonl_fname)
out_file = codecs.open(os.path.join(filtered_data_path, '{:d}.jsonl'.format(batch_num)), 'w', 'utf-8')
for line in data_file:
data_json = json.loads(line.strip())
if int(data_json['paper_id']) in pids:
out_file.write(json.dumps(data_json)+'\n')
out_file.close()
return len(pids)
else:
return 0
def gather_papers(meta_fname, raw_data_path):
"""
Read metadata for (filtered) files and gather the filtered files from the full
collection.
:return:
"""
# Construct output dir path by removing "meta" and ".tsv" from end.
filtered_data_path = os.path.join(os.path.dirname(meta_fname), os.path.basename(meta_fname)[4:-4])
du.create_dir(filtered_data_path)
metadata_df = pd.read_csv(meta_fname, delimiter='\t', error_bad_lines=False,
engine='python', quoting=csv.QUOTE_NONE)
# Get the papers with full text + section labels; include grobid parses also.
# metadata_df = metadata_df[metadata_df['has_latex'] == True]
unique_batch_fnames = ['{:d}.jsonl.gz'.format(bid) for bid in metadata_df['batch_num'].unique()]
di = du.DirMetaIterator(root_path=raw_data_path, yield_list=unique_batch_fnames, metadata_df=metadata_df,
args=(filtered_data_path,))
# Start a pool of worker processes.
process_pool = mp.Pool(processes=mp.cpu_count(), maxtasksperchild=10000)
start = time.time()
gathered_total = 0
print('Gathering data from: {:s}; Shape: {:}'.format(meta_fname, metadata_df.shape))
# Open it in the child processes cause the meta file can be too big to pass
# with pickle files.
for wrote_count in process_pool.imap_unordered(write_batch_papers, di,
chunksize=mp.cpu_count()):
gathered_total += wrote_count
print('Wrote rows: {:d}'.format(wrote_count))
print('Wrote papers to: {:s}'.format(filtered_data_path))
print('Wrote papers: {:d}'.format(gathered_total))
print('Took: {:.4f}s'.format(time.time()-start))
# Close the pool.
process_pool.close()
process_pool.join()
def exclude_abstract(abstract_sents):
"""
Given the abstract sentences, check whether the abstract looks noisy and should be excluded.
:param abstract_sents: list(string)
:return: bool;
True if the abstract looks noisy (too few/too many sentences, an overly long sentence,
or only very short sentences).
False if things look fine.
"""
abs_sent_count = len(abstract_sents)
if abs_sent_count < pps.MIN_ABS_LEN or abs_sent_count > pps.MAX_ABS_LEN:
return True
# Keep count of how many sentences in an abstract and how many tokens in a sentence.
all_small_sents = True
for sent in abstract_sents:
num_toks = len(sent.split())
if num_toks > pps.MIN_NUM_TOKS:
all_small_sents = False
if num_toks > pps.MAX_NUM_TOKS:
return True
# If all the sentences are smaller than a threshold then exclude the abstract.
if all_small_sents:
return True
return False
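# Quick illustration of the filter above with the thresholds from pp_settings
# (MIN_ABS_LEN=3, MAX_ABS_LEN=20, MIN_NUM_TOKS=4, MAX_NUM_TOKS=80); the example
# sentences are made up.
#
#   exclude_abstract(['Too short.', 'Only two sentences.'])     # True: fewer than 3 sentences
#   exclude_abstract(['One.', 'Two.', 'Three.'])                # True: every sentence has <= 4 tokens
#   exclude_abstract(['We study a problem in detail here.',
#                     'Our method improves over prior work.',
#                     'Experiments show consistent gains.'])    # False: looks fine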
def write_batch_absmeta(args):
"""
Given a batch file, read the papers from it mentioned in the pids,
filter out obviously noisy papers and write out the title and abstract
and limited metadata to disk.
:param jsonl_fname: string; filename for current batch.
:param filtered_data_path: directory to which outputs should be written.
:param to_write_pids: pids of the papers we want from the current batch file.
:return:
to_write_pids: list(int); to write pids.
pids_written: list(string); actually written pids.
"""
jsonl_fname, to_write_pids, filtered_data_path = args
batch_num = int(os.path.basename(jsonl_fname)[:-9])
pids_written = set()
if len(to_write_pids) == 0:
return to_write_pids, pids_written
data_file = gzip.open(jsonl_fname)
out_file = codecs.open(os.path.join(filtered_data_path, '{:d}.jsonl'.format(batch_num)), 'w', 'utf-8')
for line in data_file:
data_json = json.loads(line.strip())
# The pids comes from metadata which saves it as an integer.
if int(data_json['paper_id']) not in to_write_pids:
continue
# Get title and abstract.
title_sent = data_json['metadata'].pop('title', None)
# Assuming this is present in the metadata; suspect this is only there if it's gold and provided.
abstract_sents = []
try:
abstract_str = data_json['metadata'].pop('abstract', None)
abstract_sents = scispacy_model(abstract_str,
disable=['tok2vec', 'tagger', 'attribute_ruler',
'lemmatizer', 'parser', 'ner'])
abstract_sents = [sent.text for sent in abstract_sents.sents]
# Sometimes abstract is missing (is None) in the metadata.
except TypeError:
try:
for abs_par_dict in data_json['grobid_parse']['abstract']:
par_sents = scispacy_model(abs_par_dict['text'],
disable=['tok2vec', 'tagger', 'attribute_ruler',
'lemmatizer', 'parser', 'ner'])
par_sents = [sent.text for sent in par_sents.sents]
abstract_sents.extend(par_sents)
# Sometimes, abstract is altogether missing.
except TypeError:
pass
if title_sent is None or abstract_sents == []:
continue
# Filter out abstracts which are noisy.
if exclude_abstract(abstract_sents):
continue
pids_written.add(data_json['paper_id'])
out_dict = {
'paper_id': data_json['paper_id'],
'metadata': data_json['metadata'],
'title': title_sent,
'abstract': abstract_sents
}
out_file.write(json.dumps(out_dict)+'\n')
# if len(pids_written) > 20:
# break
out_file.close()
return to_write_pids, pids_written
def cocit_corpus_to_jsonl(meta_path, batch_data_path, root_path, out_path, area):
"""
Given the co-citation information (which sets of papers are co-cited), write out a jsonl
file with the abstracts and the metadata based on which training data for the model will
be formed (this will still need subsampling and additional cocitation-stats based filtering)
Also filter out data which is obviously noisy in the process.
In multiprocessing each thread will write one jsonl. In the end, bash is used to merge
all the jsonl files into one jsonl file.
:param meta_path: string; directory with pid2citcount files.
:param batch_data_path: string; directory with batched jsonl files.
:param root_path: string; top level directory with pid2batch file.
:param out_path: string; directory to write batch jsonl files to. Also where filtered citations
get written.
:param area: string; {'compsci', 'biomed'}
:return: writes to disk.
"""
batch_out_path = os.path.join(out_path, 'batch_data')
du.create_dir(batch_out_path)
with codecs.open(os.path.join(root_path, 'pid2batch.json'), 'r', 'utf-8') as fp:
pid2batch = json.load(fp)
print('Read: {:s}'.format(fp.name))
print(f'pid2batch: {len(pid2batch)}')
with codecs.open(os.path.join(meta_path, f'cocitpids2contexts-{area}.pickle'), 'rb') as fp:
cocitpids2contexts = pickle.load(fp)
print('Read: {:s}'.format(fp.name))
# Get all co-cited papers.
co_cited_pids = set()
for cocited_tuple in cocitpids2contexts.keys():
co_cited_pids.update(cocited_tuple)
# Get the batch numbers for the pids.
batch2pids = collections.defaultdict(list)
missing = 0
for pid in co_cited_pids:
try:
batch_num = pid2batch[pid]
batch2pids[batch_num].append(pid)
except KeyError:
missing += 1
continue
batch2pids = dict(batch2pids)
print(f'Total unique co-cited docs: {len(co_cited_pids)}; Missing in map: {missing}')
print(f'Number of batches: {len(batch2pids)}')
del pid2batch
unique_batch_fnames = ['{:d}.jsonl.gz'.format(bid) for bid in batch2pids.keys()]
di = du.DirMetaIterator(root_path=batch_data_path, yield_list=unique_batch_fnames, metadata_df=batch2pids,
args=(batch_out_path,))
# Start a pool of worker processes.
process_pool = mp.Pool(processes=mp.cpu_count()//2, maxtasksperchild=10000)
start = time.time()
processed_total = 0
written_total = 0
all_written_pids = set()
for batch_to_writepids, batch_written_pids in process_pool.imap_unordered(write_batch_absmeta, di,
chunksize=mp.cpu_count()//2):
all_written_pids.update(batch_written_pids)
processed_total += len(batch_to_writepids)
written_total += len(batch_written_pids)
print('Processed: {:d} Written: {:d}'.format(len(batch_to_writepids), len(batch_written_pids)))
# Close the pool.
process_pool.close()
process_pool.join()
# Exclude pids which were excluded.
cocitedpids2contexts_filt = {}
for cocit_pids, citcontexts in cocitpids2contexts.items():
filt_cocit_pids = []
for ccpid in cocit_pids:
if ccpid not in all_written_pids:
continue
else:
filt_cocit_pids.append(ccpid)
if len(filt_cocit_pids) > 1:
cocitedpids2contexts_filt[tuple(filt_cocit_pids)] = citcontexts
# Write out filtered co-citations and their stats.
with codecs.open(os.path.join(out_path, f'cocitpids2contexts-{area}-absfilt.pickle'), 'wb') as fp:
pickle.dump(cocitedpids2contexts_filt, fp)
print(f'Wrote: {fp.name}')
# Writing this out solely for readability.
with codecs.open(os.path.join(out_path, f'cocitpids2contexts-{area}-absfilt.json'), 'w', 'utf-8') as fp:
sorted_cocits = collections.OrderedDict()
for cocitpids, citcontexts in sorted(cocitedpids2contexts_filt.items(), key=lambda i: len(i[1])):
cocit_key = '-'.join(cocitpids)
sorted_cocits[cocit_key] = citcontexts
json.dump(sorted_cocits, fp, indent=1)
print(f'Wrote: {fp.name}')
num_cocited_pids = []
num_citcons = []
for cocitpids, citcontexts in cocitedpids2contexts_filt.items():
num_cocited_pids.append(len(cocitpids))
num_citcons.append(len(citcontexts))
all_summ = pd.DataFrame(num_cocited_pids).describe()
print('Papers co-cited together:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_cocited_pids)))
all_summ = pd.DataFrame(num_citcons).describe()
print('Papers co-cited frequency:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_citcons)))
print('Unfiltered: {:d} Filtered written papers: {:d}'.format(processed_total, written_total))
print('Unfiltered cocited sets: {:d}; Filtered cocited sets: {:d}'.
format(len(cocitpids2contexts), len(cocitedpids2contexts_filt)))
print('Took: {:.4f}s'.format(time.time() - start))
def gather_paper_batches(in_path, out_path):
"""
For the entire GORC corpus build a map of batch to paper id.
:return:
"""
batch_fnames = os.listdir(in_path)
batch2pid = {}
total_papers = 0
pid2batch = []
start = time.time()
for bi, bfname in enumerate(batch_fnames):
meta_csv = pd.read_csv(os.path.join(in_path, bfname), delimiter='\t', error_bad_lines=False,
engine='python', quoting=csv.QUOTE_NONE)
pids = meta_csv['pid'].tolist()
batch_num = int(bfname[:-4])
batch2pid[batch_num] = pids
total_papers += len(pids)
pid2batch.extend([(pid, batch_num) for pid in pids])
if bi % 100 == 0:
print('batch: {:d}; total_papers: {:d}'.format(bi, total_papers))
print('Total papers: {:d}'.format(total_papers))
with codecs.open(os.path.join(out_path, 'pid2batch.json'), 'w', 'utf-8') as fp:
pid2batch = dict(pid2batch)
json.dump(pid2batch, fp)
print('pid2batch: {:d}'.format(len(pid2batch)))
print('Wrote: {:s}'.format(fp.name))
with codecs.open(os.path.join(out_path, 'batch2pids.json'), 'w', 'utf-8') as fp:
json.dump(batch2pid, fp)
print('batch2pid: {:d}'.format(len(batch2pid)))
print('Wrote: {:s}'.format(fp.name))
print('Took: {:.4f}s'.format(time.time() - start))
def get_citation_count_large(query_meta_row, data_json):
"""
Given the metadata row for the paper making the citations and the
full text json data, return the outgoing citation counts and citation contexts.
:param query_meta_row: dict(); Generated from a pd.Series.
:param data_json: dict(); full paper dict from batch jsonl.
:return:
pid2citcount: dict; cited pid -> number of in-text citations in this paper.
pid2citcontext: dict; cited pid -> list of (par_index, sent_index, sentence) context tuples.
"""
# Sometimes the citations are NaN
try:
# Use the grobid ones because that's used to parse the text.
outbound_cits = ast.literal_eval(query_meta_row['grobid_bib_links'])
except ValueError:
return {}, {}
# Sometimes its an empty list.
if not outbound_cits:
return {}, {}
# Find the citation contexts in the passed json.
parsed_paper = data_json['grobid_parse']
# Get the mapping from bibid to the paper id in the dataset.
linked_bibid2pid = {}
for bibid, bibmetadata in parsed_paper['bib_entries'].items():
if bibmetadata['links']:
linked_bibid2pid[bibid] = bibmetadata['links']
# Go over the citations and count up how often they occur in the text.
# Only the linked citations will be counted up I think.
pid2citcount = collections.defaultdict(int)
# Each list element here will be (par_number, sentence_number, sentence_context)
pid2citcontext = collections.defaultdict(list)
for par_i, par_dict in enumerate(parsed_paper['body_text']):
par_text = par_dict['text']
par_sentences = scispacy_model(par_text,
disable=['tok2vec', 'tagger', 'attribute_ruler',
'lemmatizer', 'parser', 'ner'])
par_sentences = [sent.text for sent in par_sentences.sents]
for cit_span in par_dict['cite_spans']:
# Check for the refid being in the linked bib2pids.
if cit_span['ref_id'] and cit_span['ref_id'] in linked_bibid2pid:
cit_span_text = par_text[cit_span['start']:cit_span['end']]
pid = linked_bibid2pid[cit_span['ref_id']]
pid2citcount[pid] += 1
for sent_i, sent in enumerate(par_sentences):
if cit_span_text in sent:
context_tuple = (par_i, sent_i, sent)
pid2citcontext[pid].append(context_tuple)
return dict(pid2citcount), dict(pid2citcontext)
def write_batch_citation_contexts(args):
"""
Given a batch file, read the papers from it that are mentioned in the metadata dataframe
and write out the sentence contexts of their outgoing citations.
:param jsonl_fname: string; filename for current batch.
:param filtered_data_path: directory to which outputs should be written.
:param pids: pids of the papers we want from the current batch file.
:return: (total_papers, valid_papers); how many papers were examined and how many
had linked citation contexts written out.
"""
jsonl_fname, pids, batch_metadat_df, filtered_data_path = args
batch_num = int(os.path.basename(jsonl_fname)[:-6]) # Its 'batch_num.jsonl'
if len(pids) > 0:
data_file = codecs.open(jsonl_fname, 'r', 'utf-8')
citcontextf = codecs.open(os.path.join(filtered_data_path, 'pid2citcontext-{:d}.jsonl'.
format(batch_num)), 'w', 'utf-8')
citcountf = codecs.open(os.path.join(filtered_data_path, 'pid2citcount-{:d}.jsonl'.
format(batch_num)), 'w', 'utf-8')
pid2jsonlidx = {}
total_papers = 0
valid_papers = 0
for line in data_file:
data_json = json.loads(line.strip())
if int(data_json['paper_id']) in pids:
row = batch_metadat_df[batch_metadat_df['pid'] == int(data_json['paper_id'])]
assert(row.empty == False)
row = row.to_dict('records')
assert(len(row) == 1)
row = row[0]
total_papers += 1
citation_counts, citation_contexts = get_citation_count_large(
query_meta_row=row, data_json=data_json)
if len(citation_counts) == 0:
continue
pid2jsonlidx[row['pid']] = valid_papers
valid_papers += 1
citcontextf.write(json.dumps({row['pid']: citation_contexts})+'\n')
citcountf.write(json.dumps({row['pid']: citation_counts})+'\n')
# if valid_papers > 20:
# break
with codecs.open(os.path.join(filtered_data_path, 'pid2jsonlidx-{:d}.json'.format(batch_num)),
'w', 'utf-8') as fp:
json.dump(pid2jsonlidx, fp)
citcontextf.close()
citcountf.close()
return total_papers, valid_papers
else:
return 0, 0
def gather_from_citationnw_large(filt_data_path, meta_fname):
"""
Open up a metadata file of a host-service-filtered subset of the gorc dataset and
check if the cited file is part of the gorc data and count the number of times
a cited paper is cited in the query paper and the citation contexts it is in and
write out these counts for a set of query papers.
Write out citation contexts and counts as per-line jsons for a huge dataset
and per batch, which can then be merged with bash and python scripts (for the pid2idx file).
:param filt_data_path:
:param meta_fname: metadata file to gather cited papers for.
:return:
"""
query_meta = pd.read_csv(meta_fname, delimiter='\t', error_bad_lines=False,
engine='python', quoting=csv.QUOTE_NONE)
unique_batch_fnames = ['{:d}.jsonl'.format(bid) for bid in query_meta['batch_num'].unique()]
di = du.DirMetaIterator(root_path=filt_data_path, yield_list=unique_batch_fnames, metadata_df=query_meta,
args=(filt_data_path,), yield_meta=True)
process_pool = mp.Pool(processes=mp.cpu_count()//2, maxtasksperchild=10000)
start = time.time()
total_papers = 0
valid_papers = 0
for batch_processed_papers, batch_valid_papers in process_pool.imap_unordered(
write_batch_citation_contexts, di, chunksize=mp.cpu_count()//2):
total_papers += batch_processed_papers
valid_papers += batch_valid_papers
print('Wrote rows: {:d}'.format(valid_papers))
# Close the pool.
process_pool.close()
process_pool.join()
print('Examined papers: {:d}; Valid query papers: {:d}'.format(total_papers, valid_papers))
print('Took: {:.4f}s'.format(time.time() - start))
def get_filtbatch_citation_contexts(args):
"""
Given a batch file, read the citation context jsonls for the pids in the filtered
batch and return those cit contexts.
:param jsonl_fname: string; filename for current batch.
:param filtered_data_path: directory to which outputs should be written.
:param pids: pids of the papers we want from the current batch file.
:return:
writes outgoing cits for the area out to disk in a jsonl
- can be merged with bash after.
valid_citing_papers; number of citing papers written out.
outgoing_cits: set(string) pids for papers which are cited.
"""
citcontext_jsonl_fname, filt_pids, filtered_data_path, area = args
batch_num = int(os.path.basename(citcontext_jsonl_fname)[15:-6]) # Its 'pid2citcontext-{:d}.jsonl'
if len(filt_pids) > 0:
data_file = codecs.open(citcontext_jsonl_fname, 'r', 'utf-8')
citcontextf = codecs.open(os.path.join(filtered_data_path, f'pid2citcontext-{batch_num}-{area}.jsonl'),
'w', 'utf-8')
outgoing_cits = set()
valid_citing_papers = 0
for line in data_file:
citcontext_json = json.loads(line.strip())
assert(len(citcontext_json) == 1)
citing_pid = list(citcontext_json.keys())[0]
if int(citing_pid) in filt_pids:
cited_contexts = list(citcontext_json.values())[0]
outgoing_cits.update(list(cited_contexts.keys()))
citcontextf.write(json.dumps(citcontext_json)+'\n')
valid_citing_papers += 1
# if valid_citing_papers > 20:
# break
return valid_citing_papers, outgoing_cits
else:
return 0, {}
def filter_area_citcontexts(filt_data_path, root_path, area):
"""
- Open metadata file for full-text set of papers and get subset of rows
which belong to a single area.
- Send rows which are from the same batch to a batch function which returns
citcontext json lines for the pids which are of the same area.
- Write out the citcontext lines for the area to one file.
- Also get a list of all the papers which are outgoing so their metadata
can be gathered for future use.
:param filt_data_path: directory with jsonl files with the citation contexts.
:param meta_fname: fulltext metadata file from which to get filtered area metadata.
:param root_path: directory where outgoing cit pids are written.
:param area: {'biomed', 'compsci'}
:return:
"""
# The area metadata files are written to disk a-priori from the ipython shell.
meta_fname = os.path.join(root_path, f'metadata-gorcfulltext-{area}.tsv')
area_meta = pd.read_csv(meta_fname, delimiter='\t', error_bad_lines=False,
engine='python', quoting=csv.QUOTE_NONE)
unique_batch_fnames = [f'pid2citcontext-{bid}.jsonl' for bid in area_meta['batch_num'].unique()]
di = du.DirMetaIterator(root_path=filt_data_path, yield_list=unique_batch_fnames, metadata_df=area_meta,
args=(filt_data_path, area), yield_meta=False)
process_pool = mp.Pool(processes=mp.cpu_count()//2, maxtasksperchild=10000)
start = time.time()
valid_citing_papers = 0
outgoing_cits = set()
for batch_citing_paper_count, batch_outgoing_cits in process_pool.imap_unordered(
get_filtbatch_citation_contexts, di, chunksize=mp.cpu_count()//2):
valid_citing_papers += batch_citing_paper_count
print('Wrote rows: {:d}'.format(batch_citing_paper_count))
outgoing_cits.update(batch_outgoing_cits)
with open(os.path.join(root_path, f'outgoing-citpids-{area}.pickle'), 'wb') as fp:
pickle.dump(outgoing_cits, fp)
print(f'Wrote: {fp.name}')
# Close the pool.
process_pool.close()
process_pool.join()
print(f'Area metadata: {area_meta.shape}')
print(f'Valid query papers: {valid_citing_papers}')
print(f'Total unique outgoing citations: {len(outgoing_cits)}')
print('Took: {:.4f}s'.format(time.time() - start))
def gather_cocitations(root_path, area):
"""
- Read in citation contexts.
- Go over the citation contexts and group them into co-citations.
- Compute stats.
- Save co-citations to disk.
"""
citation_contexts = codecs.open(os.path.join(root_path, f'pid2citcontext-{area}.jsonl'), 'r', 'utf-8')
all_cocitedpids2contexts = collections.defaultdict(list)
singlecited2contexts = collections.defaultdict(list)
examined_papers = 0
for citcon_line in citation_contexts:
if examined_papers % 1000 == 0:
print(f'Examined papers: {examined_papers}')
citcond = json.loads(citcon_line.strip())
citing_pid, cited2contexts = list(citcond.keys())[0], list(citcond.values())[0]
paper_co_citations = collections.defaultdict(list)
# Go over all the cited papers and get the co-citations by sentence position.
for cited_pid, context_tuples in cited2contexts.items():
# Cited papers can have multiple instances in the citing paper.
for ct in context_tuples: # ct is (par_i, sent_i, sent)
par_i, sent_i, con_sent = ct[0], ct[1], ct[2]
# Papers in the same sentence are co-cited.
paper_co_citations[(par_i, sent_i)].append((cited_pid, con_sent))
# Gather the co-cited papers by pid.
paper_cocitpids2contexts = collections.defaultdict(list)
for co_cited_tuple in paper_co_citations.values():
# There has to be at least one element and all of the sents will be the same.
cit_sent = co_cited_tuple[0][1]
# There can be repeated citations of the same thing in the same sentence
# or somehow multiple instances of the same pid occur in the parsed spans.
co_cited_pids = list(set([t[0] for t in co_cited_tuple]))
co_cited_pids.sort()
# The same co-cited set of pids in a paper may have multiple different
# cit contexts. Gather those here.
paper_cocitpids2contexts[tuple(co_cited_pids)].append((citing_pid, cit_sent))
# Merge the co-citations across the corpus.
for cocitpids, citcontexts in paper_cocitpids2contexts.items():
# Use this if writing to a json file instead of pickle.
# cocitpids_key = '-'.join(list(cocitpids))
if len(cocitpids) == 1:
singlecited2contexts[cocitpids].extend(citcontexts)
else:
all_cocitedpids2contexts[cocitpids].extend(citcontexts)
examined_papers += 1
# if examined_papers > 50000:
# break
# Write out single citations and their stats.
with codecs.open(os.path.join(root_path, f'singlecitpids2contexts-{area}.pickle'), 'wb') as fp:
pickle.dump(singlecited2contexts, fp)
print(f'Wrote: {fp.name}')
num_sincited_pids = []
num_sincitcons = []
for cocitpids, citcontexts in singlecited2contexts.items():
num_sincited_pids.append(len(cocitpids))
num_sincitcons.append(len(citcontexts))
all_summ = pd.DataFrame(num_sincitcons).describe()
print('Single papers cited frequency:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_sincitcons)))
# Write out co-citations and their stats.
with codecs.open(os.path.join(root_path, f'cocitpids2contexts-{area}.pickle'), 'wb') as fp:
pickle.dump(all_cocitedpids2contexts, fp)
print(f'Wrote: {fp.name}')
# Writing this out solely for readability.
with codecs.open(os.path.join(root_path, f'cocitpids2contexts-{area}.json'), 'w', 'utf-8') as fp:
sorted_cocits = collections.OrderedDict()
for cocitpids, citcontexts in sorted(all_cocitedpids2contexts.items(), key=lambda i: len(i[1])):
cocit_key = '-'.join(cocitpids)
sorted_cocits[cocit_key] = citcontexts
json.dump(sorted_cocits, fp, indent=1)
print(f'Wrote: {fp.name}')
num_cocited_pids = []
num_citcons = []
for cocitpids, citcontexts in all_cocitedpids2contexts.items():
num_cocited_pids.append(len(cocitpids))
num_citcons.append(len(citcontexts))
all_summ = pd.DataFrame(num_cocited_pids).describe()
print('Papers co-cited together:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_cocited_pids)))
all_summ = pd.DataFrame(num_citcons).describe()
print('Papers co-cited frequency:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_citcons)))
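# The cocitpids2contexts pickle written above maps a tuple of co-cited pids to the
# list of (citing_pid, citation_sentence) pairs in which they were cited together;
# the readable json version joins the pid tuple with '-' for its keys. A made-up
# entry looks like:
#
# {
#   ("1001", "1002"): [
#     ("2001", "Prior work has studied this problem [3, 7]."),
#     ("2002", "Similar approaches were proposed in [12, 15].")
#   ]
# }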
def main():
"""
Parse command line arguments and call all the above routines.
:return:
"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest=u'subcommand',
help=u'The action to perform.')
# Filter the metadata to group them by hosting service.
filter_hostserv = subparsers.add_parser('filter_by_hostserv')
filter_hostserv.add_argument('-i', '--raw_meta_path', required=True,
help='Directory with batchwise metadata.')
filter_hostserv.add_argument('-o', '--filt_meta_path', required=True,
help='Directory where filtered metadata files should get written.')
filter_hostserv.add_argument('-d', '--dataset', required=True,
choices=['gorcfulltext'],
help='The dataset to filter for.')
# Gather filtered papers.
gather_hostserv = subparsers.add_parser('gather_by_hostserv')
gather_hostserv.add_argument('-i', '--in_meta_path', required=True,
help='Directory with a filtered metadata tsv file.')
gather_hostserv.add_argument('-o', '--raw_data_path', required=True,
help='Directory where batches of raw data.')
gather_hostserv.add_argument('-d', '--dataset', required=True,
choices=['gorcfulltext'],
help='The dataset to gather papers for.')
# Gather pids and batches.
batch_pids = subparsers.add_parser('get_batch_pids')
batch_pids.add_argument('-i', '--in_path', required=True,
help='Directory with a batched tsv files.')
batch_pids.add_argument('-o', '--out_path', required=True,
help='Directory to write batch to pid maps.')
# Gather pids and batches.
gather_citnw = subparsers.add_parser('gather_from_citationnw')
gather_citnw.add_argument('-r', '--root_path', required=True,
help='Directory metadata, paper data and where outputs should be written.')
gather_citnw.add_argument('-d', '--dataset', required=True,
choices=['gorcfulltext'])
# Filter co-citation contexts.
filter_citcon_area = subparsers.add_parser('filter_area_citcontexts')
filter_citcon_area.add_argument('--root_path', required=True,
help='Directory with metadata, paper data and where '
'outputs should be written.')
filter_citcon_area.add_argument('--area', required=True,
choices=['compsci', 'biomed'])
# Gather co-citation contexts.
gather_cocit_cons = subparsers.add_parser('gather_area_cocits')
gather_cocit_cons.add_argument('--root_path', required=True,
help='Directory with metadata, paper data and where '
'outputs should be written.')
gather_cocit_cons.add_argument('--area', required=True,
choices=['compsci', 'biomed'])
gather_cocitjsonl = subparsers.add_parser('gather_filtcocit_corpus')
gather_cocitjsonl.add_argument('--root_path', required=True,
help='Directory with pid2batch.')
gather_cocitjsonl.add_argument('--in_meta_path', required=True,
help='Directory with a filtered metadata tsv file.')
gather_cocitjsonl.add_argument('--raw_data_path', required=True,
help='Directory where batches of raw data.')
gather_cocitjsonl.add_argument('--out_path', required=True,
help='Directory where batches of title/abstract jsonl files '
'and filtered citation map should be written.')
gather_cocitjsonl.add_argument('--dataset', required=True,
choices=['s2orcbiomed', 's2orccompsci', 's2orcmatsci'],
help='Dataset for which outputs should be written.')
cl_args = parser.parse_args()
if cl_args.subcommand == 'filter_by_hostserv':
if cl_args.dataset == 'gorcfulltext':
filter_metadata(raw_meta_path=cl_args.raw_meta_path,
filtered_meta_path=cl_args.filt_meta_path,
filter_method='full text')
elif cl_args.subcommand == 'gather_by_hostserv':
if cl_args.dataset in {'gorcfulltext'}:
meta_fname = os.path.join(cl_args.in_meta_path, 'metadata-{:s}.tsv'.format(cl_args.dataset))
gather_papers(meta_fname=meta_fname, raw_data_path=cl_args.raw_data_path)
elif cl_args.subcommand == 'get_batch_pids':
# Run once for the entire gorc corpus, no need to re-run over and over.
gather_paper_batches(in_path=cl_args.in_path, out_path=cl_args.out_path)
elif cl_args.subcommand == 'gather_from_citationnw':
filt_root_path = os.path.join(cl_args.root_path, 'hostservice_filt')
if cl_args.dataset == 'gorcfulltext':
meta_fname = os.path.join(filt_root_path, 'metadata-gorcfulltext.tsv')
batch_data_path = os.path.join(filt_root_path, 'data-gorcfulltext')
gather_from_citationnw_large(filt_data_path=batch_data_path, meta_fname=meta_fname)
elif cl_args.subcommand == 'filter_area_citcontexts':
filt_root_path = os.path.join(cl_args.root_path, 'hostservice_filt')
batch_data_path = os.path.join(filt_root_path, 'data-gorcfulltext')
filter_area_citcontexts(filt_data_path=batch_data_path, area=cl_args.area,
root_path=filt_root_path)
elif cl_args.subcommand == 'gather_area_cocits':
filt_root_path = os.path.join(cl_args.root_path, 'hostservice_filt')
gather_cocitations(root_path=filt_root_path, area=cl_args.area)
elif cl_args.subcommand == 'gather_filtcocit_corpus':
if cl_args.dataset == 's2orccompsci':
area = 'compsci'
elif cl_args.dataset == 's2orcbiomed':
area = 'biomed'
cocit_corpus_to_jsonl(meta_path=cl_args.in_meta_path, batch_data_path=cl_args.raw_data_path,
out_path=cl_args.out_path, area=area, root_path=cl_args.root_path)
if __name__ == '__main__':
main()
| aspire-main | src/pre_process/pre_proc_gorc.py |
# For relative imports to work in Python 3.6
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__))) | aspire-main | src/pre_process/__init__.py |
# Constants for filtering absracts for training data.
MIN_ABS_LEN = 3
MAX_ABS_LEN = 20
MAX_NUM_TOKS = 80
MIN_NUM_TOKS = 4
| aspire-main | src/pre_process/pp_settings.py |
"""
Process the TREC-COVID dataset into a form I use.
"""
import os
import codecs
import json
import collections
import random
import sys
import xml.etree.ElementTree as ET
import pandas as pd
import csv
import spacy
import data_utils as du
scispacy_model = spacy.load("en_core_sci_sm")
scispacy_model.add_pipe('sentencizer')
def topics2json(in_path, out_path):
"""
Convert the xml topics file to a json file.
"""
in_fname = os.path.join(in_path, 'topics-rnd5.xml')
doc_tree = ET.parse(in_fname)
doc_root = doc_tree.getroot()
topic2meta = {}
for child in doc_root.iter():
if child.tag == 'topic':
number = child.attrib['number']
d = {}
for s in child.iter():
if s.tag in {'query', 'question', 'narrative'}:
d[s.tag] = s.text
topic2meta[number] = d
with codecs.open(os.path.join(out_path, 'topics-rnd5.json'), 'w', 'utf-8') as fp:
json.dump(topic2meta, fp, indent=2)
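# The topics-rnd5.json written above maps topic numbers to their query, question,
# and narrative fields from the TREC-COVID topics xml; a shortened, made-up entry:
#
# {
#   "1": {
#     "query": "coronavirus origin",
#     "question": "what is the origin of COVID-19",
#     "narrative": "seeking a range of information about the origin of the virus ..."
#   }
# }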
def print_relevances(in_path, out_path):
"""
- Read in qrels.
- Read in metadata.
- For every topic, print the distribution of relevance judgements and write out
a readable file with a sample of judged documents at each relevance level.
"""
# Read in abstracts.
meta_fname = os.path.join(in_path, 'metadata.csv')
abstracts_meta = pd.read_csv(meta_fname, delimiter=',', error_bad_lines=False)
# Read in qrels file.
qrel_file = codecs.open(os.path.join(in_path, 'qrels-covid_d5_j0.5-5.txt'), 'r', 'utf-8')
topic2judgement_pool = collections.defaultdict(list)
for qrel_line in qrel_file:
parts = qrel_line.strip().split()
topic_id, jround, doc_id, judgement = parts[0], parts[1], parts[2], parts[3]
topic2judgement_pool[topic_id].append((doc_id, judgement))
topic2judgement_pool = dict(topic2judgement_pool)
for topic_id in topic2judgement_pool:
topic2judgement_pool[topic_id] = dict(topic2judgement_pool[topic_id])
# Read in topics json.
with codecs.open(os.path.join(in_path, 'topics-rnd5.json'), 'r', 'utf-8') as fp:
topics = json.load(fp)
# Print out relevance ratings for the original query for examination.
topic_qrels_path = os.path.join(in_path, 'readable_topic_qrels')
du.create_dir(topic_qrels_path)
for topic_id in range(1, 51, 1):
# Print out distribution of relevances.
judgement2cand = collections.defaultdict(list)
for cand_did, rel in topic2judgement_pool[str(topic_id)].items():
judgement2cand[rel].append(cand_did)
j2len = []
for rel in judgement2cand:
j2len.append((rel, len(judgement2cand[rel])))
j2len = dict(j2len)
print('topic: {:d}; relevances: {:}'.format(topic_id, j2len))
query, question, narrative = topics[str(topic_id)]['query'], \
topics[str(topic_id)]['question'], \
topics[str(topic_id)]['narrative']
# Print out a handful of documents from each relevance level.
outf = codecs.open(os.path.join(topic_qrels_path, f'{topic_id}-readable.txt'), 'w', 'utf-8')
print(f'topic_id: {topic_id}', file=outf)
print(f'query: {query}', file=outf)
print(f'question: {question}', file=outf)
print(f'narrative: {narrative}\n\n', file=outf)
for rel, cands in sorted(judgement2cand.items(), key=lambda i: i[0], reverse=True):
out_cands = random.sample(cands, min(25, len(cands)))
for doc_id in out_cands:
doc_row = abstracts_meta.loc[abstracts_meta['cord_uid'] == doc_id]
doc_row = doc_row.to_dict()
print(f'cord_uid: {doc_id}', file=outf)
print(f'relevance: {rel}', file=outf)
try:
title = list(doc_row['title'].values())[0]
except IndexError:
title = None
try:
abstract = list(doc_row['abstract'].values())[0]
except IndexError:
abstract = None
print(f"Title: {title}", file=outf)
print(f"Abstract:\n {abstract}", file=outf)
print('====================', file=outf)
def get_qbe_pools(in_path, out_path):
"""
- Read in qrels and keep only the most relevant (judgement 2) documents per topic.
- Gather abstracts for these documents and write them to a jsonl file.
- Build query-by-example pools per topic and write out the annotations and a
queries release csv.
"""
random.seed(472945)
# Read in abstracts.
meta_fname = os.path.join(in_path, 'metadata-2021-06-21.csv')
rel_meta = pd.read_csv(meta_fname, delimiter=',', error_bad_lines=False)
# Read in topics json.
with codecs.open(os.path.join(in_path, 'topics-rnd5.json'), 'r', 'utf-8') as fp:
topics = json.load(fp)
# Read in only the top relevant docs.
qrel_file = codecs.open(os.path.join(in_path, 'qrels-covid_d5_j0.5-5.txt'), 'r', 'utf-8')
topic2relevant_pool = collections.defaultdict(list)
docid2reltopic = collections.defaultdict(list)
for qrel_line in qrel_file:
parts = qrel_line.strip().split()
topic_id, jround, doc_id, judgement = parts[0], parts[1], parts[2].strip(), parts[3]
if judgement == '2':
topic2relevant_pool[topic_id].append(doc_id)
docid2reltopic[doc_id].append(topic_id)
num_relevant = []
for topic, reldocs in topic2relevant_pool.items():
num_relevant.append(len(reldocs))
summary = pd.DataFrame(num_relevant).describe()
print('Relevant docs: {:}'.format(summary))
all_docs = [item for sublist in topic2relevant_pool.values() for item in sublist]
all_docs_uniq = list(set(all_docs))
print('Corpus size: {:d}; Unique corpus size: {:d}'.format(len(all_docs), len(all_docs_uniq)))
# Read in abstracts of the papers which are relevant
abstract_jsonl = codecs.open(os.path.join(out_path, 'abstracts-treccovid.jsonl'), 'w', 'utf-8')
pid2abstract = {}
abstract_not_obtained = 0
docs_with_data = set()
useful_subset = rel_meta.loc[rel_meta['cord_uid'].isin(all_docs_uniq)]
print('Docs found in metadata: {:}'.format(useful_subset.shape))
for idx, doc_row in useful_subset.iterrows():
doc_id = doc_row['cord_uid']
# doc_row = rel_meta.loc[rel_meta['cord_uid'] == doc_id]
# doc_row = doc_row.to_dict()
try:
title = doc_row['title']
abs_text = doc_row['abstract']
except IndexError:
title = None
abs_text = None
if isinstance(title, str) and isinstance(abs_text, str) and (doc_id not in docs_with_data):
date = doc_row['publish_time']
year = date.split('-')[0] if isinstance(date, str) else None
authors = doc_row['authors']
if not isinstance(authors, str):
authors = None
try:
abstract_sents = scispacy_model(abs_text,
disable=['tok2vec', 'tagger', 'attribute_ruler',
'lemmatizer', 'parser', 'ner'])
except TypeError:
print(doc_id)
print(abs_text)
sys.exit()
abs_sentences = [sent.text for sent in abstract_sents.sents]
doc_rel_topics = docid2reltopic[doc_id]
narratives = [topics[t] for t in doc_rel_topics]
docd = {
'paper_id': doc_id,
'title': title.strip(),
'abstract': abs_sentences,
'metadata': {'year': year, 'authors': authors},
'topic_ids': '-'.join(doc_rel_topics),
'topic_narratives': narratives
}
pid2abstract[doc_id] = docd
abstract_jsonl.write(json.dumps(docd)+'\n')
docs_with_data.add(doc_id)
else:
abstract_not_obtained += 1
print('Docs without abstract/titles: {:d}'.format(abstract_not_obtained))
print('Wrote: {:}'.format(abstract_jsonl.name))
abstract_jsonl.close()
assert(len(docs_with_data) == len(pid2abstract))
# Build relevance annotation file;
# Only do this for docs which have abstracts present.
topic2relevant_pool_present = collections.defaultdict(list)
for topicid, pool in topic2relevant_pool.items():
for pid in pool:
if pid in docs_with_data:
topic2relevant_pool_present[topicid].append(pid)
print('Topics with valid docs: {:d}'.format(len(topic2relevant_pool_present)))
# Only use queries which are relevant for a single topic.
multi_rel_docs = []
for doc_id, reltopics in docid2reltopic.items():
if len(reltopics) > 1:
multi_rel_docs.append(doc_id)
print('Docs relevant for multiple topics: {:d}'.format(len(multi_rel_docs)))
qpid2anns = {}
all_qbe_qpids = []
num_cands = []
    # Go over the 50 topics and select up to 50 docs at random to act as queries
# and get positives and negatives wrt these.
for topicid, relpool in sorted(topic2relevant_pool_present.items(), key=lambda i: len(i[1])):
tqpids = []
random.shuffle(relpool)
for tpid in relpool:
            # Get per topic queries such that they're unique across topics;
            # exclude docs relevant to multiple topics and keep at most 50 queries per topic.
if (tpid not in all_qbe_qpids) and (tpid not in multi_rel_docs) and (len(tqpids) < 50):
tqpids.append(tpid)
print(f'topic: {topicid}; QBE queries: {len(tqpids)}')
all_qbe_qpids.extend(tqpids)
for qpid in tqpids:
pos_cand_pool = [pid for pid in relpool if pid != qpid]
pool_rels = [1]*len(pos_cand_pool)
            # All docs relevant to other topics are negatives;
            # docs relevant to this topic (even if also relevant to other topics) are excluded.
neg_cand_pool = list(set.difference(set(docs_with_data), set(relpool)))
negpool_rels = [0]*len(neg_cand_pool)
cands = pos_cand_pool + neg_cand_pool
rels = pool_rels + negpool_rels
assert(len(cands) == len(rels))
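            # Each query maps to its full candidate pool and parallel 0/1 relevance labels.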
qpid2anns[qpid] = {'cands': cands, 'relevance_adju': rels}
num_cands.append(len(cands))
print('Number of QBE queries: {:d}; unique QBE queries: {:d}'.
format(len(all_qbe_qpids), len(set(all_qbe_qpids))))
csum = pd.DataFrame(num_cands).describe()
print('Number of candidates per QBE query: {:}'.format(csum))
with codecs.open(os.path.join(out_path, 'test-pid2anns-treccovid.json'), 'w', 'utf-8') as fp:
json.dump(qpid2anns, fp)
print('Wrote: {:}'.format(fp.name))
# Build queries release file.
query_meta_file = codecs.open(os.path.join(out_path, 'treccovid-queries-release.csv'), 'w', 'utf-8')
query_meta_csv = csv.DictWriter(query_meta_file, extrasaction='ignore',
fieldnames=['paper_id', 'title', 'year', 'topic_ids'])
query_meta_csv.writeheader()
for qpid in all_qbe_qpids:
md = {'paper_id': qpid,
'title': pid2abstract[qpid]['title'],
'year': pid2abstract[qpid]['metadata']['year'],
'topic_ids': pid2abstract[qpid]['topic_ids']}
query_meta_csv.writerow(md)
print('Wrote: {:}'.format(query_meta_file.name))
query_meta_file.close()
def setup_splits(in_path, out_path):
"""
Read in queries release file and write out half the queries as
dev and the rest as test. Make the splits at the level of topics.
"""
random.seed(582)
with codecs.open(os.path.join(in_path, 'treccovid-queries-release.csv'), 'r', 'utf-8') as fp:
csv_reader = csv.DictReader(fp)
topic2pids = collections.defaultdict(list)
for row in csv_reader:
topic2pids[row['topic_ids']].append(row['paper_id'])
topic_ids = list(topic2pids.keys())
topic_ids.sort()
random.shuffle(topic_ids)
dev_topics = topic_ids[:len(topic_ids)//2]
dev = [topic2pids[tid] for tid in dev_topics]
dev = [item for sublist in dev for item in sublist]
test_topics = topic_ids[len(topic_ids)//2:]
test = [topic2pids[tid] for tid in test_topics]
test = [item for sublist in test for item in sublist]
eval_splits = {'dev': dev, 'test': test}
print(f'dev_topics: {len(dev_topics)}; test_topics: {len(test_topics)}')
print(f'dev_pids: {len(dev)}; test_pids: {len(test)}')
with codecs.open(os.path.join(out_path, 'treccovid-evaluation_splits.json'), 'w', 'utf-8') as fp:
json.dump(eval_splits, fp)
print('Wrote: {:s}'.format(fp.name))
if __name__ == '__main__':
# topics2json(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid',
# out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid')
# print_relevances(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid',
# out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid')
# get_qbe_pools(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid',
# out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid')
setup_splits(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid',
out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/trec-covid')
| aspire-main | src/pre_process/pre_proc_treccovid.py |
"""
Miscellaneous utilities to read and work with the json files and such.
Stuff multiple functions use.
"""
import sys
import os
import errno
import pandas as pd
class DirIterator:
def __init__(self, root_path, yield_list, args=None, max_count=None, ):
"""
Generator over the file names. Typically consumed by the map_unordered
executable which map_unordered would run.
:param root_path: string; the directory with the files to iterate over.
:param yield_list: list; the list of things in_path to yield.
:param args: tuple; the set of arguments to be returned with each in_file
and out_file name. This could be the set of arguments which the
callable consuming the arguments might need. This needs to be fixed
however.
:param max_count: int; how many items to yield.
:returns:
tuple:
(in_paper,): a path to a file to open and do things with.
(in_paper, out_paper): paths to input file and file to write
processed content to.
(in_paper, args): set of arguments to the function
doing the processing.
(in_paper, out_paper, args): set of arguments to the function
doing the processing.
"""
self.in_path = root_path
self.yield_list = yield_list
self.optional_args = args
self.max_count = max_count
def __iter__(self):
count = 0
for doi in self.yield_list:
if self.max_count:
if count >= self.max_count:
                    return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+; end iteration instead.
in_paper = os.path.join(self.in_path, doi.strip())
if self.optional_args:
yield (in_paper,) + self.optional_args
else:
yield in_paper
count += 1
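# A minimal usage sketch for DirIterator (paths and arguments here are illustrative, not from the repo):
# di = DirIterator(root_path='/data/batches', yield_list=['0.jsonl', '1.jsonl'], args=('/data/out',))
# for in_fname, out_root in di:
#     ...  # each yielded tuple pairs an input file path with the extra args.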
class DirMetaIterator:
def __init__(self, root_path, yield_list, metadata_df, yield_meta=False, args=None, max_count=None):
"""
Generator over the file names and yields pids of papers in a file.
Typically consumed by the map_unordered executable which map_unordered
would run; specifically consumed by pre_proc_gorc.gather_papers()
:param root_path: string; the directory with the files to iterate over.
:param yield_list: list; the list of things in_path to yield.
:param yield_meta: bool; whether the yielded items should contain parts of metadata_df
:param metadata_df: pandas.df; metadata data from which to select subsets of
rows with the same batch id and get pids for.
29 June 2021: Hack but this can also be a dict.
:param args: tuple; the set of arguments to be returned with each in_file
and out_file name. This could be the set of arguments which the
callable consuming the arguments might need. This needs to be fixed
however.
:param max_count: int; how many items to yield.
:returns:
tuple:
(in_paper,): a path to a file to open and do things with.
(in_paper, out_paper): paths to input file and file to write
processed content to.
(in_paper, args): set of arguments to the function
doing the processing.
(in_paper, out_paper, args): set of arguments to the function
doing the processing.
"""
self.in_path = root_path
self.yield_list = yield_list
self.yield_meta = yield_meta
self.optional_args = args
self.max_count = max_count
self.metadata_df = metadata_df
def __iter__(self):
count = 0
for batch_fname in self.yield_list:
if self.max_count:
if count >= self.max_count:
                    return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+; end iteration instead.
in_fname = os.path.join(self.in_path, batch_fname.strip())
if str.endswith(batch_fname, '.jsonl') and str.startswith(batch_fname, 'pid2citcontext-'):
batch_num = int(batch_fname[15:-6])
else:
if str.endswith(batch_fname, 'jsonl.gz'):
batch_num = int(batch_fname[:-9])
elif str.endswith(batch_fname, '.jsonl'):
batch_num = int(batch_fname[:-6])
if isinstance(self.metadata_df, pd.DataFrame):
batch_metadata_df = self.metadata_df[self.metadata_df['batch_num'] == batch_num]
pids = set(batch_metadata_df['pid'].values)
elif isinstance(self.metadata_df, dict):
pids = set(self.metadata_df[batch_num])
pids = [int(p) for p in pids]
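            # NOTE: batch_metadata_df is only defined in the DataFrame branch above, so
            # yield_meta=True assumes metadata_df is a pandas DataFrame.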
if self.yield_meta:
yield_items = (in_fname, pids, batch_metadata_df)
else:
yield_items = (in_fname, pids)
if self.optional_args:
yield yield_items + self.optional_args
else:
yield yield_items
count += 1
def create_dir(dir_name):
"""
Create the directory whose name is passed.
:param dir_name: String saying the name of directory to create.
:return: None.
"""
    # Create output directory if it doesn't exist.
try:
os.makedirs(dir_name)
print('Created: {}.'.format(dir_name))
except OSError as ose:
        # Handle the case where a *file* with the name dir_name already exists.
if (not os.path.isdir(dir_name)) and (ose.errno == errno.EEXIST):
sys.stderr.write('IO ERROR: Could not create output directory\n')
sys.exit(1)
        # If it's some other error, report it and exit.
if ose.errno != errno.EEXIST:
sys.stderr.write('OS ERROR: {:d}: {:s}: {:s}\n'.format(ose.errno,
ose.strerror,
dir_name))
sys.exit(1)
| aspire-main | src/pre_process/data_utils.py |
"""
Functions to work with co-citations in each area.
"""
import os
import random
import math
import argparse
import time
import collections
import itertools
import re
import pprint
import pickle
import codecs, json
import pandas as pd
import torch
import numpy as np
# import spacy
from sentence_transformers import SentenceTransformer, models
import data_utils as du
import pp_settings as pps
# scispacy_model = spacy.load("en_core_sci_sm")
# scispacy_model.add_pipe('sentencizer')
class AbsSentenceStream:
"""
Given a list of pids, returns their sentences.
"""
def __init__(self, in_pids, pid2abstract):
"""
:param in_pids:
:param pid2abstract:
"""
self.in_pids = in_pids
self.pid2abstract = pid2abstract
self.num_sents = self.count_sents()
def __len__(self):
return self.num_sents
def count_sents(self):
nsents = 0
for pid in self.in_pids:
doc = self.pid2abstract[pid]['abstract']
nsents += len(doc)
return nsents
def __iter__(self):
return self.next()
def next(self):
# In each loop iteration return one example.
for pid in self.in_pids:
doc = self.pid2abstract[pid]['abstract']
for sent in doc:
yield sent
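# AbsSentenceStream and ContextSentenceStream are passed to SentenceTransformer's
# encode_multi_process below; they expose __len__ and yield one sentence at a time
# so all sentences need not be materialized in a list up front.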
class ContextSentenceStream:
"""
Given a list of pids, returns their sentences.
"""
def __init__(self, listofcontexts):
"""
:param listofcontexts: list(list(tuple(pid, sent)))
"""
self.listofcontexts = listofcontexts
self.num_sents = self.count_sents()
def __len__(self):
return self.num_sents
def count_sents(self):
nsents = 0
for clist in self.listofcontexts:
nsents += len(clist)
return nsents
def __iter__(self):
return self.next()
def next(self):
# In each loop iteration return one example.
for clist in self.listofcontexts:
for c in clist:
yield c[1]
def filter_cocitation_papers(run_path, dataset):
"""
    Read in the absfilt co-citations and filter out co-citations using:
    - the number of co-cited papers.
    - the number of tokens in the citation context.
    - whether the citation context was spuriously tagged as a citation context:
        - The heuristic for this is when the sentence doesn't contain any [] or ().
This is more important in biomed papers than in compsci papers.
This is used to train the abstract level similarity models.
"""
dataset2area = {
's2orccompsci': 'compsci',
's2orcbiomed': 'biomed'
}
area = dataset2area[dataset]
with open(os.path.join(run_path, f'cocitpids2contexts-{area}-absfilt.pickle'), 'rb') as fp:
cocitpids2contexts = pickle.load(fp)
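    # cocitpids2contexts: maps a tuple of co-cited paper ids to a list of
    # (citing_pid, citation_context_sentence) tuples.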
# Filter out noise.
cocitedpids2contexts_filt = {}
sc_copy_count = 0
for cocitpids, contexts in cocitpids2contexts.items():
if len(cocitpids) > 3:
continue
else:
# Sometimes the contexts are exact copies but from diff papers.
# Get rid of these.
con2pids = collections.defaultdict(list)
for sc in contexts:
# Sometimes they differ only by the inline citation numbers, replace those.
sc_no_nums = re.sub(r'\d', '', sc[1])
con2pids[sc_no_nums].append(sc)
if len(con2pids) < len(contexts):
sc_copy_count += 1
uniq_scons = []
for norm_con, contextt in con2pids.items():
uniq_scons.append(contextt[0])
fcons = []
citing_pids = set()
for sc in uniq_scons:
# If the same paper is making the co-citation multiple times
# only use the first of the co-citations. Multiple by the same citing
# paper count as a single co-citation.
if sc[0] in citing_pids:
continue
# Filter context by length.
if len(sc[1].split()) > 60 or len(sc[1].split()) < 5:
continue
                # Filter noisy citation contexts.
elif ("(" not in sc[1] and ")" not in sc[1]) and ("[" not in sc[1] and "]" not in sc[1]):
continue
else:
fcons.append(sc)
# Update pids only if the sentence was used.
citing_pids.add(sc[0])
if len(fcons) > 0:
cocitedpids2contexts_filt[cocitpids] = fcons
# Write out filtered co-citations and their stats.
with codecs.open(os.path.join(run_path, f'cocitpids2contexts-{area}-absnoisefilt.pickle'), 'wb') as fp:
pickle.dump(cocitedpids2contexts_filt, fp)
print(f'Wrote: {fp.name}')
# Writing this out solely for readability.
with codecs.open(os.path.join(run_path, f'cocitpids2contexts-{area}-absnoisefilt.json'), 'w', 'utf-8') as fp:
sorted_cocits = collections.OrderedDict()
for cocitpids, citcontexts in sorted(cocitedpids2contexts_filt.items(), key=lambda i: len(i[1])):
cocit_key = '-'.join(cocitpids)
sorted_cocits[cocit_key] = citcontexts
json.dump(sorted_cocits, fp, indent=1)
print(f'Wrote: {fp.name}')
num_citcons = []
example_count = 0 # The approximate number of triples which will be generated as training data.
for cocitpids, citcontexts in cocitedpids2contexts_filt.items():
num_citcons.append(len(citcontexts))
if len(cocitpids) == 2:
example_count += 1
elif len(cocitpids) == 3:
example_count += 3
all_summ = pd.DataFrame(num_citcons).describe()
print('Papers co-cited frequency:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_citcons)))
print(f'Copies of co-citation context: {sc_copy_count}')
print(f'Approximate number of possible triple examples: {example_count}')
def filter_cocitation_sentences(run_path, dataset):
"""
Generate data to train sentence level "paraphrasing" models like SentBERT.
    For papers which are co-cited more than once, filter contexts using:
    - the number of tokens in the citation context.
    - whether the citation context was spuriously tagged as a citation context:
        - The heuristic for this is when the sentence doesn't contain any [] or ().
This is more important in biomed papers than in compsci papers.
"""
dataset2area = {
's2orccompsci': 'compsci',
's2orcbiomed': 'biomed'
}
area = dataset2area[dataset]
with open(os.path.join(run_path, f'cocitpids2contexts-{area}-absfilt.pickle'), 'rb') as fp:
cocitpids2contexts = pickle.load(fp)
# Gather sentences which are roughly paraphrases.
cocitedpids2contexts_filt = {}
sc_copy_count = 0
for cocitpids, contexts in cocitpids2contexts.items():
if len(contexts) < 2:
continue
else:
# Sometimes the contexts are exact copies but from diff papers.
# Get rid of these.
con2pids = collections.defaultdict(list)
for sc in contexts:
# Sometimes they differ only by the inline citation numbers, replace those.
sc_no_nums = re.sub(r'\d', '', sc[1])
con2pids[sc_no_nums].append(sc)
if len(con2pids) < len(contexts):
sc_copy_count += 1
uniq_scons = []
for norm_con, contextt in con2pids.items():
uniq_scons.append(contextt[0])
fcons = []
citing_pids = set()
for sc in uniq_scons:
# If the same paper is making the co-citation multiple times
# only use the first of the co-citations. Multiple by the same citing
# paper count as a single co-citation.
if sc[0] in citing_pids:
continue
# Filter context by length.
if len(sc[1].split()) > 60 or len(sc[1].split()) < 5:
continue
                # Filter noisy citation contexts.
elif ("(" not in sc[1] and ")" not in sc[1]) and ("[" not in sc[1] and "]" not in sc[1]):
continue
else:
fcons.append(sc)
# Update pids only if the sentence was used.
citing_pids.add(sc[0])
if len(fcons) > 1:
cocitedpids2contexts_filt[cocitpids] = fcons
# Write out filtered co-citations and their stats.
with codecs.open(os.path.join(run_path, f'cocitpids2contexts-{area}-sentfilt.pickle'), 'wb') as fp:
pickle.dump(cocitedpids2contexts_filt, fp)
print(f'Wrote: {fp.name}')
# Writing this out solely for readability.
with codecs.open(os.path.join(run_path, f'cocitpids2contexts-{area}-sentfilt.json'), 'w', 'utf-8') as fp:
sorted_cocits = collections.OrderedDict()
for cocitpids, citcontexts in sorted(cocitedpids2contexts_filt.items(), key=lambda i: len(i[1])):
cocit_key = '-'.join(cocitpids)
sorted_cocits[cocit_key] = citcontexts
json.dump(sorted_cocits, fp, indent=1)
print(f'Wrote: {fp.name}')
num_cocited_pids = []
num_citcons = []
example_count = 0
for cocitpids, citcontexts in cocitedpids2contexts_filt.items():
num_cocited_pids.append(len(cocitpids))
num_cons = len(citcontexts)
num_citcons.append(num_cons)
ex = math.factorial(num_cons)/(math.factorial(2)*math.factorial(num_cons-2))
example_count += ex
all_summ = pd.DataFrame(num_cocited_pids).describe()
print('Papers co-cited together:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_cocited_pids)))
all_summ = pd.DataFrame(num_citcons).describe()
print('Papers co-cited frequency:\n {:}'.format(all_summ))
pprint.pprint(dict(collections.Counter(num_citcons)))
print(f'Copies of co-citation context: {sc_copy_count}')
print(f'Approximate number of possible triple examples: {example_count}')
def generate_examples_sent_rand(in_path, out_path, dataset):
"""
Assumes random (in-batch) negatives are used and only generates pair
examples of query/anchor and positive.
    - Generate negative sentences for the dev set so it's a frozen dev set.
"""
random.seed(57395)
dataset2area = {
's2orccompsci': 'compsci',
's2orcbiomed': 'biomed'
}
area = dataset2area[dataset]
with codecs.open(os.path.join(in_path, f'cocitpids2contexts-{area}-sentfilt.pickle'), 'rb') as fp:
cocitedpids2contexts = pickle.load(fp)
print(f'Read: {fp.name}')
all_cocits = list(cocitedpids2contexts.keys())
random.shuffle(all_cocits)
random.shuffle(all_cocits)
total_copids = len(all_cocits)
train_copids, dev_copids = all_cocits[:int(0.8*total_copids)], all_cocits[int(0.8*total_copids):]
print(f'cocited pid sets; train: {len(train_copids)}; dev: {len(dev_copids)}')
for split_str, split_copids in [('train', train_copids), ('dev', dev_copids)]:
out_ex_file = codecs.open(os.path.join(out_path, f'{split_str}-coppsent.jsonl'), 'w', 'utf-8')
out_examples = 0
for cocitedpids in split_copids:
contexts = cocitedpids2contexts[cocitedpids]
# Generate all combinations of length 2 given the contexts.
cidxs = itertools.combinations(range(len(contexts)), 2)
for idxs in cidxs:
anchor_context = contexts[idxs[0]]
pos_context = contexts[idxs[1]]
out_ex = {
'citing_pids': (anchor_context[0], pos_context[0]),
'cited_pids': cocitedpids,
'query': anchor_context[1],
'pos_context': pos_context[1]
}
                # If it's dev, also add a random negative context.
if split_str == 'dev':
neg_copids = random.choice(split_copids)
neg_contexts = cocitedpids2contexts[neg_copids]
neg_context = random.choice(neg_contexts)
out_ex['neg_context'] = neg_context[1]
out_ex_file.write(json.dumps(out_ex)+'\n')
out_examples += 1
if out_examples % 200000 == 0:
print(f'{split_str}; {out_examples}')
print(f'Wrote: {out_ex_file.name}')
out_ex_file.close()
print(f'Number of examples: {out_examples}')
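# Each line written above is a json record of roughly this form (values illustrative):
# {"citing_pids": ["pidA", "pidB"], "cited_pids": ["pid1", "pid2"],
#  "query": "anchor citation context ...", "pos_context": "co-cited citation context ...",
#  "neg_context": "random negative context (dev split only) ..."}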
def generate_examples_ict(in_path, out_path, dataset):
"""
Assumes random (in-batch) negatives are used and only generates pair
examples of sentence and abstract context.
"""
random.seed(6036)
if dataset == 's2orccompsci':
area, num_abs, perabssentex = 'compsci', 1479197, 2
elif dataset == 's2orcbiomed':
area, num_abs, perabssentex = 'biomed', 10602028, 1
# Use a shuffled jsonl file so its not ordered by batch number or something.
in_abs_file = codecs.open(os.path.join(in_path, f'abstracts-{dataset}-shuf.jsonl'), 'r', 'utf-8')
print(f'Reading: {in_abs_file.name}')
num_train_abs, num_dev_abs = int(0.8*num_abs), int(0.2*num_abs)
print(f'abstracts; train: {num_train_abs}; dev: {num_dev_abs}')
ninty_plexoverlap = [1, 1, 1, 1, 1, 1, 1, 0, 1, 1]
out_ex_file = codecs.open(os.path.join(out_path, 'train-ictsent.jsonl'), 'w', 'utf-8')
out_examples = 0
out_abs = 0
split_str = 'train'
for abs_line in in_abs_file:
abs_json = json.loads(abs_line.strip())
abs_sents = abs_json['abstract']
query_sents = random.sample(abs_sents, perabssentex)
for qsent in query_sents:
            # 90% of the time this will be 1 and qsent will be redacted from the context, else it's present.
lex_overlap = random.sample(ninty_plexoverlap, 1)
if lex_overlap[0]:
pos_context = ' '.join([s for s in abs_sents if s != qsent])
else:
pos_context = ' '.join(abs_sents)
out_ex = {
'paper_id': abs_json['paper_id'],
'query': qsent,
'pos_context': pos_context
}
out_ex_file.write(json.dumps(out_ex)+'\n')
out_examples += 1
if out_examples % 200000 == 0:
print(f'{split_str}; {out_examples}')
out_abs += 1
if out_abs == num_train_abs:
print(f'Wrote: {out_ex_file.name}')
out_ex_file.close()
print(f'Number of examples: {out_examples}')
out_examples = 0
out_abs = 0
split_str = 'dev'
out_ex_file = codecs.open(os.path.join(out_path, 'dev-ictsent.jsonl'), 'w', 'utf-8')
# For the dev set.
print(f'Wrote: {out_ex_file.name}')
out_ex_file.close()
print(f'Number of examples: {out_examples}')
def generate_examples_aligned_cocitabs_rand(in_path, out_path, dataset, alignment_model, trained_model_path=None):
"""
Assumes random (in-batch) negatives are used and only generates pair
examples of query/anchor and positive for co-cited abstracts.
    - Also generate sentence alignments for the positive based on similarities from the
      alignment model (random alignments are used for the dev negative).
    - Generate negatives for the dev set so it's a frozen dev set.
"""
train_size, dev_size = 1276820, 10000
random.seed(69306)
dataset2area = {
's2orccompsci': 'compsci',
's2orcbiomed': 'biomed'
}
area = dataset2area[dataset]
with codecs.open(os.path.join(in_path, f'cocitpids2contexts-{area}-absnoisefilt.pickle'), 'rb') as fp:
cocitedpids2contexts = pickle.load(fp)
print(f'Read: {fp.name}')
with codecs.open(os.path.join(in_path, f'abstracts-s2orc{area}.pickle'), 'rb') as fp:
pid2abstract = pickle.load(fp)
all_abs_pids = list(pid2abstract.keys())
print(f'Read: {fp.name}')
if alignment_model in {'cosentbert'}:
outfname_suffix = 'cocitabsalign'
word_embedding_model = models.Transformer('allenai/scibert_scivocab_uncased',
max_seq_length=512)
trained_model_fname = os.path.join(trained_model_path, 'sent_encoder_cur_best.pt')
word_embedding_model.auto_model.load_state_dict(torch.load(trained_model_fname))
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
sent_alignment_model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
    elif alignment_model in {'sbmpnet1B'}:
outfname_suffix = 'cocitabsalign-sb1b'
sent_alignment_model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2')
elif alignment_model in {'specter'}:
outfname_suffix = 'cocitabsalign-spec'
word_embedding_model = models.Transformer('allenai/specter', max_seq_length=512)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='mean')
sent_alignment_model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
all_cocits = list(cocitedpids2contexts.keys())
random.shuffle(all_cocits)
random.shuffle(all_cocits)
total_copids = len(all_cocits)
train_copids, dev_copids = all_cocits[:int(0.8*total_copids)], all_cocits[int(0.8*total_copids):]
print(f'cocited pid sets; train: {len(train_copids)}; dev: {len(dev_copids)}')
all_contexts = []
all_pids = set()
for split_str, split_copids in [('train', train_copids), ('dev', dev_copids)]:
split_examples = 0
for cocitedpids in split_copids:
contexts = cocitedpids2contexts[cocitedpids]
# Sample at most 10 context sentences at random to use for supervision.
out_contexts = random.sample(contexts, min(10, len(contexts)))
all_contexts.append(out_contexts)
# Generate all combinations of length 2 given the contexts.
cidxs = itertools.combinations(range(len(cocitedpids)), 2)
all_pids.update(cocitedpids)
split_examples += len(list(cidxs))
if split_str == 'train' and split_examples > train_size:
break
elif split_str == 'dev' and split_examples > dev_size:
break
all_pids = list(all_pids)
print(f'Number of contexts: {len(all_contexts)}; Number of unique abstracts: {len(all_pids)}')
context_stream = ContextSentenceStream(listofcontexts=all_contexts)
abstract_stream = AbsSentenceStream(in_pids=all_pids, pid2abstract=pid2abstract)
# Encode documents.
pool = sent_alignment_model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
start = time.time()
all_context_reps = sent_alignment_model.encode_multi_process(context_stream, pool)
print(f"Context reps shape: {all_context_reps.shape}; Stream sents: {len(context_stream)}")
all_abs_sent_reps = sent_alignment_model.encode_multi_process(abstract_stream, pool)
print(f"Abs sent reps shape: {all_abs_sent_reps.shape}; Stream sents: {len(abstract_stream)}")
    # Optional: Stop the processes in the pool
sent_alignment_model.stop_multi_process_pool(pool)
print('Encoding took: {:.4f}s'.format(time.time()-start))
# Go over the abstract reps and put them into a dict
abs_reps_start_idx = 0
pid2abs_reps = {}
for pid in all_pids:
num_sents = len(pid2abstract[pid]['abstract'])
abs_reps = all_abs_sent_reps[abs_reps_start_idx:abs_reps_start_idx+num_sents, :]
abs_reps_start_idx += num_sents
pid2abs_reps[pid] = abs_reps
# Now form examples.
contextsi = 0
context_reps_start_idx = 0
for split_str, split_copids in [('train', train_copids), ('dev', dev_copids)]:
out_ex_file = codecs.open(os.path.join(out_path, f'{split_str}-{outfname_suffix}.jsonl'), 'w', 'utf-8')
out_examples = 0
num_context_sents = []
for cocitedpids in split_copids:
out_contexts = all_contexts[contextsi]
context_sents = [cc[1] for cc in out_contexts]
citing_pids = [cc[0] for cc in out_contexts]
context_reps = all_context_reps[context_reps_start_idx: context_reps_start_idx+len(context_sents), :]
context_reps_start_idx += len(context_sents)
contextsi += 1
# Generate all combinations of length 2 given the contexts.
cidxs = itertools.combinations(range(len(cocitedpids)), 2)
for idxs in cidxs:
anchor_pid = cocitedpids[idxs[0]]
pos_pid = cocitedpids[idxs[1]]
qabs_reps = pid2abs_reps[anchor_pid]
posabs_reps = pid2abs_reps[pos_pid]
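                # cc_align: for each of query and positive, the index of the abstract sentence
                # most similar to any citation context; abs_align: the most similar
                # (query sentence, positive sentence) pair.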
cc2query_abs_sims = np.matmul(qabs_reps, context_reps.T)
cc2query_idxs = np.unravel_index(cc2query_abs_sims.argmax(), cc2query_abs_sims.shape)
cc2pos_abs_sims = np.matmul(posabs_reps, context_reps.T)
cc2pos_idxs = np.unravel_index(cc2pos_abs_sims.argmax(), cc2pos_abs_sims.shape)
abs2cc2abs_idx = (int(cc2query_idxs[0]), int(cc2pos_idxs[0]))
q2pos_abs_sims = np.matmul(qabs_reps, posabs_reps.T)
q2pos_idxs = np.unravel_index(q2pos_abs_sims.argmax(), q2pos_abs_sims.shape)
abs2abs_idx = (int(q2pos_idxs[0]), int(q2pos_idxs[1]))
anchor_abs = {'TITLE': pid2abstract[anchor_pid]['title'],
'ABSTRACT': pid2abstract[anchor_pid]['abstract']}
pos_abs = {'TITLE': pid2abstract[pos_pid]['title'],
'ABSTRACT': pid2abstract[pos_pid]['abstract'],
'cc_align': abs2cc2abs_idx,
'abs_align': abs2abs_idx}
out_ex = {
'citing_pids': citing_pids,
'cited_pids': cocitedpids,
'query': anchor_abs,
'pos_context': pos_abs,
'citing_contexts': context_sents
}
num_context_sents.append(len(citing_pids))
                # If it's dev, also add a random negative context.
if split_str == 'dev':
neg_pid = random.choice(all_abs_pids)
rand_anch_idx, rand_neg_idx = random.choice(range(len(pid2abstract[anchor_pid]['abstract']))), \
random.choice(range(len(pid2abstract[neg_pid]['abstract'])))
neg_cc_align = (rand_anch_idx, rand_neg_idx)
rand_anch_idx, rand_neg_idx = random.choice(range(len(pid2abstract[anchor_pid]['abstract']))), \
random.choice(range(len(pid2abstract[neg_pid]['abstract'])))
neg_abs_align = (rand_anch_idx, rand_neg_idx)
neg_abs = {'TITLE': pid2abstract[neg_pid]['title'],
'ABSTRACT': pid2abstract[neg_pid]['abstract'],
'cc_align': neg_cc_align, 'abs_align': neg_abs_align}
out_ex['neg_context'] = neg_abs
out_ex_file.write(json.dumps(out_ex)+'\n')
out_examples += 1
if out_examples % 1000 == 0:
print(f'{split_str}; {out_examples}')
# if out_examples > 1000:
# break
# Do this only for 1.2m triples, then exit.
if split_str == 'train' and out_examples > train_size:
break
elif split_str == 'dev' and out_examples > dev_size:
break
print(f'Wrote: {out_ex_file.name}')
out_ex_file.close()
all_summ = pd.DataFrame(num_context_sents).describe()
print('Number of cit contexts per triple: {:}'.format(all_summ))
print(f'Number of examples: {out_examples}')
def generate_examples_cocitabs_rand(in_path, out_path, dataset):
"""
Assumes random (in-batch) negatives are used and only generates pair
examples of query/anchor and positive for co-cited abstracts.
    - Generate negatives for the dev set so it's a frozen dev set.
"""
random.seed(69306)
dataset2area = {
's2orccompsci': 'compsci',
's2orcbiomed': 'biomed'
}
area = dataset2area[dataset]
with codecs.open(os.path.join(in_path, f'cocitpids2contexts-{area}-absnoisefilt.pickle'), 'rb') as fp:
cocitedpids2contexts = pickle.load(fp)
print(f'Read: {fp.name}')
with codecs.open(os.path.join(in_path, f'abstracts-s2orc{area}.pickle'), 'rb') as fp:
pid2abstract = pickle.load(fp)
all_abs_pids = list(pid2abstract.keys())
print(f'Read: {fp.name}')
all_cocits = list(cocitedpids2contexts.keys())
random.shuffle(all_cocits)
random.shuffle(all_cocits)
total_copids = len(all_cocits)
train_copids, dev_copids = all_cocits[:int(0.8*total_copids)], all_cocits[int(0.8*total_copids):]
print(f'cocited pid sets; train: {len(train_copids)}; dev: {len(dev_copids)}')
for split_str, split_copids in [('train', train_copids), ('dev', dev_copids)]:
out_ex_file = codecs.open(os.path.join(out_path, f'{split_str}-cocitabs.jsonl'), 'w', 'utf-8')
out_examples = 0
num_context_sents = []
for cocitedpids in split_copids:
contexts = cocitedpids2contexts[cocitedpids]
# Sample at most 10 context sentences at random to use for supervision.
out_contexts = random.sample(contexts, min(10, len(contexts)))
context_sents = [cc[1] for cc in out_contexts]
citing_pids = [cc[0] for cc in out_contexts]
# Generate all combinations of length 2 given the contexts.
cidxs = itertools.combinations(range(len(cocitedpids)), 2)
for idxs in cidxs:
anchor_pid = cocitedpids[idxs[0]]
pos_pid = cocitedpids[idxs[1]]
anchor_abs = {'TITLE': pid2abstract[anchor_pid]['title'],
'ABSTRACT': pid2abstract[anchor_pid]['abstract']}
pos_abs = {'TITLE': pid2abstract[pos_pid]['title'],
'ABSTRACT': pid2abstract[pos_pid]['abstract']}
out_ex = {
'citing_pids': citing_pids,
'cited_pids': cocitedpids,
'query': anchor_abs,
'pos_context': pos_abs,
'citing_contexts': context_sents
}
num_context_sents.append(len(citing_pids))
                # If it's dev, also add a random negative context.
if split_str == 'dev':
neg_pid = random.choice(all_abs_pids)
neg_abs = {'TITLE': pid2abstract[neg_pid]['title'],
'ABSTRACT': pid2abstract[neg_pid]['abstract']}
out_ex['neg_context'] = neg_abs
out_ex_file.write(json.dumps(out_ex)+'\n')
out_examples += 1
if out_examples % 200000 == 0:
print(f'{split_str}; {out_examples}')
print(f'Wrote: {out_ex_file.name}')
out_ex_file.close()
all_summ = pd.DataFrame(num_context_sents).describe()
print('Number of cit contexts per triple: {:}'.format(all_summ))
print(f'Number of examples: {out_examples}')
def generate_examples_cocitabs_contexts_rand(in_path, out_path, dataset):
"""
Assumes random (in-batch) negatives are used and only generates pair
examples of query/anchor and positive for co-cited abstracts.
- Bundles the co-citation context for the positive with the pos abstract.
    - Generate negatives for the dev set so it's a frozen dev set.
Additionally, generates negatives which are sampled from a valid co-cite
set so they come with negative contexts.
"""
train_size, dev_size = 1276820, 10000
random.seed(69306)
dataset2area = {
's2orccompsci': 'compsci',
's2orcbiomed': 'biomed'
}
area = dataset2area[dataset]
with codecs.open(os.path.join(in_path, f'cocitpids2contexts-{area}-absnoisefilt.pickle'), 'rb') as fp:
cocitedpids2contexts = pickle.load(fp)
print(f'Read: {fp.name}')
with codecs.open(os.path.join(in_path, f'abstracts-s2orc{area}.pickle'), 'rb') as fp:
pid2abstract = pickle.load(fp)
print(f'Read: {fp.name}')
all_cocits = list(cocitedpids2contexts.keys())
random.shuffle(all_cocits)
random.shuffle(all_cocits)
total_copids = len(all_cocits)
train_copids, dev_copids = all_cocits[:int(0.8*total_copids)], all_cocits[int(0.8*total_copids):]
print(f'cocited pid sets; train: {len(train_copids)}; dev: {len(dev_copids)}')
for split_str, split_copids in [('train', train_copids), ('dev', dev_copids)]:
out_ex_file = codecs.open(os.path.join(out_path, f'{split_str}-concocitabs-seq.jsonl'), 'w', 'utf-8')
out_examples = 0
num_context_sents = []
for cocitedpids in split_copids:
contexts = cocitedpids2contexts[cocitedpids]
# Sample at most 10 context sentences at random to use for supervision.
out_contexts = random.sample(contexts, min(10, len(contexts)))
context_sents = [cc[1] for cc in out_contexts]
citing_pids = [cc[0] for cc in out_contexts]
# Generate all combinations of length 2 given the contexts.
cidxs = itertools.combinations(range(len(cocitedpids)), 2)
for idxs in cidxs:
anchor_pid = cocitedpids[idxs[0]]
pos_pid = cocitedpids[idxs[1]]
anchor_abs = {'TITLE': pid2abstract[anchor_pid]['title'],
'ABSTRACT': pid2abstract[anchor_pid]['abstract']}
pos_abs = {'TITLE': pid2abstract[pos_pid]['title'],
'ABSTRACT': pid2abstract[pos_pid]['abstract'],
'citing_contexts': context_sents,
'citing_pids': citing_pids}
out_ex = {
'cited_pids': cocitedpids,
'query': anchor_abs,
'pos_context': pos_abs
}
num_context_sents.append(len(citing_pids))
                # If it's dev, also add a random negative context.
if split_str == 'dev':
neg_cocit_pids = random.choice(all_cocits)
neg_contexts = cocitedpids2contexts[neg_cocit_pids]
neg_out_contexts = random.sample(neg_contexts, min(10, len(neg_contexts)))
neg_context_sents = [cc[1] for cc in neg_out_contexts]
neg_citing_pids = [cc[0] for cc in neg_out_contexts]
# Sample at most 10 context sentences at random to use for supervision.
neg_pid = random.choice(neg_cocit_pids)
neg_abs = {'TITLE': pid2abstract[neg_pid]['title'],
'ABSTRACT': pid2abstract[neg_pid]['abstract'],
'citing_contexts': neg_context_sents,
'citing_pids': neg_citing_pids}
out_ex['neg_context'] = neg_abs
out_ex_file.write(json.dumps(out_ex)+'\n')
out_examples += 1
if out_examples % 200000 == 0:
print(f'{split_str}; {out_examples}')
# if out_examples > 1000:
# break
# Do this only for 1.2m triples, then exit.
if split_str == 'train' and out_examples > train_size:
break
elif split_str == 'dev' and out_examples > dev_size:
break
print(f'Wrote: {out_ex_file.name}')
out_ex_file.close()
all_summ = pd.DataFrame(num_context_sents).describe()
print('Number of cit contexts per triple: {:}'.format(all_summ))
print(f'Number of examples: {out_examples}')
def main():
"""
Parse command line arguments and call all the above routines.
:return:
"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest=u'subcommand',
help=u'The action to perform.')
# Filter for abstract level models.
filter_cocit_papers = subparsers.add_parser('filt_cocit_papers')
filter_cocit_papers.add_argument('--run_path', required=True,
help='Directory with absfilt cocitation pickle file. '
'Also where outputs are written.')
filter_cocit_papers.add_argument('--dataset', required=True,
choices=['s2orccompsci', 's2orcbiomed'],
help='Files of area to process.')
# Filter for sentence level models.
filter_cocit_sents = subparsers.add_parser('filt_cocit_sents')
filter_cocit_sents.add_argument('--run_path', required=True,
help='Directory with absfilt cocitation pickle file. '
'Also where outputs are written.')
filter_cocit_sents.add_argument('--dataset', required=True,
choices=['s2orccompsci', 's2orcbiomed'],
help='Files of area to process.')
# Write examples for sentence level models.
write_example_sents = subparsers.add_parser('write_examples')
write_example_sents.add_argument('--in_path', required=True,
help='Directory with absfilt cocitation pickle file.')
write_example_sents.add_argument('--out_path', required=True,
help='Directory where outputs are written.')
write_example_sents.add_argument('--model_path',
help='Directory where trained sentence bert model is.')
write_example_sents.add_argument('--model_name', choices=['cosentbert', 'specter', 'sbmpnet1B'],
help='Model to use for getting alignments between abstracts.')
write_example_sents.add_argument('--dataset', required=True,
choices=['s2orccompsci', 's2orcbiomed'],
help='Files of area to process.')
write_example_sents.add_argument('--experiment', required=True,
choices=['cosentbert', 'ictsentbert', 'cospecter',
'sbalisentbienc'],
help='Model writing examples for.')
cl_args = parser.parse_args()
if cl_args.subcommand == 'filt_cocit_papers':
filter_cocitation_papers(run_path=cl_args.run_path, dataset=cl_args.dataset)
elif cl_args.subcommand == 'filt_cocit_sents':
filter_cocitation_sentences(run_path=cl_args.run_path, dataset=cl_args.dataset)
elif cl_args.subcommand == 'write_examples':
if cl_args.experiment in {'cosentbert'}:
generate_examples_sent_rand(in_path=cl_args.in_path, out_path=cl_args.out_path,
dataset=cl_args.dataset)
elif cl_args.experiment in {'ictsentbert'}:
generate_examples_ict(in_path=cl_args.in_path, out_path=cl_args.out_path,
dataset=cl_args.dataset)
elif cl_args.experiment in {'cospecter'}:
generate_examples_cocitabs_rand(in_path=cl_args.in_path, out_path=cl_args.out_path,
dataset=cl_args.dataset)
elif cl_args.experiment in {'sbalisentbienc'}:
generate_examples_aligned_cocitabs_rand(in_path=cl_args.in_path, out_path=cl_args.out_path,
dataset=cl_args.dataset, trained_model_path=cl_args.model_path,
alignment_model=cl_args.model_name)
if __name__ == '__main__':
main()
| aspire-main | src/pre_process/pre_proc_cocits.py |
"""
Generate rankings over candidates for queries for different datasets and trained models
or baselines. There are three types of functions here: one assumes a set of embeddings
from a model stored to disk and ranks based on distance/similarity metrics of these
embeddings, another type of function uses a more complex late interaction method for
scoring query and candidate, and a final type of function consumes data, embeds and caches
the reps in memory and computes scores for ranking. The last kind of function is used
most often in practice. For each of these types of function there are also variants for
faceted datasets and whole abstract datasets.
"""
import copy
import os
import sys
import logging
import time
import codecs, json
import argparse
import collections
import joblib
import torch
from sklearn import neighbors
from sklearn import metrics as skmetrics
import numpy as np
from scipy import spatial
from transformers import AutoModel, AutoTokenizer
from . import data_utils as du
from ..learning.facetid_models import disent_models
from ..learning import batchers
# https://stackoverflow.com/a/46635273/3262406
np.set_printoptions(suppress=True)
class TrainedScoringModel:
"""
Class to initialize trained model, load precomputed reps, and score query candidate pairs.
"""
def __init__(self, model_name, trained_model_path, model_version='cur_best'):
# Load label maps and configs.
with codecs.open(os.path.join(trained_model_path, 'run_info.json'), 'r', 'utf-8') as fp:
run_info = json.load(fp)
all_hparams = run_info['all_hparams']
# Init model:
if model_name in {'miswordpolyenc'}:
model = disent_models.WordSentAlignPolyEnc(model_hparams=all_hparams)
else:
raise ValueError(f'Unknown model: {model_name}')
model_fname = os.path.join(trained_model_path, 'model_{:s}.pt'.format(model_version))
model.load_state_dict(torch.load(model_fname))
logging.info(f'Scoring model: {model_fname}')
# Move model to the GPU.
if torch.cuda.is_available():
model.cuda()
logging.info('Running on GPU.')
model.eval()
self.model_name = model_name
self.model = model
def predict(self, query, cands):
"""
Use trained model to return scores between query and candidate.
:param query: numpy.array; num_sents x encoding_dim.
:param cands: list(numpy.array); pool_depth(num_sents x encoding_dim)
:return:
"""
batch_size = 128
cand_batch = []
cand_scores = []
pair_sm = []
for ci, cand_sents in enumerate(cands):
cand_batch.append(cand_sents)
if ci % 1000 == 0:
logging.info(f'Examples: {ci}/{len(cands)}')
if len(cand_batch) == batch_size:
score_dict = self.model.score(query_reps=query, cand_reps=cand_batch)
cand_scores.extend(score_dict['batch_scores'].tolist())
pair_sm.extend(score_dict['pair_scores'])
cand_batch = []
# Handle final few candidates.
if cand_batch:
score_dict = self.model.score(query_reps=query, cand_reps=cand_batch)
cand_scores.extend(score_dict['batch_scores'].tolist())
pair_sm.extend(score_dict['pair_scores'])
ret_dict = {'cand_scores': cand_scores, 'pair_scores': pair_sm}
return ret_dict
class CachingTrainedScoringModel:
"""
Class to initialize trained model, build reps, cache them, and score query candidate pairs.
"""
def __init__(self, model_name, trained_model_path, model_version='cur_best'):
# Load label maps and configs.
with codecs.open(os.path.join(trained_model_path, 'run_info.json'), 'r', 'utf-8') as fp:
run_info = json.load(fp)
all_hparams = run_info['all_hparams']
# Init model:
if model_name in {'miswordbienc'}:
model = disent_models.WordSentAlignBiEnc(model_hparams=all_hparams)
batcher = batchers.AbsSentTokBatcher
elif model_name in {'sbalisentbienc'}:
model = disent_models.WordSentAbsSupAlignBiEnc(model_hparams=all_hparams)
batcher = batchers.AbsSentTokBatcher
elif model_name in {'cospecter'}:
model = disent_models.MySPECTER(model_hparams=all_hparams)
batcher = batchers.AbsTripleBatcher
else:
raise ValueError(f'Unknown model: {model_name}')
model_fname = os.path.join(trained_model_path, 'model_{:s}.pt'.format(model_version))
model.load_state_dict(torch.load(model_fname))
logging.info(f'Scoring model: {model_fname}')
self.tokenizer = AutoTokenizer.from_pretrained(all_hparams['base-pt-layer'])
# Move model to the GPU.
if torch.cuda.is_available():
model.cuda()
logging.info('Running on GPU.')
model.eval()
self.model_name = model_name
self.model = model
self.batcher = batcher
self.pid2model_reps = {}
def save_cache(self, out_fname):
"""
Saves the cache to disk in case we want to use it ever.
"""
joblib.dump(self.pid2model_reps, out_fname, compress=('gzip', 3))
def predict(self, query_pid, cand_pids, pid2abstract, facet='all'):
"""
Use trained model to return scores between query and candidate.
:param query_pid: string
:param cand_pids: list(string)
:param pid2abstract: dict(string: dict)
:param facet: string; {'all', 'background', 'method', 'result'}
:return:
"""
# Gets reps of uncached documents.
encode_batch_size = 32
uncached_pids = [cpid for cpid in cand_pids if cpid not in self.pid2model_reps]
if query_pid not in self.pid2model_reps: uncached_pids.append(query_pid)
if uncached_pids:
batch_docs = []
batch_pids = []
for i, pid in enumerate(uncached_pids):
batch_docs.append({'TITLE': pid2abstract[pid]['title'],
'ABSTRACT': pid2abstract[pid]['abstract']})
batch_pids.append(pid)
if i % 1000 == 0:
logging.info(f'Encoding: {i}/{len(uncached_pids)}')
if len(batch_docs) == encode_batch_size:
batch_dict = self.batcher.make_batch(raw_feed={'query_texts': batch_docs},
pt_lm_tokenizer=self.tokenizer)
with torch.no_grad():
batch_rep_dicts = self.model.caching_encode(batch_dict)
assert(len(batch_pids) == len(batch_rep_dicts))
for upid, batch_reps in zip(batch_pids, batch_rep_dicts):
self.pid2model_reps[upid] = batch_reps
batch_docs = []
batch_pids = []
if batch_docs: # Last batch.
batch_dict = self.batcher.make_batch(raw_feed={'query_texts': batch_docs},
pt_lm_tokenizer=self.tokenizer)
with torch.no_grad():
batch_rep_dicts = self.model.caching_encode(batch_dict)
assert(len(batch_pids) == len(batch_rep_dicts))
for upid, batch_reps in zip(batch_pids, batch_rep_dicts):
self.pid2model_reps[upid] = batch_reps
# Score documents based on reps.
# Get query facet sent idxs.
if facet != 'all':
query_abs_labs = ['background_label' if lab == 'objective_label' else lab for lab
in pid2abstract[query_pid]['pred_labels']]
qf_idxs = [i for i, l in enumerate(query_abs_labs) if f'{facet}_label' == l]
query_rep = copy.deepcopy(self.pid2model_reps[query_pid])
# Select only the query sentence reps.
query_rep['sent_reps'] = query_rep['sent_reps'][qf_idxs, :]
else:
query_rep = self.pid2model_reps[query_pid]
score_batch_size = 64
cand_batch = []
cand_scores = []
pair_sm = []
for ci, cpid in enumerate(cand_pids):
cand_batch.append(self.pid2model_reps[cpid])
if ci % 1000 == 0:
logging.info(f'Scoring: {ci}/{len(cand_pids)}')
if len(cand_batch) == score_batch_size:
with torch.no_grad():
score_dict = self.model.caching_score(query_encode_ret_dict=query_rep,
cand_encode_ret_dicts=cand_batch)
cand_scores.extend(score_dict['batch_scores'].tolist())
pair_sm.extend(score_dict['pair_scores'])
cand_batch = []
if cand_batch: # Handle final few candidates.
with torch.no_grad():
score_dict = self.model.caching_score(query_encode_ret_dict=query_rep,
cand_encode_ret_dicts=cand_batch)
cand_scores.extend(score_dict['batch_scores'].tolist())
pair_sm.extend(score_dict['pair_scores'])
ret_dict = {'cand_scores': cand_scores, 'pair_scores': pair_sm}
return ret_dict
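# Rough usage of the caching scoring model by the ranking functions below (names illustrative):
# model = CachingTrainedScoringModel(model_name='sbalisentbienc', trained_model_path='/path/to/run')
# ret = model.predict(query_pid='q1', cand_pids=['c1', 'c2'], pid2abstract=pid2abstract, facet='method')
# ret['cand_scores']  # one aggregate score per candidate.
# ret['pair_scores']  # per-candidate matrices of query-candidate sentence pair scores.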
def caching_scoringmodel_rank_pool_sentfaceted(root_path, trained_model_path, sent_rep_type,
dataset, facet, run_name):
"""
Given a pool of candidates re-rank the pool based on the model scores.
Function for use when model classes provide methods to encode data, and then score
    documents. Representations are generated at the same time as scoring, not saved a priori on disk.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param dataset: string; {'csfcube'}; eval dataset to use.
:param sent_rep_type: string
:param facet: string; {'background', 'method', 'result'} background and objective merged.
:return: write to disk.
"""
reps_path = os.path.join(root_path, sent_rep_type, run_name)
# read candidate reps from the whole abstract reps and query reps from the faceted ones.
pool_fname = os.path.join(root_path, f'test-pid2anns-{dataset}-{facet}.json')
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info(f'Read anns: {dataset}; total: {len(qpid2pool)}')
# Load trained model.
model = CachingTrainedScoringModel(model_name=sent_rep_type, trained_model_path=trained_model_path)
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, 'abstracts-csfcube-preds.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, f'{dataset}-{sent_rep_type}-ranked')
du.create_dir(readable_dir_path)
start = time.time()
for qi, qpid in enumerate(query_pids):
logging.info('Ranking query {:d}: {:s}'.format(qi, qpid))
resfile = codecs.open(os.path.join(readable_dir_path, f'{qpid}-{dataset}-{sent_rep_type}-{facet}-ranked.txt'),
'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
ret_dict = model.predict(query_pid=qpid, cand_pids=cand_pids, pid2abstract=pid2abstract,
facet='all' if sent_rep_type in {'cospecter'} else facet)
cand_scores = ret_dict['cand_scores']
pair_softmax = ret_dict['pair_scores']
assert(len(cand_pids) == len(cand_scores))
# Get nearest neighbours.
cand2sims = {}
cand_pair_sims_string = {}
for cpid, cand_sim, pair_sent_sm in zip(cand_pids, cand_scores, pair_softmax):
cand2sims[cpid] = cand_sim
if isinstance(pair_sent_sm, list):
mat = '\n'.join([np.array2string(np.around(t, 4), precision=3) for t in pair_sent_sm])
else:
mat = np.array2string(pair_sent_sm, precision=3)
cand_pair_sims_string[cpid] = '{:.4f}\n{:s}'.format(cand_sim, mat)
# Build the re-ranked list of paper_ids.
ranked_cand_pids = []
ranked_cand_pid_rels = []
ranked_pair_sim_strings = []
for cpid, sim in sorted(cand2sims.items(), key=lambda i: i[1], reverse=True):
ranked_cand_pids.append(cpid)
rel = cand_pid_rels[cand_pids.index(cpid)]
ranked_cand_pid_rels.append(rel)
ranked_pair_sim_strings.append(cand_pair_sims_string[cpid])
query2rankedcands[qpid].append((cpid, sim))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels,
ranked_pair_sim_strings=ranked_pair_sim_strings)
resfile.close()
logging.info('Ranking candidates took: {:.4f}s'.format(time.time()-start))
model.save_cache(out_fname=os.path.join(reps_path, f'pid2model_reps-{dataset}-{sent_rep_type}-{facet}.pickle'))
with codecs.open(os.path.join(reps_path, f'test-pid2pool-{dataset}-{sent_rep_type}-{facet}-ranked.json'),
'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
def caching_scoringmodel_rank_pool_sent(root_path, trained_model_path, sent_rep_type,
dataset, run_name):
"""
Given a pool of candidates re-rank the pool based on the model scores.
Function for use when model classes provide methods to encode data, and then score
    documents. Representations are generated at the same time as scoring, not saved a priori on disk.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param dataset: string; {'csfcube'}; eval dataset to use.
:param sent_rep_type: string; {'sbtinybertsota', 'sbrobertanli'}
:return: write to disk.
"""
reps_path = os.path.join(root_path, sent_rep_type, run_name)
pool_fname = os.path.join(root_path, f'test-pid2anns-{dataset}.json')
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info('Read anns: {:}; total: {:}; valid: {:}'.
format(dataset, len(qpid2pool), len(query_pids)))
# Load trained model.
model = CachingTrainedScoringModel(model_name=sent_rep_type, trained_model_path=trained_model_path)
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, f'abstracts-{dataset}.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, f'{dataset}-{sent_rep_type}-ranked')
du.create_dir(readable_dir_path)
start = time.time()
for qi, qpid in enumerate(query_pids):
logging.info('Ranking query {:d}: {:s}'.format(qi, qpid))
resfile = codecs.open(os.path.join(readable_dir_path, f'{qpid}-{dataset}-{sent_rep_type}-ranked.txt'),
'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
ret_dict = model.predict(query_pid=qpid, cand_pids=cand_pids, pid2abstract=pid2abstract)
cand_scores = ret_dict['cand_scores']
pair_softmax = ret_dict['pair_scores']
assert(len(cand_pids) == len(cand_scores))
# Get nearest neighbours.
cand2sims = {}
cand_pair_sims_string = {}
for cpid, cand_sim, pair_sent_sm in zip(cand_pids, cand_scores, pair_softmax):
cand2sims[cpid] = cand_sim
cand_pair_sims_string[cpid] = (cand_sim, pair_sent_sm)
# Build the re-ranked list of paper_ids.
ranked_cand_pids = []
ranked_cand_pid_rels = []
ranked_pair_sim_strings = []
for cpid, sim in sorted(cand2sims.items(), key=lambda i: i[1], reverse=True):
ranked_cand_pids.append(cpid)
rel = cand_pid_rels[cand_pids.index(cpid)]
ranked_cand_pid_rels.append(rel)
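            # Only keep readable per-pair similarity matrices for the top-ranked ~110 candidates,
            # presumably to keep the readable output files small.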
if len(ranked_pair_sim_strings) < 110:
pair_sent_sm = cand_pair_sims_string[cpid][1]
if isinstance(pair_sent_sm, list):
mat = '\n'.join([np.array2string(np.around(t, 4), precision=3) for t in pair_sent_sm])
else:
mat = np.array2string(pair_sent_sm, precision=3)
string = '{:.4f}\n{:s}'.format(cand_pair_sims_string[cpid][0], mat)
ranked_pair_sim_strings.append(string)
query2rankedcands[qpid].append((cpid, sim))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels,
ranked_pair_sim_strings=ranked_pair_sim_strings)
resfile.close()
logging.info('Ranking candidates took: {:.4f}s'.format(time.time()-start))
# model.save_cache(out_fname=os.path.join(reps_path, f'pid2model_reps-{dataset}-{sent_rep_type}.pickle'))
with codecs.open(os.path.join(reps_path, f'test-pid2pool-{dataset}-{sent_rep_type}-ranked.json'),
'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
def scoringmodel_rank_pool_sentfaceted(root_path, trained_model_path, sent_rep_type,
data_to_read, dataset, facet, run_name):
"""
Given vectors on disk and a pool of candidates re-rank the pool based on the sentence rep
and the facet passed. Function for use when the pool candidate reps are part of the gorc
datasets reps. All reps are sentence level - this function is mainly for use with sentence bert
outputs.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param dataset: string; {'csfcube'}; eval dataset to use.
:param sent_rep_type: string; {'sbtinybertsota', 'sbrobertanli'}
:param data_to_read: string; {'sent'}
:param facet: string; {'background', 'method', 'result'} background and objective merged.
:return: write to disk.
"""
if run_name:
reps_path = os.path.join(root_path, sent_rep_type, run_name)
else:
reps_path = os.path.join(root_path, sent_rep_type)
# read candidate reps from the whole abstract reps and query reps from the faceted ones.
pool_fname = os.path.join(root_path, f'test-pid2anns-{dataset}-{facet}.json')
all_map_fname = os.path.join(reps_path, f'pid2idx-{dataset}-sent.json')
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
with codecs.open(all_map_fname, 'r', 'utf-8') as fp:
all_docsents2idx = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info(f'Read anns: {dataset}; total: {len(qpid2pool)}')
# Read vector reps.
all_doc_reps = np.load(os.path.join(reps_path, f'{dataset}-{data_to_read}.npy'))
np.nan_to_num(all_doc_reps, copy=False)
logging.info(f'Read {dataset} sent reps: {all_doc_reps.shape}')
# Load trained model.
if sent_rep_type in {'miswordpolyenc'}:
model = TrainedScoringModel(model_name=sent_rep_type, trained_model_path=trained_model_path)
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, 'abstracts-csfcube-preds.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, f'{dataset}-{sent_rep_type}-ranked')
du.create_dir(readable_dir_path)
for qpid in query_pids:
resfile = codecs.open(os.path.join(readable_dir_path, f'{qpid}-{dataset}-{sent_rep_type}-{facet}-ranked.txt'),
'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
# Get the query abstracts query facet sentence representations
query_abs_labs = ['background_label' if lab == 'objective_label' else lab for lab
in pid2abstract[qpid]['pred_labels']]
# query_sent_repids = [f'{qpid}-{i}' for i, l in enumerate(query_abs_labs) if f'{facet}_label' == l]
query_sent_repids = [f'{qpid}-{i}' for i, l in enumerate(query_abs_labs)]
query_idx = [all_docsents2idx[i] for i in query_sent_repids]
query_fsent_rep = all_doc_reps[query_idx]
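        # Guard: if the selected query rep is a single 1-D 768-dim vector, reshape it to (1, 768).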
if query_fsent_rep.shape[0] == 768:
query_fsent_rep = query_fsent_rep.reshape(1, query_fsent_rep.shape[0])
# Get representations of all sentences in the pool.
candpool_sent_reps = []
cand_lens = []
for cpid in cand_pids:
cand_abs_labs = ['background_label' if lab == 'objective_label' else lab for lab
in pid2abstract[cpid]['pred_labels']]
cand_ids = [f'{cpid}-{i}' for i in range(len(cand_abs_labs))]
cand_lens.append(len(cand_ids))
cand_doc_idxs = [all_docsents2idx[csent_id] for csent_id in cand_ids]
candpool_sent_reps.append(all_doc_reps[cand_doc_idxs, :])
ret_dict = model.predict(query=query_fsent_rep, cands=candpool_sent_reps)
cand_scores = ret_dict['cand_scores']
pair_softmax = ret_dict['pair_scores']
assert(len(cand_pids) == len(cand_scores))
# Get nearest neighbours.
cand2sims = {}
cand_pair_sims_string = {}
for cpid, cand_sim, pair_sent_sm in zip(cand_pids, cand_scores, pair_softmax):
cand2sims[cpid] = cand_sim
cand_pair_sims_string[cpid] = '{:.4f}\n{:s}'.format(cand_sim, np.array2string(pair_sent_sm, precision=2))
# Build the re-ranked list of paper_ids.
ranked_cand_pids = []
ranked_cand_pid_rels = []
ranked_pair_sim_strings = []
for cpid, sim in sorted(cand2sims.items(), key=lambda i: i[1], reverse=True):
ranked_cand_pids.append(cpid)
rel = cand_pid_rels[cand_pids.index(cpid)]
ranked_cand_pid_rels.append(rel)
ranked_pair_sim_strings.append(cand_pair_sims_string[cpid])
# Save a distance because its what prior things saved.
query2rankedcands[qpid].append((cpid, -1*sim))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels,
ranked_pair_sim_strings=ranked_pair_sim_strings)
resfile.close()
with codecs.open(os.path.join(reps_path, f'test-pid2pool-{dataset}-{sent_rep_type}-{facet}-ranked.json'),
'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
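# Illustrative sketch (toy data; not part of the original pipeline): the ranking
# functions in this file look up per-sentence rows of a large reps .npy matrix
# through a pid2idx JSON map whose keys are '{pid}-{sentence_index}'. The helper
# below shows that lookup pattern; names and shapes here are assumptions.
def _sketch_gather_sentence_reps():
    import numpy as np
    # Toy rep matrix: 5 sentence vectors of dimension 4.
    all_doc_reps = np.arange(20, dtype=float).reshape(5, 4)
    # Map from '{pid}-{i}' sentence ids to row indices, as in pid2idx-*-sent.json.
    docsents2idx = {'p1-0': 0, 'p1-1': 1, 'p2-0': 2, 'p2-1': 3, 'p2-2': 4}
    # Gather all sentence rows for paper 'p2'.
    sent_ids = [f'p2-{i}' for i in range(3)]
    rows = [docsents2idx[sid] for sid in sent_ids]
    sent_reps = all_doc_reps[rows, :]
    assert sent_reps.shape == (3, 4)
    return sent_reps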
def scoringmodel_rank_pool_sent(root_path, trained_model_path, sent_rep_type,
data_to_read, dataset, run_name):
"""
    Given vectors on disk and a pool of candidates re-rank the pool based on the sentence reps.
    Function for use when the pool candidate reps are part of the gorc
    datasets reps. All reps are sentence level - this function is mainly for use with trained
    scoring models over sentence reps.
    :param root_path: string; directory with abstracts jsonl and citation network data and subdir of
        reps to use for retrieval.
    :param trained_model_path: string; directory with the trained scoring model to load.
    :param sent_rep_type: string; {'miswordpolyenc'}
    :param data_to_read: string; {'sent'}
    :param dataset: string; eval dataset to use.
    :param run_name: string; sub directory with reps from a specific run, if any.
    :return: write to disk.
"""
dataset, split = dataset, ''
if run_name:
reps_path = os.path.join(root_path, sent_rep_type, run_name)
else:
reps_path = os.path.join(root_path, sent_rep_type)
# read candidate reps from the whole abstract reps and query reps from the faceted ones.
pool_fname = os.path.join(root_path, 'test-pid2anns-{:s}{:s}.json'.format(dataset, split))
# Also allow experimentation with unfaceted reps.
all_map_fname = os.path.join(reps_path, 'pid2idx-{:s}-sent.json'.format(dataset))
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
with codecs.open(all_map_fname, 'r', 'utf-8') as fp:
all_docsents2idx = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info('Read anns: {:}; total: {:}; valid: {:}'.
format(dataset, len(qpid2pool), len(query_pids)))
# Read vector reps.
all_doc_reps = np.load(os.path.join(reps_path, '{:s}-{:s}.npy'.
format(dataset, data_to_read)))
np.nan_to_num(all_doc_reps, copy=False)
logging.info('Read {:s} sent reps: {:}'.format(dataset, all_doc_reps.shape))
# Load trained model.
if sent_rep_type in {'miswordpolyenc'}:
model = TrainedScoringModel(model_name=sent_rep_type, trained_model_path=trained_model_path)
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, f'abstracts-{dataset}.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, f'{dataset}-{sent_rep_type}-ranked')
du.create_dir(readable_dir_path)
for qi, qpid in enumerate(query_pids):
logging.info('Ranking query {:d}: {:s}'.format(qi, qpid))
resfile = codecs.open(os.path.join(readable_dir_path, f'{qpid}-{dataset}-{sent_rep_type}-ranked.txt'),
'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
# Get the query abstracts query facet sentence representations
query_sent_repids = [f'{qpid}-{i}' for i, l in enumerate(pid2abstract[qpid]['abstract'])]
query_idx = [all_docsents2idx[i] for i in query_sent_repids]
query_fsent_rep = all_doc_reps[query_idx]
if query_fsent_rep.shape[0] == 768:
query_fsent_rep = query_fsent_rep.reshape(1, query_fsent_rep.shape[0])
# Get representations of all sentences in the pool.
candpool_sent_reps = []
cand_lens = []
for cpid in cand_pids:
cand_ids = [f'{cpid}-{i}' for i in range(len(pid2abstract[cpid]['abstract']))]
cand_lens.append(len(cand_ids))
cand_doc_idxs = [all_docsents2idx[csent_id] for csent_id in cand_ids]
candpool_sent_reps.append(all_doc_reps[cand_doc_idxs, :])
ret_dict = model.predict(query=query_fsent_rep, cands=candpool_sent_reps)
cand_scores = ret_dict['cand_scores']
pair_softmax = ret_dict['pair_scores']
assert(len(cand_pids) == len(cand_scores))
# Get nearest neighbours.
cand2sims = {}
cand_pair_sims_string = {}
for cpid, cand_sim, pair_sent_sm in zip(cand_pids, cand_scores, pair_softmax):
cand2sims[cpid] = cand_sim
cand_pair_sims_string[cpid] = (cand_sim, pair_sent_sm)
# Build the re-ranked list of paper_ids.
ranked_cand_pids = []
ranked_cand_pid_rels = []
ranked_pair_sim_strings = []
for cpid, sim in sorted(cand2sims.items(), key=lambda i: i[1], reverse=True):
ranked_cand_pids.append(cpid)
rel = cand_pid_rels[cand_pids.index(cpid)]
ranked_cand_pid_rels.append(rel)
            # Only keep similarity strings for the candidates which will be printed out.
            if len(ranked_pair_sim_strings) < 110:
string = '{:.4f}\n{:s}'.format(cand_pair_sims_string[cpid][0],
np.array2string(cand_pair_sims_string[cpid][1], precision=2))
ranked_pair_sim_strings.append(string)
# Save a distance because its what prior things saved.
query2rankedcands[qpid].append((cpid, -1*sim))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels,
ranked_pair_sim_strings=ranked_pair_sim_strings)
resfile.close()
with codecs.open(os.path.join(reps_path, f'test-pid2pool-{dataset}-{sent_rep_type}-ranked.json'),
'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
def print_one_pool_nearest_neighbours(qdocid, all_neighbour_docids, pid2paperdata, resfile, pid_sources,
ranked_pair_sim_strings=None):
"""
Given the nearest neighbours indices write out the title and abstract and
if the neighbour is cited in the query.
:return:
"""
# Print out the nearest neighbours to disk.
qtitle = pid2paperdata[qdocid]['title']
resfile.write('======================================================================\n')
try:
year = pid2paperdata[qdocid]['metadata']['year']
        # -6 strips the '_label' suffix; labels are named '{:s}_label'.format(facet) by the predictor.
qabs = '\n'.join(['{:s}: {:s}'.format(facet[:-6], sent) for sent, facet in
zip(pid2paperdata[qdocid]['abstract'], pid2paperdata[qdocid]['pred_labels'])])
except KeyError:
year = None
qabs = '\n'.join(['{:d}: {:s}'.format(i, sent) for i, sent in
enumerate(pid2paperdata[qdocid]['abstract'])])
resfile.write('PAPER_ID: {:s}; YEAR: {:}\n'.format(qdocid, year))
resfile.write('TITLE: {:s}\n'.format(qtitle))
resfile.write('ABSTRACT:\n{:s}\n'.format(qabs))
# This happens in the case of treccovid.
if 'topic_narratives' in pid2paperdata[qdocid]:
resfile.write('TOPIC-ID: {:s}\n'.format(pid2paperdata[qdocid]['topic_ids']))
narratives = [tn['narrative'] for tn in pid2paperdata[qdocid]['topic_narratives']]
resfile.write('TOPIC Narrative:\n{:s}\n'.format('\n'.join(narratives)))
resfile.write('===================================\n')
written_candidates = 0
for ranki, (ndocid, sources) in enumerate(zip(all_neighbour_docids, pid_sources)):
# Do this only for treccovid.
if written_candidates > 100 and 'topic_narratives' in pid2paperdata[qdocid]:
break
# These are the two noise documents which trip people up. >_<
if ndocid in {'5111924', '41022419'}: continue
try:
ntitle = pid2paperdata[ndocid]['title']
except KeyError:
continue
try:
nabs = '\n'.join(['{:s}: {:s}'.format(facet[:-6], sent) for sent, facet in
zip(pid2paperdata[ndocid]['abstract'], pid2paperdata[ndocid]['pred_labels'])])
year = pid2paperdata[ndocid]['metadata']['year']
except KeyError:
year = None
nabs = '\n'.join(['{:d}: {:s}'.format(i, sent) for i, sent in
enumerate(pid2paperdata[ndocid]['abstract'])])
resfile.write('RANK: {:d}\n'.format(ranki))
resfile.write('PAPER_ID: {:s}; YEAR: {:}\n'.format(ndocid, year))
# This is either a list of strings or a int value of relevance.
if isinstance(sources, list):
resfile.write('sources: {:}\n'.format(', '.join(sources)))
elif isinstance(sources, int):
resfile.write('RELS: {:}\n'.format(sources))
if ranked_pair_sim_strings:
resfile.write('Query sent sims:\n{:}\n'.format(ranked_pair_sim_strings[ranki]))
resfile.write('TITLE: {:s}\n'.format(ntitle))
resfile.write('ABSTRACT:\n{:s}\n\n'.format(nabs))
written_candidates += 1
resfile.write('======================================================================\n')
resfile.write('\n')
def rank_pool(root_path, sent_rep_type, data_to_read, dataset, run_name):
"""
Given vectors on disk and a pool of candidates combined with gold citations
re-rank the pool based on the whole abstract rep alone.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param sent_rep_type: string;
:param data_to_read: string; {'abstract', 'title'}
:param dataset: string;
:return: write to disk.
"""
dataset, split = dataset, ''
if run_name:
reps_path = os.path.join(root_path, sent_rep_type, run_name)
else:
reps_path = os.path.join(root_path, sent_rep_type)
pool_fname = os.path.join(root_path, 'test-pid2anns-{:s}.json'.format(dataset))
all_map_fname = os.path.join(reps_path, 'pid2idx-{:s}-{:s}.json'.format(dataset, data_to_read))
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
# Read doc2idx maps.
with codecs.open(all_map_fname, 'r', 'utf-8') as fp:
all_doc2idx = json.load(fp)
logging.info('Read maps {:s}: {:}'.format(dataset, len(all_doc2idx)))
# Get queries pids (all queries have a required document)
logging.info('Read map all: {:}; total queries: {:}'.
format(len(all_doc2idx), len(qpid2pool)))
# Read vector reps.
if sent_rep_type in {'specter', 'cospecter'}:
all_doc_reps = np.load(os.path.join(reps_path, '{:s}-{:s}s.npy'.format(dataset, data_to_read)))
np.nan_to_num(all_doc_reps, copy=False)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info('Read anns: {:}; total: {:}; valid: {:}'.
format(dataset, len(qpid2pool), len(query_pids)))
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, f'abstracts-{dataset}.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, '{:s}{:s}-{:s}-ranked'.format(dataset, split, sent_rep_type))
du.create_dir(readable_dir_path)
for qi, qpid in enumerate(qpid2pool.keys()):
logging.info('Ranking query {:d}: {:s}'.format(qi, qpid))
resfile = codecs.open(os.path.join(readable_dir_path, '{:s}-{:s}{:s}-{:s}-ranked.txt'.
format(qpid, dataset, split, sent_rep_type)), 'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
query_idx = all_doc2idx[qpid]
query_rep = all_doc_reps[query_idx]
if query_rep.shape[0] != 1: # The sparse one is already reshaped somehow.
query_rep = query_rep.reshape(1, query_rep.shape[0])
pool_idxs = []
for cpid in cand_pids:
try:
pool_idxs.append(all_doc2idx[cpid])
except KeyError:
continue
pool_reps = all_doc_reps[pool_idxs, :]
index = neighbors.NearestNeighbors(n_neighbors=len(pool_idxs), algorithm='brute')
index.fit(pool_reps)
# Get nearest neighbours.
nearest_dists, nearest_idxs = index.kneighbors(X=query_rep)
# Build the re-ranked list of paper_ids.
neigh_ids = list(nearest_idxs[0])
neigh_dists = list(nearest_dists[0])
ranked_cand_pids = [cand_pids[nidx] for nidx in neigh_ids]
ranked_cand_pid_rels = [cand_pid_rels[nidx] for nidx in neigh_ids]
for nidx, ndist in zip(neigh_ids, neigh_dists):
# cand_pids is a list of pids
ndocid = cand_pids[nidx]
if ndocid == qpid:
                # This should never happen, but sometimes the gold cited data
                # contains the query id itself; log it and skip.
logging.info(qpid)
continue
query2rankedcands[qpid].append((ndocid, ndist))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels)
resfile.close()
with codecs.open(os.path.join(reps_path, 'test-pid2pool-{:s}-{:s}-ranked.json'.
format(dataset, sent_rep_type)), 'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
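# Illustrative sketch (random toy vectors): rank_pool above ranks a candidate
# pool by fitting sklearn's brute-force NearestNeighbors on the pool reps and
# asking for all pool members back, so the returned distance order is the full
# ranking. A minimal, self-contained version of that pattern:
def _sketch_rank_pool_with_knn():
    import numpy as np
    from sklearn import neighbors
    pool_reps = np.random.rand(10, 8)   # 10 candidate vectors.
    query_rep = np.random.rand(1, 8)    # single query vector; kneighbors expects 2D input.
    index = neighbors.NearestNeighbors(n_neighbors=pool_reps.shape[0], algorithm='brute')
    index.fit(pool_reps)
    nearest_dists, nearest_idxs = index.kneighbors(X=query_rep)
    # Row 0 holds the ranking for the single query: candidate indices ordered
    # from smallest to largest euclidean distance.
    ranked_cand_idxs = list(nearest_idxs[0])
    ranked_dists = list(nearest_dists[0])
    return ranked_cand_idxs, ranked_dists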
def rank_pool_sent_treccovid(root_path, sent_rep_type, data_to_read, dataset, run_name):
"""
Given vectors on disk and a pool of candidates re-rank the pool based on the sentence rep.
Function for use when the pool candidate reps are part of the gorc
datasets reps. All reps are sentence level - this function is mainly for use with sentence encoder
outputs.
This is a function to use when the candidate pools are deep (and have overlaps across queries)
    and individually going over every query is a waste.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param dataset: string; {'csfcube'}; eval dataset to use.
:param sent_rep_type: string; {'sbtinybertsota', 'sbrobertanli'}
:param data_to_read: string; {'sent'}
:return: write to disk.
"""
dataset, split = dataset, ''
if run_name:
reps_path = os.path.join(root_path, sent_rep_type, run_name)
try:
with codecs.open(os.path.join(reps_path, 'run_info.json'), 'r', 'utf-8') as fp:
run_info = json.load(fp)
all_hparams = run_info['all_hparams']
score_type = all_hparams['score_aggregation']
except (FileNotFoundError, KeyError) as err:
logging.info(f'Error loading run_info.json: {err}')
score_type = 'cosine'
else:
reps_path = os.path.join(root_path, sent_rep_type)
score_type = 'cosine'
logging.info(f'Score type: {score_type}')
pool_fname = os.path.join(root_path, 'test-pid2anns-{:s}{:s}.json'.format(dataset, split))
all_map_fname = os.path.join(reps_path, 'pid2idx-{:s}-sent.json'.format(dataset))
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
with codecs.open(all_map_fname, 'r', 'utf-8') as fp:
all_docsents2idx = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info('Read anns: {:}; total: {:}; valid: {:}'.
format(dataset, len(qpid2pool), len(query_pids)))
# Read vector reps.
all_doc_reps = np.load(os.path.join(reps_path, '{:s}-{:s}.npy'.
format(dataset, data_to_read)))
np.nan_to_num(all_doc_reps, copy=False)
logging.info('Read {:s} sent reps: {:}'.format(dataset, all_doc_reps.shape))
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, f'abstracts-{dataset}.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
logging.info(f'Abstracts: {len(pid2abstract)}')
# Get ids for all the queries.
all_query_sent_repids = []
for qpid in query_pids:
query_sent_repids = [f'{qpid}-{i}' for i, l in enumerate(pid2abstract[qpid]['abstract'])]
all_query_sent_repids.extend(query_sent_repids)
allqsentrep2idx = dict([(repid, idx) for idx, repid in enumerate(all_query_sent_repids)])
all_query_idxs = [all_docsents2idx[i] for i in all_query_sent_repids]
all_query_sent_reps = all_doc_reps[all_query_idxs, :]
if score_type in {'dotlse'}: # Dot product was used for training.
allquery2cand_sims = np.matmul(all_query_sent_reps, all_doc_reps.T)
elif score_type in {'l2lse', 'l2max', 'l2top2'}:
allquery2cand_sims = -1.0*spatial.distance.cdist(all_query_sent_reps, all_doc_reps)
elif score_type in {'cosinemax', 'cosine'}:
allquery2cand_sims = skmetrics.pairwise.cosine_similarity(all_query_sent_reps, all_doc_reps)
logging.info('All query cand sims: {:}'.format(allquery2cand_sims.shape))
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, '{:s}{:s}-{:s}-ranked'.format(dataset, split, sent_rep_type))
du.create_dir(readable_dir_path)
for qi, qpid in enumerate(query_pids):
resfile = codecs.open(os.path.join(readable_dir_path, '{:s}-{:s}{:s}-{:s}-ranked.txt'.
format(qpid, dataset, split, sent_rep_type)), 'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
# Get the query abstracts sentence representations
query_sent_repids = [f'{qpid}-{i}' for i, l in enumerate(pid2abstract[qpid]['abstract'])]
query_idx = [allqsentrep2idx[i] for i in query_sent_repids]
# Get idxs of all sentences in the pool.
pool_sent_ids = []
cand_lens = []
for cpid in cand_pids:
cand_ids = [f'{cpid}-{i}' for i in range(len(pid2abstract[cpid]['abstract']))]
cand_lens.append(len(cand_ids))
pool_sent_ids.extend(cand_ids)
pool_idxs = [all_docsents2idx[csent_id] for csent_id in pool_sent_ids]
query2cand_sims = allquery2cand_sims[np.ix_(query_idx, pool_idxs)]
logging.info('Ranking query {:d}: {:s}; {:}'.format(qi, qpid, query2cand_sims.shape))
# Get nearest neighbours.
start_idx = 0
cand_sims = {}
cand_pair_sims = {}
for cpid, num_csents in zip(cand_pids, cand_lens):
pair_sent_sims = query2cand_sims[:, start_idx: start_idx+num_csents]
if score_type == 'l2top2':
try:
# partial sort smallest distance to largest.
temp = np.partition(-1*pair_sent_sims.flatten(), kth=2)
# sum the top2 similarities.
max_sim = float(np.sum(-1*temp[:2]))
# Some q-cand pairs have 2 or fewer sentences.
except ValueError:
max_sim = float(np.sum(pair_sent_sims.flatten()))
else:
max_sim = float(np.max(pair_sent_sims))
cand_sims[cpid] = max_sim
cand_pair_sims[cpid] = pair_sent_sims
start_idx += num_csents
# Build the re-ranked list of paper_ids.
ranked_cand_pids = []
ranked_cand_pid_rels = []
ranked_pair_sim_strings = []
for cpid, sim in sorted(cand_sims.items(), key=lambda i: i[1], reverse=True):
ranked_cand_pids.append(cpid)
rel = cand_pid_rels[cand_pids.index(cpid)]
ranked_cand_pid_rels.append(rel)
# Only save these for the cands which you will print out.
if len(ranked_pair_sim_strings) < 110:
ranked_pair_sim_strings.append(np.array2string(cand_pair_sims[cpid], precision=2))
# Save a distance because its what prior things saved.
query2rankedcands[qpid].append((cpid, -1*sim))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels,
ranked_pair_sim_strings=ranked_pair_sim_strings)
resfile.close()
with codecs.open(os.path.join(reps_path, 'test-pid2pool-{:s}{:s}-{:s}-ranked.json'.
format(dataset, split, sent_rep_type)), 'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
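# Illustrative sketch (toy numbers): rank_pool_sent_treccovid precomputes one
# big query-sentence x candidate-sentence similarity matrix and then pulls out
# each query-candidate block with np.ix_, which avoids re-encoding deep,
# overlapping pools per query. The slicing pattern on a small matrix:
def _sketch_block_slice_sims():
    import numpy as np
    all_sims = np.arange(30, dtype=float).reshape(5, 6)  # 5 query sents x 6 pool sents.
    query_rows = [1, 2]      # rows for this query's sentences.
    cand_cols = [0, 3, 4]    # columns for one candidate's sentences.
    block = all_sims[np.ix_(query_rows, cand_cols)]
    assert block.shape == (2, 3)
    return block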
def rank_pool_sent(root_path, sent_rep_type, data_to_read, dataset, run_name):
"""
Given vectors on disk and a pool of candidates re-rank the pool based on the sentence rep.
Function for use when the pool candidate reps are part of the gorc
datasets reps. All reps are sentence level - this function is mainly for use with sentence encoder
outputs.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param dataset: string; {'csfcube'}; eval dataset to use.
:param sent_rep_type: string; {'sbtinybertsota', 'sbrobertanli'}
:param data_to_read: string; {'sent'}
:return: write to disk.
"""
dataset, split = dataset, ''
if run_name:
reps_path = os.path.join(root_path, sent_rep_type, run_name)
try:
with codecs.open(os.path.join(reps_path, 'run_info.json'), 'r', 'utf-8') as fp:
run_info = json.load(fp)
all_hparams = run_info['all_hparams']
score_type = all_hparams['score_aggregation']
except (FileNotFoundError, KeyError) as err:
logging.info(f'Error loading run_info.json: {err}')
score_type = 'cosine'
else:
reps_path = os.path.join(root_path, sent_rep_type)
score_type = 'cosine'
logging.info(f'Score type: {score_type}')
# read candidate reps from the whole abstract reps and query reps from the faceted ones.
pool_fname = os.path.join(root_path, 'test-pid2anns-{:s}{:s}.json'.format(dataset, split))
# Also allow experimentation with unfaceted reps.
all_map_fname = os.path.join(reps_path, 'pid2idx-{:s}-sent.json'.format(dataset))
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
with codecs.open(all_map_fname, 'r', 'utf-8') as fp:
all_docsents2idx = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info('Read anns: {:}; total: {:}; valid: {:}'.
format(dataset, len(qpid2pool), len(query_pids)))
# Read vector reps.
all_doc_reps = np.load(os.path.join(reps_path, '{:s}-{:s}.npy'.
format(dataset, data_to_read)))
np.nan_to_num(all_doc_reps, copy=False)
logging.info('Read {:s} sent reps: {:}'.format(dataset, all_doc_reps.shape))
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, f'abstracts-{dataset}.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, '{:s}{:s}-{:s}-ranked'.format(dataset, split, sent_rep_type))
du.create_dir(readable_dir_path)
for qi, qpid in enumerate(query_pids):
logging.info('Ranking query {:d}: {:s}'.format(qi, qpid))
resfile = codecs.open(os.path.join(readable_dir_path, '{:s}-{:s}{:s}-{:s}-ranked.txt'.
format(qpid, dataset, split, sent_rep_type)), 'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
# Get the query abstracts sentence representations
query_sent_repids = [f'{qpid}-{i}' for i, l in enumerate(pid2abstract[qpid]['abstract'])]
query_idx = [all_docsents2idx[i] for i in query_sent_repids]
query_fsent_rep = all_doc_reps[query_idx]
if query_fsent_rep.shape[0] == 768:
query_fsent_rep = query_fsent_rep.reshape(1, query_fsent_rep.shape[0])
# Get representations of all sentences in the pool.
pool_sent_ids = []
cand_lens = []
for cpid in cand_pids:
cand_ids = [f'{cpid}-{i}' for i in range(len(pid2abstract[cpid]['abstract']))]
cand_lens.append(len(cand_ids))
pool_sent_ids.extend(cand_ids)
pool_idxs = [all_docsents2idx[csent_id] for csent_id in pool_sent_ids]
candpool_sent_reps = all_doc_reps[pool_idxs, :]
if score_type in {'dotlse'}: # Dot product was used for training.
query2cand_sims = np.matmul(query_fsent_rep, candpool_sent_reps.T)
elif score_type in {'l2lse', 'l2max', 'l2top2'}:
query2cand_sims = -1.0*spatial.distance.cdist(query_fsent_rep, candpool_sent_reps)
elif score_type in {'cosinemax', 'cosine'}:
query2cand_sims = skmetrics.pairwise.cosine_similarity(query_fsent_rep, candpool_sent_reps)
# Get nearest neighbours.
start_idx = 0
cand_sims = {}
cand_pair_sims_string = {}
for cpid, num_csents in zip(cand_pids, cand_lens):
pair_sent_sims = query2cand_sims[:, start_idx: start_idx+num_csents]
if score_type == 'l2top2':
try:
# partial sort largest sim to smallest.
temp = np.partition(-1*pair_sent_sims.flatten(), kth=2)
# sum the top2 similarities.
max_sim = float(np.sum(-1*temp[:2]))
# Some q-cand pairs have 2 or fewer sentences.
except ValueError:
max_sim = float(np.sum(pair_sent_sims.flatten()))
else:
max_sim = float(np.max(pair_sent_sims))
cand_sims[cpid] = max_sim
cand_pair_sims_string[cpid] = np.array2string(pair_sent_sims, precision=2)
start_idx += num_csents
# Build the re-ranked list of paper_ids.
ranked_cand_pids = []
ranked_cand_pid_rels = []
ranked_pair_sim_strings = []
for cpid, sim in sorted(cand_sims.items(), key=lambda i: i[1], reverse=True):
ranked_cand_pids.append(cpid)
rel = cand_pid_rels[cand_pids.index(cpid)]
ranked_cand_pid_rels.append(rel)
ranked_pair_sim_strings.append(cand_pair_sims_string[cpid])
# Save a distance because its what prior things saved.
query2rankedcands[qpid].append((cpid, -1*sim))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels,
ranked_pair_sim_strings=ranked_pair_sim_strings)
resfile.close()
with codecs.open(os.path.join(reps_path, 'test-pid2pool-{:s}{:s}-{:s}-ranked.json'.
format(dataset, split, sent_rep_type)), 'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
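# Illustrative sketch (toy numbers): the 'l2top2' aggregation used above scores
# a candidate by the sum of its two largest query-sentence/candidate-sentence
# similarities, via np.partition on the negated sims; the plain-sum fallback
# mirrors the ValueError branch for pairs with fewer than three sims.
def _sketch_top2_aggregation(pair_sent_sims=None):
    import numpy as np
    if pair_sent_sims is None:
        pair_sent_sims = np.array([[0.1, 0.9], [0.4, 0.7]])
    try:
        # After partition, the two smallest negated sims (i.e. the largest sims) sit first.
        temp = np.partition(-1 * pair_sent_sims.flatten(), kth=2)
        top2_sum = float(np.sum(-1 * temp[:2]))
    except ValueError:
        # Fewer than three sims in total: just sum everything.
        top2_sum = float(np.sum(pair_sent_sims.flatten()))
    return top2_sum  # 0.9 + 0.7 = 1.6 for the default toy matrix.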
def rank_pool_sentfaceted(root_path, sent_rep_type, data_to_read, dataset, facet, run_name):
"""
Given vectors on disk and a pool of candidates re-rank the pool based on the sentence rep
and the facet passed. Function for use when the pool candidate reps are part of the gorc
datasets reps. All reps are sentence level - this function is mainly for use with sentence bert
outputs.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param dataset: string; {'csfcube'}; eval dataset to use.
:param sent_rep_type: string; {'sbtinybertsota', 'sbrobertanli'}
:param data_to_read: string; {'sent'}
:param facet: string; {'background', 'method', 'result'} background and objective merged.
:return: write to disk.
"""
dataset, split = dataset, ''
if run_name:
reps_path = os.path.join(root_path, sent_rep_type, run_name)
try:
with codecs.open(os.path.join(reps_path, 'run_info.json'), 'r', 'utf-8') as fp:
run_info = json.load(fp)
all_hparams = run_info['all_hparams']
score_type = all_hparams['score_aggregation']
except (FileNotFoundError, KeyError) as err:
logging.info(f'Error loading run_info.json: {err}')
score_type = 'cosine'
else:
reps_path = os.path.join(root_path, sent_rep_type)
score_type = 'cosine'
logging.info(f'Score type: {score_type}')
# read candidate reps from the whole abstract reps and query reps from the faceted ones.
pool_fname = os.path.join(root_path, 'test-pid2anns-{:s}{:s}-{:s}.json'.format(dataset, split, facet))
# Also allow experimentation with unfaceted reps.
all_map_fname = os.path.join(reps_path, 'pid2idx-{:s}-sent.json'.format(dataset))
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
with codecs.open(all_map_fname, 'r', 'utf-8') as fp:
all_docsents2idx = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in qpid2pool]
logging.info('Read anns: {:}; total: {:}; valid: {:}'.
format(dataset, len(qpid2pool), len(query_pids)))
# Read vector reps.
all_doc_reps = np.load(os.path.join(reps_path, '{:s}-{:s}.npy'.format(dataset, data_to_read)))
np.nan_to_num(all_doc_reps, copy=False)
logging.info('Read {:s} sent reps: {:}'.format(dataset, all_doc_reps.shape))
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, 'abstracts-csfcube-preds.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, '{:s}{:s}-{:s}-ranked'.format(dataset, split, sent_rep_type))
du.create_dir(readable_dir_path)
for qpid in query_pids:
resfile = codecs.open(os.path.join(readable_dir_path, '{:s}-{:s}{:s}-{:s}-{:s}-ranked.txt'.
format(qpid, dataset, split, sent_rep_type, facet)), 'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
# Get the query abstracts query facet sentence representations
query_abs_labs = ['background_label' if lab == 'objective_label' else lab for lab
in pid2abstract[qpid]['pred_labels']]
query_sent_repids = [f'{qpid}-{i}' for i, l in enumerate(query_abs_labs) if f'{facet}_label' == l]
query_idx = [all_docsents2idx[i] for i in query_sent_repids]
query_fsent_rep = all_doc_reps[query_idx]
if query_fsent_rep.shape[0] == 768:
query_fsent_rep = query_fsent_rep.reshape(1, query_fsent_rep.shape[0])
# Get representations of all sentences in the pool.
pool_sent_ids = []
cand_lens = []
for cpid in cand_pids:
cand_abs_labs = ['background_label' if lab == 'objective_label' else lab for lab
in pid2abstract[cpid]['pred_labels']]
cand_ids = [f'{cpid}-{i}' for i in range(len(cand_abs_labs))]
cand_lens.append(len(cand_ids))
pool_sent_ids.extend(cand_ids)
pool_idxs = [all_docsents2idx[csent_id] for csent_id in pool_sent_ids]
candpool_sent_reps = all_doc_reps[pool_idxs, :]
if score_type in {'dotlse'}: # Dot product was used for training.
query2cand_sims = np.matmul(query_fsent_rep, candpool_sent_reps.T)
elif score_type in {'l2lse', 'l2max', 'l2top2'}:
query2cand_sims = -1.0*spatial.distance.cdist(query_fsent_rep, candpool_sent_reps)
elif score_type in {'cosinemax', 'cosine'}:
query2cand_sims = skmetrics.pairwise.cosine_similarity(query_fsent_rep, candpool_sent_reps)
# Get nearest neighbours.
start_idx = 0
cand_sims = {}
cand_pair_sims_string = {}
for cpid, num_csents in zip(cand_pids, cand_lens):
pair_sent_sims = query2cand_sims[:, start_idx: start_idx+num_csents]
if score_type == 'l2top2':
# partial sort largest sim to smallest.
temp = np.partition(-1*pair_sent_sims.flatten(), kth=2)
# sum the top2 similarities.
max_sim = float(np.sum(-1*temp[:2]))
else:
max_sim = float(np.max(pair_sent_sims))
# elif score_method == 'maxsum':
# max_sim = float(np.sum(np.max(pair_sent_sims, axis=1)))
# elif score_method == 'top3':
# flat = pair_sent_sims.flatten()
# topidx = np.argpartition(flat, -3)[-3:]
# max_sim = float(np.sum(flat[topidx]))
# else:
# raise AssertionError
cand_sims[cpid] = max_sim
cand_pair_sims_string[cpid] = np.array2string(pair_sent_sims, precision=2)
start_idx += num_csents
# Build the re-ranked list of paper_ids.
ranked_cand_pids = []
ranked_cand_pid_rels = []
ranked_pair_sim_strings = []
for cpid, sim in sorted(cand_sims.items(), key=lambda i: i[1], reverse=True):
ranked_cand_pids.append(cpid)
rel = cand_pid_rels[cand_pids.index(cpid)]
ranked_cand_pid_rels.append(rel)
ranked_pair_sim_strings.append(cand_pair_sims_string[cpid])
# Save a distance because its what prior things saved.
query2rankedcands[qpid].append((cpid, -1*sim))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile,
pid_sources=ranked_cand_pid_rels,
ranked_pair_sim_strings=ranked_pair_sim_strings)
resfile.close()
with codecs.open(os.path.join(reps_path, 'test-pid2pool-{:s}{:s}-{:s}-{:s}-ranked.json'.
format(dataset, split, sent_rep_type, facet)), 'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
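# Illustrative sketch (made-up labels): the faceted functions above first merge
# 'objective_label' sentences into 'background_label' and then keep only the
# sentence ids whose predicted label matches the requested facet.
def _sketch_facet_sentence_ids(qpid='p1', facet='background'):
    pred_labels = ['objective_label', 'method_label', 'result_label', 'background_label']
    merged = ['background_label' if lab == 'objective_label' else lab for lab in pred_labels]
    facet_sent_ids = [f'{qpid}-{i}' for i, lab in enumerate(merged) if lab == f'{facet}_label']
    return facet_sent_ids  # ['p1-0', 'p1-3'] for the background facet.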
def rank_pool_faceted(root_path, sent_rep_type, data_to_read, dataset, facet, run_name):
"""
Given vectors on disk and a pool of candidates re-rank the pool based on the sentence rep
and the facet passed. Function for use when the pool candidate reps are part of the gorc
datasets reps. Query reps per facet will be on disk.
:param root_path: string; directory with abstracts jsonl and citation network data and subdir of
reps to use for retrieval.
:param dataset: string;
:param sent_rep_type: string;
:param data_to_read: string; {'abstract', 'title'}
    :param facet: string; {'background', 'method', 'result'} background and objective merged.
:return: write to disk.
"""
if run_name:
reps_path = os.path.join(root_path, sent_rep_type, run_name)
else:
reps_path = os.path.join(root_path, sent_rep_type)
# read candidate reps from the whole abstract reps and query reps from the faceted ones.
pool_fname = os.path.join(root_path, f'test-pid2anns-{dataset}-{facet}.json')
# Also allow experimentation with unfaceted reps.
if sent_rep_type in {'cospecter'}:
all_map_fname = os.path.join(reps_path, f'pid2idx-{dataset}-{data_to_read}.json')
# Read test pool.
with codecs.open(pool_fname, 'r', 'utf-8') as fp:
qpid2pool = json.load(fp)
# Read doc2idx maps.
with codecs.open(all_map_fname, 'r', 'utf-8') as fp:
all_doc2idx = json.load(fp)
query_pids = [qpid for qpid in qpid2pool.keys() if qpid in all_doc2idx]
logging.info('Read maps facet {:s}: total: {:}; valid: {:}'.
format(dataset, len(qpid2pool), len(query_pids)))
# Read vector reps.
if sent_rep_type in {'cospecter', 'specter'}:
all_doc_reps = np.load(os.path.join(reps_path, f'{dataset}-{data_to_read}s.npy'))
np.nan_to_num(all_doc_reps, copy=False)
logging.info('Read faceted {:s}: {:}'.format(dataset, all_doc_reps.shape))
# Read in abstracts for printing readable.
pid2abstract = {}
with codecs.open(os.path.join(root_path, 'abstracts-csfcube-preds.jsonl'), 'r', 'utf-8') as absfile:
for line in absfile:
injson = json.loads(line.strip())
pid2abstract[injson['paper_id']] = injson
# Go over every query and get the query rep and the reps for the pool and generate ranking.
query2rankedcands = collections.defaultdict(list)
readable_dir_path = os.path.join(reps_path, '{:s}-{:s}-ranked'.format(dataset, sent_rep_type))
du.create_dir(readable_dir_path)
for qpid in query_pids:
resfile = codecs.open(os.path.join(readable_dir_path, '{:s}-{:s}-{:s}-{:s}-ranked.txt'.
format(qpid, dataset, sent_rep_type, facet)), 'w', 'utf-8')
cand_pids = qpid2pool[qpid]['cands']
cand_pid_rels = qpid2pool[qpid]['relevance_adju']
query_idx = all_doc2idx[qpid]
query_rep = all_doc_reps[query_idx]
if query_rep.shape[0] != 1: # The sparse one is already reshaped somehow.
query_rep = query_rep.reshape(1, query_rep.shape[0])
pool_idxs = [all_doc2idx[pid] for pid in cand_pids]
pool_reps = all_doc_reps[pool_idxs, :]
index = neighbors.NearestNeighbors(n_neighbors=len(pool_idxs), algorithm='brute')
index.fit(pool_reps)
# Get nearest neighbours.
nearest_dists, nearest_idxs = index.kneighbors(X=query_rep)
# Build the re-ranked list of paper_ids.
neigh_ids = list(nearest_idxs[0])
neigh_dists = list(nearest_dists[0])
ranked_cand_pids = [cand_pids[nidx] for nidx in neigh_ids]
ranked_cand_pid_rels = [cand_pid_rels[nidx] for nidx in neigh_ids]
for nidx, ndist in zip(neigh_ids, neigh_dists):
# cand_pids is a list of pids
ndocid = cand_pids[nidx]
if ndocid == qpid:
continue
query2rankedcands[qpid].append((ndocid, ndist))
# Print out the neighbours.
print_one_pool_nearest_neighbours(qdocid=qpid, all_neighbour_docids=ranked_cand_pids,
pid2paperdata=pid2abstract, resfile=resfile, pid_sources=ranked_cand_pid_rels)
resfile.close()
with codecs.open(os.path.join(reps_path, 'test-pid2pool-{:s}-{:s}-{:s}-ranked.json'.
format(dataset, sent_rep_type, facet)), 'w', 'utf-8') as fp:
json.dump(query2rankedcands, fp)
logging.info('Wrote: {:s}'.format(fp.name))
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand',
help='The action to perform.')
# Rank the pool for every query.
dataset_rank_pool = subparsers.add_parser('rank_pool')
dataset_rank_pool.add_argument('--root_path', required=True,
help='Path with abstracts, sentence reps and citation info.')
dataset_rank_pool.add_argument('--run_name', default=None,
help='Path with trained sentence reps if using.')
dataset_rank_pool.add_argument('--rep_type', required=True,
choices=['sbtinybertsota', 'sbrobertanli', 'sentpubmedbert', 'sbmpnet1B',
'cosentbert', 'ictsentbert', 'cospecter',
'miswordbienc', 'supsimcse', 'unsupsimcse',
'miswordpolyenc', 'sbalisentbienc'],
help='The kind of rep to use for nearest neighbours.')
dataset_rank_pool.add_argument('--model_path',
help='Path to directory with trained model to use for getting scoring function.')
dataset_rank_pool.add_argument('--dataset', required=True,
choices=['csfcube', 'relish', 'treccovid',
'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'],
help='The dataset to predict for.')
dataset_rank_pool.add_argument('--facet',
choices=['background', 'method', 'result'],
help='Facet of abstract to read from.')
dataset_rank_pool.add_argument('--log_fname',
help='File name for the log file to which logs get written.')
dataset_rank_pool.add_argument('--caching_scorer', action="store_true", default=False)
cl_args = parser.parse_args()
# If a log file was passed then write to it.
try:
logging.basicConfig(level='INFO', format='%(message)s',
filename=cl_args.log_fname)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
# Else just write to stdout.
except AttributeError:
logging.basicConfig(level='INFO', format='%(message)s',
stream=sys.stdout)
# Print the called script and its args to the log.
logging.info(' '.join(sys.argv))
if cl_args.subcommand == 'rank_pool':
if cl_args.rep_type in {'sbtinybertsota', 'sbrobertanli', 'cosentbert', 'ictsentbert',
'miswordbienc', 'supsimcse', 'unsupsimcse',
'miswordpolyenc', 'sbalisentbienc', 'sbmpnet1B'}:
data_to_read = 'sent'
else:
data_to_read = 'abstract'
if cl_args.dataset in {'csfcube'}:
if cl_args.rep_type in {'sbtinybertsota', 'sbrobertanli', 'sbmpnet1B', 'cosentbert', 'ictsentbert',
'miswordbienc', 'supsimcse', 'unsupsimcse', 'sbalisentbienc'} \
and not cl_args.caching_scorer:
rank_pool_sentfaceted(root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type,
data_to_read=data_to_read, dataset=cl_args.dataset, facet=cl_args.facet,
run_name=cl_args.run_name)
elif cl_args.rep_type in {'miswordpolyenc'}:
scoringmodel_rank_pool_sentfaceted(root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type,
data_to_read=data_to_read, dataset=cl_args.dataset,
facet=cl_args.facet,
run_name=cl_args.run_name, trained_model_path=cl_args.model_path)
elif cl_args.rep_type in {'cospecter', 'sbalisentbienc', 'miswordbienc'} \
and cl_args.caching_scorer:
caching_scoringmodel_rank_pool_sentfaceted(
root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type, dataset=cl_args.dataset,
facet=cl_args.facet, run_name=cl_args.run_name, trained_model_path=cl_args.model_path)
else:
rank_pool_faceted(root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type,
data_to_read=data_to_read, dataset=cl_args.dataset, facet=cl_args.facet,
run_name=cl_args.run_name)
elif cl_args.dataset in {'relish', 'treccovid', 'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'}:
if cl_args.rep_type in {'sbtinybertsota', 'sbrobertanli', 'sbmpnet1B', 'cosentbert',
'ictsentbert', 'miswordbienc',
'supsimcse', 'unsupsimcse', 'sbalisentbienc'} and \
not cl_args.caching_scorer and \
cl_args.dataset in {'relish', 'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'}:
rank_pool_sent(root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type,
data_to_read=data_to_read, dataset=cl_args.dataset,
run_name=cl_args.run_name)
elif cl_args.rep_type in {'sbtinybertsota', 'sbrobertanli', 'sbmpnet1B', 'cosentbert',
'ictsentbert', 'miswordbienc', 'supsimcse', 'unsupsimcse',
'sbalisentbienc'} and \
not cl_args.caching_scorer and cl_args.dataset == 'treccovid':
rank_pool_sent_treccovid(root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type,
data_to_read=data_to_read, dataset=cl_args.dataset,
run_name=cl_args.run_name)
elif cl_args.rep_type in {'miswordpolyenc'}:
scoringmodel_rank_pool_sent(root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type,
data_to_read=data_to_read, dataset=cl_args.dataset,
run_name=cl_args.run_name, trained_model_path=cl_args.model_path)
elif cl_args.rep_type in {'cospecter', 'sbalisentbienc', 'miswordbienc'} \
and cl_args.caching_scorer:
caching_scoringmodel_rank_pool_sent(
root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type, dataset=cl_args.dataset,
run_name=cl_args.run_name, trained_model_path=cl_args.model_path)
else:
rank_pool(root_path=cl_args.root_path, sent_rep_type=cl_args.rep_type,
data_to_read=data_to_read, dataset=cl_args.dataset, run_name=cl_args.run_name)
if __name__ == '__main__':
main()
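# Example invocation (hypothetical paths, run name, and module path; shown only
# to illustrate the argparse interface defined above):
#   python -m src.pre_process.pp_gen_nearest rank_pool \
#       --root_path /path/to/dataset_dir --rep_type miswordbienc \
#       --dataset csfcube --facet background --run_name my_run \
#       --log_fname /path/to/rank_pool.log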
| aspire-main | src/pre_process/pp_gen_nearest.py |
"""
Process the RELISH dataset.
"""
import os
import codecs
import json
import collections
import csv
import pandas as pd
import spacy
scispacy_model = spacy.load("en_core_sci_sm")
scispacy_model.add_pipe('sentencizer')
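# Illustrative usage (assumed example text): the pipeline above is used purely
# for sentence splitting, with the heavier spacy components disabled at call
# time, mirroring the call made inside scidocs2myjson below:
#   doc = scispacy_model('First sentence. Second sentence.',
#                        disable=['tok2vec', 'tagger', 'attribute_ruler',
#                                 'lemmatizer', 'parser', 'ner'])
#   sents = [s.text for s in doc.sents]  # -> ['First sentence.', 'Second sentence.']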
def scidocs2myjson(in_path, out_path, dataset_name):
"""
- Write out jsonl file of abstracts and title.
- Write out the annotations in a json.
- Write out a csv file of the queries metadata.
- Write out json file of the evaluation splits.
    :param in_path: directory with a json file of titles and abstracts for papers
        and subdirs with annotation and split text files per dataset.
    :param out_path: directory to write the processed files to.
:param dataset_name: {'cite', 'cocite', 'coread', 'coview'}
:return:
"""
print(f'Dataset: {dataset_name}')
# Read json file of paper data.
with codecs.open(os.path.join(in_path, 'paper_metadata_view_cite_read.json'), 'r', 'utf-8') as fp:
pid2paper_data = json.load(fp)
# Read splits and relevance labels.
qpids2pool = collections.defaultdict(list)
dev_qpids, test_qpids = set(), set()
allpid2data = {}
invalid_queries = set()
missing_cands = set()
for split in ['val', 'test']:
with codecs.open(os.path.join(in_path, dataset_name, f'{split}.qrel'), 'r', 'utf-8') as val_file:
for line in val_file:
items = line.strip().split()
qpid, _, cand_pid, relevance = str(items[0]), items[1], str(items[2]), int(items[3])
try:
assert(pid2paper_data[qpid]['abstract'] != None)
assert(pid2paper_data[qpid]['title'] != None)
except (AssertionError, KeyError):
invalid_queries.add(qpid)
continue
try:
assert(pid2paper_data[cand_pid]['abstract'] != None)
assert(pid2paper_data[cand_pid]['title'] != None)
except (AssertionError, KeyError):
missing_cands.add(cand_pid)
continue
allpid2data[cand_pid] = pid2paper_data[cand_pid]
qpids2pool[qpid].append((cand_pid, relevance))
allpid2data[qpid] = pid2paper_data[qpid]
if split == 'val':
dev_qpids.add(qpid)
else:
test_qpids.add(qpid)
print(f'Invalid queries: {len(invalid_queries)}')
print(f'Missing candidates: {len(missing_cands)}')
assert(len(set.intersection(dev_qpids, test_qpids)) == 0)
print(f'Dev queries: {len(dev_qpids)}')
print(f'Test queries: {len(test_qpids)}')
print(f'All papers: {len(allpid2data)}')
# Write out split files:
eval_splits = {'dev': list(dev_qpids), 'test': list(test_qpids)}
with codecs.open(os.path.join(out_path, f'scid{dataset_name}-evaluation_splits.json'), 'w', 'utf-8') as fp:
json.dump(eval_splits, fp)
print('Wrote: {:s}'.format(fp.name))
# Write abstracts in jsonl file.
out_file = codecs.open(os.path.join(out_path, f'abstracts-scid{dataset_name}.jsonl'), 'w', 'utf-8')
pid2abstract = {}
papers_without_abstracts = 0
for pid, pdata in allpid2data.items():
metadata = {'year': pdata['year']}
try:
abs_sentences = scispacy_model(pdata['abstract'],
disable=['tok2vec', 'tagger', 'attribute_ruler',
'lemmatizer', 'parser', 'ner'])
except TypeError:
papers_without_abstracts += 1
continue
abs_sentences = [sent.text for sent in abs_sentences.sents]
title = pdata['title']
assert(title and len(abs_sentences) > 0)
doc_dict = {'title': title, 'abstract': abs_sentences, 'paper_id': pid, 'metadata': metadata}
pid2abstract[pid] = doc_dict
out_file.write(json.dumps(doc_dict)+'\n')
print(f'Invalid documents: {papers_without_abstracts}')
print(f'Docs with data: {len(pid2abstract)}')
print(f'Wrote: {out_file.name}')
out_file.close()
# Build qpids2anns and write and queries metadata.
query_meta_file = codecs.open(os.path.join(out_path, f'scid{dataset_name}-queries-release.csv'), 'w', 'utf-8')
query_meta_csv = csv.DictWriter(query_meta_file, extrasaction='ignore',
fieldnames=['paper_id', 'title'])
query_meta_csv.writeheader()
num_cands_perq = []
qpmid2cands = {}
for qpid, rel_pool in qpids2pool.items():
cands = [i[0] for i in rel_pool]
relevances = [i[1] for i in rel_pool]
if cands:
qpmid2cands[qpid] = {'cands': cands, 'relevance_adju': relevances}
query_meta_csv.writerow({'title': pid2abstract[qpid]['title'], 'paper_id': qpid})
            # Check that there aren't papers with multiple ratings.
assert(len(set(cands)) == len(cands))
num_cands_perq.append(len(cands))
cand_summ = pd.DataFrame(num_cands_perq).describe()
print('Candidates per query: {:}'.format(cand_summ))
with codecs.open(os.path.join(out_path, f'test-pid2anns-scid{dataset_name}.json'), 'w') as fp:
json.dump(qpmid2cands, fp)
print('Wrote: {:s}'.format(fp.name))
print('Wrote: {:}\n'.format(query_meta_file.name))
query_meta_file.close()
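# Illustrative sketch (made-up ids): the qrel parsing above expects whitespace
# separated lines of 'query_pid unused cand_pid relevance'. A one-line version
# of that parse:
def _sketch_parse_qrel_line(line='18983497 0 88335932 1'):
    items = line.strip().split()
    qpid, cand_pid, relevance = str(items[0]), str(items[2]), int(items[3])
    return qpid, cand_pid, relevance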
if __name__ == '__main__':
scidocs2myjson(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/scidocs/data',
out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/my_scidocs',
dataset_name='cite')
scidocs2myjson(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/scidocs/data',
out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/my_scidocs',
dataset_name='cocite')
scidocs2myjson(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/scidocs/data',
out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/my_scidocs',
dataset_name='coread')
scidocs2myjson(in_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/scidocs/data',
out_path='/iesl/canvas/smysore/2021-ai2-scisim/datasets_raw/my_scidocs',
dataset_name='coview')
| aspire-main | src/pre_process/pre_proc_scidocs.py |
from PURE.shared.const import task_ner_labels, get_labelmap
from PURE.entity.models import EntityModel
import codecs
import json
import os
from scipy.special import softmax
from collections import namedtuple
from tqdm import tqdm
import argparse
from typing import List
### constants ###
TASK_NAME = 'scierc'
NUM_LABELS = len(task_ner_labels[TASK_NAME]) + 1
LABEL2ID, ID2LABEL = get_labelmap(task_ner_labels[TASK_NAME])
MAX_SPAN_LENGTH = 8
def load_entity_model(entity_model_dir: str):
"""
    :param entity_model_dir: path to dir where PURE's entity bert model was downloaded.
e.g. /aspire/PURE/scierc_models/ent-scib-ctx0
:return: loaded entity model
"""
Args = namedtuple("Args", "model bert_model_dir use_albert max_span_length")
args = Args(model="allenai/scibert_scivocab_uncased",
bert_model_dir=entity_model_dir,
use_albert=False,
max_span_length=MAX_SPAN_LENGTH)
return EntityModel(args, num_ner_labels=NUM_LABELS)
def load_dataset(fname: str):
"""
:param fname: filename for csfcube raw data
:return: dict of {pid: data}
"""
dataset = dict()
with codecs.open(fname, 'r', 'utf-8') as f:
for jsonline in f:
data = json.loads(jsonline.strip())
pid = data['paper_id']
ret_dict = {
'TITLE': data['title'],
'ABSTRACT': data['abstract'],
}
dataset[pid] = ret_dict
return dataset
def prepare_sentence(text: str, max_span_length: int):
"""
Pre process input data for entity model
:param text: A single sentence
:param max_span_length: Maximum number of words we expect in a span
:return: input data for entity model for this sentence
"""
sample = {
'tokens': text,
'sent_length': len(text),
'sent_start': 0,
'send_end': len(text),
'sent_start_in_doc': 0,
}
spans = list()
for i in range(len(text)):
for j in range(i, min(len(text), i + max_span_length)):
spans.append((i, j, j - i + 1))
span_labels = [0 for _ in range(len(spans))]
sample['spans'] = spans
sample['spans_label'] = span_labels
return sample
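# Illustrative example: prepare_sentence enumerates every token span of up to
# max_span_length tokens as (start, end, width) triples over token indices.
# For three tokens and max_span_length=2 the spans are:
#   [(0, 0, 1), (0, 1, 2), (1, 1, 1), (1, 2, 2), (2, 2, 1)]
def _sketch_enumerate_spans(num_tokens=3, max_span_length=2):
    spans = []
    for i in range(num_tokens):
        for j in range(i, min(num_tokens, i + max_span_length)):
            spans.append((i, j, j - i + 1))
    return spans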
def predict_batch(model: EntityModel, batch):
"""
Runs an input batch through the entity model
:param model: entity model
:param batch: input batch
:return: output
"""
output_dict = model.run_batch(batch, training=False)
batch_pred = output_dict['pred_ner']
batch_probs = output_dict['ner_probs']
batch_ans = []
for i, sample in enumerate(batch):
non_zero_spans = list()
for span, label, probs in zip(sample['spans'], batch_pred[i], batch_probs[i]):
if label != 0:
max_prob = softmax(probs).max(axis=-1)
non_zero_spans.append((span, max_prob, label))
batch_ans.append(non_zero_spans)
return batch_ans
# https://stackoverflow.com/questions/66232938/how-to-untokenize-bert-tokens
def untokenize(tokens):
pretok_sent = ""
for tok in tokens:
if tok.startswith("##"):
pretok_sent += tok[2:]
else:
pretok_sent += " " + tok
pretok_sent = pretok_sent[1:]
return pretok_sent
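# Illustrative example: untokenize glues WordPiece continuation pieces ('##...')
# back onto the preceding token, e.g.
#   untokenize(['embed', '##ding', 'models']) == 'embedding models'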
def extract_ner_spans(sentences: List[str], model: EntityModel) -> List[List[str]]:
"""
Extracts NER entities from sentences using the entity model provided.
For each sentence, returns a list of all entities extracted from it,
given as plain string.
Entities may appear different in the sentence and as an entity,
because they are tokenized then untokenized.
    :param sentences: List[str]; sentences to extract entities from.
    :param model: PURE entity model
:return: List of entities for each sentence
"""
# tokenize and preprocess sentences
tokens = [model.tokenizer.tokenize(s) for s in sentences]
inputs = [prepare_sentence(text = t, max_span_length=MAX_SPAN_LENGTH) for t in tokens]
# run through entity model
predictions = predict_batch(model, inputs)
# collect to output shape
entities = []
for i, ners in enumerate(predictions):
sentence_entities = list()
for ner in ners:
untokenized_entity = untokenize(tokens[i][ner[0][0]:ner[0][1] + 1])
sentence_entities.append(untokenized_entity)
entities.append(sentence_entities)
return entities
def main(dataset_dir,
dataset_name,
entity_model_dir):
"""
    :param dataset_dir: Data path where the dataset (e.g. CSFCube) is located
    :param dataset_name: Name of the dataset to extract entities for
    :param entity_model_dir: Path to the directory with PURE's entity model
    :return:
"""
# load entity model and dataset
print("Loading model and dataset")
model = load_entity_model(entity_model_dir)
dataset = load_dataset(os.path.join(dataset_dir, f'abstracts-{dataset_name}.jsonl'))
# find entities for each paper
print("Extracting entities from abstracts")
entities = dict()
    # NOTE: only the first 10 documents are processed here (likely a debugging limit).
    for (doc_id, doc) in tqdm(list(dataset.items())[:10]):
doc_entities = extract_ner_spans(doc['ABSTRACT'], model)
entities[doc_id] = doc_entities
# save results
output_filename = os.path.join(dataset_dir, f'{dataset_name}-ner2.jsonl')
print(f"Writing output to: {output_filename}")
with codecs.open(output_filename, 'w', 'utf-8') as fp:
json.dump(entities, fp)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', required=True, help='Name of dataset to extract entities on')
parser.add_argument('--dataset_dir', required=True, help='Dataset dir. abstracts-{dataset_name}.jsonl should be inside')
parser.add_argument('--entity_model_dir', required=True, help="Path where PURE Entity model was downloaded to")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(dataset_dir=args.dataset_dir,
dataset_name=args.dataset_name,
entity_model_dir=args.entity_model_dir) | aspire-main | src/pre_process/extract_entities.py |
"""
Read in abstracts and co-citation sentences and print similarities of
abstract sentences to co-citation sentences.
This is a quick script to examine training data; it is placed in pre_process
so that the imports work.
"""
import os
import codecs, json
import numpy as np
import scipy
from scipy import special, spatial
import torch
from sklearn import metrics as skmetrics
from sentence_transformers import SentenceTransformer, models
import ot
from .pre_proc_buildreps import TrainedModel
def print_cocite_contextualsentsim(trained_model_path, examples_path):
"""
Go over the co-cited abstracts and print out their pairwise similarity
with contextual sentence representations in a bid to understand how
well multi-instance alignment would work.
"""
model = TrainedModel(model_name='conswordbienc', trained_model_path=trained_model_path, model_version='init')
in_triples = codecs.open(os.path.join(examples_path, 'dev-cocitabs.jsonl'), 'r', 'utf-8')
out_file = codecs.open(os.path.join(examples_path, 'dev-abs_cc-conswordbienc-sims.txt'), 'w', 'utf-8')
out_file.write(f"Models:\nAbs model: {trained_model_path}\n")
written_count = 0
for jsonl in in_triples:
# Encode sentences for triple.
ex_dict = json.loads(jsonl.strip())
qabs = ex_dict['query']['ABSTRACT']
pos_abs = ex_dict['pos_context']['ABSTRACT']
_, sent_reps = model.predict([ex_dict['query'], ex_dict['pos_context']])
qabs_reps = sent_reps[0]
posabs_reps = sent_reps[1]
q2pos_abs_sims = np.matmul(qabs_reps, posabs_reps.T)
q2pos_softmax = special.softmax(q2pos_abs_sims.flatten()/np.sqrt(768))
q2pos_softmax = q2pos_softmax.reshape(q2pos_abs_sims.shape)
q2pos_abs_sims = np.array2string(q2pos_softmax, precision=2)
# Print abstracts and similarities.
qabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(qabs)])
out_file.write(f'Query abstract:\n{ex_dict["query"]["TITLE"]}\n{qabs_str}\n')
out_file.write(q2pos_abs_sims+'\n')
pabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(pos_abs)])
out_file.write(f'Positive abstract:\n{ex_dict["pos_context"]["TITLE"]}\n{pabs_str}\n')
out_file.write('==================================\n')
written_count += 1
if written_count > 1000:
break
print(f'Wrote: {out_file.name}')
out_file.close()
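# Illustrative sketch (toy numbers): the pairwise sentence similarities above
# are turned into a distribution with a temperature-scaled softmax over the
# flattened similarity matrix (scaled by sqrt of the 768-d rep size) and then
# reshaped back to the (query sentences x positive sentences) grid.
def _sketch_scaled_softmax_alignment():
    import numpy as np
    from scipy import special
    sims = np.array([[1.0, 2.0], [3.0, 4.0]])
    flat = special.softmax(sims.flatten() / np.sqrt(768))
    return flat.reshape(sims.shape)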
def print_cocite_contextualsentsim_contextsent(trained_abs_model_path, trained_sentmodel_path, examples_path):
"""
Go over the co-cited abstracts and print out their pairwise similarity
with contextual sentence representations to the sentence context in which
they occur to understand if the context sentences provide reasonable
supervision.
"""
# Init the sentence model.
word_embedding_model = models.Transformer('allenai/scibert_scivocab_uncased',
max_seq_length=512)
# Loading local model: https://github.com/huggingface/transformers/issues/2422#issuecomment-571496558
trained_model_fname = os.path.join(trained_sentmodel_path, 'sent_encoder_cur_best.pt')
word_embedding_model.auto_model.load_state_dict(torch.load(trained_model_fname))
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
sentbert_model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Init the abstract model.
model = TrainedModel(model_name='conswordbienc', trained_model_path=trained_abs_model_path, model_version='init')
in_triples = codecs.open(os.path.join(examples_path, 'dev-cocitabs.jsonl'), 'r', 'utf-8')
out_file = codecs.open(os.path.join(examples_path, 'dev-abs2context-conswordbienc-sims.txt'), 'w', 'utf-8')
out_file.write(f"Models:\nAbs model: {trained_abs_model_path}\nSent model: {trained_abs_model_path}\n")
written_count = 0
for jsonl in in_triples:
# Encode sentences for triple.
ex_dict = json.loads(jsonl.strip())
qabs = ex_dict['query']['ABSTRACT']
pos_abs = ex_dict['pos_context']['ABSTRACT']
_, sent_reps = model.predict([ex_dict['query'], ex_dict['pos_context']])
qabs_reps = sent_reps[0]
posabs_reps = sent_reps[1]
citing_contexts = ex_dict['citing_contexts']
_, sent_reps = model.predict([{'TITLE': '', 'ABSTRACT': citing_contexts}])
cc_reps = sent_reps[0]
# Get pairwise sims.
cc2query_abs_sims = np.matmul(qabs_reps, cc_reps.T)
cc2query_idxs = np.unravel_index(cc2query_abs_sims.argmax(), cc2query_abs_sims.shape)
cc2query_abs_sims = np.array2string(cc2query_abs_sims, precision=2)
cc2pos_abs_sims = np.matmul(posabs_reps, cc_reps.T)
cc2pos_idxs = np.unravel_index(cc2pos_abs_sims.argmax(), cc2pos_abs_sims.shape)
cc2pos_abs_sims = np.array2string(cc2pos_abs_sims, precision=2)
q2pos_abs_sims = np.matmul(qabs_reps, posabs_reps.T)
q2pos_idxs = np.unravel_index(q2pos_abs_sims.argmax(), q2pos_abs_sims.shape)
# Print abstracts and similarities.
qabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(qabs)])
out_file.write(f'Query abstract:\n{ex_dict["query"]["TITLE"]}\n{qabs_str}\n')
out_file.write(cc2query_abs_sims+'\n')
contextalign_diff = True if (cc2query_idxs[0], cc2pos_idxs[0]) != (q2pos_idxs[0], q2pos_idxs[1]) else False
out_file.write(f'cc2q: {cc2query_idxs}; cc2p: {cc2pos_idxs}; q2p: {q2pos_idxs}\n')
out_file.write(f'contextalign_diff: {contextalign_diff}\n')
out_file.write('Citing contexts:\n{:}\n'.format('\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(citing_contexts)])))
out_file.write(cc2pos_abs_sims+'\n')
pabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(pos_abs)])
out_file.write(f'Positive abstract:\n{ex_dict["pos_context"]["TITLE"]}\n{pabs_str}\n')
out_file.write('==================================\n')
written_count += 1
if written_count > 1000:
break
print(f'Wrote: {out_file.name}')
out_file.close()
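# Illustrative sketch (toy numbers): the best aligned sentence pair is found by
# taking the argmax over the flattened similarity matrix and mapping it back to
# (row, column) indices with np.unravel_index, as done repeatedly above.
def _sketch_best_aligned_pair():
    import numpy as np
    sims = np.array([[0.1, 0.7], [0.9, 0.2]])
    best_q_idx, best_c_idx = np.unravel_index(sims.argmax(), sims.shape)
    return int(best_q_idx), int(best_c_idx)  # (1, 0) for the toy matrix.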
def print_context_abs_sims(trained_model_path, examples_path):
"""
- Read a triple example,
- Encode the sentences of the abstract.
- Encode the citation contexts.
- Compute pairwise dot products between citation context and the encoded sentences.
    - Encode the abstract sentences and the citation contexts with the trained CoSentBert sentence encoder.
"""
# Init the model.
word_embedding_model = models.Transformer('allenai/scibert_scivocab_uncased',
max_seq_length=512)
# Loading local model: https://github.com/huggingface/transformers/issues/2422#issuecomment-571496558
trained_model_fname = os.path.join(trained_model_path, 'sent_encoder_cur_best.pt')
word_embedding_model.auto_model.load_state_dict(torch.load(trained_model_fname))
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
sentbert_model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
in_triples = codecs.open(os.path.join(examples_path, 'dev-cocitabs.jsonl'), 'r', 'utf-8')
out_file = codecs.open(os.path.join(examples_path, 'dev-abs_cc-sims.txt'), 'w', 'utf-8')
out_file.write(f'Sentence model: {trained_model_path}\n')
written_count = 0
for jsonl in in_triples:
# Encode sentences for triple.
ex_dict = json.loads(jsonl.strip())
qabs = ex_dict['query']['ABSTRACT']
pos_abs = ex_dict['pos_context']['ABSTRACT']
citing_contexts = ex_dict['citing_contexts']
reps = sentbert_model.encode(qabs+pos_abs+citing_contexts)
qabs_reps = reps[:len(qabs)]
posabs_reps = reps[len(qabs): len(qabs)+len(pos_abs)]
cc_reps = reps[len(qabs)+len(pos_abs):]
# Get pairwise sims.
cc2query_abs_sims = np.matmul(qabs_reps, cc_reps.T)
cc2query_idxs = np.unravel_index(cc2query_abs_sims.argmax(), cc2query_abs_sims.shape)
cc2query_abs_sims = np.array2string(cc2query_abs_sims, precision=2)
cc2pos_abs_sims = np.matmul(posabs_reps, cc_reps.T)
cc2pos_idxs = np.unravel_index(cc2pos_abs_sims.argmax(), cc2pos_abs_sims.shape)
cc2pos_abs_sims = np.array2string(cc2pos_abs_sims, precision=2)
q2pos_abs_sims = np.matmul(qabs_reps, posabs_reps.T)
q2pos_idxs = np.unravel_index(q2pos_abs_sims.argmax(), q2pos_abs_sims.shape)
        # Print abstracts and similarities.
qabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(qabs)])
out_file.write(f'Query abstract:\n{ex_dict["query"]["TITLE"]}\n{qabs_str}\n')
out_file.write(cc2query_abs_sims+'\n')
contextalign_diff = True if (cc2query_idxs[0], cc2pos_idxs[0]) != (q2pos_idxs[0], q2pos_idxs[1]) else False
out_file.write(f'cc2q: {cc2query_idxs}; cc2p: {cc2pos_idxs}; q2p: {q2pos_idxs}\n')
out_file.write(f'contextalign_diff: {contextalign_diff}\n')
out_file.write('Citing contexts:\n{:}\n'.format('\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(citing_contexts)])))
out_file.write(cc2pos_abs_sims+'\n')
pabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(pos_abs)])
out_file.write(f'Positive abstract:\n{ex_dict["pos_context"]["TITLE"]}\n{pabs_str}\n')
out_file.write('==================================\n')
written_count += 1
if written_count > 2000:
break
print(f'Wrote: {out_file.name}')
out_file.close()
def print_context_abs_sims_ot(trained_model_path, examples_path):
"""
    - Read a triple example.
    - Encode the sentences of the query and positive abstracts, and the citation contexts,
      with the trained CoSentBert sentence encoder.
    - Compute pairwise (negative euclidean) distances between the citation contexts and
      the encoded abstract sentences.
    - Additionally compute an entropy-regularized optimal transport plan between the
      query and positive abstract sentences and print it alongside the abstracts.
"""
# Init the model.
word_embedding_model = models.Transformer('allenai/scibert_scivocab_uncased',
max_seq_length=512)
# Loading local model: https://github.com/huggingface/transformers/issues/2422#issuecomment-571496558
trained_model_fname = os.path.join(trained_model_path, 'sent_encoder_cur_best.pt')
word_embedding_model.auto_model.load_state_dict(torch.load(trained_model_fname))
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
sentbert_model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
in_triples = codecs.open(os.path.join(examples_path, 'dev-cocitabs.jsonl'), 'r', 'utf-8')
out_file = codecs.open(os.path.join(examples_path, 'dev-abs2abs-otaligns-sims.txt'), 'w', 'utf-8')
written_count = 0
for jsonl in in_triples:
# Encode sentences for triple.
ex_dict = json.loads(jsonl.strip())
qabs = ex_dict['query']['ABSTRACT']
pos_abs = ex_dict['pos_context']['ABSTRACT']
citing_contexts = ex_dict['citing_contexts']
reps = sentbert_model.encode(qabs+pos_abs+citing_contexts)
qabs_reps = reps[:len(qabs)]
posabs_reps = reps[len(qabs): len(qabs)+len(pos_abs)]
cc_reps = reps[len(qabs)+len(pos_abs):]
# Get pairwise sims.
cc2query_abs_sims = -1*spatial.distance.cdist(qabs_reps, cc_reps)
cc2query_idxs = np.unravel_index(cc2query_abs_sims.argmax(), cc2query_abs_sims.shape)
cc2query_abs_sims = np.array2string(cc2query_abs_sims, precision=2)
cc2pos_abs_sims = -1*spatial.distance.cdist(posabs_reps, cc_reps)
cc2pos_idxs = np.unravel_index(cc2pos_abs_sims.argmax(), cc2pos_abs_sims.shape)
cc2pos_abs_sims = np.array2string(cc2pos_abs_sims, precision=2)
q2pos_abs_sims = -1*spatial.distance.cdist(qabs_reps, posabs_reps)
q2pos_idxs = np.unravel_index(q2pos_abs_sims.argmax(), q2pos_abs_sims.shape)
# quniform = np.array([1.0/len(qabs) for i in range(len(qabs))])
# cuniform = np.array([1.0/len(pos_abs) for i in range(len(pos_abs))])
# Consider the sentences importance weighted by their best alignment.
query_distr = scipy.special.softmax(np.max(q2pos_abs_sims, axis=1))
cand_distr = scipy.special.softmax(np.max(q2pos_abs_sims, axis=0))
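        # Entropy-regularized OT (Sinkhorn, eps=0.01): the cost matrix is the positive
        # euclidean distance matrix, i.e. -1*q2pos_abs_sims.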
transport_plan_reg = ot.bregman.sinkhorn_epsilon_scaling(query_distr, cand_distr, -1*q2pos_abs_sims, 0.01)
transport_plan_reg = np.array2string(np.around(transport_plan_reg, 4), precision=3)
# Print abstracts and similarities.
qabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(qabs)])
out_file.write(f'Query abstract:\n{ex_dict["query"]["TITLE"]}\n{qabs_str}\n')
# out_file.write(cc2query_abs_sims+'\n')
# contextalign_diff = True if (cc2query_idxs[0], cc2pos_idxs[0]) != (q2pos_idxs[0], q2pos_idxs[1]) else False
# out_file.write(f'cc2q: {cc2query_idxs}; cc2p: {cc2pos_idxs}; q2p: {q2pos_idxs}\n')
# out_file.write(f'contextalign_diff: {contextalign_diff}\n')
# out_file.write('Citing contexts:\n{:}\n'.format('\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(citing_contexts)])))
# out_file.write(cc2pos_abs_sims+'\n')
out_file.write(f'Q_distr:\n{np.array2string(query_distr, precision=3)}\n')
out_file.write(f'C_distr:\n{np.array2string(cand_distr, precision=3)}\n')
out_file.write(f'Distances:\n{np.array2string(q2pos_abs_sims, precision=3)}\n')
out_file.write(f'Transport plan:\n{transport_plan_reg}\n')
pabs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(pos_abs)])
out_file.write(f'Positive abstract:\n{ex_dict["pos_context"]["TITLE"]}\n{pabs_str}\n')
out_file.write('==================================\n')
written_count += 1
if written_count > 2000:
break
print(f'Wrote: {out_file.name}')
out_file.close()
def print_abs2abs_contextual_sims(embeddings_path, abstracts_path):
"""
Read embeddings of the contextualized sentences for csfcube and print out
pairwise sentence similarities for the same abstract.
"""
with codecs.open(os.path.join(embeddings_path, 'pid2idx-csfcube-sent.json'), 'r', 'utf-8') as fp:
pid2idx = json.load(fp)
embeddings = np.load(os.path.join(embeddings_path, 'csfcube-sent.npy'))
abs_file = codecs.open(os.path.join(abstracts_path, 'abstracts-csfcube-preds.jsonl'), 'r', 'utf-8')
out_file = codecs.open(os.path.join(abstracts_path, 'abstracts-csfcube-preds-selfsims-ctxt.txt'), 'w', 'utf-8')
out_file.write(f'Embeddings with: {embeddings_path}\n')
for abs_line in abs_file:
abs_dict = json.loads(abs_line.strip())
pid = abs_dict['paper_id']
sent_idx = [pid2idx[f'{pid}-{i}'] for i in range(len(abs_dict['abstract']))]
sent_reps = embeddings[sent_idx]
abs_self_sims = skmetrics.pairwise.cosine_similarity(sent_reps, sent_reps)
abs_self_sims = np.array2string(abs_self_sims, precision=2)
abs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(abs_dict['abstract'])])
out_file.write(f'Query abstract:\n{abs_dict["title"]}\n{abs_str}\n')
out_file.write(abs_self_sims+'\n\n')
print(f'Wrote: {out_file.name}')
out_file.close()
def print_abs2abs_nocontext_sims(trained_model_path, abstracts_path):
"""
Read embeddings of the contextualized sentences for csfcube and print out
pairwise sentence similarities for the same abstract.
"""
# Init the model.
word_embedding_model = models.Transformer('allenai/scibert_scivocab_uncased',
max_seq_length=512)
# Loading local model: https://github.com/huggingface/transformers/issues/2422#issuecomment-571496558
trained_model_fname = os.path.join(trained_model_path, 'sent_encoder_cur_best.pt')
word_embedding_model.auto_model.load_state_dict(torch.load(trained_model_fname))
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
sentbert_model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
abs_file = codecs.open(os.path.join(abstracts_path, 'abstracts-csfcube-preds.jsonl'), 'r', 'utf-8')
out_file = codecs.open(os.path.join(abstracts_path, 'abstracts-csfcube-preds-selfsims-noctxt.txt'), 'w', 'utf-8')
out_file.write(f'Embeddings with: {trained_model_path}\n')
for abs_line in abs_file:
abs_dict = json.loads(abs_line.strip())
sent_reps = sentbert_model.encode(abs_dict['abstract'])
abs_self_sims = skmetrics.pairwise.cosine_similarity(sent_reps, sent_reps)
abs_self_sims = np.array2string(abs_self_sims, precision=2)
abs_str = '\n'.join(['{:d}: {:s}'.format(i, s) for i, s in enumerate(abs_dict['abstract'])])
out_file.write(f'Query abstract:\n{abs_dict["title"]}\n{abs_str}\n')
out_file.write(abs_self_sims+'\n\n')
print(f'Wrote: {out_file.name}')
out_file.close()
if __name__ == '__main__':
print_context_abs_sims_ot(trained_model_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/model_runs/'
's2orccompsci/cosentbert/cosentbert-2021_07_11-22_58_06-fixeddev',
examples_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/datasets_proc/'
's2orccompsci/cospecter/')
# print_abs2abs_contextual_sims(embeddings_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/datasets_raw/'
# 's2orccompsci/conswordbienc/conswordbienc-2021_07_23-17_46_54-specter_init',
# abstracts_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/datasets_raw/'
# 's2orccompsci')
# print_abs2abs_nocontext_sims(trained_model_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/model_runs/'
# 's2orccompsci/cosentbert/cosentbert-2021_07_11-22_58_06-fixeddev',
# abstracts_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/datasets_raw/'
# 's2orccompsci')
# print_cocite_contextualsentsim(trained_model_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/model_runs/'
# 's2orccompsci/conswordbienc/'
# 'conswordbienc-2021_07_23-17_46_54-specter_init',
# examples_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/datasets_proc/'
# 's2orccompsci/cospecter/')
# print_cocite_contextualsentsim_contextsent(
# trained_abs_model_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/model_runs/'
# 's2orccompsci/conswordbienc/'
# 'conswordbienc-2021_07_23-17_46_54-specter_init',
# trained_sentmodel_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/model_runs/'
# 's2orccompsci/cosentbert/cosentbert-2021_07_11-22_58_06-fixeddev',
# examples_path='/mnt/nfs/work1/mccallum/smysore/2021-ai2-scisim/datasets_proc/'
# 's2orccompsci/cospecter/')
| aspire-main | src/pre_process/print_cociteabs_sims.py |
"""
From: https://gist.github.com/bwhite/3726239#file-rank_metrics-py
"""
import numpy as np
def mean_reciprocal_rank(rs):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.61111111111111105
>>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> mean_reciprocal_rank(rs)
0.5
>>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.75
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
rs = (np.asarray(r).nonzero()[0] for r in rs)
return np.mean([1. / (r[0] + 1) if r.size else 0. for r in rs])
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> r_precision(r)
0.33333333333333331
>>> r = [0, 1, 0]
>>> r_precision(r)
0.5
>>> r = [1, 0, 0]
>>> r_precision(r)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
def precision_at_k(r, k):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> precision_at_k(r, 1)
0.0
>>> precision_at_k(r, 2)
0.0
>>> precision_at_k(r, 3)
0.33333333333333331
>>> precision_at_k(r, 4)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Relevance score length < k
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return np.mean(r)
def average_precision(r):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
>>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
>>> delta_r = 1. / sum(r)
>>> sum([sum(r[:x + 1]) / (x + 1.) * delta_r for x, y in enumerate(r) if y])
0.7833333333333333
>>> average_precision(r)
0.78333333333333333
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
if not out:
return 0.
return np.mean(out)
def mean_average_precision(rs):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]]
>>> mean_average_precision(rs)
0.78333333333333333
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1], [0]]
>>> mean_average_precision(rs)
0.39166666666666666
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return np.mean([average_precision(r) for r in rs])
def dcg_at_k(r, k, method=1):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> dcg_at_k(r, 1)
3.0
>>> dcg_at_k(r, 1, method=1)
3.0
    >>> dcg_at_k(r, 2, method=0)
    5.0
    >>> dcg_at_k(r, 2, method=1)
    4.2618595071429155
    >>> dcg_at_k(r, 10, method=0)
    9.6051177391888114
    >>> dcg_at_k(r, 11, method=0)
    9.6051177391888114
"An Introduction to Neural Information Retrieval" writes method 1 for DCG
so using that as default.
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
"""Score is normalized discounted cumulative gain (ndcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> ndcg_at_k(r, 1)
1.0
>>> r = [2, 1, 2, 0]
>>> ndcg_at_k(r, 4)
0.9203032077642922
>>> ndcg_at_k(r, 4, method=1)
0.96519546960144276
>>> ndcg_at_k([0], 1)
0.0
>>> ndcg_at_k([1], 2)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Normalized discounted cumulative gain
"""
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
| aspire-main | src/evaluation/rank_metrics.py |
# For relative imports to work in Python 3.6
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__))) | aspire-main | src/evaluation/__init__.py |
"""
Evaluate the rankings generated by sentence similarity models.
"""
import sys
import os
import errno
import argparse
import statistics
import codecs
import json
import csv
import comet_ml as cml
from scipy import stats as scipystats
import rank_metrics as rm
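# Hard-coded 2-fold cross-validation splits for the CSFcube evaluation queries:
# facet -> fold -> list of "<query_pid>_<facet>" identifiers, as used in
# aggregate_metrics_crossval below.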
facet2folds = {
"background": {"fold1_dev": ["3264891_background", "1936997_background", "11844559_background",
"52194540_background", "1791179_background", "6431039_background",
"6173686_background", "7898033_background"],
"fold2_dev": ["5764728_background", "10014168_background", "10695055_background",
"929877_background", "1587_background", "51977123_background",
"8781666_background", "189897839_background"],
"fold1_test": ["5764728_background", "10014168_background", "10695055_background",
"929877_background", "1587_background", "51977123_background",
"8781666_background", "189897839_background"],
"fold2_test": ["3264891_background", "1936997_background", "11844559_background",
"52194540_background", "1791179_background", "6431039_background",
"6173686_background", "7898033_background"]},
"method": {"fold1_dev": ["189897839_method", "1791179_method", "11310392_method", "2468783_method",
"13949438_method", "5270848_method", "52194540_method", "929877_method"],
"fold2_dev": ["5052952_method", "10010426_method", "102353905_method", "174799296_method",
"1198964_method", "53080736_method", "1936997_method", "80628431_method",
"53082542_method"],
"fold1_test": ["5052952_method", "10010426_method", "102353905_method", "174799296_method",
"1198964_method", "53080736_method", "1936997_method", "80628431_method",
"53082542_method"],
"fold2_test": ["189897839_method", "1791179_method", "11310392_method", "2468783_method",
"13949438_method", "5270848_method", "52194540_method", "929877_method"]},
"result": {"fold1_dev": ["2090262_result", "174799296_result", "11844559_result", "2468783_result",
"1306065_result", "5052952_result", "3264891_result", "8781666_result"],
"fold2_dev": ["2865563_result", "10052042_result", "11629674_result", "1587_result",
"1198964_result", "53080736_result", "2360770_result", "80628431_result",
"6431039_result"],
"fold1_test": ["2865563_result", "10052042_result", "11629674_result", "1587_result",
"1198964_result", "53080736_result", "2360770_result", "80628431_result",
"6431039_result"],
"fold2_test": ["2090262_result", "174799296_result", "11844559_result", "2468783_result",
"1306065_result", "5052952_result", "3264891_result", "8781666_result"]},
"all": {"fold1_dev": ["3264891_background", "1936997_background", "11844559_background",
"52194540_background", "1791179_background", "6431039_background",
"6173686_background", "7898033_background", "189897839_method",
"1791179_method", "11310392_method", "2468783_method", "13949438_method",
"5270848_method", "52194540_method", "929877_method", "2090262_result",
"174799296_result", "11844559_result", "2468783_result", "1306065_result",
"5052952_result", "3264891_result", "8781666_result"],
"fold2_dev": ["5764728_background", "10014168_background", "10695055_background",
"929877_background", "1587_background", "51977123_background",
"8781666_background", "189897839_background", "5052952_method", "10010426_method",
"102353905_method", "174799296_method", "1198964_method", "53080736_method",
"1936997_method", "80628431_method", "53082542_method", "2865563_result",
"10052042_result", "11629674_result", "1587_result", "1198964_result",
"53080736_result", "2360770_result", "80628431_result", "6431039_result"],
"fold1_test": ["5764728_background", "10014168_background", "10695055_background",
"929877_background", "1587_background", "51977123_background", "8781666_background",
"189897839_background", "5052952_method", "10010426_method", "102353905_method",
"174799296_method", "1198964_method", "53080736_method", "1936997_method",
"80628431_method", "53082542_method", "2865563_result", "10052042_result",
"11629674_result", "1587_result", "1198964_result", "53080736_result",
"2360770_result", "80628431_result", "6431039_result"],
"fold2_test": ["3264891_background", "1936997_background", "11844559_background",
"52194540_background", "1791179_background", "6431039_background",
"6173686_background", "7898033_background", "189897839_method", "1791179_method",
"11310392_method", "2468783_method", "13949438_method", "5270848_method",
"52194540_method", "929877_method", "2090262_result", "174799296_result",
"11844559_result", "2468783_result", "1306065_result", "5052952_result",
"3264891_result", "8781666_result"]
}
}
def create_dir(dir_name):
"""
Create the directory whose name is passed.
:param dir_name: String saying the name of directory to create.
:return: None.
"""
# Create output directory if it doesnt exist.
try:
os.makedirs(dir_name)
print('Created: {}.'.format(dir_name))
except OSError as ose:
# For the case of *file* by name of out_dir existing
if (not os.path.isdir(dir_name)) and (ose.errno == errno.EEXIST):
sys.stderr.write('IO ERROR: Could not create output directory\n')
sys.exit(1)
# If its something else you don't know; report it and exit.
if ose.errno != errno.EEXIST:
sys.stderr.write('OS ERROR: {:d}: {:s}: {:s}\n'.format(ose.errno,
ose.strerror,
dir_name))
sys.exit(1)
def recall_at_k(ranked_rel, atk, max_total_relevant):
"""
Compute recall at k.
:param ranked_rel: list(int); ranked list of relevance judged data.
:param atk: int; rank at which to compute metric.
:param max_total_relevant: int; maximum relevant to consider in
case there are more relevant in total.
:return: recall: float.
"""
total_relevant = sum(ranked_rel)
total_relevant = min(max_total_relevant, total_relevant)
relatk = sum(ranked_rel[:atk])
if total_relevant > 0:
recall_atk = float(relatk)/total_relevant
else:
recall_atk = 0.0
return recall_atk
def compute_metrics(ranked_judgements, pr_atks, threshold_grade):
"""
Given the ranked judgements compute precision, recall and f1.
:param ranked_judgements: list(int); graded or binary relevances in rank order.
:param pr_atks: list(int); the @K values to use for computing precision and recall.
:param threshold_grade: int; Assuming 0-3 graded relevances, threshold at some point
and convert graded to binary relevance. If passed also compute NDCG.
:return:
"""
metrics = {}
graded_judgements = ranked_judgements
ranked_judgements = [1 if rel >= threshold_grade else 0 for rel in graded_judgements]
    # Use the full set of candidates, not the pr_atks.
ndcg = rm.ndcg_at_k(graded_judgements, len(ranked_judgements))
ndcg_20 = rm.ndcg_at_k(graded_judgements, 20)
ndcg_50 = rm.ndcg_at_k(graded_judgements, 50)
for atk in [5, 10, 15, 20, 25]:
ndcg_pr_atk = rm.ndcg_at_k(graded_judgements, int((atk/100)*len(ranked_judgements)))
metrics[f'ndcg%{atk}'] = float(ndcg_pr_atk)
max_total_relevant = sum(ranked_judgements)
for atk in pr_atks:
recall = recall_at_k(ranked_rel=ranked_judgements,
atk=atk, max_total_relevant=max_total_relevant)
precision = rm.precision_at_k(r=ranked_judgements, k=atk)
f1 = 2*precision*recall/(precision + recall) if (precision + recall) > 0 else 0.0
metrics[f'precision@{atk}'] = float(precision)
metrics[f'recall@{atk}'] = float(recall)
metrics[f'f1@{atk}'] = float(f1)
r_precision = rm.r_precision(r=ranked_judgements)
av_precision = rm.average_precision(r=ranked_judgements)
reciprocal_rank = rm.mean_reciprocal_rank(rs=[ranked_judgements])
metrics['r_precision'] = float(r_precision)
metrics['av_precision'] = float(av_precision)
metrics['reciprocal_rank'] = float(reciprocal_rank)
metrics['ndcg'] = float(ndcg)
metrics['ndcg@20'] = float(ndcg_20)
metrics['ndcg@50'] = float(ndcg_50)
return metrics
def aggregate_metrics(query_metrics):
"""
Given metrics over individual queries aggregate over different
queries. Simple average for now.
:param query_metrics: dict(query_id: metrics_dict from compute_metrics)
:return:
"""
precision5, precision10, precision20, recall20, f120 = 0.0, 0.0, 0.0, 0.0, 0.0
av_precision, mrr, ndcg, r_precision = 0.0, 0.0, 0.0, 0.0
ndcg_20, ndcg_50 = 0.0, 0.0
ndcg_pr_5, ndcg_pr_10, ndcg_pr_15, ndcg_pr_20, ndcg_pr_25 = 0.0, 0.0, 0.0, 0.0, 0.0
for queri_id, metrics in query_metrics.items():
precision5 += metrics['precision@5']
precision10 += metrics['precision@10']
precision20 += metrics['precision@20']
recall20 += metrics['recall@20']
f120 += metrics['f1@20']
av_precision += metrics['av_precision']
mrr += metrics['reciprocal_rank']
r_precision += metrics['r_precision']
if 'ndcg' in metrics:
ndcg += metrics['ndcg']
ndcg_20 += metrics['ndcg@20']
ndcg_50 += metrics['ndcg@50']
ndcg_pr_5 += metrics['ndcg%5']
ndcg_pr_10 += metrics['ndcg%10']
ndcg_pr_15 += metrics['ndcg%15']
ndcg_pr_20 += metrics['ndcg%20']
ndcg_pr_25 += metrics['ndcg%25']
num_queries = len(query_metrics)
aggmetrics = {
'precision@5': precision5/num_queries,
'precision@10': precision10/num_queries,
'precision@20': precision20/num_queries,
'recall@20': recall20/num_queries,
'f1@20': f120/num_queries,
'r_precision': r_precision/num_queries,
'mean_av_precision': av_precision/num_queries,
'mean_reciprocal_rank': mrr/num_queries,
'ndcg': ndcg/num_queries,
'ndcg@20': ndcg_20/num_queries,
'ndcg@50': ndcg_50/num_queries,
'ndcg%5': ndcg_pr_5/num_queries,
'ndcg%10': ndcg_pr_10/num_queries,
'ndcg%15': ndcg_pr_15/num_queries,
'ndcg%20': ndcg_pr_20/num_queries,
'ndcg%25': ndcg_pr_25/num_queries
}
return aggmetrics
def aggregate_metrics_crossval(query_metrics, split_str, facet_str):
"""
Given metrics over individual queries aggregate over different
queries. Simple average for now.
:param query_metrics: dict(query_id: metrics_dict from compute_metrics)
:param split_str: string; {dev, test}
:param facet_str: string; {background, method, result}
:return:
"""
aggmetrics = {
'precision@5': [],
'precision@10': [],
'precision@20': [],
'recall@20': [],
'f1@20': [],
'r_precision': [],
'mean_av_precision': [],
'mean_reciprocal_rank': [],
'ndcg': [],
'ndcg@20': [],
'ndcg@50': [],
'ndcg%5': [],
'ndcg%10': [],
'ndcg%15': [],
'ndcg%20': [],
'ndcg%25': []
}
# For dev only use a part of the fold - using both makes it identical to test.
if split_str == 'dev':
folds = ['fold1_{:s}'.format(split_str)]
elif split_str == 'test':
folds = ['fold1_{:s}'.format(split_str), 'fold2_{:s}'.format(split_str)]
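    # For test, metrics are averaged within each fold and then across the two folds
    # (2-fold cross validation).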
for fold_str in folds:
fold_pids = facet2folds[facet_str][fold_str]
precision5, precision10, precision20, recall20, f120 = 0.0, 0.0, 0.0, 0.0, 0.0
ndcg_20, ndcg_50 = 0.0, 0.0
ndcg_pr_5, ndcg_pr_10, ndcg_pr_15, ndcg_pr_20, ndcg_pr_25 = 0.0, 0.0, 0.0, 0.0, 0.0
av_precision, mrr, ndcg, r_precision = 0.0, 0.0, 0.0, 0.0
for query_id in fold_pids:
# Aggregate across paper types in the fold.
metrics = query_metrics[query_id]
# Aggregate across all papers in the fold
precision5 += metrics['precision@5']
precision10 += metrics['precision@10']
precision20 += metrics['precision@20']
recall20 += metrics['recall@20']
f120 += metrics['f1@20']
av_precision += metrics['av_precision']
mrr += metrics['reciprocal_rank']
r_precision += metrics['r_precision']
ndcg += metrics['ndcg']
ndcg_20 += metrics['ndcg@20']
ndcg_50 += metrics['ndcg@50']
ndcg_pr_5 += metrics['ndcg%5']
ndcg_pr_10 += metrics['ndcg%10']
ndcg_pr_15 += metrics['ndcg%15']
ndcg_pr_20 += metrics['ndcg%20']
ndcg_pr_25 += metrics['ndcg%25']
        # Average the metrics over queries within this fold.
num_queries = len(fold_pids)
precision5, precision10, precision20 = precision5/num_queries, precision10/num_queries, \
precision20/num_queries
recall20, f120 = recall20/num_queries, f120/num_queries
av_precision = av_precision/num_queries
mrr, ndcg, r_precision = mrr/num_queries, ndcg/num_queries, r_precision/num_queries
ndcg_20, ndcg_50 = ndcg_20/num_queries, ndcg_50/num_queries
ndcg_pr_5, ndcg_pr_10, ndcg_pr_15, ndcg_pr_20, ndcg_pr_25 = ndcg_pr_5/num_queries, ndcg_pr_10/num_queries,\
ndcg_pr_15/num_queries, ndcg_pr_20/num_queries, \
ndcg_pr_25/num_queries
# Save the averaged metric for every fold.
aggmetrics['precision@5'].append(precision5)
aggmetrics['precision@10'].append(precision10)
aggmetrics['precision@20'].append(precision20)
aggmetrics['recall@20'].append(recall20)
aggmetrics['f1@20'].append(f120)
aggmetrics['r_precision'].append(r_precision)
aggmetrics['mean_av_precision'].append(av_precision)
aggmetrics['mean_reciprocal_rank'].append(mrr)
aggmetrics['ndcg'].append(ndcg)
aggmetrics['ndcg@20'].append(ndcg_20)
aggmetrics['ndcg@50'].append(ndcg_50)
aggmetrics['ndcg%5'].append(ndcg_pr_5)
aggmetrics['ndcg%10'].append(ndcg_pr_10)
aggmetrics['ndcg%15'].append(ndcg_pr_15)
aggmetrics['ndcg%20'].append(ndcg_pr_20)
aggmetrics['ndcg%25'].append(ndcg_pr_25)
aggmetrics = {
'precision@5': statistics.mean(aggmetrics['precision@5']),
'precision@10': statistics.mean(aggmetrics['precision@10']),
'precision@20': statistics.mean(aggmetrics['precision@20']),
'recall@20': statistics.mean(aggmetrics['recall@20']),
'f1@20': statistics.mean(aggmetrics['f1@20']),
'r_precision': statistics.mean(aggmetrics['r_precision']),
'mean_av_precision': statistics.mean(aggmetrics['mean_av_precision']),
'mean_reciprocal_rank': statistics.mean(aggmetrics['mean_reciprocal_rank']),
'ndcg': statistics.mean(aggmetrics['ndcg']),
'ndcg@20': statistics.mean(aggmetrics['ndcg@20']),
'ndcg@50': statistics.mean(aggmetrics['ndcg@50']),
'ndcg%5': statistics.mean(aggmetrics['ndcg%5']),
'ndcg%10': statistics.mean(aggmetrics['ndcg%10']),
'ndcg%15': statistics.mean(aggmetrics['ndcg%15']),
'ndcg%20': statistics.mean(aggmetrics['ndcg%20']),
'ndcg%25': statistics.mean(aggmetrics['ndcg%25']),
}
return aggmetrics
def read_unf_relevances(data_path, run_path, dataset, method):
"""
Read the gold data and the model rankings and the relevances for the
model.
:param data_path: string; directory with gold citations for test pids and rankings
from baseline methods in subdirectories.
    :param run_path: string; directory with ranked candidates; for baselines this is a
        subdirectory of data_path, otherwise it is a model run directory.
:param method: string; method with which ranks were created.
:param dataset: string; eval dataset.
:return: qpid2rankedcand_relevances: dict('qpid_facet': [relevances]);
candidate gold relevances for the candidates in order ranked by the
model.
"""
gold_fname = os.path.join(data_path, 'test-pid2anns-{:s}.json'.format(dataset))
ranked_fname = os.path.join(run_path, 'test-pid2pool-{:s}-{:s}-ranked.json'.format(dataset, method))
# Load gold test data (citations).
with codecs.open(gold_fname, 'r', 'utf-8') as fp:
pid2pool_source = json.load(fp)
num_query = len(pid2pool_source)
print('Gold query pids: {:d}'.format(num_query))
pid2rels_gold = {}
for qpid, pool_rel in pid2pool_source.items():
pool = pool_rel['cands']
cands_rels = pool_rel['relevance_adju']
pid2rels_gold['{:s}'.format(qpid)] = \
dict([(pid, rel) for pid, rel in zip(pool, cands_rels)])
# Load ranked predictions on test data with methods.
with codecs.open(ranked_fname, 'r', 'utf-8') as fp:
pid2ranks = json.load(fp)
print('Valid ranked query pids: {:d}'.format(len(pid2ranks)))
qpid2rankedcand_relevances = {}
for qpid, citranks in pid2ranks.items():
candpids = [pid_score[0] for pid_score in citranks]
cand_relevances = [pid2rels_gold[qpid][pid] for pid in candpids]
qpid2rankedcand_relevances[qpid] = cand_relevances
return qpid2rankedcand_relevances
def read_facet_specific_relevances(data_path, run_path, dataset, facet, method):
"""
Read the gold data and the model rankings and the relevances for the
model.
:param data_path: string; directory with gold citations for test pids and rankings
from baseline methods in subdirectories.
    :param run_path: string; directory with ranked candidates; for baselines this is a
        subdirectory of data_path, otherwise it is a model run directory.
:param method: string; method with which ranks were created.
:param dataset: string; eval dataset.
:param facet: string; facet for eval.
:return: qpid2rankedcand_relevances: dict('qpid_facet': [relevances]);
candidate gold relevances for the candidates in order ranked by the
model.
"""
gold_fname = os.path.join(data_path, 'test-pid2anns-{:s}-{:s}.json'.format(dataset, facet))
ranked_fname = os.path.join(run_path, 'test-pid2pool-{:s}-{:s}-{:s}-ranked.json'.format(dataset, method, facet))
# Load gold test data (citations).
with codecs.open(gold_fname, 'r', 'utf-8') as fp:
pid2pool_source = json.load(fp)
num_query = len(pid2pool_source)
print('Gold query pids: {:d}'.format(num_query))
pid2rels_gold = {}
for qpid, pool_rel in pid2pool_source.items():
pool = pool_rel['cands']
cands_rels = pool_rel['relevance_adju']
pid2rels_gold['{:s}_{:s}'.format(qpid, facet)] = \
dict([(pid, rel) for pid, rel in zip(pool, cands_rels)])
# Load ranked predictions on test data with methods.
with codecs.open(ranked_fname, 'r', 'utf-8') as fp:
pid2ranks = json.load(fp)
print('Valid ranked query pids: {:d}'.format(len(pid2ranks)))
qpid2rankedcand_relevances = {}
for qpid, citranks in pid2ranks.items():
candpids = [pid_score[0] for pid_score in citranks]
cand_relevances = [pid2rels_gold['{:s}_{:s}'.format(qpid, facet)][pid] for pid in candpids]
qpid2rankedcand_relevances['{:s}_{:s}'.format(qpid, facet)] = cand_relevances
return qpid2rankedcand_relevances
def read_all_facet_relevances(data_path, run_path, dataset, method, facets):
"""
Read the gold data and the model rankings and the relevances for the
model.
:param data_path: string; directory with gold citations for test pids and rankings
from baseline methods in subdirectories.
    :param run_path: string; directory with ranked candidates; for baselines this is a
        subdirectory of data_path, otherwise it is a model run directory.
:param method: string; method with which ranks were created.
:param dataset: string; eval dataset.
:param facets: list(string); what facets to read/what counts as "all".
:return: qpid2rankedcand_relevances: dict('qpid_facet': [relevances]);
candidate gold relevances for the candidates in order ranked by the
model.
"""
qpid2rankedcand_relevances = {}
for facet in facets:
print('Reading facet: {:s}'.format(facet))
gold_fname = os.path.join(data_path, 'test-pid2anns-{:s}-{:s}.json'.format(dataset, facet))
ranked_fname = os.path.join(run_path, 'test-pid2pool-{:s}-{:s}-{:s}-ranked.json'.format(dataset, method, facet))
# Load gold test data (citations).
with codecs.open(gold_fname, 'r', 'utf-8') as fp:
pid2pool_source = json.load(fp)
num_query = len(pid2pool_source)
print('Gold query pids: {:d}'.format(num_query))
pid2rels_gold = {}
for qpid, pool_rel in pid2pool_source.items():
pool = pool_rel['cands']
cands_rels = pool_rel['relevance_adju']
pid2rels_gold['{:s}_{:s}'.format(qpid, facet)] = \
dict([(pid, rel) for pid, rel in zip(pool, cands_rels)])
# Load ranked predictions on test data with methods.
with codecs.open(ranked_fname, 'r', 'utf-8') as fp:
pid2ranks = json.load(fp)
print('Valid ranked query pids: {:d}'.format(len(pid2ranks)))
for qpid, citranks in pid2ranks.items():
candpids = [pid_score[0] for pid_score in citranks]
cand_relevances = [pid2rels_gold['{:s}_{:s}'.format(qpid, facet)][pid] for pid in candpids]
qpid2rankedcand_relevances['{:s}_{:s}'.format(qpid, facet)] = cand_relevances
print('Total queries: {:d}'.format(len(qpid2rankedcand_relevances)))
return qpid2rankedcand_relevances
def graded_eval_pool_rerank_faceted(data_path, run_path, method, dataset, facet, split_str, comet_exp_key):
"""
Evaluate the re-ranked pool for the faceted data. Anns use graded relevance scores.
:param data_path: string; directory with gold citations for test pids and rankings
from baseline methods in subdirectories.
    :param run_path: string; directory with ranked candidates; for baselines this is a
        subdirectory of data_path, otherwise it is a model run directory.
:param method: string; method with which ranks were created.
:param dataset: string; eval dataset.
:param facet: string; facet for eval.
:param split_str: {dev, test}
:param comet_exp_key: string; to allow reporting metrics to existing experiments.
:return:
"""
print(f'EVAL SPLIT: {split_str}')
ATKS = [5, 10, 20]
with codecs.open(os.path.join(data_path, f'{dataset}-queries-release.csv')) as csvfile:
reader = csv.DictReader(csvfile)
query_metadata = dict([('{:s}_{:s}'.format(row['pid'], row['facet']), row) for row in reader])
perq_out_fname = os.path.join(run_path, 'test-pid2pool-{:s}-{:s}-{:s}-perq.csv'.format(dataset, method, facet))
if facet == 'all':
qpid2rankedcand_relevances = read_all_facet_relevances(data_path=data_path, run_path=run_path,
dataset=dataset, method=method,
facets=['background', 'method', 'result'])
else:
qpid2rankedcand_relevances = read_facet_specific_relevances(data_path=data_path, run_path=run_path,
dataset=dataset, facet=facet, method=method)
# Go over test papers and compute metrics.
all_metrics = {}
num_cands = 0.0
num_queries = 0.0
perq_file = codecs.open(perq_out_fname, 'w', 'utf-8')
perq_csv = csv.DictWriter(perq_file, extrasaction='ignore',
fieldnames=['paper_id', 'title',
'recall@5', 'precision@5', 'f1@5',
'recall@10', 'precision@10', 'f1@10',
'recall@20', 'precision@20', 'f1@20',
'r_precision', 'av_precision', 'reciprocal_rank', 'ndcg',
'ndcg@20', 'ndcg%20', 'paper type'])
perq_csv.writeheader()
print('Precision and recall at rank: {:}'.format(ATKS))
for qpid_facet, qranked_judgements in qpid2rankedcand_relevances.items():
all_metrics[qpid_facet] = compute_metrics(qranked_judgements, pr_atks=ATKS,
threshold_grade=2)
num_cands += len(qranked_judgements)
num_queries += 1
metrics = all_metrics[qpid_facet]
metrics['paper_id'] = qpid_facet
metrics['title'] = query_metadata[qpid_facet]['title']
perq_csv.writerow(metrics)
aggmetrics = aggregate_metrics_crossval(query_metrics=all_metrics,
facet_str=facet, split_str=split_str)
print('Wrote: {:s}'.format(perq_file.name))
perq_file.close()
print('Total queries: {:d}; Total candidates: {:d}'.format(int(num_queries), int(num_cands)))
print('R-Precision; Precision@5; Precision@10; Precision@20; Recall@20; MAPrecision; NDCG; NDCG@20; NDCG%20')
print('{:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}\n'.
format(aggmetrics['r_precision'],
aggmetrics['precision@5'], aggmetrics['precision@10'], aggmetrics['precision@20'],
aggmetrics['recall@20'], aggmetrics['mean_av_precision'], aggmetrics['ndcg'],
aggmetrics['ndcg@20'], aggmetrics['ndcg%20']))
# print('NDCG; NDCG%5; NDCG%10; NDCG%15; NDCG%20; NDCG%25')
# print('{:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}\n'.
# format(aggmetrics['ndcg'], aggmetrics['ndcg%5'], aggmetrics['ndcg%10'], aggmetrics['ndcg%15'],
# aggmetrics['ndcg%20'], aggmetrics['ndcg%25']))
# Log metrics to comet.ml.
if comet_exp_key:
run_name = os.path.basename(run_path)
cml_experiment = cml.ExistingExperiment(previous_experiment=comet_exp_key, display_summary_level=0)
cml_experiment.set_name(run_name)
display_metrics = {'r_precision': aggmetrics['r_precision'],
'precision@5': aggmetrics['precision@5'],
'precision@10': aggmetrics['precision@10'],
'precision@20': aggmetrics['precision@20'],
'recall@20': aggmetrics['recall@20'],
'ndcg': aggmetrics['ndcg'],
'ndcg@20': aggmetrics['ndcg@20'],
'ndcg%20': aggmetrics['ndcg%20'],
'mean_av_precision': aggmetrics['mean_av_precision']}
cml_experiment.log_metrics(display_metrics, prefix=f'{dataset}-{split_str}-{facet}')
cml_experiment.log_table(perq_out_fname, prefix=f'{dataset}-{split_str}-{facet}')
def graded_eval_pool_rerank_unf(data_path, run_path, method, dataset, split, comet_exp_key):
"""
Evaluate the re-ranked pool for unfaceted data. Anns use graded relevance scores.
:param data_path: string; directory with gold citations for test pids and rankings
from baseline methods in subdirectories.
    :param run_path: string; directory with ranked candidates; for baselines this is a
        subdirectory of data_path, otherwise it is a model run directory.
:param method: string; method with which ranks were created.
:param dataset: string; eval dataset.
:param split: string; {dev, test}
    :param comet_exp_key: string; to allow reporting metrics to existing experiments.
    :return:
"""
ATKS = [5, 10, 20]
with codecs.open(os.path.join(data_path, f'{dataset}-evaluation_splits.json'), 'r', 'utf-8') as fp:
eval_splits = json.load(fp)
split_paper_ids = eval_splits[split]
print(f'EVAL SPLIT: {split}; Number of queries: {len(split_paper_ids)}')
with codecs.open(os.path.join(data_path, f'{dataset}-queries-release.csv')) as csvfile:
reader = csv.DictReader(csvfile)
query_metadata = dict([(row['paper_id'], row) for row in reader])
perq_out_fname = os.path.join(run_path, f'test-pid2pool-{dataset}-{method}-{split}-perq.txt')
qpid2rankedcand_relevances = read_unf_relevances(data_path=data_path, run_path=run_path,
dataset=dataset, method=method)
# Go over test papers and compute metrics.
all_metrics = {}
num_cands = 0.0
num_queries = 0.0
perq_file = codecs.open(perq_out_fname, 'w', 'utf-8')
perq_csv = csv.DictWriter(perq_file, extrasaction='ignore',
fieldnames=['paper_id', 'title',
'recall@5', 'precision@5', 'f1@5',
'recall@10', 'precision@10', 'f1@10',
'recall@20', 'precision@20', 'f1@20',
'r_precision', 'av_precision', 'reciprocal_rank', 'ndcg'])
perq_csv.writeheader()
print('Precision and recall at rank: {:}'.format(ATKS))
for qpid in split_paper_ids:
qranked_judgements = qpid2rankedcand_relevances[qpid]
threshold = 1 if dataset in {'treccovid', 'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'} else 2
all_metrics[qpid] = compute_metrics(qranked_judgements, pr_atks=ATKS,
threshold_grade=threshold)
num_cands += len(qranked_judgements)
num_queries += 1
metrics = all_metrics[qpid]
metrics['paper_id'] = qpid
metrics['title'] = query_metadata[qpid]['title']
perq_csv.writerow(metrics)
aggmetrics = aggregate_metrics(query_metrics=all_metrics)
print('Wrote: {:s}'.format(perq_file.name))
perq_file.close()
print('Total queries: {:d}; Total candidates: {:d}'.format(int(num_queries), int(num_cands)))
    # Precision and recall don't make too much sense.
print('R-Precision; Precision@5; Precision@10; Precision@20; Recall@20; MAPrecision; NDCG; NDCG@20; NDCG%20')
print('{:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}\n'.
format(aggmetrics['r_precision'],
aggmetrics['precision@5'], aggmetrics['precision@10'], aggmetrics['precision@20'],
aggmetrics['recall@20'], aggmetrics['mean_av_precision'], aggmetrics['ndcg'],
aggmetrics['ndcg@20'], aggmetrics['ndcg%20']))
# print('NDCG; NDCG%5; NDCG%10; NDCG%15; NDCG%20; NDCG%25')
# print('{:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}\n'.
# format(aggmetrics['ndcg'], aggmetrics['ndcg%5'], aggmetrics['ndcg%10'], aggmetrics['ndcg%15'],
# aggmetrics['ndcg%20'], aggmetrics['ndcg%25']))
if comet_exp_key:
# Log metrics to comet.ml.
run_name = os.path.basename(run_path)
cml_experiment = cml.ExistingExperiment(previous_experiment=comet_exp_key, display_summary_level=0)
cml_experiment.set_name(run_name)
display_metrics = {'r_precision': aggmetrics['r_precision'],
'precision@5': aggmetrics['precision@5'],
'precision@10': aggmetrics['precision@10'],
'precision@20': aggmetrics['precision@20'],
'recall@20': aggmetrics['recall@20'],
'ndcg': aggmetrics['ndcg'],
'mean_av_precision': aggmetrics['mean_av_precision']}
cml_experiment.log_metrics(display_metrics, prefix=f'{dataset}-{split}')
cml_experiment.log_table(perq_out_fname, prefix=f'{dataset}-{split}')
def eval_significance_faceted(perq_path1, method1, perq_path2, method2, num_baseline_comparisions=2):
"""
    Given the per-query results for two methods on disk, compute the statistical
    significance of the difference between their results for a hardcoded set of
    metrics, separately for each facet.
    :param perq_path1: string; directory with per-query result files for method 1.
    :param method1: string; name of method 1.
    :param perq_path2: string; directory with per-query result files for method 2.
    :param method2: string; name of method 2.
    :param num_baseline_comparisions: int; the number of comparisons being made,
        used to apply a Bonferroni correction.
:return:
"""
significance_level_5 = 0.05/num_baseline_comparisions
significance_level_10 = 0.10/num_baseline_comparisions
metrics = ['r_precision', 'recall@20', 'av_precision', 'ndcg']
for facet in ['background', 'method', 'result', 'all']:
m1pid2metrics = {}
try:
fp = codecs.open(os.path.join(perq_path1, f'test-pid2pool-csfcube-{method1}-{facet}-perq.csv'), 'r', 'utf-8')
except FileNotFoundError:
fp = codecs.open(os.path.join(perq_path1, f'test-pid2pool-csfcube-{method1}-{facet}-perq.txt'), 'r', 'utf-8')
reader = csv.DictReader(fp)
for res_row in reader:
m1pid2metrics[res_row['paper_id']] = res_row
m2pid2metrics = {}
try:
fp = codecs.open(os.path.join(perq_path2, f'test-pid2pool-csfcube-{method2}-{facet}-perq.csv'), 'r', 'utf-8')
except FileNotFoundError:
fp = codecs.open(os.path.join(perq_path2, f'test-pid2pool-csfcube-{method2}-{facet}-perq.txt'), 'r', 'utf-8')
reader = csv.DictReader(fp)
for res_row in reader:
m2pid2metrics[res_row['paper_id']] = res_row
metric2pval = {}
for metric in metrics:
m1, m2 = [], []
for qpid in m1pid2metrics.keys():
m1.append(float(m1pid2metrics[qpid][metric]))
m2.append(float(m2pid2metrics[qpid][metric]))
tval, pval = scipystats.ttest_ind(m1, m2, equal_var=False, nan_policy='propagate')
metric2pval[metric] = pval
print('Method 1: {:s}; Method 2: {:s}; facet: {:s}'.format(method1, method2, facet))
print('R-Precision; Recall@20; MAP; NDCG; P-Values:')
print('0.05; Bonferroni corrected significance: {:.4f}'.format(significance_level_5))
print('{:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}'.
format(True if metric2pval['r_precision'] < significance_level_5 else False, metric2pval['r_precision'],
True if metric2pval['recall@20'] < significance_level_5 else False, metric2pval['recall@20'],
True if metric2pval['av_precision'] < significance_level_5 else False, metric2pval['av_precision'],
True if metric2pval['ndcg'] < significance_level_5 else False, metric2pval['ndcg']))
print('0.10; Bonferroni corrected significance: {:.4f}'.format(significance_level_10))
print('{:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}\n'.
format(True if metric2pval['r_precision'] < significance_level_10 else False, metric2pval['r_precision'],
True if metric2pval['recall@20'] < significance_level_10 else False, metric2pval['recall@20'],
True if metric2pval['av_precision'] < significance_level_10 else False, metric2pval['av_precision'],
True if metric2pval['ndcg'] < significance_level_10 else False, metric2pval['ndcg']))
def eval_significance_unfaceted(perq_path1, method1, perq_path2, method2, dataset, num_baseline_comparisions=2):
"""
    Given the per-query results for two methods on disk, compute the statistical
    significance of the difference between their results for a hardcoded set of
    metrics on the unfaceted test split.
    :param perq_path1: string; directory with per-query result files for method 1.
    :param method1: string; name of method 1.
    :param perq_path2: string; directory with per-query result files for method 2.
    :param method2: string; name of method 2.
    :param dataset: string; eval dataset.
    :param num_baseline_comparisions: int; the number of comparisons being made,
        used to apply a Bonferroni correction.
:return:
"""
significance_level_5 = 0.05/num_baseline_comparisions
significance_level_10 = 0.10/num_baseline_comparisions
metrics = ['r_precision', 'recall@20', 'av_precision', 'ndcg']
m1pid2metrics = {}
try:
fp = codecs.open(os.path.join(perq_path1, f'test-pid2pool-{dataset}-{method1}-test-perq.csv'), 'r', 'utf-8')
except FileNotFoundError:
fp = codecs.open(os.path.join(perq_path1, f'test-pid2pool-{dataset}-{method1}-test-perq.txt'), 'r', 'utf-8')
reader = csv.DictReader(fp)
for res_row in reader:
m1pid2metrics[res_row['paper_id']] = res_row
m2pid2metrics = {}
try:
fp = codecs.open(os.path.join(perq_path2, f'test-pid2pool-{dataset}-{method2}-test-perq.csv'), 'r', 'utf-8')
except FileNotFoundError:
fp = codecs.open(os.path.join(perq_path2, f'test-pid2pool-{dataset}-{method2}-test-perq.txt'), 'r', 'utf-8')
reader = csv.DictReader(fp)
for res_row in reader:
m2pid2metrics[res_row['paper_id']] = res_row
metric2pval = {}
for metric in metrics:
m1, m2 = [], []
for qpid in m1pid2metrics.keys():
m1.append(float(m1pid2metrics[qpid][metric]))
m2.append(float(m2pid2metrics[qpid][metric]))
tval, pval = scipystats.ttest_ind(m1, m2, equal_var=False, nan_policy='propagate')
metric2pval[metric] = pval
print('Method 1: {:s}; Method 2: {:s}'.format(method1, method2))
print('R-Precision; Recall@20; MAP; NDCG; P-Values:')
print('0.05; Bonferroni corrected significance: {:.4f}'.format(significance_level_5))
print('{:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}'.
format(True if metric2pval['r_precision'] < significance_level_5 else False, metric2pval['r_precision'],
True if metric2pval['recall@20'] < significance_level_5 else False, metric2pval['recall@20'],
True if metric2pval['av_precision'] < significance_level_5 else False, metric2pval['av_precision'],
True if metric2pval['ndcg'] < significance_level_5 else False, metric2pval['ndcg']))
print('0.10; Bonferroni corrected significance: {:.4f}'.format(significance_level_10))
print('{:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}, {:}: {:.4f}\n'.
format(True if metric2pval['r_precision'] < significance_level_10 else False, metric2pval['r_precision'],
True if metric2pval['recall@20'] < significance_level_10 else False, metric2pval['recall@20'],
True if metric2pval['av_precision'] < significance_level_10 else False, metric2pval['av_precision'],
True if metric2pval['ndcg'] < significance_level_10 else False, metric2pval['ndcg']))
def main():
"""
Parse command line arguments and call all the above routines.
:return:
"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand',
help='The action to perform.')
# Evaluate the re-ranked pools.
evaluate_pool_ranks = subparsers.add_parser('eval_pool_ranking')
evaluate_pool_ranks.add_argument('--data_path', required=True,
help='Input path where file with ranked candidates'
'and gold data are if its a baseline method.')
evaluate_pool_ranks.add_argument('--run_path',
help='Input path where file with ranked candidates. Model run dir.')
evaluate_pool_ranks.add_argument('--run_name', default=None,
                                     help='Base name of directory with embeddings for a specific model run.')
evaluate_pool_ranks.add_argument('--experiment', required=True,
choices=['specter', 'sbtinybertsota', 'sbrobertanli',
'sbmpnet1B', 'cosentbert', 'ictsentbert', 'cospecter',
'labspecter', 'miswordbienc', 'supsimcse', 'unsupsimcse',
'miswordpolyenc', 'sbalisentbienc'],
help='The experiment to evaluate.')
evaluate_pool_ranks.add_argument('--dataset', required=True,
choices=['csfcube', 'relish', 'treccovid',
'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'],
help='The dataset to evaluate for.')
evaluate_pool_ranks.add_argument('--facet',
choices=['background', 'method', 'result', 'all'],
help='Facet of abstract to read from.')
evaluate_pool_ranks.add_argument('--comet_exp_key', default=None,
                                     help='Hash for the comet.ml experiment run; be sure to copy it correctly.')
    # Compute statistical significance of differences between two ranked runs.
    # These arguments are inferred from their use in the 'result_signf' branch below.
    signf_ranks = subparsers.add_parser('result_signf')
    signf_ranks.add_argument('--run_path_1', required=True,
                             help='Directory with per-query result files for method 1.')
    signf_ranks.add_argument('--method_1', required=True,
                             help='Name of the method whose results are in run_path_1.')
    signf_ranks.add_argument('--run_path_2', required=True,
                             help='Directory with per-query result files for method 2.')
    signf_ranks.add_argument('--method_2', required=True,
                             help='Name of the method whose results are in run_path_2.')
    signf_ranks.add_argument('--dataset', required=True,
                             choices=['csfcube', 'relish', 'treccovid',
                                      'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'],
                             help='The dataset the per-query results were computed on.')
    cl_args = parser.parse_args()
if cl_args.subcommand == 'eval_pool_ranking':
try:
facet = cl_args.facet
except AttributeError:
facet = None
if cl_args.experiment in {'specter', 'sbtinybertsota', 'sbrobertanli', 'ictsentbert',
'supsimcse', 'unsupsimcse', 'sbmpnet1B'}:
run_path = os.path.join(cl_args.data_path, cl_args.experiment)
else:
run_path = cl_args.run_path
if cl_args.dataset in {'csfcube'}:
graded_eval_pool_rerank_faceted(data_path=cl_args.data_path, method=cl_args.experiment,
facet=facet, dataset=cl_args.dataset, run_path=run_path,
split_str='dev', comet_exp_key=cl_args.comet_exp_key)
print('\n')
graded_eval_pool_rerank_faceted(data_path=cl_args.data_path, method=cl_args.experiment,
facet=facet, dataset=cl_args.dataset, run_path=run_path,
split_str='test', comet_exp_key=cl_args.comet_exp_key)
elif cl_args.dataset in {'relish', 'treccovid', 'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'}:
graded_eval_pool_rerank_unf(data_path=cl_args.data_path, method=cl_args.experiment,
dataset=cl_args.dataset, run_path=run_path, split='dev',
comet_exp_key=cl_args.comet_exp_key)
print('\n\n')
graded_eval_pool_rerank_unf(data_path=cl_args.data_path, method=cl_args.experiment,
dataset=cl_args.dataset, run_path=run_path, split='test',
comet_exp_key=cl_args.comet_exp_key)
elif cl_args.subcommand == 'result_signf':
if cl_args.dataset == 'csfcube':
eval_significance_faceted(perq_path1=cl_args.run_path_1, method1=cl_args.method_1,
perq_path2=cl_args.run_path_2, method2=cl_args.method_2)
else:
eval_significance_unfaceted(perq_path1=cl_args.run_path_1, method1=cl_args.method_1,
perq_path2=cl_args.run_path_2, method2=cl_args.method_2,
dataset=cl_args.dataset)
else:
sys.stderr.write("Unknown action.")
if __name__ == '__main__':
main()
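# Example invocation (hypothetical paths, shown for illustration only):
#   python -m src.evaluation.ranking_eval eval_pool_ranking \
#       --data_path datasets/csfcube --run_path model_runs/my_run \
#       --experiment cospecter --dataset csfcube --facet all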
| aspire-main | src/evaluation/ranking_eval.py |
import argparse
import collections
from tqdm import tqdm
from src.evaluation.utils.models import get_model, SimilarityModel
from src.evaluation.utils.utils import *
from src.evaluation.utils.datasets import EvalDataset
from data_utils import create_dir
from typing import Union
import pandas as pd
from src.evaluation.utils.metrics import compute_metrics
import sys
import logging
def encode(model: SimilarityModel, dataset: EvalDataset):
"""
    Cache model encodings of an entire dataset.
:param model: A model for encoding papers
:param dataset: Dataset to encode
"""
    # Get all dataset paper ids which are not yet cached.
dataset_pids = dataset.dataset.keys()
cached_pids = set() if model.cache is None else model.cache.keys()
uncached_pids = set(dataset_pids).difference(set(cached_pids))
uncached_ds = {k: dataset.get(k) for k in uncached_pids}
# cache encodings
if len(uncached_ds) > 0:
logging.info(f"Encoding {len(uncached_ds)} uncached papers in {len(uncached_ds) // model.batch_size} batches")
for batch_pids, batch_papers in tqdm((batchify(uncached_ds, model.batch_size))):
model.cache_encodings(batch_pids, batch_papers)
def score(model: SimilarityModel,
dataset: EvalDataset,
facet: Union[str, None],
scores_filename: str):
"""
Calculate similarity scores between queries and their candidates in a test pool
:param model: Model to test
:param dataset: Dataset to take test pool from
:param facet: Facet of query to use. Relevant only to CSFcube dataset.
:param scores_filename: Saves results here
:return:
"""
# load test pool
test_pool = dataset.get_test_pool(facet=facet)
log_msg = f"Scoring {len(test_pool)} queries in {dataset.name}"
if facet is not None:
log_msg += f', facet: {facet}'
logging.info(log_msg)
# Get model similarity scores between each query and its candidates
results = collections.defaultdict(list)
for query_pid, query_pool in tqdm(list(test_pool.items())):
# get query encoding
# if faceted, also filter the encoding by facet
query_encoding = model.get_encoding(pids=[query_pid], dataset=dataset)[query_pid]
if facet is not None:
query_encoding = model.get_faceted_encoding(query_encoding, facet, dataset.get(query_pid))
# get candidates encoding
candidate_pids = query_pool['cands']
candidate_encodings = model.get_encoding(pids=candidate_pids, dataset=dataset)
        # Calculate the similarity of each candidate to the query encoding.
candidate_similarities = dict()
for candidate_pid in candidate_pids:
similarity = model.get_similarity(query_encoding, candidate_encodings[candidate_pid])
candidate_similarities[candidate_pid] = similarity
# sort candidates by similarity, ascending (lower score == closer encodings)
sorted_candidates = sorted(candidate_similarities.items(), key=lambda i: i[1], reverse=True)
results[query_pid] = [(cpid, -1*sim) for cpid, sim in sorted_candidates]
# write scores
with codecs.open(scores_filename, 'w', 'utf-8') as fp:
json.dump(results, fp)
logging.info(f'Wrote: {scores_filename}')
def evaluate(results_dir: str,
facet: Union[str, None],
dataset: EvalDataset,
comet_exp_key=None):
"""
    Compute metrics based on a model's similarity scores on a dataset's test pool.
    Assumes score() has already been called for the relevant model, dataset and facet.
    :param results_dir: string; directory with the score files written by score().
    :param facet: string or None; facet of the query to evaluate ('all' runs every facet).
    :param dataset: EvalDataset; dataset whose test pool is evaluated.
    :param comet_exp_key: string; optional comet.ml experiment key for reporting metrics.
:return:
"""
logging.info('Computing metrics')
# load score results
results = dict()
if facet == 'all':
for facet_i in FACETS:
results[facet_i] = load_score_results(results_dir, dataset, facet_i)
else:
facet_key = 'unfaceted' if facet is None else facet
results[facet_key] = load_score_results(results_dir, dataset, facet)
# get queries metadata
query_metadata = dataset.get_query_metadata()
query_test_dev_split = dataset.get_test_dev_split()
threshold_grade = dataset.get_threshold_grade()
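    # threshold_grade binarizes the graded relevance labels before the precision/recall
    # style metrics are computed (NDCG uses the graded labels directly).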
# compute metrics per query
metrics = []
metric_columns = None
for facet_i, facet_results in results.items():
for query_id, sorted_relevancies in facet_results.items():
query_metrics = compute_metrics(sorted_relevancies,
pr_atks=[5, 10, 20],
threshold_grade=threshold_grade)
if metric_columns is None:
metric_columns = list(query_metrics.keys())
query_metrics['facet'] = facet_i
query_metrics['split'] = 'test' if query_test_dev_split is None else query_test_dev_split[query_id]
query_metrics['paper_id'] = query_id
query_metrics['title'] = query_metadata.loc[query_id]['title']
metrics.append(query_metrics)
metrics = pd.DataFrame(metrics)
# write evaluations file per query
query_metrics_filename = get_evaluations_filename(results_dir, facet, aggregated=False)
metrics.to_csv(query_metrics_filename, index=False)
logging.info(f'Wrote: {query_metrics_filename}')
    # aggregate metrics per (facet, dev/test split)
aggregated_metrics = []
for facet_i in metrics.facet.unique():
for split in metrics.split.unique():
agg_results = metrics[(metrics.facet == facet_i) & (metrics.split == split)][metric_columns].mean().round(4).to_dict()
logging.info(f'----------Results for {split}/{facet_i}----------')
logging.info('\n'.join([f'{k}\t{agg_results[k]}' for k in ('av_precision', 'ndcg%20')]))
agg_results['facet'] = facet_i
agg_results['split'] = split
aggregated_metrics.append(agg_results)
if facet == 'all':
for split in metrics.split.unique():
agg_results = metrics[metrics.split == split][metric_columns].mean().round(4).to_dict()
logging.info(f'----------Results for {split}/{facet}----------')
logging.info('\n'.join([f'{k}\t{agg_results[k]}' for k in ('av_precision', 'ndcg%20')]))
agg_results['facet'] = facet
agg_results['split'] = split
aggregated_metrics.append(agg_results)
aggregated_metrics = pd.DataFrame(aggregated_metrics)
# Write evaluation file aggregated per (facet, dev/test_split)
aggregated_metrics_filename = get_evaluations_filename(results_dir, facet, aggregated=True)
aggregated_metrics.to_csv(aggregated_metrics_filename, index=False)
logging.info(f'Wrote: {aggregated_metrics_filename}')
def main(args):
# init log
if args.log_fname is not None:
log_dir = os.path.split(os.path.join(os.getcwd(), args.log_fname))[0]
if not os.path.exists(log_dir):
create_dir(log_dir)
logging.basicConfig(level='INFO', format='%(message)s', filename=args.log_fname)
else:
logging.basicConfig(level='INFO', format='%(message)s', stream=sys.stdout)
# check validity of command-line arguments
check_args(args)
# init results dir
results_dir = get_results_dir(args.results_dir, args.dataset_name, args.model_name, args.run_name)
if not os.path.exists(results_dir):
create_dir(results_dir)
# init model and dataset
dataset = EvalDataset(name=args.dataset_name, root_path=args.dataset_dir)
    model = None
if 'encode' in args.actions or 'score' in args.actions:
logging.info(f'Loading model: {args.model_name}')
model = get_model(model_name=args.model_name)
logging.info(f'Loading dataset: {args.dataset_name}')
if args.cache:
# init cache
encodings_filename = get_encodings_filename(results_dir)
logging.info(f'Setting model cache at: {encodings_filename}')
model.set_encodings_cache(encodings_filename)
if 'encode' in args.actions:
# cache model's encodings of entire dataset
encode(model, dataset)
if 'score' in args.actions:
# score model on dataset's test pool
if args.facet == 'all':
for facet in FACETS:
score(model, dataset, facet=facet, scores_filename=get_scores_filename(results_dir, facet=facet))
else:
score(model, dataset, facet=args.facet, scores_filename=get_scores_filename(results_dir, facet=args.facet))
if 'evaluate' in args.actions:
# evaluate metrics for model scores
evaluate(results_dir,
facet=args.facet,
dataset=dataset)
def check_args(args):
if args.facet is not None:
assert args.dataset_name == 'csfcube', f'Faceted query search is only tested on csfcube, not {args.dataset_name}'
if args.dataset_name == 'csfcube' and args.facet is None:
logging.info("No facet selected for CSFcube. Running on all facets.")
args.facet = 'all'
if 'encode' in args.actions and not args.cache:
logging.info("Action 'encode' selected, automatically enabling cache")
args.cache = True
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', required=True, help='The name of the model to run. Choose from a model_name '
'with an implementation in evaluation_models.get_model')
parser.add_argument('--dataset_name', required=True, help='Dataset to evaluate similarities on',
choices=['gorcmatscicit', 'csfcube', 'relish', 'treccovid',
'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'])
parser.add_argument('--dataset_dir', required=True,
help="Dir to dataset files (e.g. abstracts-{dataset}.jsonl)")
parser.add_argument('--results_dir', required=True,
help="Results base dir to store encodings cache, scores and metrics")
parser.add_argument('--facet', help='Relevant only to csfcube dataset. Select a facet to use for the task'
' of faceted similarity search. If "all", tests all facets one at a time.',
choices=['result', 'method', 'background', 'all'], default=None)
parser.add_argument('--cache', action='store_true', help='Use if we would like to cache the encodings of papers.'
                                                              ' If action "encode" is selected, this is set automatically to true.')
parser.add_argument('--run_name',help='Name of this evaluation run.\n'
'Saves results under results_dir/model_name/run_name/\n'
'to allow different results to same model_name')
parser.add_argument('--trained_model_path', help='Basename for a trained model we would like to evaluate on.')
parser.add_argument('--log_fname', help='Filename of log file')
parser.add_argument('--actions', choices=['encode', 'score', 'evaluate'],
nargs="+", default=['encode', 'score', 'evaluate'],
help="""'Encode' creates vector representations for the entire dataset.
'Score' calculates similarity scores on the dataset's test pool.
'Evaluate' calculates metrics based on the similarity scores predicted.
By default does all three.""")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args) | aspire-main | src/evaluation/evaluate.py |
"""
From: https://gist.github.com/bwhite/3726239#file-rank_metrics-py
"""
import numpy as np
def mean_reciprocal_rank(rs):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.61111111111111105
>>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> mean_reciprocal_rank(rs)
0.5
>>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.75
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
rs = (np.asarray(r).nonzero()[0] for r in rs)
return np.mean([1. / (r[0] + 1) if r.size else 0. for r in rs])
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> r_precision(r)
0.33333333333333331
>>> r = [0, 1, 0]
>>> r_precision(r)
0.5
>>> r = [1, 0, 0]
>>> r_precision(r)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
def precision_at_k(r, k):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> precision_at_k(r, 1)
0.0
>>> precision_at_k(r, 2)
0.0
>>> precision_at_k(r, 3)
0.33333333333333331
>>> precision_at_k(r, 4)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Relevance score length < k
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return np.mean(r)
def average_precision(r):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
>>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
>>> delta_r = 1. / sum(r)
>>> sum([sum(r[:x + 1]) / (x + 1.) * delta_r for x, y in enumerate(r) if y])
0.7833333333333333
>>> average_precision(r)
0.78333333333333333
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
if not out:
return 0.
return np.mean(out)
def mean_average_precision(rs):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]]
>>> mean_average_precision(rs)
0.78333333333333333
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1], [0]]
>>> mean_average_precision(rs)
0.39166666666666666
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return np.mean([average_precision(r) for r in rs])
def dcg_at_k(r, k, method=1):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> dcg_at_k(r, 1)
3.0
>>> dcg_at_k(r, 1, method=1)
3.0
>>> dcg_at_k(r, 2)
5.0
>>> dcg_at_k(r, 2, method=1)
4.2618595071429155
>>> dcg_at_k(r, 10)
9.6051177391888114
>>> dcg_at_k(r, 11)
9.6051177391888114
"An Introduction to Neural Information Retrieval" writes method 1 for DCG
so using that as default.
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
"""Score is normalized discounted cumulative gain (ndcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> ndcg_at_k(r, 1)
1.0
>>> r = [2, 1, 2, 0]
>>> ndcg_at_k(r, 4)
0.9203032077642922
>>> ndcg_at_k(r, 4, method=1)
0.96519546960144276
>>> ndcg_at_k([0], 1)
0.0
>>> ndcg_at_k([1], 2)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Normalized discounted cumulative gain
"""
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
def recall_at_k(ranked_rel, atk, max_total_relevant):
"""
Compute recall at k.
:param ranked_rel: list(int); ranked list of relevance judged data.
:param atk: int; rank at which to compute metric.
:param max_total_relevant: int; maximum relevant to consider in
case there are more relevant in total.
:return: recall: float.
"""
total_relevant = sum(ranked_rel)
total_relevant = min(max_total_relevant, total_relevant)
relatk = sum(ranked_rel[:atk])
if total_relevant > 0:
recall_atk = float(relatk)/total_relevant
else:
recall_atk = 0.0
return recall_atk
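# Illustrative sketch (added for clarity, not part of the original gist): with
# ranked_rel=[1, 0, 1, 0], atk=2 and max_total_relevant=2, one of the two
# relevant items appears in the top 2, so recall@2 == 0.5.
def _example_recall_at_k():
    return recall_at_k(ranked_rel=[1, 0, 1, 0], atk=2, max_total_relevant=2)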
def compute_metrics(ranked_judgements, pr_atks, threshold_grade):
"""
Given the ranked judgements compute precision, recall and f1.
:param ranked_judgements: list(int); graded or binary relevances in rank order.
:param pr_atks: list(int); the @K values to use for computing precision and recall.
:param threshold_grade: int; Assuming 0-3 graded relevances, threshold at some point
and convert graded to binary relevance. If passed also compute NDCG.
:return:
"""
metrics = {}
graded_judgements = ranked_judgements
ranked_judgements = [1 if rel >= threshold_grade else 0 for rel in graded_judgements]
    # Use the full set of candidates, not the pr_atk.
ndcg = ndcg_at_k(graded_judgements, len(ranked_judgements))
ndcg_20 = ndcg_at_k(graded_judgements, 20)
ndcg_50 = ndcg_at_k(graded_judgements, 50)
for atk in [5, 10, 15, 20, 25]:
ndcg_pr_atk = ndcg_at_k(graded_judgements, int((atk/100)*len(ranked_judgements)))
metrics[f'ndcg%{atk}'] = float(ndcg_pr_atk)
max_total_relevant = sum(ranked_judgements)
for atk in pr_atks:
recall = recall_at_k(ranked_rel=ranked_judgements,
atk=atk, max_total_relevant=max_total_relevant)
precision = precision_at_k(r=ranked_judgements, k=atk)
f1 = 2*precision*recall/(precision + recall) if (precision + recall) > 0 else 0.0
metrics[f'precision@{atk}'] = float(precision)
metrics[f'recall@{atk}'] = float(recall)
metrics[f'f1@{atk}'] = float(f1)
r_precision_ = r_precision(r=ranked_judgements)
av_precision = average_precision(r=ranked_judgements)
reciprocal_rank = mean_reciprocal_rank(rs=[ranked_judgements])
metrics['r_precision'] = float(r_precision_)
metrics['av_precision'] = float(av_precision)
metrics['reciprocal_rank'] = float(reciprocal_rank)
metrics['ndcg'] = float(ndcg)
metrics['ndcg@20'] = float(ndcg_20)
metrics['ndcg@50'] = float(ndcg_50)
return metrics
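# Illustrative usage sketch (added for clarity, not part of the original file):
# graded relevance judgements (0-3) in rank order are thresholded at grade 2 to
# obtain binary relevance before precision/recall/MAP are computed; NDCG uses the
# graded judgements directly. The toy judgements below are made up.
def _example_compute_metrics():
    ranked_judgements = [3, 0, 2, 1, 0]
    return compute_metrics(ranked_judgements, pr_atks=[5], threshold_grade=2)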
| aspire-main | src/evaluation/utils/metrics.py |
from abc import ABCMeta, abstractmethod
from ex_aspire_consent import AspireConSent, AutoTokenizer, prepare_abstracts, AutoModel
from ex_aspire_consent_multimatch import AllPairMaskedWasserstein
from src.learning.facetid_models import disent_models
from src.learning import batchers
from collections import namedtuple
from scipy.spatial.distance import euclidean
import torch
from torch import nn, Tensor
from torch.autograd import Variable
import numpy as np
import h5py
from sentence_transformers import SentenceTransformer
import sklearn
from src.evaluation.utils.utils import batchify
import logging
import codecs
import json
import os
from typing import List, Dict, Union
from src.evaluation.utils.datasets import EvalDataset
class SimilarityModel(metaclass=ABCMeta):
"""
    An abstract model for evaluating the paper similarity task.
    Two methods to implement:
    1. encode: create paper encodings
    2. get_similarity: calculate similarity between two encodings
    If set_encodings_cache is called, paper encodings are cached automatically (and loaded in future runs)
"""
ENCODING_TYPES = ('abstract', 'sentence', 'sentence-entity')
def __init__(self, name: str, encoding_type: str, batch_size: int=8):
self.name = name
assert encoding_type in SimilarityModel.ENCODING_TYPES, 'Model output representation must be either\n' \
'"abstract" (1 embedding for entire document)\n' \
'"sentence" (1 embedding per each sentence)\n' \
'or "sentence-entity" (1 embedding per each sentence ' \
'and 1 embedding per each entity)'
self.encoding_type = encoding_type
self.batch_size = batch_size
self.cache = None
@abstractmethod
def encode(self, batch_papers: List[Dict]):
"""
Create encodings for a batch of papers
:param batch_papers: List of dictionaries, each representing one paper.
Keys are 'ABSTRACT', 'TITLE, 'FACETS'.
If NER extraction ran for the dataset, 'ENTITIES' will exist.
:return: Union[List[Union[Tensor, np.ndarray]], Union[Tensor, np.ndarray]]
Encodings which represent the papers.
"""
raise NotImplementedError()
@abstractmethod
def get_similarity(self, x: Union[Tensor, np.ndarray], y: Union[Tensor, np.ndarray]):
"""
Calculate a similarity score between two encodings
:param x: First encoding
:param y: Second Encoding
:return: Similarity score (higher == better)
"""
raise NotImplementedError()
def set_encodings_cache(self, cache_filename: str):
"""
Creates a cache for encodings of papers.
If the cache exists, loads it.
        Note that manually stopping a run while the cache is open might corrupt the cache,
        which would force us to delete and rebuild it.
:param cache_filename: filename for cache
"""
try:
self.cache = h5py.File(cache_filename, 'a')
except Exception as e:
logging.info(f"Error: could not open encodings cache {cache_filename}.\n"
f"Overwriting the cache.")
self.cache = h5py.File(cache_filename, 'w')
def cache_encodings(self, batch_pids: List[str], batch_papers: List[dict]):
"""
Caches paper encodings in format of {paper_id: paper_encoding}
:param batch_pids: paper ids for the batch
:param batch_papers: papers for the batch
:return: Also returns the encodings
"""
assert self.cache is not None, "Cannot cache encodings, cache is not set"
encodings = self.encode(batch_papers)
for i, pid in enumerate(batch_pids):
paper_encoding = encodings[i]
self.cache.create_dataset(name=pid, data=paper_encoding)
return encodings
def get_encoding(self, pids: List[str], dataset: EvalDataset) -> Dict:
"""
Gets paper encodings for the paper ids.
If encodings are cached, loads them.
Else, gets papers from the dataset and encodes.
:param pids: paper ids
:param dataset: EvalDataset object
:return: encodings for all pids passed, in format: {pid: encoding}
"""
        # divide into cached and uncached
uncached_pids = [pid for pid in pids if pid not in self.cache] if self.cache is not None else pids
cached_pids = set(pids).difference(set(uncached_pids))
# get all cached pids
all_encodings = dict()
for pid in cached_pids:
all_encodings[pid] = torch.from_numpy(np.array(self.cache.get(pid)))
# encode all uncached pids
for batch_pids, batch_papers in batchify({pid: dataset.get(pid) for pid in uncached_pids},
self.batch_size):
if self.cache is not None:
batch_encodings = self.cache_encodings(batch_pids, batch_papers)
else:
batch_encodings = self.encode(batch_papers)
all_encodings.update({pid: batch_encodings[i] for i, pid in enumerate(batch_pids)})
return all_encodings
def get_faceted_encoding(self, unfaceted_encoding: Union[Tensor, np.ndarray], facet: str, input_data: Dict):
"""
Filters an encoding of a paper for a given facet.
If there is one embedding per entire abstract, returns it without filtering.
If there is one embedding per sentence, filters out sentences which are not part of that facet.
If there is, in addition to sentence embeddings, also one embedding per entity, filters out entities
derived from sentences which are not part of that facet.
:param unfaceted_encoding: Original encoding
:param facet: Facet to filter
:param input_data: Paper data from EvalDataset
:return: the faceted encoding
"""
if self.encoding_type == 'abstract':
# if single encoding for entire abstract, cannot filter by facet
return unfaceted_encoding
else:
# either one embedding per sentence, or one for each sentence and one for each entity
# get facets of each sentence
labels = ['background' if lab == 'objective_label' else lab[:-len('_label')]
for lab in input_data['FACETS']]
# get ids of sentences matching this facet
abstract_facet_ids = [i for i, k in enumerate(labels) if facet == k]
if self.encoding_type == 'sentence':
filtered_ids = abstract_facet_ids
else:
# if embedding for each entity, take embeddings from facet sentences only
ner_cur_id = len(labels)
ner_facet_ids = []
for i, sent_ners in enumerate(input_data['ENTITIES']):
if i in abstract_facet_ids:
ner_facet_ids += list(range(ner_cur_id, ner_cur_id + len(sent_ners)))
ner_cur_id += len(sent_ners)
filtered_ids = abstract_facet_ids + ner_facet_ids
return unfaceted_encoding[filtered_ids]
def __del__(self):
if hasattr(self, 'cache') and self.cache is not None:
self.cache.close()
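# Illustrative sketch (added for clarity, not part of the original file): a minimal
# SimilarityModel subclass only needs to produce one encoding per paper in encode()
# and compare two encodings in get_similarity(). The length-based "encoding" below
# is a toy strategy for demonstration, not one of the evaluated models.
class _ExampleToyModel(SimilarityModel):
    def __init__(self, **kwargs):
        super().__init__(name='example_toy', encoding_type='abstract', **kwargs)

    def encode(self, batch_papers: List[Dict]):
        # one small fixed-size vector per paper: sentence count and title length
        return [np.array([len(paper['ABSTRACT']), len(paper['TITLE'])], dtype=np.float32)
                for paper in batch_papers]

    def get_similarity(self, x: Union[Tensor, np.ndarray], y: Union[Tensor, np.ndarray]):
        return -euclidean(x, y)  # higher (less negative) == more similar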
class AspireModel(SimilarityModel):
"""
Loads and runs otAspire models seen in the paper
"""
# paths to two models uploaded, trained for the compsci and biomed data, respectively
MODEL_PATHS = {
'compsci': 'allenai/aspire-contextualsentence-multim-compsci',
'biomed': 'allenai/aspire-contextualsentence-multim-biomed',
}
def __init__(self, **kwargs):
super(AspireModel, self).__init__(**kwargs)
# load compsci/biomed model based on name
dataset_type = self.name.split('_')[-1]
model_path = AspireModel.MODEL_PATHS[dataset_type]
self.model = AspireConSent(model_path)
self.model.eval()
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_similarity(self, x: Union[Tensor, np.ndarray], y: Union[Tensor, np.ndarray]):
# calculates optimal transport between the two encodings
dist_func = AllPairMaskedWasserstein({})
rep_len_tup = namedtuple('RepLen', ['embed', 'abs_lens'])
xt = rep_len_tup(embed=x[None, :].permute(0, 2, 1), abs_lens=[len(x)])
yt = rep_len_tup(embed=y[None, :].permute(0, 2, 1), abs_lens=[len(y)])
ot_dist = dist_func.compute_distance(query=xt, cand=yt).item()
return -ot_dist
def encode(self, batch_papers: List[Dict]):
# prepare input
bert_batch, abs_lens, sent_token_idxs = prepare_abstracts(batch_abs=batch_papers,
pt_lm_tokenizer=self.tokenizer)
# forward through model
with torch.no_grad():
_, batch_reps_sent = self.model.forward(bert_batch=bert_batch,
abs_lens=abs_lens,
sent_tok_idxs=sent_token_idxs)
batch_reps = [batch_reps_sent[i, :abs_lens[i]] for i in range(len(abs_lens))]
return batch_reps
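# Illustrative sketch (added for clarity, not part of the original file): AspireModel
# encodings are per-sentence matrices of shape [num_sentences, embedding_dim], and
# get_similarity returns a negated optimal-transport distance between two such
# matrices (higher == more similar). The random tensors below are placeholders, not
# real paper encodings.
def _example_aspire_similarity(model: AspireModel) -> float:
    query_encoding = torch.rand(4, 768)      # e.g. a 4-sentence abstract
    candidate_encoding = torch.rand(6, 768)  # e.g. a 6-sentence abstract
    return model.get_similarity(query_encoding, candidate_encoding)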
class AspireNER(AspireModel):
"""
An implementation of the ot_aspire models,
where NER entities which were extracted from the sentences of the abstract are added
as new sentences to the abstract.
Testing on csfcube suggests improved results when using this form of Input Augmentation.
"""
def encode(self, batch_papers: List[Dict]):
assert 'ENTITIES' in batch_papers[0], 'No NER data for input. Please run NER/extract_entity.py and' \
' place result in {dataset_dir}/{dataset_name}-ner.jsonl'
input_batch_with_ner = self._append_entities(batch_papers)
return super(AspireNER, self).encode(input_batch_with_ner)
def _append_entities(self, batch_papers):
# append ners to abstract end as new sentences
input_batch_with_ner = []
for sample in batch_papers:
ner_list = [item for sublist in sample['ENTITIES'] for item in sublist]
input_sample = {'TITLE': sample['TITLE'],
'ABSTRACT': sample['ABSTRACT'] + ner_list
}
input_batch_with_ner.append(input_sample)
return input_batch_with_ner
class BertMLM(SimilarityModel):
"""
Encodings of abstracts based on BERT.
"""
MODEL_PATHS = {
'specter': 'allenai/specter',
# Using roberta here causes the tokenizers below to break cause roberta inputs != bert inputs.
'supsimcse': 'princeton-nlp/sup-simcse-bert-base-uncased',
'unsupsimcse': 'princeton-nlp/unsup-simcse-bert-base-uncased'
}
    def __init__(self, **kwargs):
super(BertMLM, self).__init__(**kwargs)
full_name = BertMLM.MODEL_PATHS[self.name]
self.tokenizer = AutoTokenizer.from_pretrained(full_name)
self.bert_max_seq_len = 500
self.model = AutoModel.from_pretrained(full_name)
self.model.config.output_hidden_states = True
# if torch.cuda.is_available():
# self.model.cuda()
self.model.eval()
def _prepare_batch(self, batch):
"""
Prepare the batch for Bert.
:param batch: list(string); batch of strings.
:return:
"""
# Construct the batch.
tokenized_batch = []
batch_seg_ids = []
batch_attn_mask = []
seq_lens = []
max_seq_len = -1
for sent in batch:
bert_tokenized_text = self.tokenizer.tokenize(sent)
if len(bert_tokenized_text) > self.bert_max_seq_len:
bert_tokenized_text = bert_tokenized_text[:self.bert_max_seq_len]
# Convert token to vocabulary indices
indexed_tokens = self.tokenizer.convert_tokens_to_ids(bert_tokenized_text)
# Append CLS and SEP tokens to the text.
indexed_tokens = self.tokenizer.build_inputs_with_special_tokens(token_ids_0=indexed_tokens)
if len(indexed_tokens) > max_seq_len:
max_seq_len = len(indexed_tokens)
tokenized_batch.append(indexed_tokens)
batch_seg_ids.append([0] * len(indexed_tokens))
batch_attn_mask.append([1] * len(indexed_tokens))
# Pad the batch.
for ids_sent, seg_ids, attn_mask in \
zip(tokenized_batch, batch_seg_ids, batch_attn_mask):
pad_len = max_seq_len - len(ids_sent)
seq_lens.append(len(ids_sent))
ids_sent.extend([self.tokenizer.pad_token_id] * pad_len)
seg_ids.extend([self.tokenizer.pad_token_id] * pad_len)
attn_mask.extend([self.tokenizer.pad_token_id] * pad_len)
return torch.tensor(tokenized_batch), torch.tensor(batch_seg_ids), \
torch.tensor(batch_attn_mask), torch.FloatTensor(seq_lens)
def _pre_process_input_batch(self, batch_papers: List[Dict]):
# preprocess the input
batch = [paper['TITLE'] + ' [SEP] ' + ' '.join(paper['ABSTRACT']) for paper in batch_papers]
return batch
def encode(self, batch_papers: List[Dict]):
input_batch = self._pre_process_input_batch(batch_papers)
tokid_tt, seg_tt, attnmask_tt, seq_lens_tt = self._prepare_batch(input_batch)
# if torch.cuda.is_available():
# tokid_tt = tokid_tt.cuda()
# seg_tt = seg_tt.cuda()
# attnmask_tt = attnmask_tt.cuda()
# seq_lens_tt = seq_lens_tt.cuda()
# pass through bert
with torch.no_grad():
model_out = self.model(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
# top_l is [bs x max_seq_len x bert_encoding_dim]
top_l = model_out.last_hidden_state
batch_reps_cls = top_l[:, 0, :]
# if torch.cuda.is_available():
# batch_reps_cls = batch_reps_cls.cpu().data.numpy()
return batch_reps_cls
def get_similarity(self, x: Union[Tensor, np.ndarray], y: Union[Tensor, np.ndarray]):
return -euclidean(x, y)
class SimCSE(BertMLM):
"""
Subclass of BERT model, for 'supsimcse' and 'unsupsimcse' models
"""
def encode(self, batch_papers: List[Dict]):
"""
:param batch:
:return:
"""
# pre-process batch
batch = []
cur_index = 0
abs_splits = []
for paper in batch_papers:
batch += paper['ABSTRACT']
cur_index += len(paper['ABSTRACT'])
abs_splits.append(cur_index)
tokid_tt, seg_tt, attnmask_tt, seq_lens_tt = self._prepare_batch(batch)
if torch.cuda.is_available():
tokid_tt = tokid_tt.cuda()
seg_tt = seg_tt.cuda()
attnmask_tt = attnmask_tt.cuda()
seq_lens_tt = seq_lens_tt.cuda()
# pass through model
with torch.no_grad():
model_out = self.model(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
# top_l is [bs x max_seq_len x bert_encoding_dim]
batch_reps_pooler = model_out.pooler_output
# batch_reps_cls = top_l[:, 0, :]
if torch.cuda.is_available():
batch_reps_pooler = batch_reps_pooler.cpu().data.numpy()
# batch_reps_cls = batch_reps_cls.cpu().data.numpy()
# return batch_reps_cls
batch_reps = np.split(batch_reps_pooler, abs_splits[:-1])
return batch_reps
class BertNER(BertMLM):
"""
An implementation of the Specter model
where extracted NER entities are added as sentences to the abstract before creating the embedding.
"""
def __init__(self, name, **kwargs):
super(BertNER, self).__init__(name=name.split('_ner')[0], **kwargs)
self.name = name
def _pre_process_input_batch(self, batch_papers: List[Dict]):
# preprocess the input
batch = []
for paper in batch_papers:
title_abstract = paper['TITLE'] + ' [SEP] ' + ' '.join(paper['ABSTRACT'])
entity_sentences = '. '.join([item for sublist in paper['ENTITIES'] for item in sublist])
title_abstract_entities = title_abstract + ' ' + entity_sentences + '.'
batch.append(title_abstract_entities)
return batch
class SentenceModel(SimilarityModel):
"""
Class for SentenceTransformer models.
"""
MODEL_PATHS = {
'sbtinybertsota': 'paraphrase-TinyBERT-L6-v2',
'sbrobertanli': 'nli-roberta-base-v2',
'sbmpnet1B': 'sentence-transformers/all-mpnet-base-v2'
}
def __init__(self, **kwargs):
super(SentenceModel, self).__init__(**kwargs)
self.model = SentenceTransformer(SentenceModel.MODEL_PATHS[self.name], device='cpu')
def encode(self, batch_papers: List[Dict]):
# pre-process input data
batch = []
cur_index = 0
abs_splits = []
for paper in batch_papers:
batch += paper['ABSTRACT']
cur_index += len(paper['ABSTRACT'])
abs_splits.append(cur_index)
sent_reps = self.model.encode(batch, show_progress_bar=False)
# re-split sentence embeddings to match lengths of each abstract in batch
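        # e.g. if the batch holds abstracts with 3 and 2 sentences, abs_splits == [3, 5],
        # and np.split(sent_reps, [3]) yields one array of 3 and one of 2 sentence embeddings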
batch_reps = np.split(sent_reps, abs_splits[:-1])
return batch_reps
def get_similarity(self, x: Union[Tensor, np.ndarray], y: Union[Tensor, np.ndarray]):
sent_sims = sklearn.metrics.pairwise.cosine_similarity(x, y)
return float(np.max(sent_sims))
# Define the Aspire contextual encoder with embeddings:
class AspireConSenContextual(nn.Module):
def __init__(self, hf_model_name):
"""
        :param hf_model_name: str; Hugging Face model name or path for the base encoder.
"""
torch.nn.Module.__init__(self)
self.bert_encoding_dim = 768
self.bert_layer_count = 12 + 1 # plus 1 for the bottom most layer.
self.bert_encoder = AutoModel.from_pretrained(hf_model_name)
self.bert_encoder.config.output_hidden_states = True
def forward(self, bert_batch, abs_lens, sent_tok_idxs, ner_tok_idxs):
"""
Pass a batch of sentences through BERT and get sentence
reps based on averaging contextual token embeddings.
"""
# batch_size x num_sents x encoding_dim
_, sent_reps, ner_reps = self.consent_reps_bert(bert_batch=bert_batch,
batch_senttok_idxs=sent_tok_idxs,
batch_nertok_idxs=ner_tok_idxs,
num_sents=abs_lens)
return sent_reps, ner_reps
def _get_sent_reps(self,
final_hidden_state,
batch_tok_idxs,
batch_size,
max_sents,
max_seq_len):
sent_reps = []
for sent_i in range(max_sents):
cur_sent_mask = np.zeros((batch_size, max_seq_len, self.bert_encoding_dim))
# Build a mask for the ith sentence for all the abstracts of the batch.
for batch_abs_i in range(batch_size):
abs_sent_idxs = batch_tok_idxs[batch_abs_i]
try:
sent_i_tok_idxs = abs_sent_idxs[sent_i]
except IndexError: # This happens in the case where the abstract has fewer than max sents.
sent_i_tok_idxs = []
cur_sent_mask[batch_abs_i, sent_i_tok_idxs, :] = 1.0
sent_mask = Variable(torch.FloatTensor(cur_sent_mask))
# if torch.cuda.is_available():
# sent_mask = sent_mask.cuda()
# batch_size x seq_len x encoding_dim
sent_tokens = final_hidden_state * sent_mask
# The sent_masks non zero elements in one slice along embedding dim is the sentence length.
cur_sent_reps = torch.sum(sent_tokens, dim=1) / \
torch.count_nonzero(sent_mask[:, :, 0], dim=1).clamp(min=1).unsqueeze(dim=1)
sent_reps.append(cur_sent_reps.unsqueeze(dim=1))
return torch.concat(sent_reps, dim=1)
def _get_ner_reps(self, final_hidden_state, batch_nertok_idxs):
ner_reps = []
for i, abs_toks in enumerate(batch_nertok_idxs):
ner_reps_for_abs = []
for ner_toks in abs_toks:
if len(ner_toks) > 0:
tokens_for_ner = final_hidden_state[i, ner_toks]
ner_rep = tokens_for_ner.mean(dim=0)[None, :]
ner_reps_for_abs.append(ner_rep)
else:
ner_reps_for_abs.append([])
ner_reps.append(ner_reps_for_abs)
return ner_reps
def consent_reps_bert(self, bert_batch, batch_senttok_idxs, batch_nertok_idxs, num_sents):
"""
        Pass the concatenated abstract through BERT, and average token reps to get contextual sentence reps.
-- NO weighted combine across layers.
:param bert_batch: dict('tokid_tt', 'seg_tt', 'attnmask_tt', 'seq_lens'); items to use for getting BERT
representations. The sentence mapped to BERT vocab and appropriately padded.
:param batch_senttok_idxs: list(list(list(int))); batch_size([num_sents_per_abs[num_tokens_in_sent]])
:param num_sents: list(int); number of sentences in each example in the batch passed.
:return:
doc_cls_reps: FloatTensor [batch_size x bert_encoding_dim]
sent_reps: FloatTensor [batch_size x num_sents x bert_encoding_dim]
"""
seq_lens = bert_batch['seq_lens']
batch_size, max_seq_len = len(seq_lens), max(seq_lens)
max_sents = max(num_sents)
tokid_tt, seg_tt, attnmask_tt = bert_batch['tokid_tt'], bert_batch['seg_tt'], bert_batch['attnmask_tt']
# Pass input through BERT and return all layer hidden outputs.
model_outputs = self.bert_encoder(tokid_tt, token_type_ids=seg_tt, attention_mask=attnmask_tt)
final_hidden_state = model_outputs.last_hidden_state
        # Read off the CLS token as the document representation.
doc_cls_reps = final_hidden_state[:, 0, :]
doc_cls_reps = doc_cls_reps.squeeze()
# Average token reps for every sentence to get sentence representations.
# Build the first sent for all batch examples, second sent ... and so on in each iteration below.
sent_reps = self._get_sent_reps(final_hidden_state, batch_senttok_idxs, batch_size, max_sents, max_seq_len)
# Do the same for all ners in each sentence to get ner representation
ner_reps = self._get_ner_reps(final_hidden_state, batch_nertok_idxs)
return doc_cls_reps, sent_reps, ner_reps
class TrainedAbstractModel(SimilarityModel):
"""
    Class for our trained models which produce abstract-level embeddings
"""
# model names mapped to their model class
MODEL_CLASSES = {
'cospecter': disent_models.MySPECTER,
}
MODEL_BATCHERS = {
'cospecter': batchers.AbsTripleBatcher
}
def __init__(self, trained_model_path, model_version='cur_best', **kwargs):
super(TrainedAbstractModel, self).__init__(encoding_type='abstract', **kwargs)
run_info_filename = os.path.join(trained_model_path, 'run_info.json')
weights_filename = os.path.join(trained_model_path, 'model_{:s}.pt'.format(model_version))
assert os.path.exists(run_info_filename)
assert os.path.exists(weights_filename)
# load hyper-params file
with codecs.open(run_info_filename, 'r', 'utf-8') as fp:
run_info = json.load(fp)
hyper_params = run_info['all_hparams']
# get model class and batcher
if self.name == 'cospecter':
ModelClass = disent_models.MySPECTER
batcher = batchers.AbsTripleBatcher
else:
raise NotImplementedError(f"Unknown model {self.name}")
# init trained model
model = ModelClass(hyper_params)
# load weights
model.load_state_dict(torch.load(weights_filename))
# Move model to GPU
if torch.cuda.is_available():
model.cuda()
model.eval()
self.model = model
self.batcher = batcher
self.tokenizer = AutoTokenizer.from_pretrained(hyper_params['base-pt-layer'])
def encode(self, batch_papers: List[Dict]):
# pre-process input
batch = [paper['TITLE'] + ' [SEP] ' + ' '.join(paper['ABSTRACT']) for paper in batch_papers]
# pass through model
bert_batch, _, _ = self.batcher.prepare_bert_sentences(sents=batch, tokenizer=self.tokenizer)
ret_dict = self.model.encode(batch_dict={'bert_batch': bert_batch})
return ret_dict['doc_reps']
def get_similarity(self, x, y):
return -euclidean(x, y)
class TrainedSentModel(SimilarityModel):
"""
Class for our trained models which provide per-sentence embeddings
"""
def __init__(self, trained_model_path, **kwargs):
super(TrainedSentModel, self).__init__(**kwargs)
from sentence_transformers import SentenceTransformer, models
word_embedding_model = models.Transformer('allenai/scibert_scivocab_uncased',
max_seq_length=512)
# Loading local model: https://github.com/huggingface/transformers/issues/2422#issuecomment-571496558
trained_model_fname = os.path.join(trained_model_path, 'sent_encoder_cur_best.pt')
word_embedding_model.auto_model.load_state_dict(torch.load(trained_model_fname))
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')
self.model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
def encode(self, batch_papers: List[Dict]):
# pre-process papers by extracting all sentences
batch = []
cur_index = 0
abs_splits = []
for paper in batch_papers:
batch += paper['ABSTRACT']
cur_index += len(paper['ABSTRACT'])
abs_splits.append(cur_index)
# pass through model
sent_reps = self.model.encode(batch, show_progress_bar=False)
# re-link sentences from the same paper
batch_reps = np.split(sent_reps, abs_splits[:-1])
return batch_reps
def get_similarity(self, x, y):
sent_sims = sklearn.metrics.pairwise.cosine_similarity(x, y)
return float(np.max(sent_sims))
class AspireContextNER(SimilarityModel):
"""
    Class for the ASPIRE model, where each entity is represented by the average of the token embeddings
    for all tokens that fall within the entity's span inside the sentence it appears in.
Uses aspire_contextual.AspireContextualModel instead of the regular AspireConSent
"""
def __init__(self, **kwargs):
super(AspireContextNER, self).__init__(**kwargs)
model_path = AspireModel.MODEL_PATHS['compsci']
self.model = AspireConSenContextual(model_path)
self.model.eval()
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
def encode(self, input_data):
# preprocess input
bert_batch, abs_lens, sent_token_idxs, ner_token_idxs = self._preprocess_input(input_data)
# pass through model to get representations for sentences and entities in each paper
with torch.no_grad():
batch_reps_sent, batch_reps_ners = self.model.forward(bert_batch=bert_batch,
abs_lens=abs_lens,
sent_tok_idxs=sent_token_idxs,
ner_tok_idxs=ner_token_idxs)
# concat sentence reps and entity reps for each paper
batch_reps = []
for abs_len, sent_rep, ner_rep in zip(abs_lens, batch_reps_sent, batch_reps_ners):
if len(ner_rep) > 0:
batch_reps.append(torch.concat([sent_rep[:abs_len]] + [n for n in ner_rep if len(n) > 0], dim=-2))
else:
batch_reps.append(sent_rep[:abs_len])
return batch_reps
def _preprocess_input(self, input_data):
# prepare abstracts the normal way
bert_batch, abs_lens, sent_token_idxs = prepare_abstracts(batch_abs=input_data,
pt_lm_tokenizer=self.tokenizer)
# get token idxs of ners
ner_token_idxs = self._get_ner_token_idxs(input_data, sent_token_idxs)
return bert_batch, abs_lens, sent_token_idxs, ner_token_idxs
def _get_ner_token_idxs(self, input_data: Dict, sent_token_idxs: List):
"""
Finds the token_idx corresponding to each entity in the data,
by tokenizing the entity and searching for a sub-range in the abstract that matches.
Entities were originally extracted using a different tokenizer, which means some entities
cannot be properly fitted to the sent_token_idxs passed to the model, so they cannot be used.
        Additionally, some entities appear only after the point where the sentence has been truncated; they, too, cannot be used.
:param input_data: paper data
:param sent_token_idxs: all sentence token idx
:return: sent_token_idxs for each NER entity in the paper abstract
"""
ner_token_idxs = []
for sample, sample_sent_idxs in zip(input_data, sent_token_idxs):
sentences = sample['ABSTRACT']
sentence_ners = sample['ENTITIES']
sample_ner_token_idxs = []
for ners, sentence, token_idxs in zip(sentence_ners, sentences, sample_sent_idxs):
tokens = self.tokenizer.tokenize(sentence)
for ner in ners:
# find the tokens in the sentence that correspond to this entity
ner_range = self.find_sublist_range(tokens, self.tokenizer.tokenize(ner))
if ner_range is not None and len(ner_range) > 0:
# get all idxs that happen before hitting the max number of tokens
ner_idxs = [token_idxs[ner_i] for ner_i in ner_range if ner_i < len(token_idxs)]
# take only ners that are completely inside the tokenization
if len(ner_range) == len(ner_idxs):
sample_ner_token_idxs.append(ner_idxs)
else:
sample_ner_token_idxs.append([])
else:
sample_ner_token_idxs.append([])
ner_token_idxs.append(sample_ner_token_idxs)
return ner_token_idxs
@staticmethod
def find_sublist_range(suplist: List, sublist: List):
"""
:return: The range of a mini-list appearing inside a bigger list
"""
for i in range(len(suplist)):
subrange = []
j = 0
while (i + j) < len(suplist) and j < len(sublist) and suplist[i + j] == sublist[j]:
subrange.append(i + j)
j += 1
if j == len(sublist):
return subrange
return None
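    # Illustrative note (added for clarity): for example,
    # find_sublist_range(['graph', 'neural', 'network', 'models'], ['neural', 'network'])
    # returns [1, 2], while a sublist that never occurs in suplist returns None.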
def get_similarity(self, x: Union[Tensor, np.ndarray], y: Union[Tensor, np.ndarray]):
# uses ot_distance
dist_func = AllPairMaskedWasserstein({})
rep_len_tup = namedtuple('RepLen', ['embed', 'abs_lens'])
xt = rep_len_tup(embed=x[None, :].permute(0, 2, 1), abs_lens=[len(x)])
yt = rep_len_tup(embed=y[None, :].permute(0, 2, 1), abs_lens=[len(y)])
ot_dist = dist_func.compute_distance(query=xt, cand=yt).item()
return -ot_dist
def get_faceted_encoding(self, unfaceted_encoding: Union[Tensor, np.ndarray], facet: str, input_data: Dict):
"""
        Must rewrite the facet filtering because, as explained in _get_ner_token_idxs,
        some entities cannot be encoded due to tokenizer mismatch or sentence truncation.
        We filter them out, then follow the normal logic to get a faceted encoding.
"""
        # get ner_token_idxs, then see which entities are valid (& have an encoding)
_, _, _, ner_token_idxs = self._preprocess_input([input_data])
ner_token_idxs = ner_token_idxs[0]
is_valid_entity = [len(x) > 0 for x in ner_token_idxs]
# filter the entity list with only valid entities
filtered_entity_list = []
entity_id = 0
for sent_ners in input_data['ENTITIES']:
filtered_sent_ners = []
for entity in sent_ners:
if is_valid_entity[entity_id]:
filtered_sent_ners.append(entity)
entity_id += 1
filtered_entity_list.append(filtered_sent_ners)
filtered_input_data = {**{k: v for k, v in input_data.items() if k != 'ENTITIES'},
**{'ENTITIES': filtered_entity_list}}
        # call the super method with this updated input
return super(AspireContextNER, self).get_faceted_encoding(unfaceted_encoding, facet, filtered_input_data)
def get_model(model_name, trained_model_path=None) -> SimilarityModel:
"""
Factory method for SimilarityModel used in evaluation
:param model_name: name of model to create
:param trained_model_path: If a trained model, supply path to the training
:return: SimilarityModel
"""
if model_name in {'aspire_compsci', 'aspire_biomed'}:
return AspireModel(name=model_name, encoding_type='sentence')
elif model_name == 'specter':
return BertMLM(name=model_name, encoding_type='abstract')
elif model_name in {'supsimcse', 'unsupsimcse'}:
return SimCSE(name=model_name, encoding_type='abstract')
elif model_name == 'specter_ner':
return BertNER(name=model_name, encoding_type='abstract')
elif model_name in {'sbtinybertsota', 'sbrobertanli', 'sbmpnet1B'}:
return SentenceModel(name=model_name, encoding_type='sentence')
elif model_name in {'aspire_ner_compsci', 'aspire_ner_biomed'}:
return AspireNER(name=model_name, encoding_type='sentence-entity')
elif model_name in {'aspire_context_ner_compsci', 'aspire_context_ner_biomed'}:
return AspireContextNER(name=model_name, encoding_type='sentence-entity')
elif model_name == 'cospecter':
return TrainedAbstractModel(name=model_name,
trained_model_path=trained_model_path,
encoding_type='abstract')
elif model_name in {'cosentbert', 'ictsentbert'}:
return TrainedSentModel(name=model_name,
trained_model_path=trained_model_path,
encoding_type='sentence')
else:
raise NotImplementedError(f"No Implementation for model {model_name}") | aspire-main | src/evaluation/utils/models.py |
import os
import codecs
import json
import pandas as pd
from typing import Dict, Union
class EvalDataset:
"""
Class for datasets used in evaluation
"""
def __init__(self, name: str, root_path: str):
"""
:param name: Name of dataset
:param root_path: Path where dataset files sit (e.g. abstracts-{name}}.json)
"""
self.name = name
self.root_path = root_path
self.dataset = self._load_dataset(fname=os.path.join(root_path, f'abstracts-{self.name}.jsonl'))
# load entity data, if exists
self.ner_data = self._load_ners()
@staticmethod
def _load_dataset(fname: str) -> Dict:
"""
:param fname: File with dataset's papers.
:return: dictionary of {pid: paper_info},
with paper_info being a dict with keys ABSTRACT and TITLE.
If data is CSFcube, also includes FACETS.
If NER extraction was performed, also includes ENTITIES.
"""
dataset = dict()
with codecs.open(fname, 'r', 'utf-8') as f:
for jsonline in f:
data = json.loads(jsonline.strip())
pid = data['paper_id']
ret_dict = {
'TITLE': data['title'],
'ABSTRACT': data['abstract'],
}
if 'pred_labels' in data:
ret_dict['FACETS'] = data['pred_labels']
dataset[pid] = ret_dict
return dataset
def _load_ners(self) -> Union[None, Dict]:
"""
Attempts to load dictionary with NER information on papers, as dictionary.
If not found, returns None.
"""
fname = os.path.join(self.root_path, f'{self.name}-ner.jsonl')
if os.path.exists(fname):
with codecs.open(fname, 'r', 'utf-8') as ner_f:
return json.load(ner_f)
else:
return None
def get(self, pid: str) -> Dict:
"""
:param pid: paper id
:return: relevant information for the paper: title, abstract, and if available also facets and entities.
"""
data = self.dataset[pid]
if self.ner_data is not None:
return {**data, **{'ENTITIES': self.ner_data[pid]}}
else:
return data
def get_test_pool(self, facet=None):
"""
        Load the test pool of queries and candidates.
If performing faceted search, the test pool depends on the facet.
:param facet: If cfscube, one of (result, method, background). Else, None.
:return: test pool
"""
if facet is not None:
fname = os.path.join(self.root_path, f"test-pid2anns-{self.name}-{facet}.json")
else:
fname = os.path.join(self.root_path, f"test-pid2anns-{self.name}.json")
with codecs.open(fname, 'r', 'utf-8') as fp:
test_pool = json.load(fp)
return test_pool
def get_gold_test_data(self, facet=None):
"""
Load the relevancies gold data for the dataset.
:param facet: If cfscube, one of (result, method, background). Else, None.
:return: gold data
"""
# format is {query_id: {candidate_id: relevance_score}}
gold_fname = f'test-pid2anns-{self.name}-{facet}.json' if facet is not None else f'test-pid2anns-{self.name}.json'
with codecs.open(os.path.join(self.root_path, gold_fname), 'r', 'utf-8') as fp:
gold_test_data = {k: dict(zip(v['cands'], v['relevance_adju'])) for k, v in json.load(fp).items()}
return gold_test_data
def get_query_metadata(self):
"""
Load file with metadata on queries in test pool.
:return:
"""
metadata_fname = os.path.join(self.root_path, f'{self.name}-queries-release.csv')
query_metadata = pd.read_csv(metadata_fname, index_col='pid')
query_metadata.index = query_metadata.index.astype(str)
return query_metadata
def get_test_dev_split(self):
"""
Load file that determines dev/test split for dataset.
:return:
"""
if self.name == 'csfcube':
# entire dataset is test set
return None
else:
with codecs.open(os.path.join(self.root_path, f'{self.name}-evaluation_splits.json'), 'r', 'utf-8') as fp:
return json.load(fp)
def get_threshold_grade(self):
"""
Determines threshold grade of relevancy score.
        Relevancies are scores in the range 0 to 3. If a score is at least as high as this threshold,
        a candidate paper is said to be relevant to the query paper.
:return:
"""
return 1 if self.name in {'treccovid', 'scidcite', 'scidcocite', 'scidcoread', 'scidcoview'} else 2
def __iter__(self):
return self.dataset.items()
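# Illustrative usage sketch (added for clarity, not part of the original file).
# The dataset name and directory below are placeholders; any dataset with an
# abstracts-{name}.jsonl file under root_path works the same way.
def _example_eval_dataset_usage():
    dataset = EvalDataset(name='csfcube', root_path='/path/to/csfcube')
    test_pool = dataset.get_test_pool(facet='method')
    some_query_pid = next(iter(test_pool))
    # dict with TITLE and ABSTRACT, plus FACETS and ENTITIES when available
    return dataset.get(some_query_pid)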
| aspire-main | src/evaluation/utils/datasets.py |
aspire-main | src/evaluation/utils/__init__.py |
|
import codecs
import os
import json
from src.evaluation.utils.datasets import EvalDataset
from data_utils import create_dir
from typing import Dict
FACETS = ('background', 'method', 'result')
def batchify(dataset: Dict, batch_size: int):
"""
Splits dataset into batch size groups
:param dataset: dict of {pid: paper_info}
:param batch_size: batch size
:return: Iterator which returns examples in batches of batch_size
"""
pids = []
batch = []
for pid, data in dataset.items():
pids.append(pid)
batch.append(data)
if len(batch) == batch_size:
yield pids, batch
pids = []
batch = []
if len(batch) > 0:
yield pids, batch
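# Illustrative usage sketch (added for clarity, not part of the original file):
# batchify yields (pids, papers) pairs of at most batch_size items each; the toy
# dictionary below is made up for demonstration.
def _example_batchify_usage():
    toy_dataset = {'p1': {'TITLE': 'a'}, 'p2': {'TITLE': 'b'}, 'p3': {'TITLE': 'c'}}
    # yields (['p1', 'p2'], two papers) and then (['p3'], one paper)
    return [(pids, len(papers)) for pids, papers in batchify(toy_dataset, batch_size=2)]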
def get_scores_filepath(root_path, model_name, run_name, dataset, facet):
if facet is None:
filename = f'test-pid2pool-{dataset}-{model_name}-ranked.json'
else:
filename = f'test-pid2pool-{dataset}-{model_name}-{facet}-ranked.json'
scores_dir = os.path.join(root_path, model_name)
if run_name is not None:
scores_dir = os.path.join(scores_dir, run_name)
return os.path.join(scores_dir, filename)
def get_cache_dir(cache_basedir,
dataset_name,
model_name,
run_name=None):
# get path
cache_path = os.path.join(cache_basedir, dataset_name, model_name)
if run_name is not None:
cache_path = os.path.join(cache_path, run_name)
# create dir if path does not exist
if not os.path.exists(cache_path):
create_dir(cache_path)
return cache_path
def get_results_dir(results_basedir, dataset_name, model_name, run_name):
results_dir = os.path.join(results_basedir, dataset_name, model_name)
if run_name is not None:
results_dir = os.path.join(results_dir, run_name)
return results_dir
def get_scores_filename(results_dir, facet):
filename = 'scores.json' if facet is None else f'scores-{facet}.json'
return os.path.join(results_dir, filename)
def get_encodings_filename(results_dir):
return os.path.join(results_dir, 'encodings.h5')
def get_evaluations_filename(results_dir, facet, aggregated):
metrics_type = 'aggregated' if aggregated else 'query'
filename = f'{metrics_type}-evaluations.csv' if facet is None else f'{metrics_type}-evaluations-{facet}.csv'
return os.path.join(results_dir, filename)
def load_score_results(results_dir, dataset: EvalDataset, facet):
# get gold data relevances for facet
gold_test_data = dataset.get_gold_test_data(facet)
# get model similarity scores for facet
with codecs.open(get_scores_filename(results_dir, facet), 'r', 'utf-8') as fp:
model_scores = json.load(fp)
results = {}
for query_id, candidate_scores in model_scores.items():
# get relevancy of each candidate, sorted by the model's similarity score to the query
sorted_candidate_ids = [x[0] for x in candidate_scores]
sorted_relevancies = [gold_test_data[query_id][pid] for pid in sorted_candidate_ids]
results[query_id] = sorted_relevancies
return results | aspire-main | src/evaluation/utils/utils.py |
from setuptools import find_packages, setup
def parse_requirements_file(path):
requirements = []
with open(path) as requirements_file:
import re
def fix_url_dependencies(req: str) -> str:
"""Pip and setuptools disagree about how URL dependencies should be handled."""
m = re.match(
r"^(git\+)?(https|ssh)://(git@)?github\.com/([\w-]+)/(?P<name>[\w-]+)\.git",
req,
)
if m is None:
return req
elif m.group("name") == "tango":
# There is no way to specify extras on the pip command line when doing `pip install <url>`, so
# there is no way to set up an equivalency between the `pip install` syntax and the `setup.py`
# syntax. So we just hard-code it here in the case of tango.
return f"ai2-tango[all] @ {req}"
elif m.group("name") == "lm-evaluation-harness":
return f"lm-eval @ {req}"
elif m.group("name") == "promptsource":
return f"promptsource @ {req}"
else:
return f"{m.group('name')} @ {req}"
for line in requirements_file:
line = line.strip()
if line.startswith("#") or len(line) <= 0:
continue
req, *comment = line.split("#")
req = fix_url_dependencies(req.strip())
requirements.append(req)
return requirements
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import `cached_path` whilst setting up.
VERSION = {} # type: ignore
with open("catwalk/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="ai2-catwalk",
version=VERSION["VERSION"],
description="A library for evaluating language models.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="",
url="https://github.com/allenai/catwalk",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"test_fixtures",
"test_fixtures.*",
],
),
package_data={
"catwalk": [
"py.typed",
"dependencies/promptsource/templates/*/*.yaml",
"dependencies/promptsource/templates/*/*/*.yaml"
]
},
install_requires=parse_requirements_file("requirements.txt"),
extras_require={"dev": parse_requirements_file("dev-requirements.txt")},
python_requires=">=3.8.0",
)
| catwalk-main | setup.py |
import argparse
from tango import Workspace
from tango.common.logging import initialize_logging
from catwalk.steps import PredictStep, CalculateMetricsStep
from catwalk.tasks import TASK_SETS
SHOTS = [0, 1, 2, 4, 8, 16, 32]
DEFAULT_TASKS = {
"arc_challenge",
"arc_easy",
"boolq",
"copa",
#"headqa_en", # Headqa is broken as of 2022-05-05
"hellaswag",
"lambada",
"logiqa",
"mathqa",
"mc_taco",
"mrpc",
"eai::multirc",
"openbookqa",
"piqa",
"pubmedqa",
"qnli",
"qqp",
"race",
"rte",
"sciq",
"sst",
"triviaqa",
"webqs",
"wic",
"winogrande",
"wnli",
"wsc",
}
def main():
initialize_logging()
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default="eai::gpt2")
parser.add_argument('--task', type=str, nargs="+", default=DEFAULT_TASKS)
parser.add_argument('--split', type=str)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--metric', type=str, nargs="+", default=['acc', 'f1'])
parser.add_argument('--limit', type=int)
parser.add_argument(
'-d', '-w',
type=str,
default=None,
metavar="workspace",
dest="workspace",
help="the Tango workspace with the cache")
args = parser.parse_args()
if args.workspace is None:
workspace = None
else:
workspace = Workspace.from_url(args.workspace)
limit = args.limit if hasattr(args, "limit") else None
tasks = set()
for task in args.task:
try:
tasks |= TASK_SETS[task]
except KeyError:
tasks.add(task)
results = {}
for task in tasks:
for num_shots in SHOTS:
predictions = PredictStep(
model=args.model,
task=task,
batch_size=args.batch_size,
limit=limit,
num_shots=num_shots
)
metrics = CalculateMetricsStep(
model=args.model,
task=task,
predictions=predictions)
result = metrics.result(workspace)
for metric_name in args.metric:
metric_value = result.get(metric_name)
if metric_value is not None:
break
results[(task, num_shots)] = metric_value
for key, value in results.items():
task, num_shots = key
print(f"{task}\t{num_shots}\t{value}")
if __name__ == "__main__":
main()
| catwalk-main | experiments/num_shots.py |
import argparse
from tango import Workspace
from tango.common.logging import initialize_logging
def main():
initialize_logging(log_level="WARNING")
parser = argparse.ArgumentParser()
parser.add_argument('--run', '-r', type=str, required=True)
parser.add_argument('--step', '-s', type=str, default="tabulate")
parser.add_argument(
'-d', '-w',
type=str,
default="beaker://ai2/task-complexity",
metavar="workspace",
dest="workspace",
help="the Tango workspace with the cache")
args = parser.parse_args()
workspace = Workspace.from_url(args.workspace)
r = workspace.step_result_for_run(args.run, args.step)
print("\n".join(r))
if __name__ == "__main__":
main() | catwalk-main | experiments/everything/print_results.py |
import pytest
def suite_A(test_method):
return pytest.mark.suite_A(test_method)
def suite_B(test_method):
return pytest.mark.suite_B(test_method)
def suite_C(test_method):
return pytest.mark.suite_C(test_method)
def suite_D(test_method):
return pytest.mark.suite_D(test_method)
| catwalk-main | tests/util.py |
catwalk-main | tests/__init__.py |
|
import pytest
from catwalk import MODELS
from catwalk.steps import PredictStep, CalculateMetricsStep
from .util import suite_A
task_names = [
"arc_challenge",
"boolq",
"copa",
"headqa_en",
"hellaswag",
"lambada",
"mc_taco",
"mrpc",
"eai::multirc",
"openbookqa",
"qnli",
"qqp",
"rte",
"webqs",
"wic",
"wsc",
]
model_names = [
"eai::tiny-gpt2",
"eai::t5-very-small-random"
]
params = [(t, m) for t in task_names for m in model_names]
generation_task_names = [
"squad2",
"drop",
]
generation_model_names = [
"eai::tiny-gpt2"
]
generation_params = [(t, m) for t in generation_task_names for m in generation_model_names]
params = params + generation_params
@pytest.mark.parametrize("task_name,model_name", params)
@suite_A
def test_task_eval(task_name: str, model_name: str):
if MODELS[model_name].supports_fewshot:
predict_kwargs = {"num_shots": 3}
else:
predict_kwargs = {}
predict_step = PredictStep(model=model_name, task=task_name, limit=10, **predict_kwargs)
metrics_step = CalculateMetricsStep(model=model_name, task=task_name, predictions=predict_step)
result = metrics_step.result()
assert result is not None
| catwalk-main | tests/test_steps.py |
import inspect
from typing import Any, Dict, cast
import pytest
import catwalk.models
import catwalk.tasks
from catwalk.task import InstanceFormat
from catwalk.tasks.huggingface import HFMCInstance
from .util import suite_B, suite_C
# These tasks are known to fail for now due to an unreachable server.
known_failures = {
"lambada_mt_en",
"lambada_mt_fr",
"lambada_mt_de",
"lambada_mt_it",
"lambada_mt_es",
"triviaqa",
}
# There are too many P3 tasks, so we just pick one.
# MRQA dataset takes too long to load, so we skip it.
task_names = [
pytest.param(
task,
id=task,
marks=pytest.mark.xfail if task in known_failures else (),
)
for task in catwalk.tasks.TASKS.keys()
if not task.startswith("p3::") and not task.startswith("mrqa::")
]
task_names.insert(0, "p3::wiki_qa_Is_This_True_")
@pytest.mark.parametrize("task_name", task_names)
@suite_B
def test_task(task_name: str):
task = catwalk.tasks.TASKS[task_name]
instances = next(
(
task.get_split(split)
for split in ["train", "validation", "test"]
if task.has_split(split)
),
None,
)
if not instances:
return
for conversion in task.instance_conversions.values():
signature = inspect.signature(conversion)
for instance in instances[:10]:
kwargs: Dict[str, Any] = {}
if "num_fewshot" in signature.parameters:
kwargs["num_fewshot"] = 0
try:
if "fewshot_instances" in signature.parameters:
kwargs["fewshot_instances"] = task.get_fewshot_instances(
2, exceptions=instance
)
except ValueError: # This task doesn't support fewshot for the chosen split.
kwargs = {}
assert conversion(instance, **kwargs) is not None
mc_tasks = [
task_name
for task_name, task in catwalk.tasks.TASKS.items()
if task.has_instance_conversion(InstanceFormat.HF_MC)
]
@pytest.mark.parametrize("task_name", mc_tasks)
@suite_C
def test_mc_tasks(task_name):
task = catwalk.tasks.TASKS[task_name]
for split in ["train", "validation", "test"]:
if not task.has_split(split):
continue
for instance in task.get_split(split):
mc_instance = cast(
HFMCInstance, task.convert_instance(instance, InstanceFormat.HF_MC)
)
if mc_instance.correct_answer_index is not None:
assert mc_instance.correct_answer_index >= 0
assert mc_instance.correct_answer_index < len(
mc_instance.answer_choices
)
| catwalk-main | tests/test_all_tasks.py |
import torch
from transformers import AdamW
from catwalk import MODELS, TASKS
from catwalk.steps import CalculateMetricsStep, FinetuneStep, PredictStep
from .util import suite_D
@suite_D
def test_training():
model = MODELS["rc::tiny-gpt2"]
task = TASKS["piqa"]
instances = task.get_split("train")[:16]
predictions_before = list(model.predict(task, instances))
metrics_before = model.calculate_metrics(task, predictions_before)
model = model.trainable_copy()
batch = model.collate_for_training([(task, instance) for instance in instances])
# The smallest training loop in the world.
optimizer = AdamW(model.parameters())
first_loss = None
loss = None
for _ in range(100):
optimizer.zero_grad()
loss = model.forward(**batch)["loss"]
loss.backward()
loss = float(loss)
if first_loss is None:
first_loss = loss
optimizer.step()
assert first_loss > loss
predictions_after = list(model.predict(task, instances))
metrics_after = model.calculate_metrics(task, list(predictions_after))
for prediction_before, prediction_after in zip(
predictions_before, predictions_after
):
assert not torch.allclose(
prediction_before["acc"][0], prediction_after["acc"][0]
)
assert metrics_before != metrics_after
@suite_D
def test_training_step_gpt():
finetune_step = FinetuneStep(
model="rc::tiny-gpt2",
tasks=["piqa", "sst"],
train_steps=10,
validation_steps=10,
)
predict_step = PredictStep(model=finetune_step, task="piqa", limit=10)
metrics_step = CalculateMetricsStep(
model=finetune_step, task="piqa", predictions=predict_step
)
metrics_step.result()
@suite_D
def test_training_step_t5():
finetune_step = FinetuneStep(
model="rc::t5-very-small-random",
tasks=["rte", "boolq"],
train_steps=10,
validation_steps=10,
)
predict_step = PredictStep(model=finetune_step, task="rte", limit=10)
metrics_step = CalculateMetricsStep(
model=finetune_step, task="rte", predictions=predict_step
)
metrics_step.result()
@suite_D
def test_training_step_hf():
finetune_step = FinetuneStep(
model="tiny-bert",
tasks=["piqa"],
train_steps=10,
validation_steps=10,
)
predict_step = PredictStep(model=finetune_step, task="piqa", limit=10)
metrics_step = CalculateMetricsStep(
model=finetune_step, task="piqa", predictions=predict_step
)
metrics_step.result()
| catwalk-main | tests/test_training.py |
import pytest
import catwalk.__main__
from catwalk.steps import CalculateMetricsStep, PredictStep
from .util import suite_C
@suite_C
def test_squad():
args = catwalk.__main__._parser.parse_args(
[
"--model",
"bert-base-uncased",
"--task",
"squad",
"--split",
"validation",
"--limit",
"100",
]
)
catwalk.__main__.main(args)
@pytest.mark.parametrize("task", ["mnli", "cola", "rte", "eai::multirc"])
@suite_C
def test_gpt2_performance(task: str):
model = "rc::gpt2"
predictions = PredictStep(model=model, task=task, limit=100)
metrics = CalculateMetricsStep(model=model, task=task, predictions=predictions)
results = metrics.result()
assert results['relative_improvement'] > 0
def test_lambada_gpt():
model = "gpt2"
task = "lambada"
predictions = PredictStep(model=model, task=task, limit=10)
metrics = CalculateMetricsStep(model=model, task=task, predictions=predictions)
results = metrics.result()
assert results['acc'] >= 0.4
def test_perplexity_gpt():
model = "gpt2"
task = "wikitext"
predictions = PredictStep(model=model, task=task, limit=10)
metrics = CalculateMetricsStep(model=model, task=task, predictions=predictions)
results = metrics.result()
assert results['word_perplexity'] < 40
assert results['byte_perplexity'] < 2.5
assert results['bits_per_byte'] < 1.5
| catwalk-main | tests/test_spotchecks.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
from datetime import datetime
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../"))
from catwalk.version import VERSION, VERSION_SHORT # noqa: E402
# -- Project information -----------------------------------------------------
project = "AI2 Catwalk"
copyright = f"{datetime.today().year}, Allen Institute for Artificial Intelligence"
author = "Allen Institute for Artificial Intelligence"
version = VERSION_SHORT
release = VERSION
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"myst_parser",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.doctest",
"sphinx_copybutton",
"sphinx_autodoc_typehints",
]
suppress_warnings = ["myst.header"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build"]
source_suffix = [".rst", ".md"]
# -- Extension configuration -------------------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"torch": ("https://pytorch.org/docs/stable", None),
"datasets": ("https://huggingface.co/docs/datasets/master/en", None),
"transformers": ("https://huggingface.co/docs/transformers/master/en", None),
}
# Tell myst-parser to assign header anchors for h1-h3.
myst_heading_anchors = 3
# By default, sort documented members by type within classes and modules.
autodoc_member_order = "groupwise"
python_use_unqualified_type_names = True
# Include default values when documenting parameter types.
typehints_defaults = "comma"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = f"ai2-catwalk v{VERSION}"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
html_favicon = "_static/favicon.ico"
html_theme_options = {
"light_css_variables": {
"color-announcement-background": "#1B4596",
"color-announcement-text": "#FFFFFF",
},
"dark_css_variables": {},
"footer_icons": [
{
"name": "GitHub",
"url": "https://github.com/allenai/catwalk",
"html": """
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
</svg>
""", # noqa: E501
"class": "",
},
],
}
#
# The following is a workaround for a bug where Sphinx 5.3.0 tries to find a reference that isn't used anywhere.
#
def on_missing_reference(app, env, node, contnode):
if node['reftarget'] == 'metric kwargs':
return contnode
else:
return None
def setup(app):
app.connect('missing-reference', on_missing_reference)
| catwalk-main | docs/source/conf.py |
from datetime import datetime
from pathlib import Path
from catwalk.version import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int
for i in range(len(lines)):
line = lines[i]
if line.startswith("## Unreleased"):
insert_index = i + 1
elif line.startswith(f"## [v{VERSION}]"):
print("CHANGELOG already up-to-date")
return
elif line.startswith("## [v"):
break
else:
raise RuntimeError("Couldn't find 'Unreleased' section")
lines.insert(insert_index, "\n")
lines.insert(
insert_index + 1,
f"## [v{VERSION}](https://github.com/allenai/catwalk/releases/tag/v{VERSION}) - "
f"{datetime.now().strftime('%Y-%m-%d')}\n",
)
with changelog.open("w") as f:
f.writelines(lines)
if __name__ == "__main__":
main()
| catwalk-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
new_version = packaging.version.parse(TAG)
# Get all tags sorted by version, latest first.
all_tags = os.popen("git tag -l --sort=-version:refname 'v*'").read().split("\n")
assert len(all_tags) > 0
# Out of `all_tags`, find the latest previous version so that we can collect all
# commits between that version and the new version we're about to publish.
# Note that we ignore pre-releases unless the new version is also a pre-release.
last_tag: str
for tag in all_tags:
if not tag.strip(): # could be blank line
continue
version = packaging.version.parse(tag)
if new_version.pre is None and version.pre is not None:
continue
if version < new_version:
last_tag = tag
break
commits = os.popen(f"git log {last_tag}..{TAG}^ --oneline --first-parent").read()
return "## Commits\n\n" + commits
def main():
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| catwalk-main | scripts/release_notes.py |
import math
from typing import (
Union,
Dict,
Any,
Optional,
Sequence,
Iterable,
List,
)
from collections import defaultdict
from random import Random
import tango
import transformers.optimization
from tango import Step, JsonFormat
from tango.common import Lazy, DatasetDict
from tango.common.exceptions import ConfigurationError
from tango.common.sequences import SqliteSparseSequence
from tango.format import SqliteSequenceFormat, TextFormat
from tango.integrations.torch import (
TorchFormat,
TorchTrainingEngine,
DataLoader, TrainingEngine, TrainConfig, StopEarlyCallback,
)
from tango.integrations.torch.model import Model as TangoModel
import torch
from catwalk.task import Task, WithAnswerOptionsMixin
from catwalk.tasks import TASKS
from catwalk.tasks import short_name_for_task_object
from catwalk.model import Model
from catwalk.models import MODELS
from catwalk.models import short_name_for_model_object
@Step.register("catwalk::predict")
class PredictStep(Step):
VERSION = "001"
SKIP_ID_ARGUMENTS = {"batch_size"}
FORMAT = SqliteSequenceFormat
def massage_kwargs(cls, kwargs: Dict[str, Any]) -> Dict[str, Any]:
if isinstance(kwargs["model"], str):
kwargs["model"] = MODELS[kwargs["model"]]
if isinstance(kwargs["task"], str):
kwargs["task"] = TASKS[kwargs["task"]]
if kwargs["split"] is None:
kwargs["split"] = kwargs["task"].default_split
return kwargs
def run(
self,
model: Union[str, Model],
task: Union[str, Task],
split: Optional[str] = None,
limit: Optional[int] = None,
random_subsample_seed: Optional[int] = None,
**kwargs
) -> Sequence[Any]:
if isinstance(model, str):
model = MODELS[model]
if isinstance(task, str):
task = TASKS[task]
if split is None:
split = task.default_split
results = SqliteSparseSequence(self.work_dir_for_run / "result.sqlite")
instances = task.get_split(split)
if limit is not None and len(instances) > limit:
instances = instances[:limit] if random_subsample_seed is None else Random(random_subsample_seed).sample(instances, limit)
instances = instances[len(results):]
for result in model.predict(task, instances, **kwargs):
results.append(result)
return results
@Step.register("catwalk::calculate_metrics")
class CalculateMetricsStep(Step):
VERSION = "001"
FORMAT = JsonFormat
def massage_kwargs(cls, kwargs: Dict[str, Any]) -> Dict[str, Any]:
if isinstance(kwargs["model"], str):
kwargs["model"] = MODELS[kwargs["model"]]
if isinstance(kwargs["task"], str):
kwargs["task"] = TASKS[kwargs["task"]]
return kwargs
def run(
self,
model: Union[str, Model],
task: Union[str, Task],
predictions: Sequence[Any]
) -> Dict[str, float]:
if isinstance(model, str):
model = MODELS[model]
if isinstance(task, str):
task = TASKS[task]
return model.calculate_metrics(task, predictions)
@Step.register("catwalk::finetune")
class FinetuneStep(Step):
VERSION = "002vmn"
FORMAT = TorchFormat
SKIP_ID_ARGUMENTS = {"wandb_entity", "wandb_project"}
SKIP_DEFAULT_ARGUMENTS = {
"early_stopping_patience": None
}
def massage_kwargs(cls, kwargs: Dict[str, Any]) -> Dict[str, Any]:
if isinstance(kwargs["model"], str):
kwargs["model"] = MODELS[kwargs["model"]]
kwargs["tasks"] = [TASKS[task] if isinstance(task, str) else task for task in kwargs["tasks"]]
return kwargs
def run(
self,
model: Union[str, Model],
tasks: List[Union[str, Task]],
train_epochs: Optional[int] = 25,
train_steps: Optional[int] = None,
validate_every: int = 100,
validation_steps: Optional[int] = None,
val_metric_name: str = "loss",
minimize_val_metric: bool = True,
training_engine: Lazy[TrainingEngine] = Lazy(
TorchTrainingEngine,
optimizer=Lazy(torch.optim.AdamW, lr=1e-5)
),
model_wrapper: Optional[Lazy[TangoModel]] = None,
random_seed: int = 42,
batch_size: int = 16,
grad_accum: int = 1,
device_count: int = 1,
distributed_port: int = 54761,
train_split: str = "train",
validation_split: Optional[str] = "validation",
wandb_entity: Optional[str] = None,
wandb_project: Optional[str] = None,
early_stopping_patience: Optional[int] = None
) -> Model: # type: ignore
if isinstance(model, str):
model = MODELS[model]
tasks_in_a_special_variable_because_mypy_is_insane = [
TASKS[task] if isinstance(task, str) else task for task in tasks
]
devices: List[int]
if torch.cuda.is_available() and torch.cuda.device_count() >= device_count:
devices = list(range(device_count))
self.logger.info("Training on %d GPU%s", device_count, "s" if device_count > 1 else "")
else:
devices = [-1] * device_count
self.logger.info(
"Training on CPU with %d worker%s", device_count, "s" if device_count > 1 else ""
)
if devices and len(devices) > 1:
is_distributed = True
num_workers = len(devices)
else:
is_distributed = False
num_workers = 1
if val_metric_name == "loss" and not minimize_val_metric:
raise ConfigurationError(
"This training run is configured to maximize the loss. "
"We don't think you want to do that."
)
train_config = TrainConfig(
self.unique_id,
self.work_dir,
step_name=self.name,
seed=random_seed,
train_steps=train_steps,
train_epochs=train_epochs,
val_metric_name=val_metric_name,
minimize_val_metric=minimize_val_metric,
train_split="train",
validation_split=None if validation_split is None else "validation",
validate_every=validate_every,
checkpoint_every=validate_every,
validation_steps=validation_steps,
grad_accum=grad_accum,
is_distributed=is_distributed,
world_size=num_workers,
distributed_port=distributed_port,
devices=devices
)
# construct dataset from the tasks
splits = {
"train": [
(task, i)
for task in tasks_in_a_special_variable_because_mypy_is_insane
for i in task.get_split(train_split)
],
}
if validation_split is not None:
splits["validation"] = [
(task, i)
for task in tasks_in_a_special_variable_because_mypy_is_insane
for i in task.get_split(validation_split)
]
dataset_dict = DatasetDict(splits=splits, metadata={})
trainable_model_kwargs: Dict[str, Any] = {}
for task in tasks_in_a_special_variable_because_mypy_is_insane:
if isinstance(task, WithAnswerOptionsMixin):
trainable_model_kwargs["num_classification_labels"] = max(
trainable_model_kwargs.get("num_classification_labels", 0),
len(task.answer_options))
trainable_model = model.trainable_copy(**trainable_model_kwargs)
data_loader = Lazy(
DataLoader,
collate_fn=trainable_model.collate_for_training,
batch_size=batch_size,
shuffle=True
)
if model_wrapper is None:
wrapped_model = trainable_model
else:
wrapped_model = Lazy(model_wrapper.construct, model=trainable_model)
callbacks = []
if wandb_entity is not None or wandb_project is not None:
if wandb_entity is None or wandb_project is None:
raise ConfigurationError("You have to set wandb_entity and wandp_project together.")
from tango.integrations.wandb.torch_train_callback import WandbTrainCallback
tags = [short_name_for_task_object(task) for task in tasks_in_a_special_variable_because_mypy_is_insane]
tags.append(short_name_for_model_object(model))
tags.append(f"seed={random_seed}")
callbacks.append(
Lazy(
WandbTrainCallback,
project=wandb_project,
entity=wandb_entity,
tags=[t for t in tags if t is not None]
)
)
if early_stopping_patience is not None:
callbacks.append(Lazy(StopEarlyCallback, patience=early_stopping_patience))
# Hack a default LR scheduler into the training engine
if train_steps is None:
if train_epochs is None:
raise ConfigurationError("You have to set either train_steps or train_epochs.")
train_steps = train_epochs * math.ceil(
len(splits["train"]) / (device_count * grad_accum * batch_size)
)
if (training_engine._constructor == TorchTrainingEngine) or (
training_engine._constructor == TrainingEngine and training_engine._params.get("type") == "torch"
):
if "lr_scheduler" not in training_engine._constructor_extras:
training_engine._constructor_extras["lr_scheduler"] = Lazy(
transformers.optimization.get_linear_schedule_with_warmup,
num_warmup_steps=200,
num_training_steps=train_steps
)
if is_distributed:
import torch.multiprocessing as mp
from tango.common.util import get_extra_imported_modules
mp.spawn(
tango.integrations.torch.train._train,
args=(
self.workspace,
train_config,
wrapped_model,
training_engine,
dataset_dict,
data_loader,
None,
callbacks,
get_extra_imported_modules(),
),
nprocs=num_workers,
)
else:
tango.integrations.torch.train._train(
0,
self.workspace,
train_config,
wrapped_model,
training_engine,
dataset_dict,
data_loader,
callbacks=callbacks
)
# Load best checkpoint before returning model.
if train_config.final_weights_path.is_file():
self.logger.info(
f"Loading best weights from {str(train_config.final_weights_path.resolve())}"
)
state = torch.load(train_config.final_weights_path, map_location="cpu")
# We use `strict=False` because there might be missing keys due to weight tying.
trainable_model.load_state_dict(state, strict=False)
return trainable_model
@Step.register("catwalk::tabulate_metrics")
class TabulateMetricsStep(Step):
VERSION = "001"
FORMAT = TextFormat
def run(self, metrics: Dict[str, Dict[str, float]], format: str = "text") -> Iterable[str]:
flattend_metrics: Dict[str, Dict[str, float]] = defaultdict(dict)
for task_name, task_metrics in metrics.items():
for metric_name, metric_value in task_metrics.items():
# if metric_value is a dict, then it's a nested metric
if isinstance(metric_value, dict):
for nested_metric_name, nested_metric_value in metric_value.items():
flattend_metrics[task_name][f"{metric_name}.{nested_metric_name}"] = nested_metric_value.item() if isinstance(nested_metric_value, torch.Tensor) else nested_metric_value
else:
flattend_metrics[task_name][metric_name] = metric_value
if format == "text":
for task_name, task_metrics in flattend_metrics.items():
for metric_name, metric_value in task_metrics.items():
yield f"{task_name}\t{metric_name}\t{metric_value}"
elif format == "latex":
raise NotImplementedError()
else:
raise AttributeError("At the moment, only the 'text' format is supported.")
| catwalk-main | catwalk/steps.py |
from abc import ABC
from dataclasses import dataclass
from enum import Enum
from functools import partial
from random import Random
from typing import Dict, Any, Optional, Sequence, Union, List, Callable, Mapping, Tuple, Iterable
import torchmetrics
from mypy_extensions import KwArg
from tango.common import Registrable, det_hash
import catwalk.metrics
from catwalk.metrics.entropy import EntropyMetric
from catwalk.metrics.perplexity import PerplexityMetric
PERPLEXITY_METRICS = {
"word_perplexity": PerplexityMetric,
"byte_perplexity": PerplexityMetric,
"bits_per_byte": EntropyMetric,
}
QA_METRICS = {
"squad_metrics": torchmetrics.SQuAD,
}
try:
from functools import cache as memoize # type: ignore
except ImportError:
def memoize(user_function, /): # type: ignore
import functools
return functools.lru_cache(maxsize=None)(user_function)
@memoize
def mc_metrics(num_classes: int):
return {
"acc": catwalk.metrics.AccuracyMetric,
"relative_improvement": partial(catwalk.metrics.RelativeAccuracyImprovementMetric, num_classes=num_classes)
}
@memoize
def classification_metrics(num_classes: int):
return {
"acc": catwalk.metrics.AccuracyMetric,
"relative_improvement": partial(catwalk.metrics.RelativeAccuracyImprovementMetric, num_classes=num_classes)
}
ENTAILMENT_METRICS = classification_metrics(2)
BINARY_CLASSIFICATION_METRICS = classification_metrics(2)
class InstanceFormat(Enum):
HF_DICT = 1
HF_MC = 2
HF_QA = 8
HF_CLASSIFICATION = 10
ELEUTHER_DOC = 3
ELEUTHER_CONTEXT = 4
ELEUTHER_REQUESTS = 5
T5_PROMPT = 6
RANK_CLASSIFICATION = 7
PROMPTSOURCE = 9
@dataclass
class RankClassificationInstance:
choices: List[Tuple[str, str]]
correct_choice: Optional[int]
InstanceConversion = Union[Callable[[Dict[str, Any]], Any], Callable[[Dict[str, Any], KwArg()], Any]]
class Task(Registrable, ABC):
"""
Base class for tasks in Catwalk
"""
def __init__(self, *, version_override: Optional[str] = None):
if version_override is not None:
self.VERSION = version_override
self.metrics: Dict[str, Callable[[], torchmetrics.Metric]] = {}
self.instance_conversions: Dict[InstanceFormat, InstanceConversion] = {}
def det_hash_object(self) -> Any:
# It sucks that we need this. We have to fix this in Tango.
if hasattr(self, "__getstate__"):
return self.__getstate__()
else:
return self.__dict__
def has_split(self, split: str) -> bool:
"""Returns ``True`` if a split with the given name exists. ``False`` otherwise."""
raise NotImplementedError
def get_split(self, split: str) -> Sequence[Dict[str, Any]]:
"""Returns the split with the given name."""
raise NotImplementedError
@property
def default_split(self) -> str:
"""Returns the name of the default split to run evaluations on."""
return "test"
@property
def fewshot_instances_split(self) -> str:
"""Returns the name of the split to use to find few-shot instances in."""
for split_name in ["train", "training", "validation"]:
if self.has_split(split_name):
return split_name
raise ValueError("This task has no split to take fewshot instances from.")
def make_metrics(self) -> Dict[str, torchmetrics.Metric]:
return {
name: metric_fn()
for name, metric_fn in self.metrics.items()
}
def has_instance_conversion(self, format: InstanceFormat) -> bool:
return format in self.instance_conversions
def convert_instance(self, instance: Dict[str, Any], format: InstanceFormat, **kwargs):
return self.instance_conversions[format](instance, **kwargs)
def get_fewshot_instances(
self,
num_shots: int,
*,
exceptions: Union[None, Dict[str, Any], Iterable[Dict[str, Any]]] = None,
random_seed: int = 18830087
) -> Sequence[Dict[str, Any]]:
if num_shots <= 0:
return []
if exceptions is None:
exceptions = []
elif isinstance(exceptions, Dict):
exceptions = [exceptions]
exceptions = frozenset(det_hash(e) for e in exceptions)
r = Random(random_seed)
instances = self.get_split(self.fewshot_instances_split)
sampled_instances = [
instance
for instance in r.sample(instances, num_shots + len(exceptions))
if det_hash(instance) not in exceptions
]
return sampled_instances[:num_shots]
#
# builder-style methods
#
def add_metric(self, name: str, metric: Callable[[], torchmetrics.Metric]):
if isinstance(metric, torchmetrics.Metric):
# Users should not do this, but they will, so we try to handle it.
metric_object = metric.clone()
metric_object.reset()
self.metrics[name] = metric_object.clone
else:
self.metrics[name] = metric
return self
def add_metrics(self, metrics: Mapping[str, Callable[[], torchmetrics.Metric]]):
for name, metric_fn in metrics.items():
self.add_metric(name, metric_fn)
return self
def add_instance_conversion(self, format: InstanceFormat, conversion: InstanceConversion):
self.instance_conversions[format] = conversion
return self
class WithAnswerOptionsMixin:
def __init__(self, answer_options: Sequence[str]):
self.answer_options = answer_options
| catwalk-main | catwalk/task.py |
_MAJOR = "0"
_MINOR = "2"
# On main and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "2"
# This is mainly for pre-releases which have the suffix "rc[0-9]+".
_SUFFIX = ""
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| catwalk-main | catwalk/version.py |
import logging
from dataclasses import dataclass, field
from typing import Optional, Dict, TypeVar, Type, Any
import torch
import transformers
from cached_path import cached_path
from tango.common import det_hash
logger = logging.getLogger(__name__)
@dataclass
class TransformerSpec:
cls: type
model_name: str
override_weights_file: Optional[str] = None
override_weights_strip_prefix: Optional[str] = None
load_weights: bool = True
kwargs: Dict[str, Any] = field(default_factory=dict)
def __hash__(self):
return hash((
f"{self.cls.__module__}.{self.cls.__name__}",
self.model_name,
self.override_weights_file,
self.override_weights_strip_prefix,
self.load_weights,
det_hash(self.kwargs)
))
_model_cache: Dict[TransformerSpec, transformers.PreTrainedModel] = {}
T = TypeVar('T')
def get(
cls: Type[T],
model_name: str,
make_copy: bool,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
load_weights: bool = True,
**kwargs,
) -> T:
"""
Returns a transformer model from the cache.
# Parameters
cls : `type`
The type of model we are constructing.
model_name : `str`
The name of the transformer, for example `"bert-base-cased"`
make_copy : `bool`
If this is `True`, return a copy of the model instead of the cached model itself. If you want to modify the
parameters of the model, set this to `True`. If you want only part of the model, set this to `False`, but
make sure to `copy.deepcopy()` the bits you are keeping.
override_weights_file : `str`, optional (default = `None`)
If set, this specifies a file from which to load alternate weights that override the
weights from huggingface. The file is expected to contain a PyTorch `state_dict`, created
with `torch.save()`.
override_weights_strip_prefix : `str`, optional (default = `None`)
If set, strip the given prefix from the state dict when loading it.
load_weights : `bool`, optional (default = `True`)
If set to `False`, no weights will be loaded. This is helpful when you only
want to initialize the architecture, like when you've already fine-tuned a model
and are going to load the weights from a state dict elsewhere.
"""
global _model_cache
spec = TransformerSpec(
cls,
model_name,
override_weights_file,
override_weights_strip_prefix,
load_weights,
kwargs
)
transformer = _model_cache.get(spec, None)
if transformer is None:
if not load_weights:
config = transformers.AutoConfig.from_pretrained(model_name, **kwargs)
transformer = cls.from_config(config) # type: ignore
elif override_weights_file is not None:
override_weights_file = cached_path(override_weights_file)
override_weights = torch.load(override_weights_file)
if override_weights_strip_prefix is not None:
prefix = str(override_weights_strip_prefix) # mypy insanity
def strip_prefix(s: str) -> str:
if s.startswith(prefix):
return s[len(prefix) :]
else:
return s
valid_keys = {
k
for k in override_weights.keys()
if k.startswith(prefix)
}
if len(valid_keys) > 0:
logger.info(
"Loading %d tensors from %s", len(valid_keys), override_weights_file
)
else:
raise ValueError(
f"Specified prefix of '{prefix}' means no tensors "
f"will be loaded from {prefix}."
)
override_weights = {strip_prefix(k): override_weights[k] for k in valid_keys}
# load from config to avoid loading default weights
config = transformers.AutoConfig.from_pretrained(model_name, **kwargs)
transformer = cls.from_config(config) # type: ignore
# When DistributedDataParallel or DataParallel is used, the state dict of the
# DistributedDataParallel/DataParallel wrapper prepends "module." to all parameters
# of the actual model, since the actual model is stored within the module field.
# This accounts for if a pretained model was saved without removing the
# DistributedDataParallel/DataParallel wrapper.
if hasattr(transformer, "module"):
transformer.module.load_state_dict(override_weights)
else:
transformer.load_state_dict(override_weights)
else:
transformer = cls.from_pretrained( # type: ignore
model_name,
**kwargs,
)
_model_cache[spec] = transformer
if make_copy:
import copy
return copy.deepcopy(transformer)
else:
return transformer
@dataclass
class TokenizerSpec:
cls: type
model_name: str
kwargs: Dict[str, Any]
def __hash__(self):
return hash((
f"{self.cls.__module__}.{self.cls.__name__}",
self.model_name,
det_hash(self.kwargs),
))
_tokenizer_cache: Dict[TokenizerSpec, transformers.PreTrainedTokenizer] = {}
def get_tokenizer(cls: Type[T], model_name: str, **kwargs) -> T:
cache_key = TokenizerSpec(cls, model_name, kwargs)
global _tokenizer_cache
tokenizer = _tokenizer_cache.get(cache_key, None)
if tokenizer is None:
# Currently GPT2's fast tokenizer does NOT support adding a BOS token.
# This issue will be fixed soon (see https://github.com/huggingface/tokenizers/pull/1005) so that the
# fast tokenizer works correctly; until then, fall back to the slow tokenizer for OPT models.
if model_name.startswith('facebook/opt'):
kwargs['use_fast'] = False
elif model_name.startswith('t5-'):
# Workaround for another huggingface tokenizer bug.
kwargs['model_max_length'] = int(1e30)
tokenizer = cls.from_pretrained( # type: ignore
model_name,
**kwargs,
)
_tokenizer_cache[cache_key] = tokenizer
return tokenizer
def _clear_caches():
"""
Clears in-memory transformer and tokenizer caches.
"""
global _model_cache
global _tokenizer_cache
_model_cache.clear()
_tokenizer_cache.clear()
| catwalk-main | catwalk/cached_transformers.py |
from catwalk.model import Model
from catwalk.models import MODELS
from catwalk.task import Task
from catwalk.tasks import TASKS
| catwalk-main | catwalk/__init__.py |
import inspect
from abc import ABC
from copy import deepcopy
from typing import Sequence, Dict, Any, Iterator, Tuple, List, Optional, Union
import torch
from tango.common import Registrable, Tqdm
from tango.common.det_hash import DetHashWithVersion
from catwalk.task import Task
Instance = Dict[str, Any]
def tensor_args(args: Tuple[Any]) -> Tuple[Any, ...]:
"""
Annoyingly, torchmetrics only supports tensors as input, not raw values. So we have to convert raw values
into tensors.
"""
fixed_args: List[Any] = []
for arg in args:
if isinstance(arg, (float, int)):
fixed_args.append(torch.tensor(arg))
else:
fixed_args.append(arg)
return tuple(fixed_args)
def unsqueeze_args(args: Tuple[Any]) -> Tuple[Any, ...]:
"""
Further, torchmetrics can't handle single-instance calls when given tensors. It always needs the first
dimension of the tensors to be the instance dimension. So we add one.
"""
fixed_args: List[Any] = []
for arg in args:
if isinstance(arg, torch.Tensor):
fixed_args.append(arg.unsqueeze(0))
else:
fixed_args.append(arg)
return tuple(fixed_args)
_TorchmetricsResult = Union[torch.Tensor, Dict[str, '_TorchmetricsResult']]
_CatwalkResult = Union[float, Dict[str, '_CatwalkResult']]
def recursive_tolist(args: _TorchmetricsResult) -> _CatwalkResult:
if isinstance(args, dict):
return { key: recursive_tolist(value) for key, value in args.items() }
else:
return args.tolist()
class Model(Registrable, DetHashWithVersion, ABC):
VERSION = "002lst"
def predict(self, task: Task, instances: Sequence[Dict[str, Any]], **kwargs) -> Iterator[Dict[str, Any]]:
raise NotImplementedError()
def calculate_metrics(self, task: Task, predictions: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
metrics = task.make_metrics()
with Tqdm.tqdm(predictions, desc="Calculating metrics") as predictions_tqdm:
for prediction in predictions_tqdm:
for metric_name, metric_args in prediction.items():
try:
metric = metrics[metric_name]
except KeyError:
continue
metric_args = tensor_args(metric_args)
metric_args = unsqueeze_args(metric_args)
metric.update(*metric_args)
return {
metric_name: recursive_tolist(metric.compute())
for metric_name, metric in metrics.items()
}
@property
def supports_fewshot(self) -> bool:
return "num_shots" in inspect.signature(self.predict).parameters
def trainable_copy(self, **kwargs) -> "TrainableModel":
"""Returns a trainable version of this model.
Catwalk models by default are immutable. Trainable models are not, because they can be trained.
This is an optional method. Only implement it if you want to train your model through catwalk.
"""
raise NotImplementedError("This model does not support training.")
class TrainableModel(Model, torch.nn.Module, ABC):
"""
This is a catwalk model that also supports utility functions to make it possible to train.
"""
def __init__(self, inner_module: Optional[torch.nn.Module]):
super().__init__()
self.inner_module = inner_module
def forward(self, *args, **kwargs):
"""
This method takes the input created by the :meth:`collate_for_training()` method and returns a dictionary that contains
the loss under the key ``"loss"``.
"""
if self.inner_module is None:
raise NotImplementedError(
"If you want to be able to pass None as the inner_module to TrainableModule, "
"you need to override the forward() method."
)
return self.inner_module.forward(*args, **kwargs)
def collate_for_training(self, instances: Sequence[Tuple[Task, Instance]]) -> Any:
"""
Takes a batch of instances and turns them into inputs to the forward method (usually tensors).
Usually you would call this method from a PyTorch DataLoader. If you don't use PyTorch, you might have to
do something else.
:param instances: The instances to turn into tensors. Note that the input includes the task. Instances
could come from different tasks.
:return: Input suitable for the trainable model's ``forward()`` method.
"""
raise NotImplementedError
def trainable_copy(self, **kwargs) -> "TrainableModel":
return deepcopy(self)
class UnsupportedTaskError(Exception):
"""Thrown when the model doesn't support the task it's given."""
def __init__(self, model: Model, task: Task):
super().__init__(f"Model {model} does not support task {task}.")
self.model = model
self.task = task
| catwalk-main | catwalk/model.py |
import argparse
from tango import Workspace
from tango.common.logging import initialize_logging
from catwalk.steps import TabulateMetricsStep, FinetuneStep
from catwalk.tasks import TASK_SETS
def main():
initialize_logging()
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--task", type=str, nargs="+")
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--grad_acc", type=int, default=1)
parser.add_argument("--val_metric", type=str, default="acc")
parser.add_argument(
"-d",
"-w",
type=str,
default=None,
metavar="workspace",
dest="workspace",
help="the Tango workspace with the cache",
)
args = parser.parse_args()
if args.workspace is None:
workspace = None
else:
workspace = Workspace.from_url(args.workspace)
from catwalk.steps import CalculateMetricsStep
from catwalk.steps import PredictStep
tasks = set()
for task in args.task:
try:
tasks |= TASK_SETS[task]
except KeyError:
tasks.add(task)
model_step = FinetuneStep(
model=args.model,
tasks=tasks,
batch_size=args.batch_size,
grad_accum=args.grad_acc,
val_metric_name=args.val_metric
)
metric_task_dict = {}
for task in tasks:
predictions = PredictStep(model=model_step, task=task, batch_size=args.batch_size)
metrics = CalculateMetricsStep(
model=model_step, task=task, predictions=predictions
)
metric_task_dict[task] = metrics
table_step = TabulateMetricsStep(metrics=metric_task_dict)
table_step_result = table_step.result(workspace)
print("\n".join(table_step_result))
if __name__ == "__main__":
main()
| catwalk-main | catwalk/train.py |
import argparse
from tango import Workspace
from tango.common.logging import initialize_logging
from catwalk.steps import TabulateMetricsStep
from catwalk.tasks import TASK_SETS
_parser = argparse.ArgumentParser()
_parser.add_argument('--model', type=str, required=True)
_parser.add_argument('--task', type=str, nargs="+")
_parser.add_argument('--split', type=str)
_parser.add_argument('--batch_size', type=int, default=32)
_parser.add_argument('--num_shots', type=int)
_parser.add_argument('--fewshot_seed', type=int)
_parser.add_argument('--limit', type=int)
_parser.add_argument(
'-d', '-w',
type=str,
default=None,
metavar="workspace",
dest="workspace",
help="the Tango workspace with the cache")
def main(args: argparse.Namespace):
initialize_logging(log_level="WARNING")
if args.workspace is None:
workspace = None
else:
workspace = Workspace.from_url(args.workspace)
limit = args.limit if hasattr(args, "limit") else None
from catwalk.steps import CalculateMetricsStep
from catwalk.steps import PredictStep
tasks = set()
for task in args.task:
try:
tasks |= TASK_SETS[task]
except KeyError:
tasks.add(task)
kwargs = {}
if args.num_shots is not None:
kwargs["num_shots"] = args.num_shots
if args.fewshot_seed is not None:
kwargs["fewshot_seed"] = args.fewshot_seed
metric_task_dict = {}
for task in tasks:
predictions = PredictStep(
model=args.model,
task=task,
split=args.split,
batch_size=args.batch_size,
limit=limit,
**kwargs)
metrics = CalculateMetricsStep(
model=args.model,
task=task,
predictions=predictions)
metric_task_dict[task] = metrics
table_step = TabulateMetricsStep(metrics=metric_task_dict)
table_step_result = table_step.result(workspace)
print("\n".join(table_step_result))
if __name__ == "__main__":
main(_parser.parse_args())
| catwalk-main | catwalk/__main__.py |
from typing import Optional, cast, List
from tango.integrations.torch import TrainCallback
from catwalk import Task, Model
from catwalk.tasks import short_name_for_task_object
class CatwalkEvaluationCallback(TrainCallback):
def __init__(
self,
*args,
tasks: List[Task],
eval_limit: Optional[int],
eval_split: str = "validation",
**kwargs
):
super().__init__(*args, **kwargs)
self.tasks = tasks
self.eval_limit = eval_limit
self.eval_split = eval_split
def post_val_loop(
self, step: int, epoch: int, val_metric: float, best_val_metric: float
) -> None:
model_was_training = self.model.training
self.model.eval()
try:
catwalk_model = cast(Model, self.model)
for task in self.tasks:
instances = task.get_split(self.eval_split)
if self.eval_limit is not None:
instances = instances[:self.eval_limit]
predictions = catwalk_model.predict(task, instances)
metrics = catwalk_model.calculate_metrics(task, list(predictions))
metrics_string = []
for metric_name, metric_value in metrics.items():
try:
metric_value_string = ", ".join(f"{v:.3f}" for v in metric_value)
except TypeError as e:
if "object is not iterable" in str(e):
metric_value_string = f"{metric_value:.3f}"
else:
raise
metrics_string.append(f"{metric_name}: {metric_value_string}")
task_name = short_name_for_task_object(task) or str(task)
print(f"Metrics for {task_name}: {' '.join(metrics_string)}")
finally:
self.model.train(model_was_training)
| catwalk-main | catwalk/training_callback.py |
from typing import Union, Optional, Dict, Any
import torch
from torchmetrics.aggregation import BaseAggregator
class PerplexityMetric(BaseAggregator):
def __init__(
self,
nan_strategy: Union[str, float] = "warn",
**kwargs: Dict[str, Any],
):
super().__init__("sum", [], nan_strategy, **kwargs)
self.add_state("loglikelihood", default=torch.tensor(0.0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("num_tokens", default=torch.tensor(0, dtype=torch.int), dist_reduce_fx="sum")
def update(
self,
loglikelihood: Union[float, torch.Tensor],
num_tokens: Union[int, torch.Tensor]
) -> None: # type: ignore
loglikelihood = self._cast_and_nan_check_input(loglikelihood)
if not isinstance(num_tokens, torch.Tensor):
num_tokens = torch.tensor(num_tokens)
self.loglikelihood += loglikelihood.sum()
self.num_tokens += num_tokens.sum()
def compute(self) -> torch.Tensor:
return torch.exp(-self.loglikelihood / self.num_tokens)
| catwalk-main | catwalk/metrics/perplexity.py |
from catwalk.metrics.entropy import EntropyMetric
from catwalk.metrics.perplexity import PerplexityMetric
from catwalk.metrics.accuracy import AccuracyMetric
from catwalk.metrics.accuracy import RelativeAccuracyImprovementMetric
| catwalk-main | catwalk/metrics/__init__.py |
from typing import Union, List
import torch
from torchmetrics.aggregation import BaseAggregator
class AccuracyMetric(BaseAggregator):
"""
Unfortunately torchmetrics' multilabel accuracy makes you decide on the number of possible labels
beforehand. We need a metric that does not require this.
"""
def __iter__(self):
pass
def __init__(self):
super().__init__(fn="mean", default_value=torch.tensor(0.0, dtype=torch.float), nan_strategy="error")
self.add_state("correct_count", default=torch.tensor(0, dtype=torch.int), dist_reduce_fx="sum")
self.add_state("total_count", default=torch.tensor(0, dtype=torch.int), dist_reduce_fx="sum")
def update(self, logits: Union[List[float], torch.Tensor], label: Union[int, torch.Tensor]) -> None: # type: ignore
if isinstance(logits, List):
logits = torch.tensor(logits)
if logits.argmax() == label:
self.correct_count += 1
self.total_count += 1
def compute(self) -> torch.Tensor:
return torch.tensor(self.correct_count / self.total_count, dtype=torch.float)
class RelativeAccuracyImprovementMetric(AccuracyMetric):
def __init__(self, num_classes: int):
super().__init__()
self.baseline = 1 / num_classes
def compute(self) -> torch.Tensor:
return (super().compute() - self.baseline) / self.baseline
| catwalk-main | catwalk/metrics/accuracy.py |
import math
from typing import Union, Optional, Any, Dict
import torch
from torchmetrics.aggregation import BaseAggregator
class EntropyMetric(BaseAggregator):
def __init__(
self,
base: int = 2, # Does anyone ever use anything but 2 here?
nan_strategy: Union[str, float] = "warn",
**kwargs: Dict[str, Any],
):
super().__init__("sum", [], nan_strategy, **kwargs)
self.base = base
self.add_state("loglikelihood", default=torch.tensor(0.0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("characters", default=torch.tensor(0, dtype=torch.int), dist_reduce_fx="sum")
def update(
self,
loglikelihood: Union[float, torch.Tensor],
characters: Union[int, torch.Tensor]
) -> None: # type: ignore
loglikelihood = self._cast_and_nan_check_input(loglikelihood)
if not isinstance(characters, torch.Tensor):
characters = torch.tensor(characters)
self.loglikelihood += loglikelihood.sum()
self.characters += characters.sum()
def compute(self) -> torch.Tensor:
return -(self.loglikelihood / self.characters) / math.log(self.base)
| catwalk-main | catwalk/metrics/entropy.py |
from catwalk.tasks import HFDatasetsTask
from datasets import load_dataset
import functools
from typing import Optional
class MrqaTask(HFDatasetsTask):
TEST_DATASETS = {"race", "drop", "bioasq", "relationextraction", "textbookqa", "duorc.paraphraserc"}
DEV_DATASETS = {"newsqa", "searchqa", "triviaqa-web", "naturalquestionsshort", "hotpotqa"}
@functools.lru_cache
def has_split(self, split: str) -> bool:
if self.dataset_name in self.TEST_DATASETS:
return split == "test"
elif self.dataset_name in self.DEV_DATASETS:
return split == "validation"
return False
@functools.lru_cache
def dataset(self, split: str):
assert self.dataset_name is not None, "MRQA requires a dataset name as it contains multiple subsets"
assert_message = f"Specified task, {self.dataset_name}, is not in specified split, {split}." if split in {"validation", "test"} else f"No such split {split}."
assert self.has_split(split), assert_message
def filter_subset(example):
return example["subset"].lower() == self.dataset_name
loaded_dataset = load_dataset(self.dataset_path, split=split).filter(
filter_subset, load_from_cache_file=False) # caching results in wrong loading of cached dataset
# rename columns to match SQuAD format
loaded_dataset = loaded_dataset.rename_column("qid", "id")
loaded_dataset = loaded_dataset.remove_columns(["context_tokens", "question_tokens","answers"])
loaded_dataset = loaded_dataset.rename_column("detected_answers", "answers")
# preprocess answers to match format expected by SQuAD metric
def preprocess_answers(example):
example["answers"].update(
{'answer_start': [x["start"][0] for x in example["answers"]["char_spans"]]})
del example["answers"]["char_spans"]
del example["answers"]["token_spans"]
return example
loaded_dataset = loaded_dataset.map(preprocess_answers, load_from_cache_file=False)
return loaded_dataset
| catwalk-main | catwalk/tasks/mrqa.py |