from setuptools import setup, find_packages
setup(
name='coinrun',
packages=find_packages(),
version='0.0.1',
)
import numpy as np
from coinrun import setup_utils, make
def random_agent(num_envs=1, max_steps=100000):
setup_utils.setup_and_load(use_cmd_line_args=False)
env = make('standard', num_envs=num_envs)
for step in range(max_steps):
acts = np.array([env.action_space.sample() for _ in range(env.num_envs)])
_obs, rews, _dones, _infos = env.step(acts)
print("step", step, "rews", rews)
env.close()
if __name__ == '__main__':
    random_agent()
"""
Load an agent trained with train_agent.py and watch it play, or evaluate it.
"""
import time
import tensorflow as tf
import numpy as np
from coinrun import setup_utils
import coinrun.main_utils as utils
from coinrun.config import Config
from coinrun import policies, wrappers
mpi_print = utils.mpi_print
def create_act_model(sess, env, nenvs):
ob_space = env.observation_space
ac_space = env.action_space
policy = policies.get_policy()
act = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False)
return act
def enjoy_env_sess(sess):
should_render = True
should_eval = Config.TRAIN_EVAL or Config.TEST_EVAL
rep_count = Config.REP
if should_eval:
env = utils.make_general_env(Config.NUM_EVAL)
should_render = False
else:
env = utils.make_general_env(1)
env = wrappers.add_final_wrappers(env)
if should_render:
from gym.envs.classic_control import rendering
nenvs = env.num_envs
agent = create_act_model(sess, env, nenvs)
sess.run(tf.global_variables_initializer())
loaded_params = utils.load_params_for_scope(sess, 'model')
if not loaded_params:
print('NO SAVED PARAMS LOADED')
obs = env.reset()
t_step = 0
if should_render:
viewer = rendering.SimpleImageViewer()
should_render_obs = not Config.IS_HIGH_RES
def maybe_render(info=None):
if should_render and not should_render_obs:
env.render()
maybe_render()
    scores = np.zeros(nenvs)  # float accumulator so fractional episode rewards are not truncated
score_counts = np.array([0] * nenvs)
curr_rews = np.zeros((nenvs, 3))
def should_continue():
if should_eval:
return np.sum(score_counts) < rep_count * nenvs
return True
state = agent.initial_state
done = np.zeros(nenvs)
while should_continue():
action, values, state, _ = agent.step(obs, state, done)
obs, rew, done, info = env.step(action)
if should_render and should_render_obs:
if np.shape(obs)[-1] % 3 == 0:
ob_frame = obs[0,:,:,-3:]
else:
ob_frame = obs[0,:,:,-1]
ob_frame = np.stack([ob_frame] * 3, axis=2)
viewer.imshow(ob_frame)
curr_rews[:,0] += rew
for i, d in enumerate(done):
if d:
if score_counts[i] < rep_count:
score_counts[i] += 1
if 'episode' in info[i]:
scores[i] += info[i].get('episode')['r']
if t_step % 100 == 0:
mpi_print('t', t_step, values[0], done[0], rew[0], curr_rews[0], np.shape(obs))
maybe_render(info[0])
t_step += 1
if should_render:
time.sleep(.02)
if done[0]:
if should_render:
mpi_print('ep_rew', curr_rews)
curr_rews[:] = 0
result = 0
if should_eval:
mean_score = np.mean(scores) / rep_count
max_idx = np.argmax(scores)
mpi_print('scores', scores / rep_count)
print('mean_score', mean_score)
mpi_print('max idx', max_idx)
mpi_mean_score = utils.mpi_average([mean_score])
mpi_print('mpi_mean', mpi_mean_score)
result = mean_score
return result
def main():
utils.setup_mpi_gpus()
setup_utils.setup_and_load()
with tf.Session() as sess:
enjoy_env_sess(sess)
if __name__ == '__main__':
    main()
"""
Train an agent using PPO2, based on OpenAI Baselines.
"""
import time
from mpi4py import MPI
import tensorflow as tf
from baselines.common import set_global_seeds
import coinrun.main_utils as utils
from coinrun import setup_utils, policies, wrappers, ppo2
from coinrun.config import Config
def main():
args = setup_utils.setup_and_load()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
seed = int(time.time()) % 10000
set_global_seeds(seed * 100 + rank)
utils.setup_mpi_gpus()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
nenvs = Config.NUM_ENVS
total_timesteps = int(256e6)
save_interval = args.save_interval
env = utils.make_general_env(nenvs, seed=rank)
with tf.Session(config=config):
env = wrappers.add_final_wrappers(env)
policy = policies.get_policy()
ppo2.learn(policy=policy,
env=env,
save_interval=save_interval,
nsteps=Config.NUM_STEPS,
nminibatches=Config.NUM_MINIBATCHES,
lam=0.95,
gamma=Config.GAMMA,
noptepochs=Config.PPO_EPOCHS,
log_interval=1,
ent_coef=Config.ENTROPY_COEFF,
lr=lambda f : f * Config.LEARNING_RATE,
cliprange=lambda f : f * 0.2,
total_timesteps=total_timesteps)
if __name__ == '__main__':
main()
from mpi4py import MPI
import argparse
import os
class ConfigSingle(object):
"""
A global config object that can be initialized from command line arguments or
keyword arguments.
"""
def __init__(self):
self.WORKDIR = './saved_models/'
self.TB_DIR = '/tmp/tensorflow'
if not os.path.exists(self.WORKDIR):
os.makedirs(self.WORKDIR, exist_ok=True)
self.LOG_ALL_MPI = True
self.SYNC_FROM_ROOT = True
arg_keys = []
bool_keys = []
type_keys = []
# The runid, used to determine the name for save files.
type_keys.append(('runid', 'run_id', str, 'tmp'))
# The runid whose parameters and settings you want to load.
type_keys.append(('resid', 'restore_id', str, None))
# The game to be played.
# One of {'standard', 'platform', 'maze'} (for CoinRun, CoinRun-Platforms, Random-Mazes)
type_keys.append(('gamet', 'game_type', str, 'standard', True))
# The convolutional architecture to use
# One of {'nature', 'impala', 'impalalarge'}
type_keys.append(('arch', 'architecture', str, 'impala', True))
# Should the model include an LSTM
type_keys.append(('lstm', 'use_lstm', int, 0, True))
# The number of parallel environments to run
type_keys.append(('ne', 'num_envs', int, 32, True))
# The number of levels in the training set.
# If NUM_LEVELS = 0, the training set is unbounded. All level seeds will be randomly generated.
# Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels in the paper.
type_keys.append(('nlev', 'num_levels', int, 0, True))
# Provided as a seed for training set generation.
        # If SET_SEED = -1, this seed is not used and level seeds will be drawn from the range [0, NUM_LEVELS).
# Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels in the paper.
# NOTE: This value must and will be saved, in order to use the same training set for evaluation and/or visualization.
type_keys.append(('set-seed', 'set_seed', int, -1, True))
# PPO Hyperparameters
type_keys.append(('ns', 'num_steps', int, 256))
type_keys.append(('nmb', 'num_minibatches', int, 8))
type_keys.append(('ppoeps', 'ppo_epochs', int, 3))
type_keys.append(('ent', 'entropy_coeff', float, .01))
type_keys.append(('lr', 'learning_rate', float, 5e-4))
type_keys.append(('gamma', 'gamma', float, 0.999))
# Should the agent's velocity be painted in the upper left corner of observations.
# 1/0 means True/False
# PAINT_VEL_INFO = -1 uses smart defaulting -- will default to 1 if GAME_TYPE is 'standard' (CoinRun), 0 otherwise
type_keys.append(('pvi', 'paint_vel_info', int, -1, True))
# Should batch normalization be used after each convolutional layer
# 1/0 means True/False
# This code only supports training-mode batch normalization (normalizing with statistics of the current batch).
# In practice, we found this is nearly as effective as tracking the moving average of the statistics.
# NOTE: Only applies to IMPALA and IMPALA-Large architectures
type_keys.append(('norm', 'use_batch_norm', int, 0, True))
# What dropout probability to use after each convolutional layer
# NOTE: Only applies to IMPALA and IMPALA-Large architectures
type_keys.append(('dropout', 'dropout', float, 0.0, True))
# Should data augmentation be used
# 1/0 means True/False
type_keys.append(('uda', 'use_data_augmentation', int, 0))
# The l2 penalty to use during training
type_keys.append(('l2', 'l2_weight', float, 0.0))
# The probability the agent's action is replaced with a random action
type_keys.append(('eps', 'epsilon_greedy', float, 0.0))
# The number of frames to stack for each observation.
# No frame stack is necessary if PAINT_VEL_INFO = 1
type_keys.append(('fs', 'frame_stack', int, 1, True))
# Should observations be transformed to grayscale
# 1/0 means True/False
type_keys.append(('ubw', 'use_black_white', int, 0, True))
# Overwrite the latest save file after this many updates
type_keys.append(('si', 'save_interval', int, 10))
# The number of evaluation environments to use
type_keys.append(('num-eval', 'num_eval', int, 20, True))
# The number of episodes to evaluate with each evaluation environment
type_keys.append(('rep', 'rep', int, 1))
        # Should half the workers act solely as test workers for evaluation.
        # These workers will run on test levels and will not contribute to training.
bool_keys.append(('test', 'test'))
# Perform evaluation with all levels sampled from the training set
bool_keys.append(('train-eval', 'train_eval'))
# Perform evaluation with all levels sampled from the test set (unseen levels of high difficulty)
bool_keys.append(('test-eval', 'test_eval'))
# Only generate high difficulty levels
bool_keys.append(('highd', 'high_difficulty'))
# Use high resolution images for rendering
bool_keys.append(('hres', 'is_high_res'))
self.RES_KEYS = []
for tk in type_keys:
arg_keys.append(self.process_field(tk[1]))
if (len(tk) > 4) and tk[4]:
self.RES_KEYS.append(tk[1])
for bk in bool_keys:
arg_keys.append(bk[1])
if (len(bk) > 2) and bk[2]:
self.RES_KEYS.append(bk[1])
self.arg_keys = arg_keys
self.bool_keys = bool_keys
self.type_keys = type_keys
self.load_data = {}
self.args_dict = {}
def is_test_rank(self):
if self.TEST:
rank = MPI.COMM_WORLD.Get_rank()
return rank % 2 == 1
return False
def get_test_frac(self):
return .5 if self.TEST else 0
def get_load_data(self, load_key='default'):
        if load_key not in self.load_data:
return None
return self.load_data[load_key]
def set_load_data(self, ld, load_key='default'):
self.load_data[load_key] = ld
def process_field(self, name):
return name.replace('-','_')
def deprocess_field(self, name):
return name.replace('_','-')
def parse_all_args(self, args):
assert isinstance(args, argparse.Namespace), 'expected argparse.Namespace object'
update_dict = vars(args)
self.parse_args_dict(update_dict)
def parse_args_dict(self, update_dict):
self.args_dict.update(update_dict)
for ak in self.args_dict:
val = self.args_dict[ak]
if isinstance(val, str):
val = self.process_field(val)
setattr(self, ak.upper(), val)
self.compute_args_dependencies()
def compute_args_dependencies(self):
if self.is_test_rank():
self.NUM_LEVELS = 0
self.USE_DATA_AUGMENTATION = 0
self.EPSILON_GREEDY = 0
self.HIGH_DIFFICULTY = 1
if self.PAINT_VEL_INFO < 0:
if self.GAME_TYPE == 'standard':
self.PAINT_VEL_INFO = 1
else:
self.PAINT_VEL_INFO = 0
if self.TEST_EVAL:
self.NUM_LEVELS = 0
self.HIGH_DIFFICULTY = 1
self.TRAIN_TEST_COMM = MPI.COMM_WORLD.Split(1 if self.is_test_rank() else 0, 0)
def get_load_filename(self, base_name=None, restore_id=None):
if restore_id is None:
restore_id = Config.RESTORE_ID
if restore_id is None:
return None
filename = Config.get_save_file_for_rank(0, self.process_field(restore_id), base_name=base_name)
return filename
def get_save_path(self, runid=None):
return self.WORKDIR + self.get_save_file(runid)
def get_save_file_for_rank(self, rank, runid=None, base_name=None):
if runid is None:
runid = self.RUN_ID
extra = ''
if base_name is not None:
extra = '_' + base_name
return 'sav_' + runid + extra + '_' + str(rank)
def get_save_file(self, runid=None, base_name=None):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
return self.get_save_file_for_rank(rank, runid, base_name=base_name)
def get_arg_text(self):
arg_strs = []
for key in self.args_dict:
arg_strs.append(key + '=' + str(self.args_dict[key]))
return arg_strs
def get_args_dict(self):
_args_dict = {}
_args_dict.update(self.args_dict)
return _args_dict
def initialize_args(self, use_cmd_line_args=True, **kwargs):
default_args = {}
for tk in self.type_keys:
default_args[self.process_field(tk[1])] = tk[3]
for bk in self.bool_keys:
default_args[bk[1]] = False
default_args.update(kwargs)
parser = argparse.ArgumentParser()
for tk in self.type_keys:
parser.add_argument('-' + tk[0], '--' + self.deprocess_field(tk[1]), type=tk[2], default=default_args[tk[1]])
for bk in self.bool_keys:
parser.add_argument('--' + bk[0], dest=bk[1], action='store_true')
bk_kwargs = {bk[1]: default_args[bk[1]]}
parser.set_defaults(**bk_kwargs)
if use_cmd_line_args:
args = parser.parse_args()
else:
args = parser.parse_args(args=[])
self.parse_all_args(args)
return args
Config = ConfigSingle()
"""
This is a copy of PPO from openai/baselines (https://github.com/openai/baselines/blob/52255beda5f5c8760b0ae1f676aa656bb1a61f80/baselines/ppo2/ppo2.py) with some minor changes.
"""
import time
import joblib
import numpy as np
import tensorflow as tf
from collections import deque
from mpi4py import MPI
from coinrun.tb_utils import TB_Writer
import coinrun.main_utils as utils
from coinrun.config import Config
mpi_print = utils.mpi_print
from baselines.common.runners import AbstractEnvRunner
from baselines.common.tf_util import initialize
from baselines.common.mpi_util import sync_from_root
class MpiAdamOptimizer(tf.train.AdamOptimizer):
"""Adam optimizer that averages gradients across mpi processes."""
def __init__(self, comm, **kwargs):
self.comm = comm
self.train_frac = 1.0 - Config.get_test_frac()
tf.train.AdamOptimizer.__init__(self, **kwargs)
def compute_gradients(self, loss, var_list, **kwargs):
grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
if Config.is_test_rank():
flat_grad = tf.zeros_like(flat_grad)
shapes = [v.shape.as_list() for g, v in grads_and_vars]
sizes = [int(np.prod(s)) for s in shapes]
num_tasks = self.comm.Get_size()
buf = np.zeros(sum(sizes), np.float32)
def _collect_grads(flat_grad):
self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
np.divide(buf, float(num_tasks) * self.train_frac, out=buf)
return buf
avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
avg_flat_grad.set_shape(flat_grad.shape)
avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
for g, (_, v) in zip(avg_grads, grads_and_vars)]
return avg_grads_and_vars
class Model(object):
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm):
sess = tf.get_default_session()
train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps)
norm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
act_model = policy(sess, ob_space, ac_space, nbatch_act, 1)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
OLDVPRED = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred - R)
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
params = tf.trainable_variables()
weight_params = [v for v in params if '/b' not in v.name]
total_num_params = 0
for p in params:
shape = p.get_shape().as_list()
num_params = np.prod(shape)
mpi_print('param', p, num_params)
total_num_params += num_params
mpi_print('total num params:', total_num_params)
l2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in weight_params])
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef + l2_loss * Config.L2_WEIGHT
if Config.SYNC_FROM_ROOT:
trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-5)
else:
trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
grads_and_var = trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
_train = trainer.apply_gradients(grads_and_var)
def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
advs = returns - values
adv_mean = np.mean(advs, axis=0, keepdims=True)
adv_std = np.std(advs, axis=0, keepdims=True)
advs = (advs - adv_mean) / (adv_std + 1e-8)
td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr,
CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run(
[pg_loss, vf_loss, entropy, approxkl, clipfrac, l2_loss, _train],
td_map
)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac', 'l2_loss']
def save(save_path):
ps = sess.run(params)
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = save
self.load = load
if Config.SYNC_FROM_ROOT:
if MPI.COMM_WORLD.Get_rank() == 0:
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
sync_from_root(sess, global_variables) #pylint: disable=E1101
else:
initialize()
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
self.lam = lam
self.gamma = gamma
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
        # For each of the nsteps rollout steps:
        for _ in range(self.nsteps):
            # Given observations, get actions, values, and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in env and look at the results
            # infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def constfn(val):
def f(_):
return val
return f
def learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
mpi_size = comm.Get_size()
sess = tf.get_default_session()
tb_writer = TB_Writer(sess)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
utils.load_all_params(sess)
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
epinfobuf10 = deque(maxlen=10)
epinfobuf100 = deque(maxlen=100)
tfirststart = time.time()
active_ep_buf = epinfobuf100
nupdates = total_timesteps//nbatch
mean_rewards = []
datapoints = []
run_t_total = 0
train_t_total = 0
can_save = True
checkpoints = [32, 64]
saved_key_checkpoints = [False] * len(checkpoints)
if Config.SYNC_FROM_ROOT and rank != 0:
can_save = False
def save_model(base_name=None):
base_dict = {'datapoints': datapoints}
utils.save_params_in_scopes(sess, ['model'], Config.get_save_file(base_name=base_name), base_dict)
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
nbatch_train = nbatch // nminibatches
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
lrnow = lr(frac)
cliprangenow = cliprange(frac)
mpi_print('collecting rollouts...')
run_tstart = time.time()
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run()
epinfobuf10.extend(epinfos)
epinfobuf100.extend(epinfos)
run_elapsed = time.time() - run_tstart
run_t_total += run_elapsed
mpi_print('rollouts complete')
mblossvals = []
mpi_print('updating parameters...')
train_tstart = time.time()
if states is None: # nonrecurrent version
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# update the dropout mask
sess.run([model.train_model.dropout_assign_ops])
train_elapsed = time.time() - train_tstart
train_t_total += train_elapsed
mpi_print('update complete')
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
step = update*nbatch
rew_mean_10 = utils.process_ep_buf(active_ep_buf, tb_writer=tb_writer, suffix='', step=step)
ep_len_mean = np.nanmean([epinfo['l'] for epinfo in active_ep_buf])
mpi_print('\n----', update)
mean_rewards.append(rew_mean_10)
datapoints.append([step, rew_mean_10])
tb_writer.log_scalar(ep_len_mean, 'ep_len_mean')
tb_writer.log_scalar(fps, 'fps')
mpi_print('time_elapsed', tnow - tfirststart, run_t_total, train_t_total)
mpi_print('timesteps', update*nsteps, total_timesteps)
mpi_print('eplenmean', ep_len_mean)
mpi_print('eprew', rew_mean_10)
mpi_print('fps', fps)
mpi_print('total_timesteps', update*nbatch)
mpi_print([epinfo['r'] for epinfo in epinfobuf10])
if len(mblossvals):
for (lossval, lossname) in zip(lossvals, model.loss_names):
mpi_print(lossname, lossval)
tb_writer.log_scalar(lossval, lossname)
mpi_print('----\n')
if can_save:
if save_interval and (update % save_interval == 0):
save_model()
for j, checkpoint in enumerate(checkpoints):
if (not saved_key_checkpoints[j]) and (step >= (checkpoint * 1e6)):
saved_key_checkpoints[j] = True
save_model(str(checkpoint) + 'M')
save_model()
env.close()
return mean_rewards
import tensorflow as tf
from mpi4py import MPI
from coinrun.config import Config
import numpy as np
def clean_tb_dir():
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
if tf.gfile.Exists(Config.TB_DIR):
tf.gfile.DeleteRecursively(Config.TB_DIR)
tf.gfile.MakeDirs(Config.TB_DIR)
comm.Barrier()
class TB_Writer(object):
def __init__(self, sess):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
clean_tb_dir()
tb_writer = tf.summary.FileWriter(Config.TB_DIR + '/' + Config.RUN_ID + '_' + str(rank), sess.graph)
total_steps = [0]
should_log = (rank == 0 or Config.LOG_ALL_MPI)
if should_log:
hyperparams = np.array(Config.get_arg_text())
hyperparams_tensor = tf.constant(hyperparams)
summary_op = tf.summary.text("hyperparameters info", hyperparams_tensor)
summary = sess.run(summary_op)
tb_writer.add_summary(summary)
def add_summary(_merged, interval=1):
if should_log:
total_steps[0] += 1
if total_steps[0] % interval == 0:
tb_writer.add_summary(_merged, total_steps[0])
tb_writer.flush()
tuples = []
def make_scalar_graph(name):
scalar_ph = tf.placeholder(name='scalar_' + name, dtype=tf.float32)
scalar_summary = tf.summary.scalar(name, scalar_ph)
merged = tf.summary.merge([scalar_summary])
tuples.append((scalar_ph, merged))
name_dict = {}
curr_name_idx = [0]
def log_scalar(x, name, step=-1):
if not name in name_dict:
name_dict[name] = curr_name_idx[0]
tf_name = (name + '_' + Config.RUN_ID) if curr_name_idx[0] == 0 else name
make_scalar_graph(tf_name)
curr_name_idx[0] += 1
idx = name_dict[name]
scalar_ph, merged = tuples[idx]
if should_log:
if step == -1:
step = total_steps[0]
total_steps[0] += 1
_merged = sess.run(merged, {scalar_ph: x})
tb_writer.add_summary(_merged, step)
tb_writer.flush()
self.add_summary = add_summary
self.log_scalar = log_scalar
from .coinrunenv import init_args_and_threads
from .coinrunenv import make
__all__ = [
'init_args_and_threads',
'make'
]
|
import gym
import numpy as np
class EpsilonGreedyWrapper(gym.Wrapper):
"""
Wrapper to perform a random action each step instead of the requested action,
with the provided probability.
"""
def __init__(self, env, prob=0.05):
gym.Wrapper.__init__(self, env)
self.prob = prob
self.num_envs = env.num_envs
def reset(self):
return self.env.reset()
def step(self, action):
        if np.random.uniform() < self.prob:
action = np.random.randint(self.env.action_space.n, size=self.num_envs)
return self.env.step(action)
class EpisodeRewardWrapper(gym.Wrapper):
def __init__(self, env):
env.metadata = {'render.modes': []}
env.reward_range = (-float('inf'), float('inf'))
nenvs = env.num_envs
self.num_envs = nenvs
super(EpisodeRewardWrapper, self).__init__(env)
self.aux_rewards = None
self.num_aux_rews = None
def reset(**kwargs):
self.rewards = np.zeros(nenvs)
self.lengths = np.zeros(nenvs)
self.aux_rewards = None
self.long_aux_rewards = None
return self.env.reset(**kwargs)
def step(action):
obs, rew, done, infos = self.env.step(action)
if self.aux_rewards is None:
info = infos[0]
if 'aux_rew' in info:
self.num_aux_rews = len(infos[0]['aux_rew'])
else:
self.num_aux_rews = 0
self.aux_rewards = np.zeros((nenvs, self.num_aux_rews), dtype=np.float32)
self.long_aux_rewards = np.zeros((nenvs, self.num_aux_rews), dtype=np.float32)
self.rewards += rew
self.lengths += 1
use_aux = self.num_aux_rews > 0
if use_aux:
for i, info in enumerate(infos):
self.aux_rewards[i,:] += info['aux_rew']
self.long_aux_rewards[i,:] += info['aux_rew']
for i, d in enumerate(done):
if d:
epinfo = {'r': round(self.rewards[i], 6), 'l': self.lengths[i], 't': 0}
aux_dict = {}
for nr in range(self.num_aux_rews):
aux_dict['aux_' + str(nr)] = self.aux_rewards[i,nr]
if 'ale.lives' in infos[i]:
game_over_rew = np.nan
is_game_over = infos[i]['ale.lives'] == 0
if is_game_over:
game_over_rew = self.long_aux_rewards[i,0]
self.long_aux_rewards[i,:] = 0
aux_dict['game_over_rew'] = game_over_rew
epinfo['aux_dict'] = aux_dict
infos[i]['episode'] = epinfo
self.rewards[i] = 0
self.lengths[i] = 0
self.aux_rewards[i,:] = 0
return obs, rew, done, infos
self.reset = reset
self.step = step
def add_final_wrappers(env):
env = EpisodeRewardWrapper(env)
    return env
"""
Run a CoinRun environment in a window where you can interact with it using the keyboard
"""
from coinrun.coinrunenv import lib
from coinrun import setup_utils
def main():
setup_utils.setup_and_load(paint_vel_info=0)
print("""Control with arrow keys,
F1, F2 -- switch resolution,
F5, F6, F7, F8 -- zoom,
F9 -- switch reconstruction target picture,
F10 -- switch lasers
""")
lib.test_main_loop()
if __name__ == '__main__':
    main()
import tensorflow as tf
import os
import joblib
import numpy as np
from mpi4py import MPI
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from coinrun.config import Config
from coinrun import setup_utils, wrappers
import platform
def make_general_env(num_env, seed=0, use_sub_proc=True):
from coinrun import coinrunenv
env = coinrunenv.make(Config.GAME_TYPE, num_env)
if Config.FRAME_STACK > 1:
env = VecFrameStack(env, Config.FRAME_STACK)
epsilon = Config.EPSILON_GREEDY
if epsilon > 0:
env = wrappers.EpsilonGreedyWrapper(env, epsilon)
return env
def file_to_path(filename):
return setup_utils.file_to_path(filename)
def load_all_params(sess):
load_params_for_scope(sess, 'model')
def load_params_for_scope(sess, scope, load_key='default'):
load_data = Config.get_load_data(load_key)
if load_data is None:
return False
params_dict = load_data['params']
if scope in params_dict:
print('Loading saved file for scope', scope)
loaded_params = params_dict[scope]
loaded_params, params = get_savable_params(loaded_params, scope, keep_heads=True)
restore_params(sess, loaded_params, params)
return True
def get_savable_params(loaded_params, scope, keep_heads=False):
params = tf.trainable_variables(scope)
filtered_params = []
filtered_loaded = []
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
for p, loaded_p in zip(params, loaded_params):
keep = True
if any((scope + '/' + x) in p.name for x in ['v','pi']):
keep = keep_heads
if keep:
filtered_params.append(p)
filtered_loaded.append(loaded_p)
else:
print('drop', p)
return filtered_loaded, filtered_params
def restore_params(sess, loaded_params, params):
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
restores = []
for p, loaded_p in zip(params, loaded_params):
print('restoring', p)
restores.append(p.assign(loaded_p))
sess.run(restores)
def save_params_in_scopes(sess, scopes, filename, base_dict=None):
data_dict = {}
if base_dict is not None:
data_dict.update(base_dict)
save_path = file_to_path(filename)
data_dict['args'] = Config.get_args_dict()
param_dict = {}
for scope in scopes:
params = tf.trainable_variables(scope)
if len(params) > 0:
print('saving scope', scope, filename)
ps = sess.run(params)
param_dict[scope] = ps
data_dict['params'] = param_dict
joblib.dump(data_dict, save_path)
def setup_mpi_gpus():
if 'RCALL_NUM_GPU' not in os.environ:
return
num_gpus = int(os.environ['RCALL_NUM_GPU'])
node_id = platform.node()
nodes = MPI.COMM_WORLD.allgather(node_id)
local_rank = len([n for n in nodes[:MPI.COMM_WORLD.Get_rank()] if n == node_id])
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)
def is_mpi_root():
return MPI.COMM_WORLD.Get_rank() == 0
def tf_initialize(sess):
sess.run(tf.initialize_all_variables())
sync_from_root(sess)
def sync_from_root(sess, vars=None):
if vars is None:
vars = tf.trainable_variables()
if Config.SYNC_FROM_ROOT:
rank = MPI.COMM_WORLD.Get_rank()
print('sync from root', rank)
for var in vars:
if rank == 0:
MPI.COMM_WORLD.bcast(sess.run(var))
else:
sess.run(tf.assign(var, MPI.COMM_WORLD.bcast(None)))
def mpi_average(values):
return mpi_average_comm(values, MPI.COMM_WORLD)
def mpi_average_comm(values, comm):
size = comm.size
x = np.array(values)
buf = np.zeros_like(x)
comm.Allreduce(x, buf, op=MPI.SUM)
buf = buf / size
return buf
def mpi_average_train_test(values):
return mpi_average_comm(values, Config.TRAIN_TEST_COMM)
def mpi_print(*args):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
print(*args)
def process_ep_buf(epinfobuf, tb_writer=None, suffix='', step=0):
rewards = [epinfo['r'] for epinfo in epinfobuf]
rew_mean = np.nanmean(rewards)
if Config.SYNC_FROM_ROOT:
rew_mean = mpi_average_train_test([rew_mean])[0]
if tb_writer is not None:
tb_writer.log_scalar(rew_mean, 'rew_mean' + suffix, step)
aux_dicts = []
if len(epinfobuf) > 0 and 'aux_dict' in epinfobuf[0]:
aux_dicts = [epinfo['aux_dict'] for epinfo in epinfobuf]
if len(aux_dicts) > 0:
keys = aux_dicts[0].keys()
for key in keys:
sub_rews = [aux_dict[key] for aux_dict in aux_dicts]
sub_rew = np.nanmean(sub_rews)
if tb_writer is not None:
tb_writer.log_scalar(sub_rew, key, step)
return rew_mean
from coinrun.config import Config
import os
import joblib
def load_for_setup_if_necessary():
restore_file(Config.RESTORE_ID)
def restore_file(restore_id, load_key='default'):
if restore_id is not None:
load_file = Config.get_load_filename(restore_id=restore_id)
filepath = file_to_path(load_file)
load_data = joblib.load(filepath)
Config.set_load_data(load_data, load_key=load_key)
restored_args = load_data['args']
sub_dict = {}
res_keys = Config.RES_KEYS
for key in res_keys:
if key in restored_args:
sub_dict[key] = restored_args[key]
else:
print('warning key %s not restored' % key)
Config.parse_args_dict(sub_dict)
from coinrun.coinrunenv import init_args_and_threads
init_args_and_threads(4)
def setup_and_load(use_cmd_line_args=True, **kwargs):
"""
Initialize the global config using command line options, defaulting to the values in `config.py`.
`use_cmd_line_args`: set to False to ignore command line arguments passed to the program
`**kwargs`: override the defaults from `config.py` with these values
"""
args = Config.initialize_args(use_cmd_line_args=use_cmd_line_args, **kwargs)
load_for_setup_if_necessary()
return args
def file_to_path(filename):
    return os.path.join(Config.WORKDIR, filename)
from coinrun import random_agent
def test_coinrun():
random_agent.random_agent(num_envs=16, max_steps=100)
if __name__ == '__main__':
    test_coinrun()
import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm
from baselines.common.distributions import make_pdtype
from baselines.common.input import observation_input
from coinrun.config import Config
def impala_cnn(images, depths=[16, 32, 32]):
"""
Model used in the paper "IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures" https://arxiv.org/abs/1802.01561
"""
use_batch_norm = Config.USE_BATCH_NORM == 1
dropout_layer_num = [0]
dropout_assign_ops = []
def dropout_layer(out):
if Config.DROPOUT > 0:
out_shape = out.get_shape().as_list()
num_features = np.prod(out_shape[1:])
var_name = 'mask_' + str(dropout_layer_num[0])
batch_seed_shape = out_shape[1:]
batch_seed = tf.get_variable(var_name, shape=batch_seed_shape, initializer=tf.random_uniform_initializer(minval=0, maxval=1), trainable=False)
batch_seed_assign = tf.assign(batch_seed, tf.random_uniform(batch_seed_shape, minval=0, maxval=1))
dropout_assign_ops.append(batch_seed_assign)
curr_mask = tf.sign(tf.nn.relu(batch_seed[None,...] - Config.DROPOUT))
curr_mask = curr_mask * (1.0 / (1.0 - Config.DROPOUT))
out = out * curr_mask
dropout_layer_num[0] += 1
return out
def conv_layer(out, depth):
out = tf.layers.conv2d(out, depth, 3, padding='same')
out = dropout_layer(out)
if use_batch_norm:
out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=True)
return out
def residual_block(inputs):
depth = inputs.get_shape()[-1].value
out = tf.nn.relu(inputs)
out = conv_layer(out, depth)
out = tf.nn.relu(out)
out = conv_layer(out, depth)
return out + inputs
def conv_sequence(inputs, depth):
out = conv_layer(inputs, depth)
out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same')
out = residual_block(out)
out = residual_block(out)
return out
out = images
for depth in depths:
out = conv_sequence(out, depth)
out = tf.layers.flatten(out)
out = tf.nn.relu(out)
out = tf.layers.dense(out, 256, activation=tf.nn.relu)
return out, dropout_assign_ops
def nature_cnn(scaled_images, **conv_kwargs):
"""
Model used in the paper "Human-level control through deep reinforcement learning"
https://www.nature.com/articles/nature14236
"""
def activ(curr):
return tf.nn.relu(curr)
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
def choose_cnn(images):
arch = Config.ARCHITECTURE
scaled_images = tf.cast(images, tf.float32) / 255.
dropout_assign_ops = []
if arch == 'nature':
out = nature_cnn(scaled_images)
elif arch == 'impala':
out, dropout_assign_ops = impala_cnn(scaled_images)
elif arch == 'impalalarge':
out, dropout_assign_ops = impala_cnn(scaled_images, depths=[32, 64, 64, 64, 64])
else:
assert(False)
return out, dropout_assign_ops
class LstmPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256):
nenv = nbatch // nsteps
self.pdtype = make_pdtype(ac_space)
X, processed_x = observation_input(ob_space, nbatch)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
h, self.dropout_assign_ops = choose_cnn(processed_x)
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
vf = fc(h5, 'v', 1)[:,0]
self.pd, self.pi = self.pdtype.pdfromlatent(h5)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
return sess.run([a0, vf, snew, neglogp0], {X:ob, S:state, M:mask})
def value(ob, state, mask):
return sess.run(vf, {X:ob, S:state, M:mask})
self.X = X
self.M = M
self.S = S
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, **conv_kwargs): #pylint: disable=W0613
self.pdtype = make_pdtype(ac_space)
X, processed_x = observation_input(ob_space, nbatch)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
h, self.dropout_assign_ops = choose_cnn(processed_x)
vf = fc(h, 'v', 1)[:,0]
self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.vf = vf
self.step = step
self.value = value
def get_policy():
use_lstm = Config.USE_LSTM
if use_lstm == 1:
policy = LstmPolicy
elif use_lstm == 0:
policy = CnnPolicy
else:
assert(False)
return policy
"""
Python interface to the CoinRun shared library using ctypes.
On import, this will attempt to build the shared library.
"""
import os
import atexit
import random
import sys
from ctypes import c_int, c_char_p, c_float, c_bool
import gym
import gym.spaces
import numpy as np
import numpy.ctypeslib as npct
from baselines.common.vec_env import VecEnv
from baselines import logger
from coinrun.config import Config
from mpi4py import MPI
from baselines.common import mpi_util
# if the environment is crashing, try using the debug build to get
# a readable stack trace
DEBUG = False
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
game_versions = {
'standard': 1000,
'platform': 1001,
'maze': 1002,
}
def build():
lrank, _lsize = mpi_util.get_local_rank_size(MPI.COMM_WORLD)
if lrank == 0:
dirname = os.path.dirname(__file__)
if len(dirname):
make_cmd = "QT_SELECT=5 make -C %s" % dirname
else:
make_cmd = "QT_SELECT=5 make"
r = os.system(make_cmd)
if r != 0:
logger.error('coinrun: make failed')
sys.exit(1)
MPI.COMM_WORLD.barrier()
build()
if DEBUG:
lib_path = '.build-debug/coinrun_cpp_d'
else:
lib_path = '.build-release/coinrun_cpp'
lib = npct.load_library(lib_path, os.path.dirname(__file__))
lib.init.argtypes = [c_int]
lib.get_NUM_ACTIONS.restype = c_int
lib.get_RES_W.restype = c_int
lib.get_RES_H.restype = c_int
lib.get_VIDEORES.restype = c_int
lib.vec_create.argtypes = [
c_int, # game_type
c_int, # nenvs
c_int, # lump_n
c_bool, # want_hires_render
c_float, # default_zoom
]
lib.vec_create.restype = c_int
lib.vec_close.argtypes = [c_int]
lib.vec_step_async_discrete.argtypes = [c_int, npct.ndpointer(dtype=np.int32, ndim=1)]
lib.initialize_args.argtypes = [npct.ndpointer(dtype=np.int32, ndim=1)]
lib.initialize_set_monitor_dir.argtypes = [c_char_p, c_int]
lib.vec_wait.argtypes = [
c_int,
npct.ndpointer(dtype=np.uint8, ndim=4), # normal rgb
npct.ndpointer(dtype=np.uint8, ndim=4), # larger rgb for render()
npct.ndpointer(dtype=np.float32, ndim=1), # rew
npct.ndpointer(dtype=np.bool, ndim=1), # done
]
already_inited = False
def init_args_and_threads(cpu_count=4,
monitor_csv_policy='all',
rand_seed=None):
"""
Perform one-time global init for the CoinRun library. This must be called
before creating an instance of CoinRunVecEnv. You should not
call this multiple times from the same process.
"""
os.environ['COINRUN_RESOURCES_PATH'] = os.path.join(SCRIPT_DIR, 'assets')
is_high_difficulty = Config.HIGH_DIFFICULTY
if rand_seed is None:
rand_seed = random.SystemRandom().randint(0, 1000000000)
# ensure different MPI processes get different seeds (just in case SystemRandom implementation is poor)
mpi_rank, mpi_size = mpi_util.get_local_rank_size(MPI.COMM_WORLD)
rand_seed = rand_seed - rand_seed % mpi_size + mpi_rank
int_args = np.array([int(is_high_difficulty), Config.NUM_LEVELS, int(Config.PAINT_VEL_INFO), Config.USE_DATA_AUGMENTATION, game_versions[Config.GAME_TYPE], Config.SET_SEED, rand_seed]).astype(np.int32)
lib.initialize_args(int_args)
lib.initialize_set_monitor_dir(logger.get_dir().encode('utf-8'), {'off': 0, 'first_env': 1, 'all': 2}[monitor_csv_policy])
global already_inited
if already_inited:
return
lib.init(cpu_count)
already_inited = True
@atexit.register
def shutdown():
global already_inited
if not already_inited:
return
lib.coinrun_shutdown()
class CoinRunVecEnv(VecEnv):
"""
This is the CoinRun VecEnv, all CoinRun environments are just instances
of this class with different values for `game_type`
    `game_type`: string key for the game to create ('standard', 'platform', or 'maze'); mapped to an int id via `game_versions`, see `enum GameType` in `coinrun.cpp`
`num_envs`: number of environments to create in this VecEnv
`lump_n`: only used when the environment creates `monitor.csv` files
`default_zoom`: controls how much of the level the agent can see
"""
def __init__(self, game_type, num_envs, lump_n=0, default_zoom=5.0):
self.metadata = {'render.modes': []}
self.reward_range = (-float('inf'), float('inf'))
self.NUM_ACTIONS = lib.get_NUM_ACTIONS()
self.RES_W = lib.get_RES_W()
self.RES_H = lib.get_RES_H()
self.VIDEORES = lib.get_VIDEORES()
self.buf_rew = np.zeros([num_envs], dtype=np.float32)
self.buf_done = np.zeros([num_envs], dtype=np.bool)
self.buf_rgb = np.zeros([num_envs, self.RES_H, self.RES_W, 3], dtype=np.uint8)
self.hires_render = Config.IS_HIGH_RES
if self.hires_render:
self.buf_render_rgb = np.zeros([num_envs, self.VIDEORES, self.VIDEORES, 3], dtype=np.uint8)
else:
self.buf_render_rgb = np.zeros([1, 1, 1, 1], dtype=np.uint8)
num_channels = 1 if Config.USE_BLACK_WHITE else 3
obs_space = gym.spaces.Box(0, 255, shape=[self.RES_H, self.RES_W, num_channels], dtype=np.uint8)
super().__init__(
num_envs=num_envs,
observation_space=obs_space,
action_space=gym.spaces.Discrete(self.NUM_ACTIONS),
)
self.handle = lib.vec_create(
game_versions[game_type],
self.num_envs,
lump_n,
self.hires_render,
default_zoom)
self.dummy_info = [{} for _ in range(num_envs)]
def __del__(self):
if hasattr(self, 'handle'):
lib.vec_close(self.handle)
self.handle = 0
def close(self):
lib.vec_close(self.handle)
self.handle = 0
def reset(self):
print("CoinRun ignores resets")
obs, _, _, _ = self.step_wait()
return obs
def get_images(self):
if self.hires_render:
return self.buf_render_rgb
else:
return self.buf_rgb
def step_async(self, actions):
assert actions.dtype in [np.int32, np.int64]
actions = actions.astype(np.int32)
lib.vec_step_async_discrete(self.handle, actions)
def step_wait(self):
self.buf_rew = np.zeros_like(self.buf_rew)
self.buf_done = np.zeros_like(self.buf_done)
lib.vec_wait(
self.handle,
self.buf_rgb,
self.buf_render_rgb,
self.buf_rew,
self.buf_done)
obs_frames = self.buf_rgb
if Config.USE_BLACK_WHITE:
obs_frames = np.mean(obs_frames, axis=-1).astype(np.uint8)[...,None]
return obs_frames, self.buf_rew, self.buf_done, self.dummy_info
def make(env_id, num_envs, **kwargs):
assert env_id in game_versions, 'cannot find environment "%s", maybe you mean one of %s' % (env_id, list(game_versions.keys()))
return CoinRunVecEnv(env_id, num_envs, **kwargs)
import json
import pickle
import math
import sys
import argparse
import warnings
from os import makedirs
from os.path import basename, join, exists, dirname, splitext, realpath
from wikidata_linker_utils.progressbar import get_progress_bar
from dataset import TSVDataset, CombinedDataset, H5Dataset, ClassificationHandler
from batchifier import (iter_batches_single_threaded,
requires_vocab,
requires_character_convolution,
get_feature_vocabs)
import tensorflow as tf
import numpy as np
try:
RNNCell = tf.nn.rnn_cell.RNNCell
TFLSTMCell = tf.nn.rnn_cell.LSTMCell
MultiRNNCell = tf.nn.rnn_cell.MultiRNNCell
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
from tensorflow.contrib.cudnn_rnn import CudnnLSTM
except AttributeError:
RNNCell = tf.contrib.rnn.RNNCell
TFLSTMCell = tf.contrib.rnn.LSTMCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM
from tensorflow.python.client import device_lib
class LazyAdamOptimizer(tf.train.AdamOptimizer):
"""Variant of the Adam optimizer that handles sparse updates more efficiently.
The original Adam algorithm maintains two moving-average accumulators for
each trainable variable; the accumulators are updated at every step.
This class provides lazier handling of gradient updates for sparse variables.
It only updates moving-average accumulators for sparse variable indices that
appear in the current batch, rather than updating the accumulators for all
indices. Compared with the original Adam optimizer, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original Adam algorithm, and
may lead to different empirical results.
"""
def _apply_sparse(self, grad, var):
beta1_power = tf.cast(self._beta1_power, var.dtype.base_dtype)
beta2_power = tf.cast(self._beta2_power, var.dtype.base_dtype)
lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))
# m := beta1 * m + (1 - beta1) * g_t
# We use a slightly different version of the moving-average update formula
# that does a better job of handling concurrent lockless updates:
# m -= (1 - beta1) * (m - g_t)
m = self.get_slot(var, "m")
m_t_delta = tf.gather(m, grad.indices) - grad.values
m_t = tf.scatter_sub(m, grad.indices,
(1 - beta1_t) * m_t_delta,
use_locking=self._use_locking)
# v := beta2 * v + (1 - beta2) * (g_t * g_t)
# We reformulate the update as:
# v -= (1 - beta2) * (v - g_t * g_t)
v = self.get_slot(var, "v")
v_t_delta = tf.gather(v, grad.indices) - tf.square(grad.values)
v_t = tf.scatter_sub(v, grad.indices,
(1 - beta2_t) * v_t_delta,
use_locking=self._use_locking)
# variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))
m_t_slice = tf.gather(m_t, grad.indices)
v_t_slice = tf.gather(v_t, grad.indices)
denominator_slice = tf.sqrt(v_t_slice) + epsilon_t
var_update = tf.scatter_sub(var, grad.indices,
lr * m_t_slice / denominator_slice,
use_locking=self._use_locking)
return tf.group(var_update, m_t, v_t)
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def split(values, axis, num_splits, name=None):
return tf.split(values, num_splits, axis=axis, name=name)
def reverse(values, axis):
return tf.reverse(values, [axis])
def sparse_softmax_cross_entropy_with_logits(logits, labels):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
def concat(values, axis, name=None):
if len(values) == 1:
return values[0]
return tf.concat(values, axis, name=name)
def concat_tensor_array(values, name=None):
return values.stack(name=name)
def batch_gather_3d(values, indices):
return tf.gather(tf.reshape(values, [-1, tf.shape(values)[2]]),
tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] +
indices)
def batch_gather_2d(values, indices):
return tf.gather(tf.reshape(values, [-1]),
tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] +
indices)
def viterbi_decode(score, transition_params, sequence_lengths, back_prop=False,
parallel_iterations=1):
"""Decode the highest scoring sequence of tags inside of TensorFlow!!!
This can be used anytime.
Args:
score: A [batch, seq_len, num_tags] matrix of unary potentials.
transition_params: A [num_tags, num_tags] matrix of binary potentials.
sequence_lengths: A [batch] int32 vector of the length of each score
sequence.
Returns:
viterbi: A [batch, seq_len] list of integers containing the highest
scoring tag indices.
viterbi_score: A vector of float containing the score for the Viterbi
sequence.
"""
sequence_lengths = tf.convert_to_tensor(
sequence_lengths, name="sequence_lengths")
score = tf.convert_to_tensor(score, name="score")
transition_params = tf.convert_to_tensor(
transition_params, name="transition_params")
if sequence_lengths.dtype != tf.int32:
sequence_lengths = tf.cast(sequence_lengths, tf.int32)
def condition(t, *args):
"""Stop when full score sequence has been read in."""
return tf.less(t, tf.shape(score)[1])
def body(t, trellis, backpointers, trellis_val):
"""Perform forward viterbi pass."""
v = tf.expand_dims(trellis_val, 2) + tf.expand_dims(transition_params, 0)
new_trellis_val = score[:, t, :] + tf.reduce_max(v, axis=1)
new_trellis = trellis.write(t, new_trellis_val)
new_backpointers = backpointers.write(
t, tf.cast(tf.argmax(v, axis=1), tf.int32))
return t + 1, new_trellis, new_backpointers, new_trellis_val
trellis_arr = tf.TensorArray(score.dtype, size=0,
dynamic_size=True, clear_after_read=False, infer_shape=False)
first_trellis_val = score[:, 0, :]
trellis_arr = trellis_arr.write(0, first_trellis_val)
backpointers_arr = tf.TensorArray(tf.int32, size=0,
dynamic_size=True, clear_after_read=False, infer_shape=False)
backpointers_arr = backpointers_arr.write(0,
tf.zeros_like(score[:, 0, :], dtype=tf.int32))
_, trellis_out, backpointers_out, _ = tf.while_loop(
condition, body,
(tf.constant(1, name="t", dtype=tf.int32), trellis_arr, backpointers_arr, first_trellis_val),
parallel_iterations=parallel_iterations,
back_prop=back_prop)
trellis_out = concat_tensor_array(trellis_out)
backpointers_out = concat_tensor_array(backpointers_out)
# make batch-major:
trellis_out = tf.transpose(trellis_out, [1, 0, 2])
backpointers_out = tf.transpose(backpointers_out, [1, 0, 2])
def condition(t, *args):
return tf.less(t, tf.shape(score)[1])
def body(t, viterbi, last_decision):
backpointers_timestep = batch_gather_3d(
backpointers_out, tf.maximum(sequence_lengths - t, 0))
new_last_decision = batch_gather_2d(
backpointers_timestep, last_decision)
new_viterbi = viterbi.write(t, new_last_decision)
return t + 1, new_viterbi, new_last_decision
last_timestep = batch_gather_3d(trellis_out, sequence_lengths - 1)
# get scores for last timestep of each batch element inside
# trellis:
scores = tf.reduce_max(last_timestep, axis=1)
# get choice index for last timestep:
last_decision = tf.cast(tf.argmax(last_timestep, axis=1), tf.int32)
# decode backwards using backpointers:
viterbi = tf.TensorArray(tf.int32, size=0,
dynamic_size=True, clear_after_read=False, infer_shape=False)
viterbi = viterbi.write(0, last_decision)
_, viterbi_out, _ = tf.while_loop(
condition, body,
(tf.constant(1, name="t", dtype=tf.int32), viterbi, last_decision),
parallel_iterations=parallel_iterations,
back_prop=back_prop)
viterbi_out = concat_tensor_array(viterbi_out)
# make batch-major:
viterbi_out = tf.transpose(viterbi_out, [1, 0])
viterbi_out_fwd = tf.reverse_sequence(
viterbi_out, sequence_lengths, seq_dim=1)
return viterbi_out_fwd, scores
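# A minimal usage sketch of the in-graph decoder above (shapes are illustrative):
#   unaries = tf.random_normal([8, 20, 5])       # [batch, seq_len, num_tags]
#   transitions = tf.random_normal([5, 5])       # [num_tags, num_tags]
#   lengths = tf.fill([8], 20)                   # [batch]
#   tags, tag_scores = viterbi_decode(unaries, transitions, lengths)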
def sum_list(elements):
total = elements[0]
for el in elements[1:]:
total += el
return total
def explicitly_set_fields():
received = set()
for argument in sys.argv:
if argument.startswith("--"):
received.add(argument[2:])
if argument[2:].startswith("no"):
received.add(argument[4:])
return received
def save_session(session, saver, path, verbose=False):
"""
Call save on tf.train.Saver on a specific path to store all the variables
of the current tensorflow session to a file for later restoring.
Arguments:
        session : tf.Session
        saver : tf.train.Saver
        path : str, place to save session
        verbose : bool, print the path the session is saved under
"""
makedirs(path, exist_ok=True)
if not path.endswith("/"):
path = path + "/"
path = join(path, "model.ckpt")
if verbose:
print("Saving session under %r" % (path,), flush=True)
saver.save(session, path)
print("Saved", flush=True)
### constants for saving & loading
# model config:
OBJECTIVE_NAMES = "OBJECTIVE_NAMES"
OBJECTIVE_TYPES = "OBJECTIVE_TYPES"
# inputs:
INPUT_PLACEHOLDERS = "INPUT_PLACEHOLDERS"
LABEL_PLACEHOLDERS = "LABEL_PLACEHOLDERS"
LABEL_MASK_PLACEHOLDERS = "LABEL_MASK_PLACEHOLDERS"
TRAIN_OP = "TRAIN_OP"
SEQUENCE_LENGTHS = "SEQUENCE_LENGTHS"
IS_TRAINING = "IS_TRAINING"
# outputs:
DECODED = "DECODED"
DECODED_SCORES = "DECODED_SCORES"
UNARY_SCORES = "UNARY_SCORES"
# per objective metrics:
TOKEN_CORRECT = "TOKEN_CORRECT"
TOKEN_CORRECT_TOTAL = "TOKEN_CORRECT_TOTAL"
SENTENCE_CORRECT = "SENTENCE_CORRECT"
SENTENCE_CORRECT_TOTAL = "SENTENCE_CORRECT_TOTAL"
# aggregate metrics over all objectives
NLL = "NLL"
NLL_TOTAL = "NLL_TOTAL"
TOKEN_CORRECT_ALL = "TOKEN_CORRECT_ALL"
TOKEN_CORRECT_ALL_TOTAL = "TOKEN_CORRECT_ALL_TOTAL"
SENTENCE_CORRECT_ALL = "SENTENCE_CORRECT_ALL"
SENTENCE_CORRECT_ALL_TOTAL = "SENTENCE_CORRECT_ALL_TOTAL"
CONFUSION_MATRIX = "CONFUSION_MATRIX"
GLOBAL_STEP = "global_step"
SUMMARIES_ASSIGNS = "SUMMARIES_ASSIGNS"
SUMMARIES_PLACEHOLDERS = "SUMMARIES_PLACEHOLDERS"
SUMMARIES_NAMES = "SUMMARIES_NAMES"
TRAIN_SUMMARIES = "TRAIN_SUMMARIES"
TRUE_POSITIVES = "TRUE_POSITIVES"
FALSE_POSITIVES = "FALSE_POSITIVES"
FALSE_NEGATIVES = "FALSE_NEGATIVES"
def maybe_dropout(inputs, keep_prob, is_training):
return tf.cond(is_training,
lambda : tf.nn.dropout(inputs, keep_prob),
lambda : inputs
) if keep_prob < 1 else inputs
def compute_sentence_correct(correct, sequence_mask):
any_label = tf.reduce_max(tf.cast(sequence_mask, tf.int32), 1)
sentence_correct_total = tf.reduce_sum(any_label)
# is 1 when all is correct, 0 otherwise
sentence_correct = tf.reduce_sum(tf.reduce_prod(
tf.cast(
tf.logical_or(correct, tf.logical_not(sequence_mask)),
tf.int32
),
1
) * any_label)
return sentence_correct, sentence_correct_total
def lstm_activation(inputs, input_h, input_c, W, b, activation,
                    keep_prob=1.0, is_training=False, forget_bias=0.0):
    # keep_prob, is_training, and forget_bias mirror the arguments passed by
    # LSTMCell below; applying dropout to the candidate input and adding
    # forget_bias to the forget gate is an assumed (standard) wiring.
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    cell_inputs = concat([inputs, input_h], axis=1)
    lstm_matrix = tf.nn.xw_plus_b(cell_inputs, W, b)
    preactiv = split(lstm_matrix, axis=1, num_splits=4)
    # from CUDNN docs:
    # Values 0 and 4 reference the input gate.
    # Values 1 and 5 reference the forget gate.
    # Values 2 and 6 reference the new memory gate.
    # Values 3 and 7 reference the output gate.
    i, f, j, o = (
        preactiv[CUDNN_MAPPING["i"]],
        preactiv[CUDNN_MAPPING["f"]],
        preactiv[CUDNN_MAPPING["j"]],
        preactiv[CUDNN_MAPPING["o"]]
    )
    # optionally drop out the candidate input while training
    new_input = maybe_dropout(activation(j), keep_prob, is_training)
    c = (tf.nn.sigmoid(f + forget_bias) * input_c +
         tf.nn.sigmoid(i) * new_input)
    m = tf.nn.sigmoid(o) * activation(c)
    return (c, m)
class Logger(object):
def __init__(self, session, writer):
self.session = session
self.writer = writer
self._placeholders = {}
summaries = tf.get_collection(SUMMARIES_ASSIGNS)
summaries_pholders = tf.get_collection(SUMMARIES_PLACEHOLDERS)
summaries_names = [name.decode("utf-8")
for name in tf.get_collection(SUMMARIES_NAMES)]
for summary, pholder, name in zip(summaries, summaries_pholders, summaries_names):
self._placeholders[name] = (pholder, summary)
def log(self, name, value, step):
if name not in self._placeholders:
pholder = tf.placeholder(tf.float32, [], name=name)
summary = tf.summary.scalar(name, pholder)
tf.add_to_collection(SUMMARIES_ASSIGNS, summary)
tf.add_to_collection(SUMMARIES_NAMES, name)
tf.add_to_collection(SUMMARIES_PLACEHOLDERS, pholder)
self._placeholders[name] = (pholder, summary)
pholder, summary = self._placeholders[name]
res = self.session.run(summary, {pholder:value})
self.writer.add_summary(res, step)
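# Usage sketch (hedged): "logs/example_run" is a hypothetical directory. Logger
# reuses scalar-summary ops cached in the graph collections, or creates one the
# first time a metric name is logged.
def _example_logger(session):
    writer = tf.summary.FileWriter("logs/example_run")
    logger = Logger(session, writer)
    logger.log("dev_token_correct", 0.93, step=100)
    writer.flush()
    return logger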
class ParametrizedLSTMCell(RNNCell):
def __init__(self, weights, biases, hidden_size):
self._weights = weights
self._biases = biases
self.hidden_size = hidden_size
@property
def state_size(self):
return (self.hidden_size, self.hidden_size)
@property
def output_size(self):
return self.hidden_size
def __call__(self, inputs, state, scope=None):
input_h, input_c = state
c, m = lstm_activation(inputs,
input_h=input_h,
input_c=input_c,
b=self._biases,
W=self._weights,
activation=tf.nn.tanh)
return m, (m, c)
class LSTMCell(TFLSTMCell):
def __init__(self,
num_units,
keep_prob=1.0,
is_training=False):
self._is_training = is_training
self._keep_prob = keep_prob
TFLSTMCell.__init__(
self,
num_units=num_units,
state_is_tuple=True
)
def __call__(self, inputs, state, scope=None):
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with tf.variable_scope(scope or type(self).__name__,
initializer=self._initializer): # "LSTMCell"
concat_w = _get_concat_variable(
"W", [input_size.value + self._num_units, 4 * self._num_units],
dtype, 1)
b = tf.get_variable(
"B", shape=[4 * self._num_units],
initializer=tf.zeros_initializer(), dtype=dtype)
c, m = lstm_activation(inputs,
input_c=c_prev,
input_h=m_prev,
W=concat_w,
b=b,
activation=self._activation,
keep_prob=self._keep_prob,
is_training=self._is_training,
forget_bias=self._forget_bias)
return m, LSTMStateTuple(c, m)
def cudnn_lstm_parameter_size(input_size, hidden_size):
"""Number of parameters in a single CuDNN LSTM cell."""
biases = 8 * hidden_size
weights = 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)
return biases + weights
def direction_to_num_directions(direction):
if direction == "unidirectional":
return 1
elif direction == "bidirectional":
return 2
else:
raise ValueError("Unknown direction: %r." % (direction,))
def estimate_cudnn_parameter_size(num_layers,
input_size,
hidden_size,
input_mode,
direction):
"""
Compute the number of parameters needed to
construct a stack of LSTMs. Assumes the hidden states
of bidirectional LSTMs are concatenated before being
sent to the next layer up.
"""
num_directions = direction_to_num_directions(direction)
params = 0
isize = input_size
for layer in range(num_layers):
for direction in range(num_directions):
params += cudnn_lstm_parameter_size(
isize, hidden_size
)
isize = hidden_size * num_directions
return params
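# Sanity-check sketch: for a hypothetical 2-layer bidirectional stack, the flat
# CuDNN buffer size should equal the per-cell sizes summed by hand (layer 0 sees
# the raw inputs, layer 1 sees the concatenated fwd/bwd hidden states).
def _example_estimate_cudnn_parameter_size():
    est = estimate_cudnn_parameter_size(
        num_layers=2, input_size=100, hidden_size=200,
        input_mode="linear_input", direction="bidirectional")
    expected = (2 * cudnn_lstm_parameter_size(100, 200) +
                2 * cudnn_lstm_parameter_size(2 * 200, 200))
    assert est == expected
    return est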
# cudnn conversion to dynamic RNN:
CUDNN_LAYER_WEIGHT_ORDER = [
"x", "x", "x", "x", "h", "h", "h", "h"
]
CUDNN_LAYER_BIAS_ORDER = [
"bx", "bx", "bx", "bx", "bh", "bh", "bh", "bh"
]
CUDNN_TRANSPOSED = True
CUDNN_MAPPING = {"i": 0, "f": 1, "j": 2, "o": 3}
def consume_biases_direction(params, old_offset, hidden_size, isize):
offset = old_offset
layer_biases_x = []
layer_biases_h = []
for piece in CUDNN_LAYER_BIAS_ORDER:
if piece == "bx":
layer_biases_x.append(
params[offset:offset + hidden_size]
)
offset += hidden_size
elif piece == "bh":
layer_biases_h.append(
params[offset:offset + hidden_size]
)
offset += hidden_size
else:
raise ValueError("Unknown cudnn piece %r." % (piece,))
b = concat(layer_biases_x, axis=0) + concat(layer_biases_h, axis=0)
return b, offset
def consume_weights_direction(params, old_offset, hidden_size, isize):
offset = old_offset
layer_weights_x = []
layer_weights_h = []
for piece in CUDNN_LAYER_WEIGHT_ORDER:
if piece == "x":
layer_weights_x.append(
tf.reshape(
params[offset:offset + hidden_size * isize],
[hidden_size, isize] if CUDNN_TRANSPOSED else [isize, hidden_size]
)
)
offset += hidden_size * isize
elif piece == "h":
layer_weights_h.append(
tf.reshape(
params[offset:offset + hidden_size * hidden_size],
[hidden_size, hidden_size]
)
)
offset += hidden_size * hidden_size
else:
raise ValueError("Unknown cudnn piece %r." % (piece,))
if CUDNN_TRANSPOSED:
W_T = concat([concat(layer_weights_x, axis=0), concat(layer_weights_h, axis=0)], axis=1)
W = tf.transpose(W_T)
else:
W = concat([concat(layer_weights_x, axis=1), concat(layer_weights_h, axis=1)], axis=0)
return W, offset
def decompose_layer_params(params, num_layers,
hidden_size, cell_input_size,
input_mode, direction, create_fn):
"""
This operation converts the opaque cudnn params into a set of
usable weight matrices.
Args:
params : Tensor, opaque cudnn params tensor
num_layers : int, number of stacked LSTMs.
hidden_size : int, number of neurons in each LSTM.
cell_input_size : int, input size for the LSTMs.
input_mode: whether a pre-projection was used or not. Currently only
'linear_input' is supported (e.g. CuDNN does its own projection
internally)
direction : str, 'unidirectional' or 'bidirectional'.
create_fn: callback for weight creation. Receives parameter slice (op),
layer (int), direction (0 = fwd, 1 = bwd),
parameter_index (0 = W, 1 = b).
Returns:
weights : list of lists of Tensors in the format:
first list is indexed layers,
inner list is indexed by direction (fwd, bwd),
tensors in the inner list are (Weights, biases)
"""
if input_mode != "linear_input":
raise ValueError("Only input_mode == linear_input supported for now.")
num_directions = direction_to_num_directions(direction)
offset = 0
all_weights = [[[] for j in range(num_directions)]
for i in range(num_layers)]
isize = cell_input_size
with tf.variable_scope("DecomposeCudnnParams"):
for layer in range(num_layers):
with tf.variable_scope("Layer{}".format(layer)):
for direction in range(num_directions):
with tf.variable_scope("fwd" if direction == 0 else "bwd"):
with tf.variable_scope("weights"):
W, offset = consume_weights_direction(
params,
old_offset=offset,
hidden_size=hidden_size,
isize=isize)
all_weights[layer][direction].append(
create_fn(W, layer, direction, 0))
isize = hidden_size * num_directions
isize = cell_input_size
for layer in range(num_layers):
with tf.variable_scope("Layer{}".format(layer)):
for direction in range(num_directions):
with tf.variable_scope("fwd" if direction == 0 else "bwd"):
with tf.variable_scope("biases"):
b, offset = consume_biases_direction(
params,
old_offset=offset,
hidden_size=hidden_size,
isize=isize)
all_weights[layer][direction].append(
create_fn(b, layer, direction, 1))
isize = hidden_size * num_directions
return all_weights
def create_decomposed_variable(param, lidx, didx, pidx):
with tf.device("cpu"):
return tf.get_variable("w" if pidx == 0 else "b",
shape=param.get_shape().as_list(),
dtype=param.dtype,
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
"excluded_variables"])
def cpu_cudnn_params(params, num_layers, hidden_size, cell_input_size, input_mode,
direction):
"""
This operation converts the opaque cudnn params into a set of
usable weight matrices, and caches the conversion.
Args:
params : Tensor, opaque cudnn params tensor
num_layers : int, number of stacked LSTMs.
hidden_size : int, number of neurons in each LSTM.
cell_input_size : int, input size for the LSTMs.
input_mode: whether a pre-projection was used or not. Currently only
'linear_input' is supported (e.g. CuDNN does its own projection
internally)
direction : str, 'unidirectional' or 'bidirectional'.
Returns:
weights : list of lists of Tensors in the format:
first list is indexed layers,
inner list is indexed by direction (fwd, bwd),
tensors in the inner list are (Weights, biases)
"""
# create a boolean status variable that checks whether the
# weights have been converted to cpu format:
with tf.device("cpu"):
cpu_conversion_status = tf.get_variable(
name="CudnnConversionStatus", dtype=tf.float32,
initializer=tf.zeros_initializer(), shape=[],
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
# create a fresh copy of the weights (not trainable)
reshaped = decompose_layer_params(
params,
num_layers=num_layers,
hidden_size=hidden_size,
cell_input_size=cell_input_size,
input_mode=input_mode,
direction=direction,
create_fn=create_decomposed_variable)
def cpu_convert():
all_assigns = decompose_layer_params(
params,
num_layers=num_layers,
hidden_size=hidden_size,
cell_input_size=cell_input_size,
input_mode=input_mode,
direction=direction,
create_fn=lambda p, lidx, didx, pidx: tf.assign(reshaped[lidx][didx][pidx], p))
all_assigns = [assign for layer_assign in all_assigns
for dir_assign in layer_assign
for assign in dir_assign]
all_assigns.append(tf.assign(cpu_conversion_status, tf.constant(1.0, dtype=tf.float32)))
all_assigns.append(tf.Print(cpu_conversion_status, [0],
message="Converted cudnn weights to CPU format. "))
with tf.control_dependencies(all_assigns):
ret = tf.identity(cpu_conversion_status)
return ret
# cache the reshaping/concatenating
ensure_conversion = tf.cond(tf.greater(cpu_conversion_status, 0),
lambda: cpu_conversion_status,
cpu_convert)
# if weights are already reshaped, go ahead:
with tf.control_dependencies([ensure_conversion]):
# wrap with identity to ensure there is a dependency between assignment
# and using the weights:
all_params = [[[tf.identity(p) for p in dir_param]
for dir_param in layer_param]
for layer_param in reshaped]
return all_params
class CpuCudnnLSTM(object):
def __init__(self, num_layers, hidden_size,
cell_input_size, input_mode, direction):
self.num_layers = num_layers
self.hidden_size = hidden_size
self.cell_input_size = cell_input_size
self.input_mode = input_mode
self.direction = direction
def __call__(self,
inputs,
input_h,
input_c,
params,
is_training=True):
layer_params = cpu_cudnn_params(params,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
cell_input_size=self.cell_input_size,
input_mode=self.input_mode,
direction=self.direction)
REVERSED = 1
layer_inputs = inputs
cell_idx = 0
for layer_param in layer_params:
hidden_fwd_bwd = []
final_output_c = []
final_output_h = []
for direction, (W, b) in enumerate(layer_param):
if direction == REVERSED:
layer_inputs = reverse(layer_inputs, axis=0)
hiddens, (output_h, output_c) = tf.nn.dynamic_rnn(
cell=ParametrizedLSTMCell(W, b, self.hidden_size),
inputs=layer_inputs,
dtype=inputs.dtype,
time_major=True,
initial_state=(input_h[cell_idx], input_c[cell_idx]))
if direction == REVERSED:
hiddens = reverse(hiddens, axis=0)
hidden_fwd_bwd.append(hiddens)
final_output_c.append(tf.expand_dims(output_c, 0))
final_output_h.append(tf.expand_dims(output_h, 0))
cell_idx += 1
if len(hidden_fwd_bwd) > 1:
layer_inputs = concat(hidden_fwd_bwd, axis=2)
final_output_c = concat(final_output_c, axis=0)
final_output_h = concat(final_output_h, axis=0)
else:
layer_inputs = hidden_fwd_bwd[0]
final_output_c = final_output_c[0]
final_output_h = final_output_h[0]
return layer_inputs, final_output_h, final_output_c
def highway(x, activation_fn=tf.nn.relu, scope=None):
size = x.get_shape()[-1].value
with tf.variable_scope(scope or "HighwayLayer"):
activ = tf.contrib.layers.fully_connected(
x, size * 2, activation_fn=None, scope="FC"
)
transform = tf.sigmoid(activ[..., :size], name="transform_gate")
hidden = activation_fn(activ[..., size:])
carry = 1.0 - transform
return tf.add(hidden * transform, x * carry, "y")
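# Shape sketch (assumes an active graph): a highway layer mixes a transformed
# input with the raw input through a learned gate, so the feature dimension is
# preserved. "ExampleHighway" is a hypothetical scope name.
def _example_highway():
    x = tf.placeholder(tf.float32, [None, None, 64], name="example_highway_in")
    y = highway(x, activation_fn=tf.tanh, scope="ExampleHighway")
    assert y.get_shape().as_list()[-1] == 64
    return y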
def conv2d(inputs, output_dim, k_h, k_w,
stddev=0.02, scope=None,
weight_noise=0.0, is_training=True):
with tf.variable_scope(scope or "Conv2D"):
w = tf.get_variable('w', [k_h, k_w, inputs.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
if weight_noise > 0 and not isinstance(is_training, bool):
w = add_weight_noise(w, is_training=is_training, stddev=weight_noise)
return tf.nn.conv2d(inputs, w, strides=[1, 1, 1, 1], padding="VALID")
def character_convolution(inputs, feature):
inputs_2d = tf.reshape(inputs,
[tf.shape(inputs)[0] * tf.shape(inputs)[1], tf.shape(inputs)[2]]
)
inputs_3d = embedding_lookup(
inputs_2d,
dim=feature["dimension"],
# 255 different bytes (uint8)
# & start and end symbol:
size=257,
dtype=tf.float32,
mask_negative=True)
inputs_4d = tf.expand_dims(inputs_3d, 1)
feature_pools = []
for idx, conv_filter in enumerate(feature["filters"]):
width, channels = conv_filter["width"], conv_filter["channels"]
# [batch * time x 1 x word_length x embed_dim x feature_map_dim]
conv = tf.squeeze(conv2d(inputs_4d, channels, 1, width, scope="CharacterConvolution%d" % (idx,)), [1])
# remove word dimension
pool = tf.reduce_max(conv, 1)
feature_pools.append(pool)
activations = concat(feature_pools, axis=1)
channels_out = sum(conv_filter["channels"] for conv_filter in feature["filters"])
activations = tf.reshape(
tf.tanh(activations),
[tf.shape(inputs)[0], tf.shape(inputs)[1], channels_out],
name="CharacterConvolutionPooled")
for idx in range(feature["highway_layers"]):
activations = highway(activations, scope="HighwayLayer%d" % (idx,),
activation_fn=tf.tanh)
return activations
def feature_dtype(feat):
if requires_vocab(feat):
return tf.int32
elif feat["type"] in {"digit", "punctuation_count", "uppercase"}:
return tf.float32
elif requires_character_convolution(feat):
return tf.int32
else:
raise ValueError("unknown feature %r." % (feat,))
def feature_shape(feature):
if requires_vocab(feature) or feature["type"] in {'digit', 'punctuation_count', 'uppercase'}:
return [None, None]
elif requires_character_convolution(feature):
return [None, None, None]
else:
raise ValueError("unknown feature %r." % (feature,))
def build_inputs(features, objectives, fused, class_weights,
class_weights_clipval):
input_placeholders = []
labels = []
labels_mask = []
labels_class_weights = []
max_output_vocab = max(len(obj["vocab"]) for obj in objectives)
with tf.variable_scope("Inputs"):
is_training = tf.placeholder(tf.bool, [], name="is_training")
tf.add_to_collection(IS_TRAINING, is_training)
for idx, feat in enumerate(features):
input_placeholder = tf.placeholder(
feature_dtype(feat), feature_shape(feat),
name="input_placeholders_%d" % (idx,)
)
input_placeholders.append(input_placeholder)
tf.add_to_collection(INPUT_PLACEHOLDERS, input_placeholder)
if fused:
label_placeholder = tf.placeholder(
tf.int32, [None, None, len(objectives)]
)
labels_mask_placeholder = tf.placeholder(
tf.bool, [None, None, len(objectives)], name="labels_mask"
)
labels.append(label_placeholder)
labels_mask.append(labels_mask_placeholder)
tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder)
tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder)
if class_weights:
with tf.variable_scope("FusedClassWeights"):
init_class_weights = tf.get_variable(
name="class_weights",
shape=[len(objectives) * max_output_vocab],
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
init_class_count = tf.get_variable(
name="class_weights_denominator",
shape=[len(objectives)],
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
def update_class_weights():
mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1, len(objectives)]), tf.int64)
updated_cls_weights = tf.scatter_add(
init_class_weights,
tf.reshape(label_placeholder + tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)]), [-1]),
tf.reshape(mask_as_ints, [-1])
)
updated_class_count = tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints, 0))
# class weight: weight_i = total / class_i
weights = tf.clip_by_value(tf.expand_dims(updated_class_count, 1) /
tf.reshape(updated_cls_weights, [len(objectives), max_output_vocab]),
1e-6, class_weights_clipval)
return tf.cast(weights, tf.float32)
def return_class_weights():
# class weight: weight_i = total / class_i
return tf.cast(
tf.clip_by_value(tf.expand_dims(init_class_count, 1) /
tf.reshape(init_class_weights, [len(objectives), max_output_vocab]),
1e-6, class_weights_clipval), tf.float32)
labels_class_weights.append(
tf.cond(is_training,
update_class_weights,
return_class_weights))
else:
labels_class_weights.append(None)
else:
for objective in objectives:
with tf.variable_scope(objective["name"]):
label_placeholder = tf.placeholder(
tf.int32, [None, None], name="labels"
)
labels.append(label_placeholder)
if objective["type"] == "crf":
labels_mask_placeholder = tf.placeholder(
tf.bool, [None], name="labels_mask"
)
labels_class_weights.append(None)
elif objective["type"] == "softmax":
labels_mask_placeholder = tf.placeholder(
tf.bool, [None, None], name="labels_mask"
)
if class_weights:
init_class_weights = tf.get_variable(
name="class_weights",
shape=len(objective["vocab"]),
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
init_class_count = tf.get_variable(
name="class_weights_denominator",
shape=[],
initializer=tf.constant_initializer(1),
dtype=tf.int64,
trainable=False)
def update_class_weights():
mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1]), tf.int64)
updated_cls_weights = tf.scatter_add(
init_class_weights,
tf.reshape(label_placeholder, [-1]),
mask_as_ints
)
updated_class_count = tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints))
# class weight: weight_i = total / class_i
weights = tf.clip_by_value(updated_class_count / updated_cls_weights,
1e-6, class_weights_clipval)
return tf.cast(weights, tf.float32)
def return_class_weights():
# class weight: weight_i = total / class_i
return tf.cast(
tf.clip_by_value(init_class_count / init_class_weights,
1e-6, class_weights_clipval), tf.float32)
labels_class_weights.append(
tf.cond(is_training, update_class_weights, return_class_weights)
)
else:
labels_class_weights.append(None)
else:
raise ValueError(
"unknown objective type %r." % (
objective["type"]
)
)
labels_mask.append(labels_mask_placeholder)
tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder)
tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder)
sequence_lengths = tf.placeholder(tf.int32, [None],
name="sequence_lengths")
tf.add_to_collection(SEQUENCE_LENGTHS, sequence_lengths)
return (input_placeholders,
labels,
labels_mask,
labels_class_weights,
sequence_lengths,
is_training)
def add_weight_noise(x, is_training, stddev):
return tf.cond(is_training,
lambda: x + tf.random_normal(
shape=tf.shape(x), stddev=stddev),
lambda: x)
def build_recurrent(inputs, cudnn, faux_cudnn, hidden_sizes, is_training,
keep_prob, weight_noise):
dtype = tf.float32
if cudnn:
        if len(hidden_sizes) == 0:
            raise ValueError("hidden_sizes must be a non-empty list.")
hidden_size = hidden_sizes[0]
if any(hidden_size != hsize for hsize in hidden_sizes):
raise ValueError("cudnn RNN requires all hidden units "
"to be the same size (got %r)" % (
hidden_sizes,
))
num_layers = len(hidden_sizes)
cell_input_size = inputs.get_shape()[-1].value
est_size = estimate_cudnn_parameter_size(
num_layers=num_layers,
hidden_size=hidden_size,
input_size=cell_input_size,
input_mode="linear_input",
direction="bidirectional"
)
        # all CuDNN weights live in a single flat parameter buffer:
cudnn_params = tf.get_variable("RNNParams",
shape=[est_size],
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer())
if weight_noise > 0:
cudnn_params = add_weight_noise(cudnn_params,
stddev=weight_noise, is_training=is_training)
if faux_cudnn:
cudnn_cell = CpuCudnnLSTM(num_layers,
hidden_size,
cell_input_size,
input_mode="linear_input",
direction="bidirectional")
else:
cpu_cudnn_params(cudnn_params,
num_layers=num_layers,
hidden_size=hidden_size,
cell_input_size=cell_input_size,
input_mode="linear_input",
direction="bidirectional")
cudnn_cell = CudnnLSTM(num_layers,
hidden_size,
cell_input_size,
input_mode="linear_input",
direction="bidirectional")
init_state = tf.fill(
(2 * num_layers, tf.shape(inputs)[1], hidden_size),
tf.constant(np.float32(0.0)))
hiddens, output_h, output_c = cudnn_cell(
inputs,
input_h=init_state,
input_c=init_state,
params=cudnn_params,
is_training=True)
hiddens = maybe_dropout(
hiddens,
keep_prob,
is_training)
else:
cell = MultiRNNCell(
[LSTMCell(hsize, is_training=is_training, keep_prob=keep_prob)
for hsize in hidden_sizes]
)
hiddens, _ = bidirectional_dynamic_rnn(
cell,
inputs,
time_major=True,
dtype=dtype,
swap_memory=True
)
return hiddens
def build_embed(inputs, features, index2words, keep_prob, is_training):
embeddings = []
for idx, (values, feature, index2word) in enumerate(zip(inputs, features, index2words)):
if requires_vocab(feature):
with tf.variable_scope("embedding_%d" % (idx,)):
embedding = embedding_lookup(
values,
dim=feature["dimension"],
size=len(index2word),
dtype=tf.float32,
mask_negative=True
)
embeddings.append(embedding)
elif requires_character_convolution(feature):
embeddings.append(
character_convolution(values, feature)
)
else:
embeddings.append(tf.expand_dims(values, 2))
return maybe_dropout(concat(embeddings, axis=2), keep_prob, is_training)
def crf_metrics(unary_scores, labels, transition_params, sequence_lengths,
mask):
"""
Computes CRF output metrics.
Receives:
unary_scores : batch-major order
labels : batch-major order
transition_params : nclasses x nclasses matrix.
sequence_lengths : length of each time-sequence
mask : batch-major example mask
Returns:
token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total
"""
classes = unary_scores.get_shape()[-1].value
decoded, scores = viterbi_decode(unary_scores,
transition_params,
sequence_lengths)
tf.add_to_collection(UNARY_SCORES, unary_scores)
tf.add_to_collection(DECODED, decoded)
tf.add_to_collection(DECODED_SCORES, scores)
equals_label = tf.equal(labels, decoded)
token_correct = tf.reduce_sum(
tf.cast(
tf.logical_and(equals_label, mask),
tf.int32
)
)
token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32))
tf.add_to_collection(TOKEN_CORRECT, token_correct)
tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total)
sentence_correct, _ = compute_sentence_correct(equals_label, mask)
sentence_correct_total = tf.reduce_sum(tf.cast(mask[:, 0], tf.int32))
tf.add_to_collection(SENTENCE_CORRECT, sentence_correct)
tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total)
build_true_false_positives(decoded, mask, labels,
classes, equals_label)
return (token_correct, token_correct_total,
sentence_correct, sentence_correct_total)
def build_true_false_positives(decoded, mask_batch_major, labels_batch_major,
classes, equals_label):
masked_equals_label = tf.logical_and(equals_label, mask_batch_major)
# now for each class compute tp, fp, fn
# [nclasses x batch x time]
masked_per_class = tf.logical_and(
tf.equal(labels_batch_major[None, :, :], tf.range(classes)[:, None, None]),
mask_batch_major)
# correct, and on label
correct = tf.reduce_sum(tf.cast(tf.logical_and(masked_per_class, equals_label[None, :, :]), tf.int32),
axis=[1, 2])
    # predicted a particular class
    guessed = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(decoded[None, :, :],
                         tf.range(classes)[:, None, None]),
                mask_batch_major),
            tf.int32),
        axis=[1, 2])
total = tf.reduce_sum(tf.cast(masked_per_class, tf.int32), axis=[1, 2])
tp, fp, fn = correct, guessed - correct, total - correct
tf.add_to_collection(TRUE_POSITIVES, tp)
tf.add_to_collection(FALSE_POSITIVES, fp)
tf.add_to_collection(FALSE_NEGATIVES, fn)
def softmax_metrics(unary_scores, labels, mask):
"""
Compute softmax output stats for correct/accuracy per-token/per-sentence.
Receive
unary_scores : time-major
labels : time-major
mask : time-major
Returns:
token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total
"""
classes = unary_scores.get_shape()[-1].value
unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2])
labels_batch_major = tf.transpose(labels, [1, 0])
mask_batch_major = tf.transpose(mask, [1, 0])
decoded = tf.cast(tf.argmax(unary_scores_batch_major, 2), labels.dtype)
unary_probs_batch_major = tf.nn.softmax(unary_scores_batch_major)
scores = tf.reduce_max(unary_probs_batch_major, 2)
tf.add_to_collection(UNARY_SCORES, unary_probs_batch_major)
tf.add_to_collection(DECODED, decoded)
tf.add_to_collection(DECODED_SCORES, scores)
equals_label = tf.equal(decoded, labels_batch_major)
token_correct = tf.reduce_sum(
tf.cast(
tf.logical_and(
equals_label,
mask_batch_major
),
tf.int32
)
)
token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32))
tf.add_to_collection(TOKEN_CORRECT, token_correct)
tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total)
sentence_correct, sentence_correct_total = compute_sentence_correct(
equals_label, mask_batch_major
)
tf.add_to_collection(SENTENCE_CORRECT, sentence_correct)
tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total)
build_true_false_positives(decoded, mask_batch_major, labels_batch_major,
classes, equals_label)
return (token_correct, token_correct_total,
sentence_correct, sentence_correct_total)
def add_objective_names_types(objectives):
for objective in objectives:
with tf.variable_scope(objective["name"]):
# store objective names in graph:
tf.add_to_collection(OBJECTIVE_NAMES,
tf.constant(objective["name"], name="objective_name")
)
tf.add_to_collection(OBJECTIVE_TYPES,
tf.constant(objective["type"], name="objective_type")
)
def build_loss(inputs, objectives, labels, labels_mask,
labels_class_weights, fused, sequence_lengths,
class_weights_normalize):
"""
Compute loss function given the objectives.
Assumes inputs are of the form [time, batch, features].
Arguments:
----------
inputs : tf.Tensor
objectives : list<dict>, objective specs
labels : list<tf.Tensor>
labels_mask : list<tf.Tensor>
    labels_class_weights : list<tf.Tensor>
    fused : bool, whether all (softmax) objectives share a single output layer
    sequence_lengths : tf.Tensor
    class_weights_normalize : bool, whether to normalize the weighted loss by
        the weighted number of predictions instead of the raw count
Returns:
loss : tf.Tensor (scalar)
"""
losses = []
negative_log_likelihoods = []
sentence_corrects = []
sentence_corrects_total = []
token_corrects = []
token_corrects_total = []
max_output_vocab = max(len(obj["vocab"]) for obj in objectives)
total_output_size = len(objectives) * max_output_vocab
add_objective_names_types(objectives)
if fused:
with tf.variable_scope("FusedOutputs"):
objective_labels = labels[0]
mask = labels_mask[0]
objective_class_weights = labels_class_weights[0]
# perform all classifications at once:
unary_scores = tf.contrib.layers.fully_connected(
inputs, total_output_size,
activation_fn=None
)
unary_scores = tf.reshape(unary_scores,
[tf.shape(unary_scores)[0],
tf.shape(unary_scores)[1],
len(objectives),
max_output_vocab])
negative_log_likelihood = sparse_softmax_cross_entropy_with_logits(
logits=unary_scores,
labels=objective_labels
)
labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype)
masked_negative_log_likelihood = negative_log_likelihood * labels_mask_casted
if objective_class_weights is not None:
class_weights_mask = tf.gather(
tf.reshape(objective_class_weights, [-1]),
objective_labels +
tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)]))
if class_weights_normalize:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))
else:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))
else:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))
masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)
losses.append(normed_loss)
negative_log_likelihoods.append(masked_negative_log_likelihood_sum)
for idx, objective in enumerate(objectives):
with tf.variable_scope(objective["name"]):
(token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total) = softmax_metrics(unary_scores[:, :, idx, :len(objective["vocab"])],
labels=objective_labels[:, :, idx],
mask=mask[:, :, idx])
token_corrects.append(token_correct)
token_corrects_total.append(token_correct_total)
sentence_corrects.append(sentence_correct)
sentence_corrects_total.append(sentence_correct_total)
else:
for objective, objective_labels, mask, objective_class_weights in zip(objectives, labels, labels_mask, labels_class_weights):
with tf.variable_scope(objective["name"]):
if objective["type"] == "crf":
unary_scores = tf.contrib.layers.fully_connected(
inputs,
len(objective["vocab"]),
activation_fn=None
)
unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2])
labels_batch_major = tf.transpose(objective_labels, [1, 0])
padded_unary_scores_batch_major = tf.cond(tf.greater(tf.shape(unary_scores_batch_major)[1], 1),
lambda: unary_scores_batch_major,
lambda: tf.pad(unary_scores_batch_major, [[0, 0], [0, 1], [0, 0]]))
padded_labels_batch_major = tf.cond(tf.greater(tf.shape(labels_batch_major)[1], 1),
lambda: labels_batch_major,
lambda: tf.pad(labels_batch_major, [[0, 0], [0, 1]]))
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
padded_unary_scores_batch_major, padded_labels_batch_major, sequence_lengths
)
labels_mask_casted = tf.cast(mask, log_likelihood.dtype)
masked_log_likelihood = (
log_likelihood * labels_mask_casted
)
masked_negative_log_likelihood_sum = -tf.reduce_sum(masked_log_likelihood)
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
losses.append(masked_negative_log_likelihood_sum / num_predictions)
negative_log_likelihoods.append(masked_negative_log_likelihood_sum)
sequence_mask = tf.logical_and(
tf.sequence_mask(sequence_lengths),
# pad the time dimension:
tf.expand_dims(mask, 1)
)
(token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total) = crf_metrics(unary_scores_batch_major,
labels=labels_batch_major,
mask=sequence_mask,
transition_params=transition_params,
sequence_lengths=sequence_lengths)
elif objective["type"] == 'softmax':
unary_scores = tf.contrib.layers.fully_connected(
inputs,
len(objective["vocab"]),
activation_fn=None
)
negative_log_likelihood = sparse_softmax_cross_entropy_with_logits(
logits=unary_scores,
labels=objective_labels
)
labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype)
masked_negative_log_likelihood = (
negative_log_likelihood * labels_mask_casted
)
if objective_class_weights is not None:
class_weights_mask = tf.gather(objective_class_weights, objective_labels)
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask
masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)
if class_weights_normalize:
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions
else:
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions
else:
masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood
masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)
num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)
normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions
losses.append(normed_loss)
negative_log_likelihoods.append(masked_negative_log_likelihood_sum)
(token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total) = softmax_metrics(unary_scores,
labels=objective_labels,
mask=mask)
else:
raise ValueError(
"unknown objective type %r" % (objective["type"],)
)
token_corrects.append(token_correct)
token_corrects_total.append(token_correct_total)
sentence_corrects.append(sentence_correct)
sentence_corrects_total.append(sentence_correct_total)
# aggregate metrics for all objectives:
total_loss = tf.reduce_sum(sum_list(losses))
tf.summary.scalar("BatchLoss", total_loss)
neg_log_likelihood_total = sum_list(negative_log_likelihoods)
tf.summary.scalar("BatchNLL", neg_log_likelihood_total)
tf.add_to_collection(NLL, neg_log_likelihood_total)
tf.add_to_collection(NLL_TOTAL, tf.shape(inputs)[1])
sentence_corrects_total = sum_list(sentence_corrects_total)
sentence_corrects = sum_list(sentence_corrects)
tf.add_to_collection(SENTENCE_CORRECT_ALL, sentence_corrects)
tf.add_to_collection(SENTENCE_CORRECT_ALL_TOTAL, sentence_corrects_total)
token_corrects_total = sum_list(token_corrects_total)
token_corrects = sum_list(token_corrects)
tf.add_to_collection(TOKEN_CORRECT_ALL, token_corrects)
tf.add_to_collection(TOKEN_CORRECT_ALL_TOTAL, token_corrects_total)
return total_loss
def build_model(name,
trainable,
features,
feature_index2words,
objectives,
keep_prob,
input_keep_prob,
hidden_sizes,
freeze_rate,
freeze_rate_anneal,
solver,
cudnn,
fused,
faux_cudnn,
class_weights,
class_weights_normalize,
class_weights_clipval,
lr,
weight_noise,
anneal_rate,
clip_norm):
# mixed output fusing is currently unsupported
if fused and any(obj["type"] != "softmax" for obj in objectives):
raise ValueError("cannot fuse outputs and use non-softmax output.")
# clear all existing collections to ensure every new collection is
# is created fresh
graph = tf.get_default_graph()
for collection_name in graph.get_all_collection_keys():
graph.clear_collection(collection_name)
# build a model under the model's name to prevent collisions
# when multiple models are restored simultaneously
with tf.variable_scope(name):
global_step = tf.Variable(0, trainable=False, name="global_step")
tf.add_to_collection(GLOBAL_STEP, global_step)
# model placeholders:
(input_placeholders,
labels,
labels_mask,
labels_class_weights,
sequence_lengths,
is_training) = build_inputs(features,
objectives=objectives,
fused=fused,
class_weights=class_weights,
class_weights_clipval=class_weights_clipval)
embed = build_embed(input_placeholders,
features=features,
index2words=feature_index2words,
is_training=is_training,
keep_prob=input_keep_prob)
hiddens = embed
if len(hidden_sizes) > 0:
hiddens = build_recurrent(hiddens,
cudnn=cudnn,
faux_cudnn=faux_cudnn,
hidden_sizes=hidden_sizes,
keep_prob=keep_prob,
weight_noise=weight_noise,
is_training=is_training)
loss = build_loss(hiddens,
objectives=objectives,
fused=fused,
labels=labels,
labels_mask=labels_mask,
labels_class_weights=labels_class_weights,
class_weights_normalize=class_weights_normalize,
sequence_lengths=sequence_lengths)
if trainable:
learning_rate = tf.train.exponential_decay(lr, global_step,
33000, anneal_rate, staircase=True)
if solver == "adam":
optimizer = LazyAdamOptimizer(learning_rate)
elif solver == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError("Unknown solver %r." % (solver))
grad_vars = optimizer.compute_gradients(loss)
if clip_norm > 0:
                grad_vars = [
                    (grad if isinstance(grad, tf.IndexedSlices)
                     else tf.clip_by_norm(grad, clip_norm), var)
                    for grad, var in grad_vars
                ]
train_op = optimizer.apply_gradients(grad_vars, global_step=global_step)
else:
train_op = tf.no_op()
tf.add_to_collection(TRAIN_OP, train_op)
tf.add_to_collection(TRAIN_SUMMARIES, tf.summary.merge_all())
def restore_session(session,
path,
replace_to=None,
replace_from=None,
verbose=False,
use_metagraph=True,
only_features=False):
"""
    Call restore on tf.train.Saver on a specific path to load all the
    variables saved at that path back into the current tensorflow session.
    Arguments:
        session : tf.Session
        path : str, place containing the session data to restore
        replace_to : str, optionally rename variables on restore by replacing
            `replace_from` with `replace_to` in their names.
        replace_from : str, see `replace_to`.
        verbose : bool, print status messages.
        use_metagraph : bool, restore by re-creating saved metagraph.
        only_features : bool, only restore input-feature weights (embeddings,
            RNNs, character convolutions, highway layers).
    Returns:
        bool : success or failure of the restoration
"""
makedirs(path, exist_ok=True)
if not path.endswith("/"):
path = path + "/"
checkpoint = tf.train.get_checkpoint_state(path)
if verbose:
print("Looking for saved session under %r" % (path,), flush=True)
if checkpoint is None or checkpoint.model_checkpoint_path is None:
if verbose:
print("No saved session found", flush=True)
return False
fname = basename(checkpoint.model_checkpoint_path)
if verbose:
print("Restoring saved session from %r" % (join(path, fname),), flush=True)
if use_metagraph:
param_saver = tf.train.import_meta_graph(join(path, fname + ".meta"),
clear_devices=True)
missing_vars = []
else:
if only_features:
to_restore = {}
whitelist = ["embedding", "/RNN/", "/RNNParams", "CharacterConvolution", "HighwayLayer"]
for var in tf.global_variables():
if any(keyword in var.name for keyword in whitelist):
to_restore[var.name[:-2]] = var
param_saver = tf.train.Saver(to_restore)
else:
if replace_to is not None and replace_from is not None:
to_restore = {}
for var in tf.global_variables():
var_name = var.name[:var.name.rfind(":")]
old_name = var_name.replace(replace_to, replace_from)
to_restore[old_name] = var
param_saver = tf.train.Saver(to_restore)
missing_vars = []
else:
reader = tf.train.NewCheckpointReader(join(path, fname))
saved_shapes = reader.get_variable_to_shape_map()
found_vars = [var for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes]
missing_vars = [var for var in tf.global_variables()
if var.name.split(':')[0] not in saved_shapes]
param_saver = tf.train.Saver(found_vars)
param_saver.restore(session, join(path, fname))
session.run([var.initializer for var in missing_vars])
return True
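# Usage sketch (hedged): round-tripping a session through save_session and
# restore_session; "checkpoints/example" is a hypothetical directory and the
# caller is assumed to own the tf.train.Saver.
def _example_save_and_restore(session, saver):
    save_session(session, saver, "checkpoints/example", verbose=True)
    restored = restore_session(session, "checkpoints/example",
                               verbose=True, use_metagraph=False)
    return restored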
def bidirectional_dynamic_rnn(cell, inputs, dtype, time_major=True, swap_memory=False):
with tf.variable_scope("forward"):
out_fwd, final_fwd = tf.nn.dynamic_rnn(
cell,
inputs,
time_major=time_major,
dtype=dtype,
swap_memory=swap_memory
)
if time_major:
reverse_axis = 0
else:
reverse_axis = 1
with tf.variable_scope("backward"):
out_bwd, final_bwd = tf.nn.dynamic_rnn(
cell,
reverse(inputs, axis=reverse_axis),
time_major=time_major,
dtype=dtype,
swap_memory=swap_memory
)
out_bwd = reverse(out_bwd, axis=reverse_axis)
return concat([out_fwd, out_bwd], axis=2), (final_fwd, final_bwd)
def get_embedding_lookup(size, dim, dtype, reuse=None, trainable=True):
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
W = tf.get_variable(
name="embedding",
shape=[size, dim],
dtype=dtype,
initializer=tf.random_uniform_initializer(
-1.0 / math.sqrt(dim),
1.0 / math.sqrt(dim)
),
trainable=trainable
)
return W
def embedding_lookup(inputs,
size,
dim,
dtype,
reuse=None,
mask_negative=False,
trainable=True,
place_on_cpu_if_big=True):
"""
Construct an Embedding layer that gathers
elements from a matrix with `size` rows,
    and `dim` features using the indices stored in `inputs`.
Arguments:
----------
inputs : tf.Tensor, of integer type
size : int, how many symbols in the lookup table
dim : int, how many columns per symbol.
dtype : data type for the lookup table (e.g. tf.float32)
reuse : bool, (default None) whether the lookup table
was already used before (thus this is weight sharing).
mask_negative : bool, (default False) should -1s in the
lookup input indicate padding (e.g. no lookup),
and thus should those values be masked out post-lookup.
trainable : bool (default True), whether the parameters of
this lookup table can be backpropagated into (e.g.
for Glove word vectors that are fixed pre-trained, this
can be set to False).
place_on_cpu_if_big : bool, if matrix is big, store it on cpu.
Returns:
--------
tf.Tensor, result of tf.nn.embedding_lookup(LookupTable, inputs)
"""
W = get_embedding_lookup(size, dim, dtype, reuse, trainable=trainable)
if mask_negative:
embedded = tf.nn.embedding_lookup(W, tf.maximum(inputs, 0))
null_mask = tf.expand_dims(
tf.cast(
tf.not_equal(inputs, -1),
dtype
),
-1
)
return embedded * null_mask
else:
return tf.nn.embedding_lookup(W, inputs)
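# Minimal sketch (assumes an active variable scope): with mask_negative=True,
# token id -1 is treated as padding and masked to a zero vector. The sizes
# below are illustrative.
def _example_embedding_lookup():
    token_ids = tf.placeholder(tf.int32, [None, None], name="example_token_ids")
    vectors = embedding_lookup(token_ids, size=1000, dim=50,
                               dtype=tf.float32, mask_negative=True)
    assert vectors.get_shape().as_list()[-1] == 50
    return vectors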
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(
tf.get_variable(
name + "_%d" % i,
[current_size] + shape[1:],
dtype=dtype
)
)
return shards
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = tf.get_variable_scope().name + "/" + concat_name + ":0"
for value in tf.get_collection(tf.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
    concat_variable = tf.concat(sharded_variable, 0, name=concat_name)
tf.add_to_collection(tf.GraphKeys.CONCATENATED_VARIABLES, concat_variable)
return concat_variable
class SequenceModel(object):
def __init__(self,
objectives,
features,
feature_index2words,
hidden_sizes,
keep_prob,
lr,
solver,
seed=1234,
input_keep_prob=0.7,
clip_norm=-1,
name="SequenceTagger",
cudnn=False,
anneal_rate=0.99,
trainable=True,
weight_noise=0.0,
class_weights_normalize=False,
faux_cudnn=False,
class_weights=False,
class_weights_clipval=1000.0,
freeze_rate=1.0,
fused=False,
freeze_rate_anneal=0.8,
create_variables=True):
if fused and objectives[0]["type"] == "crf":
fused = False
self.keep_prob = keep_prob
self.input_keep_prob = input_keep_prob
self.hidden_sizes = hidden_sizes
self.name = name
self.objectives = objectives
self.features = features
self.feature_index2words = feature_index2words
self.seed = seed
self.lr = lr
self.fused = fused
self.weight_noise = weight_noise
self.anneal_rate = anneal_rate
self.clip_norm = clip_norm
self.solver = solver
self.class_weights_normalize = class_weights_normalize
self.class_weights = class_weights
self.class_weights_clipval = class_weights_clipval
self.rng = np.random.RandomState(seed)
self.cudnn = cudnn
self.feature_word2index = [
{w: k for k, w in enumerate(index2word)} if index2word is not None else None
for index2word in self.feature_index2words
]
self.label2index = [
{w: k for k, w in enumerate(objective["vocab"])}
for objective in self.objectives
]
if create_variables:
# 1) build graph here (TF functional code pattern)
build_model(name=self.name,
trainable=trainable,
objectives=self.objectives,
features=self.features,
feature_index2words=self.feature_index2words,
hidden_sizes=self.hidden_sizes,
keep_prob=self.keep_prob,
solver=self.solver,
freeze_rate=freeze_rate,
class_weights_normalize=self.class_weights_normalize,
class_weights=self.class_weights,
class_weights_clipval=self.class_weights_clipval,
freeze_rate_anneal=freeze_rate_anneal,
cudnn=self.cudnn,
lr=self.lr,
fused=self.fused,
weight_noise=self.weight_noise,
anneal_rate=self.anneal_rate,
input_keep_prob=self.input_keep_prob,
faux_cudnn=faux_cudnn,
clip_norm=self.clip_norm)
# 2) and use meta graph to recover these fields:
self.recover_graph_variables()
def recover_graph_variables(self):
"""Use TF meta graph to obtain key metrics
and outputs from model."""
self.labels = tf.get_collection(LABEL_PLACEHOLDERS)
self.labels_mask = tf.get_collection(LABEL_MASK_PLACEHOLDERS)
self.input_placeholders = tf.get_collection(INPUT_PLACEHOLDERS)
self.sequence_lengths = tf.get_collection(SEQUENCE_LENGTHS)[0]
self.decoded = tf.get_collection(DECODED)
self.decoded_scores = tf.get_collection(DECODED_SCORES)
self.unary_scores = tf.get_collection(UNARY_SCORES)
self.token_correct = tf.get_collection(TOKEN_CORRECT)
self.token_correct_total = tf.get_collection(TOKEN_CORRECT_TOTAL)
self.sentence_correct = tf.get_collection(SENTENCE_CORRECT)
self.sentence_correct_total = tf.get_collection(SENTENCE_CORRECT_TOTAL)
self.token_correct_all = tf.get_collection(TOKEN_CORRECT_ALL)[0]
self.token_correct_all_total = tf.get_collection(TOKEN_CORRECT_ALL_TOTAL)[0]
self.sentence_correct_all = tf.get_collection(SENTENCE_CORRECT_ALL)[0]
self.sentence_correct_all_total = tf.get_collection(SENTENCE_CORRECT_ALL_TOTAL)[0]
self.true_positives = tf.get_collection(TRUE_POSITIVES)
self.false_positives = tf.get_collection(FALSE_POSITIVES)
self.false_negatives = tf.get_collection(FALSE_NEGATIVES)
if len(self.true_positives) == 0 and len(self.token_correct) != 0:
self.true_positives = [None for _ in self.token_correct]
self.false_positives = [None for _ in self.token_correct]
self.false_negatives = [None for _ in self.token_correct]
if len(tf.get_collection(GLOBAL_STEP)) > 0:
self.global_step = tf.get_collection(GLOBAL_STEP)[0]
else:
try:
self.global_step = tf.get_default_graph().get_tensor_by_name(
self.name + "/" + "global_step:0")
except KeyError:
self.global_step = tf.Variable(0, trainable=False, name="global_step")
tf.add_to_collection(GLOBAL_STEP, self.global_step)
self.is_training = tf.get_collection(IS_TRAINING)[0]
self.noop = tf.no_op()
self.train_op = tf.get_collection(TRAIN_OP)[0]
train_summaries = tf.get_collection(TRAIN_SUMMARIES)
self.train_summaries = train_summaries[0] if len(train_summaries) > 0 else None
self.nll = tf.get_collection(NLL)[0]
self.nll_total = tf.get_collection(NLL_TOTAL)[0]
self.saver = tf.train.Saver()
@classmethod
def overrideable_fields(cls):
return [
"keep_prob",
"name",
"lr",
"clip_norm",
"class_weights_normalize",
"class_weights_clipval",
"cudnn",
"anneal_rate",
"weight_noise",
"input_keep_prob"
]
@classmethod
def fields_to_save(cls):
return [
"hidden_sizes",
"objectives",
"name",
"cudnn",
"class_weights",
"features",
"fused",
"class_weights_normalize",
"weight_noise",
"anneal_rate",
"feature_index2words",
"solver",
"lr",
"clip_norm",
"keep_prob",
"input_keep_prob",
"class_weights_clipval"
]
def predict(self, session, feed_dict):
feed_dict[self.is_training] = False
outputs, outputs_probs = session.run(
(self.decoded, self.decoded_scores), feed_dict
)
predictions_out = {}
for value, val_prob, objective in zip(outputs, outputs_probs, self.objectives):
predictions_out[objective["name"]] = (value, val_prob)
return predictions_out
def predict_proba(self, session, feed_dict):
feed_dict[self.is_training] = False
outputs = session.run(
self.unary_scores, feed_dict
)
predictions_out = {}
for value, objective in zip(outputs, self.objectives):
predictions_out[objective["name"]] = value
return predictions_out
def save(self, session, path):
makedirs(path, exist_ok=True)
with open(join(path, "model.json"), "wt") as fout:
save_dict = {}
for field in type(self).fields_to_save():
save_dict[field] = getattr(self, field)
json.dump(save_dict, fout)
with open(join(path, "rng.pkl"), "wb") as fout:
pickle.dump(self.rng, fout)
save_session(session, self.saver, path, verbose=True)
@classmethod
def load(cls, session, path, args=None, verbose=True, trainable=True,
rebuild_graph=False, faux_cudnn=False, replace_to=None, replace_from=None):
"""Convenience method for using a tensorflow session to reload
a previously saved + serialized model from disk."""
with open(join(path, "model.json"), "rt") as fin:
model_props = json.load(fin)
# update fields based on CLI:
if args is not None:
ex_fields = explicitly_set_fields()
for field in cls.overrideable_fields():
if field in ex_fields:
model_props[field] = getattr(args, field)
# prune old fields based on changes to saveable fields:
relevant_props = {}
for field in cls.fields_to_save():
if field in model_props:
relevant_props[field] = model_props[field]
relevant_props["trainable"] = trainable
relevant_props["faux_cudnn"] = faux_cudnn
if rebuild_graph:
print("Using rebuild_graph mode: creating a new graph.", flush=True)
relevant_props["create_variables"] = True
model = cls(**relevant_props)
restore_session(
session, path,
replace_to=replace_to,
replace_from=replace_from,
verbose=verbose,
use_metagraph=False
)
else:
if model_props.get("cudnn", False):
import tensorflow.contrib.cudnn_rnn
relevant_props["create_variables"] = False
restore_session(
session, path,
verbose=verbose,
use_metagraph=True
)
model = cls(**relevant_props)
rng_path = join(path, "rng.pkl")
if exists(rng_path):
# apply the saved random number generator to this
# model:
with open(rng_path, "rb") as fin:
model.rng = pickle.load(fin)
return model
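# Usage sketch (hedged): loading a previously saved SequenceModel and running
# prediction; "saved_model_dir" is a hypothetical path and feed_dict is expected
# to come from the same batching helpers used during training.
def _example_load_and_predict(path="saved_model_dir", feed_dict=None):
    with tf.Session() as session:
        model = SequenceModel.load(session, path, trainable=False)
        if feed_dict is not None:
            return model.predict(session, feed_dict)
        return model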
def make_path_absolute(obj, basepath):
copied = obj.copy()
for key in ["path", "vocab"]:
if key in copied:
copied[key] = join(basepath, copied[key])
return copied
class Config(object):
def __init__(self, datasets, features, objectives,
wikidata_path, classification_path):
assert(len(features) > 0)
self.datasets = datasets
self.features = features
self.objectives = objectives
self.classifications = None
self.wikidata_path = wikidata_path
self.classification_path = classification_path
# build the objective names:
self._named_objectives = [obj["name"] for obj in self.objectives]
@classmethod
def load(cls, path):
with open(path, "rt") as fin:
config = json.load(fin)
config_dirname = dirname(path)
return cls(
datasets=[make_path_absolute(dataset, config_dirname) for dataset in config['datasets']],
features=[make_path_absolute(feat, config_dirname) for feat in config['features']],
objectives=[make_path_absolute(objective, config_dirname) for objective in config['objectives']],
wikidata_path=config.get("wikidata_path", None),
classification_path=(
join(config_dirname, config.get("classification_path", None))
if "classification_path" in config else None)
)
def load_dataset_separate(self, dataset_type):
paths = [dataset for dataset in self.datasets if dataset["type"] == dataset_type]
all_examples = {}
for dataset in paths:
_, extension = splitext(dataset["path"])
if extension == ".h5" or extension == ".hdf5":
if self.classifications is None:
if self.wikidata_path is None or self.classification_path is None:
raise ValueError("missing wikidata_path and "
"classification_path, cannot "
"construct H5Dataset.")
self.classifications = ClassificationHandler(
self.wikidata_path,
self.classification_path
)
examples = H5Dataset(
dataset["path"],
dataset["x"],
dataset["y"],
self._named_objectives,
ignore_value=dataset.get('ignore', None),
classifications=self.classifications)
else:
examples = TSVDataset(
dataset["path"],
dataset["x"],
dataset["y"],
self._named_objectives,
comment=dataset.get('comment', '#'),
ignore_value=dataset.get('ignore', None),
retokenize=dataset.get('retokenize', False))
title = dataset["path"].split('/')[-1].split(".")[0]
name = title
iteration = 1
while name in all_examples:
name = title + "-%d" % (iteration,)
iteration += 1
all_examples[name] = examples
return all_examples
def load_dataset(self, dataset_type, merge=True):
datasets = self.load_dataset_separate(dataset_type)
if merge:
return CombinedDataset(list(datasets.values()))
return datasets
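# Usage sketch (hedged): "config.json" is a hypothetical path; the JSON is
# expected to provide "datasets", "features", "objectives" and optionally
# "wikidata_path" / "classification_path", with relative paths resolved against
# the config file's directory (see Config.load above).
def _example_load_config(path="config.json"):
    config = Config.load(path)
    train_set = config.load_dataset("train", merge=True)
    dev_sets = config.load_dataset("dev", merge=False)
    return config, train_set, dev_sets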
def boolean_argument(parser, name, default):
parser.add_argument("--" + name, action="store_true", default=default)
parser.add_argument("--no" + name, action="store_false", dest=name)
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--anneal_rate', type=float, default=0.99)
parser.add_argument('--clip_norm', type=float, default=-1)
parser.add_argument('--weight_noise', type=float, default=0.0)
parser.add_argument('--hidden_sizes', type=int, nargs="*", default=[200, 200])
parser.add_argument('--load_dir', type=str, default=None)
parser.add_argument('--restore_input_features', type=str, default=None)
parser.add_argument('--improvement_key', type=str, default="token_correct")
parser.add_argument('--freeze_rate', type=float, default=1.0)
parser.add_argument('--freeze_rate_anneal', type=float, default=0.8)
parser.add_argument('--save_dir', type=str, default=None)
parser.add_argument('--max_epochs', type=int, default=1000)
parser.add_argument('--test_every', type=int, default=10000,
help="Number of training iterations after which testing should occur.")
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--max_patience', type=int, default=10)
parser.add_argument('--class_weights_clipval', type=float, default=1000.0)
parser.add_argument('--device', type=str, default="gpu:0")
parser.add_argument('--keep_prob', type=float, default=0.5)
parser.add_argument('--input_keep_prob', type=float, default=0.7)
parser.add_argument('--solver', type=str, default="adam",
choices=["adam", "sgd"])
parser.add_argument("--name", type=str, default="SequenceTagger")
parser.add_argument("--old_name", type=str, default=None)
boolean_argument(parser, "cudnn", True)
boolean_argument(parser, "faux_cudnn", False)
boolean_argument(parser, "class_weights", False)
boolean_argument(parser, "rebuild_graph", False)
boolean_argument(parser, "class_weights_normalize", False)
boolean_argument(parser, "fused", True)
boolean_argument(parser, "report_metrics_per_axis", True)
boolean_argument(parser, "report_class_f1", False)
return parser.parse_args(args=args)
def get_vocab(dataset, max_vocab=-1, extra_words=None):
index2word = []
occurrence = {}
for el in dataset:
if el not in occurrence:
index2word.append(el)
occurrence[el] = 1
else:
occurrence[el] += 1
index2word = sorted(index2word, key=lambda x: occurrence[x], reverse=True)
if max_vocab > 0:
index2word = index2word[:max_vocab]
if extra_words is not None:
index2word = extra_words + index2word
return index2word
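# Quick illustrative example: tokens are ordered by frequency, truncated to
# max_vocab, and any extra reserved symbols are prepended.
def _example_get_vocab():
    vocab = get_vocab(["a", "b", "a", "c", "a", "b"], max_vocab=2,
                      extra_words=["<UNK>"])
    assert vocab == ["<UNK>", "a", "b"]
    return vocab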
def get_objectives(objectives, dataset):
out = []
for obj_idx, objective in enumerate(objectives):
if "vocab" in objective:
with open(objective["vocab"], "rt") as fin:
vocab = fin.read().splitlines()
else:
vocab = get_vocab((w[obj_idx] for _, y in dataset for w in y if w[obj_idx] is not None), -1)
out.append(
{
"vocab": vocab,
"type": objective["type"],
"name": objective["name"]
}
)
return out
def merge_all_metrics(metrics):
out = {}
for key, metric in metrics.items():
for subkey, submetric in metric.items():
if len(key) > 0:
out[key + "_" + subkey] = submetric
if subkey not in out:
out[subkey] = submetric
else:
out[subkey] += submetric
else:
out[subkey] = submetric
return out
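# Illustrative example: per-dataset metrics are kept under prefixed keys while
# an unprefixed aggregate accumulates across datasets.
def _example_merge_all_metrics():
    merged = merge_all_metrics({
        "train": {"nll": 2.0, "nll_total": 4},
        "dev": {"nll": 1.0, "nll_total": 2},
    })
    assert merged["train_nll"] == 2.0 and merged["dev_nll"] == 1.0
    assert merged["nll"] == 3.0 and merged["nll_total"] == 6
    return merged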
def log_outcome(logger, outcome, step, name):
for k, v in sorted(outcome.items()):
if "total" in k:
continue
else:
total = outcome[k + "_total"]
if total == 0:
continue
logger.log(k, v / total, step=step)
logger.writer.flush()
def compute_f1(metrics, objectives, report_class_f1):
total_f1 = 0.0
total_precision = 0.0
total_recall = 0.0
total = 0
for objective in objectives:
name = objective["name"]
key = "%s_true_positives" % (name,)
if key not in metrics:
continue
tp = metrics[key]
fp = metrics["%s_false_positives" % (name,)]
fn = metrics["%s_false_negatives" % (name,)]
del metrics[key]
del metrics["%s_false_positives" % (name,)]
del metrics["%s_false_negatives" % (name,)]
precision = 1.* tp / np.maximum((tp + fp), 1e-6)
recall = 1. * tp / np.maximum((tp + fn), 1e-6)
f1 = 2.0 * precision * recall / np.maximum((precision + recall), 1e-6)
support = tp + fn
full_f1 = np.average(f1, weights=support) * 100.0
full_recall = np.average(recall, weights=support) * 100.0
full_precision = np.average(precision, weights=support) * 100.0
total_f1 += full_f1
total_recall += full_recall
total_precision += full_precision
total += 1
if report_class_f1:
print("F1 %s: %r" % (name, full_f1))
print("Name\tF1\tTP\tFP\tFN")
            rows = [(label, f1_val, val_tp, val_fp, val_fn)
                    for label, has_support, f1_val, val_tp, val_fp, val_fn
                    in zip(objective["vocab"], support > 0, f1, tp, fp, fn)
                    if has_support]
for val, f1_val, val_tp, val_fp, val_fn in rows:
print("%s\t%r\t%d\t%d\t%d" % (
val, f1_val, val_tp, val_fp, val_fn))
print("")
if total > 0:
metrics["F1"] = total_f1
metrics["recall"] = total_recall
metrics["precision"] = total_precision
metrics["F1_total"] = total
metrics["recall_total"] = total
metrics["precision_total"] = total
def accuracy(model, session, datasets, batch_size, train,
report_metrics_per_axis, report_class_f1,
callback=None,
callback_period=None, writer=None):
pbar = get_progress_bar("train" if train else "validation", item="batches")
if not isinstance(datasets, dict):
datasets = {'':datasets}
all_metrics_agg = {}
if callback is not None:
if callback_period is None:
raise ValueError("callback_period cannot be None if "
"callback is used.")
else:
callback_period = None
if train:
train_op = model.train_op
else:
train_op = model.noop
is_training = model.is_training
metrics = {"nll": model.nll, "nll_total": model.nll_total}
summaries = []
if not train:
metric_iter = zip(
model.objectives,
model.token_correct,
model.token_correct_total,
model.sentence_correct,
model.sentence_correct_total,
model.true_positives,
model.false_positives,
model.false_negatives
)
for metric_vars in metric_iter:
(
objective,
token_correct,
token_correct_total,
sentence_correct,
sentence_correct_total,
true_positives,
false_positives,
false_negatives
) = metric_vars
name = objective["name"]
if report_metrics_per_axis:
metrics["%s_token_correct" % (name,)] = token_correct
metrics["%s_token_correct_total" % (name,)] = token_correct_total
metrics["%s_sentence_correct" % (name,)] = sentence_correct
metrics["%s_sentence_correct_total" % (name,)] = sentence_correct_total
if true_positives is not None:
metrics["%s_true_positives" % (name,)] = true_positives
metrics["%s_false_positives" % (name,)] = false_positives
metrics["%s_false_negatives" % (name,)] = false_negatives
metrics["token_correct"] = model.token_correct_all
metrics["token_correct_total"] = model.token_correct_all_total
metrics["sentence_correct"] = model.sentence_correct_all
metrics["sentence_correct_total"] = model.sentence_correct_all_total
summaries = []
else:
if writer is not None and model.train_summaries is not None:
summaries = model.train_summaries
metrics_values = [v for _, v in sorted(metrics.items())]
metrics_names = [name for name, _ in sorted(metrics.items())]
outputs_val = [train_op, model.global_step, summaries, metrics_values]
for title, dataset in datasets.items():
batches = iter_batches_single_threaded(
model=model,
dataset=dataset,
batch_size=batch_size,
train=train,
pbar=pbar
)
metrics_agg = {}
iteration = 0
for feed_dict in batches:
feed_dict[is_training] = train
_, step, summary_out, outputs = session.run(outputs_val, feed_dict)
if writer is not None:
writer.add_summary(summary_out, step)
for key, value in zip(metrics_names, outputs[:len(metrics_names)]):
if key not in metrics_agg:
metrics_agg[key] = value
else:
metrics_agg[key] += value
iteration += 1
if callback_period is not None and iteration % callback_period == 0:
callback(iteration)
if np.isnan(metrics_agg['nll']):
print("loss is NaN.", flush=True, file=sys.stderr)
sys.exit(1)
compute_f1(metrics_agg, model.objectives, report_class_f1)
all_metrics_agg[title] = metrics_agg
del batches
return merge_all_metrics(all_metrics_agg)
def present_outcome(outcome, epoch, name):
string_rows = []
for k, v in sorted(outcome.items()):
if "total" in k:
continue
else:
total = outcome[k + "_total"]
if total == 0:
continue
if "correct" in k:
string_rows.append(
[
k,
"%.2f%%" % (100.0 * v / total),
"(%d correct / %d)" % (v, total)
]
)
else:
string_rows.append(
[
k,
"%.3f" % (v / total),
""
]
)
max_len_cols = [
max(len(row[colidx]) for row in string_rows)
for colidx in range(len(string_rows[0]))
] if len(string_rows) > 0 else []
rows = []
for row in string_rows:
rows.append(
" ".join(
[col + " " * (max_len_cols[colidx] - len(col))
for colidx, col in enumerate(row)]
)
)
return "\n".join(["Epoch {epoch}: {name}".format(epoch=epoch, name=name)] + rows)
def print_outcome(outcome, objectives, epoch, step, name, logger=None):
outcome_report = present_outcome(outcome, epoch, name)
if logger is not None:
log_outcome(logger, outcome, step, name)
print(outcome_report)
class SequenceTagger(object):
def __init__(self, path, device="gpu", faux_cudnn=False, rebuild_graph=False):
tf.reset_default_graph()
session_conf = tf.ConfigProto(
allow_soft_placement=True
)
self.session = tf.InteractiveSession(config=session_conf)
with tf.device(device):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
self._model = SequenceModel.load(
self.session,
path,
args=None,
verbose=False,
trainable=False,
rebuild_graph=rebuild_graph,
faux_cudnn=faux_cudnn
)
@property
def objectives(self):
return self._model.objectives
def predict_proba(self, tokens):
blank_labels = tuple(None for _ in self._model.objectives)
batches = list(iter_batches_single_threaded(
model=self._model,
dataset=[
(tokens, [blank_labels for t in tokens])
],
batch_size=1,
train=False,
autoresize=False
))
outputs = []
batches[0][self._model.is_training] = False
probs_out = self._model.predict_proba(
self.session, batches[0]
)
return probs_out
def predict_proba_sentences(self, sentences):
blank_labels = tuple(None for _ in self._model.objectives)
batches = iter_batches_single_threaded(
model=self._model,
dataset=[
(sentence, [blank_labels for t in sentence])
for sentence in sentences
],
batch_size=min(256, len(sentences)),
train=False,
autoresize=False
)
for batch in batches:
batch[self._model.is_training] = False
yield self._model.predict_proba(
self.session, batch
)
def predict_topk_sentences(self, sentences, k=5):
blank_labels = tuple(None for _ in self._model.objectives)
batches = iter_batches_single_threaded(
model=self._model,
dataset=[
(sentence, [blank_labels for t in sentence])
for sentence in sentences
],
batch_size=min(256, len(sentences)),
train=False,
autoresize=False
)
for batch in batches:
outputs = self._model.predict_proba(
self.session, batch
)
named_outputs = {}
for objective in self._model.objectives:
obj_name = objective["name"]
tags, scores = outputs[obj_name]
if objective["type"] == "crf":
named_outputs[obj_name] = [
[(token, [objective["vocab"][tag]], [score]) for token, tag in zip(tokens, tags)]
for tokens, tags, score in zip(sentences, tags, scores)
]
elif objective["type"] == 'softmax':
all_sent_scores = []
for tokens, scores in zip(sentences, scores):
sent_scores = []
for token, token_scores in zip(tokens, scores):
topk = np.argsort(token_scores)[::-1][:k]
sent_scores.append(
(
token,
[objective["vocab"][idx] for idx in topk],
[token_scores[idx] for idx in topk]
)
)
all_sent_scores.append(sent_scores)
named_outputs[obj_name] = all_sent_scores
else:
raise ValueError("unknown objective type %r." % (objective["type"],))
yield named_outputs
def tag_sentences(self, sentences):
if len(sentences) == 0:
return {
objective["name"]: []
for objective in self._model.objectives
}
blank_labels = tuple(None for _ in self._model.objectives)
batches = list(iter_batches_single_threaded(
self._model,
[
(sentence, [blank_labels for t in sentence])
for sentence in sentences
],
batch_size=min(256, len(sentences)),
train=False,
autoresize=False
))
named_outputs = {}
sentence_idx = 0
for batch in batches:
outputs = self._model.predict(self.session, batch)
for objective in self._model.objectives:
obj_name = objective["name"]
if obj_name not in named_outputs:
named_outputs[obj_name] = []
tags, scores = outputs[obj_name]
nsentences = len(tags)
if objective["type"] == "crf":
named_outputs[obj_name].extend([
[(token, objective["vocab"][tag], score) for token, tag in zip(tokens, tags)]
for tokens, tags, score in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores)
])
elif objective["type"] == 'softmax':
named_outputs[obj_name].extend([
[(token, objective["vocab"][tag], score)
for token, tag, score in zip(tokens, tags, scores)]
for tokens, tags, scores in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores)
])
else:
raise ValueError("unknown objective type %r." % (objective["type"],))
sentence_idx += nsentences
return named_outputs
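# Illustrative usage sketch for SequenceTagger (hypothetical model path, not
# executed on import):
#   tagger = SequenceTagger("/path/to/saved/model", device="cpu")
#   tagged = tagger.tag_sentences([["The", "president", "visited", "Paris"]])
#   # `tagged` maps each objective name to one list per sentence of
#   # (token, predicted label, score) triples.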
def count_number_of_parameters():
return int(sum([np.prod(var.get_shape().as_list())
for var in tf.trainable_variables()]))
class TestCallback(object):
def __init__(self, model, session, dataset, epoch, args, logger):
self.model = model
self.session = session
self.dataset = dataset
self.epoch = epoch
self.args = args
self.logger = logger
self.report_metrics_per_axis = args.report_metrics_per_axis
self.report_class_f1 = args.report_class_f1
def test(self, iteration):
dev_outcome = accuracy(self.model, self.session, self.dataset, self.args.batch_size,
train=False, report_metrics_per_axis=self.report_metrics_per_axis,
report_class_f1=self.report_class_f1)
print_outcome(dev_outcome, self.model.objectives,
epoch="{}-{}".format(self.epoch, iteration),
step=self.session.run(self.model.global_step),
name="validation",
logger=self.logger
)
if self.args.save_dir is not None:
self.model.save(self.session, self.args.save_dir)
def compute_epoch(session, model, train_set,
validation_set, test_callback, epoch,
train_writer, test_writer,
args):
test_callback.epoch = epoch
train_outcome = accuracy(model,
session,
train_set,
args.batch_size,
train=True,
callback_period=args.test_every,
writer=train_writer.writer if train_writer is not None else None,
report_metrics_per_axis=args.report_metrics_per_axis,
report_class_f1=args.report_class_f1,
callback=test_callback.test)
global_step = session.run(model.global_step)
print_outcome(train_outcome,
model.objectives,
epoch=epoch,
name="train",
step=global_step,
logger=train_writer)
dev_outcome = accuracy(
model, session, validation_set, args.batch_size,
train=False,
report_metrics_per_axis=args.report_metrics_per_axis,
report_class_f1=args.report_class_f1)
print_outcome(dev_outcome,
model.objectives,
epoch=epoch,
step=global_step,
name="validation",
logger=test_writer)
if args.save_dir is not None:
model.save(session, args.save_dir)
return dev_outcome
def main():
args = parse_args()
config = Config.load(args.config)
validation_set = config.load_dataset("dev", merge=False)
session_conf = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=session_conf) as session, tf.device(args.device):
if args.load_dir is not None:
model = SequenceModel.load(session, args.load_dir,
args=args, rebuild_graph=args.rebuild_graph, faux_cudnn=args.faux_cudnn,
replace_to=args.name,
replace_from=args.old_name)
dev_outcome = accuracy(
model, session, validation_set, args.batch_size, train=False,
report_metrics_per_axis=args.report_metrics_per_axis,
report_class_f1=args.report_class_f1)
print_outcome(dev_outcome,
model.objectives, 0,
name="loaded validation",
step=session.run(model.global_step),
logger=None)
# dev_outcome = None
if args.rebuild_graph and args.save_dir is not None:
model.save(session, args.save_dir)
train_set = config.load_dataset("train")
else:
# load classes and index2word from a file.
dev_outcome = None
train_set = config.load_dataset("train")
model = SequenceModel(
objectives=get_objectives(config.objectives, train_set),
features=config.features,
feature_index2words=get_feature_vocabs(config.features, train_set, ["<UNK>"]),
lr=args.lr,
anneal_rate=args.anneal_rate,
weight_noise=args.weight_noise,
freeze_rate=args.freeze_rate,
freeze_rate_anneal=args.freeze_rate_anneal,
clip_norm=args.clip_norm,
hidden_sizes=args.hidden_sizes,
solver=args.solver,
fused=args.fused,
class_weights_normalize=args.class_weights_normalize,
class_weights=args.class_weights,
class_weights_clipval=args.class_weights_clipval,
keep_prob=args.keep_prob,
input_keep_prob=args.input_keep_prob,
name=args.name,
cudnn=args.cudnn,
faux_cudnn=args.faux_cudnn,
create_variables=True)
session.run(tf.global_variables_initializer())
if args.restore_input_features is not None:
restore_session(
session, args.restore_input_features,
verbose=True,
use_metagraph=False,
only_features=True)
print("Model has {} trainable parameters.".format(count_number_of_parameters()), flush=True)
best_dev_score = 0.0
patience = 0
best_epoch = 0
best_outcome = None
improvement_key = args.improvement_key
if dev_outcome is not None:
best_dev_score = dev_outcome[improvement_key]
best_epoch = -1
best_outcome = dev_outcome
if args.save_dir is not None:
train_writer = Logger(session, tf.summary.FileWriter(join(args.save_dir, "train")))
test_writer = Logger(session, tf.summary.FileWriter(join(args.save_dir, "test")))
else:
train_writer, test_writer = None, None
test_callback = TestCallback(model,
session,
validation_set,
-1,
args,
logger=test_writer)
if len(train_set) > 0:
train_set.set_randomize(True)
train_set.set_rng(model.rng)
for epoch in range(args.max_epochs):
dev_outcome = compute_epoch(
session, model,
train_set=train_set, validation_set=validation_set,
epoch=epoch, test_callback=test_callback,
train_writer=train_writer,
test_writer=test_writer,
args=args)
if dev_outcome[improvement_key] > best_dev_score:
best_dev_score = dev_outcome[improvement_key]
best_epoch = epoch
best_outcome = dev_outcome
patience = 0
if args.save_dir is not None:
model.save(session, join(args.save_dir, "best"))
else:
patience += 1
if patience >= args.max_patience:
print("No improvements for {} epochs. Stopping.".format(args.max_patience))
break
del dev_outcome
print_outcome(
best_outcome,
model.objectives,
epoch=best_epoch,
name="validation-best",
step=session.run(model.global_step),
logger=None)
if __name__ == "__main__":
main()
|
import numpy as np
import subprocess
import h5py
import ciseau
from os.path import exists, splitext, join
from wikidata_linker_utils.wikidata_ids import load_wikidata_ids
def count_examples(lines, comment, ignore_value, column_indices):
example_length = 0
has_labels = False
found = 0
for line in lines:
if len(line) == 0 or (comment is not None and line.startswith(comment)):
if example_length > 0 and has_labels:
found += 1
example_length = 0
has_labels = False
else:
example_length += 1
if not has_labels:
cols = line.split("\t")
if len(cols) > 1:
if ignore_value is not None:
for col_index in column_indices:
if cols[col_index] != ignore_value:
has_labels = True
break
else:
has_labels = True
if example_length > 0 and has_labels:
found += 1
return found
def retokenize_example(x, y):
tokens = ciseau.tokenize(" ".join(w for w in x),
normalize_ascii=False)
out_y = []
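    # Walk the ciseau tokens and the original whitespace tokens in parallel by
    # character offset, copying each original token's label onto every
    # retokenized token that falls inside its span.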
regular_cursor = 0
tokens_length_total = 0
regular_length_total = len(x[regular_cursor]) + 1 if len(x) > 0 else 0
if regular_cursor + 1 == len(x):
regular_length_total -= 1
for i in range(len(tokens)):
tokens_length_total = tokens_length_total + len(tokens[i])
while regular_length_total < tokens_length_total:
regular_cursor += 1
regular_length_total = regular_length_total + len(x[regular_cursor]) + 1
if regular_cursor + 1 == len(x):
regular_length_total -= 1
out_y.append(y[regular_cursor])
assert(regular_cursor + 1 == len(x)), "error with %r" % (x,)
return ([tok.rstrip() for tok in tokens], out_y)
def convert_lines_to_examples(lines, comment, ignore_value,
column_indices, x_column, empty_column,
retokenize=False):
examples = []
x = []
y = []
for line in lines:
if len(line) == 0 or (comment is not None and line.startswith(comment)):
if len(x) > 0:
if not all(row == empty_column for row in y):
examples.append((x, y))
x = []
y = []
else:
cols = line.split("\t")
x.append(cols[x_column])
if len(cols) == 1:
y.append(empty_column)
else:
if ignore_value is not None:
y.append(
tuple(
cols[col_index] if col_index is not None and cols[col_index] != ignore_value else None
for col_index in column_indices
)
)
else:
y.append(
tuple(
cols[col_index] if col_index is not None else None
for col_index in column_indices
)
)
if len(x) > 0 and not all(row == empty_column for row in y):
examples.append((x, y))
if retokenize:
examples = [retokenize_example(x, y) for x, y in examples]
return examples
def load_tsv(path, x_column, y_columns, objective_names, comment, ignore_value,
retokenize):
""""
Deprecated method for loading a tsv file as a training/test set for a model.
Arguments:
----------
path: str, location of tsv file
x_column: int
y_columns: list<dict>, objectives in this file along with their column.
            (e.g. `y_columns=[{"objective": "POS", "column": 2}, ...]`)
objective_names: name of all desired columns
comment: line beginning indicating it's okay to skip
ignore_value: label value that should be treated as missing
retokenize: run tokenizer again.
Returns
-------
list<tuple> : examples loaded into memory
Note: can use a lot of memory since entire file is loaded.
"""
objective2column = {col['objective']: col['column'] for col in y_columns}
column_indices = [objective2column.get(name, None) for name in objective_names]
empty_column = tuple(None for _ in objective_names)
if all(col_index is None for col_index in column_indices):
return []
with open(path, "rt") as fin:
lines = fin.read().splitlines()
return convert_lines_to_examples(lines,
ignore_value=ignore_value,
empty_column=empty_column,
x_column=x_column,
column_indices=column_indices,
comment=comment,
retokenize=retokenize)
class RandomizableDataset(object):
def set_rng(self, rng):
self.rng = rng
def set_randomize(self, randomize):
self.randomize = randomize
def set_ignore_y(self, ignore):
self.ignore_y = ignore
class TSVDataset(RandomizableDataset):
_fhandle = None
_fhandle_position = 0
_examples = None
_example_indices = None
_example_index = 0
_eof = False
ignore_y = False
def __init__(self, path, x_column, y_columns, objective_names, comment, ignore_value,
retokenize=False, chunksize=50000000, randomize=False, rng=None):
""""
Arguments:
----------
path: str, location of tsv file
x_column: int
y_columns: list<dict>, objectives in this file along with their column.
            (e.g. `y_columns=[{"objective": "POS", "column": 2}, ...]`)
objective_names: name of all desired columns
comment: line beginning indicating it's okay to skip
ignore_value: label value that should be treated as missing
chunksize: how many bytes to read from the file at a time.
rng: numpy RandomState
retokenize: run tokenizer on x again.
"""
self.path = path
self.randomize = randomize
self.x_column = x_column
self.y_columns = y_columns
self.objective_names = objective_names
self.comment = comment
self.ignore_value = ignore_value
self.retokenize = retokenize
self.chunksize = chunksize
if rng is None:
rng = np.random.RandomState(0)
self.rng = rng
# column picking setup:
objective2column = {col['objective']: col['column'] for col in y_columns}
self.column_indices = [objective2column.get(name, None) for name in objective_names]
self.empty_column = tuple(None for _ in objective_names)
if all(col_index is None for col_index in self.column_indices):
self.length = 0
else:
self._compute_length()
def _signature(self):
try:
file_sha1sum = subprocess.check_output(
["sha1sum", self.path], universal_newlines=True
).split(" ")[0]
except FileNotFoundError:
file_sha1sum = subprocess.check_output(
["shasum", self.path], universal_newlines=True
).split(" ")[0]
sorted_cols = list(
map(
str,
sorted(
[col for col in self.column_indices if col is not None]
)
)
)
return "-".join([file_sha1sum] + sorted_cols)
def _compute_length(self):
length_file = (
splitext(self.path)[0] +
"-length-" +
self._signature() + ".txt"
)
if exists(length_file):
with open(length_file, "rt") as fin:
total = int(fin.read())
else:
total = 0
while True:
total += self._count_examples()
if self._eof:
break
with open(length_file, "wt") as fout:
fout.write(str(total) + "\n")
self.length = total
def __len__(self):
return self.length
def close(self):
if self._fhandle is not None:
self._fhandle.close()
self._fhandle = None
self._fhandle_position = 0
self._eof = False
self._examples = None
self._example_indices = None
def __del__(self):
self.close()
def _read_file_until_newline(self):
if self._fhandle is None:
self._fhandle = open(self.path, "rb")
if self._eof:
self._fhandle_position = 0
self._fhandle.seek(0)
self._eof = False
read_chunk = None
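        # Read the file in `chunksize` blocks and only stop on a blank line
        # ("\n\n") or EOF, so a chunk never ends in the middle of an example.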
while True:
new_read_chunk = self._fhandle.read(self.chunksize)
if read_chunk is None:
read_chunk = new_read_chunk
else:
read_chunk += new_read_chunk
if len(new_read_chunk) < self.chunksize:
del new_read_chunk
self._fhandle_position += len(read_chunk)
self._eof = True
break
else:
del new_read_chunk
newline_pos = read_chunk.rfind(b"\n\n")
if newline_pos != -1:
# move to last line end position (so that we don't get
# half an example.)
self._fhandle.seek(self._fhandle_position + newline_pos + 2)
self._fhandle_position += newline_pos + 2
read_chunk = read_chunk[:newline_pos]
break
return read_chunk
def _count_examples(self):
read_chunk = self._read_file_until_newline()
return count_examples(
read_chunk.decode("utf-8").splitlines(),
ignore_value=self.ignore_value,
column_indices=self.column_indices,
comment=self.comment
)
def _load_examples(self):
read_chunk = self._read_file_until_newline()
if self._examples is not None:
del self._examples
self._examples = convert_lines_to_examples(
read_chunk.decode("utf-8").splitlines(),
ignore_value=self.ignore_value,
empty_column=self.empty_column,
x_column=self.x_column,
column_indices=self.column_indices,
comment=self.comment,
retokenize=self.retokenize
)
self._example_indices = np.arange(len(self._examples))
if self.randomize:
# access loaded data randomly:
self.rng.shuffle(self._example_indices)
self._example_index = 0
def __getitem__(self, index):
"""Retrieve the next example (index is ignored)"""
if index >= self.length:
raise StopIteration()
if self._example_indices is None or self._example_index == len(self._example_indices):
self._load_examples()
while len(self._examples) == 0:
self._load_examples()
if len(self._examples) > 0:
break
if self._eof:
raise StopIteration()
ex = self._examples[self._example_indices[self._example_index]]
self._example_index += 1
return ex
def set_randomize(self, randomize):
if randomize != self.randomize:
self.randomize = randomize
class OracleClassification(object):
def __init__(self, classes, classification, path):
self.classes = classes
self.classification = classification
self.path = path
self.contains_other = self.classes[-1] == "other"
def classify(self, index):
return self.classification[index]
def load_oracle_classification(path):
with open(join(path, "classes.txt"), "rt", encoding="UTF-8") as fin:
classes = fin.read().splitlines()
classification = np.load(join(path, "classification.npy"))
return OracleClassification(classes, classification, path)
class ClassificationHandler(object):
def __init__(self, wikidata_path, classification_path):
self.classification_path = classification_path
_, self.name2index = load_wikidata_ids(wikidata_path, verbose=False)
self.classifiers = {}
def get_classifier(self, name):
if name not in self.classifiers:
self.classifiers[name] = load_oracle_classification(
join(self.classification_path, name)
)
return self.classifiers[name]
class H5Dataset(RandomizableDataset):
handle_open = False
ignore_y = False
_max_generated_example = 0
_min_generated_example = 0
def __init__(self, path, x_column, y_columns, objective_names,
classifications, ignore_value, randomize=False, rng=None):
self.x_column = str(x_column)
self.y_columns = y_columns
self.ignore_value = ignore_value
self.objective_names = objective_names
self.randomize = randomize
if rng is None:
rng = np.random.RandomState(0)
self.rng = rng
self._classifications = classifications
self.handle = h5py.File(path, "r")
self.path = path
self.handle_open = True
self.length = len(self.handle[self.x_column])
self.chunksize = self.handle[self.x_column].chunks[0]
self._example_indices = None
objective2column = {
col['objective']: (
str(col['column']),
self._classifications.get_classifier(col['classification'])
) for col in y_columns
}
if self.ignore_value is not None:
for _, classifier in objective2column.values():
if self.ignore_value in classifier.classes:
classifier.classes[classifier.classes.index(self.ignore_value)] = None
self.column2col_indices = {}
for col_idx, name in enumerate(self.objective_names):
if name not in objective2column:
continue
column, classifier = objective2column[name]
if column not in self.column2col_indices:
self.column2col_indices[column] = [(classifier, col_idx)]
else:
self.column2col_indices[column].append((classifier, col_idx))
def close(self):
if self.handle_open:
self.handle.close()
self.handle_open = False
def __del__(self):
self.close()
def __len__(self):
return self.length
def _build_examples(self, index):
x = [x_chunk.split("\n") for x_chunk in self.handle[self.x_column][index:index + self.chunksize]]
y = [[[None for k in range(len(self.objective_names))] for j in range(len(x[i]))] for i in range(len(x))]
if not self.ignore_y:
for handle_column, col_content in self.column2col_indices.items():
col_ids = [[self._classifications.name2index[name] if name != "" else None
for name in y_chunk.split("\n")]
for y_chunk in self.handle[handle_column][index:index + self.chunksize]]
for i in range(len(col_ids)):
for j, idx in enumerate(col_ids[i]):
if idx is not None:
for classifier, k in col_content:
y[i][j][k] = classifier.classify(idx)
return x, y
def set_randomize(self, randomize):
if self.randomize != randomize:
self.randomize = randomize
if self._max_generated_example != self._min_generated_example:
self.xorder = np.arange(self._min_generated_example, self._max_generated_example)
self.rng.shuffle(self.xorder)
def __getitem__(self, index):
if index >= len(self):
raise StopIteration()
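        # Randomization happens at two levels: whole HDF5 chunks are visited in
        # a shuffled order, and rows inside the currently decoded chunk are
        # shuffled again via `self.xorder`.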
if self.randomize:
if self._example_indices is None or index == 0:
self._example_indices = np.arange(0, len(self), self.chunksize)
self.rng.shuffle(self._example_indices)
# transformed index:
index = (self._example_indices[index // self.chunksize] + (index % self.chunksize)) % len(self)
if index < self._min_generated_example or index >= self._max_generated_example:
self.x, self.y = self._build_examples(index)
# store bounds of generated data:
self._min_generated_example = index
self._max_generated_example = index + len(self.x)
if self.randomize:
self.xorder = np.arange(self._min_generated_example, self._max_generated_example)
self.rng.shuffle(self.xorder)
if self.randomize:
index = self.xorder[index - self._min_generated_example]
return self.x[index - self._min_generated_example], self.y[index - self._min_generated_example]
class CombinedDataset(object):
_which_dataset = None
_dataset_counters = None
def set_rng(self, rng):
self.rng = rng
for dataset in self.datasets:
dataset.rng = rng
def set_randomize(self, randomize):
self.randomize = randomize
for dataset in self.datasets:
dataset.set_randomize(randomize)
def set_ignore_y(self, ignore):
for dataset in self.datasets:
dataset.set_ignore_y(ignore)
def close(self):
for dataset in self.datasets:
dataset.close()
def _build_which_dataset(self):
self._which_dataset = np.empty(self.length, dtype=np.int16)
self._dataset_counters = np.zeros(len(self.datasets), dtype=np.int64)
offset = 0
for index, dataset in enumerate(self.datasets):
# ensure each dataset is seen as much as its content
# says:
self._which_dataset[offset:offset + len(dataset)] = index
offset += len(dataset)
def __getitem__(self, index):
if index == 0:
if self.randomize:
# visit datasets in random orders:
self.rng.shuffle(self._which_dataset)
self._dataset_counters[:] = 0
which = self._which_dataset[index]
idx = self._dataset_counters[which]
self._dataset_counters[which] += 1
return self.datasets[which][idx]
def __init__(self, datasets, rng=None, randomize=False):
self.datasets = datasets
if rng is None:
rng = np.random.RandomState(0)
self.set_rng(rng)
self.set_randomize(randomize)
self.length = sum(len(dataset) for dataset in datasets)
self._build_which_dataset()
def __len__(self):
return self.length
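# Illustrative sketch (hypothetical member datasets): CombinedDataset presents
# several datasets as one, e.g.
#   combined = CombinedDataset([tsv_dataset, h5_dataset], randomize=True)
#   first_example = combined[0]   # drawn from whichever dataset is visited first
#   total = len(combined)         # sum of the member dataset lengths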
|
import queue
import threading
def prefetch_generator(generator, to_fetch=10):
q = queue.Queue(maxsize=to_fetch)
def thread_worker(queue, gen):
for val in gen:
queue.put(val)
queue.put(None)
t = threading.Thread(target=thread_worker, args=(q, generator))
some_exception = None
try:
t.start()
while True:
job = q.get()
if job is None:
break
yield job
del job
# print("q.qsize() %d" % (q.qsize(),), flush=True)
except Exception as e:
some_exception = e
finally:
if some_exception is not None:
raise some_exception
t.join()
del t
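# Illustrative sketch: prefetch_generator runs the wrapped generator in a
# background thread so the next value is being produced while the current one
# is consumed, e.g.
#   for value in prefetch_generator((i * i for i in range(10)), to_fetch=4):
#       print(value)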
|
"""
Obtain a learnability score for each type axis.
Trains a binary classifier for each type and
gets its AUC.
Usage
-----
```
python3 evaluate_learnability.py --dataset sample_data.tsv --out report.json --wikidata /path/to/wikidata
```
"""
import json
import time
import argparse
from os.path import dirname, realpath, join
SCRIPT_DIR = dirname(realpath(__file__))
import numpy as np
import tensorflow as tf
from sklearn import metrics
from collections import Counter
from wikidata_linker_utils.type_collection import TypeCollection, offset_values_mask
import wikidata_linker_utils.wikidata_properties as wprop
from wikidata_linker_utils.progressbar import get_progress_bar
from generator import prefetch_generator
def learnability(collection, lines, mask, truth_tables, qids, id2pos,
epochs=5, batch_size=128, max_dataset_size=-1,
max_vocab_size=10000, hidden_sizes=None, lr=0.001,
window_size=5, input_size=5, keep_prob=0.5,
verbose=True):
if hidden_sizes is None:
hidden_sizes = []
tf.reset_default_graph()
dset = list(get_windows(lines, mask, window_size, truth_tables, lambda x: id2pos[x]))
if max_dataset_size > 0:
dset = dset[:max_dataset_size]
pos_num = np.zeros(len(qids))
for _, labels in dset:
pos_num += labels
neg_num = np.ones(len(qids)) * len(dset) - pos_num
pos_weight = (pos_num / (pos_num + neg_num))[None, :]
vocab = ["<UNK>"] + [w for w, _ in Counter(lines[:, 0]).most_common(max_vocab_size)]
inv_vocab = {w: k for k, w in enumerate(vocab)}
with tf.device("gpu"):
W = tf.get_variable(
"W", shape=[len(vocab), input_size],
dtype=tf.float32,
initializer=tf.random_normal_initializer()
)
indices = tf.placeholder(tf.int32, [None, window_size*2], name="indices")
labels = tf.placeholder(tf.bool, [None, len(qids)], name="label")
keep_prob_pholder = tf.placeholder_with_default(keep_prob, [])
lookup = tf.reshape(tf.nn.embedding_lookup(
W, indices
), [tf.shape(indices)[0], input_size * window_size*2])
lookup = tf.nn.dropout(lookup, keep_prob_pholder)
hidden = lookup
for layer_idx, hidden_size in enumerate(hidden_sizes):
hidden = tf.contrib.layers.fully_connected(
hidden,
num_outputs=hidden_size,
scope="FC%d" % (layer_idx,)
)
out = tf.contrib.layers.fully_connected(
hidden,
num_outputs=len(qids),
activation_fn=None)
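        # Each type (qid) is a binary classification head; the sigmoid
        # cross-entropy below is weighted by the inverse of the positive and
        # negative frequencies stored in `pos_weight`, so rare types are not
        # drowned out by the majority class.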
cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=out, labels=tf.cast(labels, tf.float32))
cost = tf.where(tf.is_finite(cost), cost, tf.zeros_like(cost))
cost_mean = tf.reduce_mean(
(tf.cast(labels, tf.float32) * 1.0 / (pos_weight)) * cost +
(tf.cast(tf.logical_not(labels), tf.float32) * 1.0 / (1.0 - pos_weight)) * cost
)
cost_sum = tf.reduce_sum(cost)
size = tf.shape(indices)[0]
noop = tf.no_op()
correct = tf.reduce_sum(tf.cast(tf.equal(tf.greater_equal(out, 0), labels), tf.int32), 0)
out_activated = tf.sigmoid(out)
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost_mean)
session = tf.InteractiveSession()
session.run(tf.global_variables_initializer())
def accuracy(dataset, batch_size, train):
epoch_correct = np.zeros(len(qids))
epoch_nll = 0.0
epoch_total = np.zeros(len(qids))
op = train_op if train else noop
all_labels = []
all_preds = []
for i in get_progress_bar("train" if train else "dev", item="batches")(range(0, len(dataset), batch_size)):
batch_labels = [label for _, label in dataset[i:i+batch_size]]
csum, corr, num_examples, preds, _ = session.run([cost_sum, correct, size, out_activated, op],
feed_dict={
indices: [[inv_vocab.get(w, 0) for w in window] for window, _ in dataset[i:i+batch_size]],
labels: batch_labels,
keep_prob_pholder: keep_prob if train else 1.0
})
epoch_correct += corr
epoch_nll += csum
epoch_total += num_examples
all_labels.extend(batch_labels)
all_preds.append(preds)
return (epoch_nll, epoch_correct, epoch_total, np.vstack(all_preds), np.vstack(all_labels))
dataset_indices = np.arange(len(dset))
train_indices = dataset_indices[:int(0.8 * len(dset))]
dev_indices = dataset_indices[int(0.8 * len(dset)):]
train_dataset = [dset[idx] for idx in train_indices]
dev_dataset = [dset[idx] for idx in dev_indices]
learnability = []
for epoch in range(epochs):
t0 = time.time()
train_epoch_nll, train_epoch_correct, train_epoch_total, _, _ = accuracy(train_dataset, batch_size, train=True)
t1 = time.time()
if verbose:
print("epoch %d train: %.3f%% in %.3fs" % (
epoch, 100.0 * train_epoch_correct.sum() / train_epoch_total.sum(), t1 - t0),)
t0 = time.time()
dev_epoch_nll, dev_epoch_correct, dev_epoch_total, pred, y = accuracy(dev_dataset, batch_size, train=False)
t1 = time.time()
learnability = []
for qidx in range(len(qids)):
try:
fpr, tpr, thresholds = metrics.roc_curve(y[:,qidx], pred[:,qidx], pos_label=1)
auc = metrics.auc(fpr, tpr)
if not np.isnan(auc):
average_precision_score = metrics.average_precision_score(y[:,qidx], pred[:,qidx])
learnability.append((qids[qidx],
auc,
average_precision_score,
100.0 * dev_epoch_correct[qidx] / dev_epoch_total[qidx],
int(pos_num[qidx]),
int(neg_num[qidx])))
except ValueError:
continue
if verbose:
learnability = sorted(learnability, key=lambda x: x[1], reverse=True)
print("epoch %d dev: %.3fs" % (epoch, t1-t0))
for qid, auc, average_precision_score, acc, pos, neg in learnability:
print(" %r AUC: %.3f, APS: %.3f, %.3f%% positive: %d, negative: %d" % (
collection.ids[qid], auc, average_precision_score, acc, pos, neg))
print("")
return learnability
def generate_training_data(collection, path):
with open(path, "rt") as fin:
lines = [row.split("\t")[:2] for row in fin.read().splitlines()]
lines_arr = np.zeros((len(lines), 2), dtype=np.object)
mask = np.zeros(len(lines), dtype=np.bool)
for i, l in enumerate(lines):
lines_arr[i, 0] = l[0]
if len(l) > 1:
lines_arr[i, 1] = collection.name2index[l[1]]
mask[i] = True
return lines_arr, mask
def get_proposal_sets(collection, article_ids, seed):
np.random.seed(seed)
relation = collection.relation(wprop.CATEGORY_LINK)
relation_mask = offset_values_mask(relation.values, relation.offsets, article_ids)
counts = np.bincount(relation.values[relation_mask])
is_fp = collection.relation(wprop.FIXED_POINTS).edges() > 0
is_fp = is_fp[:counts.shape[0]]
counts = counts * is_fp
topfields_fp = np.argsort(counts)[::-1][:(counts > 0).sum()]
relation = collection.relation(wprop.INSTANCE_OF)
relation_mask = offset_values_mask(relation.values, relation.offsets, article_ids)
counts = np.bincount(relation.values[relation_mask])
topfields_instance_of = np.argsort(counts)[::-1][:(counts > 0).sum()]
np.random.shuffle(topfields_instance_of)
np.random.shuffle(topfields_fp)
return [(topfields_instance_of, wprop.INSTANCE_OF), (topfields_fp, wprop.CATEGORY_LINK)]
def build_truth_tables(collection, lines, qids, relation_name):
truth_tables = []
all_ids = list(sorted(set(lines[:, 1])))
id2pos = {idx: pos for pos, idx in enumerate(all_ids)}
for qid in qids:
truth_tables.append(collection.satisfy([relation_name], [qid])[all_ids])
collection.reset_cache()
truth_tables = np.stack(truth_tables, axis=1)
qid_sums = truth_tables.sum(axis=0)
kept_qids = []
kept_dims = []
for i, (qid, qid_sum) in enumerate(zip(qids, qid_sums)):
if qid_sum != 0 and qid_sum != truth_tables.shape[0]:
kept_qids.append(qid)
kept_dims.append(i)
truth_tables = truth_tables[:, kept_dims]
return truth_tables, kept_qids, id2pos
def get_windows(lines, mask, window, truth_table, id_mapper):
for i in np.where(mask)[0]:
if i >= window and i < len(lines) - window:
yield (lines[max(0, i - window):i + window, 0],
truth_table[id_mapper(lines[i, 1])])
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--max_epochs", type=int, default=2)
parser.add_argument("--max_vocab_size", type=int, default=10000)
parser.add_argument("--simultaneous_fields", type=int, default=512)
parser.add_argument("--window_size", type=int, default=5)
parser.add_argument("--input_size", type=int, default=5)
parser.add_argument("--wikidata", type=str, required=True)
parser.add_argument("--out", type=str, required=True)
return parser.parse_args()
def generate_truth_tables(collection, lines_arr, proposal_sets, simultaneous_fields):
for topfields, relation_name in proposal_sets:
for i in range(0, len(topfields), simultaneous_fields):
truth_tables, qids, id2pos = build_truth_tables(
collection,
lines_arr,
qids=topfields[i:i+simultaneous_fields],
relation_name=relation_name)
yield (topfields[i:i+simultaneous_fields],
relation_name,
truth_tables,
qids,
id2pos)
def main():
args = parse_args()
collection = TypeCollection(args.wikidata, num_names_to_load=0)
collection.load_blacklist(join(dirname(SCRIPT_DIR), "extraction", "blacklist.json"))
lines_arr, mask = generate_training_data(collection, args.dataset)
article_ids = np.array(list(set(lines_arr[:, 1])), dtype=np.int32)
proposal_sets = get_proposal_sets(collection, article_ids, args.seed)
report = []
total = sum(len(topfields) for topfields, _ in proposal_sets)
seen = 0
t0 = time.time()
data_source = generate_truth_tables(collection, lines_arr, proposal_sets,
args.simultaneous_fields)
for topfields, relation_name, truth_tables, qids, id2pos in prefetch_generator(data_source):
# for each of these properties and given relation
# construct the truth table for each item and discover
# their 'learnability':
seen += len(topfields)
field_auc_scores = learnability(
collection,
lines_arr,
mask,
qids=qids,
truth_tables=truth_tables,
id2pos=id2pos,
batch_size=args.batch_size,
epochs=args.max_epochs,
input_size=args.input_size,
window_size=args.window_size,
max_vocab_size=args.max_vocab_size,
verbose=True)
for qid, auc, average_precision_score, correct, pos, neg in field_auc_scores:
report.append(
{
"qid": collection.ids[qid],
"auc": auc,
"average_precision_score": average_precision_score,
"correct": correct,
"relation": relation_name,
"positive": pos,
"negative": neg
}
)
with open(args.out, "wt") as fout:
json.dump(report, fout)
t1 = time.time()
speed = seen / (t1 - t0)
print("AUC obtained for %d / %d items (%.3f items/s)" % (seen, total, speed), flush=True)
if __name__ == "__main__":
main()
|
import numpy as np
import string
from dataset import TSVDataset, H5Dataset, CombinedDataset
from generator import prefetch_generator
def word_dropout(inputs, rng, keep_prob):
inputs_ndim = inputs.ndim
mask_shape = [len(inputs)] + [1] * (inputs_ndim - 1)
return (
inputs *
(
rng.random_sample(size=mask_shape) <
keep_prob
)
).astype(inputs.dtype)
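# Illustrative sketch: with keep_prob=0.75 roughly a quarter of the token rows
# are zeroed out; index 0 is the <UNK> entry when the vocabularies are built
# with extra_words=["<UNK>"], as in the training script above, e.g.
#   rng = np.random.RandomState(0)
#   word_dropout(np.array([4, 7, 9, 2], dtype=np.int32), rng, keep_prob=0.75)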
def extract_feat(feat):
if feat["type"] == "word":
return lambda x: x
elif feat["type"] == "suffix":
length = feat["length"]
return lambda x: x[-length:]
elif feat["type"] == "prefix":
length = feat["length"]
return lambda x: x[:length]
elif feat["type"] == "digit":
return lambda x: x.isdigit()
elif feat["type"] == "punctuation_count":
return lambda x: sum(c in string.punctuation for c in x)
elif feat["type"] == "uppercase":
return lambda x: len(x) > 0 and x[0].isupper()
elif feat["type"] == "character-conv":
max_size = feat["max_word_length"]
def extract(x):
x_bytes = x.encode("utf-8")
if len(x_bytes) > max_size:
return np.concatenate(
[
[255],
list(x_bytes[:max_size]),
[256]
]
)
else:
return np.concatenate(
[
[255],
list(x_bytes),
[256],
-np.ones(max_size - len(x_bytes), dtype=np.int32),
]
)
return extract
else:
raise ValueError("unknown feature %r." % (feat,))
def extract_word_keep_prob(feat):
return feat.get("word_keep_prob", 0.85)
def extract_case_keep_prob(feat):
return feat.get("case_keep_prob", 0.95)
def extract_s_keep_prob(feat):
return feat.get("s_keep_prob", 0.95)
def apply_case_s_keep_prob(feat, rng, keep_case, keep_s):
if len(feat) == 0:
return feat
if keep_case < 1 and feat[0].isupper() and rng.random_sample() >= keep_case:
feat = feat.lower()
if keep_s < 1 and feat.endswith("s") and rng.random_sample() >= keep_s:
feat = feat[:-1]
return feat
def requires_character_convolution(feat):
return feat["type"] in {"character-conv"}
def requires_vocab(feat):
return feat["type"] in {"word", "suffix", "prefix"}
def feature_npdtype(feat):
if requires_vocab(feat):
return np.int32
elif feat["type"] in {"digit", "punctuation_count", "uppercase"}:
return np.float32
elif requires_character_convolution(feat):
return np.int32
else:
raise ValueError("unknown feature %r." % (feat,))
def get_vocabs(dataset, max_vocabs, extra_words=None):
index2words = [[] for i in range(len(max_vocabs))]
occurrences = [{} for i in range(len(max_vocabs))]
for els in dataset:
for el, index2word, occurrence in zip(els, index2words, occurrences):
if el not in occurrence:
index2word.append(el)
occurrence[el] = 1
else:
occurrence[el] += 1
index2words = [
sorted(index2word, key=lambda x: occurrence[x], reverse=True)
for index2word, occurrence in zip(index2words, occurrences)
]
index2words = [
index2word[:max_vocab] if max_vocab > 0 else index2word
for index2word, max_vocab in zip(index2words, max_vocabs)
]
if extra_words is not None:
index2words = [
extra_words + index2word for index2word in index2words
]
return index2words
def get_feature_vocabs(features, dataset, extra_words=None):
out, feats_needing_vocab, feats_with_vocabs, vocabs = [], [], [], []
if hasattr(dataset, "set_ignore_y"):
dataset.set_ignore_y(True)
try:
for feat in features:
if requires_vocab(feat):
if feat.get("path") is not None:
with open(feat["path"], "rt") as fin:
index2word = fin.read().splitlines()
if feat.get("max_vocab", -1) > 0:
index2word = index2word[:feat["max_vocab"]]
if extra_words is not None:
index2word = extra_words + index2word
feats_with_vocabs.append(index2word)
else:
feats_needing_vocab.append(feat)
if len(feats_needing_vocab) > 0:
extractors = tuple(
[extract_feat(feat) for feat in feats_needing_vocab]
)
vocabs = get_vocabs(
((extractor(w) for extractor in extractors)
for x, _ in dataset for w in x),
max_vocabs=[feat.get("max_vocab", -1) for feat in feats_needing_vocab],
extra_words=extra_words
)
vocab_feature_idx = 0
preexisting_vocab_feature_idx = 0
for feat in features:
if requires_vocab(feat):
if feat.get("path") is not None:
out.append(feats_with_vocabs[preexisting_vocab_feature_idx])
preexisting_vocab_feature_idx += 1
else:
out.append(vocabs[vocab_feature_idx])
vocab_feature_idx+=1
else:
out.append(None)
finally:
if hasattr(dataset, "set_ignore_y"):
dataset.set_ignore_y(False)
return out
def pad_arrays_into_array(arrays, padding):
out_ndim = arrays[0].ndim + 1
out_shape = [0] * out_ndim
out_shape[0] = len(arrays)
for arr in arrays:
for dim_idx in range(arr.ndim):
out_shape[1 + dim_idx] = max(out_shape[1 + dim_idx], arr.shape[dim_idx])
out = np.empty(out_shape, dtype=arrays[0].dtype)
out.fill(padding)
for arr_idx, array in enumerate(arrays):
arr_slice = [arr_idx]
        for dim_idx in range(array.ndim):
arr_slice.append(slice(0, array.shape[dim_idx]))
arr_slice = tuple(arr_slice)
out[arr_slice] = array
return out
def build_objective_mask(label_sequence, objective_idx, objective_type):
if objective_type == 'crf':
if len(label_sequence) == 0 or label_sequence[0][objective_idx] is None:
return np.array(False, dtype=np.bool)
else:
return np.array(True, dtype=np.bool)
elif objective_type == 'softmax':
return np.array(
[w[objective_idx] is not None for w in label_sequence], dtype=np.bool
)
else:
raise ValueError(
"unknown objective type %r." % (objective_type,)
)
def allocate_shrunk_batches(max_length, batch_size, lengths):
typical_indices = max_length * batch_size
i = 0
ranges = []
while i < len(lengths):
j = i + 1
current_batch_size = 1
longest_ex = lengths[j - 1]
while j < len(lengths) and j - i < batch_size:
# can grow?
new_batch_size = current_batch_size + 1
new_j = j + 1
if max(longest_ex, lengths[new_j - 1]) * new_batch_size < typical_indices:
j = new_j
longest_ex = max(longest_ex, lengths[new_j - 1])
current_batch_size = new_batch_size
else:
break
ranges.append((i, j))
i = j
return ranges
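# Illustrative sketch: the token budget per batch is max_length * batch_size.
# With max_length=100 and batch_size=2 (budget 200),
#   allocate_shrunk_batches(100, 2, [30, 40, 150, 20]) -> [(0, 2), (2, 3), (3, 4)]
# because pairing the 150-token example with anything would exceed the budget.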
def convert_label_to_index(label, label2index):
if label is None:
return 0
if isinstance(label, str):
return label2index[label]
return label
class Batchifier(object):
def __init__(self, rng, feature_word2index, objective_types, label2index,
fused, sequence_lengths, labels, labels_mask,
input_placeholders, features, dataset, batch_size, train,
autoresize=True, max_length=100):
assert(batch_size > 0), (
"batch size must be strictly positive (got %r)." % (batch_size,)
)
# dictionaries, strings defined by model:
self.objective_types = objective_types
self.label2index = label2index
self.feature_word2index = feature_word2index
self.rng = rng
self.fused = fused
# tf placeholders:
self.sequence_lengths = sequence_lengths
self.labels = labels
self.labels_mask = labels_mask
self.input_placeholders = input_placeholders
self.dataset = dataset
self.batch_size = batch_size
self.train = train
self.dataset_is_lazy = isinstance(dataset, (TSVDataset, H5Dataset, CombinedDataset))
self.autoresize = autoresize
self.max_length = max_length
indices = np.arange(len(dataset))
if train:
if self.dataset_is_lazy:
dataset.set_rng(rng)
dataset.set_randomize(True)
elif isinstance(dataset, list):
rng.shuffle(indices)
self.batch_indices = []
if self.autoresize and not self.dataset_is_lazy:
ranges = allocate_shrunk_batches(
max_length=self.max_length,
batch_size=self.batch_size,
lengths=[len(dataset[indices[i]][0]) for i in range(len(indices))]
)
for i, j in ranges:
self.batch_indices.append(indices[i:j])
else:
for i in range(0, len(indices), self.batch_size):
self.batch_indices.append(indices[i:i + self.batch_size])
self.extractors = [
(extract_feat(feat), requires_vocab(feat), feature_npdtype(feat),
extract_word_keep_prob(feat), extract_case_keep_prob(feat), extract_s_keep_prob(feat))
for feat in features
]
def generate_batch(self, examples):
X = [[] for i in range(len(self.extractors))]
Y = []
Y_mask = []
for ex, label in examples:
for idx, (extractor, uses_vocab, dtype, word_keep_prob, case_keep_prob, s_keep_prob) in enumerate(self.extractors):
if self.train and (case_keep_prob < 1 or s_keep_prob < 1):
ex = [apply_case_s_keep_prob(w, self.rng, case_keep_prob, s_keep_prob) for w in ex]
if uses_vocab:
word_feats = np.array(
[self.feature_word2index[idx].get(extractor(w), 0) for w in ex],
dtype=dtype
)
else:
word_feats = np.array([extractor(w) for w in ex], dtype=dtype)
if self.train and word_keep_prob < 1:
word_feats = word_dropout(
word_feats, self.rng, word_keep_prob
)
X[idx].append(word_feats)
Y.append(
tuple(
np.array([convert_label_to_index(w[objective_idx], label2index)
for w in label], dtype=np.int32)
for objective_idx, label2index in enumerate(self.label2index)
)
)
Y_mask.append(
tuple(
build_objective_mask(label, objective_idx, objective_type)
for objective_idx, objective_type in enumerate(self.objective_types)
)
)
sequence_lengths = np.array([len(x) for x in X[0]], dtype=np.int32)
X = [pad_arrays_into_array(x, -1) for x in X]
Y = [
pad_arrays_into_array([row[objective_idx] for row in Y], 0)
for objective_idx in range(len(self.objective_types))
]
Y_mask = [
pad_arrays_into_array([row[objective_idx] for row in Y_mask], 0.0)
for objective_idx in range(len(self.objective_types))
]
feed_dict = {
self.sequence_lengths: sequence_lengths
}
if self.fused:
feed_dict[self.labels[0]] = np.stack([y.T for y in Y], axis=-1)
feed_dict[self.labels_mask[0]] = np.stack([y.T for y in Y_mask], axis=-1)
else:
for y, placeholder in zip(Y, self.labels):
feed_dict[placeholder] = y.T
for y, placeholder in zip(Y_mask, self.labels_mask):
feed_dict[placeholder] = y.T
for idx, x in enumerate(X):
feed_dict[self.input_placeholders[idx]] = x.swapaxes(0, 1)
return feed_dict
def as_list(self):
return list(self.iter_batches())
def iter_batches(self, pbar=None):
gen = range(len(self.batch_indices))
if pbar is not None:
pbar.max_value = len(self.batch_indices)
pbar.value = 0
gen = pbar(gen)
if self.autoresize and self.dataset_is_lazy:
for idx in gen:
examples = [self.dataset[ex] for ex in self.batch_indices[idx]]
ranges = allocate_shrunk_batches(
max_length=self.max_length,
batch_size=self.batch_size,
lengths=[len(ex[0]) for ex in examples]
)
for i, j in ranges:
yield self.generate_batch(examples[i:j])
else:
for idx in gen:
yield self.generate_batch(
[self.dataset[ex] for ex in self.batch_indices[idx]]
)
def batch_worker(rng,
features,
feature_word2index,
objective_types,
label2index,
fused,
sequence_lengths,
labels,
labels_mask,
input_placeholders,
autoresize,
train,
batch_size,
max_length,
dataset,
pbar,
batch_queue,
death_event):
batchifier = Batchifier(
rng=rng,
features=features,
feature_word2index=feature_word2index,
objective_types=objective_types,
label2index=label2index,
fused=fused,
sequence_lengths=sequence_lengths,
labels=labels,
labels_mask=labels_mask,
input_placeholders=input_placeholders,
autoresize=autoresize,
train=train,
batch_size=batch_size,
max_length=max_length,
dataset=dataset
)
for batch in batchifier.iter_batches(pbar=pbar):
if death_event.is_set():
break
batch_queue.put(batch)
if not death_event.is_set():
batch_queue.put(None)
def range_size(start, size):
return [i for i in range(start, start + size)]
class ProcessHolder(object):
def __init__(self, process, death_event, batch_queue):
self.process = process
self.batch_queue = batch_queue
self.death_event = death_event
def close(self):
self.death_event.set()
try:
self.batch_queue.close()
while True:
self.batch_queue.get_nowait()
except Exception as e:
pass
self.process.terminate()
self.process.join()
def __del__(self):
self.close()
def iter_batches_single_threaded(model,
dataset,
batch_size,
train,
autoresize=True,
max_length=100,
pbar=None):
tensorflow_placeholders = [model.sequence_lengths] + model.labels + model.labels_mask + model.input_placeholders
labels_start = 1
labels_mask_start = labels_start + len(model.labels)
placeholder_start = labels_mask_start + len(model.labels_mask)
batchifier = Batchifier(
rng=model.rng,
features=model.features,
feature_word2index=model.feature_word2index,
objective_types=[obj["type"] for obj in model.objectives],
label2index=model.label2index,
fused=model.fused,
sequence_lengths=0,
labels=range_size(labels_start, len(model.labels)),
labels_mask=range_size(labels_mask_start, len(model.labels_mask)),
input_placeholders=range_size(placeholder_start, len(model.input_placeholders)),
autoresize=autoresize,
train=train,
batch_size=batch_size,
max_length=max_length,
dataset=dataset
)
for batch in prefetch_generator(batchifier.iter_batches(pbar=pbar), to_fetch=100):
feed_dict = {}
for idx, key in enumerate(tensorflow_placeholders):
feed_dict[key] = batch[idx]
yield feed_dict
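# Illustrative sketch (hypothetical `model` and `dataset` objects): the feed
# dictionaries produced here can be passed directly to session.run, e.g.
#   for feed_dict in iter_batches_single_threaded(model, dataset,
#                                                 batch_size=32, train=True):
#       session.run(model.train_op, feed_dict)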
def iter_batches(model,
dataset,
batch_size,
train,
autoresize=True,
max_length=100,
pbar=None):
import multiprocessing
batch_queue = multiprocessing.Queue(maxsize=10)
tensorflow_placeholders = [model.sequence_lengths] + model.labels + model.labels_mask + model.input_placeholders
labels_start = 1
labels_mask_start = labels_start + len(model.labels)
placeholder_start = labels_mask_start + len(model.labels_mask)
death_event = multiprocessing.Event()
batch_process = ProcessHolder(multiprocessing.Process(
target=batch_worker,
daemon=True,
args=(
model.rng,
model.features,
model.feature_word2index,
[obj["type"] for obj in model.objectives],
model.label2index,
model.fused,
0,
range_size(labels_start, len(model.labels)),
range_size(labels_mask_start, len(model.labels_mask)),
range_size(placeholder_start, len(model.input_placeholders)),
autoresize,
train,
batch_size,
max_length,
dataset,
pbar,
batch_queue,
death_event
)
), death_event, batch_queue)
batch_process.process.name = "iter_batches"
batch_process.process.start()
while True:
batch = batch_queue.get()
if batch is None:
break
else:
feed_dict = {}
for idx, key in enumerate(tensorflow_placeholders):
feed_dict[key] = batch[idx]
yield feed_dict
del batch
|
import distutils.ccompiler
import distutils.sysconfig
import re
import numpy as np
import sys
import subprocess
from setuptools import setup, find_packages
from os.path import join, dirname, realpath, relpath, splitext, exists, getmtime, lexists, islink
from os import walk, sep, remove, listdir, stat, symlink
from Cython.Distutils.extension import Extension
from Cython.Distutils import build_ext
from distutils.command import build as build_module, clean as clean_module
from distutils.spawn import find_executable
SCRIPT_DIR = dirname(realpath(__file__))
WIKIDATA_LINKER_SOURCE_DIR = join(SCRIPT_DIR, "src")
WIKIDATA_LINKER_MODULE_NAME = "wikidata_linker_utils"
WIKIDATA_LINKER_INTERNAL_MODULE_NAME = WIKIDATA_LINKER_MODULE_NAME
version_file = join(SCRIPT_DIR, "VERSION")
if exists(version_file):
with open(version_file) as f:
VERSION = f.read().strip()
else:
VERSION = "1.0.0"
def path_to_module_name(path):
BASE_DIRS = ["python", "cython"]
relative_path = relpath(path, join(WIKIDATA_LINKER_SOURCE_DIR))
path_no_ext, _ = splitext(relative_path)
for base_dir in BASE_DIRS:
if path_no_ext.startswith(base_dir):
            return path_no_ext[len(base_dir + sep):].replace(sep, '.')
raise Exception("Cannot convert path %r to module name" % (relative_path,))
def find_files_by_suffix(path, suffix):
"""Recursively find files with specific suffix in a directory"""
for relative_path, dirs, files in walk(path):
for fname in files:
if fname.endswith(suffix):
yield join(path, relative_path, fname)
# Make a `cleanall` rule to get rid of intermediate and library files
class clean(clean_module.clean):
def run(self):
print("Cleaning up cython files...")
# Just in case the build directory was created by accident,
# note that shell=True should be OK here because the command is constant.
for place in ["build",
join("src", "cython", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, "*.c"),
join("src", "cython", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, "*.cpp"),
join("src", "cython", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, "*.so")]:
subprocess.Popen("rm -rf %s" % (place,),
shell=True,
executable="/bin/bash",
cwd=SCRIPT_DIR)
compiler = distutils.ccompiler.new_compiler()
distutils.sysconfig.customize_compiler(compiler)
BLACKLISTED_COMPILER_SO = ['-Wp,-D_FORTIFY_SOURCE=2']
build_ext.compiler = compiler
ext_modules = []
for pyx_file in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, "cython"), ".pyx"):
# pxd files are like header files for pyx files
# and they can also have relevant includes.
relevant_files = [pyx_file]
pxd_file = pyx_file[:-3] + "pxd"
if exists(pxd_file):
relevant_files.append(pxd_file)
ext_modules.append(Extension(
name=path_to_module_name(pyx_file),
sources=[pyx_file],
library_dirs=[],
language='c++',
extra_compile_args=['-std=c++11', '-Wno-unused-function',
'-Wno-sign-compare', '-Wno-unused-local-typedef',
'-Wno-undefined-bool-conversion', '-O3',
'-Wno-reorder'],
extra_link_args=[],
libraries=[],
extra_objects=[],
include_dirs=[np.get_include()]
))
################################################################################
## FIND PYTHON PACKAGES ##
################################################################################
py_packages = []
for file in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, "python"), ".py"):
module_path = dirname(file)
py_packages.append(path_to_module_name(module_path))
################################################################################
## BUILD COMMAND WITH EXTRA WORK WHEN DONE ##
################################################################################
def symlink_built_package(module_name, dest_directory):
build_dir_contents = listdir(join(SCRIPT_DIR, "build"))
lib_dot_fnames = []
for name in build_dir_contents:
if name.startswith("lib."):
lib_dot_fnames.append(join(SCRIPT_DIR, "build", name))
# get latest lib. file created and symlink it to the project
# directory for easier testing
lib_dot_fnames = sorted(
lib_dot_fnames,
key=lambda name: stat(name).st_mtime,
reverse=True
)
if len(lib_dot_fnames) == 0:
return
most_recent_name = join(lib_dot_fnames[0], module_name)
symlink_name = join(dest_directory, module_name)
if lexists(symlink_name):
if islink(symlink_name):
remove(symlink_name)
else:
print(
("non symlink file with name %r found in project directory."
" Please remove to create a symlink on build") % (
symlink_name,
)
)
return
symlink(most_recent_name,
symlink_name,
target_is_directory=True)
print("Created symlink pointing to %r from %r" % (
most_recent_name,
join(SCRIPT_DIR, module_name)
))
class build_with_posthooks(build_module.build):
def run(self):
build_module.build.run(self)
# Make a `cleanall` rule to get rid of intermediate and library files
class clean_with_posthooks(clean_module.clean):
def run(self):
clean_module.clean.run(self)
# remove cython generated sources
for file_path in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, 'cython'), '.cpp'):
remove(file_path)
setup(name=WIKIDATA_LINKER_MODULE_NAME,
version=VERSION,
cmdclass={"build": build_with_posthooks, 'build_ext': build_ext, 'clean': clean_with_posthooks},
install_requires=["numpy"],
extras_require={"dev": []},
author="Jonathan Raiman",
language='c++',
author_email="[email protected]",
ext_modules=ext_modules,
description="Generate data processing utilities for running DeepType.",
package_dir={'': join(WIKIDATA_LINKER_SOURCE_DIR, 'python')},
packages=py_packages)
|
import re
STOP_WORDS = {'a', 'an', 'in', 'the', 'of', 'it', 'from', 'with', 'this', 'that', 'they', 'he',
'she', 'some', 'where', 'what', 'since', 'his', 'her', 'their', 'le', 'la', 'les', 'il',
              'elle', 'ce', 'ça', 'ci', 'ceux', 'ceci', 'cela', 'celle', 'se', 'cet', 'cette',
              'dans', 'avec', 'con', 'sans', 'pendant', 'durant', 'avant', 'après', 'puis', 'el', 'lo', 'la',
              'ese', 'esto', 'que', 'qui', 'quoi', 'dont', 'ou', 'où', 'si', 'este', 'esta', 'cual',
              'eso', 'ella', 'depuis', 'y', 'a', 'à', 'su', 'de', "des", 'du', 'los', 'las', 'un', 'une', 'una',
              'uno', 'para', 'asi', 'later', 'into', 'dentro', 'dedans', 'depuis', 'después', 'desde',
'al', 'et', 'por', 'at', 'for', 'when', 'why', 'how', 'with', 'whether', 'if',
'thus', 'then', 'and', 'but', 'on', 'during', 'while', 'as', 'within', 'was', 'is',
'est', 'au', 'fait', 'font', 'va', 'vont', 'sur', 'en', 'pour', 'del', 'cuando',
'cuan', 'do', 'does', 'until', 'sinon', 'encore', 'to', 'by', 'be', 'which',
'have', 'not', 'were', 'has', 'also', 'its', 'isbn', 'pp.', "&", "p.", 'ces', 'o'}
def starts_with_apostrophe_letter(word):
    return word.startswith(
        ("l'", "L'", "d'", "D'", "j'", "J'", "t'", "T'")
    )
PUNCTUATION = {"'", ",", "-", "!", ".", "?", ":", "’"}
def clean_up_trie_source(source, lowercase=True):
source = source.rstrip().strip('()[]')
if len(source) > 0 and (source[-1] in PUNCTUATION or source[0] in PUNCTUATION):
return ""
# remove l'
if starts_with_apostrophe_letter(source):
source = source[2:]
if source.endswith("'s"):
source = source[:-2]
tokens = source.split()
while len(tokens) > 0 and tokens[0].lower() in STOP_WORDS:
tokens = tokens[1:]
while len(tokens) > 0 and tokens[-1].lower() in STOP_WORDS:
tokens = tokens[:-1]
joined_tokens = " ".join(tokens)
if lowercase:
return joined_tokens.lower()
return joined_tokens
ORDINAL_ANCHOR = re.compile(r"^\d+(st|th|nd|rd|er|eme|ème|ère)$")
NUMBER_PUNCTUATION = re.compile(r"^\d+([\/\-,\.:;%]\d*)+$")
def anchor_is_ordinal(anchor):
return ORDINAL_ANCHOR.match(anchor) is not None
def anchor_is_numbers_slashes(anchor):
return NUMBER_PUNCTUATION.match(anchor) is not None
def acceptable_anchor(anchor, anchor_trie, blacklist=None):
return (len(anchor) > 0 and
not anchor.isdigit() and
not anchor_is_ordinal(anchor) and
not anchor_is_numbers_slashes(anchor) and
anchor in anchor_trie and
(blacklist is None or anchor not in blacklist))
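# Illustrative self-check of the anchor-cleaning helpers above (a minimal
# sketch, not part of the original module); run this file directly to try it.
if __name__ == "__main__":
    # the leading stop word "The" is stripped and the result lowercased:
    assert clean_up_trie_source("The Eiffel Tower") == "eiffel tower"
    # ordinals and number/punctuation runs are rejected as anchors:
    assert anchor_is_ordinal("3rd") and not anchor_is_ordinal("third")
    assert anchor_is_numbers_slashes("1918/1919")
    print("anchor filtering helpers behave as expected.")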
|
import sys
import importlib.util
import traceback
from os.path import basename, splitext
def reload_module(path):
module_name, extension = splitext(basename(path))
if extension != ".py":
raise ValueError("path must have a .py extension (got %r)" % (path,))
spec = importlib.util.spec_from_file_location(module_name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def enter_or_quit():
wait = input("press any key to continue, q to quit.")
received = wait.rstrip()
if received == 'q':
print("Bye.")
sys.exit(0)
else:
return received
ALLOWED_RUNTIME_ERRORS = (
TypeError,
ValueError,
IndexError,
NameError,
KeyError,
AssertionError,
AttributeError,
ImportError,
KeyboardInterrupt
)
ALLOWED_IMPORT_ERRORS = (
SyntaxError,
NameError,
ImportError
)
def reload_run_retry(module_path, callback):
while True:
try:
module = reload_module(module_path)
except ALLOWED_IMPORT_ERRORS as e:
print("issue reading %r, please fix." % (module_path,))
print(str(e))
traceback.print_exc(file=sys.stdout)
enter_or_quit()
continue
try:
result = callback(module)
except ALLOWED_RUNTIME_ERRORS as e:
print("issue running %r, please fix." % (module_path,))
print(str(e))
traceback.print_exc(file=sys.stdout)
enter_or_quit()
continue
break
return result
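# Example usage (sketch): keep reloading a user-supplied script until its
# `classify` function runs without raising one of the allowed errors.
# "my_classifier.py" and `collection` are hypothetical placeholders.
#
#   result = reload_run_retry(
#       "my_classifier.py",
#       lambda module: module.classify(collection)
#   )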
|
import subprocess
def execute_bash(command):
"""
Executes bash command, prints output and
throws an exception on failure.
"""
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
for line in process.stdout:
print(line, end='', flush=True)
    process.wait()
    if process.returncode != 0:
        raise subprocess.CalledProcessError(process.returncode, command)
def get_bash_result(command):
"""
Executes bash command, returns output and throws
an exception on failure.
"""
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
out = [line for line in process.stdout]
    process.wait()
    if process.returncode != 0:
        raise subprocess.CalledProcessError(process.returncode, command)
    return out
def count_lines(path):
return int(
get_bash_result('wc -l %s' % (path,))[0].strip().split(' ')[0]
)
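# Illustrative usage (sketch); run this file directly to try it. The commented
# call uses a hypothetical file path.
if __name__ == "__main__":
    execute_bash("echo hello from execute_bash")
    print(get_bash_result("echo one; echo two"))  # -> ['one\n', 'two\n']
    # print(count_lines("/path/to/some_file.tsv"))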
|
import json
import msgpack
import bz2
def iterate_bytes_jsons(fin, batch_size=1000):
current = []
for l in fin:
if l.startswith(b'{'):
current.append(l)
if len(current) >= batch_size:
docs = json.loads('[' + b"".join(current).decode('utf-8').rstrip(',\n') + ']')
for doc in docs:
yield doc
current = []
if len(current) > 0:
docs = json.loads('[' + b"".join(current).decode('utf-8').rstrip(',\n') + ']')
for doc in docs:
yield doc
current = []
def iterate_text_jsons(fin, batch_size=1000):
current = []
for l in fin:
if l.startswith('{'):
current.append(l)
if len(current) >= batch_size:
docs = json.loads('[' + "".join(current).rstrip(',\n') + ']')
for doc in docs:
yield doc
current = []
if len(current) > 0:
docs = json.loads('[' + "".join(current).rstrip(',\n') + ']')
for doc in docs:
yield doc
current = []
def iterate_message_packs(fin):
unpacker = msgpack.Unpacker(fin, encoding='utf-8', use_list=False)
for obj in unpacker:
yield obj
def open_wikidata_file(path, batch_size):
if path.endswith('bz2'):
with bz2.open(path, 'rb') as fin:
for obj in iterate_bytes_jsons(fin, batch_size):
yield obj
elif path.endswith('json'):
with open(path, 'rt') as fin:
for obj in iterate_text_jsons(fin, batch_size):
yield obj
elif path.endswith('mp'):
with open(path, 'rb') as fin:
for obj in iterate_message_packs(fin):
yield obj
else:
raise ValueError(
"unknown extension for wikidata. "
"Expecting bz2, json, or mp (msgpack)."
)
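# Example (sketch): stream a Wikidata dump and inspect the first few entities.
# "wikidata-latest-all.json.bz2" is a hypothetical dump path.
#
#   for i, doc in enumerate(open_wikidata_file("wikidata-latest-all.json.bz2",
#                                              batch_size=1000)):
#       print(doc["id"])
#       if i == 4:
#           break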
|
from numpy import logical_and, logical_not, logical_or
def logical_negate(truth, falses):
out = truth
for value in falses:
out = logical_and(out, logical_not(value))
return out
def logical_ors(values):
assert(len(values) > 0), "values cannot be empty."
out = values[0]
for val in values[1:]:
out = logical_or(out, val)
return out
def logical_ands(values):
assert(len(values) > 0), "values cannot be empty."
out = values[0]
for val in values[1:]:
out = logical_and(out, val)
return out
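# Quick self-check of the boolean helpers above (illustrative sketch).
if __name__ == "__main__":
    import numpy as np
    a = np.array([True, True, False, True])
    b = np.array([False, True, False, False])
    print(logical_negate(a, [b]))  # [ True False False  True]
    print(logical_ors([a, b]))     # [ True  True False  True]
    print(logical_ands([a, b]))    # [False  True False False]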
|
import json
import warnings
from os.path import join, exists
from functools import lru_cache
import marisa_trie
import requests
import numpy as np
from .successor_mask import (
successor_mask, invert_relation, offset_values_mask
)
from .offset_array import OffsetArray, SparseAttribute
from .wikidata_ids import (
load_wikidata_ids, load_names, property_names, temporal_property_names
)
from . import wikidata_properties as wprop
class CachedRelation(object):
def __init__(self, use, state):
self.use = use
self.state = state
@lru_cache(maxsize=None)
def get_name(wikidata_id):
res = requests.get("https://www.wikidata.org/wiki/" + wikidata_id)
el = res.text.find('<title>')
el_end = res.text.find('</title>')
return res.text[el + len('<title>'):el_end]
class TypeCollection(object):
def __init__(self, path, num_names_to_load=100000, language_path=None, prefix="enwiki", verbose=True,
cache=True):
self.cache = cache
self.path = path
self.verbose = verbose
self.wikidata_names2prop_names = property_names(
join(path, 'wikidata_property_names.json')
)
self.wikidata_names2temporal_prop_names = temporal_property_names(
join(path, 'wikidata_time_property_names.json')
)
# add wikipedia english category links:
self.wikidata_names2prop_names[wprop.CATEGORY_LINK] = "category_link"
self.wikidata_names2prop_names[wprop.FIXED_POINTS] = "fixed_points"
self.known_names = load_names(
join(path, "wikidata_wikititle2wikidata.tsv"),
num_names_to_load,
prefix=prefix
)
self.num_names_to_load = num_names_to_load
self.ids, self.name2index = load_wikidata_ids(path, verbose=self.verbose)
self._relations = {}
self._attributes = {}
self._inverted_relations = {}
self._article2id = None
self._web_get_name = True
self._satisfy_cache = {}
# empty blacklist:
self.set_bad_node(
set(), set()
)
if language_path is not None:
article_links = np.load(join(language_path, "trie_index2indices_values.npy"))
article_links_counts = np.load(join(language_path, "trie_index2indices_counts.npy"))
self._weighted_articles = np.bincount(article_links, weights=article_links_counts).astype(np.int32)
if len(self._weighted_articles) != len(self.ids):
self._weighted_articles = np.concatenate(
[
self._weighted_articles,
np.zeros(len(self.ids) - len(self._weighted_articles), dtype=np.int32)
]
)
else:
self._weighted_articles = None
def attribute(self, name):
if name not in self._attributes:
is_temporal = name in self.wikidata_names2temporal_prop_names
assert(is_temporal), "load relations using `relation` method."
if self.verbose:
print('load %r (%r)' % (name, self.wikidata_names2prop_names[name],))
self._attributes[name] = SparseAttribute.load(
join(self.path, "wikidata_%s" % (name,))
)
return self._attributes[name]
@property
def article2id(self):
if self._article2id is None:
if self.verbose:
print('load %r' % ("article2id",))
self._article2id = marisa_trie.RecordTrie('i').load(
join(self.path, "wikititle2wikidata.marisa")
)
if self.verbose:
print("done.")
return self._article2id
def relation(self, name):
if name.endswith(".inv"):
return self.get_inverted_relation(name[:-4])
if name not in self._relations:
is_temporal = name in self.wikidata_names2temporal_prop_names
assert(not is_temporal), "load attributes using `attribute` method."
if self.verbose:
print('load %r (%r)' % (name, self.wikidata_names2prop_names[name],))
self._relations[name] = OffsetArray.load(
join(self.path, "wikidata_%s" % (name,)),
compress=True
)
return self._relations[name]
def set_bad_node(self, bad_node, bad_node_pair):
changed = False
if hasattr(self, "_bad_node") and self._bad_node != bad_node:
changed = True
if hasattr(self, "_bad_node_pair") and self._bad_node_pair != bad_node_pair:
changed = True
self._bad_node = bad_node
self._bad_node_pair = bad_node_pair
self._bad_node_array = np.array(list(bad_node), dtype=np.int32)
bad_node_pair_right = {}
for node_left, node_right in self._bad_node_pair:
if node_right not in bad_node_pair_right:
bad_node_pair_right[node_right] = [node_left]
else:
bad_node_pair_right[node_right].append(node_left)
bad_node_pair_right = {
node_right: np.array(node_lefts, dtype=np.int32)
for node_right, node_lefts in bad_node_pair_right.items()
}
self._bad_node_pair_right = bad_node_pair_right
if changed:
self.reset_cache()
def get_name(self, identifier):
if identifier >= self.num_names_to_load and self._web_get_name:
try:
return get_name(self.ids[identifier]) + " (" + self.ids[identifier] + ")"
except requests.exceptions.ConnectionError:
self._web_get_name = False
name = self.known_names.get(identifier, None)
if name is None:
return self.ids[identifier]
else:
return name + " (" + self.ids[identifier] + ")"
def describe_connection(self, source, destination, allowed_edges):
if isinstance(source, str):
if source in self.name2index:
source_index = self.name2index[source]
else:
source_index = self.article2id["enwiki/" + source][0][0]
else:
source_index = source
if isinstance(destination, str):
if destination in self.name2index:
dest_index = self.name2index[destination]
else:
dest_index = self.article2id["enwiki/" + destination][0][0]
else:
dest_index = destination
found_path = self.is_member_with_path(
source_index,
allowed_edges,
[dest_index]
)
if found_path is not None:
_, path = found_path
for el in path:
if isinstance(el, str):
print(" " + el)
else:
print(self.get_name(el), el)
else:
print('%r and %r are not connected' % (source, destination))
def is_member_with_path(self, root, fields, member_fields, max_steps=float("inf"), steps=0, visited=None, path=None):
if steps >= max_steps:
return None
if visited is None:
visited = set()
if path is None:
path = [root]
else:
path = path + [root]
for field in fields:
field_parents = self.relation(field)[root]
for el in field_parents:
if el in member_fields and el not in self._bad_node and (root, el) not in self._bad_node_pair:
return True, path + [field, el]
for el in field_parents:
if el in visited or el in self._bad_node or (root, el) in self._bad_node_pair:
continue
visited.add(el)
res = self.is_member_with_path(el, fields, member_fields, max_steps, steps=steps + 1, visited=visited, path=path + [field])
if res is not None:
return res
return None
def get_inverted_relation(self, relation_name):
if relation_name.endswith(".inv"):
return self.relation(relation_name[:-4])
if relation_name not in self._inverted_relations:
new_values_path = join(self.path, "wikidata_inverted_%s_values.npy" % (relation_name,))
new_offsets_path = join(self.path, "wikidata_inverted_%s_offsets.npy" % (relation_name,))
if not exists(new_values_path):
relation = self.relation(relation_name)
if self.verbose:
print("inverting relation %r (%r)" % (relation_name, self.wikidata_names2prop_names[relation_name],))
new_values, new_offsets = invert_relation(
relation.values,
relation.offsets
)
np.save(new_values_path, new_values)
np.save(new_offsets_path, new_offsets)
if self.verbose:
print("load inverted %r (%r)" % (relation_name, self.wikidata_names2prop_names[relation_name]))
self._inverted_relations[relation_name] = OffsetArray.load(
join(self.path, "wikidata_inverted_%s" % (relation_name,)),
compress=True
)
return self._inverted_relations[relation_name]
def successor_mask(self, relation, active_nodes):
if isinstance(active_nodes, list):
active_nodes = np.array(active_nodes, dtype=np.int32)
if active_nodes.dtype != np.int32:
active_nodes = active_nodes.astype(np.int32)
return successor_mask(
relation.values, relation.offsets, self._bad_node_pair_right, active_nodes
)
def remove_blacklist(self, state):
state[self._bad_node_array] = False
def satisfy(self, relation_names, active_nodes, max_steps=None):
assert(len(relation_names) > 0), (
"relation_names cannot be empty."
)
if self.cache and isinstance(active_nodes, (list, tuple)) and len(active_nodes) < 100:
satisfy_key = (tuple(sorted(relation_names)), tuple(sorted(active_nodes)), max_steps)
if satisfy_key in self._satisfy_cache:
cached = self._satisfy_cache[satisfy_key]
cached.use += 1
return cached.state
else:
satisfy_key = None
inverted_relations = [self.get_inverted_relation(relation_name) for relation_name in relation_names]
state = np.zeros(inverted_relations[0].size(), dtype=np.bool)
state[active_nodes] = True
step = 0
while len(active_nodes) > 0:
succ = None
for relation in inverted_relations:
if succ is None:
succ = self.successor_mask(relation, active_nodes)
else:
succ = succ | self.successor_mask(relation, active_nodes)
new_state = state | succ
self.remove_blacklist(new_state)
(active_nodes,) = np.where(state != new_state)
active_nodes = active_nodes.astype(np.int32)
state = new_state
step += 1
if max_steps is not None and step >= max_steps:
break
if satisfy_key is not None:
self._satisfy_cache[satisfy_key] = CachedRelation(1, state)
return state
def reset_cache(self):
cache_keys = list(self._satisfy_cache.keys())
for key in cache_keys:
if self._satisfy_cache[key].use == 0:
del self._satisfy_cache[key]
else:
self._satisfy_cache[key].use = 0
def print_top_class_members(self, truth_table, name="Other", topn=20):
if self._weighted_articles is not None:
print("%s category, highly linked articles in wikipedia:" % (name,))
sort_weight = self._weighted_articles * truth_table
linked_articles = int((sort_weight > 0).sum())
print("%s category, %d articles linked in wikipedia:" % (name, linked_articles))
top_articles = np.argsort(sort_weight)[::-1]
for art in top_articles[:topn]:
if not truth_table[art]:
break
print("%r (%d)" % (self.get_name(art), self._weighted_articles[art]))
print("")
else:
print("%s category, sample of members:" % (name,))
top_articles = np.where(truth_table)[0]
for art in top_articles[:topn]:
print("%r" % (self.get_name(art),))
print("")
def class_report(self, relation_names, truth_table, name="Other", topn=20):
active_nodes = np.where(truth_table)[0].astype(np.int32)
num_active_nodes = len(active_nodes)
print("%s category contains %d unique items." % (name, num_active_nodes,))
relations = [self.relation(relation_name) for relation_name in relation_names]
for relation, relation_name in zip(relations, relation_names):
mask = offset_values_mask(relation.values, relation.offsets, active_nodes)
counts = np.bincount(relation.values[mask])
topfields = np.argsort(counts)[::-1]
print("%s category, most common %r:" % (name, relation_name,))
for field in topfields[:topn]:
if counts[field] == 0:
break
print("%.3f%% (%d): %r" % (100.0 * counts[field] / num_active_nodes,
counts[field],
self.get_name(field)))
print("")
is_fp = np.logical_and(
np.logical_or(
self.relation(wprop.FIXED_POINTS + ".inv").edges() > 0,
self.relation(wprop.FIXED_POINTS).edges() > 0
),
truth_table
)
self.print_top_class_members(
is_fp, topn=topn, name=name + " (fixed points)"
)
if self._weighted_articles is not None:
self.print_top_class_members(truth_table, topn=topn, name=name)
def load_blacklist(self, path):
with open(path, "rt") as fin:
blacklist = json.load(fin)
filtered_bad_node = []
for el in blacklist["bad_node"]:
if el not in self.name2index:
warnings.warn("Node %r under `bad_node` is not a known wikidata id." % (
el
))
continue
filtered_bad_node.append(el)
bad_node = set(self.name2index[el] for el in filtered_bad_node)
filtered_bad_node_pair = []
for el, oel in blacklist["bad_node_pair"]:
if el not in self.name2index:
warnings.warn("Node %r under `bad_node_pair` is not a known wikidata id." % (
el
))
continue
if oel not in self.name2index:
warnings.warn("Node %r under `bad_node_pair` is not a known wikidata id." % (
oel
))
continue
filtered_bad_node_pair.append((el, oel))
bad_node_pair = set([(self.name2index[el], self.name2index[oel])
for el, oel in filtered_bad_node_pair])
self.set_bad_node(bad_node, bad_node_pair)
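# Illustrative usage of TypeCollection (sketch, not part of the original
# module); the wikidata directory is a hypothetical path and Q5 ("human") is
# just an example root:
#
#   collection = TypeCollection("/path/to/wikidata", num_names_to_load=0)
#   human = collection.name2index["Q5"]
#   # boolean mask over all entities reachable from Q5 through the inverted
#   # instance-of / subclass-of edges:
#   is_human = collection.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [human])
#   collection.print_top_class_members(is_human, name="Human")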
|
INSTANCE_OF = "P31"
SUBCLASS_OF = "P279"
PART_OF = "P361"
OCCUPATION = "P106"
FIELD_OF_WORK = "P101"
FIELD_OF_THIS_OCCUPATION = "P425"
MEDICAL_SPECIALITY = "P1995"
GENRE = "P136"
SEX_OR_GENDER = "P21"
COUNTRY_OF_CITIZENSHIP = "P27"
COUNTRY = "P17"
CONTINENT = "P30"
LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY = "P131"
SPORT = "P641"
STUDIES = "P2578"
SERIES = "P179"
USE = "P366"
LOCATION = "P276"
FACET_OF = "P1269"
IS_A_LIST_OF = "P360"
COUNTRY_OF_ORIGIN = "P495"
PRODUCT_OR_MATERIAL_PRODUCED = "P1056"
INDUSTRY = "P452"
PARENT_TAXON = "P171"
APPLIES_TO_TERRITORIAL_JURISDICTION = "P1001"
POSITION_HELD = "P39"
CATEGORYS_MAIN_TOPIC = "P301"
PUBLICATION_DATE = "P577"
DATE_OF_BIRTH = "P569"
DATE_OF_DEATH = "P570"
INCEPTION = "P571"
DISSOLVED_OR_ABOLISHED = "P576"
POINT_IN_TIME = "P585"
START_TIME = "P580"
END_TIME = "P582"
CATEGORY_LINK = "enwiki_category_links"
FIXED_POINTS = "enwiki_fixed_points"
|
from os.path import exists
from os import stat
def true_exists(fname):
return exists(fname) and stat(fname).st_size > 100
|
import progressbar
percentage = progressbar.Percentage()
counter = progressbar.Counter()
bar = progressbar.Bar()
adaptive_eta = progressbar.AdaptiveETA()
class MessageProgressbar(progressbar.ProgressBar):
def set_message(self, message):
self.widgets[0] = message + " "
def set_item(self, item):
self.widgets[4] = " %s) " % (item,)
def get_progress_bar(message, max_value=None, item="lines"):
"""
Construct a progressbar iterator wrapper
with an ETA and percentage information.
Arguments:
----------
message : str, title for the progress bar.
    max_value : None or int, expected total number of iterations.
    item : str, label for the counted unit (e.g. "lines").
Returns:
--------
ProgressBar : object that can wrap an iterator
and print out duration estimates and
iteration stats.
"""
widgets = [
message + " ",
percentage,
" (",
counter,
" %s) " % (item,),
bar,
adaptive_eta
]
return MessageProgressbar(widgets=widgets, maxval=max_value)
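# Illustrative usage (sketch): wrap any iterable to display progress, the same
# way the extraction scripts in this project call it.
if __name__ == "__main__":
    import time
    for _ in get_progress_bar("demo", max_value=50, item="items")(range(50)):
        time.sleep(0.01)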
|
from os.path import exists
import numpy as np
from .successor_mask import (
convert_to_offset_array, make_dense, make_sparse
)
def count_non_zero(dense):
return len(np.nonzero(dense[1:] - dense[:-1])[0]) + int(dense[0] != 0)
def should_compress(dense):
nonzeros = count_non_zero(dense)
return (2 * nonzeros + 1) < 0.5 * len(dense)
class OffsetArray(object):
def __init__(self, values, offsets):
self.values = values
self.offsets = offsets
def __getitem__(self, idx):
end = self.offsets[idx]
start = 0 if idx == 0 else self.offsets[idx - 1]
return self.values[start:end]
def is_empty(self, idx):
end = self.offsets[idx]
start = 0 if idx == 0 else self.offsets[idx - 1]
return start == end
def size(self):
return self.offsets.shape[0]
def edges(self):
num_edges = np.zeros(len(self.offsets), dtype=np.int32)
num_edges[0] = self.offsets[0]
num_edges[1:] = self.offsets[1:] - self.offsets[:-1]
return num_edges
@classmethod
def load(cls, path, compress=True):
values = np.load(path + "_values.npy")
if exists(path + "_offsets.sparse.npy"):
offsets_compressed = np.load(path + "_offsets.sparse.npy")
offsets = make_dense(offsets_compressed, cumsum=True)
else:
# legacy mode, load dense versions:
offsets = np.load(path + "_offsets.npy")
if compress:
if should_compress(offsets):
offsets_compressed = make_sparse(offsets)
np.save(path + "_offsets.sparse.npy", offsets_compressed)
# optionally delete the old version here
return OffsetArray(
values,
offsets
)
def convert_dict_to_offset_array(dictionary, num_values):
offsets = np.zeros(num_values, dtype=np.int32)
total_num_values = sum(len(v) for _, v in dictionary.items())
values = np.zeros(total_num_values, dtype=np.int32)
position = 0
for key, value in sorted(dictionary.items(), key=lambda x: x[0]):
values[position:position + len(value)] = value
position += len(value)
offsets[key] = len(value)
np.cumsum(offsets, out=offsets)
return values, offsets
def save_record_with_offset(path, index2indices, total_size=None):
if isinstance(index2indices, dict):
if total_size is None:
raise ValueError("cannot leave total_size None "
"when using a dict.")
values, offsets = convert_dict_to_offset_array(index2indices, total_size)
else:
values, offsets = convert_to_offset_array(index2indices)
np.save(path + "_values.npy", values)
if should_compress(offsets):
compressed_offsets = make_sparse(offsets)
np.save(path + "_offsets.sparse.npy", compressed_offsets)
else:
np.save(path + "_offsets.npy", offsets)
def load_sparse(path):
compressed = np.load(path)
dense = make_dense(compressed, cumsum=False)
non_zero_indices = compressed[1::2]
mask = np.zeros(len(dense), dtype=np.bool)
mask[non_zero_indices] = True
return dense, mask
class SparseAttribute(object):
def __init__(self, dense, mask):
self.dense = dense
self.mask = mask
def __lt__(self, value):
return np.logical_and(self.dense < value, self.mask)
def __le__(self, value):
return np.logical_and(self.dense <= value, self.mask)
def __gt__(self, value):
return np.logical_and(self.dense > value, self.mask)
def __ge__(self, value):
return np.logical_and(self.dense >= value, self.mask)
def __eq__(self, value):
return np.logical_and(self.dense == value, self.mask)
@classmethod
def load(cls, path):
dense, mask = load_sparse(path + "_values.sparse.npy")
return SparseAttribute(dense, mask)
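# Small illustration of the offset layout used above (sketch):
#
#   values, offsets = convert_dict_to_offset_array({0: [3, 4], 2: [7]}, num_values=3)
#   arr = OffsetArray(values, offsets)
#   arr[0].tolist()   # -> [3, 4]
#   arr[1].tolist()   # -> []   (index 1 has no entries)
#   arr[2].tolist()   # -> [7]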
|
LANGUAGE_CODES = ["en", "zh", "fr", "ja",
"ru", "pt", "ca", "fa",
"ar", "fi", "hu", "id",
"es", "it", "war", "ceb",
"nl", "de", "sv", "ro",
"cs", "ko", "sr", "ms",
"tr", "min", "eo", "eu",
"kk", "da", "bg", "sk",
"hy", "he", "lt", "sl",
"et", "uz", "gl", "nn",
"la", "vo", "simple",
"el", "ce", "be", "ka",
"hi", "az", "th", "ur",
"oc", "mk", "ta", "mg",
"new", "tt", "cy", "tl",
"bs", "br", "ne", "gu",
"io", "bpy", "nds", "ku",
"als", "pa", "su", "kn",
"bar", "ckb", "ia", "arz"]
|
import json
from collections import namedtuple
from os.path import join, dirname
def dict_fix_relative_paths(basepath, relative_paths):
if relative_paths is None:
relative_paths = []
def load(d):
new_obj = d.copy()
for key in relative_paths:
if key in new_obj:
if isinstance(new_obj[key], str):
new_obj[key] = join(basepath, new_obj[key])
elif isinstance(new_obj[key], list) and len(new_obj[key]) > 0 and isinstance(new_obj[key][0], str):
new_obj[key] = [join(basepath, path) for path in new_obj[key]]
return new_obj
return load
def load_config(path, relative_paths=None, defaults=None, relative_to=None):
if relative_to is None:
relative_to = dirname(path)
object_hook = dict_fix_relative_paths(relative_to, relative_paths)
with open(path, "rt") as fin:
obj = json.load(
fin,
object_hook=object_hook
)
if defaults is not None:
for key, value in defaults.items():
if key not in obj:
obj[key] = value
return json.loads(
json.dumps(obj),
object_hook=lambda d: namedtuple('X', d.keys())(*d.values())
)
def json_loads(bytes):
return json.loads(bytes.decode('utf-8'))
def json_serializer(x):
return json.dumps(
x, check_circular=False, separators=(',', ':')
).encode('utf-8')
|
from os.path import exists, join, dirname
import marisa_trie
import json
from .file import true_exists
from os import makedirs
class MarisaAsDict(object):
def __init__(self, marisa):
self.marisa = marisa
def get(self, key, fallback):
value = self.marisa.get(key, None)
if value is None:
return fallback
else:
return value[0][0]
def __getitem__(self, key):
value = self.marisa[key]
return value[0][0]
def __contains__(self, key):
return key in self.marisa
def load_wikidata_ids(path, verbose=True):
wikidata_ids_inverted_path = join(path, 'wikidata_ids_inverted.marisa')
with open(join(path, "wikidata_ids.txt"), "rt") as fin:
ids = fin.read().splitlines()
if exists(wikidata_ids_inverted_path):
if verbose:
print("loading wikidata id -> index")
name2index = MarisaAsDict(marisa_trie.RecordTrie('i').load(wikidata_ids_inverted_path))
if verbose:
print("done")
else:
if verbose:
print("building trie")
name2index = MarisaAsDict(
marisa_trie.RecordTrie('i', [(name, (k,)) for k, name in enumerate(ids)])
)
name2index.marisa.save(wikidata_ids_inverted_path)
if verbose:
print("done")
return (ids, name2index)
def load_names(path, num, prefix):
names = {}
    if num > 0:
        with open(path, "rt", encoding="UTF-8") as fin:
            for line in fin:
                try:
                    name, number = line.rstrip('\n').split('\t')
                except ValueError:
                    # skip malformed lines instead of reusing stale values
                    continue
                number = int(number)
                if number >= num:
                    break
                if name.startswith(prefix):
                    # strip the "<prefix>/" leader from the stored title
                    names[number] = name[len(prefix) + 1:]
return names
def sparql_query(query):
import requests
wikidata_url = "https://query.wikidata.org/sparql"
response = requests.get(
wikidata_url,
params={
"format": "json",
"query": query
}
).json()
out = {}
for el in response["results"]['bindings']:
label = el['propertyLabel']['value']
value = el['property']['value']
if value.startswith("http://www.wikidata.org/entity/"):
value = value[len("http://www.wikidata.org/entity/"):]
out[value] = label
return out
def saved_sparql_query(savename, query):
directory = dirname(savename)
makedirs(directory, exist_ok=True)
if true_exists(savename):
with open(savename, "rt") as fin:
out = json.load(fin)
return out
else:
out = sparql_query(query)
with open(savename, "wt") as fout:
json.dump(out, fout)
return out
def property_names(prop_save_path):
    """
    Retrieve the mapping between wikidata property ids (e.g. "P531") and
    their human-readable names (e.g. "diplomatic mission sent").
Returns:
dict<str, str> : mapping from property id to property descriptor.
"""
return saved_sparql_query(
prop_save_path,
"""
SELECT DISTINCT ?property ?propertyLabel
WHERE
{
?property a wikibase:Property .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
"""
)
def temporal_property_names(prop_save_path):
    """
    Retrieve the mapping between wikidata property ids (e.g. "P531") and
    their human-readable names (e.g. "diplomatic mission sent"), restricted
    to time-based properties.
Returns:
dict<str, str> : mapping from property id to property descriptor.
"""
return saved_sparql_query(
prop_save_path,
"""
SELECT DISTINCT ?property ?propertyLabel
WHERE
{
?property a wikibase:Property .
{?property wdt:P31 wd:Q18636219} UNION {?property wdt:P31 wd:Q22661913} .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
"""
)
|
import re
import numpy as np
from os.path import join
from epub_conversion import convert_wiki_to_lines
from epub_conversion.wiki_decoder import almost_smart_open
from .wikipedia_language_codes import LANGUAGE_CODES
from .file import true_exists
from .bash import execute_bash
from .successor_mask import (
load_redirections, match_wikipedia_to_wikidata
)
BADS = ["Wikipedia:", "Wikipédia:", "File:", "Media:", "Help:", "User:"]
def _lines_extractor(lines, article_name):
"""
Simply outputs lines
"""
yield (article_name, lines)
def _bad_link(link):
return any(link.startswith(el) for el in BADS)
def iterate_articles(path):
num_articles = 9999999999999
with almost_smart_open(path, "rb") as wiki:
for article_name, lines in convert_wiki_to_lines(
wiki,
max_articles=num_articles,
clear_output=True,
report_every=100,
parse_special_pages=True,
skip_templated_lines=False,
line_converter=_lines_extractor):
if not _bad_link(article_name):
yield (article_name, lines)
def induce_wikipedia_prefix(wikiname):
if wikiname in {code + "wiki" for code in LANGUAGE_CODES}:
return wikiname
else:
raise ValueError("Could not determine prefix for wiki "
"with name %r." % (wikiname,))
def convert_sql_to_lookup(props, propname):
propname = b",'" + propname.encode("utf-8") + b"','"
ending = b"',"
starting = b"("
lookup = {}
offset = 0
while True:
newpos = props.find(propname, offset)
if newpos == -1:
break
begin = props.rfind(starting, offset, newpos)
end = props.find(ending, newpos + len(propname))
key = props[begin + len(starting):newpos]
value = props[newpos + len(propname):end]
lookup[key.decode('utf-8')] = value.decode('utf-8')
offset = end
return lookup
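# Tiny illustration of the tuple scanning above (sketch, using a fabricated
# fragment shaped like the page_props SQL dump):
#
#   props = b"(12,'wikibase_item','Q42',0),(13,'page_image','x.jpg',0)"
#   convert_sql_to_lookup(props, "wikibase_item")   # -> {'12': 'Q42'}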
def load_wikipedia_pageid_to_wikidata(data_dir):
fname = join(data_dir, "enwiki-latest-page_props.sql")
if not true_exists(fname):
execute_bash(
"wget -O - https://dumps.wikimedia.org/enwiki/"
"latest/enwiki-latest-page_props.sql.gz | gunzip > %s" % (fname,)
)
with open(fname, "rb") as fin:
props = fin.read()
return convert_sql_to_lookup(props, "wikibase_item")
link_pattern = re.compile(r'\[\[([^\]\[:]*)\]\]')
class WikipediaDoc(object):
def __init__(self, doc):
self.doc = doc
def links(self, wiki_trie, redirections, prefix):
current_pos = 0
for match in re.finditer(link_pattern, self.doc):
match_string = match.group(1)
start = match.start()
end = match.end()
if current_pos != start:
yield self.doc[current_pos:start], None
current_pos = end
if "|" in match_string:
link, anchor = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
else:
anchor = match_string
link = anchor.strip()
if len(link) > 0:
dest_index = match_wikipedia_to_wikidata(
link,
wiki_trie,
redirections,
prefix
)
yield anchor, dest_index
else:
yield anchor, None
if current_pos != len(self.doc):
yield self.doc[current_pos:], None
def load_wikipedia_docs(path, size):
docs = []
for article_name, doc in iterate_articles(path):
docs.append(WikipediaDoc(doc))
if len(docs) == size:
break
return docs
def transition_trie_index(anchor_idx, dest_index, transitions, all_options):
"""
Recover the new trie index for an index that has gone stale.
Use a transitions array to know how original anchors now map to
new trie indices.
"""
option_transitions = transitions[anchor_idx]
dest_index = option_transitions[option_transitions[:, 0] == dest_index, 1]
if len(dest_index) == 0:
dest_index = -1
else:
dest_index = np.asscalar(dest_index)
if dest_index != -1:
if not np.any(all_options == dest_index):
dest_index = -1
return dest_index
__all__ = ["load_redirections", "induce_wikipedia_prefix",
"load_wikipedia_docs", "WikipediaDoc",
"transition_trie_index", "iterate_articles"]
|
"""
Compress a jsonl version of Wikidata by throwing away descriptions, labels,
and aliases, and converting the file to msgpack format.
Usage
-----
```
python3 compress_wikidata_msgpack.py wikidata.json wikidata.msgpack
```
"""
import argparse
import msgpack
from wikidata_linker_utils.wikidata_iterator import open_wikidata_file
from wikidata_linker_utils.progressbar import get_progress_bar
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('wikidata')
parser.add_argument('out')
return parser.parse_args(args=args)
def main():
args = parse_args()
approx_max_quantity = 24642416
pbar = get_progress_bar('compress wikidata', max_value=approx_max_quantity, item='entities')
pbar.start()
seen = 0
with open(args.out, "wb") as fout:
for doc in open_wikidata_file(args.wikidata, 1000):
seen += 1
if 'descriptions' in doc:
del doc['descriptions']
if 'labels' in doc:
del doc['labels']
if 'aliases' in doc:
del doc['aliases']
for claims in doc['claims'].values():
for claim in claims:
if 'id' in claim:
del claim['id']
if 'rank' in claim:
del claim['rank']
if 'references' in claim:
for ref in claim['references']:
if 'hash' in ref:
del ref['hash']
if 'qualifiers' in claim:
for qualifier in claim['qualifiers'].values():
if 'hash' in qualifier:
del qualifier['hash']
fout.write(msgpack.packb(doc))
if seen % 1000 == 0:
if seen < approx_max_quantity:
pbar.update(seen)
pbar.finish()
if __name__ == "__main__":
main()
|
import argparse
from os.path import join
from os import makedirs
import marisa_trie
import numpy as np
from wikidata_linker_utils.bash import count_lines
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.wikipedia import match_wikipedia_to_wikidata, load_redirections
from wikidata_linker_utils.successor_mask import construct_mapping, construct_anchor_trie
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("wikipedia2wikidata_trie",
help="Location of wikipedia -> wikidata mapping trie.")
parser.add_argument("prefix", type=str,
help="What language is being processed, e.g. enwiki, frwiki, etc.")
parser.add_argument("anchor_tags", type=str,
help="Location where anchor tags were saved (tsv).")
parser.add_argument("redirections", type=str,
help="Location where redirections were saved (tsv).")
parser.add_argument("out", type=str,
help="Directory to save trie/data in.")
return parser.parse_args(argv)
def main():
args = parse_args()
makedirs(args.out, exist_ok=True)
wikipedia2wikidata_trie = marisa_trie.RecordTrie('i').load(
args.wikipedia2wikidata_trie
)
print('loaded trie')
redirections = load_redirections(args.redirections)
anchor_trie = construct_anchor_trie(
anchor_tags=args.anchor_tags,
wikipedia2wikidata_trie=wikipedia2wikidata_trie,
redirections=redirections,
prefix=args.prefix
)
anchor_trie.save(join(args.out, 'trie.marisa'))
(
(
trie_index2indices_offsets,
trie_index2indices_values,
trie_index2indices_counts
),
(
trie_index2contexts_offsets,
trie_index2contexts_values,
trie_index2contexts_counts
)
) = construct_mapping(
anchor_tags=args.anchor_tags,
wikipedia2wikidata_trie=wikipedia2wikidata_trie,
redirections=redirections,
prefix=args.prefix,
anchor_trie=anchor_trie
)
np.save(join(args.out, "trie_index2indices_offsets.npy"), trie_index2indices_offsets)
np.save(join(args.out, "trie_index2indices_values.npy"), trie_index2indices_values)
np.save(join(args.out, "trie_index2indices_counts.npy"), trie_index2indices_counts)
np.save(join(args.out, "trie_index2contexts_offsets.npy"), trie_index2contexts_offsets)
np.save(join(args.out, "trie_index2contexts_values.npy"), trie_index2contexts_values)
np.save(join(args.out, "trie_index2contexts_counts.npy"), trie_index2contexts_counts)
if __name__ == "__main__":
main()
|
import argparse
import sys
import json
import time
import traceback
from os import makedirs
from os.path import join, dirname, realpath
from wikidata_linker_utils.repl import (
enter_or_quit, reload_module,
ALLOWED_RUNTIME_ERRORS,
ALLOWED_IMPORT_ERRORS
)
from wikidata_linker_utils.logic import logical_ors
from wikidata_linker_utils.type_collection import TypeCollection
import wikidata_linker_utils.wikidata_properties as wprop
import numpy as np
SCRIPT_DIR = dirname(realpath(__file__))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('wikidata', type=str,
help="Location of wikidata properties.")
parser.add_argument('classifiers', type=str, nargs="+",
help="Filename(s) for Python script that classifies entities.")
parser.add_argument('--export_classification', type=str, nargs="+",
default=None,
help="Location to save the result of the entity classification.")
parser.add_argument('--num_names_to_load', type=int, default=20000000,
help="Number of names to load from disk to accelerate reporting.")
parser.add_argument('--language_path', type=str, default=None,
help="Location of a language-wikipedia specific information set to "
"provide language/wikipedia specific metrics.")
parser.add_argument('--interactive', action="store_true", default=True,
help="Operate in a REPL. Reload scripts on errors or on user prompt.")
parser.add_argument('--nointeractive', action="store_false",
dest="interactive", help="Run classification without REPL.")
    parser.add_argument('--use-cache', action="store_true",
                        dest="use_cache", help="cache the results of satisfy calls.")
    parser.add_argument('--nouse-cache', action="store_false",
                        dest="use_cache", help="do not cache the results of satisfy calls.")
return parser.parse_args()
def get_other_class(classification):
if len(classification) == 0:
return None
return np.logical_not(logical_ors(
list(classification.values())
))
def export_classification(classification, path):
classes = sorted(list(classification.keys()))
if len(classes) == 0:
return
makedirs(path, exist_ok=True)
num_items = classification[classes[0]].shape[0]
classid = np.zeros(num_items, dtype=np.int32)
selected = np.zeros(num_items, dtype=np.bool)
for index, classname in enumerate(classes):
truth_table = classification[classname]
selected = selected | truth_table
classid = np.maximum(classid, truth_table.astype(np.int32) * index)
other = np.logical_not(selected)
if other.sum() > 0:
classes_with_other = classes + ["other"]
classid = np.maximum(classid, other.astype(np.int32) * len(classes))
else:
classes_with_other = classes
with open(join(path, "classes.txt"), "wt") as fout:
for classname in classes_with_other:
fout.write(classname + "\n")
np.save(join(path, "classification.npy"), classid)
def main():
args = parse_args()
should_export = args.export_classification is not None
if should_export and len(args.export_classification) != len(args.classifiers):
raise ValueError("Must have as many export filenames as classifiers.")
collection = TypeCollection(
args.wikidata,
num_names_to_load=args.num_names_to_load,
language_path=args.language_path,
cache=args.use_cache
)
if args.interactive:
alert_failure = enter_or_quit
else:
alert_failure = lambda: sys.exit(1)
while True:
try:
collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json"))
except (ValueError,) as e:
print("Issue reading blacklist, please fix.")
print(str(e))
alert_failure()
continue
classifications = []
for class_idx, classifier_fname in enumerate(args.classifiers):
while True:
try:
classifier = reload_module(classifier_fname)
except ALLOWED_IMPORT_ERRORS as e:
print("issue reading %r, please fix." % (classifier_fname,))
print(str(e))
traceback.print_exc(file=sys.stdout)
alert_failure()
continue
try:
t0 = time.time()
classification = classifier.classify(collection)
classifications.append(classification)
if class_idx == len(args.classifiers) - 1:
collection.reset_cache()
t1 = time.time()
print("classification took %.3fs" % (t1 - t0,))
except ALLOWED_RUNTIME_ERRORS as e:
print("issue running %r, please fix." % (classifier_fname,))
print(str(e))
traceback.print_exc(file=sys.stdout)
alert_failure()
continue
break
try:
# show cardinality for each truth table:
if args.interactive:
mega_other_class = None
for classification in classifications:
for classname in sorted(classification.keys()):
print("%r: %d members" % (classname, int(classification[classname].sum())))
print("")
summary = {}
for classname, truth_table in classification.items():
(members,) = np.where(truth_table)
summary[classname] = [collection.get_name(int(member)) for member in members[:20]]
print(json.dumps(summary, indent=4))
other_class = get_other_class(classification)
if other_class.sum() > 0:
# there are missing items:
to_report = (
classifier.class_report if hasattr(classifier, "class_report") else
[wprop.SUBCLASS_OF, wprop.INSTANCE_OF, wprop.OCCUPATION, wprop.CATEGORY_LINK]
)
collection.class_report(to_report, other_class, name="Other")
if mega_other_class is None:
mega_other_class = other_class
else:
mega_other_class = np.logical_and(mega_other_class, other_class)
if len(classifications) > 1:
if mega_other_class.sum() > 0:
# there are missing items:
to_report = [wprop.SUBCLASS_OF, wprop.INSTANCE_OF, wprop.OCCUPATION, wprop.CATEGORY_LINK]
collection.class_report(to_report, mega_other_class, name="Other-combined")
if should_export:
assert(len(classifications) == len(args.export_classification)), (
"classification outputs missing for export."
)
for classification, savename in zip(classifications, args.export_classification):
export_classification(classification, savename)
except KeyboardInterrupt as e:
pass
if args.interactive:
enter_or_quit()
else:
break
if __name__ == "__main__":
main()
|
import argparse
from os import remove
from wikidata_linker_utils.bash import execute_bash
import h5py
def produce_window_dataset(path, window_size, out):
num_columns = 0
with open(path, "rt") as fin:
line_locations = []
for idx, line in enumerate(fin):
if "\t" in line:
line_locations.append(idx)
if num_columns == 0:
num_columns = len(line.split("\t"))
if line == "\n":
line_locations.append(-1)
groups = []
current_group = []
max_buffer_size = 250000
read_size = 100000
seen_classes = {}
for line_location in line_locations:
if line_location == -1:
if len(current_group) > 0:
groups.append(current_group)
current_group = []
else:
if len(current_group) == 0:
current_group.append(line_location)
elif abs(current_group[-1] - line_location) <= window_size:
current_group.append(line_location)
else:
groups.append(current_group)
current_group = [line_location]
if len(current_group) > 0:
groups.append(current_group)
num_examples = len(groups)
EMPTY = ""
with h5py.File(out, "w") as handle:
datasets = []
for col in range(num_columns):
datasets.append(
handle.create_dataset(
str(col),
(num_examples,),
dtype=h5py.special_dtype(vlen=str),
chunks=(1500,)
# compression="gzip",
# compression_opts=9
)
)
k = 0
with open(path, "rt") as fin:
current_location = 0
current_lines = fin.readlines(read_size)
current_end = current_location + len(current_lines)
for group in groups:
start = max(0, group[0] - window_size)
end = group[-1] + window_size
if end > current_end:
# read more lines into buffer:
current_lines = current_lines + fin.readlines(read_size)
# advance buffer max location
current_end = current_location + len(current_lines)
if len(current_lines) > max_buffer_size:
# compute how much to remove from buffer
to_chop = len(current_lines) - max_buffer_size
# move start location
current_location += to_chop
# remove extra buffer lines
current_lines = current_lines[to_chop:]
# ensure that we do not cross white space boundaries
start_delay = 0
for idx, line in enumerate(current_lines[start - current_location:group[0] - current_location]):
if line == "\n":
start_delay = idx
start += start_delay
early_end = window_size
for idx, line in enumerate(current_lines[group[-1] - current_location:end - current_location]):
if line == "\n":
early_end = idx
break
end = group[-1] + early_end
cols = [[] for i in range(num_columns)]
for line in current_lines[start - current_location:end - current_location]:
vals = line.rstrip().split("\t")
for col_index in range(num_columns):
if len(vals) > col_index:
cols[col_index].append(vals[col_index])
else:
cols[col_index].append(EMPTY)
for col_index, dataset in zip(cols, datasets):
dataset[k] = "\n".join(col_index)
k += 1
def file_slice(path, start, end, destination, append):
file_operator = ">>" if append else ">"
delta = end - start
command = "head -n %d %s | tail -n %d %s %s" % (
end,
path,
delta,
file_operator,
destination
)
execute_bash(command)
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("path")
parser.add_argument("out_train")
parser.add_argument("out_validation")
parser.add_argument("--window_size", type=int, default=5)
parser.add_argument("--total_size", type=int, required=True)
parser.add_argument("--validation_start", type=int, required=True)
parser.add_argument("--validation_size", type=int, default=500000)
return parser.parse_args(args=args)
def main():
args = parse_args()
if args.total_size < args.validation_size:
raise ValueError("cannot have total_size (%d) < validation_size "
"(%d)" % (args.total_size, args.validation_size))
if args.validation_start > args.total_size:
raise ValueError("cannot have validation_start (%d) begin after "
"total_size (%d)" % (args.validation_start, args.total_size))
if args.validation_start + args.validation_size > args.total_size:
raise ValueError("cannot have validation_start + validation_size (%d)"
" be larger than total_size (%d)" % (
args.validation_start + args.validation_size, args.total_size
))
train_temp = args.out_train + ".train_temp"
try:
file_slice(
args.path,
0,
args.validation_start,
train_temp,
append=False
)
file_slice(
args.path,
args.validation_start + args.validation_size,
args.total_size,
train_temp,
append=True
)
print("created temp file %s" % (train_temp))
produce_window_dataset(
train_temp, args.window_size, args.out_train
)
print("created windowed dataset for train")
finally:
print("removing temp file %s" % (train_temp))
remove(train_temp)
try:
validation_temp = args.out_validation + ".validation_temp"
file_slice(
args.path,
args.validation_start,
args.validation_start + args.validation_size,
validation_temp,
append=False
)
print("created temp file %s" % (validation_temp))
produce_window_dataset(validation_temp, args.window_size, args.out_validation)
print("created windowed dataset for validation")
finally:
print("removing temp file %s" % (validation_temp))
remove(validation_temp)
if __name__ == "__main__":
main()
|
import json
import argparse
import time
import random
import numpy as np
from evaluate_type_system import fix_and_parse_tags
from wikidata_linker_utils.json import load_config
from wikidata_linker_utils.type_collection import TypeCollection
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.wikipedia import induce_wikipedia_prefix
from os.path import realpath, dirname, join, exists
from wikidata_linker_utils.fast_disambiguate import (
beam_project, cem_project, ga_project
)
SCRIPT_DIR = dirname(realpath(__file__))
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("config", type=str)
parser.add_argument("out", type=str)
parser.add_argument("--relative_to", default=None, type=str)
parser.add_argument("--penalty", default=0.0005, type=float)
parser.add_argument("--beam_width", default=8, type=float)
parser.add_argument("--beam_search_subset", default=2000, type=int)
parser.add_argument("--log", default=None, type=str)
parser.add_argument("--samples", type=int, default=1000)
parser.add_argument("--ngen", type=int, default=40)
parser.add_argument("--method", type=str,
choices=["cem", "greedy", "beam", "ga"],
default="greedy")
return parser.parse_args(args=args)
def load_aucs():
paths = [
"/home/jonathanraiman/en_field_auc_w10_e10.json",
"/home/jonathanraiman/en_field_auc_w10_e10-s1234.json",
"/home/jonathanraiman/en_field_auc_w5_e5.json",
"/home/jonathanraiman/en_field_auc_w5_e5-s1234.json"
]
aucs = {}
for path in paths:
with open(path, "rt") as fin:
auc_report = json.load(fin)
for report in auc_report:
key = (report["qid"], report["relation"])
if key in aucs:
aucs[key].append(report["auc"])
else:
aucs[key] = [report["auc"]]
for key in aucs.keys():
aucs[key] = np.mean(aucs[key])
return aucs
def greedy_disambiguate(tags):
greedy_correct = 0
total = 0
for dest, other_dest, times_pointed in tags:
total += 1
if len(other_dest) == 1 and dest == other_dest[0]:
greedy_correct += 1
elif other_dest[np.argmax(times_pointed)] == dest:
greedy_correct += 1
return greedy_correct, total
def fast_disambiguate(tags, all_classifications):
correct = 0
total = 0
for dest, other_dest, times_pointed in tags:
total += 1
if len(other_dest) == 1 and dest == other_dest[0]:
correct += 1
else:
identities = np.all(all_classifications[other_dest, :] == all_classifications[dest, :], axis=1)
matches = other_dest[identities]
matches_counts = times_pointed[identities]
if len(matches) == 1 and matches[0] == dest:
correct += 1
elif matches[np.argmax(matches_counts)] == dest:
correct += 1
return correct, total
def get_prefix(config):
return config.prefix or induce_wikipedia_prefix(config.wiki)
MAX_PICKS = 400.0
def rollout(cached_satisfy, key2row, tags, aucs, ids, sample,
penalty, greedy_correct):
mean_auc = 0.0
sample_sum = sample.sum()
if sample_sum == 0:
total = len(tags)
return (greedy_correct / total,
greedy_correct / total)
if sample_sum > MAX_PICKS:
return 0.0, 0.0
all_classifications = None
if sample_sum > 0:
all_classifications = np.zeros((len(ids), int(sample_sum)), dtype=np.bool)
col = 0
for picked, (key, auc) in zip(sample, aucs):
if picked:
all_classifications[:, col] = cached_satisfy[key2row[key]]
col += 1
mean_auc += auc
mean_auc = mean_auc / sample_sum
correct, total = fast_disambiguate(tags, all_classifications)
# here's the benefit of using types:
improvement = correct - greedy_correct
# penalty for using unreliable types:
objective = (
(greedy_correct + improvement * mean_auc) / total -
# number of items is penalized
sample_sum * penalty
)
return objective, correct / total
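# In symbols (sketch): with greedy accuracy g = greedy_correct / total,
# type-assisted correct count c, mean AUC a of the selected types and s
# selected types, the returned objective is
#     (greedy_correct + (c - greedy_correct) * a) / total - s * penalty
# i.e. the improvement over greedy is discounted by type reliability and each
# extra type pays a fixed penalty.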
def get_cached_satisfy(collection, aucs, ids, mmap=False):
path = join(SCRIPT_DIR, "cached_satisfy.npy")
if not exists(path):
cached_satisfy = np.zeros((len(aucs), len(ids)), dtype=np.bool)
for row, (qid, relation_name) in get_progress_bar("satisfy", item="types")(enumerate(sorted(aucs.keys()))):
cached_satisfy[row, :] = collection.satisfy([relation_name], [collection.name2index[qid]])[ids]
collection._satisfy_cache.clear()
np.save(path, cached_satisfy)
if mmap:
del cached_satisfy
cached_satisfy = np.load(path, mmap_mode="r")
else:
if mmap:
cached_satisfy = np.load(path, mmap_mode="r")
else:
cached_satisfy = np.load(path)
return cached_satisfy
def main():
args = parse_args()
config = load_config(
args.config,
["wiki",
"language_path",
"wikidata",
"redirections",
"classification"],
defaults={
"num_names_to_load": 0,
"prefix": None,
"sample_size": 100,
"wiki": None,
"fix_links": False,
"min_count": 0,
"min_percent": 0.0
},
relative_to=args.relative_to
)
if config.wiki is None:
raise ValueError("must provide path to 'wiki' in config.")
prefix = get_prefix(config)
collection = TypeCollection(
config.wikidata,
num_names_to_load=config.num_names_to_load,
prefix=prefix,
verbose=True
)
collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json"))
fname = config.wiki
test_tags = fix_and_parse_tags(config,
collection,
config.sample_size)
aucs = load_aucs()
ids = sorted(set([idx for doc_tags in test_tags
for _, tag in doc_tags if tag is not None
for idx in tag[2] if len(tag[2]) > 1]))
id2pos = {idx: k for k, idx in enumerate(ids)}
# use reduced identity system:
remapped_tags = []
for doc_tags in test_tags:
for text, tag in doc_tags:
if tag is not None:
remapped_tags.append(
(id2pos[tag[1]] if len(tag[2]) > 1 else tag[1],
np.array([id2pos[idx] for idx in tag[2]]) if len(tag[2]) > 1 else tag[2],
tag[3]))
test_tags = remapped_tags
aucs = {key: value for key, value in aucs.items() if value > 0.5}
print("%d relations to pick from with %d ids." % (len(aucs), len(ids)), flush=True)
cached_satisfy = get_cached_satisfy(collection, aucs, ids, mmap=args.method=="greedy")
del collection
key2row = {key: k for k, key in enumerate(sorted(aucs.keys()))}
if args.method == "greedy":
picks, _ = beam_project(
cached_satisfy,
key2row,
remapped_tags,
aucs,
ids,
beam_width=1,
penalty=args.penalty,
log=args.log
)
elif args.method == "beam":
picks, _ = beam_project(
cached_satisfy,
key2row,
remapped_tags,
aucs,
ids,
beam_width=args.beam_width,
penalty=args.penalty,
log=args.log
)
elif args.method == "cem":
picks, _ = cem_project(
cached_satisfy,
key2row,
remapped_tags,
aucs,
ids,
n_samples=args.samples,
penalty=args.penalty,
log=args.log
)
elif args.method == "ga":
picks, _ = ga_project(
cached_satisfy,
key2row,
remapped_tags,
aucs,
ids,
ngen=args.ngen,
n_samples=args.samples,
penalty=args.penalty,
log=args.log
)
else:
raise ValueError("unknown method %r." % (args.method,))
with open(args.out, "wt") as fout:
json.dump(picks, fout)
if __name__ == "__main__":
main()
|
import sys
import pickle
import argparse
import requests
import marisa_trie
import traceback
import numpy as np
from os.path import join, dirname, realpath, exists
from os import stat
from collections import Counter
from itertools import product
from wikidata_linker_utils.anchor_filtering import clean_up_trie_source, acceptable_anchor
from wikidata_linker_utils.wikipedia import (
load_wikipedia_docs, induce_wikipedia_prefix, load_redirections, transition_trie_index
)
from wikidata_linker_utils.json import load_config
from wikidata_linker_utils.offset_array import OffsetArray
from wikidata_linker_utils.repl import reload_run_retry, enter_or_quit
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.type_collection import TypeCollection, get_name as web_get_name
SCRIPT_DIR = dirname(realpath(__file__))
PROJECT_DIR = dirname(SCRIPT_DIR)
INTERNET = True
def maybe_web_get_name(s):
global INTERNET
if INTERNET:
try:
res = web_get_name(s)
return res
except requests.exceptions.ConnectionError:
INTERNET = False
return s
class OracleClassification(object):
def __init__(self, classes, classification, path):
self.classes = classes
self.classification = classification
self.path = path
self.contains_other = self.classes[-1] == "other"
def classify(self, index):
return self.classification[index]
def load_oracle_classification(path):
with open(join(path, "classes.txt"), "rt") as fin:
classes = fin.read().splitlines()
classification = np.load(join(path, "classification.npy"))
return OracleClassification(classes, classification, path)
def can_disambiguate(oracles, truth, alternatives,
times_pointed, count_threshold,
ignore_other=False, keep_other=False):
ambig = np.ones(len(alternatives), dtype=np.bool)
for oracle in oracles:
truth_pred = oracle.classify(truth)
alt_preds = oracle.classify(alternatives)
if keep_other and oracle.contains_other:
if truth_pred == len(oracle.classes) - 1:
continue
else:
ambig = np.logical_and(
ambig,
np.logical_or(
np.equal(alt_preds, truth_pred),
np.equal(alt_preds, len(oracle.classes) - 1)
)
)
elif ignore_other and oracle.contains_other and np.any(alt_preds == len(oracle.classes) - 1):
continue
else:
ambig = np.logical_and(ambig, np.equal(alt_preds, truth_pred))
# apply type rules to disambiguate:
alternatives_matching_type = alternatives[ambig]
alternatives_matching_type_times_pointed = times_pointed[ambig]
if len(alternatives_matching_type) <= 1:
return alternatives_matching_type, alternatives_matching_type_times_pointed, False
# apply rules for count thresholding:
ordered_times_pointed = np.argsort(alternatives_matching_type_times_pointed)[::-1]
top1count = alternatives_matching_type_times_pointed[ordered_times_pointed[0]]
top2count = alternatives_matching_type_times_pointed[ordered_times_pointed[1]]
if top1count > top2count + count_threshold and alternatives_matching_type[ordered_times_pointed[0]] == truth:
return (
alternatives_matching_type[ordered_times_pointed[0]:ordered_times_pointed[0]+1],
alternatives_matching_type_times_pointed[ordered_times_pointed[0]:ordered_times_pointed[0]+1],
True
)
return alternatives_matching_type, alternatives_matching_type_times_pointed, False
def disambiguate(tags, oracles):
ambiguous = 0
obvious = 0
disambiguated_oracle = 0
disambiguated_with_counts = 0
disambiguated_greedy = 0
disambiguated_with_background = 0
count_threshold = 0
ambiguous_tags = []
obvious_tags = []
non_obvious_tags = []
disambiguated_oracle_ignore_other = 0
disambiguated_oracle_keep_other = 0
for text, tag in tags:
if tag is None:
continue
anchor, dest, other_dest, times_pointed = tag
if len(other_dest) == 1:
obvious += 1
obvious_tags.append((anchor, dest, other_dest, times_pointed))
else:
ambiguous += 1
non_obvious_tags.append((anchor, dest, other_dest, times_pointed))
if other_dest[np.argmax(times_pointed)] == dest:
disambiguated_greedy += 1
matching_tags, times_pointed_subset, used_counts = can_disambiguate(
oracles, dest, other_dest, times_pointed, count_threshold
)
if len(matching_tags) <= 1:
if used_counts:
disambiguated_with_counts += 1
else:
disambiguated_oracle += 1
else:
ambiguous_tags.append(
(anchor, dest, matching_tags, times_pointed_subset)
)
matching_tags, times_pointed_subset, used_counts = can_disambiguate(
oracles, dest, other_dest, times_pointed, count_threshold, ignore_other=True
)
if len(matching_tags) <= 1:
disambiguated_oracle_ignore_other += 1
matching_tags, times_pointed_subset, used_counts = can_disambiguate(
oracles, dest, other_dest, times_pointed, count_threshold, keep_other=True
)
if len(matching_tags) <= 1:
disambiguated_oracle_keep_other += 1
report = {
"ambiguous": ambiguous,
"obvious": obvious,
"disambiguated oracle": disambiguated_oracle,
"disambiguated greedy": disambiguated_greedy,
"disambiguated oracle + counts": disambiguated_oracle + disambiguated_with_counts,
"disambiguated oracle + counts + ignore other": disambiguated_oracle_ignore_other,
"disambiguated oracle + counts + keep other": disambiguated_oracle_keep_other
}
return (report, ambiguous_tags)
def disambiguate_batch(test_tags, train_tags, oracles):
test_tags = test_tags
total_report = {}
ambiguous_tags = []
for tags in get_progress_bar("disambiguating", item="articles")(test_tags):
report, remainder = disambiguate(tags, oracles)
ambiguous_tags.extend(remainder)
for key, value in report.items():
if key not in total_report:
total_report[key] = value
else:
total_report[key] += value
return total_report, ambiguous_tags
def obtain_tags(doc,
wiki_trie,
anchor_trie,
trie_index2indices,
trie_index2indices_counts,
trie_index2indices_transitions,
redirections,
prefix,
collection,
first_names,
min_count,
min_percent):
out_doc = []
for anchor, dest_index in doc.links(wiki_trie, redirections, prefix):
if dest_index is None:
out_doc.append((anchor, None))
continue
anchor_stripped = anchor.strip()
keep = False
if len(anchor_stripped) > 0:
anchor_stripped = clean_up_trie_source(anchor_stripped)
if acceptable_anchor(anchor_stripped, anchor_trie, first_names):
anchor_idx = anchor_trie[anchor_stripped]
all_options = trie_index2indices[anchor_idx]
all_counts = trie_index2indices_counts[anchor_idx]
if len(all_options) > 0:
if trie_index2indices_transitions is not None:
old_dest_index = dest_index
dest_index = transition_trie_index(
anchor_idx, dest_index,
trie_index2indices_transitions,
all_options
)
if dest_index != -1:
new_dest_index = dest_index
keep = True
if keep and (min_count > 0 or min_percent > 0):
dest_count = all_counts[all_options==new_dest_index]
if dest_count < min_count or (dest_count / sum(all_counts)) < min_percent:
keep = False
if keep:
out_doc.append(
(
anchor,
(anchor_stripped, new_dest_index, all_options, all_counts)
)
)
if not keep:
out_doc.append((anchor, None))
return out_doc
def add_boolean(parser, name, default):
parser.add_argument("--%s" % (name,), action="store_true", default=default)
parser.add_argument("--no%s" % (name,), action="store_false", dest=name)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("config")
parser.add_argument("--relative_to", type=str, default=None)
parser.add_argument("--log", type=str, default=None)
add_boolean(parser, "verbose", True)
add_boolean(parser, "interactive", True)
return parser
def parse_args(args=None):
return get_parser().parse_args(args=args)
def summarize_disambiguation(total_report, file=None):
if file is None:
file = sys.stdout
if total_report.get("ambiguous", 0) > 0:
for key, value in sorted(total_report.items(), key=lambda x : x[1]):
if "disambiguated" in key:
print("%.3f%% disambiguated by %s (%d / %d)" % (
100.0 * value / total_report["ambiguous"],
key[len("disambiguated"):].strip(),
value, total_report["ambiguous"]
), file=file
)
print("", file=file)
for key, value in sorted(total_report.items(), key=lambda x : x[1]):
if "disambiguated" in key:
print("%.3f%% disambiguated by %s [including single choice] (%d / %d)" % (
100.0 * (
(value + total_report["obvious"]) /
(total_report["ambiguous"] + total_report["obvious"])
),
key[len("disambiguated"):].strip(),
value + total_report["obvious"],
total_report["ambiguous"] + total_report["obvious"]
), file=file
)
print("", file=file)
def summarize_ambiguities(ambiguous_tags,
oracles,
get_name):
class_ambiguities = {}
for anchor, dest, other_dest, times_pointed in ambiguous_tags:
class_ambig_name = []
for oracle in oracles:
class_ambig_name.append(oracle.classes[oracle.classify(dest)])
class_ambig_name = " and ".join(class_ambig_name)
if class_ambig_name not in class_ambiguities:
class_ambiguities[class_ambig_name] = {
"count": 1,
"examples": [(anchor, dest, other_dest, times_pointed)]
}
else:
class_ambiguities[class_ambig_name]["count"] += 1
class_ambiguities[class_ambig_name]["examples"].append((anchor, dest, other_dest, times_pointed))
print("Ambiguity Report:")
for classname, ambiguity in sorted(class_ambiguities.items(), key=lambda x: x[0]):
print(" %s" % (classname,))
print(" %d ambiguities" % (ambiguity["count"],))
common_bad_anchors = Counter([anc for anc, _, _, _ in ambiguity["examples"]]).most_common(6)
anchor2example = {anc: (dest, other_dest, times_pointed) for anc, dest, other_dest, times_pointed in ambiguity["examples"]}
for bad_anchor, count in common_bad_anchors:
dest, other_dest, times_pointed = anchor2example[bad_anchor]
truth_times_pointed = int(times_pointed[np.equal(other_dest, dest)])
only_alt = [(el, int(times_pointed[k])) for k, el in enumerate(other_dest) if el != dest]
only_alt = sorted(only_alt, key=lambda x: x[1], reverse=True)
print(" %r (%d time%s)" % (bad_anchor, count, 's' if count != 1 else ''))
print(" Actual: %r" % ((get_name(dest), truth_times_pointed),))
print(" Others: %r" % ([(get_name(el), c) for (el, c) in only_alt[:5]]))
print("")
print("")
def get_prefix(config):
return config.prefix or induce_wikipedia_prefix(config.wiki)
def fix_and_parse_tags(config, collection, size):
trie_index2indices = OffsetArray.load(
join(config.language_path, "trie_index2indices"),
compress=True
)
trie_index2indices_counts = OffsetArray(
np.load(join(config.language_path, "trie_index2indices_counts.npy")),
trie_index2indices.offsets
)
if exists(join(config.language_path, "trie_index2indices_transition_values.npy")):
trie_index2indices_transitions = OffsetArray(
np.load(join(config.language_path, "trie_index2indices_transition_values.npy")),
np.load(join(config.language_path, "trie_index2indices_transition_offsets.npy")),
)
else:
trie_index2indices_transitions = None
anchor_trie = marisa_trie.Trie().load(join(config.language_path, "trie.marisa"))
wiki_trie = marisa_trie.RecordTrie('i').load(
join(config.wikidata, "wikititle2wikidata.marisa")
)
prefix = get_prefix(config)
redirections = load_redirections(config.redirections)
docs = load_wikipedia_docs(config.wiki, size)
while True:
try:
collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json"))
except (ValueError,) as e:
print("issue reading blacklist, please fix.")
print(str(e))
enter_or_quit()
continue
break
print("Load first_names")
with open(join(PROJECT_DIR, "data", "first_names.txt"), "rt") as fin:
first_names = set(fin.read().splitlines())
all_tags = []
for doc in get_progress_bar('fixing links', item='article')(docs):
tags = obtain_tags(
doc,
wiki_trie=wiki_trie,
anchor_trie=anchor_trie,
trie_index2indices=trie_index2indices,
trie_index2indices_counts=trie_index2indices_counts,
trie_index2indices_transitions=trie_index2indices_transitions,
redirections=redirections,
prefix=prefix,
first_names=first_names,
collection=collection,
min_count=config.min_count,
min_percent=config.min_percent)
if any(x is not None for _, x in tags):
all_tags.append(tags)
collection.reset_cache()
return all_tags
def main():
args = parse_args()
config = load_config(args.config,
["wiki",
"language_path",
"wikidata",
"redirections",
"classification",
"path"],
defaults={"num_names_to_load": 0,
"prefix": None,
"sample_size": 100,
"wiki": None,
"min_count": 0,
"min_percent": 0.0},
relative_to=args.relative_to)
if config.wiki is None:
raise ValueError("must provide path to 'wiki' in config.")
prefix = get_prefix(config)
print("Load type_collection")
collection = TypeCollection(
config.wikidata,
num_names_to_load=config.num_names_to_load,
prefix=prefix,
verbose=True)
fname = config.wiki
all_tags = fix_and_parse_tags(config, collection, config.sample_size)
test_tags = all_tags[:config.sample_size]
train_tags = all_tags[config.sample_size:]
oracles = [load_oracle_classification(classification)
for classification in config.classification]
def get_name(idx):
if idx < config.num_names_to_load:
if idx in collection.known_names:
return collection.known_names[idx] + " (%s)" % (collection.ids[idx],)
else:
return collection.ids[idx]
else:
return maybe_web_get_name(collection.ids[idx]) + " (%s)" % (collection.ids[idx],)
while True:
total_report, ambiguous_tags = disambiguate_batch(
test_tags, train_tags, oracles)
summarize_disambiguation(total_report)
if args.log is not None:
with open(args.log, "at") as fout:
summarize_disambiguation(total_report, file=fout)
if args.verbose:
try:
summarize_ambiguities(
ambiguous_tags,
oracles,
get_name
)
except KeyboardInterrupt as e:
pass
if args.interactive:
enter_or_quit()
else:
break
if __name__ == "__main__":
main()
|
import argparse
import marisa_trie
import numpy as np
from os.path import join
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.bash import count_lines
from wikidata_linker_utils.offset_array import save_record_with_offset
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("wikipedia2wikidata_trie",
help="Location of wikipedia -> wikidata mapping trie.")
parser.add_argument("wikidata_ids")
parser.add_argument("prefix")
parser.add_argument("category_links")
parser.add_argument("out")
return parser.parse_args(argv)
def main():
args = parse_args()
trie = marisa_trie.RecordTrie('i').load(args.wikipedia2wikidata_trie)
print('loaded trie')
num_lines = count_lines(args.category_links)
num_ids = count_lines(args.wikidata_ids)
missing = []
num_missing = 0
num_broken = 0
all_category_links = [[] for i in range(num_ids)]
with open(args.category_links, 'rt') as fin:
fin_pbar = get_progress_bar('reading category_links', max_value=num_lines)(fin)
for line in fin_pbar:
try:
origin, dest = line.rstrip('\n').split('\t')
except:
num_broken += 1
continue
if len(dest) == 0:
num_broken += 1
continue
origin = args.prefix + '/' + origin
prefixed_dest = args.prefix + '/' + dest
origin_index = trie.get(origin, None)
dest_index = trie.get(prefixed_dest, None)
if dest_index is None:
prefixed_dest = args.prefix + '/' + dest[0].upper() + dest[1:]
dest_index = trie.get(prefixed_dest, None)
if origin_index is None or dest_index is None:
missing.append((origin, prefixed_dest))
num_missing += 1
else:
all_category_links[origin_index[0][0]].append(dest_index[0][0])
print("%d/%d category links could not be found in wikidata" % (num_missing, num_lines))
print("%d/%d category links were malformed" % (num_broken, num_lines))
print("Missing links sample:")
for origin, dest in missing[:10]:
print("%r -> %r" % (origin, dest))
save_record_with_offset(
join(args.out, "wikidata_%s_category_links" % (args.prefix,)),
all_category_links
)
if __name__ == "__main__":
main()
|
import argparse
import time
import marisa_trie
import numpy as np
import pandas
from os.path import join, realpath, dirname
from os import makedirs
from wikidata_linker_utils.wikidata_iterator import open_wikidata_file
from wikidata_linker_utils.file import true_exists
from wikidata_linker_utils.bash import count_lines
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.offset_array import save_record_with_offset
from wikidata_linker_utils.wikidata_ids import (
load_wikidata_ids, load_names, property_names, temporal_property_names
)
import wikidata_linker_utils.wikidata_properties as wikidata_properties
SCRIPT_DIR = dirname(realpath(__file__))
PROJECT_DIR = dirname(SCRIPT_DIR)
WIKITILE_2_WIKIDATA_TRIE_NAME = "wikititle2wikidata.marisa"
WIKITILE_2_WIKIDATA_TSV_NAME = "wikidata_wikititle2wikidata.tsv"
WIKIDATA_IDS_NAME = "wikidata_ids.txt"
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("wikidata_dump", type=str,
help="Path to wikidata dump file.")
parser.add_argument("wikidata", type=str,
help="Path to save location for wikidata properties.")
parser.add_argument("--batch_size", type=int, default=1000)
return parser.parse_args(args=args)
def get_related_nested_field(doc_claims, nested_field):
out = []
for claim in doc_claims:
mainsnak = claim.get("mainsnak", None)
if mainsnak is None:
continue
datavalue = mainsnak.get("datavalue", None)
if datavalue is None:
continue
value = datavalue.get("value", None)
if value is None:
continue
value_id = value.get(nested_field, None)
if value_id is None:
continue
out.append(value_id)
return out
def get_related_entities(doc_claims):
return get_related_nested_field(doc_claims, "id")
def get_claim_time(doc_claims):
return get_related_nested_field(doc_claims, "time")
def get_wikidata_mapping(name2id_path,
wikidata_ids_path,
jsons,
relation_names,
verbose=False):
approx_max_quantity = 24642416
if verbose:
pbar = None
from IPython.display import clear_output
else:
pbar = get_progress_bar("collect wikilinks", max_value=approx_max_quantity)
pbar.start()
clear_output = None
wikidata_ids = []
entity_types = []
subclass = []
seen = 0
relations = {
name: (open(outfile, "wt"), is_temporal) for name, outfile, is_temporal in relation_names
}
fout_name2id = None if true_exists(name2id_path) else open(name2id_path, "wt")
fout_wikidata_ids = None if true_exists(wikidata_ids_path) else open(wikidata_ids_path, "wt")
try:
t_then = time.time()
seen_last = 0
speed = None
index = 0
for doc in jsons:
seen += 1
if seen % 2000 == 0:
if verbose:
t_now = time.time()
new_speed = (seen - seen_last) / (t_now - t_then)
if speed is None:
speed = new_speed
else:
speed = 0.9 * speed + 0.1 * new_speed
clear_output(wait=True)
print("%.3f%% done (%d seen, %.3f docs/s, ETA: %ds)" % (
100.0 * seen / approx_max_quantity,
seen,
speed,
int((approx_max_quantity - seen) / speed)
), flush=True)
seen_last = seen
t_then = t_now
else:
if seen < approx_max_quantity:
pbar.update(seen)
if fout_name2id is not None:
if "sitelinks" in doc:
for key, value in doc["sitelinks"].items():
if key.endswith("wiki"):
fout_name2id.write(key + "/" + value["title"] + "\t" + str(index) + "\n")
index += 1
if fout_wikidata_ids is not None:
fout_wikidata_ids.write(doc["id"] + "\n")
for name, (outfile, is_temporal) in relations.items():
if is_temporal:
outfile.write(
"\t".join(get_claim_time(doc["claims"].get(name, []))) + "\n"
)
else:
outfile.write(
"\t".join(get_related_entities(doc["claims"].get(name, []))) + "\n"
)
if pbar is not None:
pbar.finish()
finally:
for name, (outfile, _) in relations.items():
outfile.close()
if fout_name2id is not None:
fout_name2id.close()
if fout_wikidata_ids is not None:
fout_wikidata_ids.close()
def convert_wikidata_ids_to_ids(id2index, wikidata_ids):
return [[id2index.get(wikidata_id, -1) for wikidata_id in propgroup] for propgroup in wikidata_ids]
def parse_year(text):
pos = text[1:].find("-")
return int(text[:pos+1])
def values_exist(path):
return (
true_exists(path + "_values.npy") or
true_exists(path + "_values.sparse.npy")
)
def line2indices(id2index, line):
if len(line) == 0:
return []
out = []
for el in line.split("\t"):
idx = id2index.get(el, None)
if idx is None:
continue
else:
out.append(idx)
return out
def fixed_point_name_alternates(name):
if name.endswith(")"):
pos_closing = name.rfind("(")
return (name, name[:pos_closing].strip())
if name.endswith("ses"):
return (name, name[:-2] + "is")
if name.endswith("ies"):
return (name, name[:-3] + "y")
if name.endswith("s"):
return (name, name[:-1])
return (name,)
def build_fixed_point(out, prefix):
wiki_fixed_point_save = join(out, "wikidata_%s_fixed_points_values.npy" % (prefix,))
if not true_exists(wiki_fixed_point_save):
print("building %s fixed point property." % (prefix,))
trie = marisa_trie.RecordTrie('i').load(join(out, WIKITILE_2_WIKIDATA_TRIE_NAME))
num_items = count_lines(join(out, WIKIDATA_IDS_NAME))
fixed_point_relation = {}
category_prefix = "%s/Category:" % (prefix,)
article_prefix = "%s/" % (prefix,)
wikititle2wikidata_path = join(out, WIKITILE_2_WIKIDATA_TSV_NAME)
relevant_items = trie.iteritems(category_prefix)
for name, category_idx in relevant_items:
article_name = article_prefix + name[len(category_prefix):]
for fixed_point_name_alternate in fixed_point_name_alternates(article_name):
matches = trie.get(fixed_point_name_alternate, None)
if matches is not None and len(matches) > 0:
fixed_point_relation[category_idx] = [matches[0][0]]
break
print("Found %d fixed point relations for %s" % (len(fixed_point_relation), prefix,))
save_record_with_offset(
join(out, "wikidata_%s_fixed_points" % (prefix,)),
fixed_point_relation,
num_items
)
def main():
args = parse_args()
makedirs(args.wikidata, exist_ok=True)
wikidata_names2prop_names = property_names(
join(PROJECT_DIR, "data", "wikidata", 'wikidata_property_names.json')
)
wikidata_names2temporal_prop_names = temporal_property_names(
join(PROJECT_DIR, "data", "wikidata", 'wikidata_time_property_names.json')
)
# fields to make easily accessible:
wikidata_important_properties = [
wikidata_properties.INSTANCE_OF,
wikidata_properties.SUBCLASS_OF,
wikidata_properties.PART_OF,
wikidata_properties.OCCUPATION,
wikidata_properties.FIELD_OF_WORK,
wikidata_properties.FIELD_OF_THIS_OCCUPATION,
wikidata_properties.MEDICAL_SPECIALITY,
wikidata_properties.GENRE,
wikidata_properties.SEX_OR_GENDER,
wikidata_properties.COUNTRY_OF_CITIZENSHIP,
wikidata_properties.COUNTRY,
wikidata_properties.CONTINENT,
wikidata_properties.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY,
wikidata_properties.SPORT,
wikidata_properties.STUDIES,
wikidata_properties.SERIES,
wikidata_properties.USE,
wikidata_properties.LOCATION,
wikidata_properties.FACET_OF,
wikidata_properties.IS_A_LIST_OF,
wikidata_properties.COUNTRY_OF_ORIGIN,
wikidata_properties.PRODUCT_OR_MATERIAL_PRODUCED,
wikidata_properties.INDUSTRY,
wikidata_properties.PARENT_TAXON,
wikidata_properties.APPLIES_TO_TERRITORIAL_JURISDICTION,
wikidata_properties.POSITION_HELD,
wikidata_properties.CATEGORYS_MAIN_TOPIC,
# temporal properties
wikidata_properties.PUBLICATION_DATE,
wikidata_properties.DATE_OF_BIRTH,
wikidata_properties.DATE_OF_DEATH,
wikidata_properties.INCEPTION,
wikidata_properties.DISSOLVED_OR_ABOLISHED,
wikidata_properties.POINT_IN_TIME,
wikidata_properties.START_TIME,
wikidata_properties.END_TIME
]
wikidata_important_properties_fnames = [
(name, join(args.wikidata, "wikidata_%s.txt" % (name,)), name in wikidata_names2temporal_prop_names)
for name in wikidata_important_properties
]
missing_wikidata_important_properties_fnames = [
(name, outfile, is_temporal)
for name, outfile, is_temporal in wikidata_important_properties_fnames
if not true_exists(outfile)
]
wikidata_ids_path = join(args.wikidata, WIKIDATA_IDS_NAME)
wikititle2wikidata_path = join(args.wikidata, WIKITILE_2_WIKIDATA_TSV_NAME)
work_to_be_done = (
not true_exists(wikidata_ids_path) or
not true_exists(wikititle2wikidata_path) or
len(missing_wikidata_important_properties_fnames) > 0
)
if work_to_be_done:
get_wikidata_mapping(
wikititle2wikidata_path,
wikidata_ids_path,
open_wikidata_file(args.wikidata_dump, args.batch_size),
missing_wikidata_important_properties_fnames
)
numpy_wikidata_important_properties_fnames = [
(name, outfile, is_temporal)
for name, outfile, is_temporal in wikidata_important_properties_fnames
if not values_exist(join(args.wikidata, "wikidata_%s" % (name,)))
]
# obtain a mapping from id -> number
if len(numpy_wikidata_important_properties_fnames) > 0:
_, id2index = load_wikidata_ids(args.wikidata)
# make relations numerical:
for relname, outfile, is_temporal in numpy_wikidata_important_properties_fnames:
with open(outfile, "rt") as fin:
lines = fin.read().splitlines()
fin_pbar = get_progress_bar("loading relation %r" % (relname,))(lines)
if is_temporal:
value = np.zeros(len(lines) * 2 + 1, dtype=np.int32)
position = 1
seen = 0
for idx, line in enumerate(fin_pbar):
for wikidata_id in line.split('\t'):
if len(wikidata_id) > 0:
value[position] = idx
value[position + 1] = parse_year(wikidata_id)
position += 2
seen += 1
break
value[0] = len(lines)
value = value[:position]
np.save(join(args.wikidata, "wikidata_%s_values.sparse.npy" % (relname,)), value)
else:
relation = [
line2indices(id2index, line) for line in fin_pbar
]
save_record_with_offset(
join(args.wikidata, "wikidata_%s" % (relname,)),
relation
)
del id2index
# convert the mapping from wikinames to integer values:
trie_save_path = join(args.wikidata, WIKITILE_2_WIKIDATA_TRIE_NAME)
if not true_exists(trie_save_path):
print("loading wikipedia name -> wikidata")
name2id = pandas.read_csv(wikititle2wikidata_path, sep="\t", encoding='utf-8')
print("loaded")
trie = marisa_trie.RecordTrie(
'i',
get_progress_bar("convert to trie", max_value=name2id.shape[0])(
(key, (value,)) for _, key, value in name2id.itertuples()
)
)
trie.save(trie_save_path)
build_fixed_point(args.wikidata, "enwiki")
if __name__ == '__main__':
main()
|
import json
import time
import re
import argparse
from wikidata_linker_utils.wikipedia import iterate_articles
from multiprocessing import Pool
CATEGORY_PREFIXES = [
    "Category:",
    "Catégorie:",
    "Categorie:",
    "Categoría:",
    "Categoria:",
    "Kategorie:",
    "Kategoria:",
    "Категория:",
    "Kategori:"
]
category_link_pattern = re.compile(
r"\[\[((?:" + "|".join(CATEGORY_PREFIXES) + r")[^\]\[]*)\]\]"
)
redirection_link_pattern = re.compile(r"(?:#REDIRECT|#weiterleitung|#REDIRECCIÓN|REDIRECIONAMENTO)\s*\[\[([^\]\[]*)\]\]", re.IGNORECASE)
anchor_link_pattern = re.compile(r"\[\[([^\]\[:]*)\]\]")
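# Hedged, illustrative check of the patterns above (the sample text is made up): the
# anchor pattern only matches plain [[...]] links without a namespace colon, while the
# category pattern keeps the "Category:..." target including any "|display" suffix;
# the jobs below then split off the "|display" text and any "#section" suffix.
def _example_pattern_matches():
    sample = "See the [[Moon|lunar surface]] and [[Category:Physics|Phy]]."
    # -> (['Moon|lunar surface'], ['Category:Physics|Phy'])
    return anchor_link_pattern.findall(sample), category_link_pattern.findall(sample)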
def category_link_job(args):
"""
Performing map-processing on different articles
(in this case, just remove internal links)
"""
article_name, lines = args
found_tags = []
for match in re.finditer(category_link_pattern, lines):
match_string = match.group(1).strip()
if "|" in match_string:
link, _ = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
else:
link = match_string
if len(link) > 0:
found_tags.append(link)
return (article_name, found_tags)
def redirection_link_job(args):
"""
Performing map-processing on different articles
(in this case, just remove internal links)
"""
article_name, lines = args
found_tags = []
for match in re.finditer(redirection_link_pattern, lines):
if match is None:
continue
if match.group(1) is None:
continue
match_string = match.group(1).strip()
if "|" in match_string:
link, _ = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
else:
link = match_string
if len(link) > 0:
found_tags.append(link)
return (article_name, found_tags)
def anchor_finding_job(args):
"""
Performing map-processing on different articles
(in this case, just remove internal links)
"""
article_name, lines = args
found_tags = []
for match in re.finditer(anchor_link_pattern, lines):
match_string = match.group(1).strip()
if "|" in match_string:
link, anchor = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
anchor = anchor.strip()
else:
anchor = match_string
link = match_string
if len(anchor) > 0 and len(link) > 0:
found_tags.append((anchor, link))
return (article_name, found_tags)
def anchor_category_redirection_link_job(args):
article_name, found_redirections = redirection_link_job(args)
article_name, found_categories = category_link_job(args)
article_name, found_anchors = anchor_finding_job(args)
return (article_name, (found_anchors, found_redirections, found_categories))
def run_jobs(worker_pool, pool_jobs, outfile_anchors, outfile_redirections, outfile_category_links):
results = worker_pool.map(anchor_category_redirection_link_job, pool_jobs)
for article_name, result in results:
anchor_links, redirect_links, category_links = result
for link in redirect_links:
outfile_redirections.write(article_name + "\t" + link + "\n")
for link in category_links:
outfile_category_links.write(article_name + "\t" + link + "\n")
if ":" not in article_name:
outfile_anchors.write(article_name + "\t" + article_name + "\t" + article_name + "\n")
for anchor, link in anchor_links:
outfile_anchors.write(article_name + "\t" + anchor + "\t" + link + "\n")
def parse_wiki(path,
anchors_path,
redirections_path,
category_links_path,
threads=1,
max_jobs=10):
t0 = time.time()
jobs = []
pool = Pool(processes=threads)
try:
with open(redirections_path, "wt") as fout_redirections, open(category_links_path, "wt") as fout_category_links, open(anchors_path, "wt") as fout_anchors:
for article_name, lines in iterate_articles(path):
jobs.append((article_name, lines))
if len(jobs) >= max_jobs:
run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links)
jobs = []
if len(jobs) > 0:
run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links)
jobs = []
finally:
pool.close()
t1 = time.time()
print("%.3fs elapsed." % (t1 - t0,))
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("wiki",
help="Wikipedia dump file (xml).")
parser.add_argument("out_anchors",
help="File where anchor information should be saved (tsv).")
parser.add_argument("out_redirections",
help="File where redirection information should be saved (tsv).")
parser.add_argument("out_category_links",
help="File where category link information should be saved (tsv).")
def add_int_arg(name, default):
parser.add_argument("--%s" % (name,), type=int, default=default)
add_int_arg("threads", 8)
add_int_arg("max_jobs", 10000)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
parse_wiki(
path=args.wiki,
anchors_path=args.out_anchors,
redirections_path=args.out_redirections,
category_links_path=args.out_category_links,
threads=args.threads,
max_jobs=args.max_jobs
)
if __name__ == "__main__":
main()
|
"""
Create a tsv file where the first column is a token and the second column
is the QID (wikidata internal id for entities). This can then be used
by evaluate_learnability or for training a type model.
Usage
-----
```
python3 produce_wikidata_tsv.py configs/en_export_config.json en_wikipedia.tsv
```
Use `--relative_to` argument to specify the base directory for relative paths in the
config file.
"""
import argparse
import re
import json
from os.path import join, dirname, realpath, exists
import marisa_trie
import ciseau
import numpy as np
from wikidata_linker_utils.wikipedia import (
iterate_articles, induce_wikipedia_prefix, load_redirections,
transition_trie_index
)
from wikidata_linker_utils.json import load_config
from wikidata_linker_utils.offset_array import OffsetArray
from wikidata_linker_utils.type_collection import TypeCollection
from wikidata_linker_utils.anchor_filtering import acceptable_anchor, clean_up_trie_source
from wikidata_linker_utils.wikipedia import match_wikipedia_to_wikidata
SCRIPT_DIR = dirname(realpath(__file__))
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("config")
parser.add_argument("out")
parser.add_argument("--relative_to", type=str, default=None)
return parser.parse_args(args=args)
link_pattern = re.compile(r"\[\[([^\]\[:]*)\]\]")
ref_pattern = re.compile(r"<ref[^<>]*>[^<]+</ref>")
double_bracket_pattern = re.compile(r"{{[^{}]+}}")
title_pattern = re.compile(r"==+([^=]+)==+")
bullet_point_pattern = re.compile(r"^([*#])", re.MULTILINE)
def merge_tags(words, tags, start_sent):
out = [(w, []) for w in words]
for tag_start, tag_end, tag in tags:
so_far = start_sent
for k, word in enumerate(words):
begins = tag_start <= so_far or (tag_start > so_far and tag_start < so_far + len(word))
ends = (so_far + len(word) <= tag_end) or (tag_end < so_far + len(word) and tag_end > so_far)
if begins and ends:
out[k][1].append(tag)
so_far += len(word)
if so_far >= tag_end:
break
return out
def pick_relevant_tags(tagged_sequence, char_offset, char_offset_end):
relevant_tags = []
for word, tags in tagged_sequence:
if tags is not None:
start, end, dest_index = tags
if start >= char_offset and start < char_offset_end:
relevant_tags.append((start, end, dest_index))
if start >= char_offset_end:
break
return relevant_tags
def convert_document_to_labeled_tags(annotated, sentences):
paragraphs = []
paragraph = []
char_offset = 0
for sentence in sentences:
sentence_length = sum(len(w) for w in sentence)
sentence_tags = pick_relevant_tags(
annotated,
char_offset,
char_offset + sentence_length
)
sentence_with_tags = merge_tags(
sentence,
sentence_tags,
char_offset
)
sentence_with_tags = [
(
w,
[tags[0]] if len(tags) > 0 else []
) for w, tags in sentence_with_tags
]
if "\n" in sentence[-1]:
paragraph.extend(sentence_with_tags)
paragraphs.append(paragraph)
paragraph = []
else:
paragraph.extend(sentence_with_tags)
char_offset += sentence_length
if len(paragraph) > 0:
paragraphs.append(paragraph)
return paragraphs
def annotate_document(doc,
collection,
wiki_trie,
anchor_trie,
trie_index2indices,
trie_index2indices_counts,
trie_index2indices_transitions,
redirections,
prefix):
out = []
current_position = 0
current_position_no_brackets = 0
for match in re.finditer(link_pattern, doc):
start = match.start()
end = match.end()
if current_position != start:
out.append(
(doc[current_position:start], None)
)
current_position_no_brackets += start - current_position
current_position = end
match_string = match.group(1).strip()
if "|" in match_string:
link, anchor = match_string.rsplit("|", 1)
link = link.strip().split("#")[0]
anchor = anchor
anchor_stripped = anchor.strip()
else:
anchor = match_string
anchor_stripped = match_string.strip()
link = anchor_stripped
if len(anchor) > 0 and len(link) > 0:
anchor = clean_up_trie_source(anchor, lowercase=False)
lowercase_anchor = anchor.lower()
if acceptable_anchor(lowercase_anchor, anchor_trie):
anchor_idx = anchor_trie[lowercase_anchor]
dest_index = match_wikipedia_to_wikidata(link, wiki_trie, redirections, prefix)
if dest_index is not None:
all_options = trie_index2indices[anchor_idx]
if len(all_options) > 0:
if trie_index2indices_transitions is not None:
dest_index = transition_trie_index(
anchor_idx, dest_index,
trie_index2indices_transitions,
all_options
)
try:
new_dest_index = dest_index
keep = True
if keep:
out.append(
(
anchor,
(
current_position_no_brackets,
current_position_no_brackets + len(anchor),
collection.ids[new_dest_index]
)
)
)
current_position_no_brackets += len(anchor)
continue
except IndexError:
# missing element
pass
current_position_no_brackets += len(anchor)
out.append(
(anchor, None)
)
if current_position != len(doc):
out.append(
(doc[current_position:len(doc)], None)
)
return out
def convert(article_name,
doc,
collection,
wiki_trie,
anchor_trie,
trie_index2indices,
trie_index2indices_counts,
trie_index2indices_transitions,
redirections,
prefix):
doc = doc.replace("\t", " ")
# remove ref tags:
doc = re.sub(ref_pattern, " ", doc)
doc = re.sub(double_bracket_pattern, " ", doc)
doc = re.sub(title_pattern, r"\n\n\1\. ", doc)
doc = re.sub(bullet_point_pattern, r"\1 ", doc)
article_index = match_wikipedia_to_wikidata(
article_name, wiki_trie, redirections, prefix
)
# find location of tagged items in wikipedia:
annotated = annotate_document(doc,
collection,
wiki_trie,
anchor_trie,
trie_index2indices,
trie_index2indices_counts,
trie_index2indices_transitions,
redirections,
prefix)
text_without_brackets = "".join(text for text, _ in annotated)
sentences = ciseau.sent_tokenize(
text_without_brackets,
normalize_ascii=False,
keep_whitespace=True
)
return (
convert_document_to_labeled_tags(
annotated, sentences
),
collection.ids[article_index] if article_index is not None else "other"
)
def main():
args = parse_args()
config = load_config(
args.config,
["wiki", "language_path", "wikidata", "redirections"],
defaults={
"num_names_to_load": 0,
"prefix": None,
"sample_size": 100
},
relative_to=args.relative_to
)
prefix = config.prefix or induce_wikipedia_prefix(config.wiki)
collection = TypeCollection(
config.wikidata,
num_names_to_load=0
)
collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json"))
trie_index2indices = OffsetArray.load(
join(config.language_path, "trie_index2indices"),
compress=True
)
trie_index2indices_counts = OffsetArray(
np.load(join(config.language_path, "trie_index2indices_counts.npy")),
trie_index2indices.offsets
)
if exists(join(config.language_path, "trie_index2indices_transition_values.npy")):
trie_index2indices_transitions = OffsetArray(
np.load(join(config.language_path, "trie_index2indices_transition_values.npy")),
np.load(join(config.language_path, "trie_index2indices_transition_offsets.npy")),
)
else:
trie_index2indices_transitions = None
anchor_trie = marisa_trie.Trie().load(join(config.language_path, "trie.marisa"))
wiki_trie = marisa_trie.RecordTrie('i').load(
join(config.wikidata, "wikititle2wikidata.marisa")
)
redirections = load_redirections(config.redirections)
seen = 0
with open(args.out, "wt") as fout:
try:
for article_name, article in iterate_articles(config.wiki):
fixed_article, article_qid = convert(
article_name,
article,
collection=collection,
anchor_trie=anchor_trie,
wiki_trie=wiki_trie,
trie_index2indices=trie_index2indices,
trie_index2indices_counts=trie_index2indices_counts,
trie_index2indices_transitions=trie_index2indices_transitions,
redirections=redirections,
prefix=prefix)
for paragraph in fixed_article:
for word, qids in paragraph:
if len(qids) > 0:
fout.write(word.rstrip() + "\t" + "\t".join(qids + [article_qid]) + "\n")
else:
fout.write(word.rstrip() + "\n")
fout.write("\n")
seen += 1
if seen >= config.sample_size:
break
finally:
fout.flush()
fout.close()
if __name__ == "__main__":
main()
|
"""
Perform a reduction on the anchor -> articles relation
by finding different articles referring to the same item
and making the anchor point to the most common version,
or by using the wikidata graph to find "instance of" and
other parent-child relations that allow one article to
encompass or be more generic than its co-triggerable
articles.
Usage:
------
```
DATA_DIR=data/wikidata
LANG_DIR=data/en_trie
FIXED_LANG_DIR=data/en_trie_fixed
python3 fast_link_fixer.py ${DATA_DIR} ${LANG_DIR} ${FIXED_LANG_DIR}
```
"""
import argparse
import time
import shutil
from os.path import join, realpath, dirname
from os import makedirs
import numpy as np
import marisa_trie
from wikidata_linker_utils.type_collection import get_name, TypeCollection
from wikidata_linker_utils.logic import logical_and, logical_ands, logical_not, logical_or, logical_ors
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.offset_array import OffsetArray
from wikidata_linker_utils.file import true_exists
import wikidata_linker_utils.wikidata_properties as wprop
from wikidata_linker_utils.successor_mask import (
related_promote_highest, extend_relations, reduce_values,
remap_offset_array
)
SCRIPT_DIR = dirname(realpath(__file__))
from numpy import logical_not, logical_or, logical_and
from wikidata_linker_utils.logic import logical_ors
IS_HISTORY = None
IS_PEOPLE = None
IS_BREED = None
IS_PEOPLE_GROUP = None
IS_LIST_ARTICLE = None
IS_LANGUAGE_ALPHABET = None
IS_SPORTS_TEAM = None
IS_CARDINAL_DIRECTION = None
IS_POLITICAL_PARTY = None
IS_SOCIETY = None
IS_POSITION = None
IS_CHARACTER_HUMAN = None
IS_POLITICAL_ORGANIZATION = None
IS_LANDFORM = None
IS_THING = None
IS_BATTLE = None
IS_EVENT = None
IS_ACTIVITY = None
IS_THOROUGHFARE = None
IS_KINSHIP = None
IS_EPISODE_LIST = None
def wkp(c, name):
return c.article2id['enwiki/' + name][0][0]
def wkd(c, name):
return c.name2index[name]
def initialize_globals(c):
"""global variables that guide the metonymy/anaphora removal process."""
global IS_HISTORY
global IS_PEOPLE
global IS_PEOPLE_GROUP
global IS_LIST_ARTICLE
global IS_COUNTRY
global IS_BREED
global IS_EVENT_SPORT
global IS_LANGUAGE_ALPHABET
global IS_SPORTS_TEAM
global IS_CARDINAL_DIRECTION
global IS_ACTIVITY
global IS_POLITICAL_PARTY
global IS_SOCIETY
global IS_BATTLE
global IS_POSITION
global IS_LANDFORM
global IS_CHARACTER_HUMAN
global IS_POLITICAL_ORGANIZATION
global IS_THING
global IS_THOROUGHFARE
global IS_EVENT
global IS_KINSHIP
global IS_EPISODE_LIST
PEOPLE = wkd(c, "Q2472587")
NATIONALITY = wkd(c, "Q231002")
ASPECT_OF_HIST = wkd(c, "Q17524420")
HISTORY = wkd(c, "Q309")
LIST_ARTICLE = wkd(c, "Q13406463")
WAR = wkd(c, "Q198")
COUNTRY = wkd(c, "Q6256")
FORMER_COUNTRY = wkd(c, "Q3024240")
DOMINION = wkd(c, "Q223832")
LANGUAGE = wkd(c, "Q34770")
ALPHABET = wkd(c, "Q9779")
COLONY = wkd(c, "Q133156")
GOVERNORATE = wkd(c, "Q1798622")
SPORTS_TEAM = wkd(c, "Q12973014")
ATHLETIC_CONFERENCE = wkd(c, "Q2992826")
CARDINAL_DIRECTION = wkd(c, "Q23718")
POLITICAL_PARTY = wkd(c, "Q7278")
STATE = wkd(c, "Q7275")
DYNASTY = wkd(c, "Q164950")
SOCIETY = wkd(c, "Q8425")
MENS_SINGLES = wkd(c, "Q16893072")
SPORT = wkd(c, "Q349")
POSITION = wkd(c, "Q4164871")
HUMAN = wkd(c, "Q5")
FICTIONAL_CHARACTER = wkd(c, "Q95074")
BREED = wkd(c, "Q38829")
ORTHOGRAPHY = wkd(c, "Q43091")
POLITICAL_ORGANIZATION = wkd(c, "Q7210356")
GROUP_OF_HUMANS = wkd(c, "Q16334295")
LANDFORM = wkd(c, "Q271669")
BATTLE = wkd(c, "Q178561")
FOOD = wkd(c, "Q2095")
DRINK = wkd(c, "Q40050")
ANIMAL = wkd(c, "Q16521")
WORK = wkd(c, "Q386724")
AUTOMOBILE_MODEL = wkd(c, "Q3231690")
GOOD = wkd(c, "Q28877")
VEHICLE = wkd(c, "Q42889")
PUBLICATION = wkd(c, "Q732577")
AUDIOVISUAL = wkd(c, "Q2431196")
TERRITORIAL_ENTITY = wkd(c, "Q15642541")
GEOGRAPHIC_OBJECT = wkd(c, "Q618123")
ASTRO_OBJECT = wkd(c, "Q17444909")
EVENT_SPORTING = wkd(c, "Q1656682")
EVENT_OCCURRENCE = wkd(c, "Q1190554")
ELECTROMAGNETIC_SPECTRUM = wkd(c, "Q133139")
MAGICAL_ORG = wkd(c, "Q14946195")
AUTONOM_CHURCH = wkd(c, "Q20871948")
SIGN = wkd(c, "Q3695082")
FORM_OF_GOVERNMENT = wkd(c, "Q1307214")
SPORTS_ORG = wkd(c, "Q4438121")
RECURRING_SPORTING_EVENT = wkd(c, "Q18608583")
CLASS_SCHEME = wkd(c, "Q5962346")
STYLE = wkd(c, "Q1292119")
SIGN_SYSTEM = wkd(c, "Q7512598")
PHYSICAL_PHENOMENON = wkd(c, "Q1293220")
LAW = wkd(c, "Q7748")
WATERCOURSE = wkd(c, "Q355304")
BODY_OF_WATER = wkd(c, "Q15324")
CHEMICAL_SUBSTANCE = wkd(c, "Q79529")
HISTORICAL_PERIOD = wkd(c, "Q11514315")
ACTIVITY = wkd(c, "Q815962")
THOROUGHFARE = wkd(c, "Q83620")
KINSHIP = wkd(c, "Q171318")
FICTIONAL_HUMAN = wkd(c, "Q15632617")
EPISODE = wkd(c, "Q1983062")
IS_CHARACTER_HUMAN = c.satisfy(
[wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF],
[HUMAN, FICTIONAL_HUMAN, FICTIONAL_CHARACTER]
)
# to be a history you must be an aspect of history
# but not a history itself:
IS_HISTORY = logical_and(
c.satisfy([wprop.INSTANCE_OF], [ASPECT_OF_HIST]),
logical_not(c.satisfy([wprop.INSTANCE_OF], [HISTORY]))
)
IS_PEOPLE = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [PEOPLE, NATIONALITY])
IS_PEOPLE_GROUP = np.logical_or(
IS_PEOPLE,
c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [GROUP_OF_HUMANS, MAGICAL_ORG, AUTONOM_CHURCH])
)
IS_LIST_ARTICLE = c.satisfy([wprop.INSTANCE_OF], [LIST_ARTICLE])
IS_LANGUAGE_ALPHABET = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF],
[LANGUAGE, ALPHABET, ORTHOGRAPHY, SIGN_SYSTEM]
)
IS_COUNTRY = c.satisfy([wprop.INSTANCE_OF], [COUNTRY, FORMER_COUNTRY, DOMINION, COLONY, STATE, DYNASTY, GOVERNORATE])
IS_SPORTS_TEAM = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.PART_OF], [SPORTS_TEAM, ATHLETIC_CONFERENCE, SPORTS_ORG, RECURRING_SPORTING_EVENT])
IS_CARDINAL_DIRECTION = c.satisfy([wprop.INSTANCE_OF], [CARDINAL_DIRECTION])
IS_POLITICAL_PARTY = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POLITICAL_PARTY])
IS_SOCIETY = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [SOCIETY, HISTORICAL_PERIOD])
IS_POSITION = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POSITION])
IS_BREED = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [BREED])
IS_POLITICAL_ORGANIZATION = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POLITICAL_ORGANIZATION, FORM_OF_GOVERNMENT])
IS_LANDFORM = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [LANDFORM, TERRITORIAL_ENTITY, GEOGRAPHIC_OBJECT, ASTRO_OBJECT, WATERCOURSE, BODY_OF_WATER])
IS_EVENT_SPORT = c.satisfy([wprop.SUBCLASS_OF, wprop.PART_OF, wprop.INSTANCE_OF], [EVENT_SPORTING, SPORT])
IS_THING = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF],
[
AUTOMOBILE_MODEL,
FOOD,
DRINK,
STYLE,
ANIMAL,
GOOD,
LAW,
CHEMICAL_SUBSTANCE,
SIGN,
VEHICLE,
PHYSICAL_PHENOMENON,
PUBLICATION,
AUDIOVISUAL,
CLASS_SCHEME,
WORK,
ELECTROMAGNETIC_SPECTRUM
]
)
IS_THOROUGHFARE = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [THOROUGHFARE])
IS_ACTIVITY = c.satisfy([wprop.INSTANCE_OF], [ACTIVITY])
IS_EVENT = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [EVENT_OCCURRENCE])
IS_BATTLE = c.satisfy([wprop.SUBCLASS_OF, wprop.INSTANCE_OF], [BATTLE])
IS_KINSHIP = c.satisfy([wprop.INSTANCE_OF], [KINSHIP])
IS_EPISODE_LIST = c.satisfy([wprop.IS_A_LIST_OF], [EPISODE])
def get_relation_data(collection, relation_paths):
"""Prepare relations for usage inside extend_relations."""
out = []
for path in relation_paths:
promote = path.get("promote", False)
numpy_path = []
for step in path["steps"]:
if isinstance(step, str):
step_name, max_usage = step, 1
else:
step_name, max_usage = step
relation = collection.relation(step_name)
numpy_path.append((relation.offsets, relation.values, max_usage))
inv_relation = collection.get_inverted_relation(step_name).edges() > 0
out.append((numpy_path, inv_relation, promote))
return out
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("wikidata")
parser.add_argument("language_path")
parser.add_argument("new_language_path")
parser.add_argument("--steps", type=int, default=3,
help="how many time should fixing be recursed (takes "
"about 2mn per step. Has diminishing returns).")
return parser.parse_args()
def get_trie_properties(trie, offsets, values):
"""Obtain the length of every trigger in the trie."""
anchor_length = np.zeros(len(values), dtype=np.int32)
start, end = 0, 0
for idx, key in enumerate(trie.iterkeys()):
end = offsets[idx]
anchor_length[start:end] = len(key)
start = end
return anchor_length
def fix(collection,
offsets,
values,
counts,
anchor_length,
num_category_link=8,
keep_min=5):
relations_that_can_extend = [
{"steps": [wprop.INSTANCE_OF]},
{"steps": [wprop.INSTANCE_OF, (wprop.SUBCLASS_OF, 2)]},
{"steps": [wprop.INSTANCE_OF, wprop.FACET_OF]},
{"steps": [(wprop.SUBCLASS_OF, 3)]},
{"steps": [wprop.OCCUPATION], "promote": True},
{"steps": [wprop.POSITION_HELD], "promote": True},
{"steps": [wprop.PART_OF, wprop.INSTANCE_OF]},
{"steps": [wprop.SERIES, wprop.INSTANCE_OF]},
{"steps": [wprop.SERIES, wprop.LOCATION]},
{"steps": [wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY]},
{"steps": [wprop.COUNTRY]},
{"steps": [wprop.CATEGORY_LINK, wprop.CATEGORYS_MAIN_TOPIC]},
{"steps": [(wprop.CATEGORY_LINK, num_category_link), wprop.FIXED_POINTS]},
{"steps": [wprop.CATEGORY_LINK, wprop.FIXED_POINTS, wprop.IS_A_LIST_OF]},
{"steps": [wprop.IS_A_LIST_OF, (wprop.SUBCLASS_OF, 2)]}
]
relation_data = get_relation_data(collection, relations_that_can_extend)
new_values = values
# get rid of History of BLAH where link also points to BLAH:
is_history = IS_HISTORY[new_values]
is_people_mask = IS_PEOPLE[new_values]
is_list = IS_LIST_ARTICLE[new_values]
new_values = related_promote_highest(
new_values,
offsets,
counts,
condition=is_history,
alternative=is_people_mask,
keep_min=keep_min
)
unchanged = values == new_values
is_not_history_or_list = logical_and(
logical_not(is_history), logical_not(is_list)
)
new_values = related_promote_highest(
new_values,
offsets,
counts,
condition=logical_and(is_history, unchanged),
alternative=is_not_history_or_list,
keep_min=keep_min
)
is_sport_or_thoroughfare = logical_or(
IS_EVENT_SPORT, IS_THOROUGHFARE
)[new_values]
# delete these references:
new_values[anchor_length < 2] = -1
# get rid of shorthand for sports:
new_values[logical_and(is_sport_or_thoroughfare, anchor_length <= 2)] = -1
# remove lists of episodes:
is_episode_list = IS_EPISODE_LIST[new_values]
new_values[is_episode_list] = -1
# get rid of "car" -> "Renault Megane", when "car" -> "Car",
# and "Renault Megane" is instance of "Car":
is_not_people = logical_not(IS_PEOPLE)[new_values]
new_values = extend_relations(
relation_data,
new_values,
offsets,
counts,
alternative=is_not_people,
pbar=get_progress_bar("extend_relations", max_value=len(offsets), item="links"),
keep_min=keep_min
)
unchanged = values == new_values
# remove all non-modified values that are
# not instances of anything, nor subclasses of anything:
new_values[logical_ands(
[
logical_ands([
collection.relation(wprop.INSTANCE_OF).edges() == 0,
collection.relation(wprop.SUBCLASS_OF).edges() == 0,
collection.relation(wprop.PART_OF).edges() == 0,
collection.relation(wprop.CATEGORY_LINK).edges() == 0
])[new_values],
unchanged
])] = -1
is_kinship = IS_KINSHIP[new_values]
is_human = IS_CHARACTER_HUMAN[new_values]
new_values = related_promote_highest(
new_values,
offsets,
counts,
condition=is_human,
alternative=is_kinship,
keep_min=keep_min
)
# replace elements by a country
# if a better alternative is present,
# counts is less than 100:
should_replace_by_country = logical_ands(
[
logical_not(
logical_ors([
IS_POLITICAL_ORGANIZATION,
IS_CARDINAL_DIRECTION,
IS_LANGUAGE_ALPHABET,
IS_COUNTRY,
IS_PEOPLE_GROUP,
IS_BREED,
IS_BATTLE,
IS_SOCIETY,
IS_POSITION,
IS_POLITICAL_PARTY,
IS_SPORTS_TEAM,
IS_CHARACTER_HUMAN,
IS_LANDFORM,
IS_ACTIVITY
])
)[new_values],
counts < 100
]
)
# turn this into a promote highest in this order:
is_country_or_cardinal = [
IS_CARDINAL_DIRECTION,
IS_COUNTRY,
IS_POLITICAL_ORGANIZATION
]
for i, alternative in enumerate(is_country_or_cardinal):
unchanged = values == new_values
should_replace_by_country = logical_and(
should_replace_by_country, unchanged
)
new_values = related_promote_highest(
new_values,
offsets,
counts,
condition=should_replace_by_country,
alternative=alternative[new_values],
keep_min=keep_min
)
new_offsets, new_values, new_counts, location_shift = reduce_values(
offsets, new_values, counts)
return (new_offsets, new_values, new_counts), location_shift
def filter_trie(trie, values):
return marisa_trie.Trie((trie.restore_key(value) for value in values))
def remap_trie_offset_array(old_trie, new_trie, offsets_values_counts):
mapping = np.zeros(len(new_trie), dtype=np.int32)
t0 = time.time()
for new_index in range(len(new_trie)):
mapping[new_index] = old_trie[new_trie.restore_key(new_index)]
t1 = time.time()
print("Got mapping from old trie to new trie in %.3fs" % (t1 - t0,))
ported = []
for offsets, values, counts in offsets_values_counts:
new_offsets, new_values, new_counts = remap_offset_array(
mapping, offsets, values, counts
)
ported.append((new_offsets, new_values, new_counts))
t2 = time.time()
print("Ported counts and values across tries in %.3fs" % (t2 - t1,))
return ported
def main():
args = parse_args()
if args.new_language_path == args.language_path:
raise ValueError("new_language_path and language_path must be "
"different: cannot generate a fixed trie in "
"the same directory as the original trie.")
c = TypeCollection(args.wikidata, num_names_to_load=0)
c.load_blacklist(join(SCRIPT_DIR, "blacklist.json"))
original_values = np.load(
join(args.language_path, "trie_index2indices_values.npy"))
original_offsets = np.load(
join(args.language_path, "trie_index2indices_offsets.npy"))
original_counts = np.load(
join(args.language_path, "trie_index2indices_counts.npy"))
original_trie_path = join(args.language_path, 'trie.marisa')
trie = marisa_trie.Trie().load(original_trie_path)
initialize_globals(c)
t0 = time.time()
old_location_shift = None
values, offsets, counts = original_values, original_offsets, original_counts
for step in range(args.steps):
anchor_length = get_trie_properties(trie, offsets, values)
(offsets, values, counts), location_shift = fix(
collection=c,
offsets=offsets,
values=values,
counts=counts,
anchor_length=anchor_length,
num_category_link=8
)
if old_location_shift is not None:
# see where newly shifted values are now pointing
# to (extra indirection level):
location_shift = location_shift[old_location_shift]
location_shift[old_location_shift == -1] = -1
old_location_shift = location_shift
pre_reduced_values = values[location_shift]
pre_reduced_values[location_shift == -1] = -1
num_changes = int((pre_reduced_values != original_values).sum())
change_volume = int((original_counts[pre_reduced_values != original_values].sum()))
print("step %d with %d changes, %d total links" % (
step, num_changes, change_volume)
)
pre_reduced_values = values[location_shift]
pre_reduced_values[location_shift == -1] = -1
t1 = time.time()
num_changes = int((pre_reduced_values != original_values).sum())
print("Done with link fixing in %.3fs, with %d changes." % (
t1 - t0, num_changes)
)
# show some remappings:
np.random.seed(1234)
num_samples = 10
samples = np.random.choice(
np.where(
np.logical_and(
np.logical_and(
pre_reduced_values != original_values,
pre_reduced_values != -1
),
original_values != -1
)
)[0],
size=num_samples,
replace=False
)
print("Sample fixes:")
for index in samples:
print(" %r (%d) -> %r (%d)" % (
c.get_name(int(original_values[index])),
int(original_values[index]),
c.get_name(int(pre_reduced_values[index])),
int(pre_reduced_values[index])
)
)
print("")
samples = np.random.choice(
np.where(
OffsetArray(values, offsets).edges() == 0
)[0],
size=num_samples,
replace=False
)
print("Sample deletions:")
for index in samples:
print(" %r" % (trie.restore_key(int(index))))
# prune out anchors where there are no more linked items:
print("Removing empty anchors from trie...")
t0 = time.time()
non_empty_offsets = np.where(
OffsetArray(values, offsets).edges() != 0
)[0]
fixed_trie = filter_trie(trie, non_empty_offsets)
contexts_found = true_exists(
join(args.language_path, "trie_index2contexts_values.npy")
)
if contexts_found:
contexts_values = np.load(
join(args.language_path, "trie_index2contexts_values.npy"))
contexts_offsets = np.load(
join(args.language_path, "trie_index2contexts_offsets.npy"))
contexts_counts = np.load(
join(args.language_path, "trie_index2contexts_counts.npy"))
to_port = [
(offsets, values, counts),
(original_offsets, pre_reduced_values, original_values)
]
if contexts_found:
to_port.append(
(contexts_offsets, contexts_values, contexts_counts)
)
ported = remap_trie_offset_array(trie, fixed_trie, to_port)
offsets, values, counts = ported[0]
original_offsets, pre_reduced_values, original_values = ported[1]
t1 = time.time()
print("Removed %d empty anchors from trie in %.3fs" % (
len(trie) - len(fixed_trie), t1 - t0,)
)
print("Saving...")
makedirs(args.new_language_path, exist_ok=True)
np.save(join(args.new_language_path, "trie_index2indices_values.npy"),
values)
np.save(join(args.new_language_path, "trie_index2indices_offsets.npy"),
offsets)
np.save(join(args.new_language_path, "trie_index2indices_counts.npy"),
counts)
if contexts_found:
contexts_offsets, contexts_values, contexts_counts = ported[2]
np.save(join(args.new_language_path, "trie_index2contexts_values.npy"),
contexts_values)
np.save(join(args.new_language_path, "trie_index2contexts_offsets.npy"),
contexts_offsets)
np.save(join(args.new_language_path, "trie_index2contexts_counts.npy"),
contexts_counts)
new_trie_path = join(args.new_language_path, 'trie.marisa')
fixed_trie.save(new_trie_path)
transition = np.vstack([original_values, pre_reduced_values]).T
np.save(join(args.new_language_path, "trie_index2indices_transition_values.npy"),
transition)
np.save(join(args.new_language_path, "trie_index2indices_transition_offsets.npy"),
original_offsets)
print("Done.")
if __name__ == "__main__":
main()
|
"""
Obtain a coarse-grained classification of places and entities according to their associated
continent/country.
"""
from numpy import (
logical_and, logical_or, logical_not, logical_xor, where
)
from wikidata_linker_utils.logic import logical_negate
import wikidata_linker_utils.wikidata_properties as wprop
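# Hedged sketch (illustrative only): classify() below builds one boolean membership
# mask per continent and then removes overlaps, e.g. "europe" excludes anything already
# tagged middle_east. logical_negate(a, [b, ...]) is assumed, from its use here, to
# behave like "a and not (b or ...)":
def _example_exclusive_masks():
    is_in_europe = [True, True, False]
    is_in_middle_east = [False, True, False]
    # expected: [True, False, False]
    return logical_and(is_in_europe, logical_not(is_in_middle_east))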
def wkp(c, name):
"""Convert a string wikipedia article name to its Wikidata index."""
return c.article2id["enwiki/" + name][0][0]
def wkd(c, name):
"""Convert a wikidata QID to its wikidata index."""
return c.name2index[name]
def classify(c):
EUROPE = wkp(c, 'Europe')
AFRICA = wkp(c, 'Africa')
ASIA = wkp(c, 'Asia')
NORTH_AMERICA = wkp(c, 'North America')
SOUTH_AMERICA = wkp(c, 'South America')
OCEANIA = wkp(c, 'Oceania')
ANTARCTICA = wkp(c, 'Antarctica')
CONTINENT = wkp(c, wprop.CONTINENT)
OUTERSPACE = wkp(c, 'Astronomical object')
EARTH = wkp(c, "Earth")
GEOGRAPHIC_LOCATION = wkd(c, "Q2221906")
POPULATED_PLACE = wkd(c, 'Q486972')
MIDDLE_EAST = [
wkp(c, "Bahrain"),
wkp(c, "Cyprus"),
wkp(c, "Turkish"),
wkp(c, "Egypt"),
wkp(c, "Iran"),
wkp(c, "Iraq"),
wkp(c, "Kurdish"),
wkp(c, "Israel"),
wkp(c, "Arabic"),
wkp(c, "Jordan"),
wkp(c, "Kuwait"),
wkp(c, "Lebanon"),
wkp(c, "Oman"),
wkp(c, "Palestine"),
wkp(c, "Jordanian"),
wkp(c, "Qatar"),
wkp(c, "Saudi Arabia"),
wkp(c, "Syria"),
wkp(c, "Turkey"),
wkp(c, "United Arab Emirates"),
wkp(c, "Yemen")
]
TRAVERSIBLE = [
wprop.INSTANCE_OF,
wprop.SUBCLASS_OF,
wprop.CONTINENT,
wprop.PART_OF,
wprop.COUNTRY_OF_CITIZENSHIP,
wprop.COUNTRY,
wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY
]
# c.describe_connection("Q55", "North America", TRAVERSIBLE)
# return {}
print("is_in_middle_east")
is_in_middle_east = c.satisfy(TRAVERSIBLE, MIDDLE_EAST)
print("is_in_europe")
is_in_europe = c.satisfy(TRAVERSIBLE, [EUROPE])
is_in_europe_only = logical_negate(is_in_europe, [is_in_middle_east])
print("is_in_asia")
is_in_asia = c.satisfy(TRAVERSIBLE, [ASIA])
is_in_asia_only = logical_negate(is_in_asia, [is_in_europe, is_in_middle_east])
print("is_in_africa")
is_in_africa = c.satisfy(TRAVERSIBLE, [AFRICA])
is_in_africa_only = logical_negate(is_in_africa, [is_in_europe, is_in_asia, is_in_middle_east])
print("is_in_north_america")
is_in_north_america = c.satisfy(TRAVERSIBLE, [NORTH_AMERICA])
is_in_north_america_only = logical_negate(is_in_north_america, [is_in_europe, is_in_asia, is_in_middle_east])
print("is_in_south_america")
is_in_south_america = c.satisfy(TRAVERSIBLE, [SOUTH_AMERICA])
print("is_in_antarctica")
is_in_antarctica = c.satisfy(TRAVERSIBLE, [ANTARCTICA])
is_in_antarctica_only = logical_negate(is_in_antarctica, [is_in_europe, is_in_north_america, is_in_asia, is_in_middle_east])
print("is_in_oceania")
is_in_oceania = c.satisfy(TRAVERSIBLE, [OCEANIA])
is_in_oceania_only = logical_negate(is_in_oceania, [is_in_europe, is_in_north_america, is_in_asia, is_in_middle_east])
print("is_in_outer_space")
is_in_outer_space = c.satisfy(TRAVERSIBLE, [OUTERSPACE])
print("part_of_earth")
part_of_earth = c.satisfy(
[wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY, wprop.SUBCLASS_OF],
[GEOGRAPHIC_LOCATION, EARTH]
)
print("is_in_outer_space_not_earth")
is_in_outer_space_not_earth = logical_negate(
is_in_outer_space, [part_of_earth]
)
print("is_a_populated_place")
is_populated_place = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POPULATED_PLACE])
    is_unlocalized_populated_place = logical_negate(
        is_populated_place,
        [is_in_europe, is_in_asia, is_in_antarctica, is_in_oceania,
         is_in_outer_space, is_in_south_america, is_in_north_america])
return {
"europe": is_in_europe_only,
"asia": is_in_asia_only,
"africa": is_in_africa_only,
"middle_east": is_in_middle_east,
"north_america": is_in_north_america_only,
"south_america": is_in_south_america,
"antarctica": is_in_antarctica_only,
"oceania": is_in_oceania_only,
"outer_space": is_in_outer_space_not_earth,
# "populated_space": is_populated_place,
"populated_place_unlocalized": is_unlocalized_populated_place
}
|
"""
Obtain a finer-grained classification of places and entities according to their associated
country/region.
"""
from numpy import (
logical_and, logical_or, logical_not, logical_xor, where
)
from wikidata_linker_utils.logic import logical_negate, logical_ors
import wikidata_linker_utils.wikidata_properties as wprop
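# Hedged sketch (illustrative only): classify() below mixes masks derived from the
# wikidata graph (INSTANCE_OF / SUBCLASS_OF style traversals via c.satisfy) with masks
# derived from wikipedia category links, then merges them. logical_ors is assumed, from
# its use further down, to OR-reduce a list of boolean masks:
def _example_merge_masks():
    from_wikidata_graph = [True, False, False]
    from_category_links = [False, True, False]
    # expected: [True, True, False]
    return logical_ors([from_wikidata_graph, from_category_links])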
def wkp(c, name):
"""Convert a string wikipedia article name to its Wikidata index."""
return c.article2id["enwiki/" + name][0][0]
def wkd(c, name):
"""Convert a wikidata QID to its wikidata index."""
return c.name2index[name]
def classify(c):
TRAVERSIBLE_BASIC = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF]
TRAVERSIBLE_COUNTRY = [
wprop.INSTANCE_OF,
wprop.SUBCLASS_OF,
wprop.COUNTRY_OF_CITIZENSHIP,
wprop.COUNTRY,
wprop.LOCATION,
wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY
]
TRAVERSIBLE_PART_OF = [
wprop.INSTANCE_OF,
wprop.SUBCLASS_OF,
wprop.CONTINENT,
wprop.PART_OF,
wprop.COUNTRY_OF_CITIZENSHIP,
wprop.COUNTRY,
wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY
]
TRAVERSIBLE_TOPIC = [
wprop.INSTANCE_OF, wprop.SUBCLASS_OF,
wprop.STUDIES, wprop.FIELD_OF_THIS_OCCUPATION, wprop.OCCUPATION,
wprop.FIELD_OF_WORK, wprop.INDUSTRY]
ASSOCIATION_FOOTBALL_PLAYER = wkd(c,"Q937857")
PAINTER = wkd(c,"Q1028181")
POLITICIAN = wkd(c,"Q82955")
ARTICLE = wkd(c,"Q191067")
VIDEO_GAME = wkd(c,"Q7889")
FILM = wkd(c,"Q11424")
FICTIONAL_CHARACTER = wkd(c,"Q95074")
POEM = wkd(c,"Q482")
BOOK = wkd(c,"Q571")
DISEASE = wkd(c,"Q12136")
PAINTING = wkd(c,"Q3305213")
VISUAL_ART_WORK = wkd(c,"Q4502142")
MUSIC_WORK = wkd(c,"Q2188189")
SCIENTIFIC_ARTICLE = wkd(c,"Q13442814")
PROTEIN_FAMILY = wkd(c,"Q417841")
PROTEIN_COMPLEX = wkd(c,"Q420927")
GENE = wkd(c,"Q7187")
CHEMICAL_SUBSTANCE = wkd(c,"Q79529")
PROTEIN = wkd(c,"Q8054")
TAXON = wkd(c,"Q16521")
PHYSICAL_OBJECT = wkd(c,"Q223557")
OUTERSPACE = wkp(c, 'Astronomical object')
#INTERNATIONAL_ORGANISATION = wkd(c,"")
HUMAN = wkp(c,"Human")
HUMAN_SETTLMENT = wkd(c,"Q486972")
DICTIONARY = wkd(c,"Q23622")
ABRREVIATION = wkd(c,"Q102786")
POPULATED_PLACE = wkd(c,"Q486972")
TERRITORIAL_ENTITY = wkd(c, "Q1496967")
DESA = wkd(c,"Q26211545")
TOWN_IN_CHINA = wkd(c,"Q735428")
ADMIN_DIVISION_CHINA = wkd(c,"Q50231")
COUNTRY = wkd(c,"Q6256")
MOUNTAIN_RANGE = wkd(c,"Q46831")
EARTH = wkp(c, "Earth")
GEOGRAPHIC_LOCATION = wkd(c, "Q2221906")
is_politician = c.satisfy([wprop.OCCUPATION], [POLITICIAN])
is_painter = c.satisfy([wprop.OCCUPATION], [PAINTER])
is_association_football_player = c.satisfy([wprop.OCCUPATION],[ASSOCIATION_FOOTBALL_PLAYER])
is_populated_place = c.satisfy(
[wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP,
wprop.COUNTRY, wprop.SUBCLASS_OF],
[GEOGRAPHIC_LOCATION, EARTH, HUMAN_SETTLMENT])
is_taxon = c.satisfy(
[wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF],
[TAXON])
is_other_wkd= c.satisfy(
[wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF],
[GENE, CHEMICAL_SUBSTANCE, SCIENTIFIC_ARTICLE,
PROTEIN, DISEASE, PROTEIN_FAMILY,PROTEIN_COMPLEX,
BOOK, MUSIC_WORK, PAINTING, VISUAL_ART_WORK, POEM, FILM,
FICTIONAL_CHARACTER,VIDEO_GAME,SCIENTIFIC_ARTICLE,ARTICLE])
is_gene_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Genes")], max_steps=5)
is_chromosome_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Chromosomes")], max_steps=5)
is_protein_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Proteins")], max_steps=5)
is_other= logical_ors([is_other_wkd, is_gene_wkp, is_chromosome_wkp,
is_protein_wkp ])
print("WIKI Links")
WIKIPEDIA_DISAMBIGUATION_PAGE = wkd(c,"Q4167410")
SCIENTIFIC_JOURNAL = wkd(c,"Q5633421")
SURNAME = wkd(c,"Q101352")
WIKI_NEWS_ARTICLE = wkd(c,"Q17633526")
WIKIMEDIA_CATEGORY = wkd(c,"Q4167836")
WIKIPEDIA_TEMPLATE_NAMESPACE = wkd(c,"Q11266439")
WIKIPEDIA_LIST = wkd(c,"Q13406463")
ENCYCLOPEDIA_ARTICLE = wkd(c,"Q17329259")
WIKIMEDIA_PROJECT_PAGE = wkd(c,"Q14204246")
RURAL_COMUNE_VIETNAM = wkd(c,"Q2389082")
TERRITORIAL_ENTITY = wkd(c,"Q1496967")
is_Wiki_Links = c.satisfy(TRAVERSIBLE_TOPIC,
[WIKIPEDIA_DISAMBIGUATION_PAGE,
SURNAME,
WIKIMEDIA_CATEGORY,
WIKIPEDIA_TEMPLATE_NAMESPACE,
WIKIPEDIA_LIST,
ENCYCLOPEDIA_ARTICLE,
WIKIMEDIA_PROJECT_PAGE,
WIKI_NEWS_ARTICLE
])
print("is_in_outer_space")
is_in_outer_space = c.satisfy(TRAVERSIBLE_PART_OF, [OUTERSPACE])
print("part_of_earth")
part_of_earth = c.satisfy(
[wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY, wprop.SUBCLASS_OF, wprop.LOCATION],
[GEOGRAPHIC_LOCATION, EARTH])
print("is_in_outer_space_not_earth")
is_in_outer_space_not_earth = logical_negate(
is_in_outer_space, [part_of_earth])
print("African countries")
ALGERIA = wkp(c,"Algeria")
ANGOLA = wkp(c,"Angola")
BENIN = wkp(c,"Benin")
BOTSWANA = wkd(c,"Q963")
BURKINA_FASO = wkd(c,"Q965")
BURUNDI = wkd(c,"Q967")
CAMEROON = wkd(c,"Q1009")
CAPE_VERDE = wkd(c,"Q1011")
CHAD = wkd(c,"Q657")
CENTRAL_AFRICAN_REPUBLIC = wkd(c,"Q929")
COMOROS = wkd(c,"Q970")
DEMOCRATIC_REPUBLIC_OF_CONGO = wkd(c,"Q974")
REPUBLIC_OF_CONGO = wkd(c,"Q971")
DJIBOUTI = wkd(c,"Q977")
EGYPT = wkd(c,"Q79")
RASHIDUN_CALIPHATE = wkd(c,"Q12490507")
EQUATORIAL_GUINEA = wkd(c,"Q983")
ERITREA = wkd(c,"Q986")
ETHIOPIA = wkd(c,"Q115")
GABON = wkd(c,"Q1000")
THE_GAMBIA = wkd(c,"Q1005")
GHANA = wkd(c,"Q117")
GUINEA = wkd(c,"Q1006")
GUINEA_BISSAU = wkd(c,"Q1007")
IVORY_COAST = wkd(c,"Q1008")
KENYA = wkd(c,"Q114")
LESOTHO = wkd(c,"Q1013")
LIBERIA = wkd(c,"Q1014")
LIBYA = wkd(c,"Q1016")
MADAGASCAR = wkd(c,"Q1019")
MALAWI = wkd(c,"Q1020")
MALI = wkd(c,"Q912")
MAURITANIA = wkd(c,"Q1025")
MAURITIUS = wkd(c,"Q1027")
MOROCCO = wkd(c,"Q1028")
MOZAMBIQUE = wkd(c,"Q1029")
NAMIBIA = wkd(c,"Q1030")
NIGER = wkd(c,"Q1032")
NIGERIA = wkd(c,"Q1033")
RWANDA = wkd(c,"Q1037")
SAHARI_ARAB_DEOMOCRATIC_REPUBLIC = wkd(c,"Q40362")
SAO_TOME_AND_PRINCIPE= wkd(c,"Q1039")
SENEGAL = wkd(c,"Q1041")
SEYCHELLES = wkd(c,"Q1042")
SIERRA_LEONE = wkd(c,"Q1044")
SOMALIA = wkd(c,"Q1045")
SOUTH_AFRICA = wkd(c,"Q258")
SOUTHSUDAN = wkd(c,"Q958")
SUDAN = wkd(c,"Q1049")
SWAZILAND= wkd(c,"Q1050")
TANZANIA = wkd(c,"Q924")
TOGO = wkd(c,"Q945")
TUNISIA= wkd(c,"Q948")
UGANDA = wkd(c,"Q1036")
WESTERN_SAHARA = wkd(c,"Q6250")
ZAMBIA = wkd(c,"Q953")
ZIMBABWE = wkd(c,"Q954")
SOMALI_LAND = wkd(c,"Q34754")
in_algeria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ALGERIA])
in_algeria_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Algeria stubs")], max_steps=4)
in_algeria_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Algeria")], max_steps=3)
in_algeria_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Algeria")], max_steps=3)
in_algeria = logical_ors([in_algeria_wkd, in_algeria_stubs, in_algeria_politics, in_algeria_roads])
in_angola_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ANGOLA])
in_angola_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Angola stubs")], max_steps=4)
in_angola_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Angola")], max_steps=3)
in_angola_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Angola")], max_steps=3)
in_angola = logical_ors([in_angola_wkd , in_angola_stubs, in_angola_politics, in_angola_roads])
in_benin_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BENIN])
in_benin_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Benin stubs")], max_steps=4)
in_benin_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Benin")], max_steps=3)
in_benin_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Benin")], max_steps=3)
in_benin = logical_ors([in_benin_wkd, in_benin_stubs, in_benin_politics, in_benin_roads])
in_botswana_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BOTSWANA])
in_botswana_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Botswana stubs")], max_steps=4)
in_botswana_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Botswana")], max_steps=3)
in_botswana_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Botswana")], max_steps=3)
in_botswana = logical_ors([in_botswana_wkd, in_botswana_stubs, in_botswana_politics,in_botswana_roads])
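# Each country in this function follows the same two-signal recipe: reachability
# over the TRAVERSIBLE_COUNTRY properties, OR'd with Wikipedia category-link
# reachability for a few hand-picked categories (stubs, politics, roads, ...).
# A hedged helper expressing that pattern is sketched here for reference; it is
# illustrative only, is not called anywhere in this function, and its argument
# names are assumptions rather than part of the original module.
def in_country_sketch(country_wkd_ids, category_names=(), max_steps=4):
    # graph reachability through country-like properties
    masks = [c.satisfy(TRAVERSIBLE_COUNTRY, list(country_wkd_ids))]
    # plus category-link reachability for each extra Wikipedia category
    for category_name in category_names:
        masks.append(c.satisfy([wprop.CATEGORY_LINK], [wkp(c, category_name)], max_steps=max_steps))
    return logical_ors(masks)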
in_burkina_faso_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BURKINA_FASO])
in_burkina_faso_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Burkina Faso stubs")], max_steps=4)
in_burkina_faso_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Burkina Faso")], max_steps=3)
in_burkina_faso = logical_ors([in_burkina_faso_wkd, in_burkina_faso_stubs, in_burkina_faso_politics])
in_burundi_politics_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Burundi")], max_steps=4)
in_burundi_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BURUNDI])
in_burundi = logical_ors([in_burundi_wkd,in_burundi_politics_wkp])
in_cameroon = c.satisfy(TRAVERSIBLE_COUNTRY, [CAMEROON])
in_cape_verde= c.satisfy(TRAVERSIBLE_COUNTRY, [CAPE_VERDE])
in_chad = c.satisfy(TRAVERSIBLE_COUNTRY, [CHAD])
in_central_african_republic = c.satisfy(TRAVERSIBLE_COUNTRY, [CENTRAL_AFRICAN_REPUBLIC])
in_comoros = c.satisfy(TRAVERSIBLE_COUNTRY, [COMOROS])
in_democratic_republic_congo = c.satisfy(TRAVERSIBLE_COUNTRY, [DEMOCRATIC_REPUBLIC_OF_CONGO])
in_republic_of_congo = c.satisfy(TRAVERSIBLE_COUNTRY, [REPUBLIC_OF_CONGO])
in_djibouti = c.satisfy(TRAVERSIBLE_COUNTRY, [DJIBOUTI])
in_egypt_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [EGYPT])
in_ancient_egypt = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Egypt")], max_steps=6)
in_Rashidun_Caliphate = c.satisfy(TRAVERSIBLE_COUNTRY, [RASHIDUN_CALIPHATE])
egyptian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Egyptian people")], max_steps=6)
in_egypt = logical_ors([in_egypt_wkd, in_ancient_egypt, in_Rashidun_Caliphate, egyptian_people])
in_equatorial_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [EQUATORIAL_GUINEA])
in_eritrea = c.satisfy(TRAVERSIBLE_COUNTRY, [ERITREA])
in_ethiopia = c.satisfy(TRAVERSIBLE_COUNTRY, [ETHIOPIA])
in_gabon = c.satisfy(TRAVERSIBLE_COUNTRY, [GABON])
in_the_gambia = c.satisfy(TRAVERSIBLE_COUNTRY, [THE_GAMBIA])
in_ghana = c.satisfy(TRAVERSIBLE_COUNTRY, [GHANA])
in_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [GUINEA])
in_guinea_bissau = c.satisfy(TRAVERSIBLE_COUNTRY, [GUINEA_BISSAU])
in_ivory_coast = c.satisfy(TRAVERSIBLE_COUNTRY, [IVORY_COAST])
in_lesotho = c.satisfy(TRAVERSIBLE_COUNTRY, [LESOTHO])
in_kenya = c.satisfy(TRAVERSIBLE_COUNTRY, [KENYA])
in_liberia = c.satisfy(TRAVERSIBLE_COUNTRY, [LIBERIA])
in_libya = c.satisfy(TRAVERSIBLE_COUNTRY, [LIBYA])
in_madagascar = c.satisfy(TRAVERSIBLE_COUNTRY, [MADAGASCAR])
in_malawi = c.satisfy(TRAVERSIBLE_COUNTRY, [MALAWI])
in_mali = c.satisfy(TRAVERSIBLE_COUNTRY, [MALI])
in_mauritania = c.satisfy(TRAVERSIBLE_COUNTRY, [MAURITANIA])
in_mauritius = c.satisfy(TRAVERSIBLE_COUNTRY, [MAURITIUS])
in_morrocco = c.satisfy(TRAVERSIBLE_COUNTRY, [MOROCCO])
in_mozambique = c.satisfy(TRAVERSIBLE_COUNTRY, [MOZAMBIQUE])
in_namibia = c.satisfy(TRAVERSIBLE_COUNTRY, [NAMIBIA])
in_niger = c.satisfy(TRAVERSIBLE_COUNTRY, [NIGER])
in_nigeria = c.satisfy(TRAVERSIBLE_COUNTRY, [NIGERIA])
in_rwanda = c.satisfy(TRAVERSIBLE_COUNTRY, [RWANDA])
in_sadr = c.satisfy(TRAVERSIBLE_COUNTRY, [SAHARI_ARAB_DEOMOCRATIC_REPUBLIC])
in_stap = c.satisfy(TRAVERSIBLE_COUNTRY, [SAO_TOME_AND_PRINCIPE])
in_senegal = c.satisfy(TRAVERSIBLE_COUNTRY, [SENEGAL])
in_seychelles = c.satisfy(TRAVERSIBLE_COUNTRY, [SEYCHELLES])
in_sierra_leone = c.satisfy(TRAVERSIBLE_COUNTRY, [SIERRA_LEONE])
in_somalia = c.satisfy(TRAVERSIBLE_COUNTRY, [SOMALIA])
in_somali_land = c.satisfy(TRAVERSIBLE_COUNTRY, [SOMALI_LAND])
in_south_africa = c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTH_AFRICA])
in_ssudan= c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTHSUDAN])
in_sudan= c.satisfy(TRAVERSIBLE_COUNTRY, [SUDAN])
in_swaziland= c.satisfy(TRAVERSIBLE_COUNTRY, [SWAZILAND])
in_tanzania_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Sports competitions in Tanzania")], max_steps=4)
in_tanzania_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [TANZANIA])
in_tanzania = logical_ors([in_tanzania_wkp,in_tanzania_wkd])
in_togo = c.satisfy(TRAVERSIBLE_COUNTRY, [TOGO])
in_tunisia = c.satisfy(TRAVERSIBLE_COUNTRY, [TUNISIA])
in_uganda = c.satisfy(TRAVERSIBLE_COUNTRY, [UGANDA])
in_western_sahara = c.satisfy(TRAVERSIBLE_COUNTRY, [WESTERN_SAHARA])
in_zambia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ZAMBIA])
zambian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Zambian people")], max_steps=4)
in_zambia = logical_ors([in_zambia_wkd, zambian_people])
in_zimbabwe = c.satisfy(TRAVERSIBLE_COUNTRY, [ZIMBABWE])
in_africa = logical_ors([
in_algeria, in_angola, in_benin, in_botswana, in_burkina_faso, in_burundi,
in_cameroon, in_cape_verde, in_chad, in_central_african_republic, in_comoros,
in_democratic_republic_congo, in_republic_of_congo, in_djibouti, in_egypt,
in_equatorial_guinea, in_eritrea, in_ethiopia, in_gabon, in_the_gambia,
in_ghana, in_guinea, in_guinea_bissau, in_ivory_coast, in_lesotho, in_kenya,
in_liberia, in_libya, in_madagascar, in_malawi, in_mali, in_mauritania,
in_mauritius, in_morrocco, in_mozambique, in_namibia, in_niger, in_nigeria,
in_rwanda, in_sadr, in_stap, in_senegal, in_seychelles, in_sierra_leone,
in_somalia, in_somali_land, in_south_africa, in_ssudan, in_sudan, in_swaziland,
in_tanzania, in_togo, in_tunisia, in_uganda, in_western_sahara, in_zambia,
in_zimbabwe
])
print("Oceanian countries")
AUSTRALIA = wkd(c,"Q408")
FIJI = wkd(c,"Q712")
INDONESIA = wkd(c,"Q252")
KIRIBATI= wkd(c,"Q710")
MARSHALL_ISLANDS= wkd(c,"Q709")
FEDERATED_STATES_OF_MICRONESIA= wkd(c,"Q702")
NAURU= wkd(c,"Q697")
PALAU= wkd(c,"Q695")
PAPUA_NEW_GUINEA= wkd(c,"Q691")
SAMOA = wkd(c,"Q683")
SOLOMON_ISLANDS= wkd(c,"Q685")
VANUATU = wkd(c,"Q686")
NEW_ZEALAND = wkd(c,"Q664")
in_australia_athletes = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Australian sportspeople")], max_steps=5)
in_australia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [AUSTRALIA])
in_australia = logical_ors([in_australia_wkd, in_australia_athletes])
in_fiji = c.satisfy(TRAVERSIBLE_COUNTRY, [FIJI])
in_indonesia = c.satisfy(TRAVERSIBLE_COUNTRY, [INDONESIA])
in_kiribati = c.satisfy(TRAVERSIBLE_COUNTRY, [KIRIBATI])
in_marshall_islands = c.satisfy(TRAVERSIBLE_COUNTRY, [MARSHALL_ISLANDS])
in_federates_states_of_micronesia = c.satisfy(TRAVERSIBLE_COUNTRY, [FEDERATED_STATES_OF_MICRONESIA])
in_nauru = c.satisfy(TRAVERSIBLE_COUNTRY, [NAURU])
in_palau = c.satisfy(TRAVERSIBLE_COUNTRY, [PALAU])
in_papua_new_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [PAPUA_NEW_GUINEA])
in_samoa_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Samoa")], max_steps=5)
in_samoa_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SAMOA])
in_samoa = logical_ors([in_samoa_wkd, in_samoa_wkp])
in_solomon_islands = c.satisfy(TRAVERSIBLE_COUNTRY, [SOLOMON_ISLANDS])
in_vanuatu = c.satisfy(TRAVERSIBLE_COUNTRY, [VANUATU])
in_new_zealand = c.satisfy(TRAVERSIBLE_COUNTRY, [NEW_ZEALAND])
print("South American countries")
ARGENTINA = wkd(c,"Q414")
BOLIVIA = wkd(c,"Q750")
BRAZIL = wkd(c,"Q155")
CHILE = wkd(c,"Q298")
COLOMBIA = wkd(c,"Q739")
ECUADOR = wkd(c,"Q736")
GUYANA = wkd(c,"Q734")
PARAGUAY = wkd(c,"Q733")
PERU = wkd(c,"Q419")
SURINAME = wkd(c,"Q730")
TRINIDAD_AND_TOBAGO = wkd(c,"Q754")
URUGUAY = wkd(c,"Q77")
VENEZUELA = wkd(c,"Q717")
in_argentina = c.satisfy(TRAVERSIBLE_COUNTRY, [ARGENTINA])
in_bolivia = c.satisfy(TRAVERSIBLE_COUNTRY, [BOLIVIA])
in_brazil = c.satisfy(TRAVERSIBLE_COUNTRY, [BRAZIL])
in_chile = c.satisfy(TRAVERSIBLE_COUNTRY, [CHILE])
in_colombia = c.satisfy(TRAVERSIBLE_COUNTRY, [COLOMBIA])
in_ecuador = c.satisfy(TRAVERSIBLE_COUNTRY, [ECUADOR])
in_guyana = c.satisfy(TRAVERSIBLE_COUNTRY, [GUYANA])
in_paraguay = c.satisfy(TRAVERSIBLE_COUNTRY, [PARAGUAY])
in_peru = c.satisfy(TRAVERSIBLE_COUNTRY, [PERU])
in_suriname = c.satisfy(TRAVERSIBLE_COUNTRY, [SURINAME])
in_trinidad_and_tobago = c.satisfy(TRAVERSIBLE_COUNTRY, [TRINIDAD_AND_TOBAGO])
in_uruguay = c.satisfy(TRAVERSIBLE_COUNTRY, [URUGUAY])
in_venezuela = c.satisfy(TRAVERSIBLE_COUNTRY, [VENEZUELA])
print("Central American countries")
BELIZE = wkd(c,"Q242")
COSTA_RICA = wkd(c,"Q800")
EL_SALVADOR = wkd(c,"Q792")
GUATEMALA = wkd(c,"Q774")
HONDURAS = wkd(c,"Q783")
NICARAGUA = wkd(c,"Q811")
PANAMA = wkd(c,"Q804")
in_belize = c.satisfy(TRAVERSIBLE_COUNTRY, [BELIZE])
in_costa_rica = c.satisfy(TRAVERSIBLE_COUNTRY, [COSTA_RICA])
in_el_salvador = c.satisfy(TRAVERSIBLE_COUNTRY, [EL_SALVADOR])
in_guatemala = c.satisfy(TRAVERSIBLE_COUNTRY, [GUATEMALA])
in_honduras = c.satisfy(TRAVERSIBLE_COUNTRY, [HONDURAS])
in_nicaragua = c.satisfy(TRAVERSIBLE_COUNTRY, [NICARAGUA])
in_panama = c.satisfy(TRAVERSIBLE_COUNTRY, [PANAMA])
print("North American countries")
ANTIGUA_BARBUDA = wkd(c,"Q781")
BAHAMAS = wkd(c,"Q778")
BARBADOS = wkd(c,"Q244")
BELIZE = wkd(c,"Q242")
CANADA = wkd(c,"Q16")
COSTA_RICA = wkd(c,"Q800")
CUBA = wkd(c,"Q241")
DOMINICAN_REPUBLIC = wkd(c,"Q786")
EL_SALVADOR = wkd(c,"Q792")
GRENADA = wkd(c,"Q769")
GUATEMALA = wkd(c,"Q774")
HAITI = wkd(c,"Q790")
HONDURAS = wkd(c,"Q783")
JAMAICA = wkd(c,"Q766")
MEXICO = wkd(c,"Q96")
NICARAGUA = wkd(c,"Q811")
PANAMA = wkd(c,"Q804")
SAINT_KITTS_AND_NEVIS = wkd(c,"Q763")
SAINT_LUCIA = wkd(c,"Q760")
SAINT_VINCENT_AND_GRENADINES = wkd(c,"Q757")
UNITED_STATES = wkd(c,"Q30")
in_antigua_barbuda = c.satisfy(TRAVERSIBLE_COUNTRY, [ANTIGUA_BARBUDA])
in_bahamas = c.satisfy(TRAVERSIBLE_COUNTRY, [BAHAMAS])
in_barbados = c.satisfy(TRAVERSIBLE_COUNTRY, [BARBADOS])
in_belize = c.satisfy(TRAVERSIBLE_COUNTRY, [BELIZE])
canadians = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Canadian people by occupation")], max_steps=5)
in_canada_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CANADA])
in_canada = logical_ors([canadians, in_canada_wkd])
in_costa_rica = c.satisfy(TRAVERSIBLE_COUNTRY, [COSTA_RICA])
in_cuba = c.satisfy(TRAVERSIBLE_COUNTRY, [CUBA])
in_dominican_republic = c.satisfy(TRAVERSIBLE_COUNTRY, [DOMINICAN_REPUBLIC])
in_el_salvador = c.satisfy(TRAVERSIBLE_COUNTRY, [EL_SALVADOR])
in_grenada = c.satisfy(TRAVERSIBLE_COUNTRY, [GRENADA])
in_guatemala = c.satisfy(TRAVERSIBLE_COUNTRY, [GUATEMALA])
in_haiti = c.satisfy(TRAVERSIBLE_COUNTRY, [HAITI])
in_honduras = c.satisfy(TRAVERSIBLE_COUNTRY, [HONDURAS])
in_jamaica = c.satisfy(TRAVERSIBLE_COUNTRY, [JAMAICA])
in_mexico = c.satisfy(TRAVERSIBLE_COUNTRY, [MEXICO])
in_nicaragua = c.satisfy(TRAVERSIBLE_COUNTRY, [NICARAGUA])
in_panama = c.satisfy(TRAVERSIBLE_COUNTRY, [PANAMA])
in_Saint_Kitts_and_Nevis = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_KITTS_AND_NEVIS])
in_saint_lucia = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_LUCIA])
in_saint_vincent_and_grenadines = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_VINCENT_AND_GRENADINES])
in_usa_sports = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:History of sports in the United States")], max_steps=7)
years_in_usa = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in the United States")], max_steps=7)
in_usa_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in the United States")], max_steps=7)
in_united_states_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [UNITED_STATES])
in_united_states = logical_ors([in_usa_sports,in_united_states_wkd, years_in_usa])
print("Asian countries")
FOURTH_ADMIN_DIVISION_INDONESIA = wkd(c,"Q2225692")
RURAL_COMUNE_VIETNAM = wkd(c,"Q2389082")
AFGHANISTAN = wkd(c,"Q889")
KINGDOM_OF_AFGHANISTAN = wkd(c,"Q1138904")
REPUBLIC_OF_AFGHANISTAN = wkd(c,"Q1415128")
DEMOCRATIC_REPUBLIC_OF_AFGHANISTAN = wkd(c,"Q476757")
BANGLADESH = wkd(c,"Q902")
BHUTAN = wkd(c,"Q917")
BRUNEI = wkd(c,"Q921")
CAMBODIA = wkd(c,"Q424")
CHINA = wkd(c,"Q148")
EAST_TIMOR = wkd(c,"Q574")
INDIA = wkd(c,"Q668")
INDONESIA = wkd(c,"Q252")
IRAN = wkd(c,"Q794")
IRAQ = wkd(c,"Q796")
KURDISTAN = wkd(c,"Q41470")
ISRAEL = wkd(c,"Q801")
JAPAN = wkd(c,"Q17")
JORDAN = wkd(c,"Q810")
KAZAKHSTAN = wkd(c,"Q232")
KUWAIT = wkd(c,"Q817")
KYRGYZSTAN = wkd(c,"Q813")
LAOS = wkd(c,"Q819")
LEBANON = wkd(c,"Q822")
MALAYSIA = wkd(c,"Q833")
MALDIVES = wkd(c,"Q826")
MONGOLIA = wkd(c,"Q711")
MYANMAR = wkd(c,"Q836")
NEPAL = wkd(c,"Q837")
NORTH_KOREA = wkd(c,"Q423")
OMAN = wkd(c,"Q842")
PALESTINE = wkd(c,"Q219060")
PAKISTAN = wkd(c,"Q843")
PHILIPPINES = wkd(c,"Q928")
QATAR = wkd(c,"Q846")
SAUDI_ARABIA = wkd(c,"Q851")
SINGAPORE = wkd(c,"Q334")
SOUTH_KOREA = wkd(c,"Q884")
SRI_LANKA = wkd(c,"Q854")
SYRIA = wkd(c,"Q858")
TAIWAN = wkd(c,"Q865")
TAJIKISTAN = wkd(c,"Q863")
THAILAND = wkd(c,"Q869")
TURKMENISTAN = wkd(c,"Q874")
UNITED_ARAB_EMIRATES = wkd(c,"Q878")
UZBEKISTAN = wkd(c,"Q265")
VIETNAM = wkd(c,"Q881")
YEMEN = wkd(c,"Q805")
in_afghanistan = c.satisfy(TRAVERSIBLE_COUNTRY, [AFGHANISTAN, REPUBLIC_OF_AFGHANISTAN, DEMOCRATIC_REPUBLIC_OF_AFGHANISTAN])
in_bangladesh = c.satisfy(TRAVERSIBLE_COUNTRY, [BANGLADESH])
in_bhutan = c.satisfy(TRAVERSIBLE_COUNTRY, [BHUTAN])
in_brunei = c.satisfy(TRAVERSIBLE_COUNTRY, [BRUNEI])
in_cambodia = c.satisfy(TRAVERSIBLE_COUNTRY, [CAMBODIA])
years_in_china = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in China")], max_steps=6)
chinese_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Chinese people by occupation")], max_steps=6)
is_tibetan_politician = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Tibetan politicians")], max_steps=6)
in_china_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CHINA])
in_china = logical_ors([in_china_wkd,years_in_china,is_tibetan_politician, chinese_people])
in_east_timor = c.satisfy(TRAVERSIBLE_COUNTRY, [EAST_TIMOR])
in_india = c.satisfy(TRAVERSIBLE_COUNTRY, [INDIA])
in_indonesia = c.satisfy(TRAVERSIBLE_COUNTRY, [INDONESIA,FOURTH_ADMIN_DIVISION_INDONESIA])
in_iran = c.satisfy(TRAVERSIBLE_COUNTRY, [IRAN])
in_iraq = c.satisfy(TRAVERSIBLE_COUNTRY, [IRAQ, KURDISTAN])
in_israel = c.satisfy(TRAVERSIBLE_COUNTRY, [ISRAEL])
in_japan = c.satisfy(TRAVERSIBLE_COUNTRY, [JAPAN])
in_jordan = c.satisfy(TRAVERSIBLE_COUNTRY, [JORDAN])
in_kazakhstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KAZAKHSTAN])
in_kuwait = c.satisfy(TRAVERSIBLE_COUNTRY, [KUWAIT])
in_kyrgyzstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KYRGYZSTAN])
in_laos = c.satisfy(TRAVERSIBLE_COUNTRY, [LAOS])
in_lebanon = c.satisfy(TRAVERSIBLE_COUNTRY, [LEBANON])
in_malaysia = c.satisfy(TRAVERSIBLE_COUNTRY, [MALAYSIA])
in_maldives = c.satisfy(TRAVERSIBLE_COUNTRY, [MALDIVES])
in_mongolia = c.satisfy(TRAVERSIBLE_COUNTRY, [MONGOLIA])
in_myanmar = c.satisfy(TRAVERSIBLE_COUNTRY, [MYANMAR])
in_nepal = c.satisfy(TRAVERSIBLE_COUNTRY, [NEPAL])
in_north_korea = c.satisfy(TRAVERSIBLE_COUNTRY, [NORTH_KOREA])
in_oman = c.satisfy(TRAVERSIBLE_COUNTRY, [OMAN])
in_palestine = c.satisfy(TRAVERSIBLE_COUNTRY, [PALESTINE])
in_pakistan = c.satisfy(TRAVERSIBLE_COUNTRY, [PAKISTAN])
in_philippines = c.satisfy(TRAVERSIBLE_COUNTRY, [PHILIPPINES])
in_qatar = c.satisfy(TRAVERSIBLE_COUNTRY, [QATAR])
in_saudi_arabia = c.satisfy(TRAVERSIBLE_COUNTRY, [SAUDI_ARABIA])
in_singapore = c.satisfy(TRAVERSIBLE_COUNTRY, [SINGAPORE])
in_south_korea_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTH_KOREA])
korean_rulers = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Korean rulers")], max_steps=6)
south_korea_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:South Korea")], max_steps=6)
in_south_korea = logical_ors([in_south_korea_wkd, south_korea_wkp, korean_rulers])
in_sri_lanka = c.satisfy(TRAVERSIBLE_COUNTRY, [SRI_LANKA])
in_syria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SYRIA])
ancient_syria = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Syria")], max_steps=6)
in_syria = logical_ors([in_syria_wkd,ancient_syria])
in_taiwan = c.satisfy(TRAVERSIBLE_COUNTRY, [TAIWAN])
in_tajikistan = c.satisfy(TRAVERSIBLE_COUNTRY, [TAJIKISTAN])
in_thailand = c.satisfy(TRAVERSIBLE_COUNTRY, [THAILAND])
in_turkmenistan = c.satisfy(TRAVERSIBLE_COUNTRY, [TURKMENISTAN])
in_united_arab_emirates = c.satisfy(TRAVERSIBLE_COUNTRY, [UNITED_ARAB_EMIRATES])
in_uzbekistan = c.satisfy(TRAVERSIBLE_COUNTRY, [UZBEKISTAN])
in_vietnam = c.satisfy(TRAVERSIBLE_COUNTRY, [VIETNAM, RURAL_COMUNE_VIETNAM])
in_yemen = c.satisfy(TRAVERSIBLE_COUNTRY, [YEMEN])
print("European countries")
ALBANIA = wkd(c,"Q222")
ANDORRA = wkd(c,"Q228")
ARMENIA = wkd(c,"Q399")
AUSTRIA = wkd(c,"Q40")
AUSTRIA_HUNGARY = wkd(c,"Q28513")
AZERBAIJAN = wkd(c,"Q227")
BELARUS = wkd(c,"Q184")
BELGIUM = wkd(c,"Q31")
BOSNIA = wkd(c,"Q225")
BULGARIA = wkd(c,"Q219")
CROATIA = wkd(c,"Q224")
CYPRUS = wkd(c,"Q229")
CZECH_REPUBLIC = wkd(c,"Q213")
CZECHOSLOVAKIA = wkd(c,"Q33946")
DENMARK = wkd(c,"Q35")
ESTONIA = wkd(c,"Q191")
FINLAND = wkd(c,"Q33")
FRANCE = wkd(c,"Q142")
GEORGIA = wkd(c,"Q230")
GERMANY = wkd(c,"Q183")
GERMANY_NAZI = wkd(c,"Q7318")
GERMAN_EMPIRE = wkd(c,"Q43287")
GERMAN_CONFEDERATION = wkd(c,"Q151624")
EAST_GERMANY = wkd(c,"Q16957")
GREECE = wkd(c,"Q41")
HUNGARY = wkd(c,"Q28")
ICELAND = wkd(c,"Q189")
IRELAND = wkd(c,"Q27")
ITALY = wkd(c,"Q38")
ROMAN_EMPIRE = wkd(c,"Q2277")
ANCIENT_ROME = wkd(c,"Q1747689")
KINGDOM_OF_ITALY = wkd(c,"Q172579")
NATIONAL_FASCIST_PARTY = wkd(c,"Q139596")
KAZAKHSTAN = wkd(c,"Q232")
KOSOVO = wkd(c,"Q1246")
LATVIA = wkd(c,"Q211")
LIECHTENSTEIN = wkd(c,"Q347")
LITHUANIA = wkd(c,"Q37")
LUXEMBOURG = wkd(c,"Q32")
MACEDONIA = wkd(c,"Q221")
MALTA = wkd(c,"Q233")
MOLDOVA = wkd(c,"Q217")
MONACO = wkd(c,"Q235")
MONTENEGRO = wkd(c,"Q236")
NETHERLANDS = wkd(c,"Q55")
SOUTHERN_NETHERLANDS = wkd(c,"Q6581823")
KINGDOM_OF_NETHERLANDS = wkd(c,"Q29999")
NORWAY = wkd(c,"Q20")
POLAND = wkd(c,"Q36")
PORTUGAL = wkd(c,"Q45")
ROMANIA = wkd(c,"Q218")
RUSSIA = wkd(c,"Q159")
SOVIET_UNION =wkd(c,"Q15180")
RUSSIAN_EMPIRE = wkd(c,"Q34266")
SAN_MARINO = wkd(c,"Q238")
SERBIA = wkd(c,"Q403")
YOUGOSLAVIA = wkd(c,"Q36704")
SLOVAKIA = wkd(c,"Q214")
SLOVENIA = wkd(c,"Q215")
SPAIN = wkd(c,"Q29")
KINGDOM_OF_CASTILLE = wkd(c,"Q179293")
SWEDEN = wkd(c,"Q34")
SWITZERLAND = wkd(c,"Q39")
TURKEY = wkd(c,"Q43")
OTTOMAN_EMPIRE = wkd(c,"Q12560")
UKRAINE = wkd(c,"Q212")
UNITED_KINGDOM = wkd(c,"Q145")
UNITED_KINGDOM_OLD = wkd(c,"Q174193")
KINGDOM_OF_ENGLAND = wkd(c,"Q179876")
KINGDOM_OF_GREAT_BRITAIN = wkd(c,"Q161885")
VATICAN_CITY = wkd(c,"Q237")
in_albania = c.satisfy(TRAVERSIBLE_COUNTRY, [ALBANIA])
in_andorra = c.satisfy(TRAVERSIBLE_COUNTRY, [ANDORRA])
in_armenia = c.satisfy(TRAVERSIBLE_COUNTRY, [ARMENIA])
in_austria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [AUSTRIA, AUSTRIA_HUNGARY])
is_austria_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Austrian people by occupation")], max_steps=5)
in_austria = logical_ors([in_austria_wkd, is_austria_people])
in_azerbaijan = c.satisfy(TRAVERSIBLE_COUNTRY, [AZERBAIJAN])
in_belarus = c.satisfy(TRAVERSIBLE_COUNTRY, [BELARUS])
in_belgium = c.satisfy(TRAVERSIBLE_COUNTRY, [BELGIUM])
in_bosnia = c.satisfy(TRAVERSIBLE_COUNTRY, [BOSNIA])
in_bulgaria = c.satisfy(TRAVERSIBLE_COUNTRY, [BULGARIA])
in_croatia = c.satisfy(TRAVERSIBLE_COUNTRY, [CROATIA])
in_cyprus = c.satisfy(TRAVERSIBLE_COUNTRY, [CYPRUS])
in_czech_republic_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CZECH_REPUBLIC,CZECHOSLOVAKIA])
czhec_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Czechoslovak people")], max_steps=5)
in_czech_republic = logical_ors([in_czech_republic_wkd, czhec_people])
in_denmark_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [DENMARK])
is_danish_legendary_figure = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Danish legendary figures")], max_steps=5)
in_denmark = logical_ors([in_denmark_wkd,is_danish_legendary_figure])
in_estonia = c.satisfy(TRAVERSIBLE_COUNTRY, [ESTONIA])
in_finland = c.satisfy(TRAVERSIBLE_COUNTRY, [FINLAND])
years_in_france = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in France")], max_steps=5)
in_france_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [FRANCE])
in_france = logical_ors([in_france_wkd,years_in_france])
in_georgia = c.satisfy(TRAVERSIBLE_COUNTRY, [GEORGIA])
years_in_germany = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Germany")], max_steps=5)
nazis = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Nazis")], max_steps=5)
german_nobility = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:German nobility")], max_steps=7)
in_germany_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [GERMANY, GERMANY_NAZI, GERMAN_EMPIRE, GERMAN_CONFEDERATION, EAST_GERMANY])
in_germany = logical_ors([in_germany_wkd, years_in_germany, nazis, german_nobility])
years_in_greece = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Greece")], max_steps=5)
ancient_greeks = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Greeks")], max_steps=7)
greek_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Greek people by occupation")], max_steps=7)
in_greece_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [GREECE])
in_greece = logical_ors([in_greece_wkd,years_in_greece, ancient_greeks, greek_people])
in_hungary = c.satisfy(TRAVERSIBLE_COUNTRY, [HUNGARY])
in_iceland = c.satisfy(TRAVERSIBLE_COUNTRY, [ICELAND])
in_ireland = c.satisfy(TRAVERSIBLE_COUNTRY, [IRELAND])
in_italy_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ITALY,NATIONAL_FASCIST_PARTY, KINGDOM_OF_ITALY, ROMAN_EMPIRE, ANCIENT_ROME])
is_italian_politician = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Italian politicians")], max_steps=6)
in_roman_empire = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roman Empire")], max_steps=6)
in_history_of_italy = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:History of Italy by region")], max_steps=6)
italian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Italian people by occupation")], max_steps=6)
ancient_romans = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Romans")], max_steps=8)
in_italy = logical_ors([in_italy_wkd, in_roman_empire, in_history_of_italy,
is_italian_politician, italian_people, ancient_romans])
in_kazakhstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KAZAKHSTAN])
in_kosovo = c.satisfy(TRAVERSIBLE_COUNTRY, [KOSOVO])
in_latvia = c.satisfy(TRAVERSIBLE_COUNTRY, [LATVIA])
in_liectenstein = c.satisfy(TRAVERSIBLE_COUNTRY, [LIECHTENSTEIN])
in_lithuania = c.satisfy(TRAVERSIBLE_COUNTRY, [LITHUANIA])
in_luxembourg = c.satisfy(TRAVERSIBLE_COUNTRY, [LUXEMBOURG])
in_macedonia = c.satisfy(TRAVERSIBLE_COUNTRY, [MACEDONIA])
in_malta = c.satisfy(TRAVERSIBLE_COUNTRY, [MALTA])
in_moldova = c.satisfy(TRAVERSIBLE_COUNTRY, [MOLDOVA])
in_monaco = c.satisfy(TRAVERSIBLE_COUNTRY, [MONACO])
in_montenegro = c.satisfy(TRAVERSIBLE_COUNTRY, [MONTENEGRO])
in_netherlands_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [NETHERLANDS, KINGDOM_OF_NETHERLANDS, SOUTHERN_NETHERLANDS])
dutch_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Dutch people by occupation")], max_steps=5)
in_netherlands = logical_ors([in_netherlands_wkd, dutch_people])
in_norway = c.satisfy(TRAVERSIBLE_COUNTRY, [NORWAY])
in_poland = c.satisfy(TRAVERSIBLE_COUNTRY, [POLAND])
in_portugal = c.satisfy(TRAVERSIBLE_COUNTRY, [PORTUGAL])
in_romania = c.satisfy(TRAVERSIBLE_COUNTRY, [ROMANIA])
russian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Russian people by occupation")], max_steps=7)
sport_in_the_soviet_union = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Sport in the Soviet Union")], max_steps=7)
in_russia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [RUSSIA, RUSSIAN_EMPIRE, SOVIET_UNION])
in_russia = logical_ors([in_russia_wkd, russian_people, sport_in_the_soviet_union])
in_san_marino = c.satisfy(TRAVERSIBLE_COUNTRY, [SAN_MARINO])
in_serbia = c.satisfy(TRAVERSIBLE_COUNTRY, [SERBIA, YOUGOSLAVIA])
in_slovakia = c.satisfy(TRAVERSIBLE_COUNTRY, [SLOVAKIA])
in_slovenia = c.satisfy(TRAVERSIBLE_COUNTRY, [SLOVENIA])
years_in_spain = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Spain")], max_steps=5)
in_spain_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SPAIN, KINGDOM_OF_CASTILLE])
in_spain = logical_ors([in_spain_wkd, years_in_spain])
years_in_sweden = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Sweden")], max_steps=5)
in_sweden_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SWEDEN])
in_sweden = logical_ors([in_sweden_wkd, years_in_sweden])
years_in_switzerland = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Switzerland")], max_steps=5)
in_switzerland_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SWITZERLAND])
in_switzerland = logical_ors([in_switzerland_wkd, years_in_switzerland ])
in_turkey = c.satisfy(TRAVERSIBLE_COUNTRY, [TURKEY, OTTOMAN_EMPIRE])
in_ukraine = c.satisfy(TRAVERSIBLE_COUNTRY, [UKRAINE])
in_united_kingdom = c.satisfy(TRAVERSIBLE_COUNTRY,
[UNITED_KINGDOM, UNITED_KINGDOM_OLD, KINGDOM_OF_ENGLAND, KINGDOM_OF_GREAT_BRITAIN])
popes = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Popes")], max_steps=5)
in_vatican_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [VATICAN_CITY])
in_vatican = logical_ors([popes, in_vatican_wkd])
print("Artic and others")
ARCTIC = wkd(c,"Q25322")
INUIT = wkd(c,"Q189975")
FAROE_ISLANDS = wkd(c,"Q4628")
TONGA = wkd(c,"Q678")
in_faroe_islands_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Faroe Islands")], max_steps=5)
in_faroe_islands_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [FAROE_ISLANDS])
in_faroe_islands = logical_ors([in_faroe_islands_wkp, in_faroe_islands_wkd])
in_arctic = c.satisfy(TRAVERSIBLE_COUNTRY, [ARCTIC,INUIT])
in_tonga_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [TONGA])
in_tonga_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Tonga")], max_steps=5)
in_tonga = logical_ors([in_tonga_wkd,in_tonga_wkp])
is_unlocated = logical_ors([is_Wiki_Links,is_taxon])
is_unlocated_not = logical_negate(is_unlocated,[is_populated_place,
is_in_outer_space_not_earth,in_tanzania])
is_unlocated_only = logical_ors([is_unlocated_not,is_other])
COUNTRIES = [ALGERIA, ANGOLA, BENIN, BOTSWANA, BURKINA_FASO, BURUNDI, CAPE_VERDE, CAMEROON, CHAD,
CENTRAL_AFRICAN_REPUBLIC, COMOROS, DEMOCRATIC_REPUBLIC_OF_CONGO, REPUBLIC_OF_CONGO, DJIBOUTI,
EGYPT, EQUATORIAL_GUINEA, ERITREA, ETHIOPIA, GABON, THE_GAMBIA, GHANA, GUINEA, GUINEA_BISSAU, IVORY_COAST,
LESOTHO, KENYA, LIBERIA, LIBYA, MADAGASCAR, MALAWI, MALI, MAURITANIA,MAURITIUS, MOROCCO, MOZAMBIQUE,
NAMIBIA, NIGER, NIGERIA, RWANDA,SAHARI_ARAB_DEOMOCRATIC_REPUBLIC, SAO_TOME_AND_PRINCIPE, SENEGAL,
SEYCHELLES, SIERRA_LEONE, SOMALIA, SOMALI_LAND, SOUTH_AFRICA, SUDAN, TANZANIA, TOGO,
TUNISIA, UGANDA, WESTERN_SAHARA, ZAMBIA, ZIMBABWE,
AUSTRALIA, FIJI,INDONESIA,KIRIBATI, MARSHALL_ISLANDS,
FEDERATED_STATES_OF_MICRONESIA, NAURU, NEW_ZEALAND, PAPUA_NEW_GUINEA, SAMOA, SOLOMON_ISLANDS, VANUATU,
ARGENTINA, BOLIVIA, BRAZIL, CHILE, COLOMBIA, ECUADOR, GUYANA, PARAGUAY, PERU, SURINAME, TRINIDAD_AND_TOBAGO,
URUGUAY, VENEZUELA,
BELIZE, COSTA_RICA,EL_SALVADOR, GUATEMALA, HONDURAS, NICARAGUA, PANAMA,
ANTIGUA_BARBUDA, BAHAMAS, BARBADOS, CANADA, CUBA, DOMINICAN_REPUBLIC, GRENADA, GUATEMALA, HAITI, JAMAICA, MEXICO,
SAINT_KITTS_AND_NEVIS, SAINT_LUCIA, SAINT_VINCENT_AND_GRENADINES, UNITED_STATES,
ALBANIA, ANDORRA, ARMENIA, AUSTRIA, AUSTRIA_HUNGARY, AZERBAIJAN, BELARUS, BELGIUM, BOSNIA, BULGARIA, CROATIA,
CYPRUS,
CZECH_REPUBLIC, CZECHOSLOVAKIA,
DENMARK, ESTONIA, FINLAND, FRANCE, GEORGIA, GERMANY, GERMANY_NAZI, GREECE, HUNGARY, ICELAND,
IRELAND, ITALY, NATIONAL_FASCIST_PARTY, KINGDOM_OF_ITALY, ROMAN_EMPIRE,
KAZAKHSTAN, KOSOVO, LATVIA, LIECHTENSTEIN, LITHUANIA, LUXEMBOURG, MACEDONIA, MALTA,
MOLDOVA, MONACO, MONTENEGRO, NORWAY,
NETHERLANDS, KINGDOM_OF_NETHERLANDS, SOUTHERN_NETHERLANDS,
POLAND, PORTUGAL, ROMANIA,
RUSSIA, RUSSIAN_EMPIRE, SOVIET_UNION,
SAN_MARINO,
SERBIA, YOUGOSLAVIA,
SLOVAKIA,
SLOVENIA, SPAIN, SWEDEN, SWITZERLAND,
TURKEY, OTTOMAN_EMPIRE, UKRAINE,
UNITED_KINGDOM, UNITED_KINGDOM_OLD, KINGDOM_OF_ENGLAND, KINGDOM_OF_GREAT_BRITAIN,
AFGHANISTAN, BANGLADESH, BRUNEI, CAMBODIA, CHINA, CYPRUS, EAST_TIMOR, EGYPT, GEORGIA, INDIA, INDONESIA,
IRAN, IRAQ, ISRAEL, JAPAN, KAZAKHSTAN, KUWAIT, KYRGYZSTAN, LAOS, LEBANON, MALAYSIA, MALDIVES, MONGOLIA,
MYANMAR, NEPAL, NORTH_KOREA, OMAN, PALESTINE, PAKISTAN, PHILIPPINES, QATAR, SAUDI_ARABIA, SINGAPORE, SOUTH_KOREA, SRI_LANKA,
SYRIA, TAJIKISTAN, TAIWAN, THAILAND, TURKMENISTAN, UNITED_ARAB_EMIRATES, UZBEKISTAN, VIETNAM, YEMEN,
VATICAN_CITY,
ARCTIC, FAROE_ISLANDS, TONGA
]
located_somewhere_wkd = c.satisfy([wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY], COUNTRIES)
located_somewhere = logical_ors([located_somewhere_wkd, in_austria, in_afghanistan, in_china, in_france,
in_sweden, in_switzerland, in_germany, years_in_usa, in_greece,
in_south_korea, in_italy,
in_denmark, in_spain, in_iraq, in_egypt, in_vatican, in_canada,
in_faroe_islands, in_netherlands, in_russia, in_samoa, in_syria, in_tonga, in_zambia ])
is_unlocated_politician = logical_negate(is_politician,[located_somewhere])
is_unlocated_painter = logical_negate(is_painter, [located_somewhere])
is_unlocated_association_football_player = logical_negate(is_association_football_player, [located_somewhere])
return {
"Algeria": in_algeria,
"Angola": in_angola,
"Benin": in_benin,
"BOSTWANA": in_botswana,
"BURKINA_FASO": in_burkina_faso,
"BURUNDI": in_burundi,
"CAPE_VERDE": in_cape_verde,
"CAMEROON": in_cameroon,
"CHAD": in_chad,
"CENTRAL AFRICAN REPUBLIC": in_central_african_republic,
"COMOROS": in_comoros,
"DEMOCRATIC_REPUBLIC_OF_CONGO": in_democratic_republic_congo,
"REPUBLIC_OF_CONGO": in_republic_of_congo,
"DJIBOUTI": in_djibouti,
"EGYPT": in_egypt,
"EQUATORIAL_GUINEA": in_equatorial_guinea,
"ERITREA": in_eritrea,
"ETHIOPIA": in_ethiopia,
"GABON": in_gabon,
"THE_GAMBIA": in_the_gambia,
"GHANA": in_ghana,
"GUINEA": in_guinea,
"GUINEA_BISSAU": in_guinea_bissau,
"IVORY_COAST": in_ivory_coast,
"LESOTHO": in_lesotho,
"KENYA": in_kenya,
"LIBERIA": in_liberia,
"LIBYA": in_libya,
"Madagascar": in_madagascar,
"Malawi": in_malawi,
"Mali": in_mali,
"Mauritania": in_mauritania,
"Mauritius": in_mauritius,
"Morocco": in_morrocco,
"Mozambique": in_mozambique,
"Namibia": in_namibia,
"Niger": in_niger,
"Nigeria": in_nigeria,
"Rwanda": in_rwanda,
"Sahrawi_Arab_Democratic_Republic": in_sadr,
"Sao_Tome_and_Principe": in_stap,
"Senegal": in_senegal,
"Seychelles": in_seychelles,
"Sierra_Leone": in_sierra_leone,
"Somalia": in_somalia,
"Somalilandβ": in_somali_land,
"South_Africaβ": in_south_africa,
"South_Sudanβ": in_ssudan,
"Sudan": in_sudan,
"SWAZILAND": in_swaziland,
"TANZANIA": in_tanzania,
"TOGO": in_togo,
"TUNISIA": in_tunisia,
"Uganda": in_uganda,
"Western Sahara": in_western_sahara,
"Zambia": in_zambia,
"Zimbabwe": in_zimbabwe,
"AUSTRALIA": in_australia,
"FIJI": in_fiji,
"INDONESIA": in_indonesia,
"KIRIBATI": in_kiribati,
"MARSHALL_ISLANDS": in_marshall_islands,
"FEDERATED_STATES_OF_MICRONESIA": in_federates_states_of_micronesia,
"NAURU": in_nauru,
"NEW_ZEALAND": in_new_zealand,
"PAPUA_NEW_GUINEA": in_papua_new_guinea,
"SAMOA": in_samoa,
"SOLOMON_ISLANDS": in_solomon_islands,
"VANUATU": in_vanuatu,
"ARGENTINA": in_argentina,
"BOLIVIA": in_bolivia,
"BRAZIL": in_brazil,
"CHILE": in_chile,
"COLOMBIA": in_colombia,
"ECUADOR": in_ecuador,
"GUYANA": in_guyana,
"PARAGUAY": in_paraguay,
"PERU": in_peru,
"SURINAME": in_suriname,
"TRINIDAD_AND_TOBAGO": in_trinidad_and_tobago,
"URUGUAY": in_uruguay,
"VENEZUELA": in_venezuela,
"BELIZE": in_belize,
"COSTA_RICA": in_costa_rica,
"EL_SALVADOR": in_el_salvador,
"GUATEMALA": in_guatemala,
"HONDURAS": in_honduras,
"NICARAGUA": in_nicaragua,
"PANAMA": in_panama,
"ANTIGUA_BARBUDA": in_antigua_barbuda,
"BAHAMAS": in_bahamas,
"BARBADOS": in_barbados,
"CANADA": in_canada,
"CUBA": in_cuba,
"DOMINICAN REPUBLIC": in_dominican_republic,
"GRENADA": in_grenada,
"GUATEMALA": in_guatemala,
"HAITI": in_haiti,
"JAMAICA": in_jamaica,
"MEXICO": in_mexico,
"SAINT_KITTS_AND_NEVIS": in_Saint_Kitts_and_Nevis,
"SAINT_LUCIA": in_saint_lucia,
"SAINT_VINCENT_AND_GRENADINES": in_saint_vincent_and_grenadines,
"UNITED_STATES": in_united_states,
"ALBANIA": in_albania,
"ANDORRA": in_andorra,
"ARMENIA": in_armenia,
"AUSTRIA": in_austria,
"AZERBAIJAN": in_azerbaijan,
"BELARUS": in_belarus,
"BELGIUM": in_belgium,
"BOSNIA": in_bosnia,
"BULGARIA": in_bulgaria,
"CROATIA": in_croatia,
"CYPRUS": in_cyprus,
"CZECH REPUBLIC": in_czech_republic,
"DENMARK": in_denmark,
"ESTONIA": in_estonia,
"FINLAND": in_finland,
"FRANCE": in_france,
"GEORGIA": in_georgia,
"GERMANY": in_germany,
"GREECE": in_greece,
"HUNGARY": in_hungary,
"ICELAND": in_iceland,
"IRELAND": in_ireland,
"ITALY": in_italy,
"KAZAKHSTAN": in_kazakhstan,
"KOSOVO": in_kosovo,
"LATVIA": in_latvia,
"LIECHTENSTEIN": in_liectenstein,
"LITHUANIA": in_lithuania,
"LUXEMBOURG": in_luxembourg,
"MACEDONIA": in_macedonia,
"MALTA": in_malta,
"MOLDOVA": in_moldova,
"MONACO": in_monaco,
"MONTENEGRO": in_montenegro,
"NORWAY": in_norway,
"NETHERLANDS": in_netherlands,
"POLAND": in_poland,
"PORTUGAL": in_portugal,
"ROMANIA": in_romania,
"RUSSIA": in_russia,
"SAN MARINO": in_san_marino,
"SERBIA": in_serbia,
"SLOVAKIA": in_slovakia,
"SLOVENIA": in_slovenia,
"SPAIN": in_spain,
"SWEDEN": in_sweden,
"SWITZERLAND": in_switzerland,
"TURKEY": in_turkey,
"UKRAINE": in_ukraine,
"UNITED KINGDOM": in_united_kingdom,
"AFGHANISTAN": in_afghanistan,
"BANGLADESH": in_bangladesh,
"BHUTAN": in_bhutan,
"BRUNEI": in_brunei,
"CAMBODIA": in_cambodia,
"CHINA": in_china,
"CYPRUS": in_cyprus,
"EAST TIMOR": in_east_timor,
"EGYPT": in_egypt,
"GEORGIA": in_georgia,
"INDIA": in_india,
"INDONESIA": in_indonesia,
"IRAN": in_iran,
"IRAQ": in_iraq,
"ISRAEL": in_israel,
"JAPAN": in_japan,
"JORDAN": in_jordan,
"KAZAKHSTAN": in_kazakhstan,
"KUWAIT": in_kuwait,
"KYRGYZSTAN": in_kyrgyzstan,
"LAOS": in_laos,
"LEBANON": in_lebanon,
"MALAYSIA": in_malaysia,
"MALDIVES": in_maldives,
"MONGOLIA": in_mongolia,
"MYANMAR": in_myanmar,
"NEPAL": in_nepal,
"NORTH_KOREA": in_north_korea,
"OMAN": in_oman,
"PALESTINE": in_palestine,
"PAKISTAN": in_pakistan,
"PHILIPPINES": in_philippines,
"QATAR": in_qatar,
"SAUDI_ARABIA": in_saudi_arabia,
"SINGAPORE": in_singapore,
"SOUTH_KOREA": in_south_korea,
"SRI LANKA": in_sri_lanka,
"SYRIA": in_syria,
"TAJIKISTAN": in_tajikistan,
"TAIWAN": in_taiwan,
"THAILAND": in_thailand,
"TURKMENISTAN": in_turkmenistan,
"UNITED_ARAB_EMIRATES": in_united_arab_emirates,
"UZBEKISTAN": in_uzbekistan,
"VIETNAM": in_vietnam,
"YEMEN": in_yemen,
"OUTERSPACE": is_in_outer_space_not_earth,
"ARCTIC": in_arctic,
"FAROE_ISLANDS": in_faroe_islands,
"TONGA": in_tonga,
"UNLOCATED": is_unlocated_only,
"USA_ROADS": in_usa_roads,
"POLITICIAN": is_politician,
"UNLOCATED_POLITICIAN": is_unlocated_politician,
"UNLOCATED_PAINTER": is_unlocated_painter,
"UNLOCATED_ASSOCIATION_FOOTBALL_PLAYER": is_unlocated_association_football_player
}
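# Hedged sketch of a coverage check over the masks returned above (illustrative
# only; `c` is whatever collection object the caller already constructed):
#
#     report = classify(c)
#     for label, mask in sorted(report.items()):
#         print("%-45s %d" % (label, int(mask.sum())))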
|
"""
Create membership rules for entities based on their date of existence/birth/etc.
More classes can be created by selecting other key dates as hyperplanes.
"""
from numpy import (
logical_and, logical_or, logical_not, logical_xor, where
)
from wikidata_linker_utils.logic import logical_negate, logical_ors, logical_ands
import wikidata_linker_utils.wikidata_properties as wprop
def wkp(c, name):
"""Convert a string wikipedia article name to its Wikidata index."""
return c.article2id["enwiki/" + name][0][0]
def wkd(c, name):
"""Convert a wikidata QID to its wikidata index."""
return c.name2index[name]
def classify(c):
D1950 = 1950
pre_1950 = logical_ors([
c.attribute(wprop.PUBLICATION_DATE) < D1950,
c.attribute(wprop.DATE_OF_BIRTH) < D1950,
c.attribute(wprop.INCEPTION) < D1950,
c.attribute(wprop.DISSOLVED_OR_ABOLISHED) < D1950,
c.attribute(wprop.POINT_IN_TIME) < D1950,
c.attribute(wprop.START_TIME) < D1950
])
post_1950 = logical_and(logical_ors([
c.attribute(wprop.PUBLICATION_DATE) >= D1950,
c.attribute(wprop.DATE_OF_BIRTH) >= D1950,
c.attribute(wprop.INCEPTION) >= D1950,
c.attribute(wprop.DISSOLVED_OR_ABOLISHED) >= D1950,
c.attribute(wprop.POINT_IN_TIME) >= D1950,
c.attribute(wprop.START_TIME) >= D1950
]), logical_not(pre_1950))
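# Items with date attributes on both sides of 1950 end up in the pre-1950 class
# only, since post_1950 is AND-ed with the negation of pre_1950.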
# some elements are neither pre-1950 nor post-1950: they are "undated"
# (e.g. no value was provided for any of the time attributes used
# above)
undated = logical_and(logical_not(pre_1950), logical_not(post_1950))
print("%d items have no date information" % (undated.sum(),))
return {
"pre-1950": pre_1950,
"post-1950": post_1950
}
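# The docstring above suggests carving out more classes with other key dates. A
# hedged sketch of that extension (the helper name and cut-off are illustrative,
# it reuses the same six time attributes as `classify`, and nothing here calls it):
def dated_before(c, year):
    """Boolean mask of items with at least one known date attribute before `year`."""
    return logical_ors([
        c.attribute(wprop.PUBLICATION_DATE) < year,
        c.attribute(wprop.DATE_OF_BIRTH) < year,
        c.attribute(wprop.INCEPTION) < year,
        c.attribute(wprop.DISSOLVED_OR_ABOLISHED) < year,
        c.attribute(wprop.POINT_IN_TIME) < year,
        c.attribute(wprop.START_TIME) < year
    ])
# e.g. combining dated_before(c, 1850) with the masks above would yield
# "pre-1850", "1850-1950" and "post-1950" buckets.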
|
"""
Associate each entity with a single type (exclusive membership). The association
is imperfect (some false positives and false negatives), but the majority of
entities are covered, so a model can learn to predict several of the attributes
listed below.
"""
from numpy import (
logical_and, logical_or, logical_not, logical_xor, where
)
from wikidata_linker_utils.logic import logical_negate, logical_ors, logical_ands
import wikidata_linker_utils.wikidata_properties as wprop
def wkp(c, name):
return c.article2id['enwiki/' + name][0][0]
def wkd(c, name):
return c.name2index[name]
def classify(c):
TRAVERSIBLE = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF]
TRAVERSIBLE_LO = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF]
MALE = wkd(c,"Q6581097")
FEMALE = wkd(c,"Q6581072")
HUMAN = wkp(c, "Human")
TAXON = wkd(c, "Q16521")
HORSE = wkd(c, "Q726")
RACE_HORSE = wkd(c, "Q10855242")
FOSSIL_TAXON = wkd(c, "Q23038290")
MONOTYPIC_TAXON = wkd(c, "Q310890")
FOOD = wkp(c, "Food")
DRINK = wkp(c, "Drink")
BIOLOGY = wkp(c, "Biology")
GEOGRAPHICAL_OBJECT = wkd(c, "Q618123")
LOCATION_GEOGRAPHY = wkd(c, "Q2221906")
ORGANISATION = wkp(c, 'Organization')
MUSICAL_WORK = wkd(c, 'Q2188189')
AUDIO_VISUAL_WORK = wkd(c,'Q2431196')
ART_WORK = wkd(c,'Q838948')
PHYSICAL_OBJECT = wkp(c, "Physical body")
VALUE = wkd(c, 'Q614112')
TIME_INTERVAL = wkd(c, 'Q186081')
EVENT = wkd(c, 'Q1656682')
POPULATED_PLACE = wkd(c, 'Q486972')
ACTIVITY = wkd(c, "Q1914636")
PROCESS = wkd(c, "Q3249551")
BODY_OF_WATER = wkd(c, "Q15324")
PEOPLE = wkd(c, "Q2472587")
LANGUAGE = wkd(c, "Q34770")
ALPHABET = wkd(c, "Q9779")
SPEECH = wkd(c, "Q861911")
GAS = wkd(c, "Q11432")
CHEMICAL_COMPOUND = wkd(c, "Q11173")
DRUG = wkd(c, "Q8386")
GEOMETRIC_SHAPE = wkd(c, "Q815741")
MIND = wkd(c, "Q450")
TV_STATION = wkd(c, "Q1616075")
AWARD_CEREMONY = wkd(c, "Q4504495")
SONG = wkd(c, "Q7366")
SINGLE = wkd(c, "Q134556")
CHESS_OPENING = wkd(c, "Q103632")
BATTLE = wkd(c, "Q178561")
BLOCKADE = wkd(c, "Q273976")
MILITARY_OFFENSIVE = wkd(c, "Q2001676")
DEVELOPMENT_BIOLOGY = wkd(c, "Q213713")
UNIT_OF_MASS = wkd(c, "Q3647172")
WATERCOURSE = wkd(c, "Q355304")
VOLCANO = wkd(c, "Q8072")
LAKE = wkd(c, "Q23397")
SEA = wkd(c, "Q165")
BRAND = wkd(c, "Q431289")
AUTOMOBILE_MANUFACTURER = wkd(c, "Q786820")
MOUNTAIN = wkd(c, "Q8502")
MASSIF = wkd(c, "Q1061151")
WAR = wkd(c, "Q198")
CRIME = wkd(c, "Q83267")
GENE = wkd(c, "Q7187")
CHROMOSOME = wkd(c, "Q37748")
DISEASE = wkd(c, "Q12136")
ASTEROID = wkd(c, "Q3863")
COMET = wkd(c, "Q3559")
PLANET = wkd(c, "Q634")
GALAXY = wkd(c, "Q318")
ASTRONOMICAL_OBJECT = wkd(c, "Q6999")
FICTIONAL_ASTRONOMICAL_OBJECT = wkd(c, "Q15831598")
MATHEMATICAL_OBJECT = wkd(c, "Q246672")
REGION = wkd(c, "Q82794")
PHYSICAL_QUANTITY = wkd(c, "Q107715")
NUMBER = wkd(c, "Q11563")
NATURAL_PHENOMENON = wkd(c, "Q1322005")
GEOLOGICAL_FORMATION = wkd(c, "Q736917")
CURRENCY = wkd(c, "Q8142")
MONEY = wkd(c, "Q1368")
LANDFORM = wkd(c, "Q271669")
COUNTRY = wkd(c, "Q6256")
FICTIONAL_HUMAN = wkd(c, "Q15632617")
AWARD = wkd(c, "Q618779")
RELIGIOUS_TEXT = wkd(c, "Q179461")
OCCUPATION = wkd(c, "Q12737077")
PROFESSION = wkd(c, "Q28640")
POSITION = wkd(c, "Q4164871")
RELIGION = wkd(c, "Q9174")
SOFTWARE = wkd(c, "Q7397")
ELECTRONIC_GAME = wkd(c, "Q2249149")
GAME = wkd(c, "Q11410")
VIDEO_GAME_FRANCHISES = wkd(c, "Q7213857")
TRAIN_STATION = wkd(c, "Q55488")
BRIDGE = wkd(c, "Q12280")
AIRPORT = wkd(c, "Q62447")
SURNAME = wkd(c, "Q101352")
GIVEN_NAME = wkd(c, "Q202444")
FEMALE_GIVEN_NAME = wkd(c, "Q11879590")
MALE_GIVEN_NAME = wkd(c, "Q12308941")
GIVEN_NAME = wkd(c, "Q202444")
MOLECULE = wkd(c, "Q11369")
PROTEIN_FAMILY = wkd(c, "Q417841")
PROTEIN_DOMAIN = wkd(c, "Q898273")
MULTIPROTEIN_COMPLEX = wkd(c, "Q420927")
LAW = wkd(c, "Q7748")
VEHICLE = wkd(c, "Q42889")
MODE_OF_TRANSPORT = wkd(c, "Q334166")
WATERCRAFT = wkd(c, "Q1229765")
AIRCRAFT = wkd(c, "Q11436")
ROAD_VEHICLE = wkd(c, "Q1515493")
AUTOMOBILE_MODEL = wkd(c, "Q3231690")
AUTOMOBILE = wkd(c, "Q1420")
TRUCK = wkd(c, "Q43193")
MOTORCYCLE_MODEL = wkd(c, "Q23866334")
TANK = wkd(c, "Q12876")
FIRE_ENGINE = wkd(c, "Q208281")
AMBULANCE = wkd(c, "Q180481")
RAILROAD = wkd(c, "Q22667")
RADIO_PROGRAM = wkd(c, "Q1555508")
DISCOGRAPHY = wkd(c, "Q273057")
WEBSITE = wkd(c, "Q35127")
WEAPON = wkd(c, "Q728")
PUBLICATION = wkd(c, "Q732577")
ARTICLE = wkd(c, "Q191067")
FAMILY = wkd(c, "Q8436")
FICTIONAL_CHARACTER = wkd(c, "Q95074")
FACILITY = wkd(c, "Q13226383")
CONCEPT = wkd(c, "Q151885")
PROVERB = wkd(c, "Q35102")
ANATOMICAL_STRUCTURE = wkd(c, "Q4936952")
BREED = wkd(c, "Q38829")
PLANT_STRUCTURE = wkd(c, "Q25571752")
PLANT = wkd(c, "Q756")
SPECIAL_FIELD = wkd(c, "Q1047113")
ACADEMIC_DISCIPLINE = wkd(c, "Q11862829")
TERM = wkd(c, "Q1969448")
SEXUAL_ORIENTATION = wkd(c, "Q17888")
PARADIGM = wkd(c, "Q28643")
LEGAL_CASE = wkd(c, "Q2334719")
SPORT = wkd(c, "Q349")
RECURRING_SPORTING_EVENT = wkd(c, "Q18608583")
ART_GENRE = wkd(c, "Q1792379")
SPORTING_EVENT = wkd(c, "Q16510064")
COMIC = wkd(c, "Q1004")
CHARACTER = wkd(c, "Q3241972")
PERSON = wkd(c, "Q215627")
NATIONAL_HERITAGE_SITE = wkd(c, "Q358")
ESTATE = wkd(c, "Q2186896")
ELECTION = wkd(c, "Q40231")
LEGISLATIVE_TERM = wkd(c, "Q15238777")
COMPETITION = wkd(c, "Q476300")
LEGAL_ACTION = wkd(c, "Q27095657")
SEX_TOY = wkd(c, "Q10816")
MONUMENT = wkd(c, "Q4989906")
ASSOCIATION_FOOTBALL_POSITION = wkd(c, "Q4611891")
# ICE_HOCKEY_POSITION = wkd(c, "Q18533987")
# PART_OF_LAND = wkd(c, "Q23001306")
MUSIC_DOWNLOAD = wkd(c, "Q6473564")
OCCUPATION = wkd(c, "Q12737077")
KINSHIP = wkd(c, "Q171318")
KIN = wkd(c, "Q21073947")
PSEUDONYM = wkd(c, "Q61002")
STOCK_CHARACTER = wkd(c, "Q162244")
TITLE = wkd(c, "Q4189293")
DATA_FORMAT = wkd(c, "Q494823")
ELECTROMAGNETIC_WAVE = wkd(c, "Q11386")
POSTAL_CODE = wkd(c, "Q37447")
CLOTHING = wkd(c, "Q11460")
NATIONALITY = wkd(c, "Q231002")
BASEBALL_POSITION = wkd(c, "Q1151733")
AMERICAN_FOOTBALL_POSITIONS = wkd(c, "Q694589")
POSITION_TEAM_SPORTS = wkd(c, "Q1781513")
FILE_FORMAT_FAMILY = wkd(c, "Q26085352")
FILE_FORMAT = wkd(c, "Q235557")
TAXONOMIC_RANK = wkd(c, "Q427626")
ORDER_HONOUR = wkd(c, "Q193622")
BRANCH_OF_SCIENCE = wkd(c, "Q2465832")
RESEARCH = wkd(c, "Q42240")
METHOD = wkd(c, "Q1799072")
ALGORITHM = wkd(c, "Q8366")
PROPOSITION = wkd(c, "Q108163")
SPORTSPERSON = wkd(c, "Q2066131")
LAKES_MINESOTTA = wkd(c, "Q8580663")
NAMED_PASSENGER_TRAIN_INDIA = wkd(c, "Q9260591")
TOWNSHIPS_MISOURI = wkd(c, "Q8861637")
RACE_ETHNICITY_USA = wkd(c, "Q2035701")
RECORD_CHART = wkd(c, "Q373899")
SINGLE_ENGINE_AIRCRAFT = wkd(c, "Q7405339")
SIGNIFICANT_OTHER = wkd(c, "Q841509")
BILLBOARDS = wkd(c, "Q19754079")
RADIO_STATION = wkd(c, "Q19754079")
RADIO_STATION2 = wkd(c, "Q1474493")
NOBLE_TITLE = wkd(c, "Q216353")
HOUSES_NATIONAL_REGISTER_ARKANSAS = wkd(c, "Q8526394")
CLADE = wkd(c, "Q713623")
BOARD_GAMES = wkd(c, "Q131436")
CLAN = wkd(c, "Q211503")
ACCIDENT = wkd(c, "Q171558")
MASSACRE = wkd(c, "Q3199915")
TORNADO = wkd(c, "Q8081")
NATURAL_DISASTER = wkd(c, "Q8065")
SPORTS_TEAM = wkd(c, "Q12973014")
BAND_ROCK_AND_POP = wkd(c, "Q215380")
ORGANIZATION_OTHER = wkd(c, "Q43229")
POLITICAL_PARTY = wkd(c, "Q7278")
SPECIES = wkd(c, "Q7432")
CHEMICAL_SUBSTANCE = wkd(c, "Q79529")
THREATENED_SPECIES = wkd(c, "Q515487")
HYPOTHETICAL_SPECIES = wkd(c, "Q5961273")
CONFLICT = wkd(c, "Q180684")
PRIVATE_USE_AREAS = wkd(c, "Q11152836")
BARONETCIES_IN_UK = wkd(c, "Q8290061")
EXTINCT_BARONETCIES_ENGLAND = wkd(c, "Q8432223")
EXTINCT_BARONETCIES_UK = wkd(c, "Q8432226")
WIKIPEDIA_DISAMBIGUATION = wkd(c, "Q4167410")
WIKIPEDIA_TEMPLATE_NAMESPACE = wkd(c, "Q11266439")
WIKIPEDIA_LIST = wkd(c, "Q13406463")
WIKIPEDIA_PROJECT_PAGE = wkd(c, "Q14204246")
WIKIMEDIA_CATEGORY_PAGE = wkd(c, "Q4167836")
WIKIPEDIA_USER_LANGUAGE_TEMPLATE = wkd(c, "Q19842659")
WIKIDATA_PROPERTY = wkd(c, "Q18616576")
COLLEGIATE_ATHLETICS_PROGRAM = wkd(c, "Q5146583")
SPORTS_TRANSFER_AF = wkd(c, "Q1811518")
DEMOGRAPHICS_OF_NORWAY = wkd(c, "Q7664203")
DOCUMENT = wkd(c, "Q49848")
BASIC_STAT_UNIT_NORWAY = wkd(c, "Q4580177")
PUBLIC_TRANSPORT = wkd(c, "Q178512")
HAZARD = wkd(c, "Q1132455")
BASEBALL_RULES = wkd(c, "Q1153773")
HIT_BASEBALL = wkd(c, "Q713493")
OUT_BASEBALL = wkd(c, "Q1153773")
LAWS_OF_ASSOCIATION_FOOTBALL = wkd(c, "Q7215850")
CRICKET_LAWS_AND_REGULATION = wkd(c, "Q8427034")
MEASUREMENTS_OF_POVERTY = wkd(c, "Q8614855")
PROFESSIONAL_WRESTLING_MATCH_TYPES = wkd(c, "Q679633")
CITATION = wkd(c, "Q1713")
INTERNATIONAL_RELATIONS = wkd(c, "Q166542")
WORLD_VIEW = wkd(c, "Q49447")
ROCK_GEOLOGY = wkd(c, "Q8063")
BASEBALL_STATISTIC = wkd(c, "Q8291081")
BASEBALL_STATISTICS = wkd(c, "Q809898")
TRAIN_ACCIDENT = wkd(c, "Q1078765")
CIRCUS_SKILLS = wkd(c, "Q4990963")
FOLKLORE = wkd(c, "Q36192")
NEWS_BUREAU = wkd(c, "Q19824398")
RECESSION = wkd(c, "Q176494")
NYC_BALLET = wkd(c, "Q1336942")
SPORTS_RECORD = wkd(c, "Q1241356")
WINGSPAN = wkd(c, "Q245097")
WIN_LOSS_RECORD_PITCHING = wkd(c, "Q1202506")
CRICKET_TERMINOLOGY = wkd(c, "Q8427141")
UNION_ARMY = wkd(c, "Q1752901")
POPULATION = wkd(c, "Q33829")
WIND = wkd(c, "Q8094")
TORPEDO_TUBE = wkd(c, "Q1330003")
WEAPONS_PLATFORM = wkd(c, "Q7978115")
COLOR = wkd(c, "Q1075")
SOCIAL_SCIENCE = wkd(c, "Q34749")
DISCIPLINE_ACADEMIA = wkd(c, "Q11862829")
FORMAL_SCIENCE = wkd(c, "Q816264")
ASPHALT = wkd(c, "Q167510")
TALK_RADIO = wkd(c, "Q502319")
ART_MOVEMENT = wkd(c, "Q968159")
IDEOLOGY = wkd(c, "Q7257")
# print([c.get_name(idx) for idx in c.relation(wprop.INSTANCE_OF)[wkd(c, "Q14934048")]])
# print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.INSTANCE_OF)[wkd(c, "Q14934048")]])
# print([c.get_name(idx) for idx in c.relation(wprop.PART_OF)[wkd(c, "Q14934048")]])
# print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.PART_OF)[wkd(c, "Q14934048")]])
# print([c.get_name(idx) for idx in c.relation(wprop.SUBCLASS_OF)[wkd(c, "Q14934048")]])
# print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.SUBCLASS_OF)[wkd(c, "Q14934048")]])
# print([c.get_name(idx) for idx in c.relation(wprop.CATEGORY_LINK)[wkd(c, "Q14934048")]])
# print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.CATEGORY_LINK)[wkd(c, "Q14934048")]])
is_sports_terminology = logical_or(
c.satisfy(TRAVERSIBLE_LO, [OUT_BASEBALL, HIT_BASEBALL]),
c.satisfy(
[wprop.CATEGORY_LINK],
[
BASEBALL_RULES,
LAWS_OF_ASSOCIATION_FOOTBALL,
CRICKET_LAWS_AND_REGULATION,
PROFESSIONAL_WRESTLING_MATCH_TYPES,
CRICKET_TERMINOLOGY
],
max_steps=1
)
)
is_accident = c.satisfy(TRAVERSIBLE_LO, [ACCIDENT])
is_taxon = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF],
[
TAXON, FOSSIL_TAXON, MONOTYPIC_TAXON, HORSE, RACE_HORSE, CLADE, SPECIES,
THREATENED_SPECIES, HYPOTHETICAL_SPECIES
]
)
is_breed = c.satisfy(TRAVERSIBLE_LO, [BREED])
is_taxon_or_breed = logical_or(is_taxon, is_breed)
is_human = c.satisfy(TRAVERSIBLE_LO, [HUMAN, FICTIONAL_HUMAN])
is_country = c.satisfy(TRAVERSIBLE_LO, [COUNTRY])
is_people = c.satisfy(
TRAVERSIBLE_LO,
[
PEOPLE,
NATIONALITY,
SPORTS_TRANSFER_AF,
POPULATION
]
)
is_populated_place = logical_or(
c.satisfy(TRAVERSIBLE_LO, [POPULATED_PLACE]),
c.satisfy([wprop.CATEGORY_LINK], [TOWNSHIPS_MISOURI], max_steps=1)
)
is_organization = c.satisfy(
TRAVERSIBLE_LO,
[
POLITICAL_PARTY,
COLLEGIATE_ATHLETICS_PROGRAM,
ORGANIZATION_OTHER,
ORGANISATION,
SPORTS_TEAM,
BAND_ROCK_AND_POP,
NEWS_BUREAU,
NYC_BALLET,
UNION_ARMY
]
)
is_position = c.satisfy(
TRAVERSIBLE_LO,
[
POSITION,
OCCUPATION,
POSITION_TEAM_SPORTS,
AMERICAN_FOOTBALL_POSITIONS,
ASSOCIATION_FOOTBALL_POSITION,
BASEBALL_POSITION,
# ICE_HOCKEY_POSITION,
SPORTSPERSON
]
)
is_kinship = c.satisfy(TRAVERSIBLE_LO, [KINSHIP])
is_kin = c.satisfy([wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF], [KIN])
is_title = logical_or(
c.satisfy(TRAVERSIBLE_LO, [TITLE, NOBLE_TITLE]),
c.satisfy([wprop.CATEGORY_LINK], [BARONETCIES_IN_UK, EXTINCT_BARONETCIES_UK, EXTINCT_BARONETCIES_ENGLAND], max_steps=1)
)
is_art_work = c.satisfy(TRAVERSIBLE_LO, [ART_WORK, COMIC])
is_audio_visual_work = c.satisfy(TRAVERSIBLE_LO, [AUDIO_VISUAL_WORK, TV_STATION])
is_fictional_character = c.satisfy(TRAVERSIBLE_LO, [FICTIONAL_CHARACTER])
is_name = c.satisfy(TRAVERSIBLE_LO, [GIVEN_NAME, SURNAME, FEMALE_GIVEN_NAME, MALE_GIVEN_NAME, PSEUDONYM])
is_stock_character = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF], [STOCK_CHARACTER])
is_family = c.satisfy(TRAVERSIBLE_LO, [FAMILY, CLAN])
is_award = c.satisfy(TRAVERSIBLE_LO, [AWARD])
is_electromagnetic_wave = c.satisfy(TRAVERSIBLE_LO, [ELECTROMAGNETIC_WAVE])
is_geographical_object = c.satisfy(
TRAVERSIBLE_LO,
[
GEOGRAPHICAL_OBJECT,
BODY_OF_WATER,
LOCATION_GEOGRAPHY,
GEOLOGICAL_FORMATION,
NATIONAL_HERITAGE_SITE,
ESTATE,
# PART_OF_LAND,
PRIVATE_USE_AREAS
]
)
is_postal_code = c.satisfy(TRAVERSIBLE_LO, [POSTAL_CODE])
is_person = c.satisfy(TRAVERSIBLE_LO, [PERSON])
is_person_only = logical_or(
logical_negate(
is_person,
[
is_human,
is_people,
is_populated_place,
is_organization,
is_position,
is_title,
is_kinship,
is_kin,
is_country,
is_geographical_object,
is_art_work,
is_audio_visual_work,
is_fictional_character,
is_name,
is_family,
is_award
]
), is_stock_character)
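# This is the exclusivity recipe the module docstring refers to: take the broad
# PERSON mask, subtract every class that should win instead (humans, peoples,
# populated places, organizations, positions, names, ...), then OR stock
# characters back in. The same negate-then-merge pattern recurs below for the
# other *_only masks (languages, alphabets, chemical compounds, gases).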
is_male = c.satisfy([wprop.SEX_OR_GENDER], [MALE])
is_female = c.satisfy([wprop.SEX_OR_GENDER], [FEMALE])
is_human_male = logical_and(is_human, is_male)
is_human_female = logical_and(is_human, is_female)
is_musical_work = c.satisfy(TRAVERSIBLE_LO, [MUSICAL_WORK, DISCOGRAPHY])
is_song = c.satisfy(TRAVERSIBLE_LO, [SONG, SINGLE])
is_radio_program = c.satisfy(
TRAVERSIBLE_LO,
[
RADIO_PROGRAM,
RADIO_STATION,
RADIO_STATION2,
TALK_RADIO
]
)
is_sexual_orientation = c.satisfy(TRAVERSIBLE_LO, [SEXUAL_ORIENTATION])
is_taxonomic_rank = c.satisfy([wprop.INSTANCE_OF], [TAXONOMIC_RANK])
is_order = c.satisfy(TRAVERSIBLE_LO, [ORDER_HONOUR])
is_train_station = c.satisfy(TRAVERSIBLE_LO, [TRAIN_STATION])
is_bridge = c.satisfy(TRAVERSIBLE_LO, [BRIDGE])
is_airport = c.satisfy(TRAVERSIBLE_LO, [AIRPORT])
is_sex_toy = c.satisfy(TRAVERSIBLE_LO, [SEX_TOY])
is_monument = c.satisfy(TRAVERSIBLE_LO, [MONUMENT])
is_physical_object = c.satisfy(
TRAVERSIBLE_LO,
[
PHYSICAL_OBJECT,
BOARD_GAMES,
ELECTRONIC_GAME,
GAME,
ROCK_GEOLOGY,
ASPHALT
]
)
is_clothing = c.satisfy(TRAVERSIBLE_LO, [CLOTHING])
is_mathematical_object = c.satisfy(TRAVERSIBLE_LO, [MATHEMATICAL_OBJECT])
is_physical_quantity = logical_or(
c.satisfy(
TRAVERSIBLE_LO,
[
PHYSICAL_QUANTITY,
BASIC_STAT_UNIT_NORWAY,
SPORTS_RECORD,
WINGSPAN,
WIN_LOSS_RECORD_PITCHING,
BASEBALL_STATISTICS
]
),
c.satisfy(
[wprop.CATEGORY_LINK],
[
DEMOGRAPHICS_OF_NORWAY,
MEASUREMENTS_OF_POVERTY,
BASEBALL_STATISTIC
],
max_steps=1
)
)
is_number = c.satisfy(TRAVERSIBLE_LO, [NUMBER])
is_astronomical_object = c.satisfy(
TRAVERSIBLE_LO,
[
ASTEROID,
COMET,
PLANET,
GALAXY,
ASTRONOMICAL_OBJECT,
FICTIONAL_ASTRONOMICAL_OBJECT
]
)
is_hazard = c.satisfy(TRAVERSIBLE_LO, [HAZARD, TRAIN_ACCIDENT])
is_date = c.satisfy(TRAVERSIBLE_LO, [TIME_INTERVAL])
is_algorithm = c.satisfy(TRAVERSIBLE_LO, [ALGORITHM])
is_value = c.satisfy(TRAVERSIBLE_LO, [VALUE])
is_currency = c.satisfy(TRAVERSIBLE_LO, [CURRENCY, MONEY])
is_event = c.satisfy(TRAVERSIBLE_LO, [EVENT, RECESSION])
is_election = c.satisfy(TRAVERSIBLE_LO, [ELECTION])
is_legislative_term = c.satisfy(TRAVERSIBLE_LO, [LEGISLATIVE_TERM])
is_activity = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF], [ACTIVITY, MUSIC_DOWNLOAD, CIRCUS_SKILLS])
is_activity_subclass = c.satisfy([wprop.SUBCLASS_OF], [ACTIVITY, MUSIC_DOWNLOAD, CIRCUS_SKILLS])
is_food = c.satisfy([wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF], [FOOD, DRINK])
is_wikidata_prop = c.satisfy(TRAVERSIBLE_LO, [WIKIDATA_PROPERTY])
is_wikipedia_disambiguation = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_DISAMBIGUATION])
is_wikipedia_template_namespace = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_TEMPLATE_NAMESPACE])
is_wikipedia_list = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_LIST])
is_wikipedia_project_page = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_PROJECT_PAGE])
is_wikipedia_user_language_template = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_USER_LANGUAGE_TEMPLATE])
is_wikimedia_category_page = c.satisfy([wprop.INSTANCE_OF], [WIKIMEDIA_CATEGORY_PAGE])
is_legal_case = c.satisfy(TRAVERSIBLE_LO, [LEGAL_CASE])
is_sport = c.satisfy(TRAVERSIBLE_LO, [SPORT])
is_data_format = c.satisfy(TRAVERSIBLE_LO, [DATA_FORMAT, FILE_FORMAT_FAMILY, FILE_FORMAT])
is_research_method = c.satisfy(TRAVERSIBLE_LO, [RESEARCH, METHOD, RACE_ETHNICITY_USA])
is_proposition = c.satisfy(TRAVERSIBLE_LO, [PROPOSITION])
is_record_chart = c.satisfy(TRAVERSIBLE_LO, [RECORD_CHART, BILLBOARDS])
is_international_relations = c.satisfy(TRAVERSIBLE_LO, [INTERNATIONAL_RELATIONS])
is_union = c.satisfy(TRAVERSIBLE_LO, [SIGNIFICANT_OTHER])
is_recurring_sporting_event = c.satisfy(
TRAVERSIBLE_LO,
[RECURRING_SPORTING_EVENT]
)
is_sport_event = logical_or(
logical_and(
is_sport,
c.satisfy([wprop.PART_OF, wprop.IS_A_LIST_OF], where(is_recurring_sporting_event)[0])
),
c.satisfy(TRAVERSIBLE_LO, [SPORTING_EVENT, COMPETITION])
)
is_genre = c.satisfy(TRAVERSIBLE_LO, [ART_GENRE, ART_MOVEMENT])
is_landform = c.satisfy(TRAVERSIBLE_LO, [LANDFORM])
is_language = c.satisfy(TRAVERSIBLE_LO, [LANGUAGE])
is_alphabet = c.satisfy(TRAVERSIBLE_LO, [ALPHABET])
is_railroad = logical_or(
c.satisfy(TRAVERSIBLE_LO, [RAILROAD]),
c.satisfy([wprop.CATEGORY_LINK], [NAMED_PASSENGER_TRAIN_INDIA], max_steps=1)
)
is_speech = c.satisfy(TRAVERSIBLE_LO, [SPEECH])
is_language_only = logical_negate(is_language, [is_speech])
is_alphabet_only = logical_negate(is_alphabet, [is_speech, is_language])
is_war = c.satisfy(TRAVERSIBLE_LO, [WAR])
is_battle = c.satisfy(TRAVERSIBLE_LO, [BATTLE, BLOCKADE, MILITARY_OFFENSIVE, CONFLICT, MASSACRE])
is_crime = c.satisfy(TRAVERSIBLE_LO, [CRIME])
is_gas = c.satisfy(TRAVERSIBLE_LO, [GAS])
is_chemical_compound = c.satisfy(TRAVERSIBLE_LO, [CHEMICAL_COMPOUND, DRUG, CHEMICAL_SUBSTANCE])
is_chemical_compound_only = logical_negate(is_chemical_compound, [is_food])
is_gas_only = logical_negate(is_gas, [is_chemical_compound])
is_geometric_shape = c.satisfy(TRAVERSIBLE_LO, [GEOMETRIC_SHAPE])
is_award_ceremony = c.satisfy(TRAVERSIBLE_LO, [AWARD_CEREMONY])
is_strategy = c.satisfy(TRAVERSIBLE_LO, [CHESS_OPENING])
is_gene = c.satisfy(TRAVERSIBLE_LO, [GENE, CHROMOSOME])
is_character = c.satisfy(TRAVERSIBLE_LO, [CHARACTER])
is_law = c.satisfy(TRAVERSIBLE_LO, [LAW])
is_legal_action = c.satisfy(TRAVERSIBLE_LO, [LEGAL_ACTION])
is_facility = logical_or(
c.satisfy(TRAVERSIBLE_LO, [FACILITY]),
c.satisfy([wprop.CATEGORY_LINK], [HOUSES_NATIONAL_REGISTER_ARKANSAS], max_steps=1)
)
is_molecule = c.satisfy(TRAVERSIBLE_LO, [MOLECULE, PROTEIN_FAMILY, PROTEIN_DOMAIN, MULTIPROTEIN_COMPLEX])
is_disease = c.satisfy(TRAVERSIBLE_LO, [DISEASE])
is_mind = c.satisfy(TRAVERSIBLE_LO, [MIND])
is_religion = c.satisfy(TRAVERSIBLE_LO, [RELIGION])
is_natural_phenomenon = c.satisfy(TRAVERSIBLE_LO, [NATURAL_PHENOMENON, NATURAL_DISASTER, WIND])
is_anatomical_structure = c.satisfy(TRAVERSIBLE_LO, [ANATOMICAL_STRUCTURE])
is_plant = c.satisfy(TRAVERSIBLE_LO + [wprop.PARENT_TAXON], [PLANT_STRUCTURE, PLANT])
is_region = c.satisfy(TRAVERSIBLE_LO, [REGION])
is_software = logical_or(
c.satisfy(TRAVERSIBLE_LO, [SOFTWARE]),
c.satisfy([wprop.CATEGORY_LINK], [VIDEO_GAME_FRANCHISES], max_steps=1)
)
is_website = c.satisfy(TRAVERSIBLE_LO, [WEBSITE])
is_river = logical_and(c.satisfy(TRAVERSIBLE_LO, [WATERCOURSE]), is_geographical_object)
is_lake = logical_or(
logical_and(c.satisfy(TRAVERSIBLE_LO, [LAKE]), is_geographical_object),
c.satisfy([wprop.CATEGORY_LINK], [LAKES_MINESOTTA], max_steps=1)
)
is_sea = logical_and(c.satisfy(TRAVERSIBLE_LO, [SEA]), is_geographical_object)
is_volcano = logical_and(c.satisfy(TRAVERSIBLE_LO, [VOLCANO]), is_geographical_object)
is_development_biology = c.satisfy([wprop.PART_OF, wprop.SUBCLASS_OF, wprop.INSTANCE_OF], [DEVELOPMENT_BIOLOGY, BIOLOGY])
is_unit_of_mass = c.satisfy(TRAVERSIBLE_LO, [UNIT_OF_MASS])
is_vehicle = c.satisfy(TRAVERSIBLE_LO, [VEHICLE, MODE_OF_TRANSPORT, PUBLIC_TRANSPORT])
is_watercraft = c.satisfy(TRAVERSIBLE_LO, [WATERCRAFT])
is_aircraft = logical_or(
c.satisfy(TRAVERSIBLE_LO, [AIRCRAFT]),
c.satisfy([wprop.CATEGORY_LINK], [SINGLE_ENGINE_AIRCRAFT], max_steps=1)
)
is_road_vehicle = c.satisfy(
TRAVERSIBLE_LO,
[
ROAD_VEHICLE,
TANK,
FIRE_ENGINE,
AMBULANCE,
AUTOMOBILE_MODEL,
MOTORCYCLE_MODEL
]
)
is_weapon = c.satisfy(TRAVERSIBLE_LO, [WEAPON, TORPEDO_TUBE, WEAPONS_PLATFORM])
is_book_magazine_article_proverb = c.satisfy(
TRAVERSIBLE_LO,
[
PUBLICATION,
ARTICLE,
RELIGIOUS_TEXT,
PROVERB,
DOCUMENT,
CITATION,
FOLKLORE
]
)
is_brand = c.satisfy(TRAVERSIBLE_LO, [BRAND])
is_concept = logical_or(
c.satisfy([wprop.INSTANCE_OF],
[TERM, ACADEMIC_DISCIPLINE, SPECIAL_FIELD, BRANCH_OF_SCIENCE, WORLD_VIEW]
),
c.satisfy([wprop.SUBCLASS_OF], [SOCIAL_SCIENCE, DISCIPLINE_ACADEMIA, FORMAL_SCIENCE, IDEOLOGY])
)
is_color = c.satisfy(TRAVERSIBLE_LO, [COLOR])
is_paradigm = c.satisfy(TRAVERSIBLE_LO, [PARADIGM])
is_vehicle_brand = logical_or(
logical_and(c.satisfy([wprop.PRODUCT_OR_MATERIAL_PRODUCED], [AUTOMOBILE, TRUCK]), is_brand),
c.satisfy(TRAVERSIBLE_LO, [AUTOMOBILE_MANUFACTURER])
)
is_mountain_massif = logical_and(c.satisfy(TRAVERSIBLE_LO, [MOUNTAIN, MASSIF]), is_geographical_object)
is_mountain_only = logical_negate(
is_mountain_massif,
[
is_volcano
]
)
is_physical_object_only = logical_negate(
is_physical_object,
[
is_audio_visual_work,
is_art_work,
is_musical_work,
is_geographical_object,
is_currency,
is_gas,
is_clothing,
is_chemical_compound,
is_electromagnetic_wave,
is_song,
is_food,
is_character,
is_law,
is_software,
is_website,
is_vehicle,
is_lake,
is_landform,
is_railroad,
is_airport,
is_aircraft,
is_watercraft,
is_sex_toy,
is_data_format,
is_date,
is_research_method,
is_sport,
is_brand,
is_vehicle_brand,
is_road_vehicle,
is_radio_program,
is_weapon,
is_book_magazine_article_proverb,
is_organization,
is_facility,
is_anatomical_structure,
is_gene,
is_monument
]
)
is_musical_work_only = logical_negate(
is_musical_work,
[
is_song
]
)
is_geographical_object_only = logical_negate(
is_geographical_object,
[
is_river,
is_lake,
is_sea,
is_volcano,
is_mountain_only,
is_region,
is_monument,
is_country,
is_facility,
is_food,
is_airport,
is_bridge,
is_train_station
]
)
is_event_election_only = logical_negate(
logical_ors([is_event, is_election, is_accident]),
[
is_award_ceremony,
is_war,
is_natural_phenomenon
]
)
is_region_only = logical_negate(
is_region,
[
is_populated_place,
is_country,
is_lake,
is_river,
is_sea,
is_volcano,
is_mountain_only
]
)
is_astronomical_object_only = logical_negate(
is_astronomical_object,
[
is_geographical_object
]
)
is_date_only = logical_negate(
is_date,
[
is_strategy,
is_development_biology
]
)
is_development_biology_date = logical_and(is_development_biology, is_date)
is_value_only = logical_negate(
is_value,
[
is_unit_of_mass,
is_event,
is_election,
is_currency,
is_number,
is_physical_quantity,
is_award,
is_date,
is_postal_code
]
)
is_activity_subclass_only = logical_negate(
logical_or(is_activity_subclass, is_activity),
[
is_crime,
is_war,
is_chemical_compound,
is_gene,
is_molecule,
is_mathematical_object,
is_sport,
is_sport_event,
is_event,
is_paradigm,
is_position,
is_title,
is_algorithm,
is_order,
is_organization,
is_research_method,
is_proposition,
is_taxonomic_rank,
is_election,
is_genre,
is_concept
]
)
is_crime_only = logical_negate(
is_crime,
[
is_war
]
)
is_number_only = logical_negate(
is_number,
[
is_physical_quantity
]
)
is_molecule_only = logical_negate(
is_molecule,
[
is_gene,
is_chemical_compound
]
)
# VEHICLES:
is_vehicle_only = logical_negate(
is_vehicle,
[
is_watercraft,
is_aircraft,
is_road_vehicle
]
)
is_watercraft_only = logical_negate(
is_watercraft,
[
is_aircraft
]
)
is_road_vehicle_only = logical_negate(
is_road_vehicle,
[
is_aircraft,
is_watercraft,
]
)
# remove groups that have occupations from mathematical objects:
is_object_with_occupation = c.satisfy([wprop.INSTANCE_OF, wprop.OCCUPATION], [OCCUPATION, PROFESSION, POSITION])
is_mathematical_object_only = logical_negate(
is_mathematical_object,
[
is_geometric_shape,
is_physical_quantity,
is_number,
is_object_with_occupation,
is_landform
]
)
is_organization_only = logical_negate(
is_organization,
[
is_country,
is_geographical_object,
is_family,
is_people
]
)
is_art_work_only = logical_negate(
is_art_work,
[
is_musical_work,
is_audio_visual_work,
is_sex_toy,
is_monument
]
)
is_software_only = logical_negate(
is_software,
[
is_language,
is_organization,
is_website
]
)
is_website_only = logical_negate(
is_website,
[
is_organization,
is_language
]
)
is_taxon_or_breed_only = logical_negate(
is_taxon_or_breed,
[
is_human,
is_plant
]
)
is_human_only = logical_negate(
is_human,
[
is_male,
is_female,
is_kin,
is_kinship,
is_title
]
)
is_weapon_only = logical_negate(
is_weapon,
[
is_software,
is_website,
is_vehicle
]
)
is_book_magazine_article_proverb_only = logical_negate(
is_book_magazine_article_proverb,
[
is_software,
is_website,
is_musical_work,
is_song,
is_law,
is_legal_action
]
)
is_fictional_character_only = logical_negate(
is_fictional_character,
[
is_human,
is_stock_character
]
)
is_battle_only = logical_negate(
is_battle,
[
is_war,
is_crime
]
)
is_brand_only = logical_negate(
is_brand,
[
is_vehicle,
is_aircraft,
is_watercraft,
is_website,
is_software,
is_vehicle_brand
]
)
is_vehicle_brand_only = logical_negate(
is_vehicle_brand,
[
is_vehicle,
is_aircraft,
is_watercraft,
is_website,
is_software
]
)
is_concept_paradigm_proposition_only = logical_negate(
logical_ors([is_concept, is_paradigm, is_proposition]),
[
is_physical_object,
is_physical_quantity,
is_software,
is_website,
is_color,
is_vehicle,
is_electromagnetic_wave,
is_brand,
is_vehicle_brand,
is_currency,
is_fictional_character,
is_human,
is_aircraft,
is_geographical_object,
is_geometric_shape,
is_mathematical_object,
is_musical_work,
is_mountain_massif,
is_lake,
is_landform,
is_language,
is_anatomical_structure,
is_book_magazine_article_proverb,
is_development_biology,
is_plant,
is_sexual_orientation,
is_genre,
is_legislative_term
]
)
is_anatomical_structure_only = logical_negate(
is_anatomical_structure,
[
is_plant
]
)
is_facility_only = logical_negate(
is_facility,
[
is_train_station,
is_aircraft,
is_airport,
is_bridge,
is_vehicle,
is_astronomical_object,
is_railroad,
is_monument
]
)
is_wikipedia_list_only = logical_negate(
is_wikipedia_list,
[
is_activity_subclass,
is_alphabet,
is_art_work,
is_astronomical_object,
is_audio_visual_work,
is_award,
        is_character,
is_chemical_compound,
is_color,
is_currency,
is_disease,
is_election,
is_electromagnetic_wave,
is_facility,
is_fictional_character,
is_gene,
is_genre,
is_geographical_object,
is_human,
is_language,
        is_law,
is_legal_action,
is_legal_case,
is_legislative_term,
is_mathematical_object,
is_mind,
is_people,
        is_person,
is_physical_object,
is_populated_place,
is_position,
is_region,
is_religion,
is_research_method,
is_sexual_orientation,
is_software,
is_speech,
is_sport,
is_sport_event,
is_stock_character,
is_strategy,
is_taxon_or_breed,
is_value,
is_vehicle,
is_wikidata_prop,
is_weapon
]
)
is_sport_only = logical_negate(
is_sport,
[
is_sport_event
]
)
is_legal_action_only = logical_negate(
is_legal_action,
[
is_law,
is_election
]
)
is_genre_only = logical_negate(
is_genre,
[
is_physical_object,
is_audio_visual_work,
is_art_work,
is_book_magazine_article_proverb,
is_concept
]
)
is_plant_only = logical_negate(
is_plant,
[
is_food,
is_human,
is_organization
]
)
is_kinship_kin_only = logical_negate(
logical_or(is_kinship, is_kin),
[
is_family
]
)
is_position_only = logical_negate(
is_position,
[
is_organization,
is_human
]
)
is_radio_program_only = logical_negate(
is_radio_program,
[
is_audio_visual_work,
]
)
is_taxonomic_rank_only = logical_negate(
is_taxonomic_rank,
[
is_order
]
)
is_research_method_only = logical_negate(
is_research_method,
[
is_audio_visual_work,
is_book_magazine_article_proverb,
is_art_work,
is_concept,
is_crime,
is_war,
is_algorithm,
is_law,
is_legal_action,
is_legal_case
]
)
is_algorithm_only = logical_negate(
is_algorithm,
[
is_concept,
is_paradigm
]
)
is_union_only = logical_negate(
is_union,
[
is_kinship,
is_human,
is_person
]
)
# get all the wikidata items that are disconnected:
no_instance_subclass_or_cat_link = logical_ands(
[
c.relation(relation_name).edges() == 0
for relation_name in [wprop.PART_OF, wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.CATEGORY_LINK]
]
)
is_sports_terminology_only = logical_negate(
is_sports_terminology,
[
is_organization,
is_human,
is_person,
is_activity,
is_title,
is_physical_quantity
]
)
out = {
"aaa_wikidata_prop": is_wikidata_prop,
"aaa_wikipedia_disambiguation": is_wikipedia_disambiguation,
"aaa_wikipedia_template_namespace": is_wikipedia_template_namespace,
"aaa_wikipedia_user_language_template": is_wikipedia_user_language_template,
"aaa_wikipedia_list": is_wikipedia_list_only,
"aaa_wikipedia_project_page": is_wikipedia_project_page,
"aaa_wikimedia_category_page": is_wikimedia_category_page,
"aaa_no_instance_subclass_or_link": no_instance_subclass_or_cat_link,
"taxon": is_taxon_or_breed_only,
"human_male": is_human_male,
"human_female": is_human_female,
"human": is_human_only,
"fictional_character": is_fictional_character_only,
"people": is_people,
"language": is_language_only,
"alphabet": is_alphabet_only,
"speech": is_speech,
"gas": is_gas_only,
"gene": is_gene,
"molecule": is_molecule_only,
"astronomical_object": is_astronomical_object_only,
"disease": is_disease,
"mind": is_mind,
"song": is_song,
"radio_program": is_radio_program_only,
"law": is_law,
"legal_action": is_legal_action_only,
"book_magazine_article": is_book_magazine_article_proverb_only,
"chemical_compound": is_chemical_compound_only,
"geometric_shape": is_geometric_shape,
"mathematical_object": is_mathematical_object_only,
"physical_quantity": is_physical_quantity,
"number": is_number_only,
"geographical_object": is_geographical_object_only,
"train_station": is_train_station,
"railroad": is_railroad,
"concept": is_concept_paradigm_proposition_only,
"genre": is_genre_only,
"sexual_orientation": is_sexual_orientation,
"bridge": is_bridge,
"airport": is_airport,
"river": is_river,
"lake": is_lake,
"sea": is_sea,
"weapon": is_weapon_only,
"region": is_region_only,
"country": is_country,
"software": is_software_only,
"website": is_website_only,
"volcano": is_volcano,
"mountain": is_mountain_only,
"religion": is_religion,
"organization": is_organization_only,
"musical_work": is_musical_work_only,
"other_art_work": is_art_work_only,
"audio_visual_work": is_audio_visual_work,
"physical_object": is_physical_object_only,
"record_chart": is_record_chart,
"clothing": is_clothing,
"plant": is_plant_only,
"anatomical_structure": is_anatomical_structure_only,
"facility": is_facility_only,
"monument": is_monument,
"vehicle": is_vehicle_only,
"watercraft": is_watercraft_only,
"road_vehicle": is_road_vehicle_only,
"vehicle_brand": is_vehicle_brand_only,
"brand": is_brand_only,
"aircraft": is_aircraft,
"legal_case": is_legal_case,
"position": is_position_only,
"person_role": is_person_only,
"populated_place": is_populated_place,
"value": is_value_only,
"unit_of_mass": is_unit_of_mass,
"currency": is_currency,
"postal_code": is_postal_code,
"name": is_name,
"data_format": is_data_format,
"character": is_character,
"family": is_family,
"sport": is_sport_only,
"taxonomic_rank": is_taxonomic_rank,
"sex_toy": is_sex_toy,
"legislative_term": is_legislative_term,
"sport_event": is_sport_event,
"date": is_date_only,
"kinship": is_kinship_kin_only,
"union": is_union_only,
"research": is_research_method_only,
"title": is_title,
"hazard": is_hazard,
"color": is_color,
"sports_terminology": is_sports_terminology_only,
"developmental_biology_period": is_development_biology_date,
"strategy": is_strategy,
"event": is_event_election_only,
"natural_phenomenon": is_natural_phenomenon,
"electromagnetic_wave": is_electromagnetic_wave,
"war": is_war,
"award": is_award,
"crime": is_crime_only,
"battle": is_battle_only,
"international_relations": is_international_relations,
"food": is_food,
"algorithm": is_algorithm,
"activity": is_activity_subclass_only,
"award_ceremony": is_award_ceremony
}
# is_other = logical_not(logical_ors([val for key, val in out.items() if key != "aaa_wikipedia_list"]))
# c.class_report([wprop.IS_A_LIST_OF, wprop.CATEGORY_LINK], logical_and(
# is_other,
# is_wikipedia_list_only
# ), name="remaining lists")
return out
|
import os
import argparse
import numpy as np
import gym
from gym.envs.atari.atari_env import ACTION_MEANING
import pygame
from atari_demo.wrappers import AtariDemo
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--game', type=str, default='MontezumaRevenge')
parser.add_argument('-f', '--frame_rate', type=int, default=60)
parser.add_argument('-y', '--screen_height', type=int, default=840)
parser.add_argument('-d', '--save_dir', type=str, default=None)
parser.add_argument('-s', '--frame_skip', type=int, default=4)
args = parser.parse_args()
if args.save_dir is None:
save_dir = os.path.join(os.getcwd(), 'demos')
else:
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
demo_file_name = os.path.join(save_dir, args.game + '.demo')
# //////// set up gym + atari part /////////
ACTION_KEYS = {
"NOOP" : set(),
"FIRE" : {'space'},
"UP" : {'up'},
"RIGHT": {'right'},
"LEFT" : {'left'},
"DOWN" : {'down'},
"UPRIGHT" : {'up', 'right'},
"UPLEFT" : {'up', 'left'},
"DOWNRIGHT" : {'down', 'right'},
"DOWNLEFT" : {'down', 'left'},
"UPFIRE" : {'up', 'space'},
"RIGHTFIRE" : {'right', 'space'},
"LEFTFIRE" : {'left', 'space'},
"DOWNFIRE" : {'down', 'space'},
"UPRIGHTFIRE" : {'up', 'right', 'space'},
"UPLEFTFIRE" : {'up', 'left', 'space'},
"DOWNRIGHTFIRE" : {'down', 'right', 'space'},
"DOWNLEFTFIRE" : {'down', 'left', 'space'},
"TIMETRAVEL": {'b'}
}
env = AtariDemo(gym.make(args.game + 'NoFrameskip-v4'))
available_actions = [ACTION_MEANING[i] for i in env.unwrapped._action_set] + ["TIMETRAVEL"]
env.reset()
loaded_previous = False
if os.path.exists(demo_file_name):
env.load_from_file(demo_file_name)
loaded_previous = True
def get_gym_action(key_presses):
action = 0
for i,action_name in enumerate(available_actions):
if ACTION_KEYS[action_name].issubset(key_presses):
action = i
return action
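# Illustrative sanity check (not in the original script): because the loop above keeps the
# last matching action, and ALE lists combined actions such as UPFIRE after their components,
# holding 'up' + 'space' resolves to UPFIRE rather than plain UP or FIRE (when available).
if "UPFIRE" in available_actions:
    assert get_gym_action({'up', 'space'}) == available_actions.index("UPFIRE")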
# ///////// set up pygame part //////////
pygame.init()
screen_size = (int((args.screen_height/210)*160),args.screen_height)
screen = pygame.display.set_mode(screen_size)
small_screen = pygame.transform.scale(screen.copy(), (160,210))
clock = pygame.time.Clock()
pygame.display.set_caption("Recording demonstration for " + args.game)
def show_text(text_lines):
screen.fill((255, 255, 255))
f1 = pygame.font.SysFont("", 30)
for i, line in enumerate(text_lines):
text = f1.render(line, True, (0, 0, 0))
screen.blit(text, (50, 100 + 50 * i))
pygame.display.flip()
def show_start_screen():
text_lines = ["Recording demo for " + args.game,
"Control the game using the arrow keys and space bar",
"Hold <b> to go backward in time to fix mistakes",
"Press <s> to save the demo and exit",
"Press <SPACE BAR> to get started"]
if loaded_previous:
text_lines = text_lines[:1] + ["Continuing from previously recorded demo"] + text_lines[1:]
show_text(text_lines)
started = False
while not started:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
key_name = pygame.key.name(event.key)
if key_name == 'space':
started = True
clock.tick(args.frame_rate)
def show_end_screen():
text_lines = ["GAME OVER",
"Hold <b> to go backward in time",
"Press <s> to save the demo and exit"]
show_text(text_lines)
def show_game_screen(observation):
pygame.surfarray.blit_array(small_screen, np.transpose(observation,[1,0,2]))
pygame.transform.scale(small_screen, screen_size, screen)
pygame.display.flip()
key_is_pressed = set()
def process_key_presses():
key_presses = set()
quit = False
save = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit = True
elif event.type == pygame.KEYDOWN:
key_name = pygame.key.name(event.key)
key_presses.add(key_name)
key_is_pressed.add(key_name)
elif event.type == pygame.KEYUP:
key_name = pygame.key.name(event.key)
if key_name in key_is_pressed:
key_is_pressed.remove(key_name)
if key_name == 's':
save = True
key_presses.update(key_is_pressed)
return key_presses, quit, save
# //////// run the game and record the demo! /////////
quit = False
done = False
show_start_screen()
while not quit:
# process key presses & save when requested
key_presses, quit, save = process_key_presses()
if save:
env.save_to_file(demo_file_name)
quit = True
# advance gym env
action = get_gym_action(key_presses)
for step in range(args.frame_skip):
observation, reward, done, info = env.step(action)
# show screen
if done:
show_end_screen()
else:
show_game_screen(observation)
clock.tick(float(args.frame_rate)/args.frame_skip)
|
import numpy as np
from multiprocessing import Process, Pipe
import gym
from baselines.common.vec_env.subproc_vec_env import CloudpickleWrapper
class ClonedEnv(gym.Wrapper):
def __init__(self, env, possible_actions_dict, best_action_dict, seed):
gym.Wrapper.__init__(self, env)
self.possible_actions_dict = possible_actions_dict
self.best_action_dict = best_action_dict
self.state = None
self.rng = np.random.RandomState(seed)
self.just_initialized = True
self.l = 0
self.r = 0
def step(self, action=None):
if self.state in self.possible_actions_dict:
possible_actions = list(self.possible_actions_dict[self.state])
action = possible_actions[self.rng.randint(len(possible_actions))]
obs, reward, done, info = self.env.step(action)
self.l += 1
self.r += reward
self.state = self.env.unwrapped._get_ram().tostring()
if self.state in self.possible_actions_dict: # still in known territory
info['possible_actions'] = self.possible_actions_dict[self.state]
if self.state in self.best_action_dict:
info['best_action'] = self.best_action_dict[self.state]
else:
done = True
past_l = self.l
past_r = self.r
self.l = 0
self.r = 0
if past_l > 0:
info['episode'] = {'r': past_r, 'l': past_l}
else:
raise Exception('stepping cloned env without resetting')
return obs, reward, done, info
def reset(self):
obs = self.env.reset()
if isinstance(obs, tuple):
obs,info = obs
else:
info = {}
self.state = self.env.unwrapped._get_ram().tostring()
if self.state in self.best_action_dict:
info['best_action'] = self.best_action_dict[self.state]
        for randop in range(self.rng.randint(30)): # randomize starting point
            obs, reward, done, info = self.step(None)
        if self.just_initialized:
            self.just_initialized = False
            for randops in range(self.rng.randint(50000)): # randomize starting point further
                obs, reward, done, info = self.step(None)
                if done:
                    obs, info = self.reset()
return obs, info
def get_best_actions_from_infos(infos):
k = len(infos)
best_actions = [0] * k
action_masks = [1] * k
for i in range(k):
if 'best_action' in infos[i]:
best_actions[i] = infos[i]['best_action']
action_masks[i] = 0
return best_actions, action_masks
def get_available_actions_from_infos(infos, n_actions):
k = len(infos)
best_actions = np.zeros((k,n_actions), dtype=np.uint8)
action_masks = [1] * k
for i in range(k):
if 'possible_actions' in infos[i]:
action_masks[i] = 0
for j in infos[i]['possible_actions']:
best_actions[i,j] = 1
return best_actions, action_masks
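# Hedged illustration (not part of the original module): the two helpers above turn per-env
# info dicts into supervision targets plus a mask, where a mask value of 1 means "no target
# available for this transition". The name _demo_action_helpers and its dicts are made up.
def _demo_action_helpers():
    best, mask = get_best_actions_from_infos([{'best_action': 3}, {}])
    assert best == [3, 0] and mask == [0, 1]
    avail, mask = get_available_actions_from_infos([{'possible_actions': {1, 2}}, {}], 4)
    assert avail.tolist() == [[0, 1, 1, 0], [0, 0, 0, 0]] and mask == [0, 1]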
def worker2(nr, remote, env_fn_wrapper, mode):
env = env_fn_wrapper.x()
while True:
cmd,count = remote.recv()
if cmd == 'step':
obs = []
rews = []
dones = []
infos = []
for step in range(count):
ob, reward, done, info = env.step(0) # action is ignored in ClonedEnv downstream
if done:
ob = env.reset()
if isinstance(ob, tuple):
ob, new_info = ob
info.update(new_info)
if 'episode' in info:
epinfo = info['episode']
print('simulator thread %d completed demo run with total return %d obtained in %d steps' % (nr, epinfo["r"], epinfo["l"]))
obs.append(ob)
rews.append(reward)
dones.append(done)
infos.append(info)
if mode == 'best':
best_actions, action_masks = get_best_actions_from_infos(infos)
else:
best_actions, action_masks = get_available_actions_from_infos(infos, env.action_space.n)
remote.send((obs, rews, dones, best_actions, action_masks))
elif cmd == 'reset':
ob = env.reset()
if isinstance(ob, tuple):
ob,info = ob
else:
info = {}
remote.send((ob,info))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.action_space, env.observation_space))
else:
raise NotImplementedError(str(cmd) + ' action not implemented in worker')
class ClonedVecEnv(object):
def __init__(self, env_fns, mode='best'):
self.nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(self.nenvs)])
self.ps = [Process(target=worker2, args=(nr, work_remote, CloudpickleWrapper(env_fn), mode))
for (nr, work_remote, env_fn) in zip(range(self.nenvs), self.work_remotes, env_fns)]
for p in self.ps:
p.start()
self.remotes[0].send(('get_spaces', None))
self.action_space, self.observation_space = self.remotes[0].recv()
self.steps_taken = 0
def step(self, time_steps=128):
for remote in self.remotes:
remote.send(('step', time_steps))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, best_actions, action_masks = [np.stack(x) for x in zip(*results)]
return obs, rews, dones, best_actions, action_masks
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, infos = zip(*results)
best_actions, action_masks = [np.stack(x) for x in get_best_actions_from_infos(infos)]
return np.stack(obs), best_actions, action_masks
def close(self):
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def make_cloned_vec_env(nenvs, env_id, possible_actions_dict, best_action_dict, wrappers, mode='best'):
def make_env(rank):
def env_fn():
env = gym.make(env_id)
env = ClonedEnv(env, possible_actions_dict, best_action_dict, rank)
env = wrappers(env)
return env
return env_fn
return ClonedVecEnv([make_env(i) for i in range(nenvs)], mode)
|
import pickle
import sys
import os
def save_as_pickled_object(obj, filepath):
"""
    This is a defensive way to write pickle.dump, allowing for very large files on all platforms
"""
max_bytes = 2**31 - 1
bytes_out = pickle.dumps(obj)
n_bytes = sys.getsizeof(bytes_out)
with open(filepath, 'wb') as f_out:
for idx in range(0, n_bytes, max_bytes):
f_out.write(bytes_out[idx:idx+max_bytes])
def load_as_pickled_object(filepath):
"""
This is a defensive way to write pickle.load, allowing for very large files on all platforms
"""
max_bytes = 2**31 - 1
try:
input_size = os.path.getsize(filepath)
bytes_in = bytearray(0)
with open(filepath, 'rb') as f_in:
for _ in range(0, input_size, max_bytes):
bytes_in += f_in.read(max_bytes)
obj = pickle.loads(bytes_in)
except:
return None
return obj
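# Hedged usage sketch (not part of the original module): round-trips an object through the
# chunked helpers above. The temporary path below is illustrative only.
if __name__ == '__main__':
    import tempfile
    demo_obj = {'name': 'example', 'payload': list(range(100000))}
    path = os.path.join(tempfile.gettempdir(), 'large_object.pkl')
    save_as_pickled_object(demo_obj, path)
    assert load_as_pickled_object(path) == demo_obj
    print('round-trip OK, %d bytes on disk' % os.path.getsize(path))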
|
import pickle
import gym
from gym import spaces
class AtariDemo(gym.Wrapper):
"""
Records actions taken, creates checkpoints, allows time travel, restoring and saving of states
"""
def __init__(self, env, disable_time_travel=False):
super(AtariDemo, self).__init__(env)
self.action_space = spaces.Discrete(len(env.unwrapped._action_set)+1) # add "time travel" action
self.save_every_k = 100
self.max_time_travel_steps = 10000
self.disable_time_travel = disable_time_travel
def step(self, action):
if action >= len(self.env.unwrapped._action_set):
if self.disable_time_travel:
obs, reward, done, info = self.env.step(0)
else:
obs, reward, done, info = self.time_travel()
else:
if self.steps_in_the_past > 0:
self.restore_past_state()
if len(self.done)>0 and self.done[-1]:
obs = self.obs[-1]
reward = 0
done = True
info = None
else:
self.lives.append(self.env.unwrapped.ale.lives())
obs, reward, done, info = self.env.step(action)
self.actions.append(action)
self.obs.append(obs)
self.rewards.append(reward)
self.done.append(done)
self.info.append(info)
# periodic checkpoint saving
if not done:
if (len(self.checkpoint_action_nr)>0 and len(self.actions) >= self.checkpoint_action_nr[-1] + self.save_every_k) \
or (len(self.checkpoint_action_nr)==0 and len(self.actions) >= self.save_every_k):
self.save_checkpoint()
return obs, reward, done, info
def reset(self):
obs = self.env.reset()
self.actions = []
self.lives = []
self.checkpoints = []
self.checkpoint_action_nr = []
self.obs = [obs]
self.rewards = []
self.done = [False]
self.info = [None]
self.steps_in_the_past = 0
return obs
def time_travel(self):
if len(self.obs) > 1:
reward = self.rewards.pop()
self.obs.pop()
self.done.pop()
self.info.pop()
self.lives.pop()
obs = self.obs[-1]
done = self.done[-1]
info = self.info[-1]
self.steps_in_the_past += 1
else: # reached time travel limit
reward = 0
obs = self.obs[0]
done = self.done[0]
info = self.info[0]
# rewards are differences in subsequent state values, and so should get reversed sign when going backward in time
reward = -reward
return obs, reward, done, info
def save_to_file(self, file_name):
dat = {'actions': self.actions, 'checkpoints': self.checkpoints, 'checkpoint_action_nr': self.checkpoint_action_nr,
'rewards': self.rewards, 'lives': self.lives}
with open(file_name, "wb") as f:
pickle.dump(dat, f)
def load_from_file(self, file_name):
self.reset()
with open(file_name, "rb") as f:
dat = pickle.load(f)
self.actions = dat['actions']
self.checkpoints = dat['checkpoints']
self.checkpoint_action_nr = dat['checkpoint_action_nr']
self.rewards = dat['rewards']
self.lives = dat['lives']
self.load_state_and_walk_forward()
def save_checkpoint(self):
chk_pnt = self.env.unwrapped.clone_state()
self.checkpoints.append(chk_pnt)
self.checkpoint_action_nr.append(len(self.actions))
def restore_past_state(self):
self.actions = self.actions[:-self.steps_in_the_past]
while len(self.checkpoints)>0 and self.checkpoint_action_nr[-1]>len(self.actions):
self.checkpoints.pop()
self.checkpoint_action_nr.pop()
self.load_state_and_walk_forward()
self.steps_in_the_past = 0
def load_state_and_walk_forward(self):
if len(self.checkpoints)==0:
self.env.reset()
time_step = 0
else:
self.env.unwrapped.restore_state(self.checkpoints[-1])
time_step = self.checkpoint_action_nr[-1]
for a in self.actions[time_step:]:
action = self.env.unwrapped._action_set[a]
self.env.unwrapped.ale.act(action)
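# Hedged usage sketch (not part of the original module): records a short random demo with
# the wrapper above and saves it to disk. The game id and output path are illustrative, and
# an old-style gym Atari env (4-tuple step API, env.unwrapped._action_set) is assumed.
if __name__ == '__main__':
    import random
    env = AtariDemo(gym.make('MontezumaRevengeNoFrameskip-v4'))
    env.reset()
    n_game_actions = len(env.unwrapped._action_set)  # the extra action index is "time travel"
    for _ in range(50):
        obs, reward, done, info = env.step(random.randrange(n_game_actions))
        if done:
            break
    env.save_to_file('/tmp/MontezumaRevenge_random.demo')
    print('recorded %d actions' % len(env.actions))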
|
import distutils.util
platform = distutils.util.get_platform()
# technically, our platform is not actually manylinux... so this may fail in some distros
# however, tested in python:3.6 docker image (by construction)
# and in ubuntu:16.04
platform = platform.replace('linux', 'manylinux1')
print(platform)
|
import os
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import subprocess
import sys
with open(os.path.join(os.path.dirname(__file__), 'atari_py', 'package_data.txt')) as f:
package_data = [line.rstrip() for line in f.readlines()]
class Build(build_ext):
def run(self):
if os.name != 'posix' and not self.inplace:
# silly patch to disable build steps on windows, as we are doing compilation externally
return
try:
cwd = os.path.join('' if self.inplace else self.build_lib, 'atari_py', 'ale_interface', 'build')
if not os.path.exists(cwd):
os.makedirs(cwd)
subprocess.check_call(['cmake', '..'], cwd=cwd)
subprocess.check_call(['cmake', '--build', '.'], cwd=cwd)
except subprocess.CalledProcessError as e:
sys.stderr.write("Could not build atari-py: %s. (HINT: are you sure cmake is installed? You might also be missing a library. Atari-py requires: zlib [installable as 'apt-get install zlib1g-dev' on Ubuntu].)\n" % e)
raise
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
setup(name='atari-py',
version='0.3.0',
description='Python bindings to Atari games',
url='https://github.com/openai/atari-py',
author='OpenAI',
author_email='[email protected]',
license='',
packages=['atari_py'],
package_data={'atari_py': package_data},
ext_modules=[CMakeExtension('atari_py')],
cmdclass={'build_ext': Build},
install_requires=['numpy', 'six'],
tests_require=['nose2']
)
|
import sys
from .ale_python_interface import *
from .games import get_game_path, list_games
print(
"[NOTICE] atari-py is deprecated in favor ale-py "
"and will no longer receive further maintenance or critical updates. "
"ale-py is fully backwards compatible with atari-py. "
"If you're using Gym, you can simply upgrade via pip install -U gym[atari]",
file=sys.stderr,
)
# default to only logging errors
ALEInterface.setLoggerMode(ALEInterface.Logger.Error)
|
import os
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
try:
import atari_py_roms
_games_dir = os.path.join(atari_py_roms.__path__[0], "atari_roms")
except ImportError:
_games_dir = os.path.join(SCRIPT_DIR, "atari_roms")
def get_games_dir():
return _games_dir
def get_game_path(game_name):
path = os.path.join(_games_dir, game_name) + ".bin"
if not os.path.exists(path):
raise Exception('ROM is missing for %s, see https://github.com/openai/atari-py#roms for instructions' % (game_name,))
return path
def list_games():
files = os.listdir(_games_dir)
    return [os.path.basename(f).split(".")[0] for f in files]
|
# ale_python_interface.py
# Author: Ben Goodrich
# This directly implements a python version of the arcade learning
# environment interface.
__all__ = ['ALEInterface']
from ctypes import *
import numpy as np
from numpy.ctypeslib import as_ctypes
import os
import six
if os.name == 'posix':
ale_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),
'ale_interface/libale_c.so'))
else:
ale_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),
'ale_interface/ale_c.dll'))
ale_lib.ALE_new.argtypes = None
ale_lib.ALE_new.restype = c_void_p
ale_lib.ALE_del.argtypes = [c_void_p]
ale_lib.ALE_del.restype = None
ale_lib.getString.argtypes = [c_void_p, c_char_p]
ale_lib.getString.restype = c_char_p
ale_lib.getInt.argtypes = [c_void_p, c_char_p]
ale_lib.getInt.restype = c_int
ale_lib.getBool.argtypes = [c_void_p, c_char_p]
ale_lib.getBool.restype = c_bool
ale_lib.getFloat.argtypes = [c_void_p, c_char_p]
ale_lib.getFloat.restype = c_float
ale_lib.setString.argtypes = [c_void_p, c_char_p, c_char_p]
ale_lib.setString.restype = None
ale_lib.setInt.argtypes = [c_void_p, c_char_p, c_int]
ale_lib.setInt.restype = None
ale_lib.setBool.argtypes = [c_void_p, c_char_p, c_bool]
ale_lib.setBool.restype = None
ale_lib.setFloat.argtypes = [c_void_p, c_char_p, c_float]
ale_lib.setFloat.restype = None
ale_lib.loadROM.argtypes = [c_void_p, c_char_p]
ale_lib.loadROM.restype = None
ale_lib.act.argtypes = [c_void_p, c_int]
ale_lib.act.restype = c_int
ale_lib.game_over.argtypes = [c_void_p]
ale_lib.game_over.restype = c_bool
ale_lib.reset_game.argtypes = [c_void_p]
ale_lib.reset_game.restype = None
ale_lib.getAvailableModes.argtypes = [c_void_p, c_void_p]
ale_lib.getAvailableModes.restype = None
ale_lib.getAvailableModesSize.argtypes = [c_void_p]
ale_lib.getAvailableModesSize.restype = c_int
ale_lib.setMode.argtypes = [c_void_p, c_int]
ale_lib.setMode.restype = None
ale_lib.getAvailableDifficulties.argtypes = [c_void_p, c_void_p]
ale_lib.getAvailableDifficulties.restype = None
ale_lib.getAvailableDifficultiesSize.argtypes = [c_void_p]
ale_lib.getAvailableDifficultiesSize.restype = c_int
ale_lib.setDifficulty.argtypes = [c_void_p, c_int]
ale_lib.setDifficulty.restype = None
ale_lib.getLegalActionSet.argtypes = [c_void_p, c_void_p]
ale_lib.getLegalActionSet.restype = None
ale_lib.getLegalActionSize.argtypes = [c_void_p]
ale_lib.getLegalActionSize.restype = c_int
ale_lib.getMinimalActionSet.argtypes = [c_void_p, c_void_p]
ale_lib.getMinimalActionSet.restype = None
ale_lib.getMinimalActionSize.argtypes = [c_void_p]
ale_lib.getMinimalActionSize.restype = c_int
ale_lib.getFrameNumber.argtypes = [c_void_p]
ale_lib.getFrameNumber.restype = c_int
ale_lib.lives.argtypes = [c_void_p]
ale_lib.lives.restype = c_int
ale_lib.getEpisodeFrameNumber.argtypes = [c_void_p]
ale_lib.getEpisodeFrameNumber.restype = c_int
ale_lib.getScreen.argtypes = [c_void_p, c_void_p]
ale_lib.getScreen.restype = None
ale_lib.getRAM.argtypes = [c_void_p, c_void_p]
ale_lib.getRAM.restype = None
ale_lib.getRAMSize.argtypes = [c_void_p]
ale_lib.getRAMSize.restype = c_int
ale_lib.getScreenWidth.argtypes = [c_void_p]
ale_lib.getScreenWidth.restype = c_int
ale_lib.getScreenHeight.argtypes = [c_void_p]
ale_lib.getScreenHeight.restype = c_int
ale_lib.getScreenRGB.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenRGB.restype = None
ale_lib.getScreenRGB2.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenRGB2.restype = None
ale_lib.getScreenGrayscale.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenGrayscale.restype = None
ale_lib.saveState.argtypes = [c_void_p]
ale_lib.saveState.restype = None
ale_lib.loadState.argtypes = [c_void_p]
ale_lib.loadState.restype = None
ale_lib.cloneState.argtypes = [c_void_p]
ale_lib.cloneState.restype = c_void_p
ale_lib.restoreState.argtypes = [c_void_p, c_void_p]
ale_lib.restoreState.restype = None
ale_lib.cloneSystemState.argtypes = [c_void_p]
ale_lib.cloneSystemState.restype = c_void_p
ale_lib.restoreSystemState.argtypes = [c_void_p, c_void_p]
ale_lib.restoreSystemState.restype = None
ale_lib.deleteState.argtypes = [c_void_p]
ale_lib.deleteState.restype = None
ale_lib.saveScreenPNG.argtypes = [c_void_p, c_char_p]
ale_lib.saveScreenPNG.restype = None
ale_lib.encodeState.argtypes = [c_void_p, c_void_p, c_int]
ale_lib.encodeState.restype = None
ale_lib.encodeStateLen.argtypes = [c_void_p]
ale_lib.encodeStateLen.restype = c_int
ale_lib.decodeState.argtypes = [c_void_p, c_int]
ale_lib.decodeState.restype = c_void_p
ale_lib.setLoggerMode.argtypes = [c_int]
ale_lib.setLoggerMode.restype = None
def _as_bytes(s):
if hasattr(s, 'encode'):
return s.encode('utf8')
return s
class ALEInterface(object):
# Logger enum
class Logger:
Info = 0
Warning = 1
Error = 2
def __init__(self):
self.obj = ale_lib.ALE_new()
def getString(self, key):
return ale_lib.getString(self.obj, _as_bytes(key))
def getInt(self, key):
return ale_lib.getInt(self.obj, _as_bytes(key))
def getBool(self, key):
return ale_lib.getBool(self.obj, _as_bytes(key))
def getFloat(self, key):
return ale_lib.getFloat(self.obj, _as_bytes(key))
def setString(self, key, value):
ale_lib.setString(self.obj, _as_bytes(key), _as_bytes(value))
def setInt(self, key, value):
ale_lib.setInt(self.obj, _as_bytes(key), int(value))
def setBool(self, key, value):
ale_lib.setBool(self.obj, _as_bytes(key), bool(value))
def setFloat(self, key, value):
ale_lib.setFloat(self.obj, _as_bytes(key), float(value))
def loadROM(self, rom_file):
ale_lib.loadROM(self.obj, _as_bytes(rom_file))
def act(self, action):
return ale_lib.act(self.obj, int(action))
def game_over(self):
return ale_lib.game_over(self.obj)
def reset_game(self):
ale_lib.reset_game(self.obj)
def getLegalActionSet(self):
act_size = ale_lib.getLegalActionSize(self.obj)
act = np.zeros((act_size), dtype=np.intc)
ale_lib.getLegalActionSet(self.obj, as_ctypes(act))
return act
def getMinimalActionSet(self):
act_size = ale_lib.getMinimalActionSize(self.obj)
act = np.zeros((act_size), dtype=np.intc)
ale_lib.getMinimalActionSet(self.obj, as_ctypes(act))
return act
def getAvailableModes(self):
modes_size = ale_lib.getAvailableModesSize(self.obj)
modes = np.zeros((modes_size), dtype=np.intc)
ale_lib.getAvailableModes(self.obj, as_ctypes(modes))
return modes
def setMode(self, mode):
ale_lib.setMode(self.obj, int(mode))
def getAvailableDifficulties(self):
difficulties_size = ale_lib.getAvailableDifficultiesSize(self.obj)
difficulties = np.zeros((difficulties_size), dtype=np.intc)
ale_lib.getAvailableDifficulties(self.obj, as_ctypes(difficulties))
return difficulties
def setDifficulty(self, difficulty):
ale_lib.setDifficulty(self.obj, int(difficulty))
def getFrameNumber(self):
return ale_lib.getFrameNumber(self.obj)
def lives(self):
return ale_lib.lives(self.obj)
def getEpisodeFrameNumber(self):
return ale_lib.getEpisodeFrameNumber(self.obj)
def getScreenDims(self):
"""returns a tuple that contains (screen_width, screen_height)
"""
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenHeight(self.obj)
return (width, height)
def getScreen(self, screen_data=None):
"""This function fills screen_data with the RAW Pixel data
screen_data MUST be a numpy array of uint8/int8. This could be initialized like so:
screen_data = np.empty(w*h, dtype=np.uint8)
        Note that it must be width*height in size.
If it is None, then this function will initialize it
Note: This is the raw pixel values from the atari, before any RGB palette transformation takes place
"""
if(screen_data is None):
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenHeight(self.obj)
screen_data = np.zeros(width*height, dtype=np.uint8)
ale_lib.getScreen(self.obj, as_ctypes(screen_data))
return screen_data
def getScreenRGB(self, screen_data=None):
"""This function fills screen_data with the data in RGB format
screen_data MUST be a numpy array of uint8. This can be initialized like so:
screen_data = np.empty((height,width,3), dtype=np.uint8)
If it is None, then this function will initialize it.
On little-endian machines like x86, the channels are BGR order:
screen_data[x, y, 0:3] is [blue, green, red]
On big-endian machines (rare in 2017) the channels would be the opposite order.
There's not much error checking here: if you supply an array that's too small
this function will produce undefined behavior.
"""
if(screen_data is None):
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenHeight(self.obj)
screen_data = np.empty((height, width,3), dtype=np.uint8)
ale_lib.getScreenRGB(self.obj, as_ctypes(screen_data[:]))
return screen_data
def getScreenRGB2(self, screen_data=None):
"""This function fills screen_data with the data in RGB format.
screen_data MUST be a numpy array of uint8. This can be initialized like so:
screen_data = np.empty((height,width,3), dtype=np.uint8)
If it is None, then this function will initialize it.
On all architectures, the channels are RGB order:
screen_data[x, y, :] is [red, green, blue]
There's not much error checking here: if you supply an array that's too small
this function will produce undefined behavior.
"""
if(screen_data is None):
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenHeight(self.obj)
screen_data = np.empty((height, width, 3), dtype=np.uint8)
assert screen_data.strides == (480, 3, 1)
ale_lib.getScreenRGB2(self.obj, as_ctypes(screen_data[:]))
return screen_data
def getScreenGrayscale(self, screen_data=None):
"""This function fills screen_data with the data in grayscale
screen_data MUST be a numpy array of uint8. This can be initialized like so:
screen_data = np.empty((height,width,1), dtype=np.uint8)
If it is None, then this function will initialize it.
"""
if(screen_data is None):
width = ale_lib.getScreenWidth(self.obj)
height = ale_lib.getScreenHeight(self.obj)
screen_data = np.empty((height, width,1), dtype=np.uint8)
ale_lib.getScreenGrayscale(self.obj, as_ctypes(screen_data[:]))
return screen_data
def getRAMSize(self):
return ale_lib.getRAMSize(self.obj)
def getRAM(self, ram=None):
"""This function grabs the atari RAM.
ram MUST be a numpy array of uint8/int8. This can be initialized like so:
        ram = np.zeros(ram_size, dtype=np.uint8)
Notice: It must be ram_size where ram_size can be retrieved via the getRAMSize function.
If it is None, then this function will initialize it.
"""
if(ram is None):
ram_size = ale_lib.getRAMSize(self.obj)
ram = np.zeros(ram_size, dtype=np.uint8)
ale_lib.getRAM(self.obj, as_ctypes(ram))
return ram
def saveScreenPNG(self, filename):
"""Save the current screen as a png file"""
return ale_lib.saveScreenPNG(self.obj, _as_bytes(filename))
def saveState(self):
"""Saves the state of the system"""
return ale_lib.saveState(self.obj)
def loadState(self):
"""Loads the state of the system"""
return ale_lib.loadState(self.obj)
def cloneState(self):
"""This makes a copy of the environment state. This copy does *not*
include pseudorandomness, making it suitable for planning
purposes. By contrast, see cloneSystemState.
"""
return ale_lib.cloneState(self.obj)
def restoreState(self, state):
"""Reverse operation of cloneState(). This does not restore
pseudorandomness, so that repeated calls to restoreState() in
the stochastic controls setting will not lead to the same
outcomes. By contrast, see restoreSystemState.
"""
ale_lib.restoreState(self.obj, state)
def cloneSystemState(self):
"""This makes a copy of the system & environment state, suitable for
serialization. This includes pseudorandomness and so is *not*
suitable for planning purposes.
"""
return ale_lib.cloneSystemState(self.obj)
def restoreSystemState(self, state):
"""Reverse operation of cloneSystemState."""
ale_lib.restoreSystemState(self.obj, state)
def deleteState(self, state):
""" Deallocates the ALEState """
ale_lib.deleteState(state)
def encodeStateLen(self, state):
return ale_lib.encodeStateLen(state)
def encodeState(self, state, buf=None):
        if buf is None:
length = ale_lib.encodeStateLen(state)
buf = np.zeros(length, dtype=np.uint8)
ale_lib.encodeState(state, as_ctypes(buf), c_int(len(buf)))
return buf
def decodeState(self, serialized):
return ale_lib.decodeState(as_ctypes(serialized), len(serialized))
def __del__(self):
ale_lib.ALE_del(self.obj)
@staticmethod
def setLoggerMode(mode):
dic = {'info': 0, 'warning': 1, 'error': 2}
mode = dic.get(mode, mode)
assert mode in [0, 1, 2], "Invalid Mode! Mode must be one of 0: info, 1: warning, 2: error"
ale_lib.setLoggerMode(mode)
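# Hedged usage sketch (not part of the original module): illustrates the difference the
# docstrings above describe between cloneState (lightweight, no pseudorandomness, good for
# planning/rollbacks) and restoreState. 'tetris.bin' is a placeholder ROM path.
if __name__ == '__main__':
    ale = ALEInterface()
    ale.loadROM('tetris.bin')              # placeholder; point this at a real ROM file
    actions = ale.getMinimalActionSet()
    snapshot = ale.cloneState()            # emulator-only snapshot, suitable for planning
    score = sum(ale.act(int(a)) for a in actions[:5])
    ale.restoreState(snapshot)             # roll the emulator back to the snapshot
    ale.deleteState(snapshot)              # ALEState handles must be freed explicitly
    print('score during the rolled-back segment:', score)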
|
import os
import hashlib
import shutil
import zipfile
import argparse
import io
from .games import get_games_dir
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
MD5_CHUNK_SIZE = 8096
def _check_zipfile(f, process_f):
with zipfile.ZipFile(f) as zf:
for entry in zf.infolist():
_root, ext = os.path.splitext(entry.filename)
with zf.open(entry) as innerf:
if ext == ".zip":
_check_zipfile(innerf, process_f)
else:
process_f(innerf)
def _calc_md5(f):
h = hashlib.md5()
while True:
chunk = f.read(MD5_CHUNK_SIZE)
if chunk == b'':
break
h.update(chunk)
return h.hexdigest()
def import_roms(dirpath="."):
md5s = {}
copied_md5s = set()
with open(os.path.join(SCRIPT_DIR, "ale_interface", "md5.txt")) as f:
f.readline()
f.readline()
for line in f:
hexdigest, filename = line.strip().split(' ')
md5s[hexdigest] = os.path.join(get_games_dir(), filename)
def save_if_matches(f):
hexdigest = _calc_md5(f)
if hexdigest == "ce5cc62608be2cd3ed8abd844efb8919":
# the ALE version of road_runner.bin is not easily available
# patch this file instead to match the correct data
delta = {4090: 216, 4091: 111, 4092: 216, 4093: 111, 4094: 216, 4095: 111, 8186: 18, 8187: 43, 8188: -216, 8189: 49, 8190: -216, 8191: 49, 12281: 234, 12282: 18, 12283: 11, 12284: -216, 12285: 17, 12286: -216, 12287: 17, 16378: 18, 16379: -21, 16380: -216, 16381: -15, 16382: -216, 16383: -15}
f.seek(0)
data = bytearray(f.read())
for index, offset in delta.items():
data[index] += offset
name = f"patched version of {f.name}"
f = io.BytesIO(bytes(data))
f.name = name
hexdigest = _calc_md5(f)
if hexdigest in md5s and hexdigest not in copied_md5s:
copied_md5s.add(hexdigest)
rom_path = md5s[hexdigest]
print(f"copying {os.path.basename(rom_path)} from {f.name} to {rom_path}")
os.makedirs(os.path.dirname(rom_path), exist_ok=True)
f.seek(0)
with open(rom_path, "wb") as out_f:
shutil.copyfileobj(f, out_f)
for root, dirs, files in os.walk(dirpath):
for filename in files:
filepath = os.path.join(root, filename)
with open(filepath, "rb") as f:
_root, ext = os.path.splitext(filename)
if ext == ".zip":
try:
_check_zipfile(f, save_if_matches)
except zipfile.BadZipFile:
pass
else:
save_if_matches(f)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("dirpath", help="path to directory containing extracted ROM files")
args = parser.parse_args()
import_roms(args.dirpath)
if __name__ == "__main__":
    main()
|
import atari_py
import numpy as np
def test_smoke():
game_path = atari_py.get_game_path('tetris')
ale = atari_py.ALEInterface()
ale.loadROM(game_path)
action_set = ale.getMinimalActionSet()
# Test stepping
ale.act(action_set[0])
# Test screen capture
(screen_width,screen_height) = ale.getScreenDims()
arr = np.zeros((screen_height, screen_width, 4), dtype=np.uint8)
ale.getScreenRGB(arr)
if __name__ == '__main__':
print('smoke test')
test_smoke()
print('done!')
|
#!/usr/bin/env python
# python_example.py
# Author: Ben Goodrich
#
# This is a direct port to python of the shared library example from
# ALE provided in doc/examples/sharedLibraryInterfaceExample.cpp
from __future__ import print_function
import sys
from random import randrange
from atari_py import ALEInterface
if len(sys.argv) < 2:
print('Usage:', sys.argv[0], 'rom_file')
sys.exit()
ale = ALEInterface()
# Get & Set the desired settings
ale.setInt('random_seed', 123)
# Set USE_SDL to true to display the screen. ALE must be compiled
# with SDL enabled for this to work. On OSX, pygame init is used to
# proxy-call SDL_main.
USE_SDL = False
if USE_SDL:
if sys.platform == 'darwin':
import pygame
pygame.init()
ale.setBool('sound', False) # Sound doesn't work on OSX
elif sys.platform.startswith('linux'):
ale.setBool('sound', True)
ale.setBool('display_screen', True)
# Load the ROM file
ale.loadROM(sys.argv[1])
# Get the list of legal actions
legal_actions = ale.getLegalActionSet()
# Play 10 episodes
for episode in range(10):
total_reward = 0
while not ale.game_over():
a = legal_actions[randrange(len(legal_actions))]
# Apply an action and get the resulting reward
        reward = ale.act(a)
total_reward += reward
print('Episode', episode, 'ended with score:', total_reward)
ale.reset_game()
|
# TODO: the code below does not work!
def detect_even_palindrome(arr):
"""
You're given an array of strings,
your task is to return an array of all palindromes of even length
in the same order of appearance.
Consider the empty string as not palindrome.
Examples:
* detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"]
* detect_even_palindrome(["rebber", "madam"]) => ["rebber"]
* detect_even_palindrome(["", "124", "a"]) => []
* detect_even_palindrome([]) => []
"""
# END OF CONTEXT
return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']
# END OF SOLUTION
def check(candidate):
assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"]
assert candidate(["rebber", "madam"]) == ["rebber"]
assert candidate(["", "124", "a"]) == []
assert candidate([]) == []
assert candidate([""]) == []
assert candidate(["as"]) == []
assert candidate(["asd"]) == []
assert candidate(["asd", "asd"]) == []
assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"]
assert candidate(["rubber"]) == []
if __name__ == '__main__':
check(detect_even_palindrome)
|
# sdfljafowejidsfjospadjcfaopwjeopfsjsadkl;fjaowejfopjdksaldfjopweajfojasdfkljafpo2wqd;lcmpovnteoirdpsafd
# sdf9wjfaowiejf-0j23w9-eafjidosjf023qjiobgkf023w8hger90fivdfginb0qaerpoeprg0jegar0-3wjfiiewrowqeoiwer
# f0-23rnfer0-wfaeijoafweop32023lnfewopiagsd9234toerg9uegapjr3bng4eropgeojsfaewneffa0rq32fwiojwefniaggerj
# f03j4efqpwjdf902a3jwopfvjae09fj q9p23wjdoJDF
##WF0923JWEPOFJAF
#[[[[[[[
# {{{
def detect_even_palindrome(arr):
"""
You're given an array of strings,
your task is to return an array of all palindromes of even length
in the same order of appearance.
Consider the empty string as not palindrome.
Examples:
* detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"]
* detect_even_palindrome(["rebber", "madam"]) => ["rebber"]
* detect_even_palindrome(["", "124", "a"]) => []
* detect_even_palindrome([]) => []
"""
# END OF CONTEXT
return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']
# END OF SOLUTION
def check(candidate):
assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"]
assert candidate(["rebber", "madam"]) == ["rebber"]
assert candidate(["", "124", "a"]) == []
assert candidate([]) == []
assert candidate([""]) == []
assert candidate(["as"]) == []
assert candidate(["asd"]) == []
assert candidate(["asd", "asd"]) == []
assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"]
assert candidate(["rubber"]) == []
if __name__ == '__main__':
check(detect_even_palindrome)
|
def genpassword(wlc,maxchar,txt,List,verbose):
word = ""
i1 = i2 = i3 = i4 = i5 = i6 = i6 = i7 = i8 = i9 = i10 = i11 = i12 = i13 = i14 = i15 = 0
txtfile = open(txt,'w')
i = 0
mc = int(maxchar) - 1
lword = [0]
for i in range(mc):
lword += [0]
for i1 in range(len(wlc)):
for i2 in range(len(wlc)):
for i3 in range(len(wlc)):
for i4 in range(len(wlc)):
for i5 in range(len(wlc)):
for i6 in range(len(wlc)):
for i7 in range(len(wlc)):
for i8 in range(len(wlc)):
for i9 in range(len(wlc)):
for i10 in range(len(wlc)):
for i11 in range(len(wlc)):
for i12 in range(len(wlc)):
for i13 in range(len(wlc)):
for i14 in range(len(wlc)):
for i15 in range(len(wlc)):
if int(maxchar) == 1 :
word = wlc[i15]
if int(maxchar) == 2 :
word = wlc[i14] + wlc[i15]
if int(maxchar) == 3 :
word = wlc[i13] + wlc[i14] + wlc[i15]
if int(maxchar) == 4 :
word = wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15]
if int(maxchar) == 5 :
word = wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] \
+ wlc[i15]
if int(maxchar) == 6 :
word = wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \
+ wlc[i14] + wlc[i15]
if int(maxchar) == 7 :
word = wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] \
+ wlc[i13] + wlc[i14] + wlc[i15]
if int(maxchar) == 8 :
word = wlc[i8] + wlc[i9] + wlc[i10] + wlc[i11] \
+ wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15]
if int(maxchar) == 9 :
word = wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10] \
+ wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15]
if int(maxchar) == 10 :
word = wlc[i6] + wlc[i7] + wlc[i8] + wlc[i9] \
+ wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] \
+ wlc[i15]
if int(maxchar) == 11 :
word = wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] \
+ wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \
+ wlc[i14] + wlc[i15]
if int(maxchar) == 12 :
word = wlc[i4] + wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] \
+ wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \
+ wlc[i14] + wlc[i15]
if int(maxchar) == 13 :
word = wlc[i3] + wlc[i4] + wlc[i5] + wlc[i6] \
+ wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10]\
+ wlc[i11] + wlc[i12] + wlc[i13] \
+ wlc[i14] + wlc[i15]
if int(maxchar) == 14 :
word = wlc[i2] +wlc[i3] + wlc[i4] + wlc[i5] + wlc[i6] \
+ wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10]\
+ wlc[i11] + wlc[i12] + wlc[i13] \
+ wlc[i14] + wlc[i15]
if int(maxchar) == 15 :
word = wlc[i1] + wlc[i2] + wlc[i3] + wlc[i4] \
+ wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] + wlc[i9] \
+ wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \
+ wlc[i14] + wlc[i15]
if int(verbose) == 1:
print(word)
txtfile.writelines(word + "\n")
i = 0
end = 0
if int(List) == 1 :
for i in range(len(word)):
lword[i] = "9"
if str(lword) == str(list(word)):
end = 1
elif (int(List) == 2):
for i in range(len(word)):
lword[i] = "z"
if str(lword) == str(list(word)):
end = 1
elif (int(List) == 3):
for i in range(len(word)):
lword[i] = "Z"
if str(lword) == str(list(word)):
end = 1
elif (int(List) == 4):
for i in range(len(word)):
lword[i] = "z"
if str(lword) == str(list(word)):
end = 1
elif (int(List) == 5):
for i in range(len(word)):
lword[i] = "Z"
if str(lword) == str(list(word)):
end = 1
elif (int(List) == 6):
for i in range(len(word)):
lword[i] = "Z"
if str(lword) == str(list(word)):
end = 1
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
if end == 1 : break
txtfile.close()
def detect_even_palindrome(arr):
"""
You're given an array of strings,
your task is to return an array of all palindromes of even length
in the same order of appearance.
Consider the empty string as not palindrome.
Examples:
* detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"]
* detect_even_palindrome(["rebber", "madam"]) => ["rebber"]
* detect_even_palindrome(["", "124", "a"]) => []
* detect_even_palindrome([]) => []
"""
# END OF CONTEXT
return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']
# END OF SOLUTION
def check(candidate):
assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"]
assert candidate(["rebber", "madam"]) == ["rebber"]
assert candidate(["", "124", "a"]) == []
assert candidate([]) == []
assert candidate([""]) == []
assert candidate(["as"]) == []
assert candidate(["asd"]) == []
assert candidate(["asd", "asd"]) == []
assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"]
assert candidate(["rubber"]) == []
if __name__ == '__main__':
check(detect_even_palindrome)
|
def convert_time_zone(time, zone):
    """
    Convert time from UTC to a given time zone.
    """
    # Assumes `time` is a naive datetime.datetime expressed in UTC and `zone`
    # is an IANA time zone name such as "Europe/Paris" (zoneinfo, Python 3.9+).
    from datetime import timezone
    from zoneinfo import ZoneInfo
    return time.replace(tzinfo=timezone.utc).astimezone(ZoneInfo(zone))
def reverse_string(s):
    """
    Reverse a string.
    """
    return s[::-1]
def detect_even_palindrome(arr):
"""
You're given an array of strings,
your task is to return an array of all palindromes of even length
in the same order of appearance.
    Consider the empty string as not a palindrome.
Examples:
* detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"]
* detect_even_palindrome(["rebber", "madam"]) => ["rebber"]
* detect_even_palindrome(["", "124", "a"]) => []
* detect_even_palindrome([]) => []
"""
# END OF CONTEXT
return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']
# END OF SOLUTION
def check(candidate):
assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"]
assert candidate(["rebber", "madam"]) == ["rebber"]
assert candidate(["", "124", "a"]) == []
assert candidate([]) == []
assert candidate([""]) == []
assert candidate(["as"]) == []
assert candidate(["asd"]) == []
assert candidate(["asd", "asd"]) == []
assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"]
assert candidate(["rubber"]) == []
if __name__ == '__main__':
check(detect_even_palindrome)
|
ENTRY_POINT = 'factorize'
#[PROMPT]
from typing import List
def factorize(n: int) -> List[int]:
""" Return list of prime factors of given integer in the order from smallest to largest.
    Each of the factors should be listed the number of times it appears in the factorization.
    The input number should be equal to the product of all factors.
>>> factorize(8)
[2, 2, 2]
>>> factorize(25)
[5, 5]
>>> factorize(70)
[2, 5, 7]
"""
#[SOLUTION]
import math
fact = []
i = 2
while i <= int(math.sqrt(n) + 1):
if n % i == 0:
fact.append(i)
n //= i
else:
i += 1
if n > 1:
fact.append(n)
return fact
#[CHECK]
METADATA = {
'author': 'jt',
'dataset': 'test'
}
def check(candidate):
assert candidate(2) == [2]
assert candidate(4) == [2, 2]
assert candidate(8) == [2, 2, 2]
assert candidate(3 * 19) == [3, 19]
assert candidate(3 * 19 * 3 * 19) == [3, 3, 19, 19]
assert candidate(3 * 19 * 3 * 19 * 3 * 19) == [3, 3, 3, 19, 19, 19]
assert candidate(3 * 19 * 19 * 19) == [3, 19, 19, 19]
assert candidate(3 * 2 * 3) == [2, 3, 3]
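# Hedged usage sketch for the trial-division solution above: every composite n
# has a prime factor <= sqrt(n), so once i*i exceeds the (shrinking) n, whatever
# remains greater than 1 must itself be prime. The helper below only verifies
# the documented invariant that the factors are sorted and multiply back to n.
def _factorize_sanity_check():
    import functools, operator
    for n in (8, 25, 70, 97, 2 * 3 * 5 * 7 * 11):
        fact = factorize(n)
        assert fact == sorted(fact)
        assert functools.reduce(operator.mul, fact, 1) == n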
|
ENTRY_POINT = 'is_simple_power'
#[PROMPT]
def is_simple_power(x, n):
"""Your task is to write a function that returns true if a number x is a simple
power of n and false in other cases.
    x is a simple power of n if n**k == x for some non-negative integer k.
For example:
is_simple_power(1, 4) => true
is_simple_power(2, 2) => true
is_simple_power(8, 2) => true
is_simple_power(3, 2) => false
is_simple_power(3, 1) => false
is_simple_power(5, 3) => false
"""
#[SOLUTION]
if (n == 1):
return (x == 1)
power = 1
while (power < x):
power = power * n
return (power == x)
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate(16, 2)== True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(143214, 16)== False, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(4, 2)==True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(9, 3)==True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(16, 4)==True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(24, 2)==False, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(128, 4)==False, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(12, 6)==False, "This prints if this assert fails 1 (good for debugging!)"
# Check some edge cases that are easy to work out by hand.
assert candidate(1, 1)==True, "This prints if this assert fails 2 (also good for debugging!)"
assert candidate(1, 12)==True, "This prints if this assert fails 2 (also good for debugging!)"
|
ENTRY_POINT = 'solve'
#[PROMPT]
def solve(N):
"""Given a positive integer N, return the total sum of its digits in binary.
Example
        For N = 1000, the sum of digits will be 1 and the output should be "1".
        For N = 150, the sum of digits will be 6 and the output should be "110".
        For N = 147, the sum of digits will be 12 and the output should be "1100".
    Variables:
        @N integer
        Constraints: 0 ≤ N ≤ 10000.
Output:
a string of binary number
"""
#[SOLUTION]
return bin(sum(int(i) for i in str(N)))[2:]
#[CHECK]
def check(candidate):
# Check some simple cases
assert True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(1000) == "1", "Error"
assert candidate(150) == "110", "Error"
assert candidate(147) == "1100", "Error"
# Check some edge cases that are easy to work out by hand.
assert True, "This prints if this assert fails 2 (also good for debugging!)"
assert candidate(333) == "1001", "Error"
assert candidate(963) == "10010", "Error"
|
ENTRY_POINT = 'correct_bracketing'
#[PROMPT]
def correct_bracketing(brackets: str):
""" brackets is a string of "(" and ")".
return True if every opening bracket has a corresponding closing bracket.
>>> correct_bracketing("(")
False
>>> correct_bracketing("()")
True
>>> correct_bracketing("(()())")
True
>>> correct_bracketing(")(()")
False
"""
#[SOLUTION]
depth = 0
for b in brackets:
if b == "(":
depth += 1
else:
depth -= 1
if depth < 0:
return False
return depth == 0
#[CHECK]
METADATA = {}
def check(candidate):
assert candidate("()")
assert candidate("(()())")
assert candidate("()()(()())()")
assert candidate("()()((()()())())(()()(()))")
assert not candidate("((()())))")
assert not candidate(")(()")
assert not candidate("(")
assert not candidate("((((")
assert not candidate(")")
assert not candidate("(()")
assert not candidate("()()(()())())(()")
assert not candidate("()()(()())()))()")
|
ENTRY_POINT = 'pluck'
#[PROMPT]
def pluck(arr):
"""
"Given an array representing a branch of a tree that has non-negative integer nodes
your task is to pluck one of the nodes and return it.
The plucked node should be the node with the smallest even value.
If multiple nodes with the same smallest even value are found return the node that has smallest index.
The plucked node should be returned in a list, [ smalest_value, its index ],
If there are no even values or the given array is empty, return [].
Example 1:
Input: [4,2,3]
Output: [2, 1]
Explanation: 2 has the smallest even value, and 2 has the smallest index.
Example 2:
Input: [1,2,3]
Output: [2, 1]
Explanation: 2 has the smallest even value, and 2 has the smallest index.
Example 3:
Input: []
Output: []
Example 4:
Input: [5, 0, 3, 0, 4, 2]
Output: [0, 1]
Explanation: 0 is the smallest value, but there are two zeros,
so we will choose the first zero, which has the smallest index.
Constraints:
* 1 <= nodes.length <= 10000
* 0 <= node.value
"""
#[SOLUTION]
if(len(arr) == 0): return []
evens = list(filter(lambda x: x%2 == 0, arr))
if(evens == []): return []
return [min(evens), arr.index(min(evens))]
#[CHECK]
def check(candidate):
# Check some simple cases
assert True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate([4,2,3]) == [2, 1], "Error"
assert candidate([1,2,3]) == [2, 1], "Error"
assert candidate([]) == [], "Error"
assert candidate([5, 0, 3, 0, 4, 2]) == [0, 1], "Error"
# Check some edge cases that are easy to work out by hand.
assert True, "This prints if this assert fails 2 (also good for debugging!)"
assert candidate([1, 2, 3, 0, 5, 3]) == [0, 3], "Error"
assert candidate([5, 4, 8, 4 ,8]) == [4, 1], "Error"
assert candidate([7, 6, 7, 1]) == [6, 1], "Error"
assert candidate([7, 9, 7, 1]) == [], "Error"
|
ENTRY_POINT = 'add'
#[PROMPT]
def add(lst):
"""Given a non-empty list of integers lst. add the even elements that are at odd indices..
Examples:
add([4, 2, 6, 7]) ==> 2
"""
#[SOLUTION]
return sum([lst[i] for i in range(1, len(lst), 2) if lst[i]%2 == 0])
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate([4, 88]) == 88
assert candidate([4, 5, 6, 7, 2, 122]) == 122
assert candidate([4, 0, 6, 7]) == 0
assert candidate([4, 4, 6, 8]) == 12
# Check some edge cases that are easy to work out by hand.
|
ENTRY_POINT = 'any_int'
#[PROMPT]
def any_int(x, y, z):
'''
Create a function that takes 3 numbers.
Returns true if one of the numbers is equal to the sum of the other two, and all numbers are integers.
Returns false in any other cases.
Examples
    any_int(5, 2, 7) ➞ True
    any_int(3, 2, 2) ➞ False
    any_int(3, -2, 1) ➞ True
    any_int(3.6, -2.2, 2) ➞ False
'''
#[SOLUTION]
if isinstance(x,int) and isinstance(y,int) and isinstance(z,int):
if (x+y==z) or (x+z==y) or (y+z==x):
return True
return False
return False
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate(2, 3, 1)==True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(2.5, 2, 3)==False, "This prints if this assert fails 2 (good for debugging!)"
assert candidate(1.5, 5, 3.5)==False, "This prints if this assert fails 3 (good for debugging!)"
assert candidate(2, 6, 2)==False, "This prints if this assert fails 4 (good for debugging!)"
assert candidate(4, 2, 2)==True, "This prints if this assert fails 5 (good for debugging!)"
assert candidate(2.2, 2.2, 2.2)==False, "This prints if this assert fails 6 (good for debugging!)"
assert candidate(-4, 6, 2)==True, "This prints if this assert fails 7 (good for debugging!)"
# Check some edge cases that are easy to work out by hand.
assert candidate(2,1,1)==True, "This prints if this assert fails 8 (also good for debugging!)"
assert candidate(3,4,7)==True, "This prints if this assert fails 9 (also good for debugging!)"
|
ENTRY_POINT = 'exchange'
#[PROMPT]
def exchange(lst1, lst2):
"""In this problem, you will implement a function that takes two lists of numbers,
and determines whether it is possible to perform an exchange of elements
between them to make lst1 a list of only even numbers.
There is no limit on the number of exchanged elements between lst1 and lst2.
If it is possible to exchange elements between the lst1 and lst2 to make
all the elements of lst1 to be even, return "YES".
Otherwise, return "NO".
For example:
exchange([1, 2, 3, 4], [1, 2, 3, 4]) => "YES"
exchange([1, 2, 3, 4], [1, 5, 3, 4]) => "NO"
It is assumed that the input lists will be non-empty.
"""
#[SOLUTION]
odd = 0
even = 0
for i in lst1:
if i%2 == 1:
odd += 1
for i in lst2:
if i%2 == 0:
even += 1
if even >= odd:
return "YES"
return "NO"
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate([1, 2, 3, 4], [1, 2, 3, 4]) == "YES"
assert candidate([1, 2, 3, 4], [1, 5, 3, 4]) == "NO"
assert candidate([1, 2, 3, 4], [2, 1, 4, 3]) == "YES"
assert candidate([5, 7, 3], [2, 6, 4]) == "YES"
assert candidate([5, 7, 3], [2, 6, 3]) == "NO"
assert candidate([3, 2, 6, 1, 8, 9], [3, 5, 5, 1, 1, 1]) == "NO"
# Check some edge cases that are easy to work out by hand.
assert candidate([100, 200], [200, 200]) == "YES"
|
ENTRY_POINT = 'reverse_delete'
#[PROMPT]
def reverse_delete(s,c):
"""Task
    We are given two strings s and c, you have to delete all the characters in s that are equal to any character in c
then check if the result string is palindrome.
A string is called palindrome if it reads the same backward as forward.
You should return a tuple containing the result string and True/False for the check.
Example
For s = "abcde", c = "ae", the result should be ('bcd',False)
For s = "abcdef", c = "b" the result should be ('acdef',False)
For s = "abcdedcba", c = "ab", the result should be ('cdedc',True)
"""
#[SOLUTION]
s = ''.join([char for char in s if char not in c])
return (s,s[::-1] == s)
#[CHECK]
def check(candidate):
assert candidate("abcde","ae") == ('bcd',False)
assert candidate("abcdef", "b") == ('acdef',False)
assert candidate("abcdedcba","ab") == ('cdedc',True)
assert candidate("dwik","w") == ('dik',False)
assert candidate("a","a") == ('',True)
assert candidate("abcdedcba","") == ('abcdedcba',True)
assert candidate("abcdedcba","v") == ('abcdedcba',True)
|
ENTRY_POINT = 'strange_sort_list'
#[PROMPT]
def strange_sort_list(lst):
'''
    Given a list of integers, return the list in strange order.
    Strange sorting is when you start with the minimum value,
    then the maximum of the remaining integers, then the minimum and so on.
Examples:
strange_sort_list([1, 2, 3, 4]) == [1, 4, 2, 3]
strange_sort_list([5, 5, 5, 5]) == [5, 5, 5, 5]
strange_sort_list([]) == []
'''
#[SOLUTION]
res, switch = [], True
while lst:
res.append(min(lst) if switch else max(lst))
lst.remove(res[-1])
switch = not switch
return res
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate([1, 2, 3, 4]) == [1, 4, 2, 3]
assert candidate([5, 6, 7, 8, 9]) == [5, 9, 6, 8, 7]
assert candidate([1, 2, 3, 4, 5]) == [1, 5, 2, 4, 3]
assert candidate([5, 6, 7, 8, 9, 1]) == [1, 9, 5, 8, 6, 7]
assert candidate([5, 5, 5, 5]) == [5, 5, 5, 5]
assert candidate([]) == []
assert candidate([1,2,3,4,5,6,7,8]) == [1, 8, 2, 7, 3, 6, 4, 5]
assert candidate([0,2,2,2,5,5,-5,-5]) == [-5, 5, -5, 5, 0, 2, 2, 2]
assert candidate([111111]) == [111111]
# Check some edge cases that are easy to work out by hand.
assert True
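# A hedged O(n log n) variant of the min/max alternation above: sort once and
# take elements from the two ends with a pair of indices, instead of calling
# min()/max() and remove() on the shrinking list (which is O(n^2) overall).
def strange_sort_list_sketch(lst):
    ordered = sorted(lst)
    lo, hi = 0, len(ordered) - 1
    res, take_min = [], True
    while lo <= hi:
        if take_min:
            res.append(ordered[lo])
            lo += 1
        else:
            res.append(ordered[hi])
            hi -= 1
        take_min = not take_min
    return res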
|
ENTRY_POINT = 'words_in_sentence'
#[PROMPT]
def words_in_sentence(sentence):
"""
    You are given a string representing a sentence;
    the sentence contains some words separated by a space,
    and you have to return a string that contains the words from the original sentence
    whose lengths are prime numbers,
    with the order of the words in the new string the same as in the original one.
Example 1:
Input: sentence = "This is a test"
Output: "is"
Example 2:
Input: sentence = "lets go for swimming"
Output: "go for"
Constraints:
* 1 <= len(sentence) <= 100
* sentence contains only letters
"""
#[SOLUTION]
new_lst = []
for word in sentence.split():
flg = 0
if len(word) == 1:
flg = 1
for i in range(2, len(word)):
if len(word)%i == 0:
flg = 1
if flg == 0 or len(word) == 2:
new_lst.append(word)
return " ".join(new_lst)
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate("This is a test") == "is"
assert candidate("lets go for swimming") == "go for"
assert candidate("there is no place available here") == "there is no place"
assert candidate("Hi I am Hussein") == "Hi am Hussein"
assert candidate("go for it") == "go for it"
# Check some edge cases that are easy to work out by hand.
assert candidate("here") == ""
assert candidate("here is") == "is"
|
ENTRY_POINT = 'check_if_last_char_is_a_letter'
#[PROMPT]
def check_if_last_char_is_a_letter(txt):
'''
Create a function that returns True if the last character
of a given string is an alphabetical character and is not
a part of a word, and False otherwise.
Note: "word" is a group of characters separated by space.
Examples:
check_if_last_char_is_a_letter("apple pie") β False
check_if_last_char_is_a_letter("apple pi e") β True
check_if_last_char_is_a_letter("apple pi e ") β False
check_if_last_char_is_a_letter("") β False
'''
#[SOLUTION]
check = txt.split(' ')[-1]
return True if len(check) == 1 and (97 <= ord(check.lower()) <= 122) else False
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate("apple") == False
assert candidate("apple pi e") == True
assert candidate("eeeee") == False
assert candidate("A") == True
assert candidate("Pumpkin pie ") == False
assert candidate("Pumpkin pie 1") == False
assert candidate("") == False
assert candidate("eeeee e ") == False
assert candidate("apple pie") == False
assert candidate("apple pi e ") == False
# Check some edge cases that are easy to work out by hand.
assert True
|
ENTRY_POINT = 'multiply'
#[PROMPT]
def multiply(a, b):
"""Complete the function that takes two integers and returns
the product of their unit digits.
Assume the input is always valid.
Examples:
multiply(148, 412) should return 16.
multiply(19, 28) should return 72.
multiply(2020, 1851) should return 0.
multiply(14,-15) should return 20.
"""
#[SOLUTION]
return abs(a % 10) * abs(b % 10)
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate(148, 412) == 16, "First test error: " + str(candidate(148, 412))
assert candidate(19, 28) == 72, "Second test error: " + str(candidate(19, 28))
assert candidate(2020, 1851) == 0, "Third test error: " + str(candidate(2020, 1851))
assert candidate(14,-15) == 20, "Fourth test error: " + str(candidate(14,-15))
assert candidate(76, 67) == 42, "Fifth test error: " + str(candidate(76, 67))
assert candidate(17, 27) == 49, "Sixth test error: " + str(candidate(17, 27))
# Check some edge cases that are easy to work out by hand.
assert candidate(0, 1) == 0, "1st edge test error: " + str(candidate(0, 1))
assert candidate(0, 0) == 0, "2nd edge test error: " + str(candidate(0, 0))
|
ENTRY_POINT = 'can_arrange'
FIX = """
Fixed typo arange -> arrange
Remove semicolon from solution
"""
#[PROMPT]
def can_arrange(arr):
"""Create a function which returns the index of the element such that after
removing that element the remaining array is itself sorted in ascending order.
If the given array is already sorted in ascending order then return -1.
Note: It is guaranteed that the array arr will either be sorted or it will
have only one element such that after its removal the given array
will become sorted in ascending order.
- The given array will not contain duplicate values.
Examples:
can_arrange([1,2,4,3,5]) = 3
can_arrange([1,2,3]) = -1
"""
#[SOLUTION]
ind=-1
i=1
while i<len(arr):
if arr[i]<arr[i-1]:
ind=i
i+=1
return ind
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate([1,2,4,3,5])==3
assert candidate([1,2,4,5])==-1
assert candidate([1,4,2,5,6,7,8,9,10])==2
# Check some edge cases that are easy to work out by hand.
assert candidate([])==-1
|
ENTRY_POINT = 'sorted_list_sum'
FIX = """
Add test case when input strings with equal length are not in sorted order.
"""
#[PROMPT]
def sorted_list_sum(lst):
"""Write a function that accepts a list of strings as a parameter,
deletes the strings that have odd lengths from it,
    and returns the resulting list in sorted order,
The list is always a list of strings and never an array of numbers,
and it may contain duplicates.
The order of the list should be ascending by length of each word, and you
should return the list sorted by that rule.
If two words have the same length, sort the list alphabetically.
The function should return a list of strings in sorted order.
You may assume that all words will have the same length.
For example:
assert list_sort(["aa", "a", "aaa"]) => ["aa"]
assert list_sort(["ab", "a", "aaa", "cd"]) => ["ab", "cd"]
"""
#[SOLUTION]
lst.sort()
new_lst = []
for i in lst:
if len(i)%2 == 0:
new_lst.append(i)
return sorted(new_lst, key=len)
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate(["aa", "a", "aaa"]) == ["aa"]
assert candidate(["school", "AI", "asdf", "b"]) == ["AI", "asdf", "school"]
assert candidate(["d", "b", "c", "a"]) == []
assert candidate(["d", "dcba", "abcd", "a"]) == ["abcd", "dcba"]
# Check some edge cases that are easy to work out by hand.
assert candidate(["AI", "ai", "au"]) == ["AI", "ai", "au"]
assert candidate(["a", "b", "b", "c", "c", "a"]) == []
assert candidate(['aaaa', 'bbbb', 'dd', 'cc']) == ["cc", "dd", "aaaa", "bbbb"]
|
ENTRY_POINT = 'max_element'
#[PROMPT]
def max_element(l: list):
"""Return maximum element in the list.
>>> max_element([1, 2, 3])
3
>>> max_element([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10])
123
"""
#[SOLUTION]
m = l[0]
for e in l:
if e > m:
m = e
return m
#[CHECK]
METADATA = {}
def check(candidate):
assert candidate([1, 2, 3]) == 3
assert candidate([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10]) == 123
|
ENTRY_POINT = 'sort_even'
FIX = """
Remove sort helper function
"""
#[PROMPT]
def sort_even(l: list):
"""This function takes a list l and returns a list l' such that
l' is identical to l in the odd indicies, while its values at the even indicies are equal
to the values of the even indicies of l, but sorted.
>>> sort_even([1, 2, 3])
[1, 2, 3]
>>> sort_even([5, 6, 3, 4])
[3, 6, 5, 4]
"""
#[SOLUTION]
evens = l[::2]
odds = l[1::2]
evens.sort()
ans = []
for e, o in zip(evens, odds):
ans.extend([e, o])
if len(evens) > len(odds):
ans.append(evens[-1])
return ans
#[CHECK]
METADATA = {}
def check(candidate):
assert tuple(candidate([1, 2, 3])) == tuple([1, 2, 3])
assert tuple(candidate([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10])) == tuple([-10, 3, -5, 2, -3, 3, 5, 0, 9, 1, 123])
assert tuple(candidate([5, 8, -12, 4, 23, 2, 3, 11, 12, -10])) == tuple([-12, 8, 3, 4, 5, 2, 12, 11, 23, -10])
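# A hedged alternative to the zip-based interleaving above: slice assignment
# replaces only the even indices with their sorted values and leaves the odd
# indices untouched, which is the whole specification in one step.
def sort_even_sketch(l: list):
    result = list(l)
    result[::2] = sorted(l[::2])
    return result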
|
ENTRY_POINT = 'parse_nested_parens'
#[PROMPT]
from typing import List
def parse_nested_parens(paren_string: str) -> List[int]:
""" Input to this function is a string represented multiple groups for nested parentheses separated by spaces.
For each of the group, output the deepest level of nesting of parentheses.
E.g. (()()) has maximum two levels of nesting while ((())) has three.
>>> parse_nested_parens('(()()) ((())) () ((())()())')
[2, 3, 1, 3]
"""
#[SOLUTION]
def parse_paren_group(s):
depth = 0
max_depth = 0
for c in s:
if c == '(':
depth += 1
max_depth = max(depth, max_depth)
else:
depth -= 1
return max_depth
return [parse_paren_group(x) for x in paren_string.split(' ') if x]
#[CHECK]
METADATA = {
'author': 'jt',
'dataset': 'test'
}
def check(candidate):
assert candidate('(()()) ((())) () ((())()())') == [2, 3, 1, 3]
assert candidate('() (()) ((())) (((())))') == [1, 2, 3, 4]
assert candidate('(()(())((())))') == [4]
|
ENTRY_POINT = 'triangle_area'
#[PROMPT]
def triangle_area(a, h):
"""Given length of a side and high return area for a triangle.
>>> triangle_area(5, 3)
7.5
"""
#[SOLUTION]
return a * h / 2.0
#[CHECK]
METADATA = {}
def check(candidate):
assert candidate(5, 3) == 7.5
assert candidate(2, 2) == 2.0
assert candidate(10, 8) == 40.0
|
ENTRY_POINT = 'fizz_buzz'
FIX = """
Update doc string to remove requirement for print.
"""
#[PROMPT]
def fizz_buzz(n: int):
"""Return the number of times the digit 7 appears in integers less than n which are divisible by 11 or 13.
>>> fizz_buzz(50)
0
>>> fizz_buzz(78)
2
>>> fizz_buzz(79)
3
"""
#[SOLUTION]
ns = []
for i in range(n):
if i % 11 == 0 or i % 13 == 0:
ns.append(i)
s = ''.join(list(map(str, ns)))
ans = 0
for c in s:
ans += (c == '7')
return ans
#[CHECK]
METADATA = {}
def check(candidate):
assert candidate(50) == 0
assert candidate(78) == 2
assert candidate(79) == 3
assert candidate(100) == 3
assert candidate(200) == 6
assert candidate(4000) == 192
assert candidate(10000) == 639
assert candidate(100000) == 8026
|
ENTRY_POINT = 'car_race_collision'
#[PROMPT]
def car_race_collision(n: int):
"""
Imagine a road that's a perfectly straight infinitely long line.
n cars are driving left to right; simultaneously, a different set of n cars
are driving right to left. The two sets of cars start out being very far from
    each other. All cars move at the same speed. Two cars are said to collide
when a car that's moving left to right hits a car that's moving right to left.
However, the cars are infinitely sturdy and strong; as a result, they continue moving
in their trajectory as if they did not collide.
This function outputs the number of such collisions.
"""
#[SOLUTION]
return n**2
#[CHECK]
METADATA = {}
def check(candidate):
assert candidate(2) == 4
assert candidate(3) == 9
assert candidate(4) == 16
assert candidate(8) == 64
assert candidate(10) == 100
|
ENTRY_POINT = 'monotonic'
FIX = """
Add a few more tests.
"""
#[PROMPT]
def monotonic(l: list):
"""Return True is list elements are monotonically increasing or decreasing.
>>> monotonic([1, 2, 4, 20])
True
>>> monotonic([1, 20, 4, 10])
False
>>> monotonic([4, 1, 0, -10])
True
"""
#[SOLUTION]
if l == sorted(l) or l == sorted(l, reverse=True):
return True
return False
#[CHECK]
METADATA = {}
def check(candidate):
assert candidate([1, 2, 4, 10]) == True
assert candidate([1, 2, 4, 20]) == True
assert candidate([1, 20, 4, 10]) == False
assert candidate([4, 1, 0, -10]) == True
assert candidate([4, 1, 1, 0]) == True
assert candidate([1, 2, 3, 2, 5, 60]) == False
assert candidate([1, 2, 3, 4, 5, 60]) == True
assert candidate([9, 9, 9, 9]) == True
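# A hedged single-pass alternative to the double-sort check above: compare
# adjacent pairs instead of building two sorted copies, O(n) instead of
# O(n log n).
def monotonic_sketch(l: list):
    non_decreasing = all(a <= b for a, b in zip(l, l[1:]))
    non_increasing = all(a >= b for a, b in zip(l, l[1:]))
    return non_decreasing or non_increasing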
|
ENTRY_POINT = 'decode_shift'
#[PROMPT]
def encode_shift(s: str):
"""
returns encoded string by shifting every character by 5 in the alphabet.
"""
return "".join([chr(((ord(ch) + 5 - ord("a")) % 26) + ord("a")) for ch in s])
def decode_shift(s: str):
"""
takes as input string encoded with encode_shift function. Returns decoded string.
"""
#[SOLUTION]
return "".join([chr(((ord(ch) - 5 - ord("a")) % 26) + ord("a")) for ch in s])
#[CHECK]
METADATA = {}
def check(candidate):
from random import randint, choice
import copy
import string
letters = string.ascii_lowercase
for _ in range(100):
str = ''.join(choice(letters) for i in range(randint(10, 20)))
encoded_str = encode_shift(str)
assert candidate(copy.deepcopy(encoded_str)) == str
|
ENTRY_POINT = 'digitSum'
#[PROMPT]
def digitSum(s):
"""Task
    Write a function that takes a string as input and returns the sum of only the
    upper-case characters' ASCII codes.
Examples:
digitSum("") => 0
digitSum("abAB") => 131
digitSum("abcCd") => 67
digitSum("helloE") => 69
digitSum("woArBld") => 131
digitSum("aAaaaXa") => 153
"""
#[SOLUTION]
if s == "": return 0
return sum(ord(char) if char.isupper() else 0 for char in s)
#[CHECK]
def check(candidate):
# Check some simple cases
assert True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate("") == 0, "Error"
assert candidate("abAB") == 131, "Error"
assert candidate("abcCd") == 67, "Error"
assert candidate("helloE") == 69, "Error"
assert candidate("woArBld") == 131, "Error"
assert candidate("aAaaaXa") == 153, "Error"
# Check some edge cases that are easy to work out by hand.
assert True, "This prints if this assert fails 2 (also good for debugging!)"
assert candidate(" How are yOu?") == 151, "Error"
assert candidate("You arE Very Smart") == 327, "Error"
|
ENTRY_POINT = 'x_or_y'
#[PROMPT]
def x_or_y(n, x, y):
"""A simple program which should return the value of x if n is
a prime number and should return the value of y otherwise.
Examples:
for x_or_y(7, 34, 12) == 34
for x_or_y(15, 8, 5) == 5
"""
#[SOLUTION]
if n == 1:
return y
    for i in range(2, n):
        if n % i == 0:
            return y
    else:
        return x
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate(7, 34, 12) == 34
assert candidate(15, 8, 5) == 5
assert candidate(3, 33, 5212) == 33
assert candidate(1259, 3, 52) == 3
assert candidate(7919, -1, 12) == -1
assert candidate(3609, 1245, 583) == 583
assert candidate(91, 56, 129) == 129
assert candidate(6, 34, 1234) == 1234
# Check some edge cases that are easy to work out by hand.
assert candidate(1, 2, 0) == 0
assert candidate(2, 2, 0) == 2
|
ENTRY_POINT = 'choose_num'
#[PROMPT]
def choose_num(x, y):
"""This function takes two positive numbers x and y and returns the
biggest even integer number that is in the range [x, y] inclusive. If
there's no such number, then the function should return -1.
For example:
choose_num(12, 15) = 14
choose_num(13, 12) = -1
"""
#[SOLUTION]
if x > y:
return -1
if y % 2 == 0:
return y
if x == y:
return -1
return y - 1
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate(12, 15) == 14
assert candidate(13, 12) == -1
assert candidate(33, 12354) == 12354
assert candidate(5234, 5233) == -1
assert candidate(6, 29) == 28
assert candidate(27, 10) == -1
# Check some edge cases that are easy to work out by hand.
assert candidate(7, 7) == -1
assert candidate(546, 546) == 546
|
ENTRY_POINT = 'move_one_ball'
#[PROMPT]
def move_one_ball(arr):
"""We have an array 'arr' of N integers arr[1], arr[2], ..., arr[N].The
numbers in the array will be randomly ordered. Your task is to determine if
it is possible to get an array sorted in non-decreasing order by performing
the following operation on the given array:
You are allowed to perform right shift operation any number of times.
One right shift operation means shifting all elements of the array by one
position in the right direction. The last element of the array will be moved to
the starting position in the array i.e. 0th index.
If it is possible to obtain the sorted array by performing the above operation
then return True else return False.
If the given array is empty then return True.
Note: The given list is guaranteed to have unique elements.
For Example:
move_one_ball([3, 4, 5, 1, 2])==>True
    Explanation: By performing 2 right shift operations, non-decreasing order can
    be achieved for the given array.
    move_one_ball([3, 5, 4, 1, 2])==>False
    Explanation: It is not possible to get non-decreasing order for the given
    array by performing any number of right shift operations.
"""
#[SOLUTION]
if len(arr)==0:
return True
sorted_array=sorted(arr)
my_arr=[]
min_value=min(arr)
min_index=arr.index(min_value)
my_arr=arr[min_index:]+arr[0:min_index]
for i in range(len(arr)):
if my_arr[i]!=sorted_array[i]:
return False
return True
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate([3, 4, 5, 1, 2])==True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate([3, 5, 10, 1, 2])==True
# Check some edge cases that are easy to work out by hand.
assert candidate([3, 5, 4, 1, 2])==False, "This prints if this assert fails 2 (also good for debugging!)"
assert candidate([])==True
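# A hedged alternative view of the rotation check above: an array can be sorted
# by right shifts exactly when, read cyclically, it "descends" at most once
# (arr[i] > arr[i+1] for at most one i, wrapping around). Assumes unique
# elements, as the prompt guarantees.
def move_one_ball_sketch(arr):
    if not arr:
        return True
    n = len(arr)
    descents = sum(1 for i in range(n) if arr[i] > arr[(i + 1) % n])
    return descents <= 1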
|
ENTRY_POINT = 'minSubArraySum'
#[PROMPT]
def minSubArraySum(nums):
"""
Given an array of integers nums, find the minimum sum of any non-empty sub-array
of nums.
Example
minSubArraySum([2, 3, 4, 1, 2, 4]) == 1
minSubArraySum([-1, -2, -3]) == -6
"""
#[SOLUTION]
max_sum = 0
s = 0
for num in nums:
s += -num
if (s < 0):
s = 0
max_sum = max(s, max_sum)
if max_sum == 0:
max_sum = max(-i for i in nums)
min_sum = -max_sum
return min_sum
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate([2, 3, 4, 1, 2, 4]) == 1, "This prints if this assert fails 1 (good for debugging!)"
assert candidate([-1, -2, -3]) == -6
assert candidate([-1, -2, -3, 2, -10]) == -14
assert candidate([-9999999999999999]) == -9999999999999999
assert candidate([0, 10, 20, 1000000]) == 0
assert candidate([-1, -2, -3, 10, -5]) == -6
assert candidate([100, -1, -2, -3, 10, -5]) == -6
assert candidate([10, 11, 13, 8, 3, 4]) == 3
assert candidate([100, -33, 32, -1, 0, -2]) == -33
# Check some edge cases that are easy to work out by hand.
assert candidate([-10]) == -10, "This prints if this assert fails 2 (also good for debugging!)"
assert candidate([7]) == 7
assert candidate([1, -1]) == -1
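# A hedged direct version of the negation trick above: run Kadane's algorithm
# for the minimum instead of negating, keeping the best (smallest) sum of any
# sub-array ending at the current element. Assumes nums is non-empty, as the
# prompt states.
def minSubArraySum_sketch(nums):
    best = current = nums[0]
    for num in nums[1:]:
        current = min(num, current + num)
        best = min(best, current)
    return best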
|
ENTRY_POINT = 'rolling_max'
#[PROMPT]
from typing import List, Tuple
def rolling_max(numbers: List[int]) -> List[int]:
""" From a given list of integers, generate a list of rolling maximum element found until given moment
in the sequence.
>>> rolling_max([1, 2, 3, 2, 3, 4, 2])
[1, 2, 3, 3, 3, 4, 4]
"""
#[SOLUTION]
running_max = None
result = []
for n in numbers:
if running_max is None:
running_max = n
else:
running_max = max(running_max, n)
result.append(running_max)
return result
#[CHECK]
METADATA = {
'author': 'jt',
'dataset': 'test'
}
def check(candidate):
assert candidate([]) == []
assert candidate([1, 2, 3, 4]) == [1, 2, 3, 4]
assert candidate([4, 3, 2, 1]) == [4, 4, 4, 4]
assert candidate([3, 2, 3, 100, 3]) == [3, 3, 3, 100, 100]
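# A hedged one-liner equivalent of the running-max loop above, using
# itertools.accumulate with max as the accumulation function.
from itertools import accumulate
def rolling_max_sketch(numbers: List[int]) -> List[int]:
    return list(accumulate(numbers, max))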
|
ENTRY_POINT = 'is_bored'
#[PROMPT]
def is_bored(S):
"""
You'll be given a string of words, and your task is to count the number
of boredoms. A boredom is a sentence that starts with the word "I".
Sentences are delimited by '.', '?' or '!'.
For example:
>>> is_bored("Hello world")
0
>>> is_bored("The sky is blue. The sun is shining. I love this weather")
1
"""
#[SOLUTION]
import re
sentences = re.split(r'[.?!]\s*', S)
return sum(sentence[0:2] == 'I ' for sentence in sentences)
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate("Hello world") == 0, "Test 1"
assert candidate("Is the sky blue?") == 0, "Test 2"
assert candidate("I love It !") == 1, "Test 3"
assert candidate("bIt") == 0, "Test 4"
assert candidate("I feel good today. I will be productive. will kill It") == 2, "Test 5"
assert candidate("You and I are going for a walk") == 0, "Test 6"
# Check some edge cases that are easy to work out by hand.
assert True, "This prints if this assert fails 2 (also good for debugging!)"
|
ENTRY_POINT = 'starts_one_ends'
#[PROMPT]
def starts_one_ends(n):
"""
    Given a positive integer n, return the count of n-digit
    positive integers that start or end with 1.
"""
#[SOLUTION]
if n == 1: return 1
return 18 * (10 ** (n - 2))
#[CHECK]
def check(candidate):
# Check some simple cases
assert True, "This prints if this assert fails 1 (good for debugging!)"
assert candidate(1) == 1
assert candidate(2) == 18
assert candidate(3) == 180
assert candidate(4) == 1800
assert candidate(5) == 18000
# Check some edge cases that are easy to work out by hand.
assert True, "This prints if this assert fails 2 (also good for debugging!)"
|