code | apis | extract_api
---|---|---
import chess
from math import inf
from time import time
from evaluate import evaluate


def minimax(game, depth, player, timeout):
    """Pick the best root move for `player` via alpha-beta search."""
    moves = game.board.legal_moves
    best_move = None
    if player == chess.WHITE:
        best_score = -inf
        for move in moves:
            game.move(move)
            # White maximises, so each reply is scored with the minimiser;
            # the alpha-beta window starts fully open at (-inf, inf).
            score = minimiser(game, depth, -inf, inf, timeout)
            if score > best_score:
                best_score = score
                best_move = move
            game.undo()
    else:
        best_score = inf
        for move in moves:
            game.move(move)
            score = maximiser(game, depth, -inf, inf, timeout)
            if score < best_score:
                best_score = score
                best_move = move
            game.undo()
    return best_move


def minimiser(game, depth, alpha, beta, timeout):
    if depth == 0 or game.board.is_game_over() or is_timeout(game, timeout):
        return evaluate(game)
    score = inf
    for move in game.board.legal_moves:
        game.move(move)
        score = min(maximiser(game, depth - 1, alpha, beta, timeout), score)
        game.undo()
        if score <= alpha:  # the maximiser already has a better option: prune
            return score
        beta = min(beta, score)
    return score


def maximiser(game, depth, alpha, beta, timeout):
    if depth == 0 or game.board.is_game_over() or is_timeout(game, timeout):
        return evaluate(game)
    score = -inf
    for move in game.board.legal_moves:
        game.move(move)
        score = max(minimiser(game, depth - 1, alpha, beta, timeout), score)
        game.undo()
        if score >= beta:  # the minimiser already has a better option: prune
            return score
        alpha = max(alpha, score)
    return score


def is_timeout(game, timeout):
    return time() - game.start_time > timeout
|
[
"evaluate.evaluate"
] |
[((822, 836), 'evaluate.evaluate', 'evaluate', (['game'], {}), '(game)\n', (830, 836), False, 'from evaluate import evaluate\n'), ((1223, 1237), 'evaluate.evaluate', 'evaluate', (['game'], {}), '(game)\n', (1231, 1237), False, 'from evaluate import evaluate\n'), ((1532, 1538), 'time.time', 'time', ([], {}), '()\n', (1536, 1538), False, 'from time import time\n')]
|
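The search above assumes a `game` wrapper that exposes `board`, `move`, `undo`, and a `start_time` stamp for the timeout check; none of that wrapper is shown in the row. The sketch below is a hypothetical minimal version built on python-chess (`chess.Board`, `push`, and `pop` are real library calls; the `Game` class itself is an assumption):

```python
# Hypothetical wrapper assumed by the minimax code above (not from the source).
import chess
from time import time

class Game:
    def __init__(self):
        self.board = chess.Board()   # standard starting position
        self.start_time = time()     # anchors is_timeout()

    def move(self, move):
        self.board.push(move)        # apply a move

    def undo(self):
        self.board.pop()             # take the last move back

# Usage sketch:
# game = Game()
# best = minimax(game, depth=3, player=chess.WHITE, timeout=5.0)
```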
import argparse
import datetime
import os
import copy
import time
import json
# import visdom
import torch
import numpy as np
import tempfile
from os.path import join as pjoin
from distutils.dir_util import copy_tree

import gym
import textworld
from textworld.gym import register_game, make_batch2
from agent import Agent
import generic
import reward_helper
import game_generator
import evaluate
from query import process_facts

request_infos = textworld.EnvInfos(description=True,
                                   inventory=True,
                                   verbs=True,
                                   location_names=True,
                                   location_nouns=True,
                                   location_adjs=True,
                                   object_names=True,
                                   object_nouns=True,
                                   object_adjs=True,
                                   facts=True,
                                   last_action=True,
                                   game=True,
                                   admissible_commands=True,
                                   extras=["object_locations", "object_attributes", "uuid"])


def train(data_path):
    time_1 = datetime.datetime.now()
    agent = Agent()
    # # visdom
    # viz = visdom.Visdom()
    # plt_win = None
    # eval_plt_win = None
    # viz_avg_correct_state_acc, viz_avg_qa_acc = [], []
    # viz_eval_sufficient_info_reward, viz_eval_qa_reward = [], []
    step_in_total = 0
    running_avg_qa_reward = generic.HistoryScoreCache(capacity=500)
    running_avg_sufficient_info_reward = generic.HistoryScoreCache(capacity=500)
    running_avg_qa_loss = generic.HistoryScoreCache(capacity=500)
    running_avg_correct_state_loss = generic.HistoryScoreCache(capacity=500)
    output_dir, data_dir = ".", "."
    json_file_name = agent.experiment_tag.replace(" ", "_")
    best_sum_reward_so_far = 0.0
    # load model from checkpoint
    if agent.load_pretrained:
        if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
            agent.load_pretrained_model(output_dir + "/" + agent.experiment_tag + "_model.pt")
            agent.update_target_net()
        elif os.path.exists(data_dir + "/" + agent.load_from_tag + ".pt"):
            agent.load_pretrained_model(data_dir + "/" + agent.load_from_tag + ".pt")
            agent.update_target_net()
        else:
            print("Failed to load pretrained model... couldn't find the checkpoint file...")

    # Create a temporary folder for the generated games.
    # It is not deleted upon error; a with statement would be safer.
    games_dir = tempfile.TemporaryDirectory(prefix="tw_games")
    games_dir = pjoin(games_dir.name, "")  # So the path ends with '/'.
    # copy grammar files into the tmp folder so that generation works smoothly
    assert os.path.exists("./textworld_data"), "Oh no! textworld_data folder is not there..."
    os.mkdir(games_dir)
    os.mkdir(pjoin(games_dir, "textworld_data"))
    copy_tree("textworld_data", games_dir + "textworld_data")
    if agent.run_eval:
        assert os.path.exists(pjoin(data_path, agent.testset_path)), "Oh no! test_set folder is not there..."
        os.mkdir(pjoin(games_dir, agent.testset_path))
        copy_tree(pjoin(data_path, agent.testset_path),
                  pjoin(games_dir, agent.testset_path))

    if agent.train_data_size == -1:
        game_queue_size = agent.batch_size * 5
        game_queue = []

    f = open("accuracies_avg_pretrained10games.txt", "a+", encoding="utf-8")
    episode_no = 0
    if agent.train_data_size == -1:
        # endless mode
        game_generator_queue = game_generator.game_generator_queue(
            path=games_dir, random_map=agent.random_map,
            question_type=agent.question_type,
            max_q_size=agent.batch_size * 2, nb_worker=8)
    else:
        # generate the training set
        all_training_games = game_generator.game_generator(
            path=games_dir, random_map=agent.random_map,
            question_type=agent.question_type,
            train_data_size=agent.train_data_size)
        all_training_games.sort()
        all_env_ids = None

    while True:
        if episode_no > agent.max_episode:
            break
        np.random.seed(episode_no)
        if agent.train_data_size == -1:
            # endless mode
            for _ in range(agent.batch_size):
                if not game_generator_queue.empty():
                    tmp_game = game_generator_queue.get()
                    if os.path.exists(tmp_game):
                        game_queue.append(tmp_game)
            if len(game_queue) == 0:
                time.sleep(0.1)
                continue
            can_delete_these = []
            if len(game_queue) > game_queue_size:
                can_delete_these = game_queue[:-game_queue_size]
                game_queue = game_queue[-game_queue_size:]
            sampled_games = np.random.choice(game_queue, agent.batch_size).tolist()
            env_ids = [register_game(gamefile, request_infos=request_infos)
                       for gamefile in sampled_games]
        else:
            if all_env_ids is None:
                all_env_ids = [register_game(gamefile, request_infos=request_infos)
                               for gamefile in all_training_games]
            env_ids = np.random.choice(all_env_ids, agent.batch_size).tolist()

        if len(env_ids) != agent.batch_size:  # either less than or greater than
            env_ids = np.random.choice(env_ids, agent.batch_size).tolist()

        env_id = make_batch2(env_ids, parallel=True)
        env = gym.make(env_id)
        env.seed(episode_no)

        obs, infos = env.reset()
        batch_size = len(obs)
        # generate question-answer pairs here
        questions, answers, reward_helper_info = game_generator.generate_qa_pairs(
            infos, question_type=agent.question_type, seed=episode_no)
        print("\n====================================================================================", episode_no)
        print(questions[0], answers[0])

        agent.train()
        agent.init(obs, infos)
        commands, last_facts, init_facts = [], [], []
        commands_per_step, game_facts_cache = [], []
        for i in range(batch_size):
            commands.append("restart")
            last_facts.append(None)
            init_facts.append(None)
            game_facts_cache.append([])
            commands_per_step.append(["restart"])

        observation_strings, possible_words = agent.get_game_info_at_certain_step(obs, infos)
        observation_strings = [a + " <|> " + item for a, item in zip(commands, observation_strings)]
        input_quest, input_quest_char, _ = agent.get_agent_inputs(questions)

        transition_cache = []
        print_cmds = []
        counting_rewards_np = []
        valid_command_rewards_np = []

        act_randomly = False if agent.noisy_net else episode_no < agent.learn_start_from_this_episode

        # push the initial state into the counting reward dict
        state_strings = agent.get_state_strings(infos)
        _ = agent.get_binarized_count(state_strings, update=True)

        for step_no in range(agent.max_nb_steps_per_episode):
            # update answerer input
            for i in range(batch_size):
                if agent.not_finished_yet[i] == 1:
                    agent.naozi.push_one(i, copy.copy(observation_strings[i]))
                if agent.prev_step_is_still_interacting[i] == 1:
                    new_facts = process_facts(
                        last_facts[i], infos["game"][i], infos["facts"][i],
                        infos["last_action"][i], commands[i])
                    # info used when computing the reward of an existence question
                    game_facts_cache[i].append(new_facts)
                    last_facts[i] = new_facts
                    if step_no == 0:
                        init_facts[i] = copy.copy(new_facts)

            # generate commands
            if agent.noisy_net:
                agent.reset_noise()  # Draw a new set of noisy weights
            observation_strings_w_history = agent.naozi.get()
            input_observation, input_observation_char, _ = agent.get_agent_inputs(
                observation_strings_w_history)
            commands, replay_info = agent.act(obs, infos, input_observation, input_observation_char,
                                              input_quest, input_quest_char, possible_words,
                                              random=act_randomly)
            for i in range(batch_size):
                commands_per_step[i].append(commands[i])
            replay_info = [observation_strings_w_history, questions, possible_words] + replay_info

            admissible_commands = [set(item) - set(["look", "wait", "inventory"])
                                   for item in infos["admissible_commands"]]
            vc_rewards = [float(c in ac) for c, ac in zip(commands, admissible_commands)]
            valid_command_rewards_np.append(np.array(vc_rewards))

            # pass commands into the env
            obs, _, _, infos = env.step(commands)
            # possible words do not depend on history, because one can only
            # interact with what is currently accessible
            observation_strings, possible_words = agent.get_game_info_at_certain_step(obs, infos)
            observation_strings = [a + " <|> " + item for a, item in zip(commands, observation_strings)]

            # counting rewards
            state_strings = agent.get_state_strings(infos)
            c_rewards = agent.get_binarized_count(state_strings, update=True)
            counting_rewards_np.append(np.array(c_rewards))

            if agent.noisy_net and step_in_total % agent.update_per_k_game_steps == 0:
                agent.reset_noise()  # Draw a new set of noisy weights

            if episode_no >= agent.learn_start_from_this_episode and step_in_total % agent.update_per_k_game_steps == 0:
                interaction_loss = agent.update_interaction()
                if interaction_loss is not None:
                    running_avg_correct_state_loss.push(interaction_loss)
                qa_loss = agent.update_qa()
                if qa_loss is not None:
                    running_avg_qa_loss.push(qa_loss)

            print_cmds.append(commands[0] if agent.prev_step_is_still_interacting[0] else "--")
            # force stopping
            if step_no == agent.max_nb_steps_per_episode - 1:
                replay_info[-1] = torch.zeros_like(replay_info[-1])
            transition_cache.append(replay_info)
            step_in_total += 1
            if (step_no == agent.max_nb_steps_per_episode - 1) or (step_no > 0 and np.sum(generic.to_np(replay_info[-1])) == 0):
                break

        print(" / ".join(print_cmds))

        # The agent has exhausted all steps; now answer the question.
        answerer_input = agent.naozi.get()
        answerer_input_observation, answerer_input_observation_char, answerer_observation_ids = agent.get_agent_inputs(
            answerer_input)
        chosen_word_indices = agent.answer_question_act_greedy(
            answerer_input_observation, answerer_input_observation_char,
            answerer_observation_ids, input_quest, input_quest_char)  # batch
        chosen_word_indices_np = generic.to_np(chosen_word_indices)
        chosen_answers = [agent.word_vocab[item] for item in chosen_word_indices_np]

        # rewards
        # qa reward
        qa_reward_np = reward_helper.get_qa_reward(answers, chosen_answers)
        # sufficient info rewards
        masks = [item[-1] for item in transition_cache]
        masks_np = [generic.to_np(item) for item in masks]
        # 1 1 0 0 0 --> 1 1 0 0 0 0
        game_finishing_mask = np.stack(
            masks_np + [np.zeros((batch_size,))], 0)  # game step+1 x batch size
        # 1 1 0 0 0 0 --> 0 1 0 0 0
        # game step x batch size
        game_finishing_mask = game_finishing_mask[:-1, :] - game_finishing_mask[1:, :]
        game_running_mask = np.stack(masks_np, 0)  # game step x batch size

        if agent.question_type == "location":
            # sufficient info reward: location question
            reward_helper_info["observation_before_finish"] = answerer_input
            reward_helper_info["game_finishing_mask"] = game_finishing_mask
            sufficient_info_reward_np = reward_helper.get_sufficient_info_reward_location(
                reward_helper_info)
        elif agent.question_type == "existence":
            # sufficient info reward: existence question
            reward_helper_info["observation_before_finish"] = answerer_input
            # facts before issuing the command (we want to stop at the correct state)
            reward_helper_info["game_facts_per_step"] = game_facts_cache
            reward_helper_info["init_game_facts"] = init_facts
            reward_helper_info["full_facts"] = infos["facts"]
            reward_helper_info["answers"] = answers
            reward_helper_info["game_finishing_mask"] = game_finishing_mask
            sufficient_info_reward_np = reward_helper.get_sufficient_info_reward_existence(
                reward_helper_info)
        elif agent.question_type == "attribute":
            # sufficient info reward: attribute question
            reward_helper_info["answers"] = answers
            # facts before and after issuing commands (we want to compare the difference)
            reward_helper_info["game_facts_per_step"] = game_facts_cache
            reward_helper_info["init_game_facts"] = init_facts
            reward_helper_info["full_facts"] = infos["facts"]
            # commands before and after issuing commands (we want to compare the difference)
            reward_helper_info["commands_per_step"] = commands_per_step
            reward_helper_info["game_finishing_mask"] = game_finishing_mask
            sufficient_info_reward_np = reward_helper.get_sufficient_info_reward_attribute(
                reward_helper_info)
        else:
            raise NotImplementedError

        # push qa experience into the qa replay buffer
        for b in range(batch_size):  # data points in batch
            # if the agent did not reach the correct state, do not push it into the replay buffer
            if np.sum(sufficient_info_reward_np[b]) == 0.0:
                continue
            agent.qa_replay_memory.push(
                False, qa_reward_np[b], answerer_input[b], questions[b], answers[b])

        # assign sufficient info reward and counting reward to the corresponding steps
        counting_rewards_np = np.stack(counting_rewards_np, 1)  # batch x game step
        valid_command_rewards_np = np.stack(valid_command_rewards_np, 1)  # batch x game step
        command_rewards_np = sufficient_info_reward_np + \
            counting_rewards_np * game_running_mask.T * agent.revisit_counting_lambda + \
            valid_command_rewards_np * game_running_mask.T * \
            agent.valid_command_bonus_lambda  # batch x game step
        command_rewards = generic.to_pt(
            command_rewards_np, enable_cuda=agent.use_cuda, type="float")  # batch x game step
        for i in range(command_rewards_np.shape[1]):
            transition_cache[i].append(command_rewards[:, i])
        print(command_rewards_np[0])

        # push command generation experience into the replay buffer
        for b in range(batch_size):
            is_prior = np.sum(command_rewards_np[b], 0) > 0.0
            for i in range(len(transition_cache)):
                batch_observation_strings, batch_question_strings, batch_possible_words, batch_chosen_indices, _, batch_rewards = transition_cache[i]
                is_final = True
                if masks_np[i][b] != 0:
                    is_final = False
                agent.command_generation_replay_memory.push(
                    is_prior, batch_observation_strings[b], batch_question_strings[b],
                    [item[b] for item in batch_possible_words],
                    [item[b] for item in batch_chosen_indices],
                    batch_rewards[b], is_final)
                if masks_np[i][b] == 0.0:
                    break

        # for printing
        r_qa = np.mean(qa_reward_np)
        r_sufficient_info = np.mean(np.sum(sufficient_info_reward_np, -1))
        running_avg_qa_reward.push(r_qa)
        running_avg_sufficient_info_reward.push(r_sufficient_info)
        print_rewards = np.mean(np.sum(command_rewards_np, -1))
        obs_string = answerer_input[0]
        print(obs_string)
        # finish game
        agent.finish_of_episode(episode_no, batch_size)
        # close env
        env.close()

        if agent.train_data_size == -1:
            # when games are generated on the fly,
            # remove all files (including .json and .ni) that have been used
            files_to_delete = []
            for gamefile in can_delete_these:
                if not gamefile.endswith(".ulx"):
                    continue
                files_to_delete.append(gamefile)
                files_to_delete.append(gamefile.replace(".ulx", ".json"))
                files_to_delete.append(gamefile.replace(".ulx", ".ni"))
            # print("rm -f {}".format(" ".join(files_to_delete)))
            os.system("rm -f {}".format(" ".join(files_to_delete)))

        episode_no += batch_size

        time_2 = datetime.datetime.now()
        print("Episode: {:3d} | time spent: {:s} | interaction loss: {:2.3f} | qa loss: {:2.3f} | rewards: {:2.3f} | qa acc: {:2.3f}/{:2.3f} | correct state: {:2.3f}/{:2.3f}".format(
            episode_no, str(time_2 - time_1).rsplit(".")[0],
            running_avg_correct_state_loss.get_avg(), running_avg_qa_loss.get_avg(),
            print_rewards, r_qa, running_avg_qa_reward.get_avg(),
            r_sufficient_info, running_avg_sufficient_info_reward.get_avg()))

        # add current qa accuracy to file
        f.write("{} ".format(running_avg_qa_reward.get_avg()))

        if episode_no < agent.learn_start_from_this_episode:
            continue
        if episode_no == 0 or (episode_no % agent.save_frequency > (episode_no - batch_size) % agent.save_frequency):
            continue

        eval_qa_reward, eval_sufficient_info_reward = 0.0, 0.0
        # evaluate
        if agent.run_eval:
            eval_qa_reward, eval_sufficient_info_reward = evaluate.evaluate(data_dir, agent)
            # if eval is run, then save the model by eval accuracy
            if eval_qa_reward + eval_sufficient_info_reward > best_sum_reward_so_far:
                best_sum_reward_so_far = eval_qa_reward + eval_sufficient_info_reward
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
        # save model
        elif agent.save_checkpoint:
            if running_avg_qa_reward.get_avg() + running_avg_sufficient_info_reward.get_avg() > best_sum_reward_so_far:
                best_sum_reward_so_far = running_avg_qa_reward.get_avg() + running_avg_sufficient_info_reward.get_avg()
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")

        # # plot using visdom
        # viz_avg_correct_state_acc.append(running_avg_sufficient_info_reward.get_avg())
        # viz_avg_qa_acc.append(running_avg_qa_reward.get_avg())
        # viz_eval_sufficient_info_reward.append(eval_sufficient_info_reward)
        # viz_eval_qa_reward.append(eval_qa_reward)
        # viz_x = np.arange(len(viz_avg_correct_state_acc)).tolist()
        # if plt_win is None:
        #     plt_win = viz.line(X=viz_x, Y=viz_avg_correct_state_acc,
        #                        opts=dict(title=agent.experiment_tag + "_train"),
        #                        name="correct state")
        #     viz.line(X=viz_x, Y=viz_avg_qa_acc,
        #              opts=dict(title=agent.experiment_tag + "_train"),
        #              win=plt_win, update='append', name="qa")
        # else:
        #     viz.line(X=[len(viz_avg_correct_state_acc) - 1], Y=[viz_avg_correct_state_acc[-1]],
        #              opts=dict(title=agent.experiment_tag + "_train"),
        #              win=plt_win,
        #              update='append', name="correct state")
        #     viz.line(X=[len(viz_avg_qa_acc) - 1], Y=[viz_avg_qa_acc[-1]],
        #              opts=dict(title=agent.experiment_tag + "_train"),
        #              win=plt_win,
        #              update='append', name="qa")
        # if eval_plt_win is None:
        #     eval_plt_win = viz.line(X=viz_x, Y=viz_eval_sufficient_info_reward,
        #                             opts=dict(title=agent.experiment_tag + "_eval"),
        #                             name="correct state")
        #     viz.line(X=viz_x, Y=viz_eval_qa_reward,
        #              opts=dict(title=agent.experiment_tag + "_eval"),
        #              win=eval_plt_win, update='append', name="qa")
        # else:
        #     viz.line(X=[len(viz_eval_sufficient_info_reward) - 1], Y=[viz_eval_sufficient_info_reward[-1]],
        #              opts=dict(title=agent.experiment_tag + "_eval"),
        #              win=eval_plt_win,
        #              update='append', name="correct state")
        #     viz.line(X=[len(viz_eval_qa_reward) - 1], Y=[viz_eval_qa_reward[-1]],
        #              opts=dict(title=agent.experiment_tag + "_eval"),
        #              win=eval_plt_win,
        #              update='append', name="qa")

        # write accuracies down into file
        _s = json.dumps({"time spent": str(time_2 - time_1).rsplit(".")[0],
                        "sufficient info": running_avg_sufficient_info_reward.get_avg(),
                        "qa": running_avg_qa_reward.get_avg(),
                        "eval sufficient info": eval_sufficient_info_reward,
                        "eval qa": eval_qa_reward})
        with open(output_dir + "/" + json_file_name + '.json', 'a+', encoding="utf-8") as outfile:
            outfile.write(_s + '\n')
            outfile.flush()

    f.close()
    # evaluate the agent's performance at the end of the experiment
    if agent.run_eval:
        evaluate.evaluate(data_dir, agent)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Train an agent.")
    parser.add_argument("data_path",
                        default="./",
                        help="where the data (games) are.")
    args = parser.parse_args()
    train(args.data_path)
|
[
"evaluate.evaluate"
] |
[((446, 770), 'textworld.EnvInfos', 'textworld.EnvInfos', ([], {'description': '(True)', 'inventory': '(True)', 'verbs': '(True)', 'location_names': '(True)', 'location_nouns': '(True)', 'location_adjs': '(True)', 'object_names': '(True)', 'object_nouns': '(True)', 'object_adjs': '(True)', 'facts': '(True)', 'last_action': '(True)', 'game': '(True)', 'admissible_commands': '(True)', 'extras': "['object_locations', 'object_attributes', 'uuid']"}), "(description=True, inventory=True, verbs=True,\n location_names=True, location_nouns=True, location_adjs=True,\n object_names=True, object_nouns=True, object_adjs=True, facts=True,\n last_action=True, game=True, admissible_commands=True, extras=[\n 'object_locations', 'object_attributes', 'uuid'])\n", (464, 770), False, 'import textworld\n'), ((1247, 1270), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1268, 1270), False, 'import datetime\n'), ((1283, 1290), 'agent.Agent', 'Agent', ([], {}), '()\n', (1288, 1290), False, 'from agent import Agent\n'), ((1557, 1596), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(500)'}), '(capacity=500)\n', (1582, 1596), False, 'import generic\n'), ((1638, 1677), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(500)'}), '(capacity=500)\n', (1663, 1677), False, 'import generic\n'), ((1713, 1752), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(500)'}), '(capacity=500)\n', (1738, 1752), False, 'import generic\n'), ((1790, 1829), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(500)'}), '(capacity=500)\n', (1815, 1829), False, 'import generic\n'), ((2749, 2795), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""tw_games"""'}), "(prefix='tw_games')\n", (2776, 2795), False, 'import tempfile\n'), ((2812, 2837), 'os.path.join', 'pjoin', (['games_dir.name', '""""""'], {}), "(games_dir.name, '')\n", (2817, 2837), True, 'from os.path import join as pjoin\n'), ((2942, 2976), 'os.path.exists', 'os.path.exists', (['"""./textworld_data"""'], {}), "('./textworld_data')\n", (2956, 2976), False, 'import os\n'), ((3038, 3057), 'os.mkdir', 'os.mkdir', (['games_dir'], {}), '(games_dir)\n', (3046, 3057), False, 'import os\n'), ((3111, 3168), 'distutils.dir_util.copy_tree', 'copy_tree', (['"""textworld_data"""', "(games_dir + 'textworld_data')"], {}), "('textworld_data', games_dir + 'textworld_data')\n", (3120, 3168), False, 'from distutils.dir_util import copy_tree\n'), ((22577, 22631), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train an agent."""'}), "(description='Train an agent.')\n", (22600, 22631), False, 'import argparse\n'), ((2034, 2103), 'os.path.exists', 'os.path.exists', (["(output_dir + '/' + agent.experiment_tag + '_model.pt')"], {}), "(output_dir + '/' + agent.experiment_tag + '_model.pt')\n", (2048, 2103), False, 'import os\n'), ((3071, 3105), 'os.path.join', 'pjoin', (['games_dir', '"""textworld_data"""'], {}), "(games_dir, 'textworld_data')\n", (3076, 3105), True, 'from os.path import join as pjoin\n'), ((3776, 3947), 'game_generator.game_generator_queue', 'game_generator.game_generator_queue', ([], {'path': 'games_dir', 'random_map': 'agent.random_map', 'question_type': 'agent.question_type', 'max_q_size': '(agent.batch_size * 2)', 'nb_worker': '(8)'}), '(path=games_dir, random_map=agent.\n random_map, question_type=agent.question_type, max_q_size=agent.\n batch_size * 2, nb_worker=8)\n', (3811, 3947), False, 
'import game_generator\n'), ((4026, 4178), 'game_generator.game_generator', 'game_generator.game_generator', ([], {'path': 'games_dir', 'random_map': 'agent.random_map', 'question_type': 'agent.question_type', 'train_data_size': 'agent.train_data_size'}), '(path=games_dir, random_map=agent.random_map,\n question_type=agent.question_type, train_data_size=agent.train_data_size)\n', (4055, 4178), False, 'import game_generator\n'), ((4336, 4362), 'numpy.random.seed', 'np.random.seed', (['episode_no'], {}), '(episode_no)\n', (4350, 4362), True, 'import numpy as np\n'), ((5665, 5700), 'textworld.gym.make_batch2', 'make_batch2', (['env_ids'], {'parallel': '(True)'}), '(env_ids, parallel=True)\n', (5676, 5700), False, 'from textworld.gym import register_game, make_batch2\n'), ((5716, 5732), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (5724, 5732), False, 'import gym\n'), ((5921, 6016), 'game_generator.generate_qa_pairs', 'game_generator.generate_qa_pairs', (['infos'], {'question_type': 'agent.question_type', 'seed': 'episode_no'}), '(infos, question_type=agent.question_type,\n seed=episode_no)\n', (5953, 6016), False, 'import game_generator\n'), ((11510, 11544), 'generic.to_np', 'generic.to_np', (['chosen_word_indices'], {}), '(chosen_word_indices)\n', (11523, 11544), False, 'import generic\n'), ((11717, 11769), 'reward_helper.get_qa_reward', 'reward_helper.get_qa_reward', (['answers', 'chosen_answers'], {}), '(answers, chosen_answers)\n', (11744, 11769), False, 'import reward_helper\n'), ((12312, 12333), 'numpy.stack', 'np.stack', (['masks_np', '(0)'], {}), '(masks_np, 0)\n', (12320, 12333), True, 'import numpy as np\n'), ((14854, 14886), 'numpy.stack', 'np.stack', (['counting_rewards_np', '(1)'], {}), '(counting_rewards_np, 1)\n', (14862, 14886), True, 'import numpy as np\n'), ((14956, 14993), 'numpy.stack', 'np.stack', (['valid_command_rewards_np', '(1)'], {}), '(valid_command_rewards_np, 1)\n', (14964, 14993), True, 'import numpy as np\n'), ((15318, 15393), 'generic.to_pt', 'generic.to_pt', (['command_rewards_np'], {'enable_cuda': 'agent.use_cuda', 'type': '"""float"""'}), "(command_rewards_np, enable_cuda=agent.use_cuda, type='float')\n", (15331, 15393), False, 'import generic\n'), ((16485, 16506), 'numpy.mean', 'np.mean', (['qa_reward_np'], {}), '(qa_reward_np)\n', (16492, 16506), True, 'import numpy as np\n'), ((17643, 17666), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17664, 17666), False, 'import datetime\n'), ((22500, 22534), 'evaluate.evaluate', 'evaluate.evaluate', (['data_dir', 'agent'], {}), '(data_dir, agent)\n', (22517, 22534), False, 'import evaluate\n'), ((2268, 2328), 'os.path.exists', 'os.path.exists', (["(data_dir + '/' + agent.load_from_tag + '.pt')"], {}), "(data_dir + '/' + agent.load_from_tag + '.pt')\n", (2282, 2328), False, 'import os\n'), ((3235, 3271), 'os.path.join', 'pjoin', (['data_path', 'agent.testset_path'], {}), '(data_path, agent.testset_path)\n', (3240, 3271), True, 'from os.path import join as pjoin\n'), ((3332, 3368), 'os.path.join', 'pjoin', (['games_dir', 'agent.testset_path'], {}), '(games_dir, agent.testset_path)\n', (3337, 3368), True, 'from os.path import join as pjoin\n'), ((3388, 3424), 'os.path.join', 'pjoin', (['data_path', 'agent.testset_path'], {}), '(data_path, agent.testset_path)\n', (3393, 3424), True, 'from os.path import join as pjoin\n'), ((3444, 3480), 'os.path.join', 'pjoin', (['games_dir', 'agent.testset_path'], {}), '(games_dir, agent.testset_path)\n', (3449, 3480), True, 'from os.path import join 
as pjoin\n'), ((11881, 11900), 'generic.to_np', 'generic.to_np', (['item'], {}), '(item)\n', (11894, 11900), False, 'import generic\n'), ((12656, 12725), 'reward_helper.get_sufficient_info_reward_location', 'reward_helper.get_sufficient_info_reward_location', (['reward_helper_info'], {}), '(reward_helper_info)\n', (12705, 12725), False, 'import reward_helper\n'), ((16543, 16580), 'numpy.sum', 'np.sum', (['sufficient_info_reward_np', '(-1)'], {}), '(sufficient_info_reward_np, -1)\n', (16549, 16580), True, 'import numpy as np\n'), ((16722, 16752), 'numpy.sum', 'np.sum', (['command_rewards_np', '(-1)'], {}), '(command_rewards_np, -1)\n', (16728, 16752), True, 'import numpy as np\n'), ((18599, 18633), 'evaluate.evaluate', 'evaluate.evaluate', (['data_dir', 'agent'], {}), '(data_dir, agent)\n', (18616, 18633), False, 'import evaluate\n'), ((4741, 4756), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4751, 4756), False, 'import time\n'), ((5114, 5166), 'textworld.gym.register_game', 'register_game', (['gamefile'], {'request_infos': 'request_infos'}), '(gamefile, request_infos=request_infos)\n', (5127, 5166), False, 'from textworld.gym import register_game, make_batch2\n'), ((9157, 9177), 'numpy.array', 'np.array', (['vc_rewards'], {}), '(vc_rewards)\n', (9165, 9177), True, 'import numpy as np\n'), ((9848, 9867), 'numpy.array', 'np.array', (['c_rewards'], {}), '(c_rewards)\n', (9856, 9867), True, 'import numpy as np\n'), ((10712, 10745), 'torch.zeros_like', 'torch.zeros_like', (['replay_info[-1]'], {}), '(replay_info[-1])\n', (10728, 10745), False, 'import torch\n'), ((13370, 13440), 'reward_helper.get_sufficient_info_reward_existence', 'reward_helper.get_sufficient_info_reward_existence', (['reward_helper_info'], {}), '(reward_helper_info)\n', (13420, 13440), False, 'import reward_helper\n'), ((14540, 14576), 'numpy.sum', 'np.sum', (['sufficient_info_reward_np[b]'], {}), '(sufficient_info_reward_np[b])\n', (14546, 14576), True, 'import numpy as np\n'), ((15704, 15736), 'numpy.sum', 'np.sum', (['command_rewards_np[b]', '(0)'], {}), '(command_rewards_np[b], 0)\n', (15710, 15736), True, 'import numpy as np\n'), ((4610, 4634), 'os.path.exists', 'os.path.exists', (['tmp_game'], {}), '(tmp_game)\n', (4624, 4634), False, 'import os\n'), ((5018, 5064), 'numpy.random.choice', 'np.random.choice', (['game_queue', 'agent.batch_size'], {}), '(game_queue, agent.batch_size)\n', (5034, 5064), True, 'import numpy as np\n'), ((5302, 5354), 'textworld.gym.register_game', 'register_game', (['gamefile'], {'request_infos': 'request_infos'}), '(gamefile, request_infos=request_infos)\n', (5315, 5354), False, 'from textworld.gym import register_game, make_batch2\n'), ((5434, 5481), 'numpy.random.choice', 'np.random.choice', (['all_env_ids', 'agent.batch_size'], {}), '(all_env_ids, agent.batch_size)\n', (5450, 5481), True, 'import numpy as np\n'), ((5595, 5638), 'numpy.random.choice', 'np.random.choice', (['env_ids', 'agent.batch_size'], {}), '(env_ids, agent.batch_size)\n', (5611, 5638), True, 'import numpy as np\n'), ((7665, 7773), 'query.process_facts', 'process_facts', (['last_facts[i]', "infos['game'][i]", "infos['facts'][i]", "infos['last_action'][i]", 'commands[i]'], {}), "(last_facts[i], infos['game'][i], infos['facts'][i], infos[\n 'last_action'][i], commands[i])\n", (7678, 7773), False, 'from query import process_facts\n'), ((12020, 12043), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {}), '((batch_size,))\n', (12028, 12043), True, 'import numpy as np\n'), ((14183, 14253), 
'reward_helper.get_sufficient_info_reward_attribute', 'reward_helper.get_sufficient_info_reward_attribute', (['reward_helper_info'], {}), '(reward_helper_info)\n', (14233, 14253), False, 'import reward_helper\n'), ((7533, 7566), 'copy.copy', 'copy.copy', (['observation_strings[i]'], {}), '(observation_strings[i])\n', (7542, 7566), False, 'import copy\n'), ((8049, 8069), 'copy.copy', 'copy.copy', (['new_facts'], {}), '(new_facts)\n', (8058, 8069), False, 'import copy\n'), ((10916, 10946), 'generic.to_np', 'generic.to_np', (['replay_info[-1]'], {}), '(replay_info[-1])\n', (10929, 10946), False, 'import generic\n')]
|
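The training loop above reports its progress through `generic.HistoryScoreCache(capacity=500)`, but the `generic` module is not part of this row. The sketch below is an assumed minimal implementation of just the interface the loop uses (`push` a score, `get_avg` over the most recent `capacity` entries), not the repository's actual code:

```python
from collections import deque

class HistoryScoreCache:
    """Fixed-capacity running-average cache (sketch of the interface the
    training loop assumes; the real `generic` module is not shown)."""

    def __init__(self, capacity=500):
        self.memory = deque(maxlen=capacity)  # old entries fall off automatically

    def push(self, value):
        self.memory.append(float(value))

    def get_avg(self):
        if len(self.memory) == 0:
            return 0.0
        return sum(self.memory) / len(self.memory)

# cache = HistoryScoreCache(capacity=500)
# cache.push(0.8); cache.push(0.6)
# cache.get_avg()  # 0.7
```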
import os
import time
import torch
from torch import nn
from cifar10 import load_data_cifar10
from evaluate import Accumulator, accurate_num, evaluate_accuracy
from alex import Alexnet

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
root_path = os.getcwd()
model_name = 'alexnet.pth'
model_path = os.path.join(root_path, 'model', model_name)
# make sure the checkpoint directory exists before torch.save is called
os.makedirs(os.path.dirname(model_path), exist_ok=True)

num_epochs = 2
batch_size = 128
momentum = 0.9
weight_decay = 0.0005  # L2 regularisation passed to SGD
lr_init = 0.01
classes = 10

alexnet = Alexnet(num_classes=classes).to(device)
train_iter, val_iter = load_data_cifar10(batch_size)
loss = nn.CrossEntropyLoss()
trainer = torch.optim.SGD(params=alexnet.parameters(), lr=lr_init,
                          momentum=momentum, weight_decay=weight_decay)


def train_epoch(net, train_iter, loss, updater):
    # accumulate (summed loss, correct predictions, number of examples)
    metric = Accumulator(3)
    net.train()  # ensure training mode after any earlier evaluation pass
    for imgs, labels in train_iter:
        imgs, labels = imgs.to(device), labels.to(device)
        labels_hat = net(imgs)
        l = loss(labels_hat, labels)
        updater.zero_grad()
        l.sum().backward()
        updater.step()
        metric.add(float(l.sum()), accurate_num(labels_hat, labels), labels.numel())
    return metric[0] / metric[2], metric[1] / metric[2]


def train(net, train_iter, val_iter, loss, num_epochs, updater):
    for epoch in range(num_epochs):
        train_loss, train_acc = train_epoch(net, train_iter, loss, updater)
        val_acc = evaluate_accuracy(net, val_iter)
        print('save model')
        state = {'model': alexnet.state_dict(), 'optimizer': trainer.state_dict(),
                 'epoch': epoch + 1}
        torch.save(state, model_path)
        print(f'epoch {epoch + 1}: train loss: {train_loss:.5f} train acc: {train_acc:.2%} val acc: {val_acc:.2%}')


start = time.time()
train(alexnet, train_iter, val_iter, loss, num_epochs, trainer)
end = time.time()
print('Time: {:.6f} s'.format(end - start))
|
[
"evaluate.Accumulator",
"evaluate.accurate_num",
"evaluate.evaluate_accuracy"
] |
[((279, 290), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (288, 290), False, 'import os\n'), ((333, 377), 'os.path.join', 'os.path.join', (['root_path', '"""model"""', 'model_name'], {}), "(root_path, 'model', model_name)\n", (345, 377), False, 'import os\n'), ((556, 585), 'cifar10.load_data_cifar10', 'load_data_cifar10', (['batch_size'], {}), '(batch_size)\n', (573, 585), False, 'from cifar10 import load_data_cifar10\n'), ((594, 615), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (613, 615), False, 'from torch import nn\n'), ((1766, 1777), 'time.time', 'time.time', ([], {}), '()\n', (1775, 1777), False, 'import time\n'), ((1850, 1861), 'time.time', 'time.time', ([], {}), '()\n', (1859, 1861), False, 'import time\n'), ((794, 808), 'evaluate.Accumulator', 'Accumulator', (['(3)'], {}), '(3)\n', (805, 808), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n'), ((226, 251), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (249, 251), False, 'import torch\n'), ((492, 520), 'alex.Alexnet', 'Alexnet', ([], {'num_classes': 'classes'}), '(num_classes=classes)\n', (499, 520), False, 'from alex import Alexnet\n'), ((1411, 1443), 'evaluate.evaluate_accuracy', 'evaluate_accuracy', (['net', 'val_iter'], {}), '(net, val_iter)\n', (1428, 1443), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n'), ((1604, 1633), 'torch.save', 'torch.save', (['state', 'model_path'], {}), '(state, model_path)\n', (1614, 1633), False, 'import torch\n'), ((1098, 1132), 'evaluate.accurate_num', 'accurate_num', (['classes_hat', 'classes'], {}), '(classes_hat, classes)\n', (1110, 1132), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n')]
|
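The script imports `Accumulator`, `accurate_num`, and `evaluate_accuracy` from a local `evaluate` module that is not shown in this row. Below is a hedged sketch of d2l-style helpers that satisfy the way the training loop calls them; the device handling inside `evaluate_accuracy` is an assumption, not the module's actual code:

```python
import torch

class Accumulator:
    """Sum n running quantities, e.g. (total loss, correct, examples)."""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def __getitem__(self, idx):
        return self.data[idx]

def accurate_num(y_hat, y):
    """Number of correct predictions in a batch of logits."""
    return (y_hat.argmax(dim=1) == y).sum().item()

@torch.no_grad()
def evaluate_accuracy(net, data_iter):
    """Mean accuracy of `net` over `data_iter` (assumes the model's own
    device for the inputs, mirroring the training loop)."""
    net.eval()
    device = next(net.parameters()).device
    metric = Accumulator(2)  # (correct predictions, total examples)
    for X, y in data_iter:
        X, y = X.to(device), y.to(device)
        metric.add(accurate_num(net(X), y), y.numel())
    return metric[0] / metric[1]
```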
from load import LoadData
from preprocess import PreprocessData
from model import BidaF
from evaluate import evaluate as eval
from load import ptbtokenizer
import torch.optim as optim
import torch.nn as nn
import torch
import pickle
import time
import sys
import argparse
import copy
import numpy as np
import datetime
import os

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def create_checkpoint_paths(folder_path):
    if not os.path.isdir(folder_path):
        os.mkdir(folder_path)


def save_vocab(vocab, path):
    with open(path, 'wb') as output:
        pickle.dump(vocab, output)


def predict_data(context_tokenized, question_tokenized, word_vocab, char_vocab):
    """Attach a single-example, torchtext-style batch to the function object."""
    longest_context_word = max([len(w) for w in context_tokenized])
    longest_question_word = max([len(w) for w in question_tokenized])
    context_words = (torch.tensor([[word_vocab[word.lower()] for word in context_tokenized]]),
                     torch.tensor([len(context_tokenized)]))
    question_words = (torch.tensor([[word_vocab[word.lower()] for word in question_tokenized]]),
                      torch.tensor([len(question_tokenized)]))
    context_char = []
    for word in context_tokenized:
        _context_word = []
        for c_index in range(longest_context_word):
            if c_index < len(word):
                _context_word.append(char_vocab[word[c_index]])
            else:
                _context_word.append(char_vocab['<pad>'])
        context_char.append(_context_word)
    context_char = torch.tensor([context_char])
    question_char = []
    for word in question_tokenized:
        _question_word = []
        for c_index in range(longest_question_word):
            if c_index < len(word):
                _question_word.append(char_vocab[word[c_index]])
            else:
                _question_word.append(char_vocab['<pad>'])
        question_char.append(_question_word)
    question_char = torch.tensor([question_char])
    predict_data.context_words = context_words
    predict_data.question_words = question_words
    predict_data.context_char = context_char
    predict_data.question_char = question_char
    predict_data.batch_size = 1


def get_prediction(context, question, model, word_vocab, char_vocab):
    model.eval()
    context_tokenized = ptbtokenizer(context, context=True)
    question_tokenized = ptbtokenizer(question)
    predict_data(context_tokenized, question_tokenized, word_vocab, char_vocab)
    p1, p2 = model(predict_data)
    answer = " ".join(context_tokenized[p1.argmax(1): p2.argmax(1)])
    return answer, p1.argmax(1), p2.argmax(1)


def train(model, optimizer, criterion, path, epochs, epochs_log, model_name):
    start_time = time.time()
    best_dev_acc_exact = -1.0
    best_dev_acc_f1 = -1.0
    for epoch in range(epochs):
        # SETTING MODEL IN TRAINING MODE
        model.train()
        epoch_loss = 0
        batch_num = 0.0
        examples_count = 0.0
        for train_data in iter(data_preprocessor.train_iter):
            batch_num += 1.0
            examples_count += train_data.batch_size
            p1, p2 = model(train_data)
            optimizer.zero_grad()
            try:
                batch_loss = criterion(p1, train_data.start_idx) + criterion(p2, train_data.end_idx)
            except Exception as e:
                print(e)
                return (p1, p2, train_data)
            epoch_loss += batch_loss.item()
            batch_loss.backward()
            optimizer.step()
            time_delta = datetime.timedelta(seconds=np.round(time.time() - start_time, 0))
            sys.stdout.write(f'\rEpoch:{epoch} | Batch:{batch_num} | Time Running: {time_delta}')
            break  # NOTE: stops after the first batch of each epoch (debugging leftover in the source)
        if epoch % epochs_log == 0:
            train_loss = epoch_loss / examples_count
            dev_accuracy, dev_loss = eval(data_preprocessor.dev_iter,
                                           model,
                                           criterion,
                                           data_preprocessor.WORDS.vocab,
                                           calculate_loss=True,
                                           calculate_accuracy=True)
            dev_accuracy_exact = dev_accuracy.groupby('id')['Exact'].max().mean()
            dev_accuracy_f1 = dev_accuracy.groupby('id')['F1'].max().mean()
            train_accuracy, _ = eval(data_preprocessor.train_iter,
                                      model,
                                      criterion,
                                      data_preprocessor.WORDS.vocab,
                                      calculate_loss=False,
                                      calculate_accuracy=True)
            train_accuracy_exact = train_accuracy.groupby('id')['Exact'].max().mean()
            train_accuracy_f1 = train_accuracy.groupby('id')['F1'].max().mean()
            print(f'\nTrain Loss:{train_loss:.4f} Train Acc Exact:{train_accuracy_exact:.4f} Train Acc F1:{train_accuracy_f1:.4f}')
            print(f'Validation Loss :{dev_loss:.4f} Dev Acc Exact:{dev_accuracy_exact:.4f} Dev Acc F1:{dev_accuracy_f1:.4f}')
            print('Test Prediction Results')
            predict_context = "He was speaking after figures showed that the country's economy shrank by 20.4% in April - " \
                              "the largest monthly contraction on record - as the country spent its first full month in lockdown."
            predict_ques = "By how much did the country's economy shrink"
            print(get_prediction(predict_context,
                                 predict_ques,
                                 model,
                                 data_preprocessor.WORDS.vocab,
                                 data_preprocessor.CHAR.vocab))
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': epoch_loss,
            }, path + '/' + model_name + '.torch')
            if dev_accuracy_f1 > best_dev_acc_f1:
                best_dev_acc_f1 = dev_accuracy_f1
                best_dev_acc_exact = dev_accuracy_exact
                torch.save(model, path + '/' + 'best_' + model_name + '.torch')
    print(f'Best Validation Results '
          f'Dev Acc Exact:{best_dev_acc_exact:.4f} '
          f'Dev Acc F1:{best_dev_acc_f1:.4f}')


parser = argparse.ArgumentParser(description='BiDaF for Machine Comprehension & Cloze-Style Reading Comprehension - Training')
# Input data
parser.add_argument('-data', type=str, default='../../../../data/squad v1.1/', help='Path to input data')
# Checkpoints paths
parser.add_argument('-data_checkpoint', type=str, default='./data_checkpoints', help='Path to store preprocessed data checkpoints')
parser.add_argument('-model_checkpoint', type=str, default='./model_checkpoints', help='Path to store modelled data checkpoints')
parser.add_argument('-model_name', type=str, default=None, required=True, help='Provide a name for the model for storing at checkpoints')
parser.add_argument('-dataset_name', type=str, default=None, required=True, help='Name of the dataset')
parser.add_argument('-load_data', type=str2bool, nargs='?', default=False, help='Whether to load raw data or use preprocessed data')
# Modelling parameters
parser.add_argument('-epochs', type=int, default=20, help='No. of epochs to run')
parser.add_argument('-batch_size', type=int, default=60, help='Number of examples in each batch')
parser.add_argument('-glove_size', type=int, default=100, help='Size of GloVe vector to use')
parser.add_argument('-char_embedding_size', type=int, default=100, help='Size of character embeddings to be used')
parser.add_argument('-kernel_size', type=int, default=5, help='Kernel size')
parser.add_argument('-channels_count', type=int, default=100, help='Count of channels for character embeddings')
parser.add_argument('-learning_rate', type=float, default=0.5, help='Learning rate')
parser.add_argument('-epoch_log', type=int, default=2, help='Print logs after xx epochs')
args = parser.parse_args()

if args.load_data:
    _ = LoadData(data_path=args.data,
             checkpoint_path=args.data_checkpoint,
             train_file=args.dataset_name + '_' + 'train.csv',
             dev_file=args.dataset_name + '_' + 'dev.csv')

# Create checkpoint folders for between-session storage
create_checkpoint_paths(args.data_checkpoint)
create_checkpoint_paths(args.model_checkpoint)

data_preprocessor = PreprocessData(data_path=args.data_checkpoint,
                                  glove_size=args.glove_size,
                                  batch_size=args.batch_size,
                                  train_file='train1.csv',
                                  dev_file='dev1.csv')
                                  # train_file=args.dataset_name + '_' + 'train.csv',
                                  # dev_file=args.dataset_name + '_' + 'dev.csv')

save_vocab(data_preprocessor.WORDS.vocab, args.model_checkpoint + '/' + args.dataset_name + '_' + 'WORDS.vocab')
save_vocab(data_preprocessor.CHAR.vocab, args.model_checkpoint + '/' + args.dataset_name + '_' + 'CHAR.vocab')

# Initializing BiDAF model
model = BidaF(data_preprocessor.WORDS,
              data_preprocessor.CHAR,
              char_embedding_size=args.char_embedding_size,
              char_conv_kernel_size=args.kernel_size,
              char_conv_channels_count=args.channels_count).to(device)

# Initialize optimizer
# optimizer = optim.Adam(model.parameters(), lr=0.001)
optimizer = optim.Adadelta(model.parameters(), lr=args.learning_rate, rho=0.999)
criterion = nn.CrossEntropyLoss()

## This is a piece of code for retraining the model from where it left off
# if os.path.isfile(args.model_checkpoint + '/model.torch'):
#     checkpoint = torch.load(args.model_checkpoint + '/model.torch')
#     model.load_state_dict(checkpoint['model_state_dict'])
#     optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
#     epoch = checkpoint['epoch']
#     loss = checkpoint['loss']
#     print('Loaded Trained Model')
#     print(f'Trained for {epoch} Epochs, Achieved {loss} Training Loss')

_error = train(model=model,
               optimizer=optimizer,
               criterion=criterion,
               path=args.model_checkpoint,
               epochs=args.epochs,
               epochs_log=args.epoch_log,
               model_name=args.dataset_name + '_' + args.model_name)
|
[
"evaluate.evaluate"
] |
[((6793, 6920), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""BiDaF for Machine Comprehension & Cloze-Style Reading Comprehension - Training"""'}), "(description=\n 'BiDaF for Machine Comprehension & Cloze-Style Reading Comprehension - Training'\n )\n", (6816, 6920), False, 'import argparse\n'), ((8900, 9052), 'preprocess.PreprocessData', 'PreprocessData', ([], {'data_path': 'args.data_checkpoint', 'glove_size': 'args.glove_size', 'batch_size': 'args.batch_size', 'train_file': '"""train1.csv"""', 'dev_file': '"""dev1.csv"""'}), "(data_path=args.data_checkpoint, glove_size=args.glove_size,\n batch_size=args.batch_size, train_file='train1.csv', dev_file='dev1.csv')\n", (8914, 9052), False, 'from preprocess import PreprocessData\n'), ((10045, 10066), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (10064, 10066), True, 'import torch.nn as nn\n'), ((868, 894), 'pickle.dump', 'pickle.dump', (['vocab', 'output'], {}), '(vocab, output)\n', (879, 894), False, 'import pickle\n'), ((1828, 1856), 'torch.tensor', 'torch.tensor', (['[context_char]'], {}), '([context_char])\n', (1840, 1856), False, 'import torch\n'), ((2241, 2270), 'torch.tensor', 'torch.tensor', (['[question_char]'], {}), '([question_char])\n', (2253, 2270), False, 'import torch\n'), ((2605, 2640), 'load.ptbtokenizer', 'ptbtokenizer', (['context'], {'context': '(True)'}), '(context, context=True)\n', (2617, 2640), False, 'from load import ptbtokenizer\n'), ((2666, 2688), 'load.ptbtokenizer', 'ptbtokenizer', (['question'], {}), '(question)\n', (2678, 2688), False, 'from load import ptbtokenizer\n'), ((3017, 3028), 'time.time', 'time.time', ([], {}), '()\n', (3026, 3028), False, 'import time\n'), ((8510, 8682), 'load.LoadData', 'LoadData', ([], {'data_path': 'args.data', 'checkpoint_path': 'args.data_checkpoint', 'train_file': "(args.dataset_name + '_' + 'train.csv')", 'dev_file': "(args.dataset_name + '_' + 'dev.csv')"}), "(data_path=args.data, checkpoint_path=args.data_checkpoint,\n train_file=args.dataset_name + '_' + 'train.csv', dev_file=args.\n dataset_name + '_' + 'dev.csv')\n", (8518, 8682), False, 'from load import LoadData\n'), ((365, 390), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (388, 390), False, 'import torch\n'), ((745, 771), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (758, 771), False, 'import os\n'), ((781, 802), 'os.mkdir', 'os.mkdir', (['folder_path'], {}), '(folder_path)\n', (789, 802), False, 'import os\n'), ((9620, 9815), 'model.BidaF', 'BidaF', (['data_preprocessor.WORDS', 'data_preprocessor.CHAR'], {'char_embedding_size': 'args.char_embedding_size', 'char_conv_kernel_size': 'args.kernel_size', 'char_conv_channels_count': 'args.channels_count'}), '(data_preprocessor.WORDS, data_preprocessor.CHAR, char_embedding_size=\n args.char_embedding_size, char_conv_kernel_size=args.kernel_size,\n char_conv_channels_count=args.channels_count)\n', (9625, 9815), False, 'from model import BidaF\n'), ((636, 689), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (662, 689), False, 'import argparse\n'), ((3918, 4008), 'sys.stdout.write', 'sys.stdout.write', (["f'\\rEpoch:{epoch} | Batch:{batch_num} | Time Running: {time_delta}'"], {}), "(\n f'\\rEpoch:{epoch} | Batch:{batch_num} | Time Running: {time_delta}')\n", (3934, 4008), False, 'import sys\n'), ((4150, 4282), 'evaluate.evaluate', 'eval', (['data_preprocessor.dev_iter', 
'model', 'criterion', 'data_preprocessor.WORDS.vocab'], {'calculate_loss': '(True)', 'calculate_accuracy': '(True)'}), '(data_preprocessor.dev_iter, model, criterion, data_preprocessor.WORDS.\n vocab, calculate_loss=True, calculate_accuracy=True)\n', (4154, 4282), True, 'from evaluate import evaluate as eval\n'), ((4699, 4834), 'evaluate.evaluate', 'eval', (['data_preprocessor.train_iter', 'model', 'criterion', 'data_preprocessor.WORDS.vocab'], {'calculate_loss': '(False)', 'calculate_accuracy': '(True)'}), '(data_preprocessor.train_iter, model, criterion, data_preprocessor.\n WORDS.vocab, calculate_loss=False, calculate_accuracy=True)\n', (4703, 4834), True, 'from evaluate import evaluate as eval\n'), ((6576, 6639), 'torch.save', 'torch.save', (['model', "(path + '/' + 'best_' + model_name + '.torch')"], {}), "(model, path + '/' + 'best_' + model_name + '.torch')\n", (6586, 6639), False, 'import torch\n'), ((3876, 3887), 'time.time', 'time.time', ([], {}), '()\n', (3885, 3887), False, 'import time\n')]
|
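`predict_data` above fakes a torchtext batch by hanging tensors off the function object itself. A more conventional way to express the same single-example batch is a `types.SimpleNamespace`; the sketch below is an alternative phrasing, not the repository's code, and assumes the model reads exactly these attributes:

```python
from types import SimpleNamespace

# Conventional stand-in for the attribute-on-function trick used by
# predict_data above (assumes the model reads exactly these fields
# from a torchtext-style batch object).
def make_single_example_batch(context_words, question_words, context_char, question_char):
    return SimpleNamespace(
        context_words=context_words,    # (token-id tensor, length tensor)
        question_words=question_words,
        context_char=context_char,      # [1, n_words, max_word_len] char ids
        question_char=question_char,
        batch_size=1,
    )

# batch = make_single_example_batch(context_words, question_words,
#                                   context_char, question_char)
# p1, p2 = model(batch)
```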
"""
Main code for weakly supervised object localization
===================================================
*Author*: <NAME>, Northwestern Polytechnical University
"""
import torch
import os
import numpy as np
import time
import datetime
from model.model import WSL, load_pretrained
import data_utils.load_voc as load_voc
import argparse
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from evaluate.rst_for_corloc import spn_torch_rst
from evaluate.corloc_eval import corloc
parser = argparse.ArgumentParser(description='test torch version of spn')
parser.add_argument('--batch_size', default=64, type=int, metavar='BT',
help='batch size')
data_dir = '/home/zhangyu/data/VOC2007_test/'
root_dir = '/disk3/zhangyu/WeaklyLoc/spn_torch/'
attention_maps_h5 = os.path.join(root_dir, 'rst/h5/attention_maps.h5')
imgDir = os.path.join(data_dir, 'JPEGImages')
train_annos = os.path.join(data_dir, 'train_annos')
trainval_annos = os.path.join(data_dir, 'Annotations')
att_map_dir = os.path.join(root_dir, 'rst/h5big/')
cls_number = 20
save_file = os.path.join(att_map_dir, 'predict{}.csv'.format(
datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')))
def main():
global args
args = parser.parse_args()
test_loader = prepare_data(trainval_annos)
test(test_loader)
corloc_rst = corloc(save_file, trainval_annos)
print('Corloc results: {}'.format(corloc_rst))
def test(test_loader):
for i, data in enumerate(test_loader):
print('Testing: [{0}/{1}] '.format(i, len(test_loader)))
batch_names = data['filename']
img_szs = data['sz'].numpy()
target = data['class'].float().numpy()
# generate results
spn_torch_rst(batch_names, target, img_szs, att_map_dir, save_file)
def prepare_data(annos_path):
# prepare dataloader for training and validation
train_dataset = load_voc.VOCDataset(
xmlsPath=annos_path, imgDir=imgDir,
transform=transforms.Compose([
load_voc.Rescale((224, 224)),
load_voc.ToTensor(),
load_voc.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]))
train_loader = DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=None,
num_workers=1, drop_last=True)
return train_loader
if __name__ == '__main__':
main()
|
[
"evaluate.corloc_eval.corloc",
"evaluate.rst_for_corloc.spn_torch_rst"
] |
[((524, 588), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""test torch version of spn"""'}), "(description='test torch version of spn')\n", (547, 588), False, 'import argparse\n'), ((816, 866), 'os.path.join', 'os.path.join', (['root_dir', '"""rst/h5/attention_maps.h5"""'], {}), "(root_dir, 'rst/h5/attention_maps.h5')\n", (828, 866), False, 'import os\n'), ((876, 912), 'os.path.join', 'os.path.join', (['data_dir', '"""JPEGImages"""'], {}), "(data_dir, 'JPEGImages')\n", (888, 912), False, 'import os\n'), ((927, 964), 'os.path.join', 'os.path.join', (['data_dir', '"""train_annos"""'], {}), "(data_dir, 'train_annos')\n", (939, 964), False, 'import os\n'), ((982, 1019), 'os.path.join', 'os.path.join', (['data_dir', '"""Annotations"""'], {}), "(data_dir, 'Annotations')\n", (994, 1019), False, 'import os\n'), ((1034, 1070), 'os.path.join', 'os.path.join', (['root_dir', '"""rst/h5big/"""'], {}), "(root_dir, 'rst/h5big/')\n", (1046, 1070), False, 'import os\n'), ((1358, 1391), 'evaluate.corloc_eval.corloc', 'corloc', (['save_file', 'trainval_annos'], {}), '(save_file, trainval_annos)\n', (1364, 1391), False, 'from evaluate.corloc_eval import corloc\n'), ((2234, 2336), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': 'None', 'num_workers': '(1)', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=None,\n num_workers=1, drop_last=True)\n', (2244, 2336), False, 'from torch.utils.data import DataLoader\n'), ((1734, 1801), 'evaluate.rst_for_corloc.spn_torch_rst', 'spn_torch_rst', (['batch_names', 'target', 'img_szs', 'att_map_dir', 'save_file'], {}), '(batch_names, target, img_szs, att_map_dir, save_file)\n', (1747, 1801), False, 'from evaluate.rst_for_corloc import spn_torch_rst\n'), ((1154, 1177), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1175, 1177), False, 'import datetime\n'), ((2023, 2051), 'data_utils.load_voc.Rescale', 'load_voc.Rescale', (['(224, 224)'], {}), '((224, 224))\n', (2039, 2051), True, 'import data_utils.load_voc as load_voc\n'), ((2065, 2084), 'data_utils.load_voc.ToTensor', 'load_voc.ToTensor', ([], {}), '()\n', (2082, 2084), True, 'import data_utils.load_voc as load_voc\n'), ((2098, 2171), 'data_utils.load_voc.Normalize', 'load_voc.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2116, 2171), True, 'import data_utils.load_voc as load_voc\n')]
|
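The `corloc` call above scores correct localization, but `evaluate.corloc_eval` is not included in this row. By the usual convention, CorLoc counts an image as correctly localized when the predicted box reaches IoU >= 0.5 with a ground-truth box of the target class; the helper below illustrates that criterion and is not the module's implementation:

```python
def iou(box_a, box_b):
    """Intersection-over-union of two boxes given as (x1, y1, x2, y2)."""
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

# An image counts toward CorLoc when iou(pred_box, gt_box) >= 0.5
# for some ground-truth box of the predicted class.
```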
import os
import logging
import time
import random
import json
from copy import deepcopy
import numpy as np
import torch
from tqdm import tqdm
from transformers import BertTokenizer
from dora import DORA
from config import Config
from reader import Reader
import ontology
from db import DB
from evaluate import MultiWozEvaluator
def test(config):
logger = logging.getLogger("DORA")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)
torch.cuda.set_device(config.cuda_device)
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
db = DB(config.db_path)
reader = Reader(db, config)
start = time.time()
logger.info("Loading data...")
reader.load_data("test")
end = time.time()
logger.info("Loaded. {} secs".format(end-start))
evaluator = MultiWozEvaluator("test", config.data_path, config.db_path, config.assets_path)
model = DORA(db, config).cuda()
model.eval()
# load saved model, optimizer
assert config.save_path is not None
load(model, config.save_path, config.cuda_device)
max_iter = len(list(reader.make_batch(reader.test)))
slot_acc = 0
joint_acc = 0
batch_count = 0
gate_acc = 0
domain_acc = 0
with open(os.path.join(config.assets_path, "never_split.txt"), "r") as f:
never_split = f.read().split("\n")
tokenizer = BertTokenizer(os.path.join(config.assets_path, "vocab.txt"), never_split=never_split)
test_dial_gens = {}
test_dial_gens_decoded = {}
with torch.no_grad():
iterator = reader.make_batch(reader.test)
t = tqdm(enumerate(iterator), total=max_iter, ncols=150, position=0, leave=True)
for batch_idx, batch in t:
# don't shuffle slot order nor use true previous domain state and belief
inputs, contexts, segments, dial_ids = reader.make_input(batch, mode="test")
batch_size = len(contexts[0])
turns = len(inputs)
inputs[0]["prev_belief"] = inputs[0]["prev_belief"].tolist()
dial_gens = [[] for i in range(batch_size)]
dial_gens_decoded = [[] for i in range(batch_size)]
belief_gens = [[] for i in range(batch_size)]
action_gens = [[] for i in range(batch_size)]
for turn_idx in range(turns):
turn_context = torch.zeros(batch_size, config.max_context_len, dtype=torch.int64).cuda()
turn_segment = torch.zeros(batch_size, config.max_context_len, dtype=torch.int64).cuda()
max_context_len = 0
for idx in range(len(contexts[turn_idx])):
turn_context_ = contexts[turn_idx][idx].tolist()
turn_segment_ = segments[turn_idx][idx].tolist()
try:
turn_context_ = turn_context_[:turn_context_.index(config.pad_idx)]
except:
turn_context_ = turn_context_
turn_segment_ = turn_segment_[:len(turn_context_)]
# add previous domain state to context
domain_state = []
prev_domain_state = inputs[turn_idx]["prev_domain_state"]
for domain_idx, domain in enumerate(ontology.all_domains):
domain_state.append("[DOMAIN]")
domain_state.append("[{}]".format(domain))
if prev_domain_state[idx, domain_idx] == 1:
domain_state.append("[ON]")
else:
domain_state.append("[OFF]")
domain_state = " ".join(domain_state)
turn_context_ += tokenizer.encode(domain_state, add_special_tokens=False)
# add previous belief state to context
for slot_idx in range(len(ontology.all_info_slots)):
slot = ontology.all_info_slots[slot_idx]
domain, slot = slot.split("-")
slot = "[{}] - {}".format(domain, slot)
value = inputs[turn_idx]["prev_belief"][idx][slot_idx]
if config.slot_idx in value:
value = tokenizer.encode("none")[1:]
turn_context_ += tokenizer.convert_tokens_to_ids(["[SLOT]"])
turn_context_ += tokenizer.encode(slot, add_special_tokens=False)
turn_context_ += tokenizer.convert_tokens_to_ids(["-"])
turn_context_ += value[:-1] # except [EOS]
turn_context_.append(tokenizer.sep_token_id) # [SEP]
context_len = len(turn_context_)
max_context_len = max(max_context_len, context_len)
turn_context[idx, :context_len] = torch.tensor(turn_context_[:1] + turn_context_[-(min(context_len, config.max_context_len)-1):])
turn_segment[idx, :len(turn_segment_)] = torch.tensor(turn_segment_)
turn_context = turn_context[:, :max_context_len]
turn_segment = turn_segment[:, :max_context_len]
domain_acc_, gate_acc_, belief_acc, domain_state, belief_gen, action_gen, response_gen = \
model.forward(inputs[turn_idx], turn_context, turn_segment, "val", config.postprocessing)
if turn_idx < turns-1:
inputs[turn_idx+1]["prev_belief"] = deepcopy(belief_gen) # generated belief, not ground truth
inputs[turn_idx+1]["prev_domain_state"] = domain_state
domain_acc += domain_acc_ * batch_size
gate_acc += gate_acc_ * batch_size
slot_acc += belief_acc.sum(dim=1).sum(dim=0)
joint_acc += (belief_acc.mean(dim=1) == 1).sum(dim=0).float()
batch_count += batch_size
torch.cuda.empty_cache()
# for evaluation
response_gens = [response[:-1] for response in response_gen]
response_gens_decoded = [tokenizer.decode(response[:-1]) for response in response_gen]
action_gen_decoded = [tokenizer.decode(action[:-1]) for action in action_gen]
for b_idx, belief in enumerate(belief_gen):
belief_gen[b_idx] = [tokenizer.decode(value[:-1]) for value in belief]
for b_idx in range(batch_size):
dial_gens[b_idx].append(response_gens[b_idx])
dial_gens_decoded[b_idx].append(response_gens_decoded[b_idx])
belief = {}
for slot_idx, slot in enumerate(ontology.all_info_slots):
belief[slot] = belief_gen[b_idx][slot_idx]
belief_gens[b_idx].append(belief)
action_gens[b_idx].append(action_gen_decoded[b_idx])
t.set_description("iter: {}".format(batch_idx+1))
time.sleep(1)
for b_idx in range(batch_size):
dial_id = dial_ids[b_idx]
dial_id = "{}.json".format(dial_id)
test_dial_gens[dial_id] = dial_gens[b_idx]
test_dial_gens_decoded[dial_id] = {}
test_dial_gens_decoded[dial_id]["response"] = dial_gens_decoded[b_idx]
test_dial_gens_decoded[dial_id]["belief_state"] = belief_gens[b_idx]
test_dial_gens_decoded[dial_id]["action"] = action_gens[b_idx]
gate_acc = gate_acc.item() / batch_count * 100
domain_acc = domain_acc.item() / batch_count * 100
slot_acc = slot_acc.item() / batch_count / len(ontology.all_info_slots) * 100
joint_acc = joint_acc.item() / batch_count * 100
test_dial = json.load(open(os.path.join(config.data_path, "test_data.json"), "r"))
_, inform_rate, success_rate, bleu_score = evaluator.evaluateModel(test_dial_gens_decoded, test_dial_gens, test_dial, mode='test', \
save_path=config.save_path.split("/")[1].split(".")[0], make_report=config.make_report)
logger.info("accuracy(domain/gate/joint/slot): {:.2f}, {:.2f}, {:.2f}, {:.2f}, inform: {:.2f}, success: {:.2f}, bleu: {:.2f}"\
.format(domain_acc, gate_acc, joint_acc, slot_acc, inform_rate, success_rate, bleu_score))
def load(model, save_path, cuda_device):
checkpoint = torch.load(save_path, map_location = lambda storage, loc: storage.cuda(cuda_device))
model.load_state_dict(checkpoint["model"])
if __name__ == "__main__":
config = Config()
parser = config.parser
config = parser.parse_args()
test(config)
|
[
"evaluate.MultiWozEvaluator"
] |
[((364, 389), 'logging.getLogger', 'logging.getLogger', (['"""DORA"""'], {}), "('DORA')\n", (381, 389), False, 'import logging\n'), ((445, 468), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (466, 468), False, 'import logging\n'), ((516, 557), 'torch.cuda.set_device', 'torch.cuda.set_device', (['config.cuda_device'], {}), '(config.cuda_device)\n', (537, 557), False, 'import torch\n'), ((563, 587), 'random.seed', 'random.seed', (['config.seed'], {}), '(config.seed)\n', (574, 587), False, 'import random\n'), ((592, 619), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (606, 619), True, 'import numpy as np\n'), ((624, 654), 'torch.manual_seed', 'torch.manual_seed', (['config.seed'], {}), '(config.seed)\n', (641, 654), False, 'import torch\n'), ((659, 694), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['config.seed'], {}), '(config.seed)\n', (681, 694), False, 'import torch\n'), ((699, 738), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['config.seed'], {}), '(config.seed)\n', (725, 738), False, 'import torch\n'), ((838, 856), 'db.DB', 'DB', (['config.db_path'], {}), '(config.db_path)\n', (840, 856), False, 'from db import DB\n'), ((871, 889), 'reader.Reader', 'Reader', (['db', 'config'], {}), '(db, config)\n', (877, 889), False, 'from reader import Reader\n'), ((902, 913), 'time.time', 'time.time', ([], {}), '()\n', (911, 913), False, 'import time\n'), ((988, 999), 'time.time', 'time.time', ([], {}), '()\n', (997, 999), False, 'import time\n'), ((1070, 1149), 'evaluate.MultiWozEvaluator', 'MultiWozEvaluator', (['"""test"""', 'config.data_path', 'config.db_path', 'config.assets_path'], {}), "('test', config.data_path, config.db_path, config.assets_path)\n", (1087, 1149), False, 'from evaluate import MultiWozEvaluator\n'), ((8784, 8792), 'config.Config', 'Config', ([], {}), '()\n', (8790, 8792), False, 'from config import Config\n'), ((1635, 1680), 'os.path.join', 'os.path.join', (['config.assets_path', '"""vocab.txt"""'], {}), "(config.assets_path, 'vocab.txt')\n", (1647, 1680), False, 'import os\n'), ((1774, 1789), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1787, 1789), False, 'import torch\n'), ((1163, 1179), 'dora.DORA', 'DORA', (['db', 'config'], {}), '(db, config)\n', (1167, 1179), False, 'from dora import DORA\n'), ((1498, 1549), 'os.path.join', 'os.path.join', (['config.assets_path', '"""never_split.txt"""'], {}), "(config.assets_path, 'never_split.txt')\n", (1510, 1549), False, 'import os\n'), ((7241, 7254), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7251, 7254), False, 'import time\n'), ((8031, 8079), 'os.path.join', 'os.path.join', (['config.data_path', '"""test_data.json"""'], {}), "(config.data_path, 'test_data.json')\n", (8043, 8079), False, 'import os\n'), ((6182, 6206), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6204, 6206), False, 'import torch\n'), ((5271, 5298), 'torch.tensor', 'torch.tensor', (['turn_segment_'], {}), '(turn_segment_)\n', (5283, 5298), False, 'import torch\n'), ((5743, 5763), 'copy.deepcopy', 'deepcopy', (['belief_gen'], {}), '(belief_gen)\n', (5751, 5763), False, 'from copy import deepcopy\n'), ((2601, 2667), 'torch.zeros', 'torch.zeros', (['batch_size', 'config.max_context_len'], {'dtype': 'torch.int64'}), '(batch_size, config.max_context_len, dtype=torch.int64)\n', (2612, 2667), False, 'import torch\n'), ((2706, 2772), 'torch.zeros', 'torch.zeros', (['batch_size', 'config.max_context_len'], {'dtype': 'torch.int64'}), '(batch_size, 
config.max_context_len, dtype=torch.int64)\n', (2717, 2772), False, 'import torch\n')]
|
#!/usr/bin/python
import sys,os
import argparse
import glob
from generate_windows import Generate_windows,Get_block_position,split_chr_bedGraph2
from evaluate import Evaluate
from scanTranscriptome_forward import Scan_Forward
from scanTranscriptome_reverse import Scan_Backward
from postprocess import Postprocess
#
from multiprocessing import Pool
import datetime
import logging as log
import gc
#
def get_ref(fa_file):
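    # read a FASTA file into a dict of chromosome name -> sequence,
    # skipping long scaffold names and mitochondrial contigs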
ref = dict()
fa = open(fa_file,'r')
skip = False
chro = ''
seq = ''
for line in fa.readlines():
line = line.rstrip('\n')
if('>' in line):
chro = line.split(' ')[0]
chro = chro[1:]
if('chr' not in chro):
chro = 'chr'+chro
if(len(chro)>6 or 'M' in chro):
skip = True
continue
else:
skip = False
else:
if(skip):
continue
if chro in ref.keys():
ref[chro] += line
else:
ref[chro] = line
fa.close()
return ref
def get_genome_sequence(fa_file):
f = open(fa_file,"r")
line = f.readline()
line = line.rstrip('\n')
f.close()
return line
def args():
parser = argparse.ArgumentParser()
parser.add_argument('--out_dir', default='out_dir', help='out dir')
parser.add_argument('--input_file', default=None, help='unstranded wig file')
parser.add_argument('--input_plus', default=None, help='plus strand wig file')
parser.add_argument('--input_minus', default=None, help='minus strand wig file')
parser.add_argument('--fa_file',default=None,help='path to one line fa file')
parser.add_argument('--keep_temp',default=None,help='if you want to keep temporary file, set to "yes"')
parser.add_argument('--window', default=201, type=int, help='input length')
parser.add_argument('--name', default='sample',help='sample name')
parser.add_argument("--model", help="the model weights file", required=True)
parser.add_argument("--RNASeqRCThreshold",default=0.05,type=float,help="RNA-Seq Coverage Threshold")
    parser.add_argument('--threshold', default=0,type=int,help='peaks shorter than this threshold will be filtered out')
    parser.add_argument('--penality', default=1,type=int,help='penalty for prediction scores lower than 0.5')
    parser.add_argument('--DB_file', default=None, help='polyA database file')
    parser.add_argument('--depth', default=1, type=float,help='total number of mapped reads (in millions)')
    parser.add_argument('--t', default = 30, type = int, help='number of threads')
argv = parser.parse_args()
out_dir = argv.out_dir
input_file = argv.input_file
input_plus = argv.input_plus
input_minus = argv.input_minus
fa_file = argv.fa_file
keep_temp = argv.keep_temp
window = argv.window
name = argv.name
model = argv.model
rst = argv.RNASeqRCThreshold
threshold = argv.threshold
penality = argv.penality
DB_file = argv.DB_file
depth = argv.depth
thread = argv.t
return out_dir,input_file,input_plus,input_minus,fa_file,keep_temp,window,name,model,rst,threshold,penality,DB_file,depth,thread
def run_single_block(input_list):
global ref
#print(ref.keys())
baseName,model,out_dir,rst,window,keep_temp,threshold,penality,DB_file,input_file,chromosome,strand,depth,start,end = input_list
#log_dir = out_dir+'/log'
#if not os.path.exists(log_dir):
# os.makedirs(log_dir)
#log.basicConfig(filename='%s/%s.log'%(log_dir,baseName), level=log.INFO)
print("Generating blocks ...%s %d %s"%(baseName,start,end))
    #### Generate sliding windows
gw_start_time = datetime.datetime.now()
block = split_chr_bedGraph2(out_dir,input_file,chromosome,strand,window,ref[chromosome],depth,start,end)
ww = open(baseName,'w')
for a,b,c in block:
ww.write('%s\t%s\t%s\n'%(a,b,c))
ww.close()
gw_end_time = datetime.datetime.now()
print("Generate blocks used time: {}\n".format(gw_end_time - gw_start_time))
print("Evaluating blocks ...%s %d %s"%(baseName,start,end))
ev_start_time = datetime.datetime.now()
Evaluate(baseName,block,model,out_dir,rst,window,keep_temp)
del block #destroyed the block reference
gc.collect() #manually run garbage collection process
ev_end_time = datetime.datetime.now()
print("Evaluated blocks used time: {}\n".format(ev_end_time - ev_start_time))
print("Postprocessing blocks ...%s %d %s"%(baseName,start,end))
ps_start_time = datetime.datetime.now()
Scan_Forward(baseName,threshold,penality,out_dir)
Scan_Backward(baseName,threshold,penality,out_dir)
if(keep_temp != 'yes'):
predict_file = out_dir+'/predict/'+baseName+'.txt'
os.system('rm %s'%predict_file)
Postprocess(DB_file,baseName,threshold,penality,out_dir)
ps_end_time = datetime.datetime.now()
print("Postprocessed blocks used time: {}\n".format(ps_end_time - ps_start_time))
if(keep_temp != 'yes'):
forward_file=out_dir+"/maxSum/%s.forward.%d.%d.txt"%(baseName,threshold,penality)
backward_file=out_dir+"/maxSum/%s.backward.%d.%d.txt"%(baseName,threshold,penality)
os.system('rm %s %s'%(forward_file,backward_file))
#print('Finished postprocessing...%s\n'%baseName)
return [gw_end_time-gw_start_time,ev_end_time-ev_start_time,ps_end_time-ps_start_time]
#def main(out_dir,input_file,input_plus,input_minus,fa_file,keep_temp,window,name,model,rst,threshold,penality,DB_file,depth,thread):
if __name__ == '__main__':
out_dir,input_file,input_plus,input_minus,fa_file,keep_temp,window,name,model,rst,threshold,penality,DB_file,depth,thread = args()
if(out_dir[-1] == '/'):
out_dir = out_dir[0:-1]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_dir = out_dir+'/'+name
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if input_file is not None:
input_plus = input_file
input_minus = input_file
files = (input_plus,input_minus)
strands = ('+','-')
print("Load reference")
ref = dict()
#ref = get_ref(fa_file)#
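    # fa_file points to a directory of one-line-per-chromosome FASTA files;
    # the chromosome name is taken from the second dot-separated field of each file name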
for root, ds, fs in os.walk(fa_file):
#print(root,ds,fs)
for f in fs:
fil = root+'/'+f
chro_seq = get_genome_sequence(fil)
chro = f.split('.')[1]
ref[chro] = chro_seq
print("Finished Load reference")
log = open('%s/%s.log'%(out_dir,name),'w')
for i in range(2):
input_file = files[i]
strand = strands[i]
print("Processing %s strand"%strand)
blocks = Get_block_position(out_dir,input_file,strand,window,1e6)
block_input_list = []
for chromosome,strand,block_num,start,end in blocks:
baseName = '%s.%s_%s_%s'%(name,chromosome,strand,block_num)
print('%s\t%d\t%d'%(baseName,start,end))
block_input_list.append([baseName,model,out_dir,rst,window,keep_temp,threshold,penality,DB_file,input_file,chromosome,strand,depth,start,end])
print("Predicting results ...")
pred_start_time = datetime.datetime.now()
#block_out_indic = []
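        # run one worker per block: each generates windows, evaluates them, and postprocesses the result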
with Pool(thread) as p:
#p.map(run_single_block,block_input_list)
time_lists = p.map(run_single_block,block_input_list)
#p.close()
p.terminate()
p.join()
for i,input_list in enumerate(block_input_list):
baseName = input_list[0]
gw_time,ev_time,ps_time = time_lists[i]
log.write('%s\t%s\t%s\t%s\n'%(baseName,str(gw_time),str(ev_time),str(ps_time)))
# print('%s\t%s\t%s\t%s\n'%(baseName,str(gw_time),str(ev_time),str(ps_time)))
pred_end_time = datetime.datetime.now()
print("Prediction used time: {}".format(pred_end_time - pred_start_time))
log.close()
out_file = '%s/%s.predicted.txt' %(out_dir,name)
ww = open(out_file,'w')
if(DB_file is not None):
ww.write('predicted_pasid\tdb_pasid\tdb_diff\tscore\n')
else:
ww.write('predicted_pasid\tscore\n')
ww.close()
os.system('cat %s/maxSum/*bidirection* >>%s'%(out_dir,out_file))
if(keep_temp != 'yes'):
os.system('rm -rf %s/predict %s/maxSum'%(out_dir,out_dir))
print("Job Done!")
#if __name__ == '__main__':
# main(*args())
|
[
"evaluate.Evaluate"
] |
[((1262, 1287), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1285, 1287), False, 'import argparse\n'), ((3742, 3765), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3763, 3765), False, 'import datetime\n'), ((3778, 3887), 'generate_windows.split_chr_bedGraph2', 'split_chr_bedGraph2', (['out_dir', 'input_file', 'chromosome', 'strand', 'window', 'ref[chromosome]', 'depth', 'start', 'end'], {}), '(out_dir, input_file, chromosome, strand, window, ref[\n chromosome], depth, start, end)\n', (3797, 3887), False, 'from generate_windows import Generate_windows, Get_block_position, split_chr_bedGraph2\n'), ((4001, 4024), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4022, 4024), False, 'import datetime\n'), ((4191, 4214), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4212, 4214), False, 'import datetime\n'), ((4219, 4284), 'evaluate.Evaluate', 'Evaluate', (['baseName', 'block', 'model', 'out_dir', 'rst', 'window', 'keep_temp'], {}), '(baseName, block, model, out_dir, rst, window, keep_temp)\n', (4227, 4284), False, 'from evaluate import Evaluate\n'), ((4333, 4345), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4343, 4345), False, 'import gc\n'), ((4407, 4430), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4428, 4430), False, 'import datetime\n'), ((4602, 4625), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4623, 4625), False, 'import datetime\n'), ((4630, 4682), 'scanTranscriptome_forward.Scan_Forward', 'Scan_Forward', (['baseName', 'threshold', 'penality', 'out_dir'], {}), '(baseName, threshold, penality, out_dir)\n', (4642, 4682), False, 'from scanTranscriptome_forward import Scan_Forward\n'), ((4684, 4737), 'scanTranscriptome_reverse.Scan_Backward', 'Scan_Backward', (['baseName', 'threshold', 'penality', 'out_dir'], {}), '(baseName, threshold, penality, out_dir)\n', (4697, 4737), False, 'from scanTranscriptome_reverse import Scan_Backward\n'), ((4866, 4926), 'postprocess.Postprocess', 'Postprocess', (['DB_file', 'baseName', 'threshold', 'penality', 'out_dir'], {}), '(DB_file, baseName, threshold, penality, out_dir)\n', (4877, 4926), False, 'from postprocess import Postprocess\n'), ((4941, 4964), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4962, 4964), False, 'import datetime\n'), ((6276, 6292), 'os.walk', 'os.walk', (['fa_file'], {}), '(fa_file)\n', (6283, 6292), False, 'import sys, os\n'), ((7987, 7998), 'logging.close', 'log.close', ([], {}), '()\n', (7996, 7998), True, 'import logging as log\n'), ((8248, 8315), 'os.system', 'os.system', (["('cat %s/maxSum/*bidirection* >>%s' % (out_dir, out_file))"], {}), "('cat %s/maxSum/*bidirection* >>%s' % (out_dir, out_file))\n", (8257, 8315), False, 'import sys, os\n'), ((4830, 4863), 'os.system', 'os.system', (["('rm %s' % predict_file)"], {}), "('rm %s' % predict_file)\n", (4839, 4863), False, 'import sys, os\n'), ((5270, 5323), 'os.system', 'os.system', (["('rm %s %s' % (forward_file, backward_file))"], {}), "('rm %s %s' % (forward_file, backward_file))\n", (5279, 5323), False, 'import sys, os\n'), ((5868, 5891), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (5882, 5891), False, 'import sys, os\n'), ((5901, 5921), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (5912, 5921), False, 'import sys, os\n'), ((5964, 5987), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (5978, 5987), False, 'import sys, os\n'), ((5997, 6017), 
'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (6008, 6017), False, 'import sys, os\n'), ((6716, 6782), 'generate_windows.Get_block_position', 'Get_block_position', (['out_dir', 'input_file', 'strand', 'window', '(1000000.0)'], {}), '(out_dir, input_file, strand, window, 1000000.0)\n', (6734, 6782), False, 'from generate_windows import Generate_windows, Get_block_position, split_chr_bedGraph2\n'), ((7210, 7233), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7231, 7233), False, 'import datetime\n'), ((7876, 7899), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7897, 7899), False, 'import datetime\n'), ((8349, 8411), 'os.system', 'os.system', (["('rm -rf %s/predict %s/maxSum' % (out_dir, out_dir))"], {}), "('rm -rf %s/predict %s/maxSum' % (out_dir, out_dir))\n", (8358, 8411), False, 'import sys, os\n'), ((7282, 7294), 'multiprocessing.Pool', 'Pool', (['thread'], {}), '(thread)\n', (7286, 7294), False, 'from multiprocessing import Pool\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras import layers
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import time
from time import localtime, strftime
import send_email
import pickle
import load_movies
import load_ratings
import evaluate as evl
# capture the current time, for information and analysis of the results only
date_now = strftime("%d/%m/%Y %H:%M:%S", localtime())
# load the ratings dataset
ratings = load_ratings.load('../')
# load the movies dataset
movies = load_movies.load('../')
# split the dataset into 70% for training and 30% for testing
train, test = train_test_split(ratings, test_size=0.3, random_state=0)
n_users, n_movies = len(ratings.userId.unique()), len(ratings.movieId.unique())
embedding_size = 64
# build the neural network layers
movie_input = layers.Input(shape=[1], name='Movie')
user_input = layers.Input(shape=[1], name='User')
movie_embedding = layers.Embedding(input_dim=n_movies,
input_length=1,
output_dim=embedding_size,
name='Movie-Embedding')(movie_input)
user_embedding = layers.Embedding(input_dim=n_users,
input_length=1,
output_dim=embedding_size,
name='User-Embedding')(user_input)
movie_vec = layers.Flatten()(movie_embedding)
user_vec = layers.Flatten()(user_embedding)
input_vecs = layers.Concatenate()([user_vec, movie_vec])
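# the concatenated user and movie embeddings feed a small MLP that regresses the rating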
dense_1 = layers.Dense(64, activation='relu')(input_vecs)
dense_1 = layers.Dropout(0.2)(dense_1)
dense_1 = layers.Dense(32, activation='relu')(dense_1)
dense_1 = layers.Dropout(0.1)(dense_1)
dense_2 = layers.Dense(1)(dense_1)
model = keras.Model(inputs=[user_input, movie_input], outputs=dense_2)
model.compile(optimizer='adam', loss='mae')
# save a diagram of the network model to an image file
plot_model(model, to_file='model_deep_learning.png', show_shapes=True)
# write the model summary to a file
with open('model_summary_deep_learning.txt', 'w') as f:
    model.summary(print_fn=lambda x: f.write(x + '\n'))
# number of training epochs
epochs = 16
# save model checkpoints according to the Keras callback
save_path = '../models'
my_time = time.strftime("%Y_%m_%d_%H_%M")
model_name = 'deep_learning_' + my_time
full_name = save_path + '/' + model_name + '.h5'
m_check = ModelCheckpoint(full_name, monitor='val_loss', save_best_only=True)
# sanity check
test_map = evl.mean_average_precision(model, train, test)
test_ndcg = evl.normalized_dcg(model, train, test)
test_auc = evl.roc_auc(model, train, test)
print("Check MAP: %0.4f" % test_map)
print("Check NDCG: %0.4f" % test_ndcg)
print("Check ROC_AUC: %0.4f" % test_auc)
# start timing the training
start_time = time.time()
# train the model
history = model.fit([train.userId, train.movieId],
train.rating,
epochs=epochs,
batch_size=64,
verbose=2,
shuffle=True,
validation_split=0.1,
callbacks=[m_check])
# show the training time in hh:mm:ss format
seconds = (time.time() - start_time)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
print('%02d:%02d:%02d' % (h, m, s))
test_map = evl.mean_average_precision(model, train, test)
test_ndcg = evl.normalized_dcg(model, train, test)
test_auc = evl.roc_auc(model, train, test)
print("MAP: %0.4f" % test_map)
print("NDCG: %0.4f" % test_ndcg)
print("ROC_AUC: %0.4f" % test_auc)
# save the training history
history_name = 'dense_' + my_time
with open('../histories/' + history_name + '.pkl', 'wb') as file_pi:
pickle.dump(history.history, file_pi)
# plot the loss against the epochs and save the figure as an image
loss = history.history['loss']
val_loss = history.history['val_loss']
pd.Series(loss).plot(label='Training loss')
pd.Series(val_loss).plot(label='Validation loss')
plt.title('Perda do treinamento')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('training_loss_deep_learning.png', dpi=200)
# print the test and train MSE and MAE
test_preds = model.predict([test.userId, test.movieId])
final_test_mse = "Final test MSE: %0.3f" % mean_squared_error(test_preds, test.rating)
final_test_mae = "Final test MAE: %0.3f" % mean_absolute_error(test_preds, test.rating)
print(final_test_mse)
print(final_test_mae)
train_preds = model.predict([train.userId, train.movieId])
final_train_mse = "Final train MSE: %0.3f" % mean_squared_error(train_preds, train.rating)
final_train_mae = "Final train MAE: %0.3f" % mean_absolute_error(train_preds, train.rating)
print(final_train_mse)
print(final_train_mae)
# write the results to a file
with open('results.txt', 'w') as fr:
fr.write('Data de treinamento da rede: ' + date_now + '\n')
fr.write('\n' + 'Tempo de execução: ' + str('%02d:%02d:%02d' % (h, m, s)) + '\n')
fr.write('\n' + str(final_test_mse) + '\n')
fr.write('\n' + str(final_test_mae) + '\n')
fr.write('\n' + str(final_train_mse) + '\n')
fr.write('\n' + str(final_train_mae) + '\n')
fr.write('\n' + 'Número de Epochs da rede: ' + str(epochs) + '\n')
# send an email with the results of the run, passing the files to attach as a parameter
send_email.send(['training_loss_deep_learning.png', 'model_deep_learning.png', 'model_summary_deep_learning.txt'])
|
[
"evaluate.normalized_dcg",
"evaluate.mean_average_precision",
"evaluate.roc_auc"
] |
[((632, 656), 'load_ratings.load', 'load_ratings.load', (['"""../"""'], {}), "('../')\n", (649, 656), False, 'import load_ratings\n'), ((696, 719), 'load_movies.load', 'load_movies.load', (['"""../"""'], {}), "('../')\n", (712, 719), False, 'import load_movies\n'), ((795, 851), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ratings'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(ratings, test_size=0.3, random_state=0)\n', (811, 851), False, 'from sklearn.model_selection import train_test_split\n'), ((1002, 1039), 'keras.layers.Input', 'layers.Input', ([], {'shape': '[1]', 'name': '"""Movie"""'}), "(shape=[1], name='Movie')\n", (1014, 1039), False, 'from keras import layers\n'), ((1053, 1089), 'keras.layers.Input', 'layers.Input', ([], {'shape': '[1]', 'name': '"""User"""'}), "(shape=[1], name='User')\n", (1065, 1089), False, 'from keras import layers\n'), ((1948, 2010), 'keras.Model', 'keras.Model', ([], {'inputs': '[user_input, movie_input]', 'outputs': 'dense_2'}), '(inputs=[user_input, movie_input], outputs=dense_2)\n', (1959, 2010), False, 'import keras\n'), ((2092, 2162), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': '"""model_deep_learning.png"""', 'show_shapes': '(True)'}), "(model, to_file='model_deep_learning.png', show_shapes=True)\n", (2102, 2162), False, 'from keras.utils import plot_model\n'), ((2473, 2504), 'time.strftime', 'time.strftime', (['"""%Y_%m_%d_%H_%M"""'], {}), "('%Y_%m_%d_%H_%M')\n", (2486, 2504), False, 'import time\n'), ((2604, 2671), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['full_name'], {'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(full_name, monitor='val_loss', save_best_only=True)\n", (2619, 2671), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2699, 2745), 'evaluate.mean_average_precision', 'evl.mean_average_precision', (['model', 'train', 'test'], {}), '(model, train, test)\n', (2725, 2745), True, 'import evaluate as evl\n'), ((2758, 2796), 'evaluate.normalized_dcg', 'evl.normalized_dcg', (['model', 'train', 'test'], {}), '(model, train, test)\n', (2776, 2796), True, 'import evaluate as evl\n'), ((2808, 2839), 'evaluate.roc_auc', 'evl.roc_auc', (['model', 'train', 'test'], {}), '(model, train, test)\n', (2819, 2839), True, 'import evaluate as evl\n'), ((3012, 3023), 'time.time', 'time.time', ([], {}), '()\n', (3021, 3023), False, 'import time\n'), ((3544, 3590), 'evaluate.mean_average_precision', 'evl.mean_average_precision', (['model', 'train', 'test'], {}), '(model, train, test)\n', (3570, 3590), True, 'import evaluate as evl\n'), ((3603, 3641), 'evaluate.normalized_dcg', 'evl.normalized_dcg', (['model', 'train', 'test'], {}), '(model, train, test)\n', (3621, 3641), True, 'import evaluate as evl\n'), ((3653, 3684), 'evaluate.roc_auc', 'evl.roc_auc', (['model', 'train', 'test'], {}), '(model, train, test)\n', (3664, 3684), True, 'import evaluate as evl\n'), ((4195, 4228), 'matplotlib.pyplot.title', 'plt.title', (['"""Perda do treinamento"""'], {}), "('Perda do treinamento')\n", (4204, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4229, 4249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (4239, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4268), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (4260, 4268), True, 'import matplotlib.pyplot as plt\n'), ((4269, 4281), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4279, 4281), True, 'import matplotlib.pyplot as plt\n'), 
((4289, 4298), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4296, 4298), True, 'import matplotlib.pyplot as plt\n'), ((4299, 4309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4307, 4309), True, 'import matplotlib.pyplot as plt\n'), ((4310, 4320), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4318, 4320), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5717), 'send_email.send', 'send_email.send', (["['training_loss_deep_learning.png', 'model_deep_learning.png',\n 'model_summary_deep_learning.txt']"], {}), "(['training_loss_deep_learning.png',\n 'model_deep_learning.png', 'model_summary_deep_learning.txt'])\n", (5614, 5717), False, 'import send_email\n'), ((577, 588), 'time.localtime', 'localtime', ([], {}), '()\n', (586, 588), False, 'from time import localtime, strftime\n'), ((1109, 1217), 'keras.layers.Embedding', 'layers.Embedding', ([], {'input_dim': 'n_movies', 'input_length': '(1)', 'output_dim': 'embedding_size', 'name': '"""Movie-Embedding"""'}), "(input_dim=n_movies, input_length=1, output_dim=\n embedding_size, name='Movie-Embedding')\n", (1125, 1217), False, 'from keras import layers\n'), ((1348, 1454), 'keras.layers.Embedding', 'layers.Embedding', ([], {'input_dim': 'n_users', 'input_length': '(1)', 'output_dim': 'embedding_size', 'name': '"""User-Embedding"""'}), "(input_dim=n_users, input_length=1, output_dim=\n embedding_size, name='User-Embedding')\n", (1364, 1454), False, 'from keras import layers\n'), ((1577, 1593), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (1591, 1593), False, 'from keras import layers\n'), ((1622, 1638), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (1636, 1638), False, 'from keras import layers\n'), ((1669, 1689), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (1687, 1689), False, 'from keras import layers\n'), ((1723, 1758), 'keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1735, 1758), False, 'from keras import layers\n'), ((1781, 1800), 'keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1795, 1800), False, 'from keras import layers\n'), ((1820, 1855), 'keras.layers.Dense', 'layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1832, 1855), False, 'from keras import layers\n'), ((1875, 1894), 'keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (1889, 1894), False, 'from keras import layers\n'), ((1914, 1929), 'keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (1926, 1929), False, 'from keras import layers\n'), ((3422, 3433), 'time.time', 'time.time', ([], {}), '()\n', (3431, 3433), False, 'import time\n'), ((3914, 3951), 'pickle.dump', 'pickle.dump', (['history.history', 'file_pi'], {}), '(history.history, file_pi)\n', (3925, 3951), False, 'import pickle\n'), ((4528, 4571), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_preds', 'test.rating'], {}), '(test_preds, test.rating)\n', (4546, 4571), False, 'from sklearn.metrics import mean_squared_error\n'), ((4615, 4659), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['test_preds', 'test.rating'], {}), '(test_preds, test.rating)\n', (4634, 4659), False, 'from sklearn.metrics import mean_absolute_error\n'), ((4808, 4853), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['train_preds', 'train.rating'], {}), '(train_preds, train.rating)\n', (4826, 4853), False, 'from sklearn.metrics import mean_squared_error\n'), ((4899, 4945), 
'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['train_preds', 'train.rating'], {}), '(train_preds, train.rating)\n', (4918, 4945), False, 'from sklearn.metrics import mean_absolute_error\n'), ((4101, 4116), 'pandas.Series', 'pd.Series', (['loss'], {}), '(loss)\n', (4110, 4116), True, 'import pandas as pd\n'), ((4145, 4164), 'pandas.Series', 'pd.Series', (['val_loss'], {}), '(val_loss)\n', (4154, 4164), True, 'import pandas as pd\n')]
|
# Standard Libraries
import os
import subprocess
# import datetime
# Third Libraries
# import pandas as pd
# import numpy as np
# Local Libraries
import define
import analyze
import prepare
import fselect
import evaluate
import improve
import tools
import pandas as pd
from flask import Flask, render_template, \
redirect, request, url_for, jsonify, flash
from werkzeug.utils import secure_filename
from collections import OrderedDict
app = Flask(__name__)
APP_PATH = os.path.dirname(os.path.abspath(__file__))
app.config['UPLOAD_DIR'] = os.path.join(APP_PATH, 'uploads')
app.config['MODELS_DIR'] = os.path.join(APP_PATH, 'models')
app.config['MARKET_DIR'] = os.path.join(APP_PATH, 'market')
ALLOWED_EXTENSIONS = ['txt', 'csv', 'ml', 'html']
def report_analyze(figures, response, data_path, data_name):
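    # build one interactive HTML plot per requested figure and cache it under market/<data_name>/analyze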
definer = define.Define(
data_path=data_path,
header=None,
response=response).pipeline()
analyzer = analyze.Analyze(definer)
dict_figures = OrderedDict()
for fig in figures:
data_name = data_name.replace(".csv", "")
plot_path = os.path.join(app.config['MARKET_DIR'], data_name, 'analyze')
tools.path_exists(plot_path)
plot_path_plot = os.path.join(plot_path, fig+'.html')
dict_figures[fig] = analyzer.plot(fig)
analyzer.save_plot(plot_path_plot)
return dict_figures
def report_model(response, data_path, data_name):
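    # run the full pipeline (define -> prepare -> select -> evaluate) and persist the comparison plot and report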
definer = define.Define(
data_path=data_path,
header=None,
response=response,
problem_type='regression').pipeline()
preparer = prepare.Prepare(definer).pipeline()
selector = fselect.Select(definer).pipeline()
evaluator = evaluate.Evaluate(definer, preparer, selector).pipeline()
plot = evaluator.plot_models()
table = evaluator.report
data_name = data_name.replace(".csv", "")
plot_path = os.path.join(app.config['MARKET_DIR'], data_name, 'model')
tools.path_exists(plot_path)
plot_path_plot = os.path.join(plot_path, 'boxplot.html')
evaluator.save_plot(plot_path_plot)
plot_path_report = os.path.join(plot_path, 'report.csv')
evaluator.save_report(plot_path_report)
dict_report = {'plot': plot, 'table': table}
return dict_report
def report_improve(response, data_path, data_name):
    definer = define.Define(
            data_path=data_path,
            header=None,
            response=response).pipeline()
preparer = prepare.Prepare(definer).pipeline()
selector = fselect.Select(definer).pipeline()
evaluator = evaluate.Evaluate(definer, preparer, selector)
improver = improve.Improve(evaluator).pipeline()
plot = improver.plot_models()
table = improver.report
dict_report = {'plot': plot, 'table': table}
return dict_report
def report_market(data_name):
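    # reassemble a previously saved report (model comparison plus analysis plots) from the market directory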
# analyze_report = OrderedDict()
# model_report = OrderedDict()
data_name = data_name.replace(".csv", "")
app_path = os.path.join(app.config['MARKET_DIR'], data_name)
# app_dirs = os.listdir(app_path)
# Show Model info
try:
model_path = os.path.join(app_path, 'model')
plot_model = ''
with open(os.path.join(model_path, 'boxplot.html')) as f:
plot_model = f.read()
table_model = pd.read_csv(os.path.join(model_path, 'report.csv'))
dict_report_model = {'plot':plot_model, 'table':table_model} # return 1
    except Exception:
dict_report_model = {'plot':None, 'table':None} # return 1
# Show Analyze info
try:
analyze_path = os.path.join(app_path, 'analyze')
plot_analyze = OrderedDict()
for plot in os.listdir(analyze_path):
with open(os.path.join(analyze_path, plot)) as f:
fig = plot.replace('.html', '')
plot_analyze[fig] = f.read()
# Join full report: model and analyze
dicts_market = {'model':dict_report_model, 'analyze':plot_analyze}
    except Exception:
dicts_market = {'model':dict_report_model, 'analyze':None} # return 2
return dicts_market
def allowed_file(file_name):
return '.' in file_name and file_name.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
#@app.route('/')
#def home():
#return render_template("uploadData.html")
########################### Start Upload Button ##################################
@app.route('/')
@app.route('/defineData', methods=['GET', 'POST'])
def defineData():
""" Show the files that have been uploaded """
dirs = os.listdir(app.config['UPLOAD_DIR'])
return render_template('uploadData.html', files=dirs)
@app.route('/storeData', methods=['GET', 'POST'])
def storedata():
""" Upload a new file """
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
#if file and allowed_file(file.file_name):
file_name = ''
data_name = ''
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
# if file:
file_name = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_DIR'], file_name)
file.save(file_path)
# file_name = str(file.filename)
# data_name = file_name.replace(".csv", "")
# print(data_name)
# command = 'csvtotable -c "Iris dataset" iris.csv iris.html'
return jsonify({"success":True})
# result = subprocess.run(['csvtotable', '-c',
# data_name, file_name, data_name+'.html'],
# stdout=subprocess.PIPE)
# return redirect(url_for('showData', filename=file_name))
return redirect(url_for('defineData'))
else:
return redirect(url_for('defineData'))
@app.route('/chooseData', methods=['GET', 'POST'])
def chooseData():
""" choose a file and show its content """
from itertools import islice
# tools.localization()
file_name = ''
data_name = ''
data_path = ''
dirs = os.listdir(app.config['UPLOAD_DIR'])
if request.method == 'POST':
file_name = request.form['submit']
data_name = file_name.replace(".csv", "")
data_path = os.path.join(app.config['UPLOAD_DIR'], data_name+'.html')
# result = subprocess.run(['csvtotable', '-c', '--display-length','50',
# data_name, data_name+'.csv', data_name+'.html'],
# stdout=subprocess.PIPE)
try:
dataset = None
with open(data_path) as f:
dataset = f.read()
        except Exception:
data_path = os.path.join(app.config['UPLOAD_DIR'], file_name)
with open(data_path) as myfile:
dataset = list(islice(myfile, 40))
dataset = [line[:-1] for line in dataset]
return render_template(
'uploadData.html',
files=dirs,
dataset=dataset,
data_name=data_name)
########################### End Upload Button ##################################
# Convert the uploaded csv file into a responsive table.
# ########################## Start Convert table ##################################
# @app.route('/chooseData/<filename>')
# def showData(filename):
# """ choose a file and show its content """
# from itertools import islice
#
# data_name = filename.replace(".csv", "")
# dirs = os.listdir(app.config['UPLOAD_DIR'])
# # result = subprocess.run(['csvtotable', '-c',
# # data_name, filename, data_name+'.html'],
# # stdout=subprocess.PIPE)
#
# dataset = 'asdasd'
# print(filename + 'start')
# data_path = os.path.join(app.config['UPLOAD_DIR'], filename)
# comm = 'csvtotable -c' + " Iris " + filename + ' ' + data_name+'.html'
# os.system(comm)
# # with open(data_path) as f:
# # dataset = f.read()
# # print(dataset[0])
# print(filename + 'end')
# # data_path = os.path.join(app.config['UPLOAD_DIR'], data_name+'.html')
# #
# # dataset = None
# # with open(data_path) as f:
# # dataset = f.read()
#
# # with open(data_path) as myfile:
# # dataset = list(islice(myfile, 40))
# # dataset = [line[:-1] for line in dataset]
#
# return render_template(
# 'uploadData.html',
# files=dirs,
# dataset=dataset,
# data_name=data_name)
# ########################## End Convert table ##################################
# ########################## Start Analyze Button ##################################
@app.route('/analyze_base', methods=['GET', 'POST'])
def analyze_base():
dirs = os.listdir(app.config['UPLOAD_DIR'])
return render_template('analyzeData.html', files=dirs)
@app.route('/analyze_app', methods=['GET', 'POST'])
def analyze_app():
figures = ['histogram', 'box', 'corr', 'scatter']
response = "class"
data_name = ''
data_path = ''
dirs = os.listdir(app.config['UPLOAD_DIR'])
if request.method == 'POST':
data_name = request.form['submit']
data_path = os.path.join(app.config['UPLOAD_DIR'], data_name)
return render_template(
'analyzeData.html',
files=dirs,
figures=report_analyze(figures, response, data_path, data_name),
data_name=data_name)
########################### End Analyze Button ##################################
########################### Start Model Button ##################################
@app.route('/model_base', methods=['GET', 'POST'])
def model_base():
dirs = os.listdir(app.config['UPLOAD_DIR'])
return render_template('models.html', files=dirs)
@app.route('/model_app', methods=['GET', 'POST'])
def model_app():
response = "class"
data_name = ''
data_path = ''
dirs = os.listdir(app.config['UPLOAD_DIR'])
if request.method == 'POST':
data_name = request.form['submit']
data_path = os.path.join(app.config['UPLOAD_DIR'], data_name)
return render_template(
'models.html',
files=dirs,
report=report_model(response, data_path, data_name),
data_name=data_name)
########################### End Model Button ##################################
########################### Start Improve Button ##################################
@app.route('/improve_base', methods=['GET', 'POST'])
def improve_base():
dirs = os.listdir(app.config['UPLOAD_DIR'])
return render_template('improve.html', files=dirs)
@app.route('/improve_app', methods=['GET', 'POST'])
def improve_app():
response = "class"
data_name = ''
data_path = ''
dirs = os.listdir(app.config['UPLOAD_DIR'])
if request.method == 'POST':
data_name = request.form['submit']
data_path = os.path.join(app.config['UPLOAD_DIR'], data_name)
return render_template(
'improve.html',
files=dirs,
report=report_improve(response, data_path, data_name),
data_name=data_name)
########################### End Improve Button ##################################
########################### Start Model Button ##################################
@app.route('/market_base', methods=['GET', 'POST'])
def market_base():
dirs = os.listdir(app.config['MARKET_DIR'])
return render_template('market.html', files=dirs)
@app.route('/market_app', methods=['GET', 'POST'])
def market_app():
response = "class"
data_name = ''
data_path = ''
dirs = os.listdir(app.config['MARKET_DIR'])
if request.method == 'POST':
data_name = request.form['submit']
# data_path = os.path.join(app.config['MARKET_DIR'], data_name)
return render_template(
'market.html',
files=dirs,
report=report_market(data_name),
data_name=data_name)
########################### End Market Button ##################################
# @app.route('/prediction', methods=['GET', 'POST'])
# def prediction():
# attributes = []
# dirs = os.listdir(app.config['UPLOAD_DIR'])
# data_class = 'class'
# file_name = 'iris.csv'
# filepath = os.path.join(app.config['UPLOAD_DIR'], file_name)
# model = 'Naive Bayes'
# f = open(filepath, 'r')
# g = open(filepath, 'r')
# for item in g.readline().split(','):
# if item.strip() != data_class:
# attributes.append(item)
# print(attributes, ' this is something')
# return render_template('showPrediction.html', file = f, attributes = attributes, data_class = data_class, model = model)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, port=8002)
|
[
"evaluate.Evaluate"
] |
[((456, 471), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (461, 471), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((554, 587), 'os.path.join', 'os.path.join', (['APP_PATH', '"""uploads"""'], {}), "(APP_PATH, 'uploads')\n", (566, 587), False, 'import os\n'), ((615, 647), 'os.path.join', 'os.path.join', (['APP_PATH', '"""models"""'], {}), "(APP_PATH, 'models')\n", (627, 647), False, 'import os\n'), ((675, 707), 'os.path.join', 'os.path.join', (['APP_PATH', '"""market"""'], {}), "(APP_PATH, 'market')\n", (687, 707), False, 'import os\n'), ((500, 525), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (515, 525), False, 'import os\n'), ((967, 991), 'analyze.Analyze', 'analyze.Analyze', (['definer'], {}), '(definer)\n', (982, 991), False, 'import analyze\n'), ((1012, 1025), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1023, 1025), False, 'from collections import OrderedDict\n'), ((1919, 1977), 'os.path.join', 'os.path.join', (["app.config['MARKET_DIR']", 'data_name', '"""model"""'], {}), "(app.config['MARKET_DIR'], data_name, 'model')\n", (1931, 1977), False, 'import os\n'), ((1982, 2010), 'tools.path_exists', 'tools.path_exists', (['plot_path'], {}), '(plot_path)\n', (1999, 2010), False, 'import tools\n'), ((2032, 2071), 'os.path.join', 'os.path.join', (['plot_path', '"""boxplot.html"""'], {}), "(plot_path, 'boxplot.html')\n", (2044, 2071), False, 'import os\n'), ((2135, 2172), 'os.path.join', 'os.path.join', (['plot_path', '"""report.csv"""'], {}), "(plot_path, 'report.csv')\n", (2147, 2172), False, 'import os\n'), ((2581, 2627), 'evaluate.Evaluate', 'evaluate.Evaluate', (['definer', 'preparer', 'selector'], {}), '(definer, preparer, selector)\n', (2598, 2627), False, 'import evaluate\n'), ((2983, 3032), 'os.path.join', 'os.path.join', (["app.config['MARKET_DIR']", 'data_name'], {}), "(app.config['MARKET_DIR'], data_name)\n", (2995, 3032), False, 'import os\n'), ((4508, 4544), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), "(app.config['UPLOAD_DIR'])\n", (4518, 4544), False, 'import os\n'), ((4556, 4602), 'flask.render_template', 'render_template', (['"""uploadData.html"""'], {'files': 'dirs'}), "('uploadData.html', files=dirs)\n", (4571, 4602), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((6203, 6239), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), "(app.config['UPLOAD_DIR'])\n", (6213, 6239), False, 'import os\n'), ((6990, 7079), 'flask.render_template', 'render_template', (['"""uploadData.html"""'], {'files': 'dirs', 'dataset': 'dataset', 'data_name': 'data_name'}), "('uploadData.html', files=dirs, dataset=dataset, data_name=\n data_name)\n", (7005, 7079), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((8844, 8880), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), "(app.config['UPLOAD_DIR'])\n", (8854, 8880), False, 'import os\n'), ((8892, 8939), 'flask.render_template', 'render_template', (['"""analyzeData.html"""'], {'files': 'dirs'}), "('analyzeData.html', files=dirs)\n", (8907, 8939), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((9139, 9175), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), "(app.config['UPLOAD_DIR'])\n", (9149, 9175), False, 'import os\n'), ((9763, 9799), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), 
"(app.config['UPLOAD_DIR'])\n", (9773, 9799), False, 'import os\n'), ((9811, 9853), 'flask.render_template', 'render_template', (['"""models.html"""'], {'files': 'dirs'}), "('models.html', files=dirs)\n", (9826, 9853), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((9995, 10031), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), "(app.config['UPLOAD_DIR'])\n", (10005, 10031), False, 'import os\n'), ((10606, 10642), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), "(app.config['UPLOAD_DIR'])\n", (10616, 10642), False, 'import os\n'), ((10654, 10697), 'flask.render_template', 'render_template', (['"""improve.html"""'], {'files': 'dirs'}), "('improve.html', files=dirs)\n", (10669, 10697), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((10842, 10878), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_DIR']"], {}), "(app.config['UPLOAD_DIR'])\n", (10852, 10878), False, 'import os\n'), ((11454, 11490), 'os.listdir', 'os.listdir', (["app.config['MARKET_DIR']"], {}), "(app.config['MARKET_DIR'])\n", (11464, 11490), False, 'import os\n'), ((11502, 11544), 'flask.render_template', 'render_template', (['"""market.html"""'], {'files': 'dirs'}), "('market.html', files=dirs)\n", (11517, 11544), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((11688, 11724), 'os.listdir', 'os.listdir', (["app.config['MARKET_DIR']"], {}), "(app.config['MARKET_DIR'])\n", (11698, 11724), False, 'import os\n'), ((1120, 1180), 'os.path.join', 'os.path.join', (["app.config['MARKET_DIR']", 'data_name', '"""analyze"""'], {}), "(app.config['MARKET_DIR'], data_name, 'analyze')\n", (1132, 1180), False, 'import os\n'), ((1189, 1217), 'tools.path_exists', 'tools.path_exists', (['plot_path'], {}), '(plot_path)\n', (1206, 1217), False, 'import tools\n'), ((1243, 1281), 'os.path.join', 'os.path.join', (['plot_path', "(fig + '.html')"], {}), "(plot_path, fig + '.html')\n", (1255, 1281), False, 'import os\n'), ((3124, 3155), 'os.path.join', 'os.path.join', (['app_path', '"""model"""'], {}), "(app_path, 'model')\n", (3136, 3155), False, 'import os\n'), ((3574, 3607), 'os.path.join', 'os.path.join', (['app_path', '"""analyze"""'], {}), "(app_path, 'analyze')\n", (3586, 3607), False, 'import os\n'), ((3631, 3644), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3642, 3644), False, 'from collections import OrderedDict\n'), ((3665, 3689), 'os.listdir', 'os.listdir', (['analyze_path'], {}), '(analyze_path)\n', (3675, 3689), False, 'import os\n'), ((6386, 6445), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_DIR']", "(data_name + '.html')"], {}), "(app.config['UPLOAD_DIR'], data_name + '.html')\n", (6398, 6445), False, 'import os\n'), ((9272, 9321), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_DIR']", 'data_name'], {}), "(app.config['UPLOAD_DIR'], data_name)\n", (9284, 9321), False, 'import os\n'), ((10128, 10177), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_DIR']", 'data_name'], {}), "(app.config['UPLOAD_DIR'], data_name)\n", (10140, 10177), False, 'import os\n'), ((10975, 11024), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_DIR']", 'data_name'], {}), "(app.config['UPLOAD_DIR'], data_name)\n", (10987, 11024), False, 'import os\n'), ((836, 902), 'define.Define', 'define.Define', ([], {'data_path': 'data_path', 'header': 'None', 'response': 'response'}), '(data_path=data_path, header=None, response=response)\n', (849, 
902), False, 'import define\n'), ((1461, 1558), 'define.Define', 'define.Define', ([], {'data_path': 'data_path', 'header': 'None', 'response': 'response', 'problem_type': '"""regression"""'}), "(data_path=data_path, header=None, response=response,\n problem_type='regression')\n", (1474, 1558), False, 'import define\n'), ((1631, 1655), 'prepare.Prepare', 'prepare.Prepare', (['definer'], {}), '(definer)\n', (1646, 1655), False, 'import prepare\n'), ((1682, 1705), 'fselect.Select', 'fselect.Select', (['definer'], {}), '(definer)\n', (1696, 1705), False, 'import fselect\n'), ((1733, 1779), 'evaluate.Evaluate', 'evaluate.Evaluate', (['definer', 'preparer', 'selector'], {}), '(definer, preparer, selector)\n', (1750, 1779), False, 'import evaluate\n'), ((2348, 2414), 'define.Define', 'define.Define', ([], {'data_path': 'data_name', 'header': 'None', 'response': 'response'}), '(data_path=data_name, header=None, response=response)\n', (2361, 2414), False, 'import define\n'), ((2479, 2503), 'prepare.Prepare', 'prepare.Prepare', (['definer'], {}), '(definer)\n', (2494, 2503), False, 'import prepare\n'), ((2530, 2553), 'fselect.Select', 'fselect.Select', (['definer'], {}), '(definer)\n', (2544, 2553), False, 'import fselect\n'), ((2643, 2669), 'improve.Improve', 'improve.Improve', (['evaluator'], {}), '(evaluator)\n', (2658, 2669), False, 'import improve\n'), ((3315, 3353), 'os.path.join', 'os.path.join', (['model_path', '"""report.csv"""'], {}), "(model_path, 'report.csv')\n", (3327, 3353), False, 'import os\n'), ((4789, 4810), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (4794, 4810), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((4830, 4851), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (4838, 4851), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((5032, 5057), 'flask.flash', 'flash', (['"""No selected file"""'], {}), "('No selected file')\n", (5037, 5057), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((5077, 5098), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (5085, 5098), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((5191, 5221), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (5206, 5221), False, 'from werkzeug.utils import secure_filename\n'), ((5246, 5295), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_DIR']", 'file_name'], {}), "(app.config['UPLOAD_DIR'], file_name)\n", (5258, 5295), False, 'import os\n'), ((5554, 5580), 'flask.jsonify', 'jsonify', (["{'success': True}"], {}), "({'success': True})\n", (5561, 5580), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((5875, 5896), 'flask.url_for', 'url_for', (['"""defineData"""'], {}), "('defineData')\n", (5882, 5896), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((5932, 5953), 'flask.url_for', 'url_for', (['"""defineData"""'], {}), "('defineData')\n", (5939, 5953), False, 'from flask import Flask, render_template, redirect, request, url_for, jsonify, flash\n'), ((6787, 6836), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_DIR']", 'file_name'], {}), "(app.config['UPLOAD_DIR'], file_name)\n", (6799, 6836), False, 'import os\n'), ((3198, 3238), 'os.path.join', 'os.path.join', (['model_path', 
'"""boxplot.html"""'], {}), "(model_path, 'boxplot.html')\n", (3210, 3238), False, 'import os\n'), ((3713, 3745), 'os.path.join', 'os.path.join', (['analyze_path', 'plot'], {}), '(analyze_path, plot)\n', (3725, 3745), False, 'import os\n'), ((6904, 6922), 'itertools.islice', 'islice', (['myfile', '(40)'], {}), '(myfile, 40)\n', (6910, 6922), False, 'from itertools import islice\n')]
|
# Standard libraries
from argparse import ArgumentParser
import logging
from typing import Callable
# Third-party libraries
import numpy as np
from tqdm import trange
from pymoo.operators.crossover import erx
from pymoo.factory import get_crossover
# Local dependencies
from evaluate import get_env, get_state_action_size, evaluate
from policy import NeuroevoPolicy
def random_search(x, fitness, gens, std=0.01, r=5., rng=np.random.default_rng()):
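    # pure random search: sample each candidate uniformly in [-r, r]^n and keep the best ever seen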
x_best = x
f_best = -np.Inf
for g in trange(gens):
ind = np.random.uniform(-r, r, len(x_best))
f = fitness(ind)
if f > f_best:
f_best = f
x_best = ind
logging.info('\t%d\t%d', g, f_best)
return x_best
def random_optimization(x, fitness, gens, std=0.01, r=5., rng=np.random.default_rng()):
x_best = x
f_best = -np.Inf
for g in trange(gens):
N = rng.normal(size=(len(x))) * std
ind = x + N[:]
f = fitness(ind)
if f > f_best:
f_best = f
x_best = ind
logging.info('\t%d\t%d', g, f_best)
return x_best
def oneplus_lambda(x, fitness, gens, lam, std=0.5, rng=np.random.default_rng()):
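    # (1+lambda) evolution strategy: sample lam Gaussian offspring around the incumbent and keep the best so far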
x_best = x
f_best = -np.Inf
n_evals = 0
for g in trange(gens):
N = rng.normal(size=(lam, len(x))) * std
for i in range(lam):
ind = x + N[i, :]
f = fitness(ind)
if f > f_best:
f_best = f
x_best = ind
x = x_best
n_evals += lam
logging.info('\t%d\t%d', n_evals, f_best)
return x_best
def mu_lambda(x, fitness, gens, lam, alpha=0.2, verbose=False):
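    # ES variant: the incumbent moves along a z-scored, fitness-weighted combination of the sampled perturbations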
x_best = x
f_best = -np.Inf
n_evals = 0
fits = np.zeros(gens)
for g in range(gens):
N = np.random.normal(size=(lam, len(x))) *2
F = np.zeros(lam)
for i in range(lam):
ind = x + N[i, :]
F[i] = fitness(ind)
print("F[" + str(i)+ "] =" + str(F[i]))
if F[i] > f_best:
f_best = F[i]
x_best = ind
fits[g] = f_best
mu_f = np.mean(F)
std_f = np.std(F)
A = F
if std_f != 0:
A = (F - mu_f) / std_f
x = x - alpha * np.dot(A, N) / lam
n_evals += lam
logging.info('\t%d\t%d', n_evals, f_best)
print("x0 = "+str(x[0]))
return x_best
def simulated_annealing_proba(f, f_best, t):
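    # Metropolis acceptance probability for a worse candidate at temperature t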
return np.exp(-(f_best - f) / t)
def simulated_annealing_optimization(x, fitness, gens, std=0.01, rng=np.random.default_rng()):
x_best = x
f_best = -np.Inf
n_evals = 0
for k in trange(gens):
t = (gens - k) / gens
N = rng.normal(size=(len(x))) * std
ind = x_best + N[:]
f = fitness(ind)
if f > f_best or (rng.random() < simulated_annealing_proba(f, f_best, t)):
f_best = f
x_best = ind
n_evals += 1
logging.info('\t%d\t%d', n_evals, f_best)
return x_best
erc = get_crossover("perm_erx")
def evaluate_pop(population: np.ndarray, fit: Callable) -> np.ndarray:
fitness_population = np.zeros(len(population))
for i in range(len(population)):
fitness_population[i] = fit(population[i])
return fitness_population
def fp_selection(population, fitness_population):
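    # fitness-proportionate selection: after normalization this samples proportionally to (fitness - min)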
p = (np.min(fitness_population) - fitness_population)
if len(np.unique(p)) == 1:
p = np.ones(len(population))
p /= np.sum(p)
rng = np.random.default_rng()
ind = rng.choice(len(population), p=p, size=int(len(population)*0.4))
return population[ind], fitness_population[ind]
def truncation_selection(population, fitness_population, p=0.2):
n_elites = int(np.floor(len(population) * p))
elites = np.argsort(fitness_population)[-n_elites:]
return population[elites], fitness_population[elites]
def tournament_selection(population, fitness_population, t_size=2):
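    # note: uses the module-level rng created in __main__ (as does mutate below)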
inds = rng.choice(len(population), t_size)
ind = inds[np.argmax(fitness_population[inds])]
return population[ind], fitness_population[ind]
def mutate(ind):
ind = np.copy(ind)
i, j = rng.choice(len(ind), size=2, replace=False)
ind[i], ind[j] = ind[j], ind[i]
return ind
def ga_step(population, fit):
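    # one generation: keep fitness-proportionate survivors, then fill up with ERX-crossover + swap-mutation children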
fitness_population = evaluate_pop(population, fit)
next_pop, _ = fp_selection(population, fitness_population)
while len(next_pop) < len(population):
parent1, _ = tournament_selection(population, fitness_population)
parent2, _ = tournament_selection(population, fitness_population)
child = erx.erx(parent1, parent2)
child = mutate(child)
next_pop = np.concatenate((next_pop, [child]))
return next_pop, fitness_population
def ga(x: np.ndarray, fit: Callable, n_gens: int = 100) -> np.ndarray:
current_population = x
fitness_population = evaluate_pop(x, fit)
best_fitness = np.max(fitness_population)
x_best = x[np.argmax(fitness_population)]
for g in range(n_gens):
print(f"GENERATION {g+1}/{n_gens}")
new_population, fitness_population = ga_step(current_population, fit)
max_for_this_generation = np.max(fitness_population)
if max_for_this_generation > best_fitness:
best_fitness = max_for_this_generation
x_best = current_population[np.argmax(fitness_population)]
print("Best Fit updated : ", fit(x_best))
current_population = new_population
logging.info('\t%d\t%d', len(x)*g, best_fitness)
if best_fitness == 0.0:
print("BEST FITNESS POSSIBLE ACHIEVED")
break
return x_best
def fitness(x, s, a, env, params):
policy = NeuroevoPolicy(s, a)
policy.set_params(x)
return evaluate(env, params, policy)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-e', '--env', help='environment', default='small', type=str)
parser.add_argument('-g', '--gens', help='number of generations', default=100, type=int)
parser.add_argument('-p', '--pop', help='population size (lambda for the 1+lambda ES)', default=10, type=int)
parser.add_argument('-s', '--seed', help='seed for evolution', default=0, type=int)
parser.add_argument('-S', '--std', help='the standard deviation of the search', default=0.01, type=float)
parser.add_argument('-r', '--range', help='the range of the search', default=5., type=float)
parser.add_argument('-a', '--algorithm', help='the algorithm', default="opl", type=str)
parser.add_argument('--log', help='log file', default='evolution.log', type=str)
parser.add_argument('--weights', help='filename to save policy weights', default='weights', type=str)
args = parser.parse_args()
logging.basicConfig(filename=args.log, level=logging.DEBUG,
format='%(asctime)s %(message)s')
# starting point
env, params = get_env(args.env)
s, a = get_state_action_size(env)
policy = NeuroevoPolicy(s, a)
# evolution
rng = np.random.default_rng(args.seed)
    start = rng.normal(size=(args.pop, len(policy.get_params())))
def fit(x):
return fitness(x, s, a, env, params)
print(args)
if args.algorithm == "opl":
x_best = oneplus_lambda(start, fit, args.gens, args.pop, rng=rng)
elif args.algorithm == "rs":
x_best = random_search(start, fit, args.gens, std=args.std, r=args.range, rng=np.random.default_rng())
elif args.algorithm == "ro":
x_best = random_optimization(start, fit, args.gens, std=args.std, r=args.range, rng=np.random.default_rng())
elif args.algorithm == "sao":
x_best = simulated_annealing_optimization(start, fit, args.gens, std=args.std, rng=np.random.default_rng())
elif args.algorithm == "mu":
        x_best = mu_lambda(start, fit, args.gens, args.pop)
elif args.algorithm == "ga":
x_best = ga(start, fit, args.gens)
else:
print(f"unkown algorithm '{args.algorithm}'. Aborting.")
exit()
# Evaluation
policy.set_params(x_best)
policy.save(args.weights)
best_eval = evaluate(env, params, policy)
print('Best individual: ', x_best[:5])
print('Fitness: ', best_eval)
|
[
"evaluate.get_state_action_size",
"evaluate.evaluate",
"evaluate.get_env"
] |
[((2981, 3006), 'pymoo.factory.get_crossover', 'get_crossover', (['"""perm_erx"""'], {}), "('perm_erx')\n", (2994, 3006), False, 'from pymoo.factory import get_crossover\n'), ((402, 425), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (423, 425), True, 'import numpy as np\n'), ((764, 787), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (785, 787), True, 'import numpy as np\n'), ((1134, 1157), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (1155, 1157), True, 'import numpy as np\n'), ((1698, 1712), 'numpy.zeros', 'np.zeros', (['gens'], {}), '(gens)\n', (1706, 1712), True, 'import numpy as np\n'), ((2423, 2448), 'numpy.exp', 'np.exp', (['(-(f_best - f) / t)'], {}), '(-(f_best - f) / t)\n', (2429, 2448), True, 'import numpy as np\n'), ((2520, 2543), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2541, 2543), True, 'import numpy as np\n'), ((3436, 3445), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (3442, 3445), True, 'import numpy as np\n'), ((3456, 3479), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (3477, 3479), True, 'import numpy as np\n'), ((4087, 4099), 'numpy.copy', 'np.copy', (['ind'], {}), '(ind)\n', (4094, 4099), True, 'import numpy as np\n'), ((4884, 4910), 'numpy.max', 'np.max', (['fitness_population'], {}), '(fitness_population)\n', (4890, 4910), True, 'import numpy as np\n'), ((5666, 5686), 'policy.NeuroevoPolicy', 'NeuroevoPolicy', (['s', 'a'], {}), '(s, a)\n', (5680, 5686), False, 'from policy import NeuroevoPolicy\n'), ((5723, 5752), 'evaluate.evaluate', 'evaluate', (['env', 'params', 'policy'], {}), '(env, params, policy)\n', (5731, 5752), False, 'from evaluate import get_env, get_state_action_size, evaluate\n'), ((5795, 5811), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5809, 5811), False, 'from argparse import ArgumentParser\n'), ((6718, 6816), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'args.log', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(message)s"""'}), "(filename=args.log, level=logging.DEBUG, format=\n '%(asctime)s %(message)s')\n", (6737, 6816), False, 'import logging\n'), ((6876, 6893), 'evaluate.get_env', 'get_env', (['args.env'], {}), '(args.env)\n', (6883, 6893), False, 'from evaluate import get_env, get_state_action_size, evaluate\n'), ((6905, 6931), 'evaluate.get_state_action_size', 'get_state_action_size', (['env'], {}), '(env)\n', (6926, 6931), False, 'from evaluate import get_env, get_state_action_size, evaluate\n'), ((6945, 6965), 'policy.NeuroevoPolicy', 'NeuroevoPolicy', (['s', 'a'], {}), '(s, a)\n', (6959, 6965), False, 'from policy import NeuroevoPolicy\n'), ((6993, 7025), 'numpy.random.default_rng', 'np.random.default_rng', (['args.seed'], {}), '(args.seed)\n', (7014, 7025), True, 'import numpy as np\n'), ((8074, 8103), 'evaluate.evaluate', 'evaluate', (['env', 'params', 'policy'], {}), '(env, params, policy)\n', (8082, 8103), False, 'from evaluate import get_env, get_state_action_size, evaluate\n'), ((647, 682), 'logging.info', 'logging.info', (['"""\t%d\t%d"""', 'g', 'f_best'], {}), "('\\t%d\\t%d', g, f_best)\n", (659, 682), False, 'import logging\n'), ((1024, 1059), 'logging.info', 'logging.info', (['"""\t%d\t%d"""', 'g', 'f_best'], {}), "('\\t%d\\t%d', g, f_best)\n", (1036, 1059), False, 'import logging\n'), ((1509, 1550), 'logging.info', 'logging.info', (['"""\t%d\t%d"""', 'n_evals', 'f_best'], {}), "('\\t%d\\t%d', n_evals, f_best)\n", (1521, 1550), False, 'import 
logging\n'), ((1803, 1816), 'numpy.zeros', 'np.zeros', (['lam'], {}), '(lam)\n', (1811, 1816), True, 'import numpy as np\n'), ((2089, 2099), 'numpy.mean', 'np.mean', (['F'], {}), '(F)\n', (2096, 2099), True, 'import numpy as np\n'), ((2116, 2125), 'numpy.std', 'np.std', (['F'], {}), '(F)\n', (2122, 2125), True, 'import numpy as np\n'), ((2272, 2313), 'logging.info', 'logging.info', (['"""\t%d\t%d"""', 'n_evals', 'f_best'], {}), "('\\t%d\\t%d', n_evals, f_best)\n", (2284, 2313), False, 'import logging\n'), ((2913, 2954), 'logging.info', 'logging.info', (['"""\t%d\t%d"""', 'n_evals', 'f_best'], {}), "('\\t%d\\t%d', n_evals, f_best)\n", (2925, 2954), False, 'import logging\n'), ((3310, 3336), 'numpy.min', 'np.min', (['fitness_population'], {}), '(fitness_population)\n', (3316, 3336), True, 'import numpy as np\n'), ((3736, 3766), 'numpy.argsort', 'np.argsort', (['fitness_population'], {}), '(fitness_population)\n', (3746, 3766), True, 'import numpy as np\n'), ((3969, 4004), 'numpy.argmax', 'np.argmax', (['fitness_population[inds]'], {}), '(fitness_population[inds])\n', (3978, 4004), True, 'import numpy as np\n'), ((4568, 4593), 'pymoo.operators.crossover.erx.erx', 'erx.erx', (['parent1', 'parent2'], {}), '(parent1, parent2)\n', (4575, 4593), False, 'from pymoo.operators.crossover import erx\n'), ((4643, 4678), 'numpy.concatenate', 'np.concatenate', (['(next_pop, [child])'], {}), '((next_pop, [child]))\n', (4657, 4678), True, 'import numpy as np\n'), ((4926, 4955), 'numpy.argmax', 'np.argmax', (['fitness_population'], {}), '(fitness_population)\n', (4935, 4955), True, 'import numpy as np\n'), ((5141, 5167), 'numpy.max', 'np.max', (['fitness_population'], {}), '(fitness_population)\n', (5147, 5167), True, 'import numpy as np\n'), ((3370, 3382), 'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (3379, 3382), True, 'import numpy as np\n'), ((5310, 5339), 'numpy.argmax', 'np.argmax', (['fitness_population'], {}), '(fitness_population)\n', (5319, 5339), True, 'import numpy as np\n'), ((2222, 2234), 'numpy.dot', 'np.dot', (['A', 'N'], {}), '(A, N)\n', (2228, 2234), True, 'import numpy as np\n'), ((7397, 7420), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (7418, 7420), True, 'import numpy as np\n'), ((7547, 7570), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (7568, 7570), True, 'import numpy as np\n'), ((7697, 7720), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (7718, 7720), True, 'import numpy as np\n')]
|
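A minimal, self-contained sketch of the (1+lambda) loop used above, run on a toy quadratic fitness instead of the policy rollout (toy_fitness and every constant here are illustrative assumptions, not part of the original script):

import numpy as np

def toy_fitness(ind):
    return -np.sum(ind ** 2)             # maximized at the origin

rng = np.random.default_rng(0)
lam = 10
x, x_best, f_best = np.ones(4), None, -np.inf
for g in range(200):                      # generations
    N = rng.normal(size=(lam, len(x))) * 0.5
    for i in range(lam):
        ind = x + N[i, :]
        f = toy_fitness(ind)
        if f > f_best:
            f_best, x_best = f, ind
    x = x_best                            # restart each generation from the best so far
print(round(f_best, 4))                   # approaches 0.0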
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
import os
import sys
import random
import argparse
from pytorch_nndct.apis import torch_quantizer, dump_xmodel
from evaluate import evaluate
from unet import UNet
from utils.data_loading import CarvanaDataset
from pathlib import Path
DIVIDER = '-----------------------------------------'
def quantization(model, build_dir, batch_size, quant_mode):
img_scale = 0.5
val_percent = 0.1
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dir_img = Path('fyp_data/crops/')
dir_mask = Path('fyp_data/masks/')
float_model = build_dir + '/float_model'
quant_model = build_dir + '/quant_model'
dataset = CarvanaDataset(dir_img, dir_mask, img_scale)
finetune = True
# 2. Split into train / validation partitions
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train_set, val_set = random_split(dataset, [n_train, n_val], generator=torch.Generator().manual_seed(0))
# 3. Create data loaders
loader_args = dict(batch_size=batch_size, num_workers=4, pin_memory=True)
train_loader = DataLoader(train_set, shuffle=True, **loader_args)
val_loader = DataLoader(val_set, shuffle=False, drop_last=True, **loader_args)
net = model
net.load_state_dict(torch.load(os.path.join(float_model,'checkpoint_epoch_224x224_35.pth'),map_location=device))
    if quant_mode == 'test':
        batch_size = 1
    rand_in = torch.randn([batch_size, 3, 224, 224])
quantizer = torch_quantizer(quant_mode, net, (rand_in), output_dir=quant_model)
quantized_model = quantizer.quant_model
    if finetune:
if quant_mode == 'calib':
quantizer.fast_finetune(evaluate, (quantized_model, val_loader,device))
elif quant_mode == 'test':
quantizer.load_ft_param()
val_score = evaluate(quantized_model,val_loader,device)
print('val_score is = ',val_score)
if quant_mode == 'calib':
quantizer.export_quant_config()
if quant_mode == 'test':
quantizer.export_xmodel(deploy_check=False, output_dir=quant_model)
return
def run_main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--build_dir', type=str, default='build', help='Path to build folder. Default is build')
ap.add_argument('-q', '--quant_mode', type=str, default='calib', choices=['calib','test'], help='Quantization mode (calib or test). Default is calib')
    ap.add_argument('-b', '--batchsize', type=int, default=1, help='Testing batchsize - must be an integer. Default is 1')
args = ap.parse_args()
model = UNet(n_channels=3, n_classes=2, bilinear=False)
print('\n'+DIVIDER)
print('PyTorch version : ',torch.__version__)
print(sys.version)
print(DIVIDER)
print(' Command line options:')
print ('--build_dir : ',args.build_dir)
print ('--quant_mode : ',args.quant_mode)
print ('--batchsize : ',args.batchsize)
print(DIVIDER)
    quantization(model, args.build_dir, args.batchsize, args.quant_mode)
return
if __name__ == '__main__':
run_main()
|
[
"evaluate.evaluate"
] |
[((626, 649), 'pathlib.Path', 'Path', (['"""fyp_data/crops/"""'], {}), "('fyp_data/crops/')\n", (630, 649), False, 'from pathlib import Path\n'), ((665, 688), 'pathlib.Path', 'Path', (['"""fyp_data/masks/"""'], {}), "('fyp_data/masks/')\n", (669, 688), False, 'from pathlib import Path\n'), ((793, 837), 'utils.data_loading.CarvanaDataset', 'CarvanaDataset', (['dir_img', 'dir_mask', 'img_scale'], {}), '(dir_img, dir_mask, img_scale)\n', (807, 837), False, 'from utils.data_loading import CarvanaDataset\n'), ((1225, 1275), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True, **loader_args)\n', (1235, 1275), False, 'from torch.utils.data import DataLoader, random_split\n'), ((1293, 1358), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'shuffle': '(False)', 'drop_last': '(True)'}), '(val_set, shuffle=False, drop_last=True, **loader_args)\n', (1303, 1358), False, 'from torch.utils.data import DataLoader, random_split\n'), ((1568, 1606), 'torch.randn', 'torch.randn', (['[batch_size, 3, 224, 224]'], {}), '([batch_size, 3, 224, 224])\n', (1579, 1606), False, 'import torch\n'), ((1623, 1688), 'pytorch_nndct.apis.torch_quantizer', 'torch_quantizer', (['quant_mode', 'net', 'rand_in'], {'output_dir': 'quant_model'}), '(quant_mode, net, rand_in, output_dir=quant_model)\n', (1638, 1688), False, 'from pytorch_nndct.apis import torch_quantizer, dump_xmodel\n'), ((1972, 2017), 'evaluate.evaluate', 'evaluate', (['quantized_model', 'val_loader', 'device'], {}), '(quantized_model, val_loader, device)\n', (1980, 2017), False, 'from evaluate import evaluate\n'), ((2328, 2353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2351, 2353), False, 'import argparse\n'), ((2796, 2843), 'unet.UNet', 'UNet', ([], {'n_channels': '(3)', 'n_classes': '(2)', 'bilinear': '(False)'}), '(n_channels=3, n_classes=2, bilinear=False)\n', (2800, 2843), False, 'from unet import UNet\n'), ((575, 600), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (598, 600), False, 'import torch\n'), ((1415, 1475), 'os.path.join', 'os.path.join', (['float_model', '"""checkpoint_epoch_224x224_35.pth"""'], {}), "(float_model, 'checkpoint_epoch_224x224_35.pth')\n", (1427, 1475), False, 'import os\n'), ((1064, 1081), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (1079, 1081), False, 'import torch\n')]
|
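The seeded torch.Generator in the random_split above guarantees the calib and test passes see the same validation images; a self-contained illustration of that seeding trick (the dataset contents are made up):

import torch
from torch.utils.data import TensorDataset, random_split

ds = TensorDataset(torch.arange(100).float())
n_val = int(len(ds) * 0.1)
train_set, val_set = random_split(ds, [len(ds) - n_val, n_val],
                                 generator=torch.Generator().manual_seed(0))
print(len(train_set), len(val_set))   # 90 10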
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import pandas as pd
import sys
sys.path.append("..")
sys.path.append("../technical-analysis_python/")
mpl.use('tkagg') # issues with Big Sur
# technical analysis
from strategy.macd_crossover import macdCrossover
from backtest import Backtest
from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR
# macroeconomic analysis
from filters.macro_analysis import GetSensitivity, GetMacrodata
# sentiment analysis
from filters.sentiment_analysis import SentimentFilter
"""
Technical analysis
-
Generate signals with MACD crossover strategy
"""
# load price data
df_whole = pd.read_csv('../../database/microeconomic_data/hkex_ticks_day/hkex_0001.csv', header=0, index_col='Date', parse_dates=True)
# select time range (for trading)
start_date = pd.Timestamp('2017-01-01')
end_date = pd.Timestamp('2021-01-01')
#start_date = pd.Timestamp('2017-01-01')
#end_date = pd.Timestamp('2019-02-05')
df = df_whole.loc[start_date:end_date]
# get filtered df for macro analysis
filtered_df = df_whole.loc[:end_date]
ticker = "0005.HK"
# apply MACD crossover strategy
macd_cross = macdCrossover(df)
macd_fig = macd_cross.plot_MACD()
plt.close() # hide figure
signals = macd_cross.gen_signals()
print(signals.head())
signal_fig = macd_cross.plot_signals(signals)
plt.close() # hide figure
"""
Macroeconomic analysis
-
Adjust bias in signals with macroeconomic data
"""
# get ticker's sensitivity to macro data
s_gdp, s_unemploy, s_property = GetSensitivity(filtered_df)
# append signals with macro data
signals = GetMacrodata(signals)
# calculate adjusting factor
signals['macro_factor'] = s_gdp * signals['GDP'] + s_unemploy * signals['Unemployment rate'] + s_property * signals['Property price']
signals['signal'] = signals['signal'] + signals['macro_factor']
# round off signals['signal'] to the nearest integer
signals['signal'] = signals['signal'].round(0)
"""
Sentiment analysis
-
Filter out signals that contrast with the sentiment label
"""
filtered_signals = SentimentFilter(ticker, signals)
"""
Backtesting & evaluation
"""
portfolio, backtest_fig = Backtest(ticker, filtered_signals, df)
plt.close() # hide figure
print("Final total value: {value:.4f} ".format(value=portfolio['total'][-1]))
print("Total return: {value:.4f}%".format(value=(((portfolio['total'][-1] - portfolio['total'][0])/portfolio['total'][0]) * 100))) # for analysis
print("No. of trade: {value}".format(value=len(signals[signals.positions == 1])))
"""
Plotting figures
"""
backtest_fig.suptitle('Baseline - Portfolio value', fontsize=14)
#backtest_fig.savefig('./figures/baseline_portfolio-value')
plt.show()
# Evaluate strategy
# 1. Portfolio return
returns_fig = PortfolioReturn(portfolio)
returns_fig.suptitle('Baseline - Portfolio return')
#returns_fig.savefig('./figures/baseline_portfolo-return')
plt.show()
# 2. Sharpe ratio
sharpe_ratio = SharpeRatio(portfolio)
print("Sharpe ratio: {ratio:.4f} ".format(ratio = sharpe_ratio))
# 3. Maximum drawdown
maxDrawdown_fig, max_daily_drawdown, daily_drawdown = MaxDrawdown(df)
maxDrawdown_fig.suptitle('Baseline - Maximum drawdown', fontsize=14)
#maxDrawdown_fig.savefig('./figures/baseline_maximum-drawdown')
plt.show()
# 4. Compound Annual Growth Rate
cagr = CAGR(portfolio)
print("CAGR: {cagr:.4f} ".format(cagr = cagr))
|
[
"evaluate.SharpeRatio",
"evaluate.MaxDrawdown",
"evaluate.PortfolioReturn",
"evaluate.CAGR"
] |
[((108, 129), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (123, 129), False, 'import sys\n'), ((130, 178), 'sys.path.append', 'sys.path.append', (['"""../technical-analysis_python/"""'], {}), "('../technical-analysis_python/')\n", (145, 178), False, 'import sys\n'), ((179, 195), 'matplotlib.use', 'mpl.use', (['"""tkagg"""'], {}), "('tkagg')\n", (186, 195), True, 'import matplotlib as mpl\n'), ((663, 790), 'pandas.read_csv', 'pd.read_csv', (['"""../../database/microeconomic_data/hkex_ticks_day/hkex_0001.csv"""'], {'header': '(0)', 'index_col': '"""Date"""', 'parse_dates': '(True)'}), "('../../database/microeconomic_data/hkex_ticks_day/hkex_0001.csv',\n header=0, index_col='Date', parse_dates=True)\n", (674, 790), True, 'import pandas as pd\n'), ((835, 861), 'pandas.Timestamp', 'pd.Timestamp', (['"""2017-01-01"""'], {}), "('2017-01-01')\n", (847, 861), True, 'import pandas as pd\n'), ((873, 899), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-01-01"""'], {}), "('2021-01-01')\n", (885, 899), True, 'import pandas as pd\n'), ((1161, 1178), 'strategy.macd_crossover.macdCrossover', 'macdCrossover', (['df'], {}), '(df)\n', (1174, 1178), False, 'from strategy.macd_crossover import macdCrossover\n'), ((1213, 1224), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1222, 1224), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1354), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1352, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1550), 'filters.macro_analysis.GetSensitivity', 'GetSensitivity', (['filtered_df'], {}), '(filtered_df)\n', (1537, 1550), False, 'from filters.macro_analysis import GetSensitivity, GetMacrodata\n'), ((1595, 1616), 'filters.macro_analysis.GetMacrodata', 'GetMacrodata', (['signals'], {}), '(signals)\n', (1607, 1616), False, 'from filters.macro_analysis import GetSensitivity, GetMacrodata\n'), ((2054, 2086), 'filters.sentiment_analysis.SentimentFilter', 'SentimentFilter', (['ticker', 'signals'], {}), '(ticker, signals)\n', (2069, 2086), False, 'from filters.sentiment_analysis import SentimentFilter\n'), ((2147, 2185), 'backtest.Backtest', 'Backtest', (['ticker', 'filtered_signals', 'df'], {}), '(ticker, filtered_signals, df)\n', (2155, 2185), False, 'from backtest import Backtest\n'), ((2186, 2197), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2195, 2197), True, 'import matplotlib.pyplot as plt\n'), ((2670, 2680), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2678, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2739, 2765), 'evaluate.PortfolioReturn', 'PortfolioReturn', (['portfolio'], {}), '(portfolio)\n', (2754, 2765), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((2877, 2887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2885, 2887), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2944), 'evaluate.SharpeRatio', 'SharpeRatio', (['portfolio'], {}), '(portfolio)\n', (2933, 2944), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((3087, 3102), 'evaluate.MaxDrawdown', 'MaxDrawdown', (['df'], {}), '(df)\n', (3098, 3102), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n'), ((3236, 3246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3244, 3246), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3303), 'evaluate.CAGR', 'CAGR', (['portfolio'], {}), '(portfolio)\n', (3292, 3303), False, 'from evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n')]
|
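SharpeRatio above is the project's own helper; for reference, a common annualized formulation computed straight from the portfolio's total-value series (an assumption about the definition, the evaluate module may differ, e.g. in risk-free-rate handling):

import numpy as np

def sharpe_ratio_sketch(total_values, periods_per_year=252):
    total_values = np.asarray(total_values, dtype=float)
    returns = np.diff(total_values) / total_values[:-1]   # per-period returns
    return np.sqrt(periods_per_year) * returns.mean() / returns.std()

print(sharpe_ratio_sketch([100, 101, 100.5, 102, 103]))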
'''This script tunes the L2 reg weight of the final classifier.'''
import argparse
import os
import math
import torch
import torch.backends.cudnn as cudnn
from configs import get_datasets
from evaluate import encode_train_set, train_clf, test
from models import *
parser = argparse.ArgumentParser(description='Tune regularization coefficient of downstream classifier.')
parser.add_argument("--num-workers", type=int, default=2, help='Number of threads for data loaders')
parser.add_argument("--load-from", type=str, default='ckpt.pth', help='File to load from')
args = parser.parse_args()
# Load checkpoint.
print('==> Loading settings from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
resume_from = os.path.join('./checkpoint', args.load_from)
checkpoint = torch.load(resume_from)
args.dataset = checkpoint['args']['dataset']
args.arch = checkpoint['args']['arch']
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Data
print('==> Preparing data..')
_, testset, clftrainset, num_classes, stem = get_datasets(args.dataset)
testloader = torch.utils.data.DataLoader(testset, batch_size=1000, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
clftrainloader = torch.utils.data.DataLoader(clftrainset, batch_size=1000, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
# Model
print('==> Building model..')
##############################################################
# Encoder
##############################################################
if args.arch == 'resnet18':
net = ResNet18(stem=stem)
elif args.arch == 'resnet34':
net = ResNet34(stem=stem)
elif args.arch == 'resnet50':
net = ResNet50(stem=stem)
else:
raise ValueError("Bad architecture specification")
net = net.to(device)
if device == 'cuda':
repr_dim = net.representation_dim
net = torch.nn.DataParallel(net)
net.representation_dim = repr_dim
cudnn.benchmark = True
print('==> Loading encoder from checkpoint..')
net.load_state_dict(checkpoint['net'])
best_acc = 0
X, y = encode_train_set(clftrainloader, device, net)
for reg_weight in torch.exp(math.log(10) * torch.linspace(-7, -3, 16, dtype=torch.float, device=device)):
clf = train_clf(X, y, net.representation_dim, num_classes, device, reg_weight=reg_weight)
acc = test(testloader, device, net, clf)
if acc > best_acc:
best_acc = acc
print("Best test accuracy", best_acc, "%")
|
[
"evaluate.encode_train_set",
"evaluate.test",
"evaluate.train_clf"
] |
[((276, 377), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tune regularization coefficient of downstream classifier."""'}), "(description=\n 'Tune regularization coefficient of downstream classifier.')\n", (299, 377), False, 'import argparse\n'), ((667, 694), 'os.path.isdir', 'os.path.isdir', (['"""checkpoint"""'], {}), "('checkpoint')\n", (680, 694), False, 'import os\n'), ((750, 794), 'os.path.join', 'os.path.join', (['"""./checkpoint"""', 'args.load_from'], {}), "('./checkpoint', args.load_from)\n", (762, 794), False, 'import os\n'), ((808, 831), 'torch.load', 'torch.load', (['resume_from'], {}), '(resume_from)\n', (818, 831), False, 'import torch\n'), ((1056, 1082), 'configs.get_datasets', 'get_datasets', (['args.dataset'], {}), '(args.dataset)\n', (1068, 1082), False, 'from configs import get_datasets\n'), ((1097, 1216), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(testset, batch_size=1000, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n', (1124, 1216), False, 'import torch\n'), ((1271, 1394), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['clftrainset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(clftrainset, batch_size=1000, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n', (1298, 1394), False, 'import torch\n'), ((2142, 2187), 'evaluate.encode_train_set', 'encode_train_set', (['clftrainloader', 'device', 'net'], {}), '(clftrainloader, device, net)\n', (2158, 2187), False, 'from evaluate import encode_train_set, train_clf, test\n'), ((936, 961), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (959, 961), False, 'import torch\n'), ((1941, 1967), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (1962, 1967), False, 'import torch\n'), ((2304, 2392), 'evaluate.train_clf', 'train_clf', (['X', 'y', 'net.representation_dim', 'num_classes', 'device'], {'reg_weight': 'reg_weight'}), '(X, y, net.representation_dim, num_classes, device, reg_weight=\n reg_weight)\n', (2313, 2392), False, 'from evaluate import encode_train_set, train_clf, test\n'), ((2398, 2432), 'evaluate.test', 'test', (['testloader', 'device', 'net', 'clf'], {}), '(testloader, device, net, clf)\n', (2402, 2432), False, 'from evaluate import encode_train_set, train_clf, test\n'), ((2216, 2228), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (2224, 2228), False, 'import math\n'), ((2231, 2291), 'torch.linspace', 'torch.linspace', (['(-7)', '(-3)', '(16)'], {'dtype': 'torch.float', 'device': 'device'}), '(-7, -3, 16, dtype=torch.float, device=device)\n', (2245, 2291), False, 'import torch\n')]
|
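The torch.exp(log(10) * linspace(-7, -3, 16)) expression above just enumerates 16 log-spaced L2 weights between 1e-7 and 1e-3; the numpy equivalent makes that easier to see:

import numpy as np

reg_weights = np.logspace(-7, -3, 16)   # 1e-07, ..., 1e-03, evenly spaced in log10
print(reg_weights[0], reg_weights[-1])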
import numpy as np
import os
import paddle.fluid as fluid
from gmf import GMF
from mlp import MLP
from neumf import NeuMF
from Dataset import Dataset
from evaluate import evaluate_model
import logging
import paddle
import args
import utils
import time
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
if __name__ == "__main__":
args = args.parse_args()
dataset = Dataset(args.path + args.dataset)
testRatings, testNegatives = dataset.testRatings, dataset.testNegatives
topK = 10
begin = time.time()
model_path = args.model_dir + "/epoch_" + args.test_epoch
(hits, ndcgs) = evaluate_model(args, testRatings, testNegatives, topK, model_path)
hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
end = time.time()
logger.info("epoch: {}, epoch_time: {:.5f}s, HR: {:.5f}, NDCG: {:.5f}".format(args.epochs, end - begin, hr, ndcg))
|
[
"evaluate.evaluate_model"
] |
[((252, 323), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(format='%(asctime)s - %(levelname)s - %(message)s')\n", (271, 323), False, 'import logging\n'), ((333, 359), 'logging.getLogger', 'logging.getLogger', (['"""fluid"""'], {}), "('fluid')\n", (350, 359), False, 'import logging\n'), ((432, 449), 'args.parse_args', 'args.parse_args', ([], {}), '()\n', (447, 449), False, 'import args\n'), ((464, 497), 'Dataset.Dataset', 'Dataset', (['(args.path + args.dataset)'], {}), '(args.path + args.dataset)\n', (471, 497), False, 'from Dataset import Dataset\n'), ((604, 615), 'time.time', 'time.time', ([], {}), '()\n', (613, 615), False, 'import time\n'), ((698, 764), 'evaluate.evaluate_model', 'evaluate_model', (['args', 'testRatings', 'testNegatives', 'topK', 'model_path'], {}), '(args, testRatings, testNegatives, topK, model_path)\n', (712, 764), False, 'from evaluate import evaluate_model\n'), ((836, 847), 'time.time', 'time.time', ([], {}), '()\n', (845, 847), False, 'import time\n'), ((780, 794), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (788, 794), True, 'import numpy as np\n'), ((803, 818), 'numpy.array', 'np.array', (['ndcgs'], {}), '(ndcgs)\n', (811, 818), True, 'import numpy as np\n')]
|
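evaluate_model above returns per-user hits and ndcgs lists that are then averaged; the standard leave-one-out definitions behind those names (an assumption about the helper's internals) are small enough to show:

import math

def hit_ratio(ranked_items, gt_item):
    # 1 if the held-out item appears anywhere in the top-K list.
    return int(gt_item in ranked_items)

def ndcg(ranked_items, gt_item):
    # Gain discounted by rank position: rank 0 -> 1.0, rank 1 -> ~0.63, ...
    if gt_item in ranked_items:
        return 1.0 / math.log2(ranked_items.index(gt_item) + 2)
    return 0.0

print(hit_ratio([5, 2, 9], 2), round(ndcg([5, 2, 9], 2), 3))   # 1 0.631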
from evaluate import evaluate
evaluate()
|
[
"evaluate.evaluate"
] |
[((32, 42), 'evaluate.evaluate', 'evaluate', ([], {}), '()\n', (40, 42), False, 'from evaluate import evaluate\n')]
|
from unittest.mock import Mock, patch, PropertyMock
from evaluate.filtered_vcf_file import FilteredVCFFile
from evaluate.vcf_filters import VCF_Filters
from evaluate.vcf_file import VCFFile, VCFFactory
import pytest
import pysam
from io import StringIO
from evaluate.coverage_filter import CoverageFilter
@pytest.fixture
def remove_record_filter_mock():
remove_record_filter_mock = Mock()
remove_record_filter_mock.record_should_be_filtered_out.return_value = True
return remove_record_filter_mock
@pytest.fixture
def keep_record_filter_mock():
keep_record_filter_mock = Mock()
keep_record_filter_mock.record_should_be_filtered_out.return_value = False
return keep_record_filter_mock
@pytest.fixture
def sample_to_gene_to_VCFs_all_records():
sample_to_gene_to_VCFs_all_records = {
"sample_1": {
"gene_0": [Mock(), Mock()],
"gene_1": [Mock()],
"gene_2": [Mock(), Mock(), Mock()],
},
"sample_2": {
"gene_0": [Mock()],
"gene_1": [Mock(), Mock(), Mock()],
"gene_2": [Mock(), Mock(), Mock(), Mock(), Mock(), Mock()],
}
}
return sample_to_gene_to_VCFs_all_records
class TestFilteredVCFFile:
filter_records_mock_return_value = Mock()
sample_to_gene_to_VCFs_mock = Mock()
@patch.object(FilteredVCFFile, "sample_to_gene_to_VCFs", new_callable=PropertyMock, return_value=sample_to_gene_to_VCFs_mock)
@patch.object(VCFFile, VCFFile.__init__.__name__)
@patch.object(FilteredVCFFile, FilteredVCFFile._filter_records.__name__, return_value=filter_records_mock_return_value)
def test___constructor(self, filter_records_mock, VCFFile_init_mock, sample_to_gene_to_VCFs_property_mock):
pysam_variant_file_mock = Mock()
filters_mock = Mock()
VCF_creator_method_mock = Mock()
filtered_vcf_file = FilteredVCFFile(pysam_variant_file_mock, filters_mock, VCF_creator_method_mock)
VCFFile_init_mock.assert_called_once_with(filtered_vcf_file, pysam_variant_file_mock, VCF_creator_method_mock)
filter_records_mock.assert_called_once_with(TestFilteredVCFFile.sample_to_gene_to_VCFs_mock, filters_mock)
def test___constructor___empty_medaka_file(self):
        from evaluate.vcf import VCFFactory
with pysam.VariantFile("tests/test_cases/sample_medaka_empty_vcf.expected.vcf") as pysam_variant_file:
filtered_vcf_file = FilteredVCFFile(pysam_variant_file=pysam_variant_file, filters=[],
VCF_creator_method=VCFFactory.create_Medaka_VCF_from_VariantRecord_and_Sample)
assert filtered_vcf_file._sample_to_gene_to_VCFs == {}
def test_filter_records_noFiltersReturnsAllRecords(self, sample_to_gene_to_VCFs_all_records):
filters = VCF_Filters()
actual = FilteredVCFFile._filter_records(sample_to_gene_to_VCFs_all_records, filters)
expected = sample_to_gene_to_VCFs_all_records
assert actual == expected
def test_filter_records_severalFiltersNothingIsFilteredReturnsAllRecords(self,
sample_to_gene_to_VCFs_all_records,
keep_record_filter_mock):
filters = VCF_Filters([keep_record_filter_mock]*3)
actual = FilteredVCFFile._filter_records(sample_to_gene_to_VCFs_all_records, filters)
expected = sample_to_gene_to_VCFs_all_records
assert actual == expected
def test_filter_records_severalFiltersEverythingIsFilteredReturnsNothing(self,
sample_to_gene_to_VCFs_all_records,
remove_record_filter_mock):
filters = VCF_Filters([remove_record_filter_mock]*3)
actual = FilteredVCFFile._filter_records(sample_to_gene_to_VCFs_all_records, filters)
expected = {}
assert actual == expected
def test_filter_records_severalFiltersOneFiltersEverythingReturnsNothing(self,
sample_to_gene_to_VCFs_all_records,
keep_record_filter_mock,
remove_record_filter_mock):
filters = VCF_Filters([keep_record_filter_mock]*4 + [remove_record_filter_mock])
actual = FilteredVCFFile._filter_records(sample_to_gene_to_VCFs_all_records, filters)
expected = {}
assert actual == expected
def test_filter_records_twoFiltersOneFiltersGene0OtherFiltersGene2ReturnsRecordsInGene1(self,
sample_to_gene_to_VCFs_all_records):
filter_gene_0_mock = Mock()
filter_gene_0_mock.record_should_be_filtered_out.side_effect = \
lambda vcf_record : \
vcf_record in sample_to_gene_to_VCFs_all_records["sample_1"]["gene_0"] or \
vcf_record in sample_to_gene_to_VCFs_all_records["sample_2"]["gene_0"]
filter_gene_2_mock = Mock()
filter_gene_2_mock.record_should_be_filtered_out.side_effect = \
lambda vcf_record: \
vcf_record in sample_to_gene_to_VCFs_all_records["sample_1"]["gene_2"] or \
vcf_record in sample_to_gene_to_VCFs_all_records["sample_2"]["gene_2"]
filters = VCF_Filters([filter_gene_0_mock, filter_gene_2_mock])
actual = FilteredVCFFile._filter_records(sample_to_gene_to_VCFs_all_records, filters)
expected = {
"sample_1": {
"gene_1": sample_to_gene_to_VCFs_all_records["sample_1"]["gene_1"],
},
"sample_2": {
"gene_1": sample_to_gene_to_VCFs_all_records["sample_2"]["gene_1"],
}
}
assert actual == expected
def test___write(self):
vcf_filepath = "tests/test_cases/test.vcf"
with pysam.VariantFile(vcf_filepath) as pysam_variant_file:
vcf_file = FilteredVCFFile(pysam_variant_file=pysam_variant_file,
filters=VCF_Filters([CoverageFilter(55)]),
VCF_creator_method=VCFFactory.create_Pandora_VCF_from_VariantRecord_and_Sample)
filehandler = StringIO()
vcf_file.write(filehandler)
actual_vcf = filehandler.getvalue()
filehandler.close()
expected_vcf="""##fileformat=VCFv4.3
##FILTER=<ID=PASS,Description="All filters passed">
##fileDate==26/04/19
##ALT=<ID=SNP,Description="SNP">
##ALT=<ID=PH_SNPs,Description="Phased SNPs">
##ALT=<ID=INDEL,Description="Insertion-deletion">
##ALT=<ID=COMPLEX,Description="Complex variant, collection of SNPs and indels">
##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of variant">
##ALT=<ID=SIMPLE,Description="Graph bubble is simple">
##ALT=<ID=NESTED,Description="Variation site was a nested feature in the graph">
##ALT=<ID=TOO_MANY_ALTS,Description="Variation site was a multinested feature with too many alts to include all in the VCF">
##INFO=<ID=GRAPHTYPE,Number=1,Type=String,Description="Type of graph feature">
##contig=<ID=GC00000001_155>
##FORMAT=<ID=GT,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MEAN_FWD_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MEAN_REV_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MED_FWD_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=MED_REV_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=SUM_FWD_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=SUM_REV_COVG,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=GAPS,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=LIKELIHOOD,Number=1,Type=String,Description="Dummy">
##FORMAT=<ID=GT_CONF,Number=1,Type=String,Description="Dummy">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT sample
GC00000001_155 1 . ACGT TTGGGGGAAGGCTCTGCACTGCCCGTTGGC,TTGGGGGAAGGCTCTGCACTGCCTGTTGGT . . SVTYPE=COMPLEX;GRAPHTYPE=NESTED GT:MEAN_FWD_COVG:MEAN_REV_COVG:MED_FWD_COVG:MED_REV_COVG:SUM_FWD_COVG:SUM_REV_COVG:GAPS:LIKELIHOOD:GT_CONF 1:6,25,0:7,30,0:0,24,0:0,30,0:24,24,0:30,30,0:0.75,0,1:-326.079,-63.3221,-432.546:262.757
"""
assert actual_vcf == expected_vcf
|
[
"evaluate.filtered_vcf_file.FilteredVCFFile",
"evaluate.coverage_filter.CoverageFilter",
"evaluate.vcf_filters.VCF_Filters",
"evaluate.filtered_vcf_file.FilteredVCFFile._filter_records"
] |
[((387, 393), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (391, 393), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((589, 595), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (593, 595), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1268, 1274), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1272, 1274), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1309, 1315), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1313, 1315), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1322, 1451), 'unittest.mock.patch.object', 'patch.object', (['FilteredVCFFile', '"""sample_to_gene_to_VCFs"""'], {'new_callable': 'PropertyMock', 'return_value': 'sample_to_gene_to_VCFs_mock'}), "(FilteredVCFFile, 'sample_to_gene_to_VCFs', new_callable=\n PropertyMock, return_value=sample_to_gene_to_VCFs_mock)\n", (1334, 1451), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1452, 1500), 'unittest.mock.patch.object', 'patch.object', (['VCFFile', 'VCFFile.__init__.__name__'], {}), '(VCFFile, VCFFile.__init__.__name__)\n', (1464, 1500), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1506, 1628), 'unittest.mock.patch.object', 'patch.object', (['FilteredVCFFile', 'FilteredVCFFile._filter_records.__name__'], {'return_value': 'filter_records_mock_return_value'}), '(FilteredVCFFile, FilteredVCFFile._filter_records.__name__,\n return_value=filter_records_mock_return_value)\n', (1518, 1628), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1771, 1777), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1775, 1777), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1801, 1807), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1805, 1807), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1842, 1848), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1846, 1848), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1878, 1957), 'evaluate.filtered_vcf_file.FilteredVCFFile', 'FilteredVCFFile', (['pysam_variant_file_mock', 'filters_mock', 'VCF_creator_method_mock'], {}), '(pysam_variant_file_mock, filters_mock, VCF_creator_method_mock)\n', (1893, 1957), False, 'from evaluate.filtered_vcf_file import FilteredVCFFile\n'), ((2832, 2845), 'evaluate.vcf_filters.VCF_Filters', 'VCF_Filters', ([], {}), '()\n', (2843, 2845), False, 'from evaluate.vcf_filters import VCF_Filters\n'), ((2864, 2940), 'evaluate.filtered_vcf_file.FilteredVCFFile._filter_records', 'FilteredVCFFile._filter_records', (['sample_to_gene_to_VCFs_all_records', 'filters'], {}), '(sample_to_gene_to_VCFs_all_records, filters)\n', (2895, 2940), False, 'from evaluate.filtered_vcf_file import FilteredVCFFile\n'), ((3348, 3390), 'evaluate.vcf_filters.VCF_Filters', 'VCF_Filters', (['([keep_record_filter_mock] * 3)'], {}), '([keep_record_filter_mock] * 3)\n', (3359, 3390), False, 'from evaluate.vcf_filters import VCF_Filters\n'), ((3407, 3483), 'evaluate.filtered_vcf_file.FilteredVCFFile._filter_records', 'FilteredVCFFile._filter_records', (['sample_to_gene_to_VCFs_all_records', 'filters'], {}), '(sample_to_gene_to_VCFs_all_records, filters)\n', (3438, 3483), False, 'from evaluate.filtered_vcf_file import FilteredVCFFile\n'), ((3893, 3937), 'evaluate.vcf_filters.VCF_Filters', 'VCF_Filters', (['([remove_record_filter_mock] * 3)'], {}), '([remove_record_filter_mock] * 3)\n', (3904, 3937), False, 'from evaluate.vcf_filters import VCF_Filters\n'), ((3954, 4030), 
'evaluate.filtered_vcf_file.FilteredVCFFile._filter_records', 'FilteredVCFFile._filter_records', (['sample_to_gene_to_VCFs_all_records', 'filters'], {}), '(sample_to_gene_to_VCFs_all_records, filters)\n', (3985, 4030), False, 'from evaluate.filtered_vcf_file import FilteredVCFFile\n'), ((4511, 4583), 'evaluate.vcf_filters.VCF_Filters', 'VCF_Filters', (['([keep_record_filter_mock] * 4 + [remove_record_filter_mock])'], {}), '([keep_record_filter_mock] * 4 + [remove_record_filter_mock])\n', (4522, 4583), False, 'from evaluate.vcf_filters import VCF_Filters\n'), ((4600, 4676), 'evaluate.filtered_vcf_file.FilteredVCFFile._filter_records', 'FilteredVCFFile._filter_records', (['sample_to_gene_to_VCFs_all_records', 'filters'], {}), '(sample_to_gene_to_VCFs_all_records, filters)\n', (4631, 4676), False, 'from evaluate.filtered_vcf_file import FilteredVCFFile\n'), ((4991, 4997), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (4995, 4997), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((5305, 5311), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (5309, 5311), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((5615, 5668), 'evaluate.vcf_filters.VCF_Filters', 'VCF_Filters', (['[filter_gene_0_mock, filter_gene_2_mock]'], {}), '([filter_gene_0_mock, filter_gene_2_mock])\n', (5626, 5668), False, 'from evaluate.vcf_filters import VCF_Filters\n'), ((5687, 5763), 'evaluate.filtered_vcf_file.FilteredVCFFile._filter_records', 'FilteredVCFFile._filter_records', (['sample_to_gene_to_VCFs_all_records', 'filters'], {}), '(sample_to_gene_to_VCFs_all_records, filters)\n', (5718, 5763), False, 'from evaluate.filtered_vcf_file import FilteredVCFFile\n'), ((6529, 6539), 'io.StringIO', 'StringIO', ([], {}), '()\n', (6537, 6539), False, 'from io import StringIO\n'), ((2327, 2401), 'pysam.VariantFile', 'pysam.VariantFile', (['"""tests/test_cases/sample_medaka_empty_vcf.expected.vcf"""'], {}), "('tests/test_cases/sample_medaka_empty_vcf.expected.vcf')\n", (2344, 2401), False, 'import pysam\n'), ((2457, 2611), 'evaluate.filtered_vcf_file.FilteredVCFFile', 'FilteredVCFFile', ([], {'pysam_variant_file': 'pysam_variant_file', 'filters': '[]', 'VCF_creator_method': 'VCFFactory.create_Medaka_VCF_from_VariantRecord_and_Sample'}), '(pysam_variant_file=pysam_variant_file, filters=[],\n VCF_creator_method=VCFFactory.\n create_Medaka_VCF_from_VariantRecord_and_Sample)\n', (2472, 2611), False, 'from evaluate.filtered_vcf_file import FilteredVCFFile\n'), ((6172, 6203), 'pysam.VariantFile', 'pysam.VariantFile', (['vcf_filepath'], {}), '(vcf_filepath)\n', (6189, 6203), False, 'import pysam\n'), ((857, 863), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (861, 863), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((865, 871), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (869, 871), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((897, 903), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (901, 903), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((929, 935), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (933, 935), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((937, 943), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (941, 943), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((945, 951), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (949, 951), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1010, 1016), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1014, 1016), False, 'from 
unittest.mock import Mock, patch, PropertyMock\n'), ((1042, 1048), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1046, 1048), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1050, 1056), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1054, 1056), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1058, 1064), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1062, 1064), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1090, 1096), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1094, 1096), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1098, 1104), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1102, 1104), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1106, 1112), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1110, 1112), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1114, 1120), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1118, 1120), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1122, 1128), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1126, 1128), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((1130, 1136), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1134, 1136), False, 'from unittest.mock import Mock, patch, PropertyMock\n'), ((6365, 6383), 'evaluate.coverage_filter.CoverageFilter', 'CoverageFilter', (['(55)'], {}), '(55)\n', (6379, 6383), False, 'from evaluate.coverage_filter import CoverageFilter\n')]
|
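The constructor test above leans on patching a property with new_callable=PropertyMock; stripped of the VCF machinery, the pattern is just (Thing is a made-up class for illustration):

from unittest.mock import PropertyMock, patch

class Thing:
    @property
    def value(self):
        return 42

with patch.object(Thing, "value", new_callable=PropertyMock, return_value=7):
    assert Thing().value == 7        # property stubbed inside the context
print(Thing().value)                  # 42 again once the patch exits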
import numpy as np
import lasagne
import theano
import theano.tensor as T
import random
import sys
import batch_char as batch
import time
import cPickle as pkl
import io
import evaluate
from collections import OrderedDict
from t2v import tweet2vec, init_params, load_params
from settings_char import N_BATCH, MAX_LENGTH, MAX_CLASSES
def classify(tweet, t_mask, params, n_classes, n_chars):
# tweet embedding
emb_layer = tweet2vec(tweet, t_mask, params, n_chars)
# Dense layer for classes
l_dense = lasagne.layers.DenseLayer(emb_layer, n_classes, W=params['W_cl'], b=params['b_cl'], nonlinearity=lasagne.nonlinearities.softmax)
return lasagne.layers.get_output(l_dense), lasagne.layers.get_output(emb_layer)
def main(args):
data_path = args[0]
model_path = args[1]
save_path = args[2]
if len(args)>3:
m_num = int(args[3])
print("Preparing Data...")
# Test data
Xt = []
yt = []
with io.open(data_path,'r',encoding='utf-8') as f:
for line in f:
(yc, Xc) = line.rstrip('\n').split('\t')
Xt.append(Xc[:MAX_LENGTH])
yt.append(yc.split(','))
# Model
print("Loading model params from base path: %s ..." % model_path)
if len(args)>3:
params = load_params('%s/model_%d.npz' % (model_path,m_num))
else:
params = load_params('%s/best_model.npz' % model_path)
print("Loading dictionaries...")
with open('%s/dict.pkl' % model_path, 'rb') as f:
chardict = pkl.load(f)
with open('%s/label_dict.pkl' % model_path, 'rb') as f:
labeldict = pkl.load(f)
n_char = len(chardict.keys()) + 1
n_classes = min(len(labeldict.keys()) + 1, MAX_CLASSES)
# iterators
test_iter = batch.BatchTweets(Xt, yt, labeldict, batch_size=N_BATCH, max_classes=MAX_CLASSES, test=True)
print("Building network...")
# Tweet variables
tweet = T.itensor3()
targets = T.imatrix()
# masks
t_mask = T.fmatrix()
    # network for prediction (build the graph once and reuse both outputs)
    predictions, embeddings = classify(tweet, t_mask, params, n_classes, n_char)
# Theano function
print("Compiling theano functions...")
predict = theano.function([tweet,t_mask],predictions)
encode = theano.function([tweet,t_mask],embeddings)
# Test
print("Testing...")
out_data = []
out_pred = []
out_emb = []
out_target = []
for xr,y in test_iter:
x, x_m = batch.prepare_data(xr, chardict, n_chars=n_char)
p = predict(x,x_m)
e = encode(x,x_m)
ranks = np.argsort(p)[:,::-1]
for idx, item in enumerate(xr):
out_data.append(item)
out_pred.append(ranks[idx,:])
out_emb.append(e[idx,:])
out_target.append(y[idx])
# Save
print("Saving...")
with open('%s/data.pkl'%save_path,'w') as f:
pkl.dump(out_data,f)
with open('%s/predictions.npy'%save_path,'w') as f:
np.save(f,np.asarray(out_pred))
with open('%s/embeddings.npy'%save_path,'w') as f:
np.save(f,np.asarray(out_emb))
with open('%s/targets.pkl'%save_path,'w') as f:
pkl.dump(out_target,f)
if __name__ == '__main__':
main(sys.argv[1:])
evaluate.main(sys.argv[3],sys.argv[2])
|
[
"evaluate.main"
] |
[((430, 471), 't2v.tweet2vec', 'tweet2vec', (['tweet', 't_mask', 'params', 'n_chars'], {}), '(tweet, t_mask, params, n_chars)\n', (439, 471), False, 'from t2v import tweet2vec, init_params, load_params\n'), ((516, 649), 'lasagne.layers.DenseLayer', 'lasagne.layers.DenseLayer', (['emb_layer', 'n_classes'], {'W': "params['W_cl']", 'b': "params['b_cl']", 'nonlinearity': 'lasagne.nonlinearities.softmax'}), "(emb_layer, n_classes, W=params['W_cl'], b=params[\n 'b_cl'], nonlinearity=lasagne.nonlinearities.softmax)\n", (541, 649), False, 'import lasagne\n'), ((1740, 1837), 'batch_char.BatchTweets', 'batch.BatchTweets', (['Xt', 'yt', 'labeldict'], {'batch_size': 'N_BATCH', 'max_classes': 'MAX_CLASSES', 'test': '(True)'}), '(Xt, yt, labeldict, batch_size=N_BATCH, max_classes=\n MAX_CLASSES, test=True)\n', (1757, 1837), True, 'import batch_char as batch\n'), ((1901, 1913), 'theano.tensor.itensor3', 'T.itensor3', ([], {}), '()\n', (1911, 1913), True, 'import theano.tensor as T\n'), ((1928, 1939), 'theano.tensor.imatrix', 'T.imatrix', ([], {}), '()\n', (1937, 1939), True, 'import theano.tensor as T\n'), ((1966, 1977), 'theano.tensor.fmatrix', 'T.fmatrix', ([], {}), '()\n', (1975, 1977), True, 'import theano.tensor as T\n'), ((2231, 2276), 'theano.function', 'theano.function', (['[tweet, t_mask]', 'predictions'], {}), '([tweet, t_mask], predictions)\n', (2246, 2276), False, 'import theano\n'), ((2288, 2332), 'theano.function', 'theano.function', (['[tweet, t_mask]', 'embeddings'], {}), '([tweet, t_mask], embeddings)\n', (2303, 2332), False, 'import theano\n'), ((3258, 3297), 'evaluate.main', 'evaluate.main', (['sys.argv[3]', 'sys.argv[2]'], {}), '(sys.argv[3], sys.argv[2])\n', (3271, 3297), False, 'import evaluate\n'), ((657, 691), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_dense'], {}), '(l_dense)\n', (682, 691), False, 'import lasagne\n'), ((693, 729), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['emb_layer'], {}), '(emb_layer)\n', (718, 729), False, 'import lasagne\n'), ((951, 992), 'io.open', 'io.open', (['data_path', '"""r"""'], {'encoding': '"""utf-8"""'}), "(data_path, 'r', encoding='utf-8')\n", (958, 992), False, 'import io\n'), ((1269, 1321), 't2v.load_params', 'load_params', (["('%s/model_%d.npz' % (model_path, m_num))"], {}), "('%s/model_%d.npz' % (model_path, m_num))\n", (1280, 1321), False, 'from t2v import tweet2vec, init_params, load_params\n'), ((1348, 1393), 't2v.load_params', 'load_params', (["('%s/best_model.npz' % model_path)"], {}), "('%s/best_model.npz' % model_path)\n", (1359, 1393), False, 'from t2v import tweet2vec, init_params, load_params\n'), ((1505, 1516), 'cPickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1513, 1516), True, 'import cPickle as pkl\n'), ((1597, 1608), 'cPickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1605, 1608), True, 'import cPickle as pkl\n'), ((2484, 2532), 'batch_char.prepare_data', 'batch.prepare_data', (['xr', 'chardict'], {'n_chars': 'n_char'}), '(xr, chardict, n_chars=n_char)\n', (2502, 2532), True, 'import batch_char as batch\n'), ((2908, 2929), 'cPickle.dump', 'pkl.dump', (['out_data', 'f'], {}), '(out_data, f)\n', (2916, 2929), True, 'import cPickle as pkl\n'), ((3179, 3202), 'cPickle.dump', 'pkl.dump', (['out_target', 'f'], {}), '(out_target, f)\n', (3187, 3202), True, 'import cPickle as pkl\n'), ((2602, 2615), 'numpy.argsort', 'np.argsort', (['p'], {}), '(p)\n', (2612, 2615), True, 'import numpy as np\n'), ((3003, 3023), 'numpy.asarray', 'np.asarray', (['out_pred'], {}), '(out_pred)\n', (3013, 3023), True, 
'import numpy as np\n'), ((3098, 3117), 'numpy.asarray', 'np.asarray', (['out_emb'], {}), '(out_emb)\n', (3108, 3117), True, 'import numpy as np\n')]
|
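The ranks = np.argsort(p)[:, ::-1] trick in the test loop turns a row of class probabilities into class indices sorted best-first; a tiny standalone check:

import numpy as np

p = np.array([[0.1, 0.7, 0.2]])
ranks = np.argsort(p)[:, ::-1]
print(ranks)   # [[1 2 0]] -> class 1 is the top prediction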
import json
from evaluate import Evaluator
input_path = './input/input.json' # input file path
output_path = './output/length2.tsv' # output file path
evaluator = Evaluator()
def calculate_rouge_all(score):
rouge_all = 0.2 * score["rouge-1"] + 0.4 * score["rouge-2"] + 0.4 * score["rouge-l"]
return rouge_all
def get_length(question, candidate, answer):
strings = question
strings += candidate
strings += answer
return strings
if __name__ == '__main__':
length = []
with open(output_path, 'w', encoding='utf8') as fw:
with open(input_path, 'r', encoding="utf8") as f:
for line in f:
data = json.loads(line)
id = data.get('id')
question = data.get("question")
                question = question.replace("\n", "")
question = question.replace("\r", "")
candidates_list = data.get('candidates')
real_answer = data.get("answer")
real_answer = real_answer.replace("\n", "")
for candidate in candidates_list:
candidate = candidate.replace("\n", "")
temp_length = len(get_length(question, candidate, real_answer))
score = evaluator.compute_rouge(real_answer, candidate)
rouge_1=score["rouge-1"]
rouge_2 = score["rouge-2"]
rouge_l = score["rouge-l"]
rouge_all = calculate_rouge_all(score)
fw.write(str(question) + "\t" +str(candidate) + "\t"+ str(real_answer) + "\t" +
str(rouge_1)+"\t"+str(rouge_2)+"\t"+str(rouge_l)+"\t"+ str(rouge_all)+"\t"+
str(len(question)) +"\t"+str(len(question+candidate)) +"\t"+str(len(real_answer))+ "\t" +str(temp_length) + "\n")
|
[
"evaluate.Evaluator"
] |
[((167, 178), 'evaluate.Evaluator', 'Evaluator', ([], {}), '()\n', (176, 178), False, 'from evaluate import Evaluator\n'), ((666, 682), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (676, 682), False, 'import json\n')]
|
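A worked instance of calculate_rouge_all with made-up scores, to make the 0.2/0.4/0.4 weighting concrete:

score = {"rouge-1": 0.50, "rouge-2": 0.30, "rouge-l": 0.40}
rouge_all = 0.2 * score["rouge-1"] + 0.4 * score["rouge-2"] + 0.4 * score["rouge-l"]
print(round(rouge_all, 2))   # 0.2*0.50 + 0.4*0.30 + 0.4*0.40 = 0.38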
"""
Like described in the :mod:`parsing_representation` module, there's a need for
an ast like module to represent the states of parsed modules.
But now there are also structures in Python that need a little bit more than
that. An ``Instance`` for example is only a ``Class`` before it is
instantiated. This class represents these cases.
So, why is there also a ``Class`` class here? Well, there are decorators and
they change classes in Python 3.
"""
from __future__ import with_statement
import copy
import itertools
from jedi._compatibility import use_metaclass, next, hasattr, unicode
from jedi.parser import representation as pr
from jedi import cache
from jedi import helpers
from jedi import debug
from jedi import common
import recursion
import docstrings
import imports
import evaluate
import builtin
import dynamic
class Executable(pr.IsScope):
"""
An instance is also an executable - because __init__ is called
:param var_args: The param input array, consist of `pr.Array` or list.
"""
def __init__(self, base, var_args=()):
self.base = base
self.var_args = var_args
def get_parent_until(self, *args, **kwargs):
return self.decorated.get_parent_until(*args, **kwargs)
@property
def parent(self):
return self.decorated.parent
@property
def decorated(self):
"""
Instance doesn't care about decorators and Execution overrides this
"""
return self.base
class Instance(use_metaclass(cache.CachedMetaClass, Executable)):
"""
This class is used to evaluate instances.
"""
def __init__(self, base, var_args=()):
super(Instance, self).__init__(base, var_args)
if str(base.name) in ['list', 'set'] \
and builtin.Builtin.scope == base.get_parent_until():
# compare the module path with the builtin name.
self.var_args = dynamic.check_array_instances(self)
else:
# need to execute the __init__ function, because the dynamic param
# searching needs it.
with common.ignored(KeyError):
self.execute_subscope_by_name('__init__', self.var_args)
# Generated instances are classes that are just generated by self
# (No var_args) used.
self.is_generated = False
@cache.memoize_default()
def _get_method_execution(self, func):
func = InstanceElement(self, func, True)
return Execution(func, self.var_args)
def _get_func_self_name(self, func):
"""
Returns the name of the first param in a class method (which is
        normally self).
"""
try:
return str(func.params[0].get_name())
except IndexError:
return None
@cache.memoize_default([])
def _get_self_attributes(self):
def add_self_dot_name(name):
"""
Need to copy and rewrite the name, because names are now
``instance_usage.variable`` instead of ``self.variable``.
"""
n = copy.copy(name)
n.names = n.names[1:]
names.append(InstanceElement(self, n))
names = []
# This loop adds the names of the self object, copies them and removes
# the self.
for sub in self.base.subscopes:
if isinstance(sub, pr.Class):
continue
# Get the self name, if there's one.
self_name = self._get_func_self_name(sub)
if not self_name:
continue
if sub.name.get_code() == '__init__':
                # ``__init__`` is special because the params need to be injected
# this way. Therefore an execution is necessary.
if not sub.decorators:
# __init__ decorators should generally just be ignored,
# because to follow them and their self variables is too
# complicated.
sub = self._get_method_execution(sub)
for n in sub.get_set_vars():
                # Only names with the self name are being added.
                # It is also important that they have a len() of 2,
                # because otherwise they are just something else.
if n.names[0] == self_name and len(n.names) == 2:
add_self_dot_name(n)
for s in self.base.get_super_classes():
names += Instance(s)._get_self_attributes()
return names
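# Illustrative example (assumed source, not from this file): for
#     def __init__(self): self.x = 3
# the name ``self.x`` has two name parts, so it passes the len() == 2 check
# above; it is copied, ``self`` is stripped, and the copy is wrapped in an
# InstanceElement so that completion on the instance offers ``x``.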
def get_subscope_by_name(self, name):
sub = self.base.get_subscope_by_name(name)
return InstanceElement(self, sub, True)
def execute_subscope_by_name(self, name, args=()):
method = self.get_subscope_by_name(name)
return Execution(method, args).get_return_types()
def get_descriptor_return(self, obj):
""" Throws a KeyError if there's no method. """
# Arguments in __get__ descriptors are obj, class.
# `method` is the new parent of the array, don't know if that's good.
args = [obj, obj.base] if isinstance(obj, Instance) else [None, obj]
return self.execute_subscope_by_name('__get__', args)
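# A hedged sketch of the descriptor protocol being modelled here:
#     class D(object):
#         def __get__(self, obj, cls): return 42
#     class C(object):
#         d = D()
#     C().d   # -> D.__get__(d, <C instance>, C) -> 42
# which is why [obj, obj.base] is passed for instances above.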
@cache.memoize_default([])
def get_defined_names(self):
"""
Get the instance vars of a class. This includes the vars of all
classes
"""
names = self._get_self_attributes()
class_names = self.base.instance_names()
for var in class_names:
names.append(InstanceElement(self, var, True))
return names
def scope_generator(self):
"""
An Instance has two scopes: The scope with self names and the class
scope. Instance variables have priority over the class scope.
"""
yield self, self._get_self_attributes()
names = []
class_names = self.base.instance_names()
for var in class_names:
names.append(InstanceElement(self, var, True))
yield self, names
def get_index_types(self, index=None):
args = [] if index is None else [index]
try:
return self.execute_subscope_by_name('__getitem__', args)
except KeyError:
debug.warning('No __getitem__, cannot access the array.')
return []
def __getattr__(self, name):
if name not in ['start_pos', 'end_pos', 'name', 'get_imports',
'doc', 'docstr', 'asserts']:
raise AttributeError("Instance %s: Don't touch this (%s)!"
% (self, name))
return getattr(self.base, name)
def __repr__(self):
return "<e%s of %s (var_args: %s)>" % \
(type(self).__name__, self.base, len(self.var_args or []))
class InstanceElement(use_metaclass(cache.CachedMetaClass, pr.Base)):
"""
InstanceElement is a wrapper for any object that is used as an instance
variable (e.g. self.variable or class methods).
"""
def __init__(self, instance, var, is_class_var=False):
if isinstance(var, pr.Function):
var = Function(var)
elif isinstance(var, pr.Class):
var = Class(var)
self.instance = instance
self.var = var
self.is_class_var = is_class_var
@property
@cache.memoize_default()
def parent(self):
par = self.var.parent
if isinstance(par, Class) and par == self.instance.base \
or isinstance(par, pr.Class) \
and par == self.instance.base.base:
par = self.instance
elif not isinstance(par, pr.Module):
par = InstanceElement(self.instance, par, self.is_class_var)
return par
def get_parent_until(self, *args, **kwargs):
return pr.Simple.get_parent_until(self, *args, **kwargs)
def get_decorated_func(self):
""" Needed because the InstanceElement should not be stripped """
func = self.var.get_decorated_func(self.instance)
if func == self.var:
return self
return func
def get_commands(self):
# Copy and modify the array.
return [InstanceElement(self.instance, command, self.is_class_var)
if not isinstance(command, unicode) else command
for command in self.var.get_commands()]
def __iter__(self):
for el in self.var.__iter__():
yield InstanceElement(self.instance, el, self.is_class_var)
def __getattr__(self, name):
return getattr(self.var, name)
def isinstance(self, *cls):
return isinstance(self.var, cls)
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self.var)
class Class(use_metaclass(cache.CachedMetaClass, pr.IsScope)):
"""
This class is not only important to extend `pr.Class`, it is also
important for descriptors (whether the descriptor methods are evaluated or not).
"""
def __init__(self, base):
self.base = base
@cache.memoize_default(default=())
def get_super_classes(self):
supers = []
# TODO care for mro stuff (multiple super classes).
for s in self.base.supers:
# Super classes are statements.
for cls in evaluate.follow_statement(s):
if not isinstance(cls, Class):
debug.warning('Received non class, as a super class')
continue # Just ignore other stuff (user input error).
supers.append(cls)
if not supers and self.base.parent != builtin.Builtin.scope:
# add `object` to classes
supers += evaluate.find_name(builtin.Builtin.scope, 'object')
return supers
@cache.memoize_default(default=())
def instance_names(self):
def in_iterable(name, iterable):
""" checks if the name is in the variable 'iterable'. """
for i in iterable:
# Only the last name is important, because these names have a
# maximal length of 2, with the first one being `self`.
if i.names[-1] == name.names[-1]:
return True
return False
result = self.base.get_defined_names()
super_result = []
# TODO mro!
for cls in self.get_super_classes():
# Get the inherited names.
for i in cls.instance_names():
if not in_iterable(i, result):
super_result.append(i)
result += super_result
return result
@cache.memoize_default(default=())
def get_defined_names(self):
result = self.instance_names()
type_cls = evaluate.find_name(builtin.Builtin.scope, 'type')[0]
return result + type_cls.base.get_defined_names()
def get_subscope_by_name(self, name):
for sub in reversed(self.subscopes):
if sub.name.get_code() == name:
return sub
raise KeyError("Couldn't find subscope.")
@property
def name(self):
return self.base.name
def __getattr__(self, name):
if name not in ['start_pos', 'end_pos', 'parent', 'asserts', 'docstr',
'doc', 'get_imports', 'get_parent_until', 'get_code',
'subscopes']:
raise AttributeError("Don't touch this: %s of %s !" % (name, self))
return getattr(self.base, name)
def __repr__(self):
return "<e%s of %s>" % (type(self).__name__, self.base)
class Function(use_metaclass(cache.CachedMetaClass, pr.IsScope)):
"""
Needed because of decorators. Decorators are evaluated here.
"""
def __init__(self, func, is_decorated=False):
""" This should not be called directly """
self.base_func = func
self.is_decorated = is_decorated
@cache.memoize_default()
def _decorated_func(self, instance=None):
"""
Returns the function, that is to be executed in the end.
This is also the places where the decorators are processed.
"""
f = self.base_func
# Only enter it, if has not already been processed.
if not self.is_decorated:
for dec in reversed(self.base_func.decorators):
debug.dbg('decorator:', dec, f)
dec_results = set(evaluate.follow_statement(dec))
if not len(dec_results):
debug.warning('decorator not found: %s on %s' %
(dec, self.base_func))
return None
decorator = dec_results.pop()
if dec_results:
debug.warning('multiple decorators found', self.base_func,
dec_results)
# Create param array.
old_func = Function(f, is_decorated=True)
if instance is not None and decorator.isinstance(Function):
old_func = InstanceElement(instance, old_func)
instance = None
wrappers = Execution(decorator, (old_func,)).get_return_types()
if not len(wrappers):
debug.warning('no wrappers found', self.base_func)
return None
if len(wrappers) > 1:
debug.warning('multiple wrappers found', self.base_func,
wrappers)
# This is here, that the wrapper gets executed.
f = wrappers[0]
debug.dbg('decorator end', f)
if f != self.base_func and isinstance(f, pr.Function):
f = Function(f)
return f
def get_decorated_func(self, instance=None):
decorated_func = self._decorated_func(instance)
if decorated_func == self.base_func:
return self
if decorated_func is None:
# If the decorator func is not found, just ignore the decorator
# function, because sometimes decorators are just really
# complicated.
return Function(self.base_func, True)
return decorated_func
def get_magic_method_names(self):
return builtin.Builtin.magic_function_scope.get_defined_names()
def get_magic_method_scope(self):
return builtin.Builtin.magic_function_scope
def __getattr__(self, name):
return getattr(self.base_func, name)
def __repr__(self):
dec = ''
if self._decorated_func() != self.base_func:
dec = " is " + repr(self._decorated_func())
return "<e%s of %s%s>" % (type(self).__name__, self.base_func, dec)
class Execution(Executable):
"""
This class is used to evaluate functions and their returns.
This is the most complicated class, because it contains the logic to
transfer parameters. It is even more complicated, because there may be
multiple calls to functions and recursion has to be avoided. But this is the
responsibility of the decorators.
"""
def follow_var_arg(self, index):
try:
stmt = self.var_args[index]
except IndexError:
return []
else:
if isinstance(stmt, pr.Statement):
return evaluate.follow_statement(stmt)
else:
return [stmt] # just some arbitrary object
@property
@cache.memoize_default()
def decorated(self):
"""Get the decorated version of the input"""
base = self.base
if self.base.isinstance(Function):
base = base.get_decorated_func()
return base
@cache.memoize_default(default=())
@recursion.ExecutionRecursionDecorator
def get_return_types(self, evaluate_generator=False):
""" Get the return types of a function. """
base = self.decorated
stmts = []
if base.parent == builtin.Builtin.scope \
and not isinstance(base, (Generator, Array)):
func_name = str(base.name)
# some implementations of builtins:
if func_name == 'getattr':
# follow the first param
objects = self.follow_var_arg(0)
names = self.follow_var_arg(1)
for obj in objects:
if not isinstance(obj, (Instance, Class, pr.Module)):
debug.warning('getattr called without instance')
continue
for arr_name in names:
if not isinstance(arr_name, Instance):
debug.warning('getattr called without str')
continue
if len(arr_name.var_args) != 1:
debug.warning('jedi getattr is too simple')
key = arr_name.var_args[0]
stmts += evaluate.follow_path(iter([key]), obj, base)
return stmts
elif func_name == 'type':
# otherwise it would be a metaclass
if len(self.var_args) == 1:
objects = self.follow_var_arg(0)
return [o.base for o in objects if isinstance(o, Instance)]
elif func_name == 'super':
# TODO make this able to detect multiple inheritance supers
accept = (pr.Function,)
func = self.var_args.get_parent_until(accept)
if func.isinstance(*accept):
cls = func.get_parent_until(accept + (pr.Class,),
include_current=False)
if isinstance(cls, pr.Class):
cls = Class(cls)
su = cls.get_super_classes()
if su:
return [Instance(su[0])]
return []
if base.isinstance(Class):
# There maybe executions of executions.
return [Instance(base, self.var_args)]
elif isinstance(base, Generator):
return base.iter_content()
else:
try:
base.returns # Test if it is a function
except AttributeError:
if hasattr(base, 'execute_subscope_by_name'):
try:
stmts = base.execute_subscope_by_name('__call__',
self.var_args)
except KeyError:
debug.warning("no __call__ func available", base)
else:
debug.warning("no execution possible", base)
else:
stmts = self._get_function_returns(base, evaluate_generator)
debug.dbg('exec result: %s in %s' % (stmts, self))
return imports.strip_imports(stmts)
def _get_function_returns(self, func, evaluate_generator):
""" A normal Function execution """
# Feed the listeners, with the params.
for listener in func.listeners:
listener.execute(self.get_params())
if func.is_generator and not evaluate_generator:
return [Generator(func, self.var_args)]
else:
stmts = docstrings.find_return_types(func)
for r in self.returns:
if r is not None:
stmts += evaluate.follow_statement(r)
return stmts
@cache.memoize_default(default=())
def get_params(self):
"""
This returns the params for an Execution/Instance and is injected as a
'hack' into the pr.Function class.
This needs to be here, because Instance can have __init__ functions,
which act the same way as normal functions.
"""
def gen_param_name_copy(param, keys=(), values=(), array_type=None):
"""
Create a param with the original scope (of varargs) as parent.
"""
if isinstance(self.var_args, pr.Array):
parent = self.var_args.parent
start_pos = self.var_args.start_pos
else:
parent = self.decorated
start_pos = 0, 0
new_param = copy.copy(param)
new_param.is_generated = True
if parent is not None:
new_param.parent = parent
# create an Array (-> needed for *args/**kwargs tuples/dicts)
arr = pr.Array(self._sub_module, start_pos, array_type, parent)
arr.values = values
key_stmts = []
for key in keys:
stmt = pr.Statement(self._sub_module, [], start_pos, None)
stmt._commands = [key]
key_stmts.append(stmt)
arr.keys = key_stmts
arr.type = array_type
new_param._commands = [arr]
name = copy.copy(param.get_name())
name.parent = new_param
return name
result = []
start_offset = 0
if isinstance(self.decorated, InstanceElement):
# Care for self -> just exclude it and add the instance
start_offset = 1
self_name = copy.copy(self.decorated.params[0].get_name())
self_name.parent = self.decorated.instance
result.append(self_name)
param_dict = {}
for param in self.decorated.params:
param_dict[str(param.get_name())] = param
# There may be calls, which don't fit all the params, this just ignores
# it.
var_arg_iterator = self.get_var_args_iterator()
non_matching_keys = []
keys_used = set()
keys_only = False
for param in self.decorated.params[start_offset:]:
# The value and key can both be None. In that case, the defaults apply.
# args / kwargs will just be empty arrays / dicts, respectively.
# Wrong value count is just ignored. If you try to test cases that
# are not allowed in Python, Jedi may not show any
# completions.
key, value = next(var_arg_iterator, (None, None))
while key:
keys_only = True
try:
key_param = param_dict[str(key)]
except KeyError:
non_matching_keys.append((key, value))
else:
keys_used.add(str(key))
result.append(gen_param_name_copy(key_param,
values=[value]))
key, value = next(var_arg_iterator, (None, None))
commands = param.get_commands()
keys = []
values = []
array_type = None
ignore_creation = False
if commands[0] == '*':
# *args param
array_type = pr.Array.TUPLE
if value:
values.append(value)
for key, value in var_arg_iterator:
# Iterate until a key argument is found.
if key:
var_arg_iterator.push_back((key, value))
break
values.append(value)
elif commands[0] == '**':
# **kwargs param
array_type = pr.Array.DICT
if non_matching_keys:
keys, values = zip(*non_matching_keys)
elif not keys_only:
# normal param
if value is not None:
values = [value]
else:
if param.assignment_details:
# No value: return the default values.
ignore_creation = True
result.append(param.get_name())
param.is_generated = True
else:
# If there is no assignment detail, that means there is
# no assignment, just the result. Therefore nothing has
# to be returned.
values = []
# Just ignore all the params that are without a key, after one
# keyword argument was set.
if not ignore_creation and (not keys_only or commands[0] == '**'):
keys_used.add(str(key))
result.append(gen_param_name_copy(param, keys=keys,
values=values, array_type=array_type))
if keys_only:
# sometimes param arguments are not completely written (which would
# create an Exception, but we have to handle that).
for k in set(param_dict) - keys_used:
result.append(gen_param_name_copy(param_dict[k]))
return result
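# Worked example (hypothetical call, not from the source): given
#     def f(a, b=1, *args, **kwargs): ...
#     f(1, 2, 3, x=4)
# the var args iterator yields (None, 1), (None, 2), (None, 3), ('x', 4);
# ``a`` and ``b`` bind positionally, ``args`` collects the remaining values
# into a TUPLE array and ``kwargs`` collects the non-matching keys into a
# DICT array, matching the three branches above.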
def get_var_args_iterator(self):
"""
Yields a key/value pair; the key is None if it's not a named arg.
"""
def iterate():
# `var_args` is typically an Array, and not a list.
for stmt in self.var_args:
if not isinstance(stmt, pr.Statement):
if stmt is None:
yield None, None
continue
old = stmt
# generate a statement if it's not already one.
module = builtin.Builtin.scope
stmt = pr.Statement(module, [], (0, 0), None)
stmt._commands = [old]
# *args
commands = stmt.get_commands()
if not len(commands):
continue
if commands[0] == '*':
arrays = evaluate.follow_call_list(commands[1:])
# *args must be some sort of an array, otherwise -> ignore
for array in arrays:
if isinstance(array, Array):
for field_stmt in array: # yield from plz!
yield None, field_stmt
elif isinstance(array, Generator):
for field_stmt in array.iter_content():
yield None, helpers.FakeStatement(field_stmt)
# **kwargs
elif commands[0] == '**':
arrays = evaluate.follow_call_list(commands[1:])
for array in arrays:
if isinstance(array, Array):
for key_stmt, value_stmt in array.items():
# the first index is the key, if syntactically correct
call = key_stmt.get_commands()[0]
if isinstance(call, pr.Name):
yield call, value_stmt
elif isinstance(call, pr.Call):
yield call.name, value_stmt
# Normal arguments (including key arguments).
else:
if stmt.assignment_details:
key_arr, op = stmt.assignment_details[0]
# named parameter
if key_arr and isinstance(key_arr[0], pr.Call):
yield key_arr[0].name, stmt
else:
yield None, stmt
return iter(common.PushBackIterator(iterate()))
def get_defined_names(self):
"""
Call the default method with the instance itself (self implements all
the necessary functions). Also add the params.
"""
return self.get_params() + pr.Scope.get_set_vars(self)
get_set_vars = get_defined_names
@common.rethrow_uncaught
def copy_properties(self, prop):
"""
Literally copies a property of a Function. Copying is very expensive,
because it is something like `copy.deepcopy`. However, these copied
objects can be used for the executions, as if they were in the
execution.
"""
# Copy all these lists into this local function.
attr = getattr(self.decorated, prop)
objects = []
for element in attr:
if element is None:
copied = element
else:
copied = helpers.fast_parent_copy(element)
copied.parent = self._scope_copy(copied.parent)
if isinstance(copied, pr.Function):
copied = Function(copied)
objects.append(copied)
return objects
def __getattr__(self, name):
if name not in ['start_pos', 'end_pos', 'imports', '_sub_module']:
raise AttributeError('Tried to access %s: %s. Why?' % (name, self))
return getattr(self.decorated, name)
@cache.memoize_default()
@common.rethrow_uncaught
def _scope_copy(self, scope):
""" Copies a scope (e.g. if) in an execution """
# TODO method uses different scopes than the subscopes property.
# just check the start_pos, sometimes it's difficult with closures
# to compare the scopes directly.
if scope.start_pos == self.start_pos:
return self
else:
copied = helpers.fast_parent_copy(scope)
copied.parent = self._scope_copy(copied.parent)
return copied
@property
@cache.memoize_default()
def returns(self):
return self.copy_properties('returns')
@property
@cache.memoize_default()
def asserts(self):
return self.copy_properties('asserts')
@property
@cache.memoize_default()
def statements(self):
return self.copy_properties('statements')
@property
@cache.memoize_default()
def subscopes(self):
return self.copy_properties('subscopes')
def get_statement_for_position(self, pos):
return pr.Scope.get_statement_for_position(self, pos)
def __repr__(self):
return "<%s of %s>" % \
(type(self).__name__, self.decorated)
class Generator(use_metaclass(cache.CachedMetaClass, pr.Base)):
""" Cares for `yield` statements. """
def __init__(self, func, var_args):
super(Generator, self).__init__()
self.func = func
self.var_args = var_args
def get_defined_names(self):
"""
Returns a list of names that define a generator, which can return the
content of a generator.
"""
names = []
none_pos = (0, 0)
executes_generator = ('__next__', 'send')
for n in ('close', 'throw') + executes_generator:
name = pr.Name(builtin.Builtin.scope, [(n, none_pos)],
none_pos, none_pos)
if n in executes_generator:
name.parent = self
else:
name.parent = builtin.Builtin.scope
names.append(name)
debug.dbg('generator names', names)
return names
def iter_content(self):
""" returns the content of __iter__ """
return Execution(self.func, self.var_args).get_return_types(True)
def get_index_types(self, index=None):
debug.warning('Tried to get array access on a generator', self)
return []
def __getattr__(self, name):
if name not in ['start_pos', 'end_pos', 'parent', 'get_imports',
'asserts', 'doc', 'docstr', 'get_parent_until', 'get_code',
'subscopes']:
raise AttributeError("Accessing %s of %s is not allowed."
% (self, name))
return getattr(self.func, name)
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self.func)
class Array(use_metaclass(cache.CachedMetaClass, pr.Base)):
"""
Used as a mirror to pr.Array, if needed. It defines some getter
methods which are important in this module.
"""
def __init__(self, array):
self._array = array
def get_index_types(self, index_arr=None):
""" Get the types of a specific index or all, if not given """
if index_arr is not None:
if index_arr and [x for x in index_arr if ':' in x.get_commands()]:
# array slicing
return [self]
index_possibilities = self._follow_values(index_arr)
if len(index_possibilities) == 1:
# This is indexing only one element, with a fixed index number,
# otherwise it just ignores the index (e.g. [1+1]).
index = index_possibilities[0]
if isinstance(index, Instance) \
and str(index.name) in ['int', 'str'] \
and len(index.var_args) == 1:
# TODO this is just very hackish and a lot of use cases are
# being ignored
with common.ignored(KeyError, IndexError,
UnboundLocalError, TypeError):
return self.get_exact_index_types(index.var_args[0])
result = list(self._follow_values(self._array.values))
result += dynamic.check_array_additions(self)
return set(result)
def get_exact_index_types(self, mixed_index):
""" Here the index is an int/str. Raises IndexError/KeyError """
index = mixed_index
if self.type == pr.Array.DICT:
index = None
for i, key_statement in enumerate(self._array.keys):
# Because we only want the key to be a string.
key_commands = key_statement.get_commands()
if len(key_commands) != 1: # cannot deal with complex strings
continue
key = key_commands[0]
if isinstance(key, pr.String):
str_key = key.value
elif isinstance(key, pr.Name):
str_key = str(key)
else:
continue  # skip unsupported key types so str_key is never unbound
if mixed_index == str_key:
index = i
break
if index is None:
raise KeyError('No key found in dictionary')
# Can raise an IndexError
values = [self._array.values[index]]
return self._follow_values(values)
def _follow_values(self, values):
""" helper function for the index getters """
return list(itertools.chain.from_iterable(evaluate.follow_statement(v)
for v in values))
def get_defined_names(self):
"""
This method generates all `ArrayMethod` for one pr.Array.
It returns e.g. for a list: append, pop, ...
"""
# `array.type` is a string with the type, e.g. 'list'.
scope = evaluate.find_name(builtin.Builtin.scope, self._array.type)[0]
scope = Instance(scope)
names = scope.get_defined_names()
return [ArrayMethod(n) for n in names]
@property
def parent(self):
return builtin.Builtin.scope
def get_parent_until(self):
return builtin.Builtin.scope
def __getattr__(self, name):
if name not in ['type', 'start_pos', 'get_only_subelement', 'parent',
'get_parent_until', 'items']:
raise AttributeError('Strange access on %s: %s.' % (self, name))
return getattr(self._array, name)
def __getitem__(self):
return self._array.__getitem__()
def __iter__(self):
return self._array.__iter__()
def __len__(self):
return self._array.__len__()
def __repr__(self):
return "<e%s of %s>" % (type(self).__name__, self._array)
class ArrayMethod(object):
"""
A name, e.g. `list.append`, it is used to access the original array
methods.
"""
def __init__(self, name):
super(ArrayMethod, self).__init__()
self.name = name
def __getattr__(self, name):
# Set access privileges:
if name not in ['parent', 'names', 'start_pos', 'end_pos', 'get_code']:
raise AttributeError('Strange access on %s: %s.' % (self, name))
return getattr(self.name, name)
def get_parent_until(self):
return builtin.Builtin.scope
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self.name)
|
[
"evaluate.follow_call_list",
"evaluate.follow_statement",
"evaluate.find_name"
] |
[((1490, 1538), 'jedi._compatibility.use_metaclass', 'use_metaclass', (['cache.CachedMetaClass', 'Executable'], {}), '(cache.CachedMetaClass, Executable)\n', (1503, 1538), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((6775, 6820), 'jedi._compatibility.use_metaclass', 'use_metaclass', (['cache.CachedMetaClass', 'pr.Base'], {}), '(cache.CachedMetaClass, pr.Base)\n', (6788, 6820), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((8693, 8741), 'jedi._compatibility.use_metaclass', 'use_metaclass', (['cache.CachedMetaClass', 'pr.IsScope'], {}), '(cache.CachedMetaClass, pr.IsScope)\n', (8706, 8741), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((11491, 11539), 'jedi._compatibility.use_metaclass', 'use_metaclass', (['cache.CachedMetaClass', 'pr.IsScope'], {}), '(cache.CachedMetaClass, pr.IsScope)\n', (11504, 11539), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((30048, 30093), 'jedi._compatibility.use_metaclass', 'use_metaclass', (['cache.CachedMetaClass', 'pr.Base'], {}), '(cache.CachedMetaClass, pr.Base)\n', (30061, 30093), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((31729, 31774), 'jedi._compatibility.use_metaclass', 'use_metaclass', (['cache.CachedMetaClass', 'pr.Base'], {}), '(cache.CachedMetaClass, pr.Base)\n', (31742, 31774), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((2330, 2353), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (2351, 2353), False, 'from jedi import cache\n'), ((2773, 2798), 'jedi.cache.memoize_default', 'cache.memoize_default', (['[]'], {}), '([])\n', (2794, 2798), False, 'from jedi import cache\n'), ((5184, 5209), 'jedi.cache.memoize_default', 'cache.memoize_default', (['[]'], {}), '([])\n', (5205, 5209), False, 'from jedi import cache\n'), ((7286, 7309), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (7307, 7309), False, 'from jedi import cache\n'), ((8973, 9006), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {'default': '()'}), '(default=())\n', (8994, 9006), False, 'from jedi import cache\n'), ((9693, 9726), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {'default': '()'}), '(default=())\n', (9714, 9726), False, 'from jedi import cache\n'), ((10526, 10559), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {'default': '()'}), '(default=())\n', (10547, 10559), False, 'from jedi import cache\n'), ((11801, 11824), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (11822, 11824), False, 'from jedi import cache\n'), ((15330, 15353), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (15351, 15353), False, 'from jedi import cache\n'), ((15571, 15604), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {'default': '()'}), '(default=())\n', (15592, 15604), False, 'from jedi import cache\n'), ((19383, 19416), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {'default': '()'}), '(default=())\n', (19404, 19416), False, 'from jedi import cache\n'), ((28789, 28812), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (28810, 28812), False, 'from jedi import cache\n'), ((29367, 29390), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (29388, 29390), False, 'from jedi import cache\n'), ((29481, 29504), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], 
{}), '()\n', (29502, 29504), False, 'from jedi import cache\n'), ((29595, 29618), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (29616, 29618), False, 'from jedi import cache\n'), ((29715, 29738), 'jedi.cache.memoize_default', 'cache.memoize_default', ([], {}), '()\n', (29736, 29738), False, 'from jedi import cache\n'), ((7757, 7806), 'jedi.parser.representation.Simple.get_parent_until', 'pr.Simple.get_parent_until', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (7783, 7806), True, 'from jedi.parser import representation as pr\n'), ((14148, 14204), 'builtin.Builtin.magic_function_scope.get_defined_names', 'builtin.Builtin.magic_function_scope.get_defined_names', ([], {}), '()\n', (14202, 14204), False, 'import builtin\n'), ((18708, 18758), 'jedi.debug.dbg', 'debug.dbg', (["('exec result: %s in %s' % (stmts, self))"], {}), "('exec result: %s in %s' % (stmts, self))\n", (18717, 18758), False, 'from jedi import debug\n'), ((18775, 18803), 'imports.strip_imports', 'imports.strip_imports', (['stmts'], {}), '(stmts)\n', (18796, 18803), False, 'import imports\n'), ((29876, 29922), 'jedi.parser.representation.Scope.get_statement_for_position', 'pr.Scope.get_statement_for_position', (['self', 'pos'], {}), '(self, pos)\n', (29911, 29922), True, 'from jedi.parser import representation as pr\n'), ((30897, 30932), 'jedi.debug.dbg', 'debug.dbg', (['"""generator names"""', 'names'], {}), "('generator names', names)\n", (30906, 30932), False, 'from jedi import debug\n'), ((31157, 31220), 'jedi.debug.warning', 'debug.warning', (['"""Tried to get array access on a generator"""', 'self'], {}), "('Tried to get array access on a generator', self)\n", (31170, 31220), False, 'from jedi import debug\n'), ((33145, 33180), 'dynamic.check_array_additions', 'dynamic.check_array_additions', (['self'], {}), '(self)\n', (33174, 33180), False, 'import dynamic\n'), ((1907, 1942), 'dynamic.check_array_instances', 'dynamic.check_array_instances', (['self'], {}), '(self)\n', (1936, 1942), False, 'import dynamic\n'), ((3059, 3074), 'copy.copy', 'copy.copy', (['name'], {}), '(name)\n', (3068, 3074), False, 'import copy\n'), ((9222, 9250), 'evaluate.follow_statement', 'evaluate.follow_statement', (['s'], {}), '(s)\n', (9247, 9250), False, 'import evaluate\n'), ((9613, 9664), 'evaluate.find_name', 'evaluate.find_name', (['builtin.Builtin.scope', '"""object"""'], {}), "(builtin.Builtin.scope, 'object')\n", (9631, 9664), False, 'import evaluate\n'), ((10651, 10700), 'evaluate.find_name', 'evaluate.find_name', (['builtin.Builtin.scope', '"""type"""'], {}), "(builtin.Builtin.scope, 'type')\n", (10669, 10700), False, 'import evaluate\n'), ((19190, 19224), 'docstrings.find_return_types', 'docstrings.find_return_types', (['func'], {}), '(func)\n', (19218, 19224), False, 'import docstrings\n'), ((20168, 20184), 'copy.copy', 'copy.copy', (['param'], {}), '(param)\n', (20177, 20184), False, 'import copy\n'), ((20397, 20454), 'jedi.parser.representation.Array', 'pr.Array', (['self._sub_module', 'start_pos', 'array_type', 'parent'], {}), '(self._sub_module, start_pos, array_type, parent)\n', (20405, 20454), True, 'from jedi.parser import representation as pr\n'), ((22045, 22081), 'jedi._compatibility.next', 'next', (['var_arg_iterator', '(None, None)'], {}), '(var_arg_iterator, (None, None))\n', (22049, 22081), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((27634, 27661), 'jedi.parser.representation.Scope.get_set_vars', 'pr.Scope.get_set_vars', (['self'], {}), 
'(self)\n', (27655, 27661), True, 'from jedi.parser import representation as pr\n'), ((29229, 29260), 'jedi.helpers.fast_parent_copy', 'helpers.fast_parent_copy', (['scope'], {}), '(scope)\n', (29253, 29260), False, 'from jedi import helpers\n'), ((30618, 30685), 'jedi.parser.representation.Name', 'pr.Name', (['builtin.Builtin.scope', '[(n, none_pos)]', 'none_pos', 'none_pos'], {}), '(builtin.Builtin.scope, [(n, none_pos)], none_pos, none_pos)\n', (30625, 30685), True, 'from jedi.parser import representation as pr\n'), ((34741, 34800), 'evaluate.find_name', 'evaluate.find_name', (['builtin.Builtin.scope', 'self._array.type'], {}), '(builtin.Builtin.scope, self._array.type)\n', (34759, 34800), False, 'import evaluate\n'), ((2087, 2111), 'jedi.common.ignored', 'common.ignored', (['KeyError'], {}), '(KeyError)\n', (2101, 2111), False, 'from jedi import common\n'), ((6209, 6266), 'jedi.debug.warning', 'debug.warning', (['"""No __getitem__, cannot access the array."""'], {}), "('No __getitem__, cannot access the array.')\n", (6222, 6266), False, 'from jedi import debug\n'), ((12226, 12257), 'jedi.debug.dbg', 'debug.dbg', (['"""decorator:"""', 'dec', 'f'], {}), "('decorator:', dec, f)\n", (12235, 12257), False, 'from jedi import debug\n'), ((13494, 13523), 'jedi.debug.dbg', 'debug.dbg', (['"""decorator end"""', 'f'], {}), "('decorator end', f)\n", (13503, 13523), False, 'from jedi import debug\n'), ((15200, 15231), 'evaluate.follow_statement', 'evaluate.follow_statement', (['stmt'], {}), '(stmt)\n', (15225, 15231), False, 'import evaluate\n'), ((20566, 20617), 'jedi.parser.representation.Statement', 'pr.Statement', (['self._sub_module', '[]', 'start_pos', 'None'], {}), '(self._sub_module, [], start_pos, None)\n', (20578, 20617), True, 'from jedi.parser import representation as pr\n'), ((22535, 22571), 'jedi._compatibility.next', 'next', (['var_arg_iterator', '(None, None)'], {}), '(var_arg_iterator, (None, None))\n', (22539, 22571), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((28295, 28328), 'jedi.helpers.fast_parent_copy', 'helpers.fast_parent_copy', (['element'], {}), '(element)\n', (28319, 28328), False, 'from jedi import helpers\n'), ((9319, 9372), 'jedi.debug.warning', 'debug.warning', (['"""Received non class, as a super class"""'], {}), "('Received non class, as a super class')\n", (9332, 9372), False, 'from jedi import debug\n'), ((12292, 12322), 'evaluate.follow_statement', 'evaluate.follow_statement', (['dec'], {}), '(dec)\n', (12317, 12322), False, 'import evaluate\n'), ((12385, 12455), 'jedi.debug.warning', 'debug.warning', (["('decorator not found: %s on %s' % (dec, self.base_func))"], {}), "('decorator not found: %s on %s' % (dec, self.base_func))\n", (12398, 12455), False, 'from jedi import debug\n'), ((12619, 12690), 'jedi.debug.warning', 'debug.warning', (['"""multiple decorators found"""', 'self.base_func', 'dec_results'], {}), "('multiple decorators found', self.base_func, dec_results)\n", (12632, 12690), False, 'from jedi import debug\n'), ((13139, 13189), 'jedi.debug.warning', 'debug.warning', (['"""no wrappers found"""', 'self.base_func'], {}), "('no wrappers found', self.base_func)\n", (13152, 13189), False, 'from jedi import debug\n'), ((13280, 13346), 'jedi.debug.warning', 'debug.warning', (['"""multiple wrappers found"""', 'self.base_func', 'wrappers'], {}), "('multiple wrappers found', self.base_func, wrappers)\n", (13293, 13346), False, 'from jedi import debug\n'), ((19323, 19351), 'evaluate.follow_statement', 
'evaluate.follow_statement', (['r'], {}), '(r)\n', (19348, 19351), False, 'import evaluate\n'), ((25384, 25422), 'jedi.parser.representation.Statement', 'pr.Statement', (['module', '[]', '(0, 0)', 'None'], {}), '(module, [], (0, 0), None)\n', (25396, 25422), True, 'from jedi.parser import representation as pr\n'), ((25673, 25712), 'evaluate.follow_call_list', 'evaluate.follow_call_list', (['commands[1:]'], {}), '(commands[1:])\n', (25698, 25712), False, 'import evaluate\n'), ((34388, 34416), 'evaluate.follow_statement', 'evaluate.follow_statement', (['v'], {}), '(v)\n', (34413, 34416), False, 'import evaluate\n'), ((16317, 16365), 'jedi.debug.warning', 'debug.warning', (['"""getattr called without instance"""'], {}), "('getattr called without instance')\n", (16330, 16365), False, 'from jedi import debug\n'), ((18187, 18228), 'jedi._compatibility.hasattr', 'hasattr', (['base', '"""execute_subscope_by_name"""'], {}), "(base, 'execute_subscope_by_name')\n", (18194, 18228), False, 'from jedi._compatibility import use_metaclass, next, hasattr, unicode\n'), ((26317, 26356), 'evaluate.follow_call_list', 'evaluate.follow_call_list', (['commands[1:]'], {}), '(commands[1:])\n', (26342, 26356), False, 'import evaluate\n'), ((32878, 32944), 'jedi.common.ignored', 'common.ignored', (['KeyError', 'IndexError', 'UnboundLocalError', 'TypeError'], {}), '(KeyError, IndexError, UnboundLocalError, TypeError)\n', (32892, 32944), False, 'from jedi import common\n'), ((16534, 16577), 'jedi.debug.warning', 'debug.warning', (['"""getattr called without str"""'], {}), "('getattr called without str')\n", (16547, 16577), False, 'from jedi import debug\n'), ((16699, 16742), 'jedi.debug.warning', 'debug.warning', (['"""jedi getattr is too simple"""'], {}), "('jedi getattr is too simple')\n", (16712, 16742), False, 'from jedi import debug\n'), ((18559, 18603), 'jedi.debug.warning', 'debug.warning', (['"""no execution possible"""', 'base'], {}), "('no execution possible', base)\n", (18572, 18603), False, 'from jedi import debug\n'), ((18467, 18516), 'jedi.debug.warning', 'debug.warning', (['"""no __call__ func available"""', 'base'], {}), "('no __call__ func available', base)\n", (18480, 18516), False, 'from jedi import debug\n'), ((26185, 26218), 'jedi.helpers.FakeStatement', 'helpers.FakeStatement', (['field_stmt'], {}), '(field_stmt)\n', (26206, 26218), False, 'from jedi import helpers\n')]
|
import tqdm
import struct
import os
import numpy as np
import pickle
import json
import random
import argparse
from collections import Counter
#from lightfm import LightFM
from scipy import sparse
from evaluate import evaluate, coverage
from implicit.als import AlternatingLeastSquares
from scipy.linalg import norm
os.environ["OPENBLAS_NUM_THREADS"] = "1"
split_folder = 'lastfm'
user_features_playcounts_filename = 'out_user_playcounts_als.feats'
item_features_playcounts_filename = 'out_item_playcounts_als.feats'
predictions_playcounts_filename = 'predicted_playcounts_als.npy'
gender_location = 'data/lfm-gender.json'
def evaluate2(iteration_tracks, items_dict, tracks_pop):
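    # Aggregate diversity metrics over the per-user top-N lists (a summary of
    # the code below): Gini of item exposure counts, number of distinct items
    # recommended, and mean popularity of the recommended items.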
all_songs = {}
popularity = []
for user in range(len(iteration_tracks)):
if len(iteration_tracks[user]):
curr_pop = 0
for track in iteration_tracks[user]:
curr_pop += tracks_pop[0, track]
if track not in all_songs:
all_songs[track] = 0
all_songs[track] += 1
popularity.append(curr_pop/len(iteration_tracks[user]))
#return len(different_songs)/len(iteration_tracks) #return np.mean(all_songs)
#print (len(different_songs), len(items_dict))
#return len(different_songs)/len(items_dict)#sum(all_songs) #return np.mean(all_songs)
popularity = np.mean(popularity)
different_songs = len(all_songs)
if different_songs > len(items_dict):
np_counts = np.zeros(different_songs, np.dtype('float64'))
else:
np_counts = np.zeros(len(items_dict), np.dtype('float64'))
np_counts[:different_songs] = np.array(list(all_songs.values()))
return gini(np_counts), different_songs, popularity
def gini(array):
# based on bottom eq: http://www.statsdirect.com/help/content/image/stat0206_wmf.gif
# from: http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
array = array.flatten() #all values are treated equally, arrays must be 1d
if np.amin(array) < 0:
array -= np.amin(array) #values cannot be negative
array += 0.0000001 #values cannot be 0
array = np.sort(array) #values must be sorted
index = np.arange(1,array.shape[0]+1) #index per array element
n = array.shape[0]#number of array elements
return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array))) #Gini coefficient
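# Closed form used above, assuming a sorted, non-negative 1-D array x of
# length n with 1-based index i:
#     G = sum_i((2*i - n - 1) * x_i) / (n * sum_i(x_i))
# Sanity check (ignoring the small epsilon added above):
#     gini(np.array([1., 1., 1.])) ~ 0.0   (perfect equality)
#     gini(np.array([0., 0., 1.])) ~ 2/3   (full concentration)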
def load_feats(feat_fname, meta_only=False, nrz=False):
with open(feat_fname, 'rb') as fin:
keys = fin.readline().strip().split()
R, C = struct.unpack('qq', fin.read(16))
if meta_only:
return keys, (R, C)
feat = np.frombuffer(fin.read(), count=R * C, dtype=np.float32)  # frombuffer: np.fromstring is deprecated
feat = feat.reshape((R, C))
if nrz:
feat = feat / np.sqrt((feat ** 2).sum(-1) + 1e-8)[..., np.newaxis]
return keys, feat
def save(keys, feats, out_fname):
feats = np.array(feats, dtype=np.float32)
with open(out_fname + '.tmp', 'wb') as fout:
fout.write(b' '.join([k.encode() for k in keys]))
fout.write(b'\n')
R, C = feats.shape
fout.write(struct.pack('qq', *(R, C)))
fout.write(feats.tobytes())  # tobytes: ndarray.tostring is deprecated
os.rename(out_fname + '.tmp', out_fname)
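# On-disk layout implied by save() and load_feats() above:
#   line 1   : space-separated id keys, newline-terminated
#   16 bytes : struct 'qq', the int64 shape (R, C)
#   rest     : R*C float32 values, row-major
# Round-trip sketch (hypothetical file name):
#     save(keys, feats, 'items.feats')
#     keys2, feats2 = load_feats('items.feats')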
def train_als(impl_train_data, dims, user_ids, item_ids, user_features_file, item_features_file, save_res=True):
model = AlternatingLeastSquares(factors=dims, iterations=50)
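    # This version of implicit's ALS API fits on an item-user matrix, hence
    # the transpose of the user-item training matrix below (an assumption
    # based on the older implicit API; newer releases take user-item directly).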
model.fit(impl_train_data.T)
user_vecs_reg = model.user_factors
item_vecs_reg = model.item_factors
print("USER FEAT:", user_vecs_reg.shape)
print("ITEM FEAT:", item_vecs_reg.shape)
if save_res==True:
save(item_ids, item_vecs_reg, item_features_file)
save(user_ids, user_vecs_reg, user_features_file)
return item_ids, item_vecs_reg, user_ids, user_vecs_reg
def train(impl_train_data, dims, user_ids, item_ids, item_features_file, user_features_file, user_features=None, save_res=True):
    # NOTE: requires LightFM (import commented out at the top of the file);
    # only train_als() is actually used in __main__.
    model = LightFM(loss='warp', no_components=dims, max_sampled=30, user_alpha=1e-06)
#model = model.fit(impl_train_data, epochs=50, num_threads=8)
model = model.fit(impl_train_data, user_features=user_features, epochs=50, num_threads=8)
user_biases, user_embeddings = model.get_user_representations(user_features)
#user_biases, user_embeddings = model.get_user_representations()
item_biases, item_embeddings = model.get_item_representations()
item_vecs_reg = np.concatenate((item_embeddings, np.reshape(item_biases, (1, -1)).T), axis=1)
user_vecs_reg = np.concatenate((user_embeddings, np.ones((1, user_biases.shape[0])).T), axis=1)
#print("USER FEAT:", user_vecs_reg.shape)
#print("ITEM FEAT:", item_vecs_reg.shape)
if save_res==True:
save(item_ids, item_vecs_reg, item_features_file)
save(user_ids, user_vecs_reg, user_features_file)
return item_ids, item_vecs_reg, user_ids, user_vecs_reg
def predict(item_vecs_reg, user_vecs_reg, prediction_file,impl_train_data, N=100, step=1000, save_res=True):
#listened_dict = sparse.dok_matrix(impl_train_data)
listened_dict = impl_train_data
predicted = np.zeros((user_vecs_reg.shape[0],N), dtype=np.uint32)
for u in range(0,user_vecs_reg.shape[0], step):
sims = user_vecs_reg[u:u+step].dot(item_vecs_reg.T)
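        # Mask already-listened items: `curr_users` is True where the user has
        # not listened to an item, so the multiplication zeroes out known
        # items' scores before ranking (note: scores are zeroed rather than
        # set to -inf, so listened items can still outrank negatively scored ones).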
curr_users = listened_dict[u:u+step].todense() == 0
topn = np.argsort(-np.multiply(sims,curr_users), axis=1)[:,:N]
predicted[u:u+step, :] = topn
#if u % 100000 == 0:
# print ("Precited users: ", u)
if save_res==True:
np.save(open(prediction_file, 'wb'), predicted)
return predicted
def rerank(predicted, items_gender, lambda1=10):
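    # Post-hoc fairness re-ranking: every artist labelled "Male" is demoted by
    # `lambda1` rank positions before re-sorting each user's list. Returns the
    # mean number of demoted artists found in the original top-10 (over all
    # users, and over users with at least one), plus the count of users with none.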
ret_all = []
ret_non_zero = []
zero_users = 0
for u in range(0,predicted.shape[0]):
counter = 0
recs_dict = {item:p for p,item in enumerate(predicted[u, :])}
for i, track in enumerate(recs_dict.keys()):
if items_gender[track] == "Male":
recs_dict[track] += lambda1
if i< 10:
counter += 1
ret_all.append(counter)
if counter == 0:
zero_users += 1
else:
ret_non_zero.append(counter)
predicted[u] = np.array([k for k,v in sorted(recs_dict.items(), key=lambda x: x[1])])
#if u % 50000 == 0:
# print ("reranked users: ", u)
return np.mean(ret_all), np.mean(ret_non_zero), zero_users
from math import log2
def show_eval(predicted_x, fan_test_data,item_ids,items_gender, sum_listen, changes):
topn = predicted_x.shape[1]
fan_test_data_sorted = []
fan_test_data_male = []
fan_test_data_female = []
predicted_male = []
predicted_female = []
all_res = {'test_fidelity': [], 'test_engagement': [], 'test_awearnes': [], 'test_playcounts': [], 'pred_fidelity': {}, 'pred_awearnes': {}, 'pred_engagement': {}, 'pred_playcounts': {}}
for cutoff in ('1', '3', '5', '10', '100'):
for name in ('pred_fidelity', 'pred_awearnes', 'pred_engagement', 'pred_playcounts'):
all_res[name][cutoff] = []
_SQRT2 = np.sqrt(2) # sqrt(2) with default precision np.float64
artist_gender_user = []
artist_gender_user_recommend = []
artist_gender_dist = []
artist_gender_first_female = []
artist_gender_first_male = []
reco_set= {}
reco_set_10= {}
for i in range(len(fan_test_data)):
#fan_test_data_sorted.append(fan_test_data[i])
test_u_sorted_playcount = sorted([(a, p) for a,p in fan_test_data[i]], key=lambda x: x[1])
fan_test_data_sorted.append([a[0] for a in test_u_sorted_playcount])
fan_test_data_male.append([a[0] for a in test_u_sorted_playcount if items_gender[a[0]] == "Male"])
fan_test_data_female.append([a[0] for a in test_u_sorted_playcount if items_gender[a[0]] == "Female"])
if len(test_u_sorted_playcount) == 0:  # skip users with no test interactions
continue
first_female = None
first_male = None
curr_predict_female = []
curr_predict_male = []
for p,a in enumerate(predicted_x[i]):
if first_female == None and items_gender[a] == 'Female':
first_female = p
if first_male == None and items_gender[a] == 'Male':
first_male = p
#if first_male != None and first_female != None:
# break
if items_gender[a] == 'Female':
curr_predict_female.append(a)
elif items_gender[a] == 'Male':
curr_predict_male.append(a)
predicted_female.append(curr_predict_female)
predicted_male.append(curr_predict_male)
if first_female != None:
artist_gender_first_female.append(first_female)
else:
artist_gender_first_female.append(len(predicted_x[i])+1)
if first_male != None:
artist_gender_first_male.append(first_male)
else:
artist_gender_first_male.append(len(predicted_x[i])+1)
reco_set.update({a:1 for a in predicted_x[i]})
reco_set_10.update({a:1 for a in predicted_x[i][:10]})
listened_gender = None
listened = dict(Counter([items_gender[a[0]] for a in test_u_sorted_playcount]))
female = 0
male = 0
if 'Female' in listened:
female = listened['Female']
if 'Male' in listened:
male = listened['Male']
if (male+female) > 0:
#artist_gender_user.append(female / (male+female))
listened_gender = female / (male+female)
q = [female / (male+female), male/ (male+female)]
listened= dict(Counter([items_gender[a] for a in predicted_x[i]]))
female = 0
male = 0
if 'Female' in listened:
female = listened['Female']
if 'Male' in listened:
male = listened['Male']
if (male+female) > 0 and listened_gender != None:
artist_gender_user_recommend.append(female / (male+female))
p = [female / (male+female), male/ (male+female)]
artist_gender_user.append(listened_gender)
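            # Hellinger distance between the recommended (p) and listened (q)
            # gender distributions: ||sqrt(p) - sqrt(q)||_2 / sqrt(2), bounded in [0, 1].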
artist_gender_dist.append(norm(np.sqrt(p) - np.sqrt(q)) / _SQRT2)
reco_set_total = dict(Counter([items_gender[a] for a in reco_set.keys()]))
reco_set_10_total = dict(Counter([items_gender[a] for a in reco_set_10.keys()]))
header = 'Coverage@100 Male, Coverage@100 Female, Coverage@10 Male, Coverage@10 Female, Distribution, Female listened, Female recommended, First Female, First Male'
res = []
res.append(reco_set_total['Male'])
res.append(reco_set_total['Female'])
res.append(reco_set_10_total['Male'])
res.append(reco_set_10_total['Female'])
res.append(np.mean(artist_gender_dist))
res.append(np.mean(artist_gender_user))
res.append(np.mean(artist_gender_user_recommend))
res.append(np.mean(artist_gender_first_female))
res.append(np.mean(artist_gender_first_male))
    header += ', GINI@100, pop@100, coverage@100, Coverage@10, Coverage on FAN test set@100, all changes, non_zero changes, zero_users, iter'
gini_val,cov_val,pop_val = evaluate2(predicted_x, item_ids, sum_listen)
res.append(gini_val)
res.append(pop_val)
res.append(cov_val)
res.append(coverage(predicted_x.tolist(), 10))
res.append(coverage(fan_test_data_sorted, 100))
res.append(changes[0])
res.append(changes[1])
res.append(changes[2])
res.append(changes[3])
print (header)
for i in range(len(res)):
if i in (0,1,2,3,10,11,12,13,16,17):
print(int(res[i]),end=', ')
else:
print('{:.4f}'.format(res[i]),end=', ')
print()
metrics = ['map@10', 'precision@1', 'precision@3', 'precision@5', 'precision@10', 'r-precision', 'ndcg@10']
results = evaluate(metrics, fan_test_data_sorted, predicted_x)#[:, :10])
print_head = ''
print_str = ''
for metric in metrics:
print_head += metric +", "
for metric in metrics:
print_str+= ', {:.4f}'.format(results[metric])
print (print_head)
print (print_str)
def predict_pop(pop_artists, impl_train_data, N=100):
predicted = np.zeros((impl_train_data.shape[0],N), dtype=np.uint32)
for u in range(0, impl_train_data.shape[0]):
curr_val = 0
for a in pop_artists:
if impl_train_data[u,a] == 0:
predicted[u,curr_val] = a
curr_val += 1
if curr_val == 100:
break
return predicted
def predict_rnd(item_ids, impl_train_data, N=100):
predicted = np.zeros((impl_train_data.shape[0],N), dtype=np.uint32)
items = range(len(item_ids))
for u in range(0, impl_train_data.shape[0]):
selected = random.sample(items, N)
predicted[u,:] = selected
return predicted
if __name__== "__main__":
parser = argparse.ArgumentParser(description='Run model training and evaluation.')
parser.add_argument('-l', "--lambda1", default='0')
args = parser.parse_args()
lambda1 = int(args.lambda1)
artists_gender = json.load(open(gender_location))
fan_train_data = sparse.load_npz(os.path.join('data', split_folder, 'rain_data_playcount.npz')).tocsr()
sum_listen = fan_train_data.sum(axis=0)
fan_test_data = pickle.load(open(os.path.join('data', split_folder, 'test_data.pkl'), 'rb'))
fan_items_dict = pickle.load(open(os.path.join('data', split_folder, 'items_dict.pkl'), 'rb'))
items_gender = [0]*len(fan_items_dict)
for a in fan_items_dict.keys():
items_gender[fan_items_dict[a]] =artists_gender[a]
fan_users_dict = pickle.load(open(os.path.join('data', split_folder,'users_dict.pkl'), 'rb'))
print ("Item", len(fan_items_dict))
print ("User", len(fan_users_dict))
print (sum_listen.shape)
model_folder = 'models'
dims = 300
user_features_file = os.path.join(model_folder, split_folder, user_features_playcounts_filename.format(dims))
item_features_file = os.path.join(model_folder, split_folder, item_features_playcounts_filename.format(dims))
item_ids, item_vecs_reg, user_ids, user_vecs_reg = train_als(fan_train_data, dims, fan_users_dict, fan_items_dict, user_features_file, item_features_file, save_res=True)
#item_ids, item_vecs_reg, user_ids, user_vecs_reg = train(fan_train_data_fidelity, 50, fan_users_dict, fan_items_dict, model_folder, save_res=True)
#user_ids, user_vecs_reg = load_feats(user_features_file)
#item_ids, item_vecs_reg = load_feats(item_features_file)
predictions_file = os.path.join(model_folder, split_folder,predictions_playcounts_filename.format(dims))
predicted = predict(item_vecs_reg, user_vecs_reg, predictions_file, fan_train_data, step=500)
#predicted = np.load(predictions_file)
#rerank(predicted, items_gender, lambda1)
print (predicted.shape, len(fan_test_data), user_vecs_reg.shape, len(user_ids))
#print ("ALS: -->", dims, "Lambda", lambda1)
#show_eval(predicted, fan_test_data, fan_items_dict, items_gender, sum_listen)
N = 100
step = 2000
for iter_n in range(21):
artists_count = Counter()
predicted = np.zeros((user_vecs_reg.shape[0],N), dtype=np.uint32)
for u in range(0,user_vecs_reg.shape[0],step):#len(user_ids)):
sims = user_vecs_reg[u:u+step].dot(item_vecs_reg.T)
topn = np.argsort(-sims, axis=1)[:,:N]#.flatten()
#curr_users = fan_train_data[u:u+step].todense() == 0
#topn = np.argsort(-np.multiply(sims,curr_users), axis=1)[:,:N]
predicted[u:u+step, :] = topn
changes = rerank(predicted, items_gender, lambda1)
changes = list(changes)
M = 10
for u in range(0,user_vecs_reg.shape[0],step):#len(user_ids)):
topn = predicted[u:u+step, :][:, :M].flatten()
u_min = min(u+step, user_vecs_reg.shape[0])
rows = np.repeat(np.arange(u,u_min), M)
mtrx_sum = sparse.csr_matrix((np.repeat(M,topn.shape[0]), (rows, topn)),shape=fan_train_data.shape, dtype=np.float32)
fan_train_data = fan_train_data+mtrx_sum
artists_count.update(topn.tolist())
n_artists = len(artists_count)
np_counts = np.zeros(item_vecs_reg.shape[0], np.dtype('float64'))
np_counts[:n_artists] = np.array(list(artists_count.values()))
#pickle.dump(artists_count, open("data/artists_count_{}.pkl".format(str(iter_n)),"wb"))
#print ("iter:", str(iter_n))
#print (artists_count.most_common(10))
#print ("coverage:", n_artists)
changes.append(iter_n)
show_eval(predicted, fan_test_data, fan_items_dict, items_gender, sum_listen, changes)
if iter_n % 10 == 0:
#out_item_features = 'data/items_{}_{}.feat'.format(dimms,str(iter_n))
#out_user_features = 'data/users_{}_{}.feat'.format(dimms,str(iter_n))
#user_features_file = os.path.join(model_folder, split_folder, user_features_playcounts_filename.format(dims))
#item_features_file = os.path.join(model_folder, split_folder, item_features_playcounts_filename.format(dims))
item_ids, item_vecs_reg, user_ids, user_vecs_reg = train_als(fan_train_data, 300, fan_users_dict, fan_items_dict, user_features_file, item_features_file, save_res=False)
else:
item_ids, item_vecs_reg, user_ids, user_vecs_reg = train_als(fan_train_data, 300, fan_users_dict, fan_items_dict, user_features_file, item_features_file, save_res=False)
|
[
"evaluate.evaluate",
"evaluate.coverage"
] |
[((1370, 1389), 'numpy.mean', 'np.mean', (['popularity'], {}), '(popularity)\n', (1377, 1389), True, 'import numpy as np\n'), ((2153, 2167), 'numpy.sort', 'np.sort', (['array'], {}), '(array)\n', (2160, 2167), True, 'import numpy as np\n'), ((2203, 2235), 'numpy.arange', 'np.arange', (['(1)', '(array.shape[0] + 1)'], {}), '(1, array.shape[0] + 1)\n', (2212, 2235), True, 'import numpy as np\n'), ((2921, 2954), 'numpy.array', 'np.array', (['feats'], {'dtype': 'np.float32'}), '(feats, dtype=np.float32)\n', (2929, 2954), True, 'import numpy as np\n'), ((3231, 3271), 'os.rename', 'os.rename', (["(out_fname + '.tmp')", 'out_fname'], {}), "(out_fname + '.tmp', out_fname)\n", (3240, 3271), False, 'import os\n'), ((3399, 3451), 'implicit.als.AlternatingLeastSquares', 'AlternatingLeastSquares', ([], {'factors': 'dims', 'iterations': '(50)'}), '(factors=dims, iterations=50)\n', (3422, 3451), False, 'from implicit.als import AlternatingLeastSquares\n'), ((5160, 5214), 'numpy.zeros', 'np.zeros', (['(user_vecs_reg.shape[0], N)'], {'dtype': 'np.uint32'}), '((user_vecs_reg.shape[0], N), dtype=np.uint32)\n', (5168, 5214), True, 'import numpy as np\n'), ((7144, 7154), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7151, 7154), True, 'import numpy as np\n'), ((11830, 11882), 'evaluate.evaluate', 'evaluate', (['metrics', 'fan_test_data_sorted', 'predicted_x'], {}), '(metrics, fan_test_data_sorted, predicted_x)\n', (11838, 11882), False, 'from evaluate import evaluate, coverage\n'), ((12192, 12248), 'numpy.zeros', 'np.zeros', (['(impl_train_data.shape[0], N)'], {'dtype': 'np.uint32'}), '((impl_train_data.shape[0], N), dtype=np.uint32)\n', (12200, 12248), True, 'import numpy as np\n'), ((12606, 12662), 'numpy.zeros', 'np.zeros', (['(impl_train_data.shape[0], N)'], {'dtype': 'np.uint32'}), '((impl_train_data.shape[0], N), dtype=np.uint32)\n', (12614, 12662), True, 'import numpy as np\n'), ((12883, 12956), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run model training and evaluation."""'}), "(description='Run model training and evaluation.')\n", (12906, 12956), False, 'import argparse\n'), ((2019, 2033), 'numpy.amin', 'np.amin', (['array'], {}), '(array)\n', (2026, 2033), True, 'import numpy as np\n'), ((2056, 2070), 'numpy.amin', 'np.amin', (['array'], {}), '(array)\n', (2063, 2070), True, 'import numpy as np\n'), ((2319, 2354), 'numpy.sum', 'np.sum', (['((2 * index - n - 1) * array)'], {}), '((2 * index - n - 1) * array)\n', (2325, 2354), True, 'import numpy as np\n'), ((6426, 6442), 'numpy.mean', 'np.mean', (['ret_all'], {}), '(ret_all)\n', (6433, 6442), True, 'import numpy as np\n'), ((6444, 6465), 'numpy.mean', 'np.mean', (['ret_non_zero'], {}), '(ret_non_zero)\n', (6451, 6465), True, 'import numpy as np\n'), ((10759, 10786), 'numpy.mean', 'np.mean', (['artist_gender_dist'], {}), '(artist_gender_dist)\n', (10766, 10786), True, 'import numpy as np\n'), ((10803, 10830), 'numpy.mean', 'np.mean', (['artist_gender_user'], {}), '(artist_gender_user)\n', (10810, 10830), True, 'import numpy as np\n'), ((10847, 10884), 'numpy.mean', 'np.mean', (['artist_gender_user_recommend'], {}), '(artist_gender_user_recommend)\n', (10854, 10884), True, 'import numpy as np\n'), ((10901, 10936), 'numpy.mean', 'np.mean', (['artist_gender_first_female'], {}), '(artist_gender_first_female)\n', (10908, 10936), True, 'import numpy as np\n'), ((10953, 10986), 'numpy.mean', 'np.mean', (['artist_gender_first_male'], {}), '(artist_gender_first_male)\n', (10960, 10986), True, 'import numpy as np\n'), ((11346, 11381), 'evaluate.coverage', 'coverage', (['fan_test_data_sorted', '(100)'], {}), '(fan_test_data_sorted, 100)\n', (11354, 11381), False, 'from evaluate import evaluate, coverage\n'), ((12763, 12786), 'random.sample', 'random.sample', (['items', 'N'], {}), '(items, N)\n', (12776, 12786), False, 'import random\n'), ((15141, 15150), 'collections.Counter', 'Counter', ([], {}), '()\n', (15148, 15150), False, 'from collections import Counter\n'), ((15171, 15225), 'numpy.zeros', 'np.zeros', (['(user_vecs_reg.shape[0], N)'], {'dtype': 'np.uint32'}), '((user_vecs_reg.shape[0], N), dtype=np.uint32)\n', (15179, 15225), True, 'import numpy as np\n'), ((1515, 1534), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1523, 1534), True, 'import numpy as np\n'), ((1592, 1611), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1600, 1611), True, 'import numpy as np\n'), ((2364, 2377), 'numpy.sum', 'np.sum', (['array'], {}), '(array)\n', (2370, 2377), True, 'import numpy as np\n'), ((3154, 3180), 'struct.pack', 'struct.pack', (['"""qq"""', '*(R, C)'], {}), "('qq', *(R, C))\n", (3165, 3180), False, 'import struct\n'), ((9204, 9266), 'collections.Counter', 'Counter', (['[items_gender[a[0]] for a in test_u_sorted_playcount]'], {}), '([items_gender[a[0]] for a in test_u_sorted_playcount])\n', (9211, 9266), False, 'from collections import Counter\n'), ((9676, 9726), 'collections.Counter', 'Counter', (['[items_gender[a] for a in predicted_x[i]]'], {}), '([items_gender[a] for a in predicted_x[i]])\n', (9683, 9726), False, 'from collections import Counter\n'), ((13321, 13372), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""test_data.pkl"""'], {}), "('data', split_folder, 'test_data.pkl')\n", (13333, 13372), False, 'import os\n'), ((13419, 13471), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""items_dict.pkl"""'], {}), "('data', split_folder, 'items_dict.pkl')\n", (13431, 13471), False, 'import os\n'), ((13656, 13708), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""users_dict.pkl"""'], {}), "('data', split_folder, 'users_dict.pkl')\n", (13668, 13708), False, 'import os\n'), ((16275, 16294), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (16283, 16294), True, 'import numpy as np\n'), ((4506, 4538), 'numpy.reshape', 'np.reshape', (['item_biases', '(1, -1)'], {}), '(item_biases, (1, -1))\n', (4516, 4538), True, 'import numpy as np\n'), ((4604, 4638), 'numpy.ones', 'np.ones', (['(1, user_biases.shape[0])'], {}), '((1, user_biases.shape[0]))\n', (4611, 4638), True, 'import numpy as np\n'), ((13169, 13230), 'os.path.join', 'os.path.join', (['"""data"""', 'split_folder', '"""rain_data_playcount.npz"""'], {}), "('data', split_folder, 'rain_data_playcount.npz')\n", (13181, 13230), False, 'import os\n'), ((15379, 15404), 'numpy.argsort', 'np.argsort', (['(-sims)'], {'axis': '(1)'}), '(-sims, axis=1)\n', (15389, 15404), True, 'import numpy as np\n'), ((15929, 15948), 'numpy.arange', 'np.arange', (['u', 'u_min'], {}), '(u, u_min)\n', (15938, 15948), True, 'import numpy as np\n'), ((5413, 5442), 'numpy.multiply', 'np.multiply', (['sims', 'curr_users'], {}), '(sims, curr_users)\n', (5424, 5442), True, 'import numpy as np\n'), ((15994, 16021), 'numpy.repeat', 'np.repeat', (['M', 'topn.shape[0]'], {}), '(M, topn.shape[0])\n', (16003, 16021), True, 'import numpy as np\n'), ((10196, 10206), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (10203, 10206), True, 'import numpy as np\n'), ((10209, 10219), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (10216, 10219), True, 'import numpy as np\n')]
|
import os
import re
import sys
sys.path.append('.')
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from scipy.ndimage.morphology import generate_binary_structure
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from lib.network.rtpose_vgg import get_model
from lib.network import im_transform
from lib.config import update_config, cfg
from evaluate.coco_eval import get_outputs, handle_paf_and_heat
from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans
from lib.utils.paf_to_pose import paf_to_pose_cpp
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', help='experiment configure file name',
default='./experiments/vgg19_368x368_sgd.yaml', type=str)
parser.add_argument('--weight', type=str,
default='pose_model.pth')
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
# update config file
update_config(cfg, args)
model = get_model('vgg19')
model.load_state_dict(torch.load(args.weight))
model.cuda()
model.float()
model.eval()
if __name__ == "__main__":
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, oriImg = video_capture.read()
        shape_dst = np.min(oriImg.shape[0:2])  # shorter side of the captured frame (not used below)
with torch.no_grad():
paf, heatmap, imscale = get_outputs(
oriImg, model, 'rtpose')
humans = paf_to_pose_cpp(heatmap, paf, cfg)
out = draw_humans(oriImg, humans)
# Display the resulting frame
cv2.imshow('Video', out)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
[
"evaluate.coco_eval.get_outputs"
] |
[((31, 51), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (46, 51), False, 'import sys\n'), ((788, 813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (811, 813), False, 'import argparse\n'), ((1278, 1302), 'lib.config.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (1291, 1302), False, 'from lib.config import update_config, cfg\n'), ((1315, 1333), 'lib.network.rtpose_vgg.get_model', 'get_model', (['"""vgg19"""'], {}), "('vgg19')\n", (1324, 1333), False, 'from lib.network.rtpose_vgg import get_model\n'), ((1356, 1379), 'torch.load', 'torch.load', (['args.weight'], {}), '(args.weight)\n', (1366, 1379), False, 'import torch\n'), ((1474, 1493), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1490, 1493), False, 'import cv2\n'), ((2114, 2137), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2135, 2137), False, 'import cv2\n'), ((1616, 1641), 'numpy.min', 'np.min', (['oriImg.shape[0:2]'], {}), '(oriImg.shape[0:2])\n', (1622, 1641), True, 'import numpy as np\n'), ((1799, 1833), 'lib.utils.paf_to_pose.paf_to_pose_cpp', 'paf_to_pose_cpp', (['heatmap', 'paf', 'cfg'], {}), '(heatmap, paf, cfg)\n', (1814, 1833), False, 'from lib.utils.paf_to_pose import paf_to_pose_cpp\n'), ((1865, 1892), 'lib.utils.common.draw_humans', 'draw_humans', (['oriImg', 'humans'], {}), '(oriImg, humans)\n', (1876, 1892), False, 'from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans\n'), ((1940, 1964), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'out'], {}), "('Video', out)\n", (1950, 1964), False, 'import cv2\n'), ((1656, 1671), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1669, 1671), False, 'import torch\n'), ((1709, 1745), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['oriImg', 'model', '"""rtpose"""'], {}), "(oriImg, model, 'rtpose')\n", (1720, 1745), False, 'from evaluate.coco_eval import get_outputs, handle_paf_and_heat\n'), ((1977, 1991), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1988, 1991), False, 'import cv2\n')]
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
import paddle
from utils import setup, get_loader
from train import train_model
from evaluate import valid
import os
def main():
# Model & Tokenizer Setup
args, model = setup()
    if args.mode == 'train':
        # Training
        train_model(args, model)
    elif args.mode == 'eval':
        if args.checkpoint_dir is not None and os.path.exists(args.checkpoint_dir):
            model.set_state_dict(paddle.load(args.checkpoint_dir))
            # Prepare dataset
            train_loader, test_loader = get_loader(args)
            accuracy = valid(args, model, test_loader, global_step=0)
            print("Accuracy:", accuracy)
else:
raise ValueError("checkpoint_dir must exist.")
else:
raise ValueError("mode must be in ['train', 'eval'].")
if __name__ == "__main__":
main()
|
[
"evaluate.valid"
] |
[((264, 271), 'utils.setup', 'setup', ([], {}), '()\n', (269, 271), False, 'from utils import setup, get_loader\n'), ((327, 351), 'train.train_model', 'train_model', (['args', 'model'], {}), '(args, model)\n', (338, 351), False, 'from train import train_model\n'), ((427, 462), 'os.path.exists', 'os.path.exists', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (441, 462), False, 'import os\n'), ((601, 617), 'utils.get_loader', 'get_loader', (['args'], {}), '(args)\n', (611, 617), False, 'from utils import setup, get_loader\n'), ((641, 687), 'evaluate.valid', 'valid', (['args', 'model', 'test_loader'], {'global_step': '(0)'}), '(args, model, test_loader, global_step=0)\n', (646, 687), False, 'from evaluate import valid\n'), ((497, 529), 'paddle.load', 'paddle.load', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (508, 529), False, 'import paddle\n')]
|
import os
import time
import logging
import argparse
import sys
sys.path.append("libs")
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from data import ContentVaeDataGenerator
from data import CollaborativeVAEDataGenerator
from pretrain_vae import get_content_vae
from train_vae import get_collabo_vae, infer
from evaluate import EvaluateModel
from evaluate import EvaluateCold
from evaluate import Recall_at_k, NDCG_at_k
def predict_and_evaluate():
### Parse the console arguments.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str,
help="specify the dataset for experiment")
parser.add_argument("--split", type=int,
help="specify the split of the dataset")
parser.add_argument("--batch_size", type=int, default=128,
help="specify the batch size prediction")
parser.add_argument("--device" , type=str, default="0",
help="specify the visible GPU device")
parser.add_argument("--lambda_V", default=None, type=int,
help="specify the value of lambda_V for regularization")
parser.add_argument("--num_cold", default=None, type=int,
help="specify the number of cold start items")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
### Set up the tensorflow session.
config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
### Fix the random seeds.
np.random.seed(98765)
tf.set_random_seed(98765)
### Get the train, val data generator for content vae
    # The cold-start setting takes precedence when choosing model_root;
    # otherwise lambda_V (if given) selects a regularization-specific sub-directory.
    if args.num_cold is not None:
        data_root = os.path.join("data", args.dataset, str(args.split), str(args.num_cold))
        model_root = os.path.join("models", args.dataset, str(args.split), "num_cold", str(args.num_cold))
    else:
        data_root = os.path.join("data", args.dataset, str(args.split))
        if args.lambda_V is not None:
            model_root = os.path.join("models", args.dataset, str(args.split), str(args.lambda_V))
        else:
            model_root = os.path.join("models", args.dataset, str(args.split))
dataset = "movielen-10" if "movielen-10" in args.dataset else args.dataset
tstep_cold_gen = ContentVaeDataGenerator(
data_root = data_root, joint=True,
batch_size = args.batch_size, use_cold=True,
)
bstep_test_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase = "test",
batch_size = args.batch_size, shuffle=False
)
bstep_cold_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase="test",
batch_size = args.batch_size*8, use_cold=True,
)
### Build test model and load trained weights
collabo_vae = get_collabo_vae(dataset, bstep_test_gen.num_items)
collabo_vae.load_weights(os.path.join(model_root, "best_bstep.model"))
content_vae = get_content_vae(dataset, tstep_cold_gen.feature_dim)
content_vae.load_weights(os.path.join(model_root, "best_tstep.model"))
vae_infer_tstep = content_vae.build_vae_infer_tstep()
vae_eval = collabo_vae.build_vae_eval()
vae_eval_cold = collabo_vae.update_vae_coldstart(infer(vae_infer_tstep, tstep_cold_gen.features.A))
### Evaluate and save the results
k4recalls = [10, 20, 25, 30, 35, 40, 45, 50]
k4ndcgs = [25, 50, 100]
recalls, NDCGs = [], []
recalls_cold, NDCGs_cold = [], []
for k in k4recalls:
recalls.append("{:.4f}".format(EvaluateModel(vae_eval, bstep_test_gen, Recall_at_k, k=k)))
recalls_cold.append("{:.4f}".format(EvaluateCold(vae_eval_cold, bstep_cold_gen, Recall_at_k, k=k)))
for k in k4ndcgs:
NDCGs.append("{:.4f}".format(EvaluateModel(vae_eval, bstep_test_gen, NDCG_at_k, k=k)))
NDCGs_cold.append("{:.4f}".format(EvaluateCold(vae_eval_cold, bstep_cold_gen, NDCG_at_k, k=k)))
recall_table = pd.DataFrame({"k":k4recalls, "recalls":recalls}, columns=["k", "recalls"])
recall_table.to_csv(os.path.join(model_root, "recalls.csv"), index=False)
ndcg_table = pd.DataFrame({"k":k4ndcgs, "NDCGs": NDCGs}, columns=["k", "NDCGs"])
ndcg_table.to_csv(os.path.join(model_root, "NDCGs.csv"), index=False)
recall_cold_table = pd.DataFrame({"k":k4recalls, "recalls":recalls_cold}, columns=["k", "recalls"])
recall_cold_table.to_csv(os.path.join(model_root, "recalls_cold.csv"), index=False)
ndcg_cold_table = pd.DataFrame({"k":k4ndcgs, "NDCGs": NDCGs_cold}, columns=["k", "NDCGs"])
ndcg_cold_table.to_csv(os.path.join(model_root, "NDCGs_cold.csv"), index=False)
print("Done evaluation! Results saved to {}".format(model_root))
if __name__ == '__main__':
predict_and_evaluate()
|
[
"evaluate.EvaluateCold",
"evaluate.EvaluateModel"
] |
[((71, 94), 'sys.path.append', 'sys.path.append', (['"""libs"""'], {}), "('libs')\n", (86, 94), False, 'import sys\n'), ((589, 614), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (612, 614), False, 'import argparse\n'), ((1425, 1441), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1439, 1441), True, 'import tensorflow as tf\n'), ((1496, 1521), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1506, 1521), True, 'import tensorflow as tf\n'), ((1527, 1546), 'tensorflow.keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (1540, 1546), True, 'from tensorflow.keras import backend as K\n'), ((1585, 1606), 'numpy.random.seed', 'np.random.seed', (['(98765)'], {}), '(98765)\n', (1599, 1606), True, 'import numpy as np\n'), ((1612, 1637), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(98765)'], {}), '(98765)\n', (1630, 1637), True, 'import tensorflow as tf\n'), ((2421, 2525), 'data.ContentVaeDataGenerator', 'ContentVaeDataGenerator', ([], {'data_root': 'data_root', 'joint': '(True)', 'batch_size': 'args.batch_size', 'use_cold': '(True)'}), '(data_root=data_root, joint=True, batch_size=args.\n batch_size, use_cold=True)\n', (2444, 2525), False, 'from data import ContentVaeDataGenerator\n'), ((2575, 2687), 'data.CollaborativeVAEDataGenerator', 'CollaborativeVAEDataGenerator', ([], {'data_root': 'data_root', 'phase': '"""test"""', 'batch_size': 'args.batch_size', 'shuffle': '(False)'}), "(data_root=data_root, phase='test', batch_size\n =args.batch_size, shuffle=False)\n", (2604, 2687), False, 'from data import CollaborativeVAEDataGenerator\n'), ((2739, 2855), 'data.CollaborativeVAEDataGenerator', 'CollaborativeVAEDataGenerator', ([], {'data_root': 'data_root', 'phase': '"""test"""', 'batch_size': '(args.batch_size * 8)', 'use_cold': '(True)'}), "(data_root=data_root, phase='test', batch_size\n =args.batch_size * 8, use_cold=True)\n", (2768, 2855), False, 'from data import CollaborativeVAEDataGenerator\n'), ((2951, 3001), 'train_vae.get_collabo_vae', 'get_collabo_vae', (['dataset', 'bstep_test_gen.num_items'], {}), '(dataset, bstep_test_gen.num_items)\n', (2966, 3001), False, 'from train_vae import get_collabo_vae, infer\n'), ((3099, 3151), 'pretrain_vae.get_content_vae', 'get_content_vae', (['dataset', 'tstep_cold_gen.feature_dim'], {}), '(dataset, tstep_cold_gen.feature_dim)\n', (3114, 3151), False, 'from pretrain_vae import get_content_vae\n'), ((4108, 4184), 'pandas.DataFrame', 'pd.DataFrame', (["{'k': k4recalls, 'recalls': recalls}"], {'columns': "['k', 'recalls']"}), "({'k': k4recalls, 'recalls': recalls}, columns=['k', 'recalls'])\n", (4120, 4184), True, 'import pandas as pd\n'), ((4282, 4350), 'pandas.DataFrame', 'pd.DataFrame', (["{'k': k4ndcgs, 'NDCGs': NDCGs}"], {'columns': "['k', 'NDCGs']"}), "({'k': k4ndcgs, 'NDCGs': NDCGs}, columns=['k', 'NDCGs'])\n", (4294, 4350), True, 'import pandas as pd\n'), ((4452, 4537), 'pandas.DataFrame', 'pd.DataFrame', (["{'k': k4recalls, 'recalls': recalls_cold}"], {'columns': "['k', 'recalls']"}), "({'k': k4recalls, 'recalls': recalls_cold}, columns=['k',\n 'recalls'])\n", (4464, 4537), True, 'import pandas as pd\n'), ((4646, 4719), 'pandas.DataFrame', 'pd.DataFrame', (["{'k': k4ndcgs, 'NDCGs': NDCGs_cold}"], {'columns': "['k', 'NDCGs']"}), "({'k': k4ndcgs, 'NDCGs': NDCGs_cold}, columns=['k', 'NDCGs'])\n", (4658, 4719), True, 'import pandas as pd\n'), ((3032, 3076), 'os.path.join', 'os.path.join', (['model_root', '"""best_bstep.model"""'], {}), "(model_root, 'best_bstep.model')\n", (3044, 3076), False, 'import os\n'), ((3182, 3226), 'os.path.join', 'os.path.join', (['model_root', '"""best_tstep.model"""'], {}), "(model_root, 'best_tstep.model')\n", (3194, 3226), False, 'import os\n'), ((3389, 3438), 'train_vae.infer', 'infer', (['vae_infer_tstep', 'tstep_cold_gen.features.A'], {}), '(vae_infer_tstep, tstep_cold_gen.features.A)\n', (3394, 3438), False, 'from train_vae import get_collabo_vae, infer\n'), ((4208, 4247), 'os.path.join', 'os.path.join', (['model_root', '"""recalls.csv"""'], {}), "(model_root, 'recalls.csv')\n", (4220, 4247), False, 'import os\n'), ((4373, 4410), 'os.path.join', 'os.path.join', (['model_root', '"""NDCGs.csv"""'], {}), "(model_root, 'NDCGs.csv')\n", (4385, 4410), False, 'import os\n'), ((4562, 4606), 'os.path.join', 'os.path.join', (['model_root', '"""recalls_cold.csv"""'], {}), "(model_root, 'recalls_cold.csv')\n", (4574, 4606), False, 'import os\n'), ((4747, 4789), 'os.path.join', 'os.path.join', (['model_root', '"""NDCGs_cold.csv"""'], {}), "(model_root, 'NDCGs_cold.csv')\n", (4759, 4789), False, 'import os\n'), ((3693, 3750), 'evaluate.EvaluateModel', 'EvaluateModel', (['vae_eval', 'bstep_test_gen', 'Recall_at_k'], {'k': 'k'}), '(vae_eval, bstep_test_gen, Recall_at_k, k=k)\n', (3706, 3750), False, 'from evaluate import EvaluateModel\n'), ((3798, 3859), 'evaluate.EvaluateCold', 'EvaluateCold', (['vae_eval_cold', 'bstep_cold_gen', 'Recall_at_k'], {'k': 'k'}), '(vae_eval_cold, bstep_cold_gen, Recall_at_k, k=k)\n', (3810, 3859), False, 'from evaluate import EvaluateCold\n'), ((3923, 3978), 'evaluate.EvaluateModel', 'EvaluateModel', (['vae_eval', 'bstep_test_gen', 'NDCG_at_k'], {'k': 'k'}), '(vae_eval, bstep_test_gen, NDCG_at_k, k=k)\n', (3936, 3978), False, 'from evaluate import EvaluateModel\n'), ((4024, 4083), 'evaluate.EvaluateCold', 'EvaluateCold', (['vae_eval_cold', 'bstep_cold_gen', 'NDCG_at_k'], {'k': 'k'}), '(vae_eval_cold, bstep_cold_gen, NDCG_at_k, k=k)\n', (4036, 4083), False, 'from evaluate import EvaluateCold\n')]
|
import argparse
import numpy as np
import os
import pandas as pd
import time
import torch
from data import (
eval_collate_fn,
EvalDataset,
TrainCollator,
TrainDataset,
)
from evaluate import predict, make_word_outputs_final
from transformers import (
AdamW,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
get_linear_schedule_with_warmup,
set_seed,
)
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser()
parser.add_argument('--train-src', type=str, required=True)
parser.add_argument('--train-tgt', type=str, required=True)
parser.add_argument('--dev-src', type=str, required=True)
parser.add_argument('--dev-tgt', type=str, required=True)
parser.add_argument('--dev-hter', type=str)
parser.add_argument('--dev-tags', type=str)
parser.add_argument('--block-size', type=int, default=256)
parser.add_argument('--eval-block-size', type=int, default=512)
parser.add_argument('--wwm', action='store_true')
parser.add_argument('--mlm-probability', type=float, default=0.15)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--update-cycle', type=int, default=8)
parser.add_argument('--eval-batch-size', type=int, default=8)
parser.add_argument('--train-steps', type=int, default=100000)
parser.add_argument('--eval-steps', type=int, default=1000)
parser.add_argument('--learning-rate', type=float, default=5e-5)
parser.add_argument('--pretrained-model-path', type=str, required=True)
parser.add_argument('--save-model-path', type=str, required=True)
parser.add_argument('--seed', type=int, default=42)
args = parser.parse_args()
print(args)
set_seed(args.seed)
device = torch.device('cuda')
torch.cuda.set_device(0)
config = AutoConfig.from_pretrained(args.pretrained_model_path, cache_dir=None)
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_path, cache_dir=None, use_fast=False, do_lower_case=False)
model = AutoModelWithLMHead.from_pretrained(args.pretrained_model_path, config=config, cache_dir=None)
model.resize_token_embeddings(len(tokenizer))
model.to(device)
train_dataset = TrainDataset(
src_path=args.train_src,
tgt_path=args.train_tgt,
tokenizer=tokenizer,
block_size=args.block_size,
wwm=args.wwm,
)
train_dataloader = DataLoader(
dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True,
collate_fn=TrainCollator(tokenizer=tokenizer, mlm_probability=args.mlm_probability),
)
dev_dataset = EvalDataset(
src_path=args.dev_src,
tgt_path=args.dev_tgt,
tokenizer=tokenizer,
block_size=args.eval_block_size,
wwm=args.wwm,
N=7,
M=1,
)
dev_dataloader = DataLoader(
dataset=dev_dataset,
batch_size=args.eval_batch_size,
shuffle=False,
collate_fn=eval_collate_fn,
)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
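# Standard BERT-style fine-tuning practice: biases and LayerNorm weights are excluded from weight decay.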
optimizer_grouped_parameters = [
    {
        'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01,
    },
    {
        'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0,
    },
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
lr_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=args.train_steps)
dirs = ['checkpoint_best', 'checkpoint_last']
files_to_copy = ['config.json', 'tokenizer.json', 'tokenizer_config.json', 'vocab.txt']
for d in dirs:
os.system('mkdir -p %s' % os.path.join(args.save_model_path, d))
for f in files_to_copy:
os.system('cp %s %s' % (
os.path.join(args.pretrained_model_path, f),
os.path.join(args.save_model_path, d, f)
))
print('Configuration files copied')
def save_model(model, save_dir):
print('Saving Model to', save_dir)
if os.path.exists(save_dir):
print('%s already exists. Removing it...' % save_dir)
os.remove(save_dir)
print('%s removed successfully.' % save_dir)
torch.save(model.state_dict(), save_dir)
print('%s saved successfully.' % save_dir)
total_minibatches = len(train_dataloader)
best_score = 0.0
num_steps = 1
model.train()
model.zero_grad()
epoch = 1
total_loss = 0.0
current_time = time.time()
while True:
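    # Training loop: gradients accumulate over update_cycle minibatches before each optimizer/scheduler step.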
for i, inputs in enumerate(train_dataloader):
n_minibatches = i + 1
output = model(
input_ids=inputs['input_ids'].to(device),
token_type_ids=inputs['token_type_ids'].to(device),
attention_mask=inputs['attention_mask'].to(device),
labels=inputs['labels'].to(device),
)
loss = output.loss / float(args.update_cycle)
total_loss += float(loss)
loss.backward()
if (n_minibatches == total_minibatches) or (n_minibatches % args.update_cycle == 0):
optimizer.step()
lr_scheduler.step()
model.zero_grad()
old_time = current_time
current_time = time.time()
print('epoch = %d, step = %d, loss = %.6f (%.3fs)' % (epoch, num_steps, total_loss, current_time - old_time))
if (num_steps == args.train_steps) or (num_steps % args.eval_steps == 0):
print('Evaluating...')
preds, preds_prob = predict(
eval_dataloader=dev_dataloader,
model=model,
device=device,
tokenizer=tokenizer,
N=7,
M=1,
mc_dropout=False,
)
eval_score = make_word_outputs_final(preds, args.dev_tgt, tokenizer, threshold_tune=args.dev_tags)[-1]
word_scores_prob = make_word_outputs_final(preds_prob, args.dev_tgt, tokenizer, threshold=0.5)[0]
sent_outputs = pd.Series([float(np.mean(w)) for w in word_scores_prob])
fhter = open(args.dev_hter, 'r', encoding='utf-8')
hter = pd.Series([float(x.strip()) for x in fhter])
fhter.close()
pearson = float(sent_outputs.corr(hter))
print('Pearson: %.6f' % pearson)
eval_score += pearson
print('Validation Score: %.6f, Previous Best Score: %.6f' % (eval_score, best_score))
if eval_score > best_score:
save_model(model, os.path.join(args.save_model_path, 'checkpoint_best/pytorch_model.bin'))
best_score = eval_score
save_model(model, os.path.join(args.save_model_path, 'checkpoint_last/pytorch_model.bin'))
if num_steps >= args.train_steps:
exit(0)
num_steps += 1
total_loss = 0.0
epoch += 1
|
[
"evaluate.predict",
"evaluate.make_word_outputs_final"
] |
[((442, 467), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (465, 467), False, 'import argparse\n'), ((1636, 1655), 'transformers.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (1644, 1655), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((1665, 1685), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1677, 1685), False, 'import torch\n'), ((1686, 1710), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (1707, 1710), False, 'import torch\n'), ((1721, 1791), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['args.pretrained_model_path'], {'cache_dir': 'None'}), '(args.pretrained_model_path, cache_dir=None)\n', (1747, 1791), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((1804, 1918), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.pretrained_model_path'], {'cache_dir': 'None', 'use_fast': '(False)', 'do_lower_case': '(False)'}), '(args.pretrained_model_path, cache_dir=None,\n use_fast=False, do_lower_case=False)\n', (1833, 1918), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((1924, 2023), 'transformers.AutoModelWithLMHead.from_pretrained', 'AutoModelWithLMHead.from_pretrained', (['args.pretrained_model_path'], {'config': 'config', 'cache_dir': 'None'}), '(args.pretrained_model_path, config=\n config, cache_dir=None)\n', (1959, 2023), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((2099, 2229), 'data.TrainDataset', 'TrainDataset', ([], {'src_path': 'args.train_src', 'tgt_path': 'args.train_tgt', 'tokenizer': 'tokenizer', 'block_size': 'args.block_size', 'wwm': 'args.wwm'}), '(src_path=args.train_src, tgt_path=args.train_tgt, tokenizer=\n tokenizer, block_size=args.block_size, wwm=args.wwm)\n', (2111, 2229), False, 'from data import eval_collate_fn, EvalDataset, TrainCollator, TrainDataset\n'), ((2462, 2602), 'data.EvalDataset', 'EvalDataset', ([], {'src_path': 'args.dev_src', 'tgt_path': 'args.dev_tgt', 'tokenizer': 'tokenizer', 'block_size': 'args.eval_block_size', 'wwm': 'args.wwm', 'N': '(7)', 'M': '(1)'}), '(src_path=args.dev_src, tgt_path=args.dev_tgt, tokenizer=\n tokenizer, block_size=args.eval_block_size, wwm=args.wwm, N=7, M=1)\n', (2473, 2602), False, 'from data import eval_collate_fn, EvalDataset, TrainCollator, TrainDataset\n'), ((2646, 2758), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dev_dataset', 'batch_size': 'args.eval_batch_size', 'shuffle': '(False)', 'collate_fn': 'eval_collate_fn'}), '(dataset=dev_dataset, batch_size=args.eval_batch_size, shuffle=\n False, collate_fn=eval_collate_fn)\n', (2656, 2758), False, 'from torch.utils.data import DataLoader\n'), ((3215, 3273), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate'}), '(optimizer_grouped_parameters, lr=args.learning_rate)\n', (3220, 3273), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((3289, 3392), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'args.train_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=args.train_steps)\n', (3320, 3392), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((4316, 4327), 'time.time', 'time.time', ([], {}), '()\n', (4325, 4327), False, 'import time\n'), ((3906, 3930), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (3920, 3930), False, 'import os\n'), ((2371, 2443), 'data.TrainCollator', 'TrainCollator', ([], {'tokenizer': 'tokenizer', 'mlm_probability': 'args.mlm_probability'}), '(tokenizer=tokenizer, mlm_probability=args.mlm_probability)\n', (2384, 2443), False, 'from data import eval_collate_fn, EvalDataset, TrainCollator, TrainDataset\n'), ((4002, 4021), 'os.remove', 'os.remove', (['save_dir'], {}), '(save_dir)\n', (4011, 4021), False, 'import os\n'), ((3569, 3606), 'os.path.join', 'os.path.join', (['args.save_model_path', 'd'], {}), '(args.save_model_path, d)\n', (3581, 3606), False, 'import os\n'), ((5053, 5064), 'time.time', 'time.time', ([], {}), '()\n', (5062, 5064), False, 'import time\n'), ((5361, 5481), 'evaluate.predict', 'predict', ([], {'eval_dataloader': 'dev_dataloader', 'model': 'model', 'device': 'device', 'tokenizer': 'tokenizer', 'N': '(7)', 'M': '(1)', 'mc_dropout': '(False)'}), '(eval_dataloader=dev_dataloader, model=model, device=device,\n tokenizer=tokenizer, N=7, M=1, mc_dropout=False)\n', (5368, 5481), False, 'from evaluate import predict, make_word_outputs_final\n'), ((3681, 3724), 'os.path.join', 'os.path.join', (['args.pretrained_model_path', 'f'], {}), '(args.pretrained_model_path, f)\n', (3693, 3724), False, 'import os\n'), ((3738, 3778), 'os.path.join', 'os.path.join', (['args.save_model_path', 'd', 'f'], {}), '(args.save_model_path, d, f)\n', (3750, 3778), False, 'import os\n'), ((5666, 5756), 'evaluate.make_word_outputs_final', 'make_word_outputs_final', (['preds', 'args.dev_tgt', 'tokenizer'], {'threshold_tune': 'args.dev_tags'}), '(preds, args.dev_tgt, tokenizer, threshold_tune=args\n .dev_tags)\n', (5689, 5756), False, 'from evaluate import predict, make_word_outputs_final\n'), ((5791, 5866), 'evaluate.make_word_outputs_final', 'make_word_outputs_final', (['preds_prob', 'args.dev_tgt', 'tokenizer'], {'threshold': '(0.5)'}), '(preds_prob, args.dev_tgt, tokenizer, threshold=0.5)\n', (5814, 5866), False, 'from evaluate import predict, make_word_outputs_final\n'), ((6619, 6690), 'os.path.join', 'os.path.join', (['args.save_model_path', '"""checkpoint_last/pytorch_model.bin"""'], {}), "(args.save_model_path, 'checkpoint_last/pytorch_model.bin')\n", (6631, 6690), False, 'import os\n'), ((6468, 6539), 'os.path.join', 'os.path.join', (['args.save_model_path', '"""checkpoint_best/pytorch_model.bin"""'], {}), "(args.save_model_path, 'checkpoint_best/pytorch_model.bin')\n", (6480, 6539), False, 'import os\n'), ((5918, 5928), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (5925, 5928), True, 'import numpy as np\n')]
|
import os
from collections import defaultdict
import logging
from time import time
import numpy as np
import torch
from matplotlib import pyplot as plt
from tqdm import tqdm
from evaluate import evaluate
from config import *
from metrics import VolumeLoss
TRAIN_LOSS_TAG = 'Train-Loss'
VALIDATION_LOSS_TAG = 'Val-Loss'
VALIDATION_SCORE_TAG_1 = 'Dice-class-1'
VALIDATION_SCORE_TAG_2 = 'Dice-class-2'
def iterate_dataloader(dataloader):
"""
    Iterates over the dataloader indefinitely.
"""
while True:
for sample in dataloader:
yield sample
class CNNTrainer:
def __init__(self, train_configs, smooth_score_size=10):
"""
Manages the training process of a model and monitors it
"""
self.config = train_configs
self.volume_crieteria = VolumeLoss(self.config.dice_loss_weight, self.config.wce_loss_weight, self.config.ce_loss_weight)
self.step = 0
self.plot_data = defaultdict(list)
self.plot_data_means = defaultdict(list)
self.smooth_score_size = smooth_score_size
self.train_time = 0
self.pbar = tqdm(unit='Slices')
def train_model(self, model, dataloaders, train_dir):
self.train_dir = train_dir
os.makedirs(self.train_dir, exist_ok=True)
model.to(self.config.device)
model.train()
train_loader, val_loader = dataloaders
logging.info('Training..')
start = time()
for sample in iterate_dataloader(train_loader):
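            # Each sample supplies CT, ground-truth, and mask volumes; the model performs one update per sample.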
ct_volume = sample['ct'].to(device=self.config.device, dtype=torch.float32)
gt_volume = sample['gt'].to(device=self.config.device, dtype=torch.long)
mask_volume = sample['mask'].to(device=self.config.device, dtype=torch.bool)
loss = model.train_one_sample(ct_volume, gt_volume, mask_volume, self.volume_crieteria)
self.register_plot_data({TRAIN_LOSS_TAG: loss})
self.pbar.update(ct_volume.shape[0] * ct_volume.shape[-3])
self.pbar.set_description(f"Train-step: {self.step}/{self.config.train_steps}, lr: {model.optimizer.param_groups[0]['lr']:.10f}")
# Evaluation
if self.step % self.config.eval_freq == 0:
validation_report = evaluate(model, val_loader, self.config.device, self.volume_crieteria)
validation_report[VALIDATION_LOSS_TAG] = validation_report.pop('Loss')
self.register_plot_data(validation_report)
self.plot()
self.save_checkpoint(model, name='latest')
measure_progress_key = VALIDATION_SCORE_TAG_2 if self.config.n_classes == 3 else VALIDATION_SCORE_TAG_1
if self.is_last_smoothed_score_best(measure_progress_key):
self.save_checkpoint(model, name='best')
if self.step % self.config.ckpt_frequency == 0:
self.save_checkpoint(model, name=f'step-{self.step}')
if self.step % self.config.decay_steps == 0 and self.step > 0:
model.decay_learning_rate(self.config.decay_factor)
self.step += 1
if self.step > self.config.train_steps:
break
self.train_time += time() - start
def is_last_smoothed_score_best(self, metric_name):
return np.argmax(self.plot_data_means[metric_name]) == len(self.plot_data_means[metric_name]) - 1
def get_report(self):
report = {'Train time (H)': f"{self.train_time/3600:.2f}"}
for k, v in self.plot_data_means.items():
idx = np.argmax(v)
report[f'{k}-({self.smooth_score_size}-smooth) Step'] = idx
report[f'{k}-({self.smooth_score_size}-smooth) Score'] = f"{v[idx]:.3f}"
return report
def register_plot_data(self, loss_dict):
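        # Keep each raw value plus a running mean over the last smooth_score_size values for the smoothed curves.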
for k, v in loss_dict.items():
self.plot_data[k].append(float(v))
self.plot_data_means[k].append(np.mean(self.plot_data[k][-self.smooth_score_size:]))
def plot(self):
metric_groups = [[TRAIN_LOSS_TAG, VALIDATION_LOSS_TAG]]
metric_groups.append([VALIDATION_SCORE_TAG_1, VALIDATION_SCORE_TAG_2] if self.config.n_classes == 3 else [VALIDATION_SCORE_TAG_1])
for metric_group in metric_groups:
nvalues = max([len(self.plot_data[k]) for k in metric_group])
for k in metric_group:
plt.plot(np.linspace(0, nvalues - 1, len(self.plot_data[k])), self.plot_data[k],
alpha=0.5, label=f"{k}: {self.plot_data[k][-1]:.3f}")
plt.plot(np.linspace(0, nvalues - 1, len(self.plot_data_means[k])), self.plot_data_means[k],
alpha=0.5, label=f"avg-last-{self.smooth_score_size}: {self.plot_data_means[k][-1]:.3f}")
plt.legend()
plt.savefig(f'{self.train_dir}/Plot({",".join(metric_group)}).png')
plt.clf()
def save_checkpoint(self, model, name):
"""
        Saves the model weights and the trainer's internal state to a single checkpoint file
"""
trainer_state_dict = dict(step=self.step, data=self.plot_data, data_means=self.plot_data_means, train_time=self.train_time)
torch.save(dict(trainer=trainer_state_dict, model=model.get_state_dict()), f'{self.train_dir}/{name}.pth')
def load_state(self, trainer_state):
logging.info("loaded trainer from file")
self.step = trainer_state['step']
self.plot_data = trainer_state['data']
self.plot_data_means = trainer_state['data_means']
self.train_time = trainer_state['train_time']
|
[
"evaluate.evaluate"
] |
[((803, 905), 'metrics.VolumeLoss', 'VolumeLoss', (['self.config.dice_loss_weight', 'self.config.wce_loss_weight', 'self.config.ce_loss_weight'], {}), '(self.config.dice_loss_weight, self.config.wce_loss_weight, self.\n config.ce_loss_weight)\n', (813, 905), False, 'from metrics import VolumeLoss\n'), ((948, 965), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (959, 965), False, 'from collections import defaultdict\n'), ((997, 1014), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1008, 1014), False, 'from collections import defaultdict\n'), ((1114, 1133), 'tqdm.tqdm', 'tqdm', ([], {'unit': '"""Slices"""'}), "(unit='Slices')\n", (1118, 1133), False, 'from tqdm import tqdm\n'), ((1236, 1278), 'os.makedirs', 'os.makedirs', (['self.train_dir'], {'exist_ok': '(True)'}), '(self.train_dir, exist_ok=True)\n', (1247, 1278), False, 'import os\n'), ((1396, 1422), 'logging.info', 'logging.info', (['"""Training.."""'], {}), "('Training..')\n", (1408, 1422), False, 'import logging\n'), ((1439, 1445), 'time.time', 'time', ([], {}), '()\n', (1443, 1445), False, 'from time import time\n'), ((5312, 5352), 'logging.info', 'logging.info', (['"""loaded trainer from file"""'], {}), "('loaded trainer from file')\n", (5324, 5352), False, 'import logging\n'), ((3222, 3228), 'time.time', 'time', ([], {}), '()\n', (3226, 3228), False, 'from time import time\n'), ((3309, 3353), 'numpy.argmax', 'np.argmax', (['self.plot_data_means[metric_name]'], {}), '(self.plot_data_means[metric_name])\n', (3318, 3353), True, 'import numpy as np\n'), ((3562, 3574), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (3571, 3574), True, 'import numpy as np\n'), ((4771, 4783), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4781, 4783), True, 'from matplotlib import pyplot as plt\n'), ((4876, 4885), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4883, 4885), True, 'from matplotlib import pyplot as plt\n'), ((2256, 2326), 'evaluate.evaluate', 'evaluate', (['model', 'val_loader', 'self.config.device', 'self.volume_crieteria'], {}), '(model, val_loader, self.config.device, self.volume_crieteria)\n', (2264, 2326), False, 'from evaluate import evaluate\n'), ((3929, 3981), 'numpy.mean', 'np.mean', (['self.plot_data[k][-self.smooth_score_size:]'], {}), '(self.plot_data[k][-self.smooth_score_size:])\n', (3936, 3981), True, 'import numpy as np\n')]
|
"""1) Include/define any dependencies for catacomb.System class"""
import torch
import catacomb
from data import str_to_array, tensor_from_list
from evaluate import evaluate
from models.encoder import Encoder
from models.ptr_decoder import PtrDecoder
"""2) Implementing catacomb.System class with initialization and output methods"""
class System(catacomb.System):
def __init__(self):
hidden_dim = embedding_dim = 128
data_dim = 101
self.encoder = Encoder(input_dim=data_dim,
embedding_dim=embedding_dim,
hidden_dim=hidden_dim)
self.decoder = PtrDecoder(output_dim=data_dim,
embedding_dim=embedding_dim,
hidden_dim=hidden_dim)
checkpoint = torch.load('./e1i0.ckpt', map_location='cpu')
self.encoder.load_state_dict(checkpoint['encoder'])
self.decoder.load_state_dict(checkpoint['decoder'])
def output(self, unsorted):
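        # Convert the input list string to a tensor and return the pointer network's predicted ordering as a string.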
unsorted_list = str_to_array(unsorted['list'])
unsorted_tensor = tensor_from_list(unsorted_list)
return str(evaluate(self.encoder, self.decoder, unsorted_tensor, True))
|
[
"evaluate.evaluate"
] |
[((481, 560), 'models.encoder.Encoder', 'Encoder', ([], {'input_dim': 'data_dim', 'embedding_dim': 'embedding_dim', 'hidden_dim': 'hidden_dim'}), '(input_dim=data_dim, embedding_dim=embedding_dim, hidden_dim=hidden_dim)\n', (488, 560), False, 'from models.encoder import Encoder\n'), ((625, 713), 'models.ptr_decoder.PtrDecoder', 'PtrDecoder', ([], {'output_dim': 'data_dim', 'embedding_dim': 'embedding_dim', 'hidden_dim': 'hidden_dim'}), '(output_dim=data_dim, embedding_dim=embedding_dim, hidden_dim=\n hidden_dim)\n', (635, 713), False, 'from models.ptr_decoder import PtrDecoder\n'), ((771, 816), 'torch.load', 'torch.load', (['"""./e1i0.ckpt"""'], {'map_location': '"""cpu"""'}), "('./e1i0.ckpt', map_location='cpu')\n", (781, 816), False, 'import torch\n'), ((995, 1025), 'data.str_to_array', 'str_to_array', (["unsorted['list']"], {}), "(unsorted['list'])\n", (1007, 1025), False, 'from data import str_to_array, tensor_from_list\n'), ((1052, 1083), 'data.tensor_from_list', 'tensor_from_list', (['unsorted_list'], {}), '(unsorted_list)\n', (1068, 1083), False, 'from data import str_to_array, tensor_from_list\n'), ((1104, 1163), 'evaluate.evaluate', 'evaluate', (['self.encoder', 'self.decoder', 'unsorted_tensor', '(True)'], {}), '(self.encoder, self.decoder, unsorted_tensor, True)\n', (1112, 1163), False, 'from evaluate import evaluate\n')]
|
import os
import numpy as np
import torch
import torch.nn as nn
from constants import Constants
from datasets import load_data
from evaluate import evaluate
from models import CoVA
from train import train_model
from utils import cmdline_args_parser, print_and_log, set_all_seeds
parser = cmdline_args_parser()
args = parser.parse_args()
device = torch.device("cuda:%d" % args.device if torch.cuda.is_available() else "cpu")
set_all_seeds(Constants.SEED)
N_CLASSES = Constants.N_CLASSES
CLASS_NAMES = Constants.CLASS_NAMES
IMG_HEIGHT = Constants.IMG_HEIGHT
DATA_DIR = Constants.DATA_DIR
SPLIT_DIR = Constants.SPLIT_DIR
OUTPUT_DIR = Constants.OUTPUT_DIR
# NOTE: if the same hyperparameter configuration is run again, the previous log file and saved model will be overwritten
EVAL_INTERVAL = 1 # Number of Epochs after which model is evaluated while training
NUM_WORKERS = args.num_workers # multithreaded data loading
CV_FOLD = args.cv_fold
FOLD_DIR = "%s/Fold-%d" % (SPLIT_DIR, CV_FOLD)
if CV_FOLD == -1:
FOLD_DIR = SPLIT_DIR # use files from SPLIT_DIR
train_img_ids = np.loadtxt("%s/train_imgs.txt" % FOLD_DIR, str)
val_img_ids = np.loadtxt("%s/val_imgs.txt" % FOLD_DIR, str)
test_img_ids = np.loadtxt("%s/test_imgs.txt" % FOLD_DIR, str)
# used for calculating domain-wise and macro accuracy when the files below are available (optional)
webpage_info_file = "%s/webpage_info.csv" % FOLD_DIR
webpage_info = None
if os.path.isfile(webpage_info_file):
webpage_info = np.loadtxt(
webpage_info_file, str, delimiter=",", skiprows=1
) # (img_id, domain) values
test_domains_file = "%s/test_domains.txt" % FOLD_DIR
test_domains = None
if os.path.isfile(test_domains_file):
test_domains = np.loadtxt(test_domains_file, str)
########## HYPERPARAMETERS ##########
N_EPOCHS = args.n_epochs
LEARNING_RATE = args.learning_rate
BATCH_SIZE = args.batch_size
CONTEXT_SIZE = args.context_size
use_context = CONTEXT_SIZE > 0
HIDDEN_DIM = args.hidden_dim if use_context else 0
ROI_OUTPUT = (args.roi, args.roi)
BBOX_HIDDEN_DIM = args.bbox_hidden_dim
USE_ADDITIONAL_FEAT = args.additional_feat
WEIGHT_DECAY = args.weight_decay
DROP_PROB = args.drop_prob
SAMPLING_FRACTION = (
args.sampling_fraction
if (args.sampling_fraction >= 0 and args.sampling_fraction <= 1)
else 1
)
params = "lr-%.0e batch-%d cs-%d hd-%d roi-%d bbhd-%d af-%d wd-%.0e dp-%.1f sf-%.1f" % (
LEARNING_RATE,
BATCH_SIZE,
CONTEXT_SIZE,
HIDDEN_DIM,
ROI_OUTPUT[0],
BBOX_HIDDEN_DIM,
USE_ADDITIONAL_FEAT,
WEIGHT_DECAY,
DROP_PROB,
SAMPLING_FRACTION,
)
results_dir = "%s/%s" % (OUTPUT_DIR, params)
fold_wise_acc_file = "%s/fold_wise_acc.csv" % results_dir
if not os.path.exists(results_dir):
os.makedirs(results_dir)
print("\n%s Training on Fold-%s %s" % ("*" * 20, CV_FOLD, "*" * 20))
########## DATA LOADERS ##########
train_loader, val_loader, test_loader = load_data(
DATA_DIR,
train_img_ids,
val_img_ids,
test_img_ids,
CONTEXT_SIZE,
BATCH_SIZE,
USE_ADDITIONAL_FEAT,
SAMPLING_FRACTION,
NUM_WORKERS,
)
n_additional_feat = train_loader.dataset.n_additional_feat
log_file = "%s/Fold-%s logs.txt" % (results_dir, CV_FOLD)
test_acc_imgwise_file = "%s/Fold-%s test_acc_imgwise.csv" % (results_dir, CV_FOLD)
test_acc_domainwise_file = "%s/Fold-%s test_acc_domainwise.csv" % (results_dir, CV_FOLD)
model_save_file = "%s/Fold-%s saved_model.pth" % (results_dir, CV_FOLD)
print('logs will be saved in "%s"' % (log_file))
print_and_log("Learning Rate: %.0e" % (LEARNING_RATE), log_file, "w")
print_and_log("Batch Size: %d" % (BATCH_SIZE), log_file)
print_and_log("Context Size: %d" % (CONTEXT_SIZE), log_file)
print_and_log("Hidden Dim: %d" % (HIDDEN_DIM), log_file)
print_and_log("RoI Pool Output Size: (%d, %d)" % ROI_OUTPUT, log_file)
print_and_log("BBox Hidden Dim: %d" % (BBOX_HIDDEN_DIM), log_file)
print_and_log("Use Additional Features: %s" % (USE_ADDITIONAL_FEAT), log_file)
print_and_log("Weight Decay: %.0e" % (WEIGHT_DECAY), log_file)
print_and_log("Dropout Probability: %.2f" % (DROP_PROB), log_file)
print_and_log("Sampling Fraction: %.2f\n" % (SAMPLING_FRACTION), log_file)
########## TRAIN MODEL ##########
model = CoVA(
ROI_OUTPUT,
IMG_HEIGHT,
N_CLASSES,
use_context,
HIDDEN_DIM,
BBOX_HIDDEN_DIM,
n_additional_feat,
DROP_PROB,
CLASS_NAMES,
).to(device)
optimizer = torch.optim.Adam(
model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=100, gamma=1
) # No LR Scheduling
criterion = nn.CrossEntropyLoss(reduction="sum").to(device)
val_acc = train_model(
model,
train_loader,
optimizer,
scheduler,
criterion,
N_EPOCHS,
device,
val_loader,
EVAL_INTERVAL,
log_file,
model_save_file,
)
class_acc_test, macro_acc_test = evaluate(
model,
test_loader,
device,
log_file,
test_acc_imgwise_file,
webpage_info,
test_domains,
test_acc_domainwise_file,
)
with open(fold_wise_acc_file, "a") as f:
if os.stat(fold_wise_acc_file).st_size == 0: # add header if file is empty
f.write(
"Fold,val_avg,price_acc,price_macro_acc,title_acc,title_macro_acc,image_acc,image_macro_acc\n"
)
f.write(
"%s,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f\n"
% (
CV_FOLD,
val_acc,
class_acc_test[1],
macro_acc_test[1],
class_acc_test[2],
macro_acc_test[2],
class_acc_test[3],
macro_acc_test[3],
)
)
|
[
"evaluate.evaluate"
] |
[((291, 312), 'utils.cmdline_args_parser', 'cmdline_args_parser', ([], {}), '()\n', (310, 312), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((428, 457), 'utils.set_all_seeds', 'set_all_seeds', (['Constants.SEED'], {}), '(Constants.SEED)\n', (441, 457), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((1075, 1122), 'numpy.loadtxt', 'np.loadtxt', (["('%s/train_imgs.txt' % FOLD_DIR)", 'str'], {}), "('%s/train_imgs.txt' % FOLD_DIR, str)\n", (1085, 1122), True, 'import numpy as np\n'), ((1137, 1182), 'numpy.loadtxt', 'np.loadtxt', (["('%s/val_imgs.txt' % FOLD_DIR)", 'str'], {}), "('%s/val_imgs.txt' % FOLD_DIR, str)\n", (1147, 1182), True, 'import numpy as np\n'), ((1198, 1244), 'numpy.loadtxt', 'np.loadtxt', (["('%s/test_imgs.txt' % FOLD_DIR)", 'str'], {}), "('%s/test_imgs.txt' % FOLD_DIR, str)\n", (1208, 1244), True, 'import numpy as np\n'), ((1410, 1443), 'os.path.isfile', 'os.path.isfile', (['webpage_info_file'], {}), '(webpage_info_file)\n', (1424, 1443), False, 'import os\n'), ((1644, 1677), 'os.path.isfile', 'os.path.isfile', (['test_domains_file'], {}), '(test_domains_file)\n', (1658, 1677), False, 'import os\n'), ((2879, 3023), 'datasets.load_data', 'load_data', (['DATA_DIR', 'train_img_ids', 'val_img_ids', 'test_img_ids', 'CONTEXT_SIZE', 'BATCH_SIZE', 'USE_ADDITIONAL_FEAT', 'SAMPLING_FRACTION', 'NUM_WORKERS'], {}), '(DATA_DIR, train_img_ids, val_img_ids, test_img_ids, CONTEXT_SIZE,\n BATCH_SIZE, USE_ADDITIONAL_FEAT, SAMPLING_FRACTION, NUM_WORKERS)\n', (2888, 3023), False, 'from datasets import load_data\n'), ((3471, 3538), 'utils.print_and_log', 'print_and_log', (["('Learning Rate: %.0e' % LEARNING_RATE)", 'log_file', '"""w"""'], {}), "('Learning Rate: %.0e' % LEARNING_RATE, log_file, 'w')\n", (3484, 3538), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3541, 3595), 'utils.print_and_log', 'print_and_log', (["('Batch Size: %d' % BATCH_SIZE)", 'log_file'], {}), "('Batch Size: %d' % BATCH_SIZE, log_file)\n", (3554, 3595), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3598, 3656), 'utils.print_and_log', 'print_and_log', (["('Context Size: %d' % CONTEXT_SIZE)", 'log_file'], {}), "('Context Size: %d' % CONTEXT_SIZE, log_file)\n", (3611, 3656), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3659, 3713), 'utils.print_and_log', 'print_and_log', (["('Hidden Dim: %d' % HIDDEN_DIM)", 'log_file'], {}), "('Hidden Dim: %d' % HIDDEN_DIM, log_file)\n", (3672, 3713), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3716, 3786), 'utils.print_and_log', 'print_and_log', (["('RoI Pool Output Size: (%d, %d)' % ROI_OUTPUT)", 'log_file'], {}), "('RoI Pool Output Size: (%d, %d)' % ROI_OUTPUT, log_file)\n", (3729, 3786), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3787, 3851), 'utils.print_and_log', 'print_and_log', (["('BBox Hidden Dim: %d' % BBOX_HIDDEN_DIM)", 'log_file'], {}), "('BBox Hidden Dim: %d' % BBOX_HIDDEN_DIM, log_file)\n", (3800, 3851), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3854, 3930), 'utils.print_and_log', 'print_and_log', (["('Use Additional Features: %s' % USE_ADDITIONAL_FEAT)", 'log_file'], {}), "('Use Additional Features: %s' % USE_ADDITIONAL_FEAT, log_file)\n", (3867, 3930), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3933, 3993), 'utils.print_and_log', 'print_and_log', (["('Weight Decay: %.0e' % WEIGHT_DECAY)", 'log_file'], {}), "('Weight Decay: %.0e' % WEIGHT_DECAY, log_file)\n", (3946, 3993), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((3996, 4060), 'utils.print_and_log', 'print_and_log', (["('Dropout Probability: %.2f' % DROP_PROB)", 'log_file'], {}), "('Dropout Probability: %.2f' % DROP_PROB, log_file)\n", (4009, 4060), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((4063, 4135), 'utils.print_and_log', 'print_and_log', (["('Sampling Fraction: %.2f\\n' % SAMPLING_FRACTION)", 'log_file'], {}), "('Sampling Fraction: %.2f\\n' % SAMPLING_FRACTION, log_file)\n", (4076, 4135), False, 'from utils import cmdline_args_parser, print_and_log, set_all_seeds\n'), ((4468, 4534), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(100)', 'gamma': '(1)'}), '(optimizer, step_size=100, gamma=1)\n', (4499, 4534), False, 'import torch\n'), ((4632, 4773), 'train.train_model', 'train_model', (['model', 'train_loader', 'optimizer', 'scheduler', 'criterion', 'N_EPOCHS', 'device', 'val_loader', 'EVAL_INTERVAL', 'log_file', 'model_save_file'], {}), '(model, train_loader, optimizer, scheduler, criterion, N_EPOCHS,\n device, val_loader, EVAL_INTERVAL, log_file, model_save_file)\n', (4643, 4773), False, 'from train import train_model\n'), ((4851, 4978), 'evaluate.evaluate', 'evaluate', (['model', 'test_loader', 'device', 'log_file', 'test_acc_imgwise_file', 'webpage_info', 'test_domains', 'test_acc_domainwise_file'], {}), '(model, test_loader, device, log_file, test_acc_imgwise_file,\n webpage_info, test_domains, test_acc_domainwise_file)\n', (4859, 4978), False, 'from evaluate import evaluate\n'), ((1464, 1525), 'numpy.loadtxt', 'np.loadtxt', (['webpage_info_file', 'str'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(webpage_info_file, str, delimiter=',', skiprows=1)\n", (1474, 1525), True, 'import numpy as np\n'), ((1698, 1732), 'numpy.loadtxt', 'np.loadtxt', (['test_domains_file', 'str'], {}), '(test_domains_file, str)\n', (1708, 1732), True, 'import numpy as np\n'), ((2676, 2703), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (2690, 2703), False, 'import os\n'), ((2709, 2733), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (2720, 2733), False, 'import os\n'), ((390, 415), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (413, 415), False, 'import torch\n'), ((4181, 4309), 'models.CoVA', 'CoVA', (['ROI_OUTPUT', 'IMG_HEIGHT', 'N_CLASSES', 'use_context', 'HIDDEN_DIM', 'BBOX_HIDDEN_DIM', 'n_additional_feat', 'DROP_PROB', 'CLASS_NAMES'], {}), '(ROI_OUTPUT, IMG_HEIGHT, N_CLASSES, use_context, HIDDEN_DIM,\n BBOX_HIDDEN_DIM, n_additional_feat, DROP_PROB, CLASS_NAMES)\n', (4185, 4309), False, 'from models import CoVA\n'), ((4573, 4609), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (4592, 4609), True, 'import torch.nn as nn\n'), ((5059, 5086), 'os.stat', 'os.stat', (['fold_wise_acc_file'], {}), '(fold_wise_acc_file)\n', (5066, 5086), False, 'import os\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import regex
from data import AlignDataset
from evaluate import evaluate
from models import Aligner
import collections
resdict = collections.defaultdict(None)
aligner = Aligner(
'criss-align', distortion=0,
path='criss/criss-3rd.pt',
args_path='criss/args.pt',
matching_method='a'
)
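# Align the de-en sentence pairs and score the predictions against the gold alignments.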
dset = AlignDataset('data/align/', 'de-en')
aligns = aligner.align_sents(dset.sent_pairs, langcodes=('de_DE', 'en_XX'))
res = evaluate(dset.ground_truth, aligns, 1)
print('de-en:', res)
|
[
"evaluate.evaluate"
] |
[((183, 212), 'collections.defaultdict', 'collections.defaultdict', (['None'], {}), '(None)\n', (206, 212), False, 'import collections\n'), ((224, 340), 'models.Aligner', 'Aligner', (['"""criss-align"""'], {'distortion': '(0)', 'path': '"""criss/criss-3rd.pt"""', 'args_path': '"""criss/args.pt"""', 'matching_method': '"""a"""'}), "('criss-align', distortion=0, path='criss/criss-3rd.pt', args_path=\n 'criss/args.pt', matching_method='a')\n", (231, 340), False, 'from models import Aligner\n'), ((362, 398), 'data.AlignDataset', 'AlignDataset', (['"""data/align/"""', '"""de-en"""'], {}), "('data/align/', 'de-en')\n", (374, 398), False, 'from data import AlignDataset\n'), ((481, 519), 'evaluate.evaluate', 'evaluate', (['dset.ground_truth', 'aligns', '(1)'], {}), '(dset.ground_truth, aligns, 1)\n', (489, 519), False, 'from evaluate import evaluate\n')]
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import evaluate
import os
import base64
import json
# HTTPRequestHandler class
class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
# GET
def do_GET(self):
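        # The request path carries a base64-encoded JSON payload of the form {"path": ..., "style": ...}.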
impath = self.path[1:]
try:
q = json.loads(base64.b64decode(impath).decode("utf-8"))
        except Exception:
            self.send_response(404)
            self.end_headers()
            self.wfile.write(bytes("Not found", "utf-8"))
            print("Failed request")
            return
evaluate.ffwd([q["path"]], ["out_dir/result.png"], "models/" + q["style"] + ".ckpt")
self.send_response(200)
# Send headers
        self.send_header('Content-type', 'image/png')
        self.end_headers()
        # Send the stylized image back to the client
with open("out_dir/result.png", "rb") as f:
self.wfile.write(f.read())
return
def run():
print('starting server...')
# Server settings
    # Bind to port 8081; port 80, the standard HTTP port, would require root access
server_address = ('127.0.0.1', 8081)
httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)
print('running server...')
httpd.serve_forever()
run()
|
[
"evaluate.ffwd"
] |
[((1167, 1224), 'http.server.HTTPServer', 'HTTPServer', (['server_address', 'testHTTPServer_RequestHandler'], {}), '(server_address, testHTTPServer_RequestHandler)\n', (1177, 1224), False, 'from http.server import BaseHTTPRequestHandler, HTTPServer\n'), ((519, 607), 'evaluate.ffwd', 'evaluate.ffwd', (["[q['path']]", "['out_dir/result.png']", "('models/' + q['style'] + '.ckpt')"], {}), "([q['path']], ['out_dir/result.png'], 'models/' + q['style'] +\n '.ckpt')\n", (532, 607), False, 'import evaluate\n'), ((303, 327), 'base64.b64decode', 'base64.b64decode', (['impath'], {}), '(impath)\n', (319, 327), False, 'import base64\n')]
|
from typing import Dict
import argparse
import itertools
import logging
import os
import os.path
import sys
import time
import dynet as dy
import numpy as np
import yaml
import evaluate
import loader
import parse
import trees
import vocabulary
LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
logger = logging.getLogger('minimal-span-parser')
logger.setLevel(LOGLEVEL)
logger.addHandler(logging.StreamHandler(sys.stdout))
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, seconds = divmod(elapsed_time, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
elapsed_string = "{}h{:02}m{:02}s".format(hours, minutes, seconds)
if days > 0:
elapsed_string = "{}d{}".format(days, elapsed_string)
return elapsed_string
def build_parser(args, model, tag_vocab, word_vocab, label_vocab,
language_embeddings):
if args.parser_type == "top-down":
return parse.TopDownParser(
model,
tag_vocab,
word_vocab,
label_vocab,
args.tag_embedding_dim,
args.word_embedding_dim,
args.lstm_layers,
args.lstm_dim,
args.label_hidden_dim,
args.split_hidden_dim,
args.dropout,
language_embeddings,
)
else:
return parse.ChartParser(
model,
tag_vocab,
word_vocab,
label_vocab,
args.tag_embedding_dim,
args.word_embedding_dim,
args.lstm_layers,
args.lstm_dim,
args.label_hidden_dim,
args.dropout,
language_embeddings,
)
def run_train(args):
logger.addHandler(logging.FileHandler(f"{args.model_path_base}.log"))
logger.info(args)
if args.numpy_seed is not None:
logger.info("Setting numpy random seed to {}...".format(
args.numpy_seed))
np.random.seed(args.numpy_seed)
data_loader = loader.Loader(args)
if args.language_embedding:
language_embeddings = load_language_embeddings(args.language_embedding)
else:
language_embeddings = None
train_langs, train_treebank = data_loader.load_parse(
args.train_langs.split(',') if args.train_langs else None,
args.train_paths.split(','))
logger.info("Loaded {} training examples.".format(len(train_treebank)))
dev_langs, dev_treebank = data_loader.load_treebank(
args.dev_langs.split(',') if args.dev_langs else None,
args.dev_paths.split(','))
logger.info("Loaded {} development examples.".format(len(dev_treebank)))
logger.info("Constructing vocabularies...")
tag_vocab = vocabulary.Vocabulary([parse.START, parse.STOP])
word_vocab = vocabulary.Vocabulary([parse.START, parse.STOP, parse.UNK])
label_vocab = vocabulary.Vocabulary([()])
for tree in train_treebank:
nodes = [tree]
while nodes:
node = nodes.pop()
if isinstance(node, trees.InternalParseNode):
label_vocab.index(node.label)
nodes.extend(reversed(node.children))
else:
tag_vocab.index(node.tag)
word_vocab.index(node.word)
tag_vocab.freeze()
word_vocab.freeze()
label_vocab.freeze()
def print_vocabulary(name, vocab):
special = {parse.START, parse.STOP, parse.UNK}
logger.debug("{} ({:,}): {}".format(
name, vocab.size,
sorted(value for value in vocab.values if value in special) +
sorted(value for value in vocab.values if value not in special)))
if args.print_vocabs:
print_vocabulary("Tag", tag_vocab)
print_vocabulary("Word", word_vocab)
print_vocabulary("Label", label_vocab)
logger.info("Initializing model...")
model = dy.ParameterCollection()
parser = build_parser(args, model, tag_vocab, word_vocab, label_vocab,
language_embeddings)
trainer = dy.AdamTrainer(model)
total_processed = 0
current_processed = 0
check_every = args.checks_every
best_dev_fscore = -np.inf
best_processed = None
best_dev_model_path = None
start_time = time.time()
def check_dev():
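        # Evaluate on the dev treebank (or a random dev_limit-sized sample) and checkpoint whenever the dev F-score matches or beats the best so far.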
nonlocal best_dev_fscore
nonlocal best_dev_model_path
nonlocal best_processed
nonlocal total_processed
dev_start_time = time.time()
dev_predicted = []
if args.dev_limit:
dev_sample = np.random.choice(
dev_treebank, args.dev_limit, replace=False)
else:
dev_sample = dev_treebank
dev_losses = []
if language_embeddings:
for lang, tree in zip(dev_langs, dev_sample):
dy.renew_cg()
sentence = [(leaf.tag, leaf.word) for leaf in tree.leaves()]
predicted, loss = parser.parse(sentence, lang=lang)
dev_predicted.append(predicted.convert())
dev_losses.append(loss)
else:
for tree in dev_sample:
dy.renew_cg()
sentence = [(leaf.tag, leaf.word) for leaf in tree.leaves()]
predicted, loss = parser.parse(sentence)
dev_predicted.append(predicted.convert())
dev_losses.append(loss)
dev_loss = dy.average(dev_losses)
dev_loss_value = dev_loss.scalar_value()
dev_fscore = evaluate.evalb(args.evalb_dir, dev_sample, dev_predicted)
dev_frs_score = evaluate.calc_frs(dev_sample, dev_predicted)
logger.info("dev-loss {}"
"dev-fscore {} "
"dev-fuzzy reordering score {:4f} "
"dev-elapsed {} "
"total-elapsed {}".format(
dev_loss_value,
dev_fscore,
dev_frs_score,
format_elapsed(dev_start_time),
format_elapsed(start_time),
))
        if dev_fscore.fscore >= best_dev_fscore:
            if best_dev_model_path is not None:
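                # dy.save (below) writes a pair of files, <path>.data and
                # <path>.meta, so both files of the previous best are removed.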
for ext in [".data", ".meta"]:
path = best_dev_model_path + ext
if os.path.exists(path):
logger.info(
"Removing previous model file {}...".format(path))
os.remove(path)
best_dev_fscore = dev_fscore.fscore
best_processed = total_processed
best_dev_model_path = "{}_dev={:.2f}".format(
args.model_path_base, dev_fscore.fscore)
logger.info(
"Saving new best model to {}...".format(best_dev_model_path))
dy.save(best_dev_model_path, [parser])
for epoch in itertools.count(start=1):
if args.epochs is not None and epoch > args.epochs:
break
np.random.shuffle(train_treebank)
epoch_start_time = time.time()
for start_index in range(
0,
min(args.train_limit, len(train_treebank))
if args.train_limit else len(train_treebank), args.batch_size):
dy.renew_cg()
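            # One DyNet computation graph per minibatch; per-sentence losses
            # are collected and averaged into a single batch loss below.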
batch_losses = []
if language_embeddings:
for lang, tree in zip(
train_langs[start_index:start_index + args.batch_size],
train_treebank[start_index:start_index +
args.batch_size]):
sentence = [(leaf.tag, leaf.word)
for leaf in tree.leaves()]
if args.parser_type == "top-down":
_, loss = parser.parse(sentence, tree, args.explore,
lang)
else:
_, loss = parser.parse(sentence, tree, lang)
batch_losses.append(loss)
total_processed += 1
current_processed += 1
else:
for tree in train_treebank[start_index:start_index +
args.batch_size]:
sentence = [(leaf.tag, leaf.word)
for leaf in tree.leaves()]
if args.parser_type == "top-down":
_, loss = parser.parse(
sentence, tree, explore=args.explore)
else:
_, loss = parser.parse(sentence, tree)
batch_losses.append(loss)
total_processed += 1
current_processed += 1
batch_loss = dy.average(batch_losses)
batch_loss_value = batch_loss.scalar_value()
batch_loss.backward()
trainer.update()
logger.info(
"epoch {:,} "
"batch {:,}/{:,} "
"processed {:,} "
"batch-loss {:.4f} "
"epoch-elapsed {} "
"total-elapsed {}".format(
epoch,
start_index // args.batch_size + 1,
int(np.ceil(len(train_treebank) / args.batch_size)),
total_processed,
batch_loss_value,
format_elapsed(epoch_start_time),
format_elapsed(start_time),
))
if current_processed >= check_every:
current_processed -= check_every
check_dev()
if best_processed and total_processed - best_processed > args.patience:
break
if best_processed and total_processed - best_processed > args.patience:
logger.info(
f"Patience limit of {args.patience} reached. Best processed: {best_processed}. Last epoch: {epoch - 1}"
)
break
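# Illustrative training invocation (the script name and data paths are
# assumptions; the flags come from the "train" subparser in main() below):
#   python main.py train --parser-type chart --tree-type treebank \
#       --train-paths data/02-21.10way.clean --dev-paths data/22.auto.clean \
#       --model-path-base models/chart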
def run_test(args):
logger.info("Loading test trees from {}...".format(args.test_paths))
data_loader = loader.Loader(args)
test_langs, test_treebank = data_loader.load_treebank(
args.test_langs.split(',') if args.test_langs else None,
args.test_paths.split(','))
logger.info("Loaded {:,} test examples.".format(len(test_treebank)))
test_predicted = []
if args.no_prediction:
for tree in test_treebank:
children = [
trees.LeafTreebankNode(leaf.tag, leaf.word)
for leaf in tree.leaves()
]
test_predicted.append(trees.InternalTreebankNode('S', children))
start_time = time.time()
else:
logger.info("Loading model from {}...".format(args.model_path_base))
model = dy.ParameterCollection()
[parser] = dy.load(args.model_path_base, model)
if args.language_embedding and test_langs:
logger.debug(
f"Setting up language embeddings from {args.language_embedding} for {test_langs}"
)
parser.lang_embeddings = load_language_embeddings(
args.language_embedding)
logger.info("Parsing test sentences...")
start_time = time.time()
if test_langs and args.language_embedding:
for lang, tree in zip(test_langs, test_treebank):
dy.renew_cg()
sentence = [(leaf.tag, leaf.word) for leaf in tree.leaves()]
predicted, _ = parser.parse(sentence, lang=lang)
test_predicted.append(predicted.convert())
else:
for tree in test_treebank:
dy.renew_cg()
sentence = [(leaf.tag, leaf.word) for leaf in tree.leaves()]
predicted, _ = parser.parse(sentence)
test_predicted.append(predicted.convert())
test_fscore = evaluate.evalb(args.evalb_dir, test_treebank, test_predicted)
test_frs_score = evaluate.calc_frs(test_treebank, test_predicted)
logger.info("test-fscore {} "
"test-fuzzy-reordering-score {:4f} "
"test-elapsed {}".format(test_fscore, test_frs_score,
format_elapsed(start_time)))
logger.info(f"Printing to this path {args.result_prefix}.prediction")
with open(args.result_prefix + '.prediction', 'w') as out_file:
for prediction, truth, in zip(test_predicted, test_treebank):
out_file.write(f"{str(prediction)}\n")
out_file.write(f"{str(truth)}\n")
logger.info(f"Printing to this path {args.result_prefix}.yaml")
with open(args.result_prefix + '.yaml', 'w') as out_file:
result_dict = {
'recall': test_fscore.recall,
'precision': test_fscore.precision,
'fscore': test_fscore.fscore,
'fuzzy_reorder_score': test_frs_score
}
yaml.dump(result_dict, out_file)
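# Illustrative test invocation (script name, model path, and output prefix are
# assumptions; --tree-type and --result-prefix are required by the subparser):
#   python main.py test --model-path-base models/chart_dev=92.00 \
#       --tree-type treebank --result-prefix out/test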
def main():
dynet_args = [
"--dynet-mem",
"--dynet-weight-decay",
"--dynet-autobatch",
"--dynet-gpus",
"--dynet-gpu",
"--dynet-devices",
"--dynet-seed",
]
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
subparser = subparsers.add_parser("train")
subparser.set_defaults(callback=run_train)
for arg in dynet_args:
subparser.add_argument(arg)
subparser.add_argument("--numpy-seed", type=int)
# Model options
subparser.add_argument(
"--parser-type", choices=["top-down", "chart"], required=True)
subparser.add_argument("--tag-embedding-dim", type=int, default=50)
subparser.add_argument("--word-embedding-dim", type=int, default=100)
subparser.add_argument("--lstm-layers", type=int, default=2)
subparser.add_argument("--lstm-dim", type=int, default=250)
subparser.add_argument("--label-hidden-dim", type=int, default=250)
subparser.add_argument("--split-hidden-dim", type=int, default=250)
subparser.add_argument("--model-path-base", required=True)
    # The language-embedding file is expected to follow the same format as the
    # URIEL data.
subparser.add_argument("--language-embedding", type=str)
# Paths for eval and training
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--train-paths", default="data/02-21.10way.clean")
subparser.add_argument("--train-langs", default=None)
subparser.add_argument("--train-limit", type=int, default=None)
subparser.add_argument("--dev-paths", default="data/22.auto.clean")
subparser.add_argument("--dev-langs", default=None)
subparser.add_argument("--dev-limit", type=int, default=None)
# Training options
subparser.add_argument("--dropout", type=float, default=0.4)
subparser.add_argument("--explore", action="store_true")
subparser.add_argument("--batch-size", type=int, default=10)
subparser.add_argument("--epochs", type=int)
subparser.add_argument("--patience", type=int, default=5000)
# Checks every x number of batches
subparser.add_argument("--checks-every", type=int, default=1000)
subparser.add_argument("--print-vocabs", action="store_true")
subparser.add_argument(
"--tree-type", choices=["itg", "treebank"], required=True)
subparser = subparsers.add_parser("test")
subparser.set_defaults(callback=run_test)
for arg in dynet_args:
subparser.add_argument(arg)
subparser.add_argument("--no-prediction", action='store_true')
subparser.add_argument("--model-path-base")
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--test-paths", default="data/23.auto.clean")
subparser.add_argument("--test-langs", type=str)
subparser.add_argument(
"--tree-type", choices=["itg", "treebank"], required=True)
subparser.add_argument("--language-embedding", type=str)
subparser.add_argument('--result-prefix', type=str, required=True)
args = parser.parse_args()
args.callback(args)
if __name__ == "__main__":
main()
|
[
"evaluate.calc_frs",
"evaluate.evalb"
] |
[((310, 350), 'logging.getLogger', 'logging.getLogger', (['"""minimal-span-parser"""'], {}), "('minimal-span-parser')\n", (327, 350), False, 'import logging\n'), ((395, 428), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (416, 428), False, 'import logging\n'), ((2034, 2053), 'loader.Loader', 'loader.Loader', (['args'], {}), '(args)\n', (2047, 2053), False, 'import loader\n'), ((2748, 2796), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', (['[parse.START, parse.STOP]'], {}), '([parse.START, parse.STOP])\n', (2769, 2796), False, 'import vocabulary\n'), ((2814, 2873), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', (['[parse.START, parse.STOP, parse.UNK]'], {}), '([parse.START, parse.STOP, parse.UNK])\n', (2835, 2873), False, 'import vocabulary\n'), ((2892, 2919), 'vocabulary.Vocabulary', 'vocabulary.Vocabulary', (['[()]'], {}), '([()])\n', (2913, 2919), False, 'import vocabulary\n'), ((3901, 3925), 'dynet.ParameterCollection', 'dy.ParameterCollection', ([], {}), '()\n', (3923, 3925), True, 'import dynet as dy\n'), ((4062, 4083), 'dynet.AdamTrainer', 'dy.AdamTrainer', (['model'], {}), '(model)\n', (4076, 4083), True, 'import dynet as dy\n'), ((4276, 4287), 'time.time', 'time.time', ([], {}), '()\n', (4285, 4287), False, 'import time\n'), ((6873, 6897), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (6888, 6897), False, 'import itertools\n'), ((10137, 10156), 'loader.Loader', 'loader.Loader', (['args'], {}), '(args)\n', (10150, 10156), False, 'import loader\n'), ((11927, 11988), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank', 'test_predicted'], {}), '(args.evalb_dir, test_treebank, test_predicted)\n', (11941, 11988), False, 'import evaluate\n'), ((12010, 12058), 'evaluate.calc_frs', 'evaluate.calc_frs', (['test_treebank', 'test_predicted'], {}), '(test_treebank, test_predicted)\n', (12027, 12058), False, 'import evaluate\n'), ((13219, 13244), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13242, 13244), False, 'import argparse\n'), ((258, 292), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (272, 292), False, 'import os\n'), ((975, 1214), 'parse.TopDownParser', 'parse.TopDownParser', (['model', 'tag_vocab', 'word_vocab', 'label_vocab', 'args.tag_embedding_dim', 'args.word_embedding_dim', 'args.lstm_layers', 'args.lstm_dim', 'args.label_hidden_dim', 'args.split_hidden_dim', 'args.dropout', 'language_embeddings'], {}), '(model, tag_vocab, word_vocab, label_vocab, args.\n tag_embedding_dim, args.word_embedding_dim, args.lstm_layers, args.\n lstm_dim, args.label_hidden_dim, args.split_hidden_dim, args.dropout,\n language_embeddings)\n', (994, 1214), False, 'import parse\n'), ((1381, 1591), 'parse.ChartParser', 'parse.ChartParser', (['model', 'tag_vocab', 'word_vocab', 'label_vocab', 'args.tag_embedding_dim', 'args.word_embedding_dim', 'args.lstm_layers', 'args.lstm_dim', 'args.label_hidden_dim', 'args.dropout', 'language_embeddings'], {}), '(model, tag_vocab, word_vocab, label_vocab, args.\n tag_embedding_dim, args.word_embedding_dim, args.lstm_layers, args.\n lstm_dim, args.label_hidden_dim, args.dropout, language_embeddings)\n', (1398, 1591), False, 'import parse\n'), ((1770, 1820), 'logging.FileHandler', 'logging.FileHandler', (['f"""{args.model_path_base}.log"""'], {}), "(f'{args.model_path_base}.log')\n", (1789, 1820), False, 'import logging\n'), ((1983, 2014), 'numpy.random.seed', 'np.random.seed', 
(['args.numpy_seed'], {}), '(args.numpy_seed)\n', (1997, 2014), True, 'import numpy as np\n'), ((4471, 4482), 'time.time', 'time.time', ([], {}), '()\n', (4480, 4482), False, 'import time\n'), ((5414, 5436), 'dynet.average', 'dy.average', (['dev_losses'], {}), '(dev_losses)\n', (5424, 5436), True, 'import dynet as dy\n'), ((5507, 5564), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'dev_sample', 'dev_predicted'], {}), '(args.evalb_dir, dev_sample, dev_predicted)\n', (5521, 5564), False, 'import evaluate\n'), ((5590, 5634), 'evaluate.calc_frs', 'evaluate.calc_frs', (['dev_sample', 'dev_predicted'], {}), '(dev_sample, dev_predicted)\n', (5607, 5634), False, 'import evaluate\n'), ((6986, 7019), 'numpy.random.shuffle', 'np.random.shuffle', (['train_treebank'], {}), '(train_treebank)\n', (7003, 7019), True, 'import numpy as np\n'), ((7047, 7058), 'time.time', 'time.time', ([], {}), '()\n', (7056, 7058), False, 'import time\n'), ((10716, 10727), 'time.time', 'time.time', ([], {}), '()\n', (10725, 10727), False, 'import time\n'), ((10831, 10855), 'dynet.ParameterCollection', 'dy.ParameterCollection', ([], {}), '()\n', (10853, 10855), True, 'import dynet as dy\n'), ((10875, 10911), 'dynet.load', 'dy.load', (['args.model_path_base', 'model'], {}), '(args.model_path_base, model)\n', (10882, 10911), True, 'import dynet as dy\n'), ((11278, 11289), 'time.time', 'time.time', ([], {}), '()\n', (11287, 11289), False, 'import time\n'), ((12951, 12983), 'yaml.dump', 'yaml.dump', (['result_dict', 'out_file'], {}), '(result_dict, out_file)\n', (12960, 12983), False, 'import yaml\n'), ((487, 498), 'time.time', 'time.time', ([], {}), '()\n', (496, 498), False, 'import time\n'), ((4563, 4624), 'numpy.random.choice', 'np.random.choice', (['dev_treebank', 'args.dev_limit'], {'replace': '(False)'}), '(dev_treebank, args.dev_limit, replace=False)\n', (4579, 4624), True, 'import numpy as np\n'), ((6816, 6854), 'dynet.save', 'dy.save', (['best_dev_model_path', '[parser]'], {}), '(best_dev_model_path, [parser])\n', (6823, 6854), True, 'import dynet as dy\n'), ((7264, 7277), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (7275, 7277), True, 'import dynet as dy\n'), ((8786, 8810), 'dynet.average', 'dy.average', (['batch_losses'], {}), '(batch_losses)\n', (8796, 8810), True, 'import dynet as dy\n'), ((4825, 4838), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (4836, 4838), True, 'import dynet as dy\n'), ((5148, 5161), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (5159, 5161), True, 'import dynet as dy\n'), ((10518, 10561), 'trees.LeafTreebankNode', 'trees.LeafTreebankNode', (['leaf.tag', 'leaf.word'], {}), '(leaf.tag, leaf.word)\n', (10540, 10561), False, 'import trees\n'), ((10652, 10693), 'trees.InternalTreebankNode', 'trees.InternalTreebankNode', (['"""S"""', 'children'], {}), "('S', children)\n", (10678, 10693), False, 'import trees\n'), ((11420, 11433), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (11431, 11433), True, 'import dynet as dy\n'), ((11704, 11717), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (11715, 11717), True, 'import dynet as dy\n'), ((6314, 6334), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6328, 6334), False, 'import os\n'), ((6476, 6491), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (6485, 6491), False, 'import os\n')]
|
import argparse
import os.path
import time
import numpy as np
import torch
import torch.nn as nn
import sys
from nltk import tree
import evaluate
import treebanks
from benepar import Parser, InputSentence
from benepar.partitioned_transformer import PartitionedMultiHeadAttention
def format_elapsed(start_time):
elapsed_time = int(time.time() - start_time)
minutes, seconds = divmod(elapsed_time, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
elapsed_string = f"{hours}h{minutes:02}m{seconds:02}s"
if days > 0:
elapsed_string = f"{days}d{elapsed_string}"
return elapsed_string
def inputs_from_treebank(treebank, predict_tags):
return [
InputSentence(
words=example.words,
space_after=example.space_after,
tags=None if predict_tags else [tag for _, tag in example.pos()],
escaped_words=list(example.leaves()),
)
for example in treebank
]
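# With predict_tags=False the gold POS tags from the treebank are passed
# through to the parser; otherwise tags=None and the parser predicts its own.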
def run_test(args):
print(f"Loading test trees from {args.test_path}...")
test_treebank = treebanks.load_trees(
args.test_path, args.test_path_text, args.text_processing
)
print(f"Loaded {len(test_treebank):,} test examples.")
print(f"Loading model from {args.model_path}...")
parser = Parser(args.model_path, batch_size=args.batch_size)
print("Parsing test sentences...")
start_time = time.time()
if args.output_path == "-":
output_file = sys.stdout
elif args.output_path:
output_file = open(args.output_path, "w")
else:
output_file = None
test_predicted = []
for predicted_tree in parser.parse_sents(
inputs_from_treebank(test_treebank, predict_tags=args.predict_tags)
):
test_predicted.append(predicted_tree)
if output_file is not None:
            print(predicted_tree.pformat(margin=1e100), file=output_file)
test_fscore = evaluate.evalb(args.evalb_dir, test_treebank.trees, test_predicted)
print(
"test-fscore {} "
"test-elapsed {}".format(
test_fscore,
format_elapsed(start_time),
)
)
def get_compressed_state_dict(model):
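    # Weight-sharing compression: every large weight tensor is snapped to a
    # small codebook (64 or 128 centres). Centres come either from a
    # torch.quantization.HistogramObserver or from uniform buckets over the
    # weight's range; embeddings get special handling, small tensors are
    # skipped, and the unused pooler weights are zeroed outright.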
state_dict = model.state_dict()
for module_name, module in model.named_modules():
if not isinstance(
module, (nn.Linear, nn.Embedding, PartitionedMultiHeadAttention)
):
continue
elif "token_type_embeddings" in module_name:
continue
elif "position_embeddings" in module_name:
continue
elif "f_tag" in module_name or "f_label" in module_name:
continue
elif "project_pretrained" in module_name:
continue
if isinstance(module, PartitionedMultiHeadAttention):
weight_names = [
module_name + "." + param
for param in ("w_qkv_c", "w_qkv_p", "w_o_c", "w_o_p")
]
else:
weight_names = [module_name + ".weight"]
for weight_name in weight_names:
weight = state_dict[weight_name]
if weight.shape.numel() <= 2048:
continue
print(weight_name, ":", weight.shape.numel(), "parameters")
if isinstance(module, nn.Embedding) or "word_embeddings" in module_name or "shared.weight" in weight_name:
is_embedding = True
else:
is_embedding = False
num_steps = 64
use_histogram = True
if "pooler.dense.weight" in weight_name:
weight.data.zero_()
continue
elif "pretrained_model" in weight_name and not is_embedding:
num_steps = 128
if not model.retokenizer.is_t5:
use_histogram = False
elif isinstance(module, PartitionedMultiHeadAttention):
num_steps = 128
if use_histogram:
observer = torch.quantization.HistogramObserver()
observer.dst_nbins = num_steps
observer(weight)
scale, zero_point = observer.calculate_qparams()
scale = scale.item()
zero_point = zero_point.item()
cluster_centers = (
scale * (np.arange(0, 256, 256 / num_steps) - zero_point)[:, None]
)
cluster_centers = np.asarray(cluster_centers, dtype=np.float32)
else:
weight_np = weight.cpu().detach().numpy()
min_val = weight_np.min()
max_val = weight_np.max()
bucket_width = (max_val - min_val) / num_steps
cluster_centers = (
min_val
+ (np.arange(num_steps, dtype=np.float32) + 0.5) * bucket_width
)
cluster_centers = cluster_centers.reshape((-1, 1))
codebook = torch.tensor(
cluster_centers, dtype=weight.dtype, device=weight.device
)
distances = weight.data.reshape((-1, 1)) - codebook.t()
codes = torch.argmin(distances ** 2, dim=-1)
weight_rounded = codebook[codes].reshape(weight.shape)
weight.data.copy_(weight_rounded)
return state_dict
def run_export(args):
if args.test_path is not None:
print("Loading test trees from {}...".format(args.test_path))
test_treebank = treebanks.load_trees(
args.test_path, args.test_path_text, args.text_processing
)
print("Loaded {:,} test examples.".format(len(test_treebank)))
else:
test_treebank = None
print("Loading model from {}...".format(args.model_path))
parser = Parser(args.model_path, batch_size=args.batch_size)
model = parser._parser
if model.pretrained_model is None:
raise ValueError(
"Exporting is only defined when using a pre-trained transformer "
"encoder. For CharLSTM-based model, just distribute the pytorch "
"checkpoint directly. You may manually delete the 'optimizer' "
"field to reduce file size by discarding the optimizer state."
)
if test_treebank is not None:
print("Parsing test sentences (predicting tags)...")
start_time = time.time()
test_inputs = inputs_from_treebank(test_treebank, predict_tags=True)
test_predicted = list(parser.parse_sents(test_inputs))
test_fscore = evaluate.evalb(args.evalb_dir, test_treebank.trees, test_predicted)
test_elapsed = format_elapsed(start_time)
print("test-fscore {} test-elapsed {}".format(test_fscore, test_elapsed))
print("Parsing test sentences (not predicting tags)...")
start_time = time.time()
test_inputs = inputs_from_treebank(test_treebank, predict_tags=False)
notags_test_predicted = list(parser.parse_sents(test_inputs))
notags_test_fscore = evaluate.evalb(
args.evalb_dir, test_treebank.trees, notags_test_predicted
)
notags_test_elapsed = format_elapsed(start_time)
print(
"test-fscore {} test-elapsed {}".format(notags_test_fscore, notags_test_elapsed)
)
print("Exporting tokenizer...")
model.retokenizer.tokenizer.save_pretrained(args.output_dir)
print("Exporting config...")
config = model.pretrained_model.config
config.benepar = model.config
config.save_pretrained(args.output_dir)
if args.compress:
print("Compressing weights...")
state_dict = get_compressed_state_dict(model.cpu())
print("Saving weights...")
else:
print("Exporting weights...")
state_dict = model.cpu().state_dict()
torch.save(state_dict, os.path.join(args.output_dir, "benepar_model.bin"))
del model, parser, state_dict
print("Loading exported model from {}...".format(args.output_dir))
exported_parser = Parser(args.output_dir, batch_size=args.batch_size)
if test_treebank is None:
print()
print("Export complete.")
print("Did not verify model accuracy because no treebank was provided.")
return
print("Parsing test sentences (predicting tags)...")
start_time = time.time()
test_inputs = inputs_from_treebank(test_treebank, predict_tags=True)
exported_predicted = list(exported_parser.parse_sents(test_inputs))
exported_fscore = evaluate.evalb(
args.evalb_dir, test_treebank.trees, exported_predicted
)
exported_elapsed = format_elapsed(start_time)
print(
"exported-fscore {} exported-elapsed {}".format(
exported_fscore, exported_elapsed
)
)
print("Parsing test sentences (not predicting tags)...")
start_time = time.time()
test_inputs = inputs_from_treebank(test_treebank, predict_tags=False)
notags_exported_predicted = list(exported_parser.parse_sents(test_inputs))
notags_exported_fscore = evaluate.evalb(
args.evalb_dir, test_treebank.trees, notags_exported_predicted
)
notags_exported_elapsed = format_elapsed(start_time)
print(
"exported-fscore {} exported-elapsed {}".format(
notags_exported_fscore, notags_exported_elapsed
)
)
print()
print("Export and verification complete.")
fscore_delta = evaluate.FScore(
recall=notags_exported_fscore.recall - notags_test_fscore.recall,
precision=notags_exported_fscore.precision - notags_test_fscore.precision,
fscore=notags_exported_fscore.fscore - notags_test_fscore.fscore,
complete_match=(
notags_exported_fscore.complete_match - notags_test_fscore.complete_match
),
tagging_accuracy=(
exported_fscore.tagging_accuracy - test_fscore.tagging_accuracy
),
)
print("delta-fscore {}".format(fscore_delta))
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
subparser = subparsers.add_parser("test")
subparser.set_defaults(callback=run_test)
subparser.add_argument("--model-path", type=str, required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--test-path", type=str, required=True)
subparser.add_argument("--test-path-text", type=str)
subparser.add_argument("--text-processing", default="default")
subparser.add_argument("--predict-tags", action="store_true")
subparser.add_argument("--output-path", default="")
subparser.add_argument("--batch-size", type=int, default=8)
subparser = subparsers.add_parser("export")
subparser.set_defaults(callback=run_export)
subparser.add_argument("--model-path", type=str, required=True)
subparser.add_argument("--output-dir", type=str, required=True)
subparser.add_argument("--evalb-dir", default="EVALB/")
subparser.add_argument("--test-path", type=str, default=None)
subparser.add_argument("--test-path-text", type=str)
subparser.add_argument("--text-processing", default="default")
subparser.add_argument("--compress", action="store_true")
subparser.add_argument("--batch-size", type=int, default=8)
args = parser.parse_args()
args.callback(args)
if __name__ == "__main__":
main()
|
[
"evaluate.evalb",
"evaluate.FScore"
] |
[((1086, 1165), 'treebanks.load_trees', 'treebanks.load_trees', (['args.test_path', 'args.test_path_text', 'args.text_processing'], {}), '(args.test_path, args.test_path_text, args.text_processing)\n', (1106, 1165), False, 'import treebanks\n'), ((1307, 1358), 'benepar.Parser', 'Parser', (['args.model_path'], {'batch_size': 'args.batch_size'}), '(args.model_path, batch_size=args.batch_size)\n', (1313, 1358), False, 'from benepar import Parser, InputSentence\n'), ((1416, 1427), 'time.time', 'time.time', ([], {}), '()\n', (1425, 1427), False, 'import time\n'), ((1931, 1998), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank.trees', 'test_predicted'], {}), '(args.evalb_dir, test_treebank.trees, test_predicted)\n', (1945, 1998), False, 'import evaluate\n'), ((5762, 5813), 'benepar.Parser', 'Parser', (['args.model_path'], {'batch_size': 'args.batch_size'}), '(args.model_path, batch_size=args.batch_size)\n', (5768, 5813), False, 'from benepar import Parser, InputSentence\n'), ((7979, 8030), 'benepar.Parser', 'Parser', (['args.output_dir'], {'batch_size': 'args.batch_size'}), '(args.output_dir, batch_size=args.batch_size)\n', (7985, 8030), False, 'from benepar import Parser, InputSentence\n'), ((8283, 8294), 'time.time', 'time.time', ([], {}), '()\n', (8292, 8294), False, 'import time\n'), ((8462, 8533), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank.trees', 'exported_predicted'], {}), '(args.evalb_dir, test_treebank.trees, exported_predicted)\n', (8476, 8533), False, 'import evaluate\n'), ((8807, 8818), 'time.time', 'time.time', ([], {}), '()\n', (8816, 8818), False, 'import time\n'), ((9001, 9079), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank.trees', 'notags_exported_predicted'], {}), '(args.evalb_dir, test_treebank.trees, notags_exported_predicted)\n', (9015, 9079), False, 'import evaluate\n'), ((9374, 9792), 'evaluate.FScore', 'evaluate.FScore', ([], {'recall': '(notags_exported_fscore.recall - notags_test_fscore.recall)', 'precision': '(notags_exported_fscore.precision - notags_test_fscore.precision)', 'fscore': '(notags_exported_fscore.fscore - notags_test_fscore.fscore)', 'complete_match': '(notags_exported_fscore.complete_match - notags_test_fscore.complete_match)', 'tagging_accuracy': '(exported_fscore.tagging_accuracy - test_fscore.tagging_accuracy)'}), '(recall=notags_exported_fscore.recall - notags_test_fscore.\n recall, precision=notags_exported_fscore.precision - notags_test_fscore\n .precision, fscore=notags_exported_fscore.fscore - notags_test_fscore.\n fscore, complete_match=notags_exported_fscore.complete_match -\n notags_test_fscore.complete_match, tagging_accuracy=exported_fscore.\n tagging_accuracy - test_fscore.tagging_accuracy)\n', (9389, 9792), False, 'import evaluate\n'), ((9949, 9974), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9972, 9974), False, 'import argparse\n'), ((5474, 5553), 'treebanks.load_trees', 'treebanks.load_trees', (['args.test_path', 'args.test_path_text', 'args.text_processing'], {}), '(args.test_path, args.test_path_text, args.text_processing)\n', (5494, 5553), False, 'import treebanks\n'), ((6340, 6351), 'time.time', 'time.time', ([], {}), '()\n', (6349, 6351), False, 'import time\n'), ((6514, 6581), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank.trees', 'test_predicted'], {}), '(args.evalb_dir, test_treebank.trees, test_predicted)\n', (6528, 6581), False, 'import evaluate\n'), ((6801, 6812), 'time.time', 'time.time', ([], {}), 
'()\n', (6810, 6812), False, 'import time\n'), ((6990, 7064), 'evaluate.evalb', 'evaluate.evalb', (['args.evalb_dir', 'test_treebank.trees', 'notags_test_predicted'], {}), '(args.evalb_dir, test_treebank.trees, notags_test_predicted)\n', (7004, 7064), False, 'import evaluate\n'), ((339, 350), 'time.time', 'time.time', ([], {}), '()\n', (348, 350), False, 'import time\n'), ((4958, 5029), 'torch.tensor', 'torch.tensor', (['cluster_centers'], {'dtype': 'weight.dtype', 'device': 'weight.device'}), '(cluster_centers, dtype=weight.dtype, device=weight.device)\n', (4970, 5029), False, 'import torch\n'), ((5148, 5184), 'torch.argmin', 'torch.argmin', (['(distances ** 2)'], {'dim': '(-1)'}), '(distances ** 2, dim=-1)\n', (5160, 5184), False, 'import torch\n'), ((1866, 1893), 'nltk.tree.pformat', 'tree.pformat', ([], {'margin': '(1e+100)'}), '(margin=1e+100)\n', (1878, 1893), False, 'from nltk import tree\n'), ((3977, 4015), 'torch.quantization.HistogramObserver', 'torch.quantization.HistogramObserver', ([], {}), '()\n', (4013, 4015), False, 'import torch\n'), ((4424, 4469), 'numpy.asarray', 'np.asarray', (['cluster_centers'], {'dtype': 'np.float32'}), '(cluster_centers, dtype=np.float32)\n', (4434, 4469), True, 'import numpy as np\n'), ((4314, 4348), 'numpy.arange', 'np.arange', (['(0)', '(256)', '(256 / num_steps)'], {}), '(0, 256, 256 / num_steps)\n', (4323, 4348), True, 'import numpy as np\n'), ((4788, 4826), 'numpy.arange', 'np.arange', (['num_steps'], {'dtype': 'np.float32'}), '(num_steps, dtype=np.float32)\n', (4797, 4826), True, 'import numpy as np\n')]
|
"""
This script demonstrates initialisation, training, evaluation, and forecasting of ForecastNet. The dataset from the
time-invariance test in Section 6.1 of the ForecastNet paper is used for this demonstration.
Paper:
"ForecastNet: A Time-Variant Deep Feed-Forward Neural Network Architecture for Multi-Step-Ahead Time-Series Forecasting"
by <NAME>, <NAME>, and <NAME>
Link to the paper: https://arxiv.org/abs/2002.04155
"""
import numpy as np
import matplotlib.pyplot as plt
from forecastNet import forecastNet
from train import train
from evaluate import evaluate
from dataHelpers import generate_data, process_data
import pandas as pd
from window_generator import WindowGenerator
import os
OUT_STEPS = 48
shift = 24
input_width = 48
data_path = os.environ.get('DATA_PATH') or '/tmp/data/'
model_path = os.environ.get('MODEL_PATH') or '/tmp/model/'
d15 = pd.read_parquet('/Users/ryadhkhisb/Dev/workspaces/m/finance-scrape/data/nasdaq100_15min.parquet')
column_indices = {name: i for i, name in enumerate(d15.columns)}
d15['close'] = d15['last']
n = len(d15)
train_df = d15[0:int(n * 0.7)]
val_df = d15[int(n * 0.7):int(n * 0.9)]
test_df = d15[int(n * 0.9):]
num_features = d15.shape[1]
train_mean = train_df.mean()
train_std = train_df.std()
train_df = (train_df - train_mean) / train_std
val_df = (val_df - train_mean) / train_std
test_df = (test_df - train_mean) / train_std
label_columns = train_df.columns.tolist()
num_labels = len(label_columns)
wide_window = WindowGenerator(input_width=input_width, label_width=OUT_STEPS, shift=shift, label_columns=label_columns,
train_df=train_df, val_df=val_df, test_df=test_df)
test_data = [x for x in wide_window.test.as_numpy_iterator()]
train_data = [x for x in wide_window.train.as_numpy_iterator()]
valid_data = [x for x in wide_window.val.as_numpy_iterator()]
test_y = np.concatenate(np.array([x[1] for x in test_data]))
test_x = np.concatenate(np.array([x[0] for x in test_data]))
train_y = np.concatenate(np.array([x[1] for x in train_data]))
train_x = np.concatenate(np.array([x[0] for x in train_data]))
valid_y = np.concatenate(np.array([x[1] for x in valid_data]))
valid_x = np.concatenate(np.array([x[0] for x in valid_data]))
test_y = np.swapaxes(test_y, 1, 0)
test_x = np.swapaxes(test_x, 1, 0)
train_y = np.swapaxes(train_y, 1, 0)
train_x = np.swapaxes(train_x, 1, 0)
valid_y = np.swapaxes(valid_y, 1, 0)
valid_x = np.swapaxes(valid_x, 1, 0)
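# The swaps above convert the batch-major (batch, time, features) windows
# produced by the WindowGenerator datasets into the time-major
# (time, batch, features) layout that ForecastNet and the plotting code below
# (which index time on axis 0) expect.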
# Model parameters
model_type = 'conv'  # 'dense', 'conv', 'dense2', or 'conv2'
hidden_dim = 24
input_dim = train_x.shape[-1]
output_dim = train_x.shape[-1]
learning_rate = 0.0001
n_epochs = 2
batch_size = 64
# Initialise model
fcstnet = forecastNet(in_seq_length=input_width, out_seq_length=input_width, input_dim=input_dim,
                      hidden_dim=hidden_dim, output_dim=output_dim, model_type=model_type, batch_size=batch_size,
                      n_epochs=n_epochs, learning_rate=learning_rate, save_file='./forecastnet.pt')
# Train the model
training_costs, validation_costs = train(fcstnet, train_x, train_y, valid_x, valid_y, restore_session=False)
# Plot the training curves
plt.figure()
plt.plot(training_costs)
plt.plot(validation_costs)
# Evaluate the model
mase, smape, nrmse = evaluate(fcstnet, test_x, test_y, return_lists=False)
print('')
print('MASE:', mase)
print('SMAPE:', smape)
print('NRMSE:', nrmse)
# Generate and plot forecasts for various samples from the test dataset
samples = [0, 10, 20]
# Models with a Gaussian Mixture Density Component output
if model_type == 'dense' or model_type == 'conv':
# Generate a set of n_samples forecasts (Monte Carlo Forecasts)
num_forecasts = 10
y_pred = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
mu = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
sigma = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
for i in range(num_forecasts):
y_pred[:, :, :, i], mu[:, :, :, i], sigma[:, :, :, i] = fcstnet.forecast(test_x[:, samples, :])
s_mean = np.mean(y_pred, axis=3)
s_std = np.std(y_pred, axis=3)
botVarLine = s_mean - s_std
topVarLine = s_mean + s_std
for i in range(len(samples)):
plt.figure()
plt.plot(np.arange(0, input_width), test_x[:, samples[i], 0],
'-o', label='input')
plt.plot(np.arange(input_width, input_width + input_width), test_y[:, samples[i], 0],
'-o', label='data')
plt.plot(np.arange(input_width, input_width + input_width), s_mean[:, i, 0],
'-*', label='forecast')
plt.fill_between(np.arange(input_width, input_width + input_width),
botVarLine[:, i, 0], topVarLine[:, i, 0],
color='gray', alpha=0.3, label='Uncertainty')
plt.legend()
# Models with a linear output
elif model_type == 'dense2' or model_type == 'conv2':
# Generate a forecast
y_pred = fcstnet.forecast(test_x[:,samples,:])
for i in range(len(samples)):
# Plot the forecast
plt.figure()
plt.plot(np.arange(0, fcstnet.in_seq_length),
test_x[:, samples[i], 0],
'o-', label='test_data')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
test_y[:, samples[i], 0],
'o-')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
y_pred[:, i, 0],
'*-', linewidth=0.7, label='mean')
plt.show()
|
[
"evaluate.evaluate"
] |
[((868, 975), 'pandas.read_parquet', 'pd.read_parquet', (['"""/Users/ryadhkhisb/Dev/workspaces/m/finance-scrape/data/nasdaq100_15min.parquet"""'], {}), "(\n '/Users/ryadhkhisb/Dev/workspaces/m/finance-scrape/data/nasdaq100_15min.parquet'\n )\n", (883, 975), True, 'import pandas as pd\n'), ((1486, 1651), 'window_generator.WindowGenerator', 'WindowGenerator', ([], {'input_width': 'input_width', 'label_width': 'OUT_STEPS', 'shift': 'shift', 'label_columns': 'label_columns', 'train_df': 'train_df', 'val_df': 'val_df', 'test_df': 'test_df'}), '(input_width=input_width, label_width=OUT_STEPS, shift=shift,\n label_columns=label_columns, train_df=train_df, val_df=val_df, test_df=\n test_df)\n', (1501, 1651), False, 'from window_generator import WindowGenerator\n'), ((2246, 2271), 'numpy.swapaxes', 'np.swapaxes', (['test_y', '(1)', '(0)'], {}), '(test_y, 1, 0)\n', (2257, 2271), True, 'import numpy as np\n'), ((2281, 2306), 'numpy.swapaxes', 'np.swapaxes', (['test_x', '(1)', '(0)'], {}), '(test_x, 1, 0)\n', (2292, 2306), True, 'import numpy as np\n'), ((2317, 2343), 'numpy.swapaxes', 'np.swapaxes', (['train_y', '(1)', '(0)'], {}), '(train_y, 1, 0)\n', (2328, 2343), True, 'import numpy as np\n'), ((2354, 2380), 'numpy.swapaxes', 'np.swapaxes', (['train_x', '(1)', '(0)'], {}), '(train_x, 1, 0)\n', (2365, 2380), True, 'import numpy as np\n'), ((2391, 2417), 'numpy.swapaxes', 'np.swapaxes', (['valid_y', '(1)', '(0)'], {}), '(valid_y, 1, 0)\n', (2402, 2417), True, 'import numpy as np\n'), ((2428, 2454), 'numpy.swapaxes', 'np.swapaxes', (['valid_x', '(1)', '(0)'], {}), '(valid_x, 1, 0)\n', (2439, 2454), True, 'import numpy as np\n'), ((2694, 2963), 'forecastNet.forecastNet', 'forecastNet', ([], {'in_seq_length': 'input_width', 'out_seq_length': 'input_width', 'input_dim': 'input_dim', 'hidden_dim': 'hidden_dim', 'output_dim': 'output_dim', 'model_type': 'model_type', 'batch_size': 'batch_size', 'n_epochs': 'n_epochs', 'learning_rate': 'learning_rate', 'save_file': '"""./forecastnet.pt"""'}), "(in_seq_length=input_width, out_seq_length=input_width,\n input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim,\n model_type=model_type, batch_size=batch_size, n_epochs=n_epochs,\n learning_rate=learning_rate, save_file='./forecastnet.pt')\n", (2705, 2963), False, 'from forecastNet import forecastNet\n'), ((3056, 3129), 'train.train', 'train', (['fcstnet', 'train_x', 'train_y', 'valid_x', 'valid_y'], {'restore_session': '(False)'}), '(fcstnet, train_x, train_y, valid_x, valid_y, restore_session=False)\n', (3061, 3129), False, 'from train import train\n'), ((3157, 3169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3167, 3169), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3194), 'matplotlib.pyplot.plot', 'plt.plot', (['training_costs'], {}), '(training_costs)\n', (3178, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3221), 'matplotlib.pyplot.plot', 'plt.plot', (['validation_costs'], {}), '(validation_costs)\n', (3203, 3221), True, 'import matplotlib.pyplot as plt\n'), ((3265, 3318), 'evaluate.evaluate', 'evaluate', (['fcstnet', 'test_x', 'test_y'], {'return_lists': '(False)'}), '(fcstnet, test_x, test_y, return_lists=False)\n', (3273, 3318), False, 'from evaluate import evaluate\n'), ((5612, 5622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5620, 5622), True, 'import matplotlib.pyplot as plt\n'), ((759, 786), 'os.environ.get', 'os.environ.get', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (773, 786), False, 'import os\n'), ((816, 844), 'os.environ.get', 
'os.environ.get', (['"""MODEL_PATH"""'], {}), "('MODEL_PATH')\n", (830, 844), False, 'import os\n'), ((1886, 1921), 'numpy.array', 'np.array', (['[x[1] for x in test_data]'], {}), '([x[1] for x in test_data])\n', (1894, 1921), True, 'import numpy as np\n'), ((1947, 1982), 'numpy.array', 'np.array', (['[x[0] for x in test_data]'], {}), '([x[0] for x in test_data])\n', (1955, 1982), True, 'import numpy as np\n'), ((2009, 2045), 'numpy.array', 'np.array', (['[x[1] for x in train_data]'], {}), '([x[1] for x in train_data])\n', (2017, 2045), True, 'import numpy as np\n'), ((2072, 2108), 'numpy.array', 'np.array', (['[x[0] for x in train_data]'], {}), '([x[0] for x in train_data])\n', (2080, 2108), True, 'import numpy as np\n'), ((2135, 2171), 'numpy.array', 'np.array', (['[x[1] for x in valid_data]'], {}), '([x[1] for x in valid_data])\n', (2143, 2171), True, 'import numpy as np\n'), ((2198, 2234), 'numpy.array', 'np.array', (['[x[0] for x in valid_data]'], {}), '([x[0] for x in valid_data])\n', (2206, 2234), True, 'import numpy as np\n'), ((4098, 4121), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(3)'}), '(y_pred, axis=3)\n', (4105, 4121), True, 'import numpy as np\n'), ((4134, 4156), 'numpy.std', 'np.std', (['y_pred'], {'axis': '(3)'}), '(y_pred, axis=3)\n', (4140, 4156), True, 'import numpy as np\n'), ((4264, 4276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4274, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4876), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4874, 4876), True, 'import matplotlib.pyplot as plt\n'), ((4294, 4319), 'numpy.arange', 'np.arange', (['(0)', 'input_width'], {}), '(0, input_width)\n', (4303, 4319), True, 'import numpy as np\n'), ((4402, 4451), 'numpy.arange', 'np.arange', (['input_width', '(input_width + input_width)'], {}), '(input_width, input_width + input_width)\n', (4411, 4451), True, 'import numpy as np\n'), ((4533, 4582), 'numpy.arange', 'np.arange', (['input_width', '(input_width + input_width)'], {}), '(input_width, input_width + input_width)\n', (4542, 4582), True, 'import numpy as np\n'), ((4667, 4716), 'numpy.arange', 'np.arange', (['input_width', '(input_width + input_width)'], {}), '(input_width, input_width + input_width)\n', (4676, 4716), True, 'import numpy as np\n'), ((5109, 5121), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5119, 5121), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5174), 'numpy.arange', 'np.arange', (['(0)', 'fcstnet.in_seq_length'], {}), '(0, fcstnet.in_seq_length)\n', (5148, 5174), True, 'import numpy as np\n'), ((5278, 5363), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (5287, 5363), True, 'import numpy as np\n'), ((5443, 5528), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (5452, 5528), True, 'import numpy as np\n')]
|
import data as Data
from model import Model
from loss import PairwiseRankingLoss as Loss
from optimizer import Optimizer
import evaluate
from settings import config
# load data
data = Data.Loader()
data.create_dictionaries()
data.save_dictionaries()
# load evaluation data
evaluation_data = Data.Loader()
evaluation_data.load_dictionaries()
# init model
model = Model()
# init loss
loss = Loss()
# init optimizer
optimizer = Optimizer(model)
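# Flow implemented below: each epoch embeds batches of (captions,
# image_features) with the model, scores them with the pairwise ranking loss,
# and backpropagates; after every epoch the model is evaluated on Recall@K and
# saved whenever the score improves.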
def evaluate_model():
"""
Evaluate the model and print the Recall@K score.
"""
print("\n[RESULTS]")
text_to_image, image_to_text = [], []
# process in batches
for captions, image_features in evaluation_data:
# pass batch through model
captions, image_features = model(captions, image_features)
# evaluate text to image score
text_to_image.append(evaluate.text_to_image(captions, image_features))
image_to_text.append(evaluate.image_to_text(captions, image_features))
    # retrieve score
score = evaluate.recall_score(text_to_image, image_to_text)
# save model if better
model.save_if_better(score)
def train_model():
"""
Train the model with parameters from settings.py
"""
# train model
for epoch in range(1, config["num_epochs"]+1):
print("\n[EPOCH]", epoch)
# process in batches
for captions, image_features in data:
# pass batch through model
captions, image_features = model(captions, image_features)
# cost
cost = loss(captions, image_features)
# backprop
optimizer.backprop(cost)
evaluate_model()
if __name__ == '__main__':
train_model()
evaluate_model()
print("\n[DONE] Script complete.")
|
[
"evaluate.recall_score",
"evaluate.text_to_image",
"evaluate.image_to_text"
] |
[((185, 198), 'data.Loader', 'Data.Loader', ([], {}), '()\n', (196, 198), True, 'import data as Data\n'), ((293, 306), 'data.Loader', 'Data.Loader', ([], {}), '()\n', (304, 306), True, 'import data as Data\n'), ((365, 372), 'model.Model', 'Model', ([], {}), '()\n', (370, 372), False, 'from model import Model\n'), ((393, 399), 'loss.PairwiseRankingLoss', 'Loss', ([], {}), '()\n', (397, 399), True, 'from loss import PairwiseRankingLoss as Loss\n'), ((430, 446), 'optimizer.Optimizer', 'Optimizer', (['model'], {}), '(model)\n', (439, 446), False, 'from optimizer import Optimizer\n'), ((969, 1020), 'evaluate.recall_score', 'evaluate.recall_score', (['text_to_image', 'image_to_text'], {}), '(text_to_image, image_to_text)\n', (990, 1020), False, 'import evaluate\n'), ((816, 864), 'evaluate.text_to_image', 'evaluate.text_to_image', (['captions', 'image_features'], {}), '(captions, image_features)\n', (838, 864), False, 'import evaluate\n'), ((889, 937), 'evaluate.image_to_text', 'evaluate.image_to_text', (['captions', 'image_features'], {}), '(captions, image_features)\n', (911, 937), False, 'import evaluate\n')]
|
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
import torch.utils.data.distributed
import argparse
import os
import json
from models.StyleSpeech import StyleSpeech
from models.Discriminators import Discriminator
from dataloader import prepare_dataloader
from optimizer import ScheduledOptim
from evaluate import evaluate
import utils
def load_checkpoint(checkpoint_path, model, discriminator, G_optim, D_optim, rank, distributed=False):
assert os.path.isfile(checkpoint_path)
print("Starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cuda:{}'.format(rank))
if 'model' in checkpoint_dict:
if distributed:
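            # DistributedDataParallel registers parameters under a "module."
            # prefix, so checkpoint keys are remapped before loading.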
state_dict = {}
for k,v in checkpoint_dict['model'].items():
state_dict['module.{}'.format(k)] = v
model.load_state_dict(state_dict)
else:
model.load_state_dict(checkpoint_dict['model'])
print('Model is loaded!')
if 'discriminator' in checkpoint_dict:
if distributed:
state_dict = {}
for k,v in checkpoint_dict['discriminator'].items():
state_dict['module.{}'.format(k)] = v
discriminator.load_state_dict(state_dict)
else:
discriminator.load_state_dict(checkpoint_dict['discriminator'])
print('Discriminator is loaded!')
if 'G_optim' in checkpoint_dict or 'optimizer' in checkpoint_dict:
if 'optimizer' in checkpoint_dict:
G_optim.load_state_dict(checkpoint_dict['optimizer'])
if 'G_optim' in checkpoint_dict:
G_optim.load_state_dict(checkpoint_dict['G_optim'])
print('G_optim is loaded!')
if 'D_optim' in checkpoint_dict:
D_optim.load_state_dict(checkpoint_dict['D_optim'])
print('D_optim is loaded!')
current_step = checkpoint_dict['step'] + 1
del checkpoint_dict
return model, discriminator, G_optim, D_optim, current_step
def main(rank, args, c):
print('Use GPU: {} for training'.format(rank))
ngpus = args.ngpus
if args.distributed:
torch.cuda.set_device(rank % ngpus)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=rank)
# Define model & loss
model = StyleSpeech(c).cuda()
discriminator = Discriminator(c).cuda()
num_param = utils.get_param_num(model)
D_num_param = utils.get_param_num(discriminator)
if rank==0:
print('Number of Meta-StyleSpeech Parameters:', num_param)
print('Number of Discriminator Parameters:', D_num_param)
with open(os.path.join(args.save_path, "model.txt"), "w") as f_log:
f_log.write(str(model))
f_log.write(str(discriminator))
print("Model Has Been Defined")
model_without_ddp = model
discriminator_without_ddp = discriminator
if args.distributed:
c.meta_batch_size = c.meta_batch_size // ngpus
model = nn.parallel.DistributedDataParallel(model, device_ids=[rank])
model_without_ddp = model.module
discriminator = nn.parallel.DistributedDataParallel(discriminator, device_ids=[rank])
discriminator_without_ddp = discriminator.module
# Optimizer
G_optim = torch.optim.Adam(model.parameters(), betas=c.betas, eps=c.eps)
D_optim = torch.optim.Adam(discriminator.parameters(), lr=2e-4, betas=c.betas, eps=c.eps)
# Loss
Loss = model_without_ddp.get_criterion()
adversarial_loss = discriminator_without_ddp.get_criterion()
print("Optimizer and Loss Function Defined.")
# Get dataset
data_loader = prepare_dataloader(args.data_path, "train.txt", batch_size=c.meta_batch_size, meta_learning=True, seed=rank)
print("Data Loader is Prepared")
# Load checkpoint if exists
if args.checkpoint_path is not None:
assert os.path.exists(args.checkpoint_path)
model, discriminator, G_optim, D_optim, current_step = load_checkpoint(
args.checkpoint_path, model, discriminator, G_optim, D_optim, rank, args.distributed)
print("\n---Model Restored at Step {}---\n".format(current_step))
else:
print("\n---Start New Training---\n")
current_step = 0
if rank == 0:
checkpoint_path = os.path.join(args.save_path, 'ckpt')
os.makedirs(checkpoint_path, exist_ok=True)
# scheduled optimizer
G_optim = ScheduledOptim(G_optim, c.decoder_hidden, c.n_warm_up_step, current_step)
# Init logger
if rank == 0:
log_path = os.path.join(args.save_path, 'log')
logger = SummaryWriter(os.path.join(log_path, 'board'))
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write("Dataset :{}\n Number of Parameters: {}\n".format(c.dataset, num_param))
# Init synthesis directory
if rank == 0:
synth_path = os.path.join(args.save_path, 'synth')
os.makedirs(synth_path, exist_ok=True)
model.train()
while current_step < args.max_iter:
# Get Training Loader
for idx, batch in enumerate(data_loader):
if current_step == args.max_iter:
break
losses = {}
#### Generator ####
G_optim.zero_grad()
# Get Support Data
sid, text, mel_target, D, log_D, f0, energy, \
src_len, mel_len, max_src_len, max_mel_len = model_without_ddp.parse_batch(batch)
# Support Forward
mel_output, src_output, style_vector, log_duration_output, f0_output, energy_output, src_mask, mel_mask, _ = model(
text, src_len, mel_target, mel_len, D, f0, energy, max_src_len, max_mel_len)
src_target, _, _ = model_without_ddp.variance_adaptor.length_regulator(src_output, D)
# Reconstruction loss
mel_loss, d_loss, f_loss, e_loss = Loss(mel_output, mel_target,
log_duration_output, log_D, f0_output, f0, energy_output, energy, src_len, mel_len)
losses['G_recon'] = mel_loss
losses['d_loss'] = d_loss
losses['f_loss'] = f_loss
losses['e_loss'] = e_loss
#### META LEARNING ####
# Get query text
B = mel_target.shape[0]
perm_idx = torch.randperm(B)
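            # Meta-learning episode: pair each support style vector with a
            # different sentence from the same batch, forcing the generator to
            # synthesize unseen (style, text) combinations.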
q_text, q_src_len = text[perm_idx], src_len[perm_idx]
# Generate query speech
q_mel_output, q_src_output, q_log_duration_output, \
_, _, q_src_mask, q_mel_mask, q_mel_len = model_without_ddp.inference(style_vector, q_text, q_src_len)
            # Regulate the length of the query src sequence
q_duration_rounded = torch.clamp(torch.round(torch.exp(q_log_duration_output.detach())-1.), min=0)
q_duration = q_duration_rounded.masked_fill(q_src_mask, 0).long()
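            # exp(x) - 1 inverts the log-duration parameterization; clamping
            # keeps durations non-negative, and padded source positions are
            # zeroed through the mask.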
q_src, _, _ = model_without_ddp.variance_adaptor.length_regulator(q_src_output, q_duration)
            # Adversarial loss
t_val, s_val, _= discriminator(q_mel_output, q_src, None, sid, q_mel_mask)
losses['G_GAN_query_t'] = adversarial_loss(t_val, is_real=True)
losses['G_GAN_query_s'] = adversarial_loss(s_val, is_real=True)
# Total generator loss
alpha = 10.0
G_loss = alpha*losses['G_recon'] + losses['d_loss'] + losses['f_loss'] + losses['e_loss'] + \
losses['G_GAN_query_t'] + losses['G_GAN_query_s']
# Backward loss
G_loss.backward()
# Update weights
G_optim.step_and_update_lr()
#### Discriminator ####
D_optim.zero_grad()
# Real
real_t_pred, real_s_pred, cls_loss = discriminator(
mel_target, src_target.detach(), style_vector.detach(), sid, mask=mel_mask)
# Fake
fake_t_pred, fake_s_pred, _ = discriminator(
q_mel_output.detach(), q_src.detach(), None, sid, mask=q_mel_mask)
losses['D_t_loss'] = adversarial_loss(real_t_pred, is_real=True) + adversarial_loss(fake_t_pred, is_real=False)
losses['D_s_loss'] = adversarial_loss(real_s_pred, is_real=True) + adversarial_loss(fake_s_pred, is_real=False)
losses['cls_loss'] = cls_loss
# Total discriminator Loss
D_loss = losses['D_t_loss'] + losses['D_s_loss'] + losses['cls_loss']
# Backward
D_loss.backward()
# Update weights
D_optim.step()
# Print log
if current_step % args.log_step == 0 and current_step != 0 and rank == 0 :
m_l = losses['G_recon'].item()
d_l = losses['d_loss'].item()
f_l = losses['f_loss'].item()
e_l = losses['e_loss'].item()
g_t_l = losses['G_GAN_query_t'].item()
g_s_l = losses['G_GAN_query_s'].item()
d_t_l = losses['D_t_loss'].item() / 2
d_s_l = losses['D_s_loss'].item() / 2
cls_l = losses['cls_loss'].item()
str1 = "Step [{}/{}]:".format(current_step, args.max_iter)
str2 = "Mel Loss: {:.4f},\n" \
"Duration Loss: {:.4f}, F0 Loss: {:.4f}, Energy Loss: {:.4f}\n" \
"T G Loss: {:.4f}, T D Loss: {:.4f}, S G Loss: {:.4f}, S D Loss: {:.4f} \n" \
"cls_Loss: {:.4f};" \
.format(m_l, d_l, f_l, e_l, g_t_l, d_t_l, g_s_l, d_s_l, cls_l)
print(str1 + "\n" + str2 +"\n")
with open(os.path.join(log_path, "log.txt"), "a") as f_log:
f_log.write(str1 + "\n" + str2 +"\n")
logger.add_scalar('Train/mel_loss', m_l, current_step)
logger.add_scalar('Train/duration_loss', d_l, current_step)
logger.add_scalar('Train/f0_loss', f_l, current_step)
logger.add_scalar('Train/energy_loss', e_l, current_step)
logger.add_scalar('Train/G_t_loss', g_t_l, current_step)
logger.add_scalar('Train/D_t_loss', d_t_l, current_step)
logger.add_scalar('Train/G_s_loss', g_s_l, current_step)
logger.add_scalar('Train/D_s_loss', d_s_l, current_step)
logger.add_scalar('Train/cls_loss', cls_l, current_step)
# Save Checkpoint
if current_step % args.save_step == 0 and current_step != 0 and rank == 0:
torch.save({'model': model_without_ddp.state_dict(),
'discriminator': discriminator_without_ddp.state_dict(),
'G_optim': G_optim.state_dict(),'D_optim': D_optim.state_dict(),
'step': current_step},
os.path.join(checkpoint_path, 'checkpoint_{}.pth.tar'.format(current_step)))
print("*** Save Checkpoint ***")
print("Save model at step {}...\n".format(current_step))
if current_step % args.synth_step == 0 and current_step != 0 and rank == 0:
length = mel_len[0].item()
mel_target = mel_target[0, :length].detach().cpu().transpose(0, 1)
mel = mel_output[0, :length].detach().cpu().transpose(0, 1)
q_length = q_mel_len[0].item()
q_mel = q_mel_output[0, :q_length].detach().cpu().transpose(0, 1)
# plotting
utils.plot_data([q_mel.numpy(), mel.numpy(), mel_target.numpy()],
['Query Spectrogram', 'Recon Spectrogram', 'Ground-Truth Spectrogram'], filename=os.path.join(synth_path, 'step_{}.png'.format(current_step)))
print("Synth audios at step {}...\n".format(current_step))
# Evaluate
if current_step % args.eval_step == 0 and current_step != 0 and rank == 0:
model.eval()
with torch.no_grad():
m_l, d_l, f_l, e_l = evaluate(args, model_without_ddp, current_step)
str_v = "*** Validation ***\n" \
"Meta-StyleSpeech Step {},\n" \
"Mel Loss: {}\nDuration Loss:{}\nF0 Loss: {}\nEnergy Loss: {}" \
.format(current_step, m_l, d_l, f_l, e_l)
print(str_v + "\n" )
with open(os.path.join(log_path, "eval.txt"), "a") as f_log:
f_log.write(str_v + "\n")
logger.add_scalar('Validation/mel_loss', m_l, current_step)
logger.add_scalar('Validation/duration_loss', d_l, current_step)
logger.add_scalar('Validation/f0_loss', f_l, current_step)
logger.add_scalar('Validation/energy_loss', e_l, current_step)
model.train()
current_step += 1
if rank == 0:
print("Training Done at Step : {}".format(current_step))
torch.save({'model': model_without_ddp.state_dict(),
'discriminator': discriminator_without_ddp.state_dict(),
'G_optim': G_optim.state_dict(), 'D_optim': D_optim.state_dict(),
'step': current_step},
os.path.join(checkpoint_path, 'checkpoint_last_{}.pth.tar'.format(current_step)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='dataset/LibriTTS/preprocessed')
parser.add_argument('--save_path', default='exp_meta_stylespeech')
parser.add_argument('--config', default='configs/config.json')
parser.add_argument('--max_iter', default=100000, type=int)
parser.add_argument('--save_step', default=5000, type=int)
parser.add_argument('--synth_step', default=1000, type=int)
parser.add_argument('--eval_step', default=5000, type=int)
parser.add_argument('--log_step', default=100, type=int)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pretrained model')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:3456', type=str, help='url for setting up distributed training')
parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int, help='distributed backend')
parser.add_argument('--dist-backend', default='nccl', type=str, help='node rank for distributed training')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
with open(args.config) as f:
data = f.read()
json_config = json.loads(data)
config = utils.AttrDict(json_config)
utils.build_env(args.config, 'config.json', args.save_path)
ngpus = torch.cuda.device_count()
args.ngpus = ngpus
args.distributed = ngpus > 1
if args.distributed:
args.world_size = ngpus
mp.spawn(main, nprocs=ngpus, args=(args, config))
else:
main(0, args, config)
|
[
"evaluate.evaluate"
] |
[((610, 641), 'os.path.isfile', 'os.path.isfile', (['checkpoint_path'], {}), '(checkpoint_path)\n', (624, 641), False, 'import os\n'), ((2609, 2635), 'utils.get_param_num', 'utils.get_param_num', (['model'], {}), '(model)\n', (2628, 2635), False, 'import utils\n'), ((2655, 2689), 'utils.get_param_num', 'utils.get_param_num', (['discriminator'], {}), '(discriminator)\n', (2674, 2689), False, 'import utils\n'), ((3891, 4004), 'dataloader.prepare_dataloader', 'prepare_dataloader', (['args.data_path', '"""train.txt"""'], {'batch_size': 'c.meta_batch_size', 'meta_learning': '(True)', 'seed': 'rank'}), "(args.data_path, 'train.txt', batch_size=c.\n meta_batch_size, meta_learning=True, seed=rank)\n", (3909, 4004), False, 'from dataloader import prepare_dataloader\n'), ((4700, 4773), 'optimizer.ScheduledOptim', 'ScheduledOptim', (['G_optim', 'c.decoder_hidden', 'c.n_warm_up_step', 'current_step'], {}), '(G_optim, c.decoder_hidden, c.n_warm_up_step, current_step)\n', (4714, 4773), False, 'from optimizer import ScheduledOptim\n'), ((13965, 13990), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13988, 13990), False, 'import argparse\n'), ((15231, 15247), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (15241, 15247), False, 'import json\n'), ((15262, 15289), 'utils.AttrDict', 'utils.AttrDict', (['json_config'], {}), '(json_config)\n', (15276, 15289), False, 'import utils\n'), ((15295, 15354), 'utils.build_env', 'utils.build_env', (['args.config', '"""config.json"""', 'args.save_path'], {}), "(args.config, 'config.json', args.save_path)\n", (15310, 15354), False, 'import utils\n'), ((15370, 15395), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15393, 15395), False, 'import torch\n'), ((2321, 2356), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(rank % ngpus)'], {}), '(rank % ngpus)\n', (2342, 2356), False, 'import torch\n'), ((2366, 2487), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': 'args.dist_backend', 'init_method': 'args.dist_url', 'world_size': 'args.world_size', 'rank': 'rank'}), '(backend=args.dist_backend, init_method=args.\n dist_url, world_size=args.world_size, rank=rank)\n', (2389, 2487), True, 'import torch.distributed as dist\n'), ((3227, 3288), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[rank]'}), '(model, device_ids=[rank])\n', (3262, 3288), True, 'import torch.nn as nn\n'), ((3356, 3425), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['discriminator'], {'device_ids': '[rank]'}), '(discriminator, device_ids=[rank])\n', (3391, 3425), True, 'import torch.nn as nn\n'), ((4132, 4168), 'os.path.exists', 'os.path.exists', (['args.checkpoint_path'], {}), '(args.checkpoint_path)\n', (4146, 4168), False, 'import os\n'), ((4562, 4598), 'os.path.join', 'os.path.join', (['args.save_path', '"""ckpt"""'], {}), "(args.save_path, 'ckpt')\n", (4574, 4598), False, 'import os\n'), ((4608, 4651), 'os.makedirs', 'os.makedirs', (['checkpoint_path'], {'exist_ok': '(True)'}), '(checkpoint_path, exist_ok=True)\n', (4619, 4651), False, 'import os\n'), ((4838, 4873), 'os.path.join', 'os.path.join', (['args.save_path', '"""log"""'], {}), "(args.save_path, 'log')\n", (4850, 4873), False, 'import os\n'), ((5181, 5218), 'os.path.join', 'os.path.join', (['args.save_path', '"""synth"""'], {}), "(args.save_path, 'synth')\n", (5193, 5218), False, 'import os\n'), ((5228, 5266), 'os.makedirs', 'os.makedirs', (['synth_path'], {'exist_ok': '(True)'}), '(synth_path, exist_ok=True)\n', (5239, 5266), False, 'import os\n'), ((15524, 15573), 'torch.multiprocessing.spawn', 'mp.spawn', (['main'], {'nprocs': 'ngpus', 'args': '(args, config)'}), '(main, nprocs=ngpus, args=(args, config))\n', (15532, 15573), True, 'import torch.multiprocessing as mp\n'), ((2525, 2539), 'models.StyleSpeech.StyleSpeech', 'StyleSpeech', (['c'], {}), '(c)\n', (2536, 2539), False, 'from models.StyleSpeech import StyleSpeech\n'), ((2568, 2584), 'models.Discriminators.Discriminator', 'Discriminator', (['c'], {}), '(c)\n', (2581, 2584), False, 'from models.Discriminators import Discriminator\n'), ((4906, 4937), 'os.path.join', 'os.path.join', (['log_path', '"""board"""'], {}), "(log_path, 'board')\n", (4918, 4937), False, 'import os\n'), ((6668, 6685), 'torch.randperm', 'torch.randperm', (['B'], {}), '(B)\n', (6682, 6685), False, 'import torch\n'), ((2863, 2904), 'os.path.join', 'os.path.join', (['args.save_path', '"""model.txt"""'], {}), "(args.save_path, 'model.txt')\n", (2875, 2904), False, 'import os\n'), ((4958, 4991), 'os.path.join', 'os.path.join', (['log_path', '"""log.txt"""'], {}), "(log_path, 'log.txt')\n", (4970, 4991), False, 'import os\n'), ((12493, 12508), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12506, 12508), False, 'import torch\n'), ((12552, 12599), 'evaluate.evaluate', 'evaluate', (['args', 'model_without_ddp', 'current_step'], {}), '(args, model_without_ddp, current_step)\n', (12560, 12599), False, 'from evaluate import evaluate\n'), ((10077, 10110), 'os.path.join', 'os.path.join', (['log_path', '"""log.txt"""'], {}), "(log_path, 'log.txt')\n", (10089, 10110), False, 'import os\n'), ((12953, 12987), 'os.path.join', 'os.path.join', (['log_path', '"""eval.txt"""'], {}), "(log_path, 'eval.txt')\n", (12965, 12987), False, 'import os\n')]
|
from pathlib import Path
import sys
sys.path.append(str(Path().absolute()))
import logging
log_level = "INFO"
logging.basicConfig(
filename=str(snakemake.log),
filemode="w",
level=log_level,
format="[%(asctime)s]:%(levelname)s: %(message)s",
datefmt="%d/%m/%Y %I:%M:%S %p",
)
import pysam
from evaluate.masker import RecallMasker
from evaluate.classifier import RecallClassifier
from evaluate.reporter import RecallReporter
# setup
mask_filepath = snakemake.input.mask
sams_filepath = snakemake.input.sams
sample_id = snakemake.wildcards.sample_id
variant_call_recall_reports = snakemake.output.reports
gt_conf_percentiles = snakemake.params.gt_conf_percentiles
# API usage
logging.info(f"Creating masker from {mask_filepath}")
with open(mask_filepath) as bed:
masker = RecallMasker.from_bed(bed)
for sam_filepath, variant_call_recall_report, gt_conf_percentile in zip(sams_filepath, variant_call_recall_reports, gt_conf_percentiles):
logging.info(f"Masking SAM records")
with pysam.AlignmentFile(sam_filepath) as sam:
records = masker.filter_records(sam)
logging.info("Creating classifier")
classifier = RecallClassifier(sam=records, name=sample_id)
logging.info("Creating reporter")
reporter = RecallReporter(classifiers=[classifier])
logging.info("Generating report")
# TODO: we are passing gt_conf_percentile (values in [0, 100, 1]) as gt_conf
# TODO: fix this? It does not really matter as we use step gt (which is gt_conf_percentile) anyway later
report = reporter.generate_report(gt_conf_percentile)
# output
logging.info("Saving report")
with open(variant_call_recall_report, "w") as output:
reporter.save_report(report, output)
logging.info("Done")
|
[
"evaluate.classifier.RecallClassifier",
"evaluate.masker.RecallMasker.from_bed",
"evaluate.reporter.RecallReporter"
] |
[((701, 754), 'logging.info', 'logging.info', (['f"""Creating masker from {mask_filepath}"""'], {}), "(f'Creating masker from {mask_filepath}')\n", (713, 754), False, 'import logging\n'), ((1744, 1764), 'logging.info', 'logging.info', (['"""Done"""'], {}), "('Done')\n", (1756, 1764), False, 'import logging\n'), ((801, 827), 'evaluate.masker.RecallMasker.from_bed', 'RecallMasker.from_bed', (['bed'], {}), '(bed)\n', (822, 827), False, 'from evaluate.masker import RecallMasker\n'), ((971, 1007), 'logging.info', 'logging.info', (['f"""Masking SAM records"""'], {}), "(f'Masking SAM records')\n", (983, 1007), False, 'import logging\n'), ((1109, 1144), 'logging.info', 'logging.info', (['"""Creating classifier"""'], {}), "('Creating classifier')\n", (1121, 1144), False, 'import logging\n'), ((1162, 1207), 'evaluate.classifier.RecallClassifier', 'RecallClassifier', ([], {'sam': 'records', 'name': 'sample_id'}), '(sam=records, name=sample_id)\n', (1178, 1207), False, 'from evaluate.classifier import RecallClassifier\n'), ((1213, 1246), 'logging.info', 'logging.info', (['"""Creating reporter"""'], {}), "('Creating reporter')\n", (1225, 1246), False, 'import logging\n'), ((1262, 1302), 'evaluate.reporter.RecallReporter', 'RecallReporter', ([], {'classifiers': '[classifier]'}), '(classifiers=[classifier])\n', (1276, 1302), False, 'from evaluate.reporter import RecallReporter\n'), ((1308, 1341), 'logging.info', 'logging.info', (['"""Generating report"""'], {}), "('Generating report')\n", (1320, 1341), False, 'import logging\n'), ((1610, 1639), 'logging.info', 'logging.info', (['"""Saving report"""'], {}), "('Saving report')\n", (1622, 1639), False, 'import logging\n'), ((1017, 1050), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['sam_filepath'], {}), '(sam_filepath)\n', (1036, 1050), False, 'import pysam\n'), ((56, 62), 'pathlib.Path', 'Path', ([], {}), '()\n', (60, 62), False, 'from pathlib import Path\n')]
|
import time  # time.time() below needs an explicit import in case helper's wildcard import does not provide it
import torch
import torch.nn as nn
from helper import *
from lstm import LSTM
from evaluate import evaluate
def train(model, model_optimizer, inp, target):
model.zero_grad()
loss = 0
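    # feed the chunk one character at a time, accumulating cross-entropy loss against the next character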
for c in range(chunk_len):
output = model(inp[c])
loss += criterion(output, target[c].unsqueeze(0))
loss.backward()
model_optimizer.step()
return loss.data.item() / chunk_len
if __name__ == '__main__':
current_file, n_characters = import_and_sanitize("../../data/shakespeare.txt")
input_dim = output_dim = n_characters
n_epochs = 2000
print_every = 100
plot_every = 10
hidden_dim = 100
n_layers = 1
lr = 0.005
chunk_len = 200
model_lstm = LSTM(input_dim, hidden_dim, n_characters, n_characters)
model = model_lstm # choose a model
model_optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
start = time.time()
all_losses = []
loss_avg = 0
for epoch in range(1, n_epochs + 1):
(inp, target) = random_training_set(current_file, chunk_len)
loss = train(model, model_optimizer, inp, target)
loss_avg += loss
if epoch % print_every == 0:
print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / n_epochs * 100, loss))
print(evaluate(model, 'Wh', 100), '\n')
if epoch % plot_every == 0:
all_losses.append(loss_avg / plot_every)
loss_avg = 0
|
[
"evaluate.evaluate"
] |
[((711, 766), 'lstm.LSTM', 'LSTM', (['input_dim', 'hidden_dim', 'n_characters', 'n_characters'], {}), '(input_dim, hidden_dim, n_characters, n_characters)\n', (715, 766), False, 'from lstm import LSTM\n'), ((889, 910), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (908, 910), True, 'import torch.nn as nn\n'), ((1330, 1356), 'evaluate.evaluate', 'evaluate', (['model', '"""Wh"""', '(100)'], {}), "(model, 'Wh', 100)\n", (1338, 1356), False, 'from evaluate import evaluate\n')]
|
"""
This script demonstrates initialisation, training, evaluation, and forecasting of ForecastNet. The dataset used for the
time-invariance test in section 6.1 of the ForecastNet paper is used for this demonstration.
Paper:
"ForecastNet: A Time-Variant Deep Feed-Forward Neural Network Architecture for Multi-Step-Ahead Time-Series Forecasting"
by <NAME>, <NAME>, and <NAME>
Link to the paper: https://arxiv.org/abs/2002.04155
"""
import numpy as np
import matplotlib.pyplot as plt
from forecastNet import forecastNet
from train import train
from evaluate import evaluate
from dataHelpers import generate_data, process_data
import pandas as pd
# Use a fixed seed for reproducible results
np.random.seed(1)
# Generate the dataset
# train_x, train_y, test_x, test_y, valid_x, valid_y, period = generate_data(T=2750, period = 50, n_seqs = 4)
# df = pd.read_csv('/Users/ryadhkhisb/Dev/workspaces/m/finance-scrape/LSTNet/data/aapl_15min.csv')
# df = df[[c for c in df.columns if 'aapl_15min' in c]]
df = pd.read_parquet('/Users/ryadhkhisb/Dev/workspaces/m/finance-scrape/data/nasdaq100_15min.parquet')
# df = pd.DataFrame(
# np.array([
# np.arange(100), np.arange(100), np.arange(100),
# np.arange(100).astype(np.float32) / 100,
# np.arange(100).astype(np.float32) / 100]).transpose(),
# columns=['c1', 'c2', 'c3', 'open', 'close'])
# df=(df-df.mean())/df.std()
in_seq_length = 8
out_seq_length = 8
shift = 2
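# z-score normalisation; note that the mean/std are computed on the full frame, before the train/test split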
train_mean = df.mean()
train_std = df.std()
df = (df - train_mean) / train_std
train_x, train_y, test_x, test_y, valid_x, valid_y = process_data(df.to_numpy(),
T_in_seq=in_seq_length,
T_out_seq = out_seq_length,
shift=shift)
# train_mean = train_x.mean()
# train_std = train_x.std()
#
# train_x = (train_x - train_mean) / train_std
# valid_x = (valid_x - train_mean) / train_std
# test_x = (test_x - train_mean) / train_std
# train_size = int(len(df) * 0.66)
# df_train, df_test = df[0:train_size], df[train_size:len(df)]
#
# train_size = int(len(df_train) * 0.90)
# df_train, df_val = df_train[0:train_size], df_train[train_size:len(df_train)]
# def from_df(df):
# return df.to_numpy()[np.newaxis, :] , df.iloc[:,-3:-2].to_numpy()[np.newaxis, :]
#
# train_x, train_y = from_df(df_train)
# valid_x, valid_y = from_df(df_val)
# test_x, test_y = from_df(df_test)
# Model parameters
model_type = 'conv2' #'dense' or 'conv', 'dense2' or 'conv2'
hidden_dim = 24
input_dim = train_x.shape[-1]
output_dim = train_x.shape[-1]
learning_rate = 0.0001
n_epochs=20
batch_size = 32
# Initialise model
fcstnet = forecastNet(in_seq_length=in_seq_length, out_seq_length=out_seq_length, input_dim=input_dim,
hidden_dim=hidden_dim, output_dim=output_dim, model_type = model_type, batch_size = batch_size,
n_epochs = n_epochs, learning_rate = learning_rate, save_file = './forecastnet.pt')
# Train the model
training_costs, validation_costs = train(fcstnet, train_x, train_y, valid_x, valid_y, restore_session=False)
# Plot the training curves
plt.figure()
plt.plot(training_costs)
plt.plot(validation_costs)
# Evaluate the model
mase, smape, nrmse = evaluate(fcstnet, test_x, test_y, return_lists=False)
print('')
print('MASE:', mase)
print('SMAPE:', smape)
print('NRMSE:', nrmse)
# Generate and plot forecasts for various samples from the test dataset
samples = [0, 10, 20]
# Models with a Gaussian Mixture Density Component output
if model_type == 'dense' or model_type == 'conv':
# Generate a set of n_samples forecasts (Monte Carlo Forecasts)
num_forecasts = 10
y_pred = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
mu = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
sigma = np.zeros((test_y.shape[0], len(samples), test_y.shape[2], num_forecasts))
for i in range(num_forecasts):
y_pred[:, :, :, i], mu[:, :, :, i], sigma[:, :, :, i] = fcstnet.forecast(test_x[:, samples, :])
s_mean = np.mean(y_pred, axis=3)
s_std = np.std(y_pred, axis=3)
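    # one-standard-deviation band around the Monte Carlo mean forecast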
botVarLine = s_mean - s_std
topVarLine = s_mean + s_std
for i in range(len(samples)):
plt.figure()
plt.plot(np.arange(0, in_seq_length), test_x[:, samples[i], 0],
'-o', label='input')
plt.plot(np.arange(in_seq_length, in_seq_length + out_seq_length), test_y[:, samples[i], 0],
'-o', label='data')
plt.plot(np.arange(in_seq_length, in_seq_length + out_seq_length), s_mean[:, i, 0],
'-*', label='forecast')
plt.fill_between(np.arange(in_seq_length, in_seq_length + out_seq_length),
botVarLine[:, i, 0], topVarLine[:, i, 0],
color='gray', alpha=0.3, label='Uncertainty')
plt.legend()
# Models with a linear output
elif model_type == 'dense2' or model_type == 'conv2':
# Generate a forecast
y_pred = fcstnet.forecast(test_x[:,samples,:])
for i in range(len(samples)):
# Plot the forecast
plt.figure()
plt.plot(np.arange(0, fcstnet.in_seq_length),
test_x[:, samples[i], 0],
'o-', label='test_data')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
test_y[:, samples[i], 0],
'o-')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
y_pred[:, i, 0],
'*-', linewidth=0.7, label='mean')
plt.show()
|
[
"evaluate.evaluate"
] |
[((690, 707), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (704, 707), True, 'import numpy as np\n'), ((1003, 1110), 'pandas.read_parquet', 'pd.read_parquet', (['"""/Users/ryadhkhisb/Dev/workspaces/m/finance-scrape/data/nasdaq100_15min.parquet"""'], {}), "(\n '/Users/ryadhkhisb/Dev/workspaces/m/finance-scrape/data/nasdaq100_15min.parquet'\n )\n", (1018, 1110), True, 'import pandas as pd\n'), ((2749, 3023), 'forecastNet.forecastNet', 'forecastNet', ([], {'in_seq_length': 'in_seq_length', 'out_seq_length': 'out_seq_length', 'input_dim': 'input_dim', 'hidden_dim': 'hidden_dim', 'output_dim': 'output_dim', 'model_type': 'model_type', 'batch_size': 'batch_size', 'n_epochs': 'n_epochs', 'learning_rate': 'learning_rate', 'save_file': '"""./forecastnet.pt"""'}), "(in_seq_length=in_seq_length, out_seq_length=out_seq_length,\n input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim,\n model_type=model_type, batch_size=batch_size, n_epochs=n_epochs,\n learning_rate=learning_rate, save_file='./forecastnet.pt')\n", (2760, 3023), False, 'from forecastNet import forecastNet\n'), ((3124, 3197), 'train.train', 'train', (['fcstnet', 'train_x', 'train_y', 'valid_x', 'valid_y'], {'restore_session': '(False)'}), '(fcstnet, train_x, train_y, valid_x, valid_y, restore_session=False)\n', (3129, 3197), False, 'from train import train\n'), ((3225, 3237), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3235, 3237), True, 'import matplotlib.pyplot as plt\n'), ((3238, 3262), 'matplotlib.pyplot.plot', 'plt.plot', (['training_costs'], {}), '(training_costs)\n', (3246, 3262), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3289), 'matplotlib.pyplot.plot', 'plt.plot', (['validation_costs'], {}), '(validation_costs)\n', (3271, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3386), 'evaluate.evaluate', 'evaluate', (['fcstnet', 'test_x', 'test_y'], {'return_lists': '(False)'}), '(fcstnet, test_x, test_y, return_lists=False)\n', (3341, 3386), False, 'from evaluate import evaluate\n'), ((5703, 5713), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5711, 5713), True, 'import matplotlib.pyplot as plt\n'), ((4166, 4189), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(3)'}), '(y_pred, axis=3)\n', (4173, 4189), True, 'import numpy as np\n'), ((4202, 4224), 'numpy.std', 'np.std', (['y_pred'], {'axis': '(3)'}), '(y_pred, axis=3)\n', (4208, 4224), True, 'import numpy as np\n'), ((4332, 4344), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4342, 4344), True, 'import matplotlib.pyplot as plt\n'), ((4955, 4967), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4965, 4967), True, 'import matplotlib.pyplot as plt\n'), ((4362, 4389), 'numpy.arange', 'np.arange', (['(0)', 'in_seq_length'], {}), '(0, in_seq_length)\n', (4371, 4389), True, 'import numpy as np\n'), ((4472, 4528), 'numpy.arange', 'np.arange', (['in_seq_length', '(in_seq_length + out_seq_length)'], {}), '(in_seq_length, in_seq_length + out_seq_length)\n', (4481, 4528), True, 'import numpy as np\n'), ((4610, 4666), 'numpy.arange', 'np.arange', (['in_seq_length', '(in_seq_length + out_seq_length)'], {}), '(in_seq_length, in_seq_length + out_seq_length)\n', (4619, 4666), True, 'import numpy as np\n'), ((4751, 4807), 'numpy.arange', 'np.arange', (['in_seq_length', '(in_seq_length + out_seq_length)'], {}), '(in_seq_length, in_seq_length + out_seq_length)\n', (4760, 4807), True, 'import numpy as np\n'), ((5200, 5212), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5210, 5212), True, 'import matplotlib.pyplot as plt\n'), ((5230, 5265), 'numpy.arange', 'np.arange', (['(0)', 'fcstnet.in_seq_length'], {}), '(0, fcstnet.in_seq_length)\n', (5239, 5265), True, 'import numpy as np\n'), ((5369, 5454), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (5378, 5454), True, 'import numpy as np\n'), ((5534, 5619), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (5543, 5619), True, 'import numpy as np\n')]
|
from typing import Dict, List, Any
import chess
import sys
import time
from evaluate import evaluate_board, move_value, check_end_game
debug_info: Dict[str, Any] = {}
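# scores above MATE_THRESHOLD are treated as forced-mate scores; minimax shrinks them by one per ply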
MATE_SCORE = 1000000000
MATE_THRESHOLD = 999000000
def next_move(depth: int, board: chess.Board, debug=True) -> chess.Move:
debug_info.clear()
debug_info["nodes"] = 0
t0 = time.time()
move = minimax_root(depth, board)
debug_info["time"] = time.time() - t0
    if debug:
print(f"info {debug_info}")
return move
def get_ordered_moves(board: chess.Board) -> List[chess.Move]:
end_game = check_end_game(board)
def orderer(move):
return move_value(board, move, end_game)
in_order = sorted(
board.legal_moves, key=orderer, reverse=(board.turn == chess.WHITE)
)
return list(in_order)
def minimax_root(depth: int, board: chess.Board) -> chess.Move:
maximize = board.turn == chess.WHITE
best_move = -float("inf")
if not maximize:
best_move = float("inf")
moves = get_ordered_moves(board)
best_move_found = moves[0]
for move in moves:
board.push(move)
if board.can_claim_draw():
value = 0.0
else:
value = minimax(depth - 1, board, -float("inf"), float("inf"), not maximize)
board.pop()
if maximize and value >= best_move:
best_move = value
best_move_found = move
elif not maximize and value <= best_move:
best_move = value
best_move_found = move
return best_move_found
def minimax(
depth: int,
board: chess.Board,
alpha: float,
beta: float,
is_maximising_player: bool,
) -> float:
debug_info["nodes"] += 1
if board.is_checkmate():
return -MATE_SCORE if is_maximising_player else MATE_SCORE
elif board.is_game_over():
return 0
if depth == 0:
return evaluate_board(board)
if is_maximising_player:
best_move = -float("inf")
moves = get_ordered_moves(board)
for move in moves:
board.push(move)
curr_move = minimax(depth - 1, board, alpha, beta, not is_maximising_player)
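            # pull mate scores one step toward zero so that shorter mates are preferred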
if curr_move > MATE_THRESHOLD:
curr_move -= 1
elif curr_move < -MATE_THRESHOLD:
curr_move += 1
best_move = max(
best_move,
curr_move,
)
board.pop()
alpha = max(alpha, best_move)
if beta <= alpha:
return best_move
return best_move
else:
best_move = float("inf")
moves = get_ordered_moves(board)
for move in moves:
board.push(move)
curr_move = minimax(depth - 1, board, alpha, beta, not is_maximising_player)
if curr_move > MATE_THRESHOLD:
curr_move -= 1
elif curr_move < -MATE_THRESHOLD:
curr_move += 1
best_move = min(
best_move,
curr_move,
)
board.pop()
beta = min(beta, best_move)
if beta <= alpha:
return best_move
return best_move
|
[
"evaluate.evaluate_board",
"evaluate.move_value",
"evaluate.check_end_game"
] |
[((359, 370), 'time.time', 'time.time', ([], {}), '()\n', (368, 370), False, 'import time\n'), ((606, 627), 'evaluate.check_end_game', 'check_end_game', (['board'], {}), '(board)\n', (620, 627), False, 'from evaluate import evaluate_board, move_value, check_end_game\n'), ((436, 447), 'time.time', 'time.time', ([], {}), '()\n', (445, 447), False, 'import time\n'), ((667, 700), 'evaluate.move_value', 'move_value', (['board', 'move', 'end_game'], {}), '(board, move, end_game)\n', (677, 700), False, 'from evaluate import evaluate_board, move_value, check_end_game\n'), ((1917, 1938), 'evaluate.evaluate_board', 'evaluate_board', (['board'], {}), '(board)\n', (1931, 1938), False, 'from evaluate import evaluate_board, move_value, check_end_game\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import paddle
from data import ATISDataset
from paddle.io import DataLoader
from model import JointModel
from tqdm import tqdm
import paddle.nn.functional as F
from evaluate import evaluate
def collate_fn(batch, token_pad_val=0, tag_pad_val=0):
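    # pad every sequence in the batch to the longest one and return tensors of tokens, slot tags, intents, and lengths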
token_list, tag_list, intent_list, len_list = [], [], [], []
for tokens, tags, intent, len_ in batch:
assert len(tokens) == len(tags)
token_list.append(tokens.tolist())
tag_list.append(tags.tolist())
intent_list.append(intent)
len_list.append(len_)
# padding sequence
max_len = max(map(len, token_list))
for i in range(len(token_list)):
token_list[i] = token_list[i] + [token_pad_val] * (max_len-len(token_list[i]))
tag_list[i] = tag_list[i] + [tag_pad_val] * (max_len - len(tag_list[i]))
return paddle.to_tensor(token_list), paddle.to_tensor(tag_list), paddle.to_tensor(intent_list), paddle.to_tensor(len_list)
def train():
# configuring model training
with open("config.yaml", "r", encoding="utf-8") as f:
        args = yaml.load(f.read(), Loader=yaml.FullLoader)  # explicit Loader; yaml.load without one is deprecated in PyYAML >= 5.1
train_set = ATISDataset(args["train_path"], args["vocab_path"], args["intent_path"], args["slot_path"])
test_set = ATISDataset(args["test_path"], args["vocab_path"], args["intent_path"], args["slot_path"])
print("train:",len(train_set))
print("test:", len(test_set))
args["vocab_size"] = train_set.vocab_size
args["num_intents"] = train_set.num_intents
args["num_slots"] = train_set.num_slots
train_loader = DataLoader(train_set, batch_size=args["batch_size"], shuffle=True, drop_last=True, collate_fn=collate_fn)
test_loader = DataLoader(test_set, batch_size=args["batch_size"], shuffle=False, drop_last=False, collate_fn=collate_fn)
jointModel = JointModel(args["vocab_size"], args["embedding_size"], args["lstm_hidden_size"], args["num_intents"], args["num_slots"], num_layers=args["lstm_layers"], drop_p=args["dropout_rate"])
    use_gpu = paddle.get_device().startswith("gpu")
if use_gpu:
paddle.set_device('gpu:0')
optimizer = paddle.optimizer.Adam(learning_rate=args["learning_rate"], beta1=0.9, beta2=0.99, parameters=jointModel.parameters())
jointModel.train()
# training and evaluating model
for epoch in range(1, args["num_epochs"]+1):
for step, batch in enumerate(train_loader()):
batch_tokens, batch_tags, batch_intents, batch_lens = batch
emissions, intent_logits = jointModel(batch_tokens, batch_lens)
# compute slot prediction loss
slot_loss = jointModel.get_slot_loss(emissions, batch_lens, batch_tags)
# compute intent prediction loss
intent_loss = jointModel.get_intent_loss(intent_logits, batch_intents)
# sum slot_loss and intent_loss
loss = slot_loss + intent_loss
loss.backward()
optimizer.step()
optimizer.clear_gradients()
if step!=0 and step % args["log_steps"] == 0:
print("Epoch: %d, step: %d, total loss: %.4f, intent_loss: %.4f, slot_loss:%.4f" % (epoch, step, loss, intent_loss, slot_loss))
if step!=0 and step % args["eval_steps"] == 0:
evaluate(jointModel, test_set, args)
jointModel.train()
if (args["save_epochs"] != -1 and epoch % args["save_epochs"] == 0) or epoch == args["num_epochs"]:
if not os.path.exists(args["save_dir"]):
os.makedirs(args["save_dir"])
save_model_path = os.path.join(args["save_dir"], "jointModel_e{}.pdparams".format(epoch))
paddle.save(jointModel.state_dict(), save_model_path)
# save training args
save_args_path = os.path.join(args["save_dir"], "args.pdparams")
paddle.save(args, save_args_path)
if __name__ == "__main__":
train()
|
[
"evaluate.evaluate"
] |
[((1737, 1832), 'data.ATISDataset', 'ATISDataset', (["args['train_path']", "args['vocab_path']", "args['intent_path']", "args['slot_path']"], {}), "(args['train_path'], args['vocab_path'], args['intent_path'],\n args['slot_path'])\n", (1748, 1832), False, 'from data import ATISDataset\n'), ((1844, 1938), 'data.ATISDataset', 'ATISDataset', (["args['test_path']", "args['vocab_path']", "args['intent_path']", "args['slot_path']"], {}), "(args['test_path'], args['vocab_path'], args['intent_path'],\n args['slot_path'])\n", (1855, 1938), False, 'from data import ATISDataset\n'), ((2163, 2272), 'paddle.io.DataLoader', 'DataLoader', (['train_set'], {'batch_size': "args['batch_size']", 'shuffle': '(True)', 'drop_last': '(True)', 'collate_fn': 'collate_fn'}), "(train_set, batch_size=args['batch_size'], shuffle=True,\n drop_last=True, collate_fn=collate_fn)\n", (2173, 2272), False, 'from paddle.io import DataLoader\n'), ((2288, 2398), 'paddle.io.DataLoader', 'DataLoader', (['test_set'], {'batch_size': "args['batch_size']", 'shuffle': '(False)', 'drop_last': '(False)', 'collate_fn': 'collate_fn'}), "(test_set, batch_size=args['batch_size'], shuffle=False,\n drop_last=False, collate_fn=collate_fn)\n", (2298, 2398), False, 'from paddle.io import DataLoader\n'), ((2413, 2604), 'model.JointModel', 'JointModel', (["args['vocab_size']", "args['embedding_size']", "args['lstm_hidden_size']", "args['num_intents']", "args['num_slots']"], {'num_layers': "args['lstm_layers']", 'drop_p': "args['dropout_rate']"}), "(args['vocab_size'], args['embedding_size'], args[\n 'lstm_hidden_size'], args['num_intents'], args['num_slots'], num_layers\n =args['lstm_layers'], drop_p=args['dropout_rate'])\n", (2423, 2604), False, 'from model import JointModel\n'), ((4425, 4472), 'os.path.join', 'os.path.join', (["args['save_dir']", '"""args.pdparams"""'], {}), "(args['save_dir'], 'args.pdparams')\n", (4437, 4472), False, 'import os\n'), ((4477, 4510), 'paddle.save', 'paddle.save', (['args', 'save_args_path'], {}), '(args, save_args_path)\n', (4488, 4510), False, 'import paddle\n'), ((1461, 1489), 'paddle.to_tensor', 'paddle.to_tensor', (['token_list'], {}), '(token_list)\n', (1477, 1489), False, 'import paddle\n'), ((1491, 1517), 'paddle.to_tensor', 'paddle.to_tensor', (['tag_list'], {}), '(tag_list)\n', (1507, 1517), False, 'import paddle\n'), ((1519, 1548), 'paddle.to_tensor', 'paddle.to_tensor', (['intent_list'], {}), '(intent_list)\n', (1535, 1548), False, 'import paddle\n'), ((1550, 1576), 'paddle.to_tensor', 'paddle.to_tensor', (['len_list'], {}), '(len_list)\n', (1566, 1576), False, 'import paddle\n'), ((2691, 2717), 'paddle.set_device', 'paddle.set_device', (['"""gpu:0"""'], {}), "('gpu:0')\n", (2708, 2717), False, 'import paddle\n'), ((2618, 2637), 'paddle.get_device', 'paddle.get_device', ([], {}), '()\n', (2635, 2637), False, 'import paddle\n'), ((3913, 3949), 'evaluate.evaluate', 'evaluate', (['jointModel', 'test_set', 'args'], {}), '(jointModel, test_set, args)\n', (3921, 3949), False, 'from evaluate import evaluate\n'), ((4118, 4150), 'os.path.exists', 'os.path.exists', (["args['save_dir']"], {}), "(args['save_dir'])\n", (4132, 4150), False, 'import os\n'), ((4168, 4197), 'os.makedirs', 'os.makedirs', (["args['save_dir']"], {}), "(args['save_dir'])\n", (4179, 4197), False, 'import os\n')]
|
from evaluate import evaluate_model
from models import *
from train import train_model
from visualize import visualize_model_performance
num_epochs = 50
model0 = model_0()
model1 = model_1()
model2 = model_2()
model3 = model_3()
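# train the baseline and the PReLU/ELU/ReLU variants with the same epoch budget and class weights, then evaluate and visualize each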
baseline_model_path = train_model(model0, str(model0.name) + "_p3_baseline_balanced_weights", num_epochs,
class_weights=[1, 0, 1])
prelu_model_path = train_model(model1, str(model1.name) + "_p3_prelu_balanced_weights", num_epochs,
class_weights=[1, 0, 1])
elu_model_path = train_model(model2, str(model2.name) + "_p3_elu_balanced_weights", num_epochs, class_weights=[1, 0, 1])
relu_model_path = train_model(model3, str(model3.name) + "_p3_relu_balanced_weights", num_epochs,
class_weights=[1, 0, 1])
evaluate_model(baseline_model_path, class_weights=[1, 0, 1])
evaluate_model(prelu_model_path, class_weights=[1, 0, 1])
evaluate_model(elu_model_path, class_weights=[1, 0, 1])
evaluate_model(relu_model_path, class_weights=[1, 0, 1])
visualize_model_performance(baseline_model_path, class_weights=[1, 0, 1])
visualize_model_performance(prelu_model_path, class_weights=[1, 0, 1])
visualize_model_performance(elu_model_path, class_weights=[1, 0, 1])
visualize_model_performance(relu_model_path, class_weights=[1, 0, 1])
|
[
"evaluate.evaluate_model"
] |
[((828, 888), 'evaluate.evaluate_model', 'evaluate_model', (['baseline_model_path'], {'class_weights': '[1, 0, 1]'}), '(baseline_model_path, class_weights=[1, 0, 1])\n', (842, 888), False, 'from evaluate import evaluate_model\n'), ((889, 946), 'evaluate.evaluate_model', 'evaluate_model', (['prelu_model_path'], {'class_weights': '[1, 0, 1]'}), '(prelu_model_path, class_weights=[1, 0, 1])\n', (903, 946), False, 'from evaluate import evaluate_model\n'), ((947, 1002), 'evaluate.evaluate_model', 'evaluate_model', (['elu_model_path'], {'class_weights': '[1, 0, 1]'}), '(elu_model_path, class_weights=[1, 0, 1])\n', (961, 1002), False, 'from evaluate import evaluate_model\n'), ((1003, 1059), 'evaluate.evaluate_model', 'evaluate_model', (['relu_model_path'], {'class_weights': '[1, 0, 1]'}), '(relu_model_path, class_weights=[1, 0, 1])\n', (1017, 1059), False, 'from evaluate import evaluate_model\n'), ((1061, 1134), 'visualize.visualize_model_performance', 'visualize_model_performance', (['baseline_model_path'], {'class_weights': '[1, 0, 1]'}), '(baseline_model_path, class_weights=[1, 0, 1])\n', (1088, 1134), False, 'from visualize import visualize_model_performance\n'), ((1135, 1205), 'visualize.visualize_model_performance', 'visualize_model_performance', (['prelu_model_path'], {'class_weights': '[1, 0, 1]'}), '(prelu_model_path, class_weights=[1, 0, 1])\n', (1162, 1205), False, 'from visualize import visualize_model_performance\n'), ((1206, 1274), 'visualize.visualize_model_performance', 'visualize_model_performance', (['elu_model_path'], {'class_weights': '[1, 0, 1]'}), '(elu_model_path, class_weights=[1, 0, 1])\n', (1233, 1274), False, 'from visualize import visualize_model_performance\n'), ((1275, 1344), 'visualize.visualize_model_performance', 'visualize_model_performance', (['relu_model_path'], {'class_weights': '[1, 0, 1]'}), '(relu_model_path, class_weights=[1, 0, 1])\n', (1302, 1344), False, 'from visualize import visualize_model_performance\n')]
|
import numpy as np
import pickle
import tensorflow as tf
from time import time
from evaluate import evaluate_model, evaluate_model_recall_precision
class MF():
def __init__(self, rating_matrix):
        #### parameter settings
self.num_u = rating_matrix.shape[0] # 5551
self.num_v = rating_matrix.shape[1] # 16980
self.u_lambda = 100
self.v_lambda = 0.1
        self.k = 50 # latent dimension
self.a = 1
self.b = 0.01
self.R = np.mat(rating_matrix)
self.C = np.mat(np.ones(self.R.shape)) * self.b
self.C[np.where(self.R > 0)] = self.a
self.I_U = np.mat(np.eye(self.k) * self.u_lambda)
self.I_V = np.mat(np.eye(self.k) * self.v_lambda)
self.U = np.mat(np.random.normal(0, 1 / self.u_lambda, size=(self.k, self.num_u)))
self.V = np.mat(np.random.normal(0, 1 / self.v_lambda, size=(self.k, self.num_v)))
def test(self):
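        # appears to be a debug leftover: U_cut and j are undefined in this scope, so calling test() raises a NameError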
print(((U_cut * self.R[np.ravel(np.where(self.R[:, j] > 0)[1]), j] + self.v_lambda * self.V_sdae[j])).shape)
def ALS(self, V_sdae):
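        # alternating least squares: update each user vector U[:, i] from the rated items, then each item vector V[:, j], regularised toward its SDAE code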
self.V_sdae = np.mat(V_sdae)
V_sq = self.V * self.V.T * self.b
for i in range(self.num_u):
idx_a = np.ravel(np.where(self.R[i, :] > 0)[1])
V_cut = self.V[:, idx_a]
self.U[:, i] = np.linalg.pinv(V_sq + V_cut * V_cut.T * (self.a - self.b) + self.I_U) * (
V_cut * self.R[i, idx_a].T) # V_sq+V_cut*V_cut.T*a_m_b = VCV^T
U_sq = self.U * self.U.T * self.b
for j in range(self.num_v):
idx_a = np.ravel(np.where(self.R[:, j] > 0)[1])
U_cut = self.U[:, idx_a]
self.V[:, j] = np.linalg.pinv(U_sq + U_cut * U_cut.T * (self.a - self.b) + self.I_V) * (
U_cut * self.R[idx_a, j] + self.v_lambda * np.resize(self.V_sdae[j], (self.k, 1)))
return self.U, self.V
def mask(corruption_level ,size):
print("#### masking noise ")
mask = np.random.binomial(1, 1 - corruption_level, [size[0],size[1]])
return mask
def add_noise(x , corruption_level ):
x = x * mask(corruption_level , x.shape)
return x
class CDL():
def __init__(self, rating_matrix, item_infomation_matrix, topK=10, recallK=300, precisionK=500, use_recall_precision=False):
        # model parameter settings
self.use_recall_precision = use_recall_precision
        self.topK = topK
self.recallK = recallK
self.precisionK = precisionK
self.n_input = item_infomation_matrix.shape[1]
self.n_hidden1 = 200
self.n_hidden2 = 50
self.k = 50
self.lambda_w = 1
self.lambda_n = 1
self.lambda_u = 1
self.lambda_v = 1
self.drop_ratio = 0.01
self.learning_rate = 0.001
self.epochs = 20
self.batch_size = 32
self.num_u = rating_matrix.shape[0]
self.num_v = rating_matrix.shape[1]
self.Weights = {
'w1': tf.Variable(tf.random_normal([self.n_input, self.n_hidden1], mean=0.0, stddev=1 / self.lambda_w)),
'w2': tf.Variable(tf.random_normal([self.n_hidden1, self.n_hidden2], mean=0.0, stddev=1 / self.lambda_w)),
'w3': tf.Variable(tf.random_normal([self.n_hidden2, self.n_hidden1], mean=0.0, stddev=1 / self.lambda_w)),
'w4': tf.Variable(tf.random_normal([self.n_hidden1, self.n_input], mean=0.0, stddev=1 / self.lambda_w))
}
self.Biases = {
'b1': tf.Variable(tf.random_normal([self.n_hidden1], mean=0.0, stddev=1 / self.lambda_w)),
'b2': tf.Variable(tf.random_normal([self.n_hidden2], mean=0.0, stddev=1 / self.lambda_w)),
'b3': tf.Variable(tf.random_normal([self.n_hidden1], mean=0.0, stddev=1 / self.lambda_w)),
'b4': tf.Variable(tf.random_normal([self.n_input], mean=0.0, stddev=1 / self.lambda_w))
}
self.item_infomation_matrix = item_infomation_matrix
self.build_model()
def encoder(self, x, drop_ratio):
w1 = self.Weights['w1']
b1 = self.Biases['b1']
L1 = tf.nn.sigmoid(tf.matmul(x, w1) + b1)
L1 = tf.nn.dropout(L1, keep_prob=1 - drop_ratio)
w2 = self.Weights['w2']
b2 = self.Biases['b2']
L2 = tf.nn.sigmoid(tf.matmul(L1, w2) + b2)
L2 = tf.nn.dropout(L2, keep_prob=1 - drop_ratio)
return L2
def decoder(self, x, drop_ratio):
w3 = self.Weights['w3']
b3 = self.Biases['b3']
L3 = tf.nn.sigmoid(tf.matmul(x, w3) + b3)
L3 = tf.nn.dropout(L3, keep_prob=1 - drop_ratio)
w4 = self.Weights['w4']
b4 = self.Biases['b4']
L4 = tf.nn.sigmoid(tf.matmul(L3, w4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=1 - drop_ratio)
return L4
def build_model(self):
self.model_X_0 = tf.placeholder(tf.float32, shape=(None, self.n_input))
self.model_X_c = tf.placeholder(tf.float32, shape=(None, self.n_input))
self.model_V = tf.placeholder(tf.float32, shape=(None, self.k))
self.model_drop_ratio = tf.placeholder(tf.float32)
self.V_sdae = self.encoder(self.model_X_0, self.model_drop_ratio)
self.y_pred = self.decoder(self.V_sdae, self.model_drop_ratio)
self.Regularization = tf.reduce_sum(
[tf.nn.l2_loss(w) + tf.nn.l2_loss(b) for w, b in zip(self.Weights.values(), self.Biases.values())])
loss_r = 1 / 2 * self.lambda_w * self.Regularization
loss_a = 1 / 2 * self.lambda_n * tf.reduce_sum(tf.pow(self.model_X_c - self.y_pred, 2))
loss_v = 1 / 2 * self.lambda_v * tf.reduce_sum(tf.pow(self.model_V - self.V_sdae, 2))
self.Loss = loss_r + loss_a + loss_v
self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.Loss)
def training(self, rating_matrix, test_ratings, test_negatives):
# np.random.shuffle(self.item_infomation_matrix) #random index of train data
evaluation_threads = 1
num_items = rating_matrix.shape[1]
self.item_infomation_matrix_noise = add_noise(self.item_infomation_matrix, 0.3)
#self.item_infomation_matrix_noise = add_noise(self.item_infomation_matrix, 0.05)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
mf = MF(rating_matrix)
V_sdae = sess.run(self.V_sdae,
feed_dict={self.model_X_0: self.item_infomation_matrix_noise, self.model_drop_ratio: self.drop_ratio})
U, V = mf.ALS(V_sdae)
        U = U.T
# Init performance
t1 = time()
        if self.use_recall_precision:
(recalls, precisions) = evaluate_model_recall_precision(U @ V, num_items, test_ratings, self.recallK,
self.precisionK, evaluation_threads)
recall, precision = np.array(recalls).mean(), np.array(precisions).mean()
print('Init: Recall = %.4f, Precision = %.4f\t [%.1f s]' % (recall, precision, time() - t1))
else:
(hits, ndcgs) = evaluate_model(U @ V, test_ratings, test_negatives, self.topK, evaluation_threads)
hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
print('Init: HR = %.4f, NDCG = %.4f\t [%.1f s]' % (hr, ndcg, time() - t1))
# Train model
        if self.use_recall_precision:
best_recall, best_precision, best_iter = recall, precision, -1
else:
best_hr, best_ndcg, best_iter = hr, ndcg, -1
for epoch in range(self.epochs):
#print("%d / %d" % (epoch + 1, self.epochs))
t1 = time()
V = np.resize(V, (num_items, self.k))
for i in range(0, self.item_infomation_matrix.shape[0], self.batch_size):
X_train_batch = self.item_infomation_matrix_noise[i:i + self.batch_size]
y_train_batch = self.item_infomation_matrix[i:i + self.batch_size]
V_batch = V[i:i + self.batch_size]
_, my_loss = sess.run([self.optimizer, self.Loss],
feed_dict={self.model_X_0: X_train_batch, self.model_X_c: y_train_batch,
self.model_V: V_batch, self.model_drop_ratio: self.drop_ratio})
V_sdae = sess.run(self.V_sdae,
feed_dict={self.model_X_0: self.item_infomation_matrix_noise, self.model_drop_ratio: self.drop_ratio})
U, V = mf.ALS(V_sdae)
U = U.T
t2 = time()
# Evaluation
            if self.use_recall_precision:
(recalls, precisions) = evaluate_model_recall_precision(U @ V, num_items, test_ratings,
self.recallK,
self.precisionK, evaluation_threads)
recall, precision = np.array(recalls).mean(), np.array(precisions).mean()
print('Iteration %d [%.1f s]: Recall = %.4f, Precision = %.4f, loss = %.4f [%.1f s]'
% (epoch, t2 - t1, recall, precision, my_loss, time() - t2))
if recall > best_recall:
best_recall, best_precision, best_iter = recall, precision, epoch
else:
(hits, ndcgs) = evaluate_model(U @ V, test_ratings, test_negatives, self.topK, evaluation_threads)
hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
print('Iteration %d [%.1f s]: HR = %.4f, NDCG = %.4f, loss = %.4f [%.1f s]'
% (epoch, t2 - t1, hr, ndcg, my_loss, time() - t2))
if hr > best_hr:
best_hr, best_ndcg, best_iter = hr, ndcg, epoch
        if self.use_recall_precision:
print(
"End. Best Iteration %d: Recall = %.4f, Precision = %.4f. " % (best_iter, best_recall, best_precision))
else:
print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " % (best_iter, best_hr, best_ndcg))
return U, V
#init random seed
np.random.seed(5)
print(tf.__version__)
import argparse
parser = argparse.ArgumentParser(description="Run CDL.")
parser.add_argument('--db', nargs='?', default='ml-1m',
help='Choose a dataset.')
parser.add_argument('--recall_precision', action='store_true', default=False,
help='use recall_precision eval.')
args = parser.parse_args()
use_recall_precision = args.recall_precision
db = args.db
if use_recall_precision: p=-10
else: p=1
print("#### load matrix from pickle")
with open(r'{db}/item_infomation_matrix.pickle'.format(db=db), 'rb') as handle:
item_infomation_matrix = pickle.load(handle)
with open(r'{db}/rating_matrix_p{p}.pickle'.format(db=db,p=p), 'rb') as handle2:
rating_matrix = pickle.load(handle2)
print("#### build model")
print()
print("#### matrix factorization model")
cdl = CDL(rating_matrix , item_infomation_matrix, use_recall_precision=use_recall_precision)
cdl.build_model()
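# note: CDL.__init__ already calls build_model(), so this second call recreates the graph ops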
from Dataset import Dataset
if use_recall_precision: dataset = Dataset("{db}/{db}.precision-recall".format(db=db), use_recall_precision)
else: dataset = Dataset("{db}/{db}.hr-ndcg".format(db=db), use_recall_precision)
U, V = cdl.training(rating_matrix,dataset.testRatings, dataset.testNegatives)
'''
print(rating_matrix.shape)
print(U.shape)
print(V.shape)
np.save("ml-1m/U",U)
np.save("ml-1m/V",V)
np.save("ml-1m/R",rating_matrix)
np.save("ml-1m/R_",U@V)
'''
|
[
"evaluate.evaluate_model",
"evaluate.evaluate_model_recall_precision"
] |
[((10047, 10064), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (10061, 10064), True, 'import numpy as np\n'), ((10113, 10160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run CDL."""'}), "(description='Run CDL.')\n", (10136, 10160), False, 'import argparse\n'), ((1953, 2016), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1 - corruption_level)', '[size[0], size[1]]'], {}), '(1, 1 - corruption_level, [size[0], size[1]])\n', (1971, 2016), True, 'import numpy as np\n'), ((10673, 10692), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (10684, 10692), False, 'import pickle\n'), ((10795, 10815), 'pickle.load', 'pickle.load', (['handle2'], {}), '(handle2)\n', (10806, 10815), False, 'import pickle\n'), ((469, 490), 'numpy.mat', 'np.mat', (['rating_matrix'], {}), '(rating_matrix)\n', (475, 490), True, 'import numpy as np\n'), ((1079, 1093), 'numpy.mat', 'np.mat', (['V_sdae'], {}), '(V_sdae)\n', (1085, 1093), True, 'import numpy as np\n'), ((4094, 4137), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['L1'], {'keep_prob': '(1 - drop_ratio)'}), '(L1, keep_prob=1 - drop_ratio)\n', (4107, 4137), True, 'import tensorflow as tf\n'), ((4266, 4309), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['L2'], {'keep_prob': '(1 - drop_ratio)'}), '(L2, keep_prob=1 - drop_ratio)\n', (4279, 4309), True, 'import tensorflow as tf\n'), ((4494, 4537), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['L3'], {'keep_prob': '(1 - drop_ratio)'}), '(L3, keep_prob=1 - drop_ratio)\n', (4507, 4537), True, 'import tensorflow as tf\n'), ((4666, 4709), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['L4'], {'keep_prob': '(1 - drop_ratio)'}), '(L4, keep_prob=1 - drop_ratio)\n', (4679, 4709), True, 'import tensorflow as tf\n'), ((4782, 4836), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.n_input)'}), '(tf.float32, shape=(None, self.n_input))\n', (4796, 4836), True, 'import tensorflow as tf\n'), ((4862, 4916), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.n_input)'}), '(tf.float32, shape=(None, self.n_input))\n', (4876, 4916), True, 'import tensorflow as tf\n'), ((4940, 4988), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.k)'}), '(tf.float32, shape=(None, self.k))\n', (4954, 4988), True, 'import tensorflow as tf\n'), ((5021, 5047), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5035, 5047), True, 'import tensorflow as tf\n'), ((6161, 6173), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6171, 6173), True, 'import tensorflow as tf\n'), ((6513, 6519), 'time.time', 'time', ([], {}), '()\n', (6517, 6519), False, 'from time import time\n'), ((562, 582), 'numpy.where', 'np.where', (['(self.R > 0)'], {}), '(self.R > 0)\n', (570, 582), True, 'import numpy as np\n'), ((733, 798), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1 / self.u_lambda)'], {'size': '(self.k, self.num_u)'}), '(0, 1 / self.u_lambda, size=(self.k, self.num_u))\n', (749, 798), True, 'import numpy as np\n'), ((824, 889), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1 / self.v_lambda)'], {'size': '(self.k, self.num_v)'}), '(0, 1 / self.v_lambda, size=(self.k, self.num_v))\n', (840, 889), True, 'import numpy as np\n'), ((6191, 6224), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6222, 6224), True, 'import tensorflow as tf\n'), ((6589, 6708), 'evaluate.evaluate_model_recall_precision', 'evaluate_model_recall_precision', (['(U @ V)', 'num_items', 'test_ratings', 'self.recallK', 'self.precisionK', 'evaluation_threads'], {}), '(U @ V, num_items, test_ratings, self.\n recallK, self.precisionK, evaluation_threads)\n', (6620, 6708), False, 'from evaluate import evaluate_model, evaluate_model_recall_precision\n'), ((7005, 7091), 'evaluate.evaluate_model', 'evaluate_model', (['(U @ V)', 'test_ratings', 'test_negatives', 'self.topK', 'evaluation_threads'], {}), '(U @ V, test_ratings, test_negatives, self.topK,\n evaluation_threads)\n', (7019, 7091), False, 'from evaluate import evaluate_model, evaluate_model_recall_precision\n'), ((7563, 7569), 'time.time', 'time', ([], {}), '()\n', (7567, 7569), False, 'from time import time\n'), ((7587, 7620), 'numpy.resize', 'np.resize', (['V', '(num_items, self.k)'], {}), '(V, (num_items, self.k))\n', (7596, 7620), True, 'import numpy as np\n'), ((8471, 8477), 'time.time', 'time', ([], {}), '()\n', (8475, 8477), False, 'from time import time\n'), ((515, 536), 'numpy.ones', 'np.ones', (['self.R.shape'], {}), '(self.R.shape)\n', (522, 536), True, 'import numpy as np\n'), ((619, 633), 'numpy.eye', 'np.eye', (['self.k'], {}), '(self.k)\n', (625, 633), True, 'import numpy as np\n'), ((677, 691), 'numpy.eye', 'np.eye', (['self.k'], {}), '(self.k)\n', (683, 691), True, 'import numpy as np\n'), ((1297, 1366), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(V_sq + V_cut * V_cut.T * (self.a - self.b) + self.I_U)'], {}), '(V_sq + V_cut * V_cut.T * (self.a - self.b) + self.I_U)\n', (1311, 1366), True, 'import numpy as np\n'), ((1662, 1731), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(U_sq + U_cut * U_cut.T * (self.a - self.b) + self.I_V)'], {}), '(U_sq + U_cut * U_cut.T * (self.a - self.b) + self.I_V)\n', (1676, 1731), True, 'import numpy as np\n'), ((2945, 3034), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_input, self.n_hidden1]'], {'mean': '(0.0)', 'stddev': '(1 / self.\n lambda_w)'}), '([self.n_input, self.n_hidden1], mean=0.0, stddev=1 / self.\n lambda_w)\n', (2961, 3034), True, 'import tensorflow as tf\n'), ((3062, 3152), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden1, self.n_hidden2]'], {'mean': '(0.0)', 'stddev': '(1 /\n self.lambda_w)'}), '([self.n_hidden1, self.n_hidden2], mean=0.0, stddev=1 /\n self.lambda_w)\n', (3078, 3152), True, 'import tensorflow as tf\n'), ((3181, 3271), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden2, self.n_hidden1]'], {'mean': '(0.0)', 'stddev': '(1 /\n self.lambda_w)'}), '([self.n_hidden2, self.n_hidden1], mean=0.0, stddev=1 /\n self.lambda_w)\n', (3197, 3271), True, 'import tensorflow as tf\n'), ((3300, 3389), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden1, self.n_input]'], {'mean': '(0.0)', 'stddev': '(1 / self.\n lambda_w)'}), '([self.n_hidden1, self.n_input], mean=0.0, stddev=1 / self.\n lambda_w)\n', (3316, 3389), True, 'import tensorflow as tf\n'), ((3450, 3520), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden1]'], {'mean': '(0.0)', 'stddev': '(1 / self.lambda_w)'}), '([self.n_hidden1], mean=0.0, stddev=1 / self.lambda_w)\n', (3466, 3520), True, 'import tensorflow as tf\n'), ((3553, 3623), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden2]'], {'mean': '(0.0)', 'stddev': '(1 / self.lambda_w)'}), '([self.n_hidden2], mean=0.0, stddev=1 / self.lambda_w)\n', (3569, 3623), True, 'import tensorflow as tf\n'), ((3656, 3726), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden1]'], {'mean': '(0.0)', 'stddev': '(1 / self.lambda_w)'}), '([self.n_hidden1], mean=0.0, stddev=1 / self.lambda_w)\n', (3672, 3726), True, 'import tensorflow as tf\n'), ((3759, 3827), 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_input]'], {'mean': '(0.0)', 'stddev': '(1 / self.lambda_w)'}), '([self.n_input], mean=0.0, stddev=1 / self.lambda_w)\n', (3775, 3827), True, 'import tensorflow as tf\n'), ((4058, 4074), 'tensorflow.matmul', 'tf.matmul', (['x', 'w1'], {}), '(x, w1)\n', (4067, 4074), True, 'import tensorflow as tf\n'), ((4229, 4246), 'tensorflow.matmul', 'tf.matmul', (['L1', 'w2'], {}), '(L1, w2)\n', (4238, 4246), True, 'import tensorflow as tf\n'), ((4458, 4474), 'tensorflow.matmul', 'tf.matmul', (['x', 'w3'], {}), '(x, w3)\n', (4467, 4474), True, 'import tensorflow as tf\n'), ((4629, 4646), 'tensorflow.matmul', 'tf.matmul', (['L3', 'w4'], {}), '(L3, w4)\n', (4638, 4646), True, 'import tensorflow as tf\n'), ((5468, 5507), 'tensorflow.pow', 'tf.pow', (['(self.model_X_c - self.y_pred)', '(2)'], {}), '(self.model_X_c - self.y_pred, 2)\n', (5474, 5507), True, 'import tensorflow as tf\n'), ((5564, 5601), 'tensorflow.pow', 'tf.pow', (['(self.model_V - self.V_sdae)', '(2)'], {}), '(self.model_V - self.V_sdae, 2)\n', (5570, 5601), True, 'import tensorflow as tf\n'), ((5674, 5716), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (5696, 5716), True, 'import tensorflow as tf\n'), ((8581, 8700), 'evaluate.evaluate_model_recall_precision', 'evaluate_model_recall_precision', (['(U @ V)', 'num_items', 'test_ratings', 'self.recallK', 'self.precisionK', 'evaluation_threads'], {}), '(U @ V, num_items, test_ratings, self.\n recallK, self.precisionK, evaluation_threads)\n', (8612, 8700), False, 'from evaluate import evaluate_model, evaluate_model_recall_precision\n'), ((9291, 9377), 'evaluate.evaluate_model', 'evaluate_model', (['(U @ V)', 'test_ratings', 'test_negatives', 'self.topK', 'evaluation_threads'], {}), '(U @ V, test_ratings, test_negatives, self.topK,\n evaluation_threads)\n', (9305, 9377), False, 'from evaluate import evaluate_model, evaluate_model_recall_precision\n'), ((1202, 1228), 'numpy.where', 'np.where', (['(self.R[i, :] > 0)'], {}), '(self.R[i, :] > 0)\n', (1210, 1228), True, 'import numpy as np\n'), ((1567, 1593), 'numpy.where', 'np.where', (['(self.R[:, j] > 0)'], {}), '(self.R[:, j] > 0)\n', (1575, 1593), True, 'import numpy as np\n'), ((5253, 5269), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['w'], {}), '(w)\n', (5266, 5269), True, 'import tensorflow as tf\n'), ((5272, 5288), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['b'], {}), '(b)\n', (5285, 5288), True, 'import tensorflow as tf\n'), ((1803, 1841), 'numpy.resize', 'np.resize', (['self.V_sdae[j]', '(self.k, 1)'], {}), '(self.V_sdae[j], (self.k, 1))\n', (1812, 1841), True, 'import numpy as np\n'), ((6804, 6821), 'numpy.array', 'np.array', (['recalls'], {}), '(recalls)\n', (6812, 6821), True, 'import numpy as np\n'), ((6830, 6850), 'numpy.array', 'np.array', (['precisions'], {}), '(precisions)\n', (6838, 6850), True, 'import numpy as np\n'), ((7111, 7125), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (7119, 7125), True, 'import numpy as np\n'), ((7134, 7149), 'numpy.array', 'np.array', (['ndcgs'], {}), '(ndcgs)\n', (7142, 7149), True, 'import numpy as np\n'), ((6949, 6955), 'time.time', 'time', ([], {}), '()\n', (6953, 6955), False, 'from time import time\n'), ((7230, 7236), 'time.time', 'time', ([], {}), '()\n', (7234, 7236), False, 'from time import time\n'), ((8876, 8893), 'numpy.array', 'np.array', (['recalls'], {}), '(recalls)\n', (8884, 8893), True, 'import numpy as np\n'), ((8902, 8922), 'numpy.array', 'np.array', (['precisions'], {}), '(precisions)\n', (8910, 8922), True, 'import numpy as np\n'), ((9401, 9415), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (9409, 9415), True, 'import numpy as np\n'), ((9424, 9439), 'numpy.array', 'np.array', (['ndcgs'], {}), '(ndcgs)\n', (9432, 9439), True, 'import numpy as np\n'), ((9100, 9106), 'time.time', 'time', ([], {}), '()\n', (9104, 9106), False, 'from time import time\n'), ((9599, 9605), 'time.time', 'time', ([], {}), '()\n', (9603, 9605), False, 'from time import time\n'), ((952, 978), 'numpy.where', 'np.where', (['(self.R[:, j] > 0)'], {}), '(self.R[:, j] > 0)\n', (960, 978), True, 'import numpy as np\n')]
|
# Project hiatus
# script used to evaluate our models and analyse the results
# 16/11/2020
# <NAME>
# loading required packages
import os
import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset
import torch
from sklearn.linear_model import LinearRegression
import sklearn
import random
# for manual visualisation
from rasterio.plot import show
# putting the right work directory
os.chdir("/home/adminlocal/Bureau/GIT/hiatus_change_detection")
# importing our functions
import utils as fun
import train as train
import evaluate as eval_model
import metrics as fun_metrics
import warnings
warnings.filterwarnings('ignore')
print(
"""
Loading the model and the data
""")
# loading the dataset, getting a raster for later data visualisation
# after every epoch
import frejus_dataset
# loading the data
train_data, gt_change, numpy_rasters = frejus_dataset.get_datasets(["1954","1966","1970", "1978", "1989"])
## loading the model
name_model = "AE_Mmodal+DAN+split"
dict_model = torch.load("evaluation_models/"+name_model)
args = dict_model["args"]
trained_model = fun.load_model_from_dict(dict_model)
# setting the seed
fun.set_seed(args.seed, args.cuda)
## we take a test set of the gt_change for evaluation (20%)
# creating a new dict for gt test
gt_change_test = {}
# getting a single subset list throughout the years
train_idx, val_idx = train_test_split(list(range(len(gt_change["1970"]))), test_size=0.20, random_state=1)
# loading the GT
for year in gt_change:
gt_change[year] = Subset(gt_change[year], train_idx)
print(
"""
Checking performance on ground truth change maps
We output the code subtraction with the model and with the baseline (a simple
raster subtraction)
""")
## generating prediction for the model
pred, y, classes = eval_model.generate_prediction_model(gt_change, trained_model,
args)
## evaluate the baseline
# get prediction and targets with the baseline
pred_alt, pred_rad, y = eval_model.generate_prediction_baseline(gt_change)
## making the ROC curve
threshold=fun_metrics.visualize_roc(y, pred_alt, return_thresh=True)
fun_metrics.iou_accuracy(pred_alt, threshold, y, classes)
threshold=fun_metrics.visualize_roc(y, pred_rad, return_thresh=True)
fun_metrics.iou_accuracy(pred_rad, threshold, y, classes)
# ROC for the model
threshold=fun_metrics.visualize_roc(y, pred, return_thresh=True)
## getting the IUC and the accuracy
fun_metrics.iou_accuracy(pred, threshold, y, classes)
print(
"""
Visualizing change detection on the ground truth
""")
for i in range(30,40):
# loading the raster
nb = i
rast1 = gt_change["1954"][nb][None,1:,:,:]
rast2 = gt_change["1970"][nb][None,1:,:,:]
# loading the gt
gts = [gt_change["1954"][nb][None,0,:,:].squeeze(),
gt_change["1970"][nb][None,0,:,:].squeeze()]
cmap, dccode, code1, code2 = fun.change_detection(rast1, rast2, trained_model,
args,
visualization=True,
threshold=threshold, gts=gts)
print(
"""
Performing normalized mutual information for continuous variables
""")
# load the data and the baselines
codes_clean, labels_clean = fun.prepare_codes_metrics(gt_change, args, trained_model)
dem_clean = fun.prepare_data_metrics(gt_change, 1)
rad_clean = fun.prepare_data_metrics(gt_change, 2)
## getting the number of pixels per classes
nb_build = np.count_nonzero(labels_clean == 1)
nb_road = np.count_nonzero(labels_clean == 2)
nb_field = np.count_nonzero(labels_clean == 3)
nb_classes = (nb_build, nb_road, nb_field)
## spliting the dataset according to the class
# loading the data
buildings_idx = labels_clean == 1
roads_idx = labels_clean == 2
fields_idx = labels_clean == 3
# putting into a list
classes_idx = [buildings_idx, roads_idx, fields_idx]
# calculating the NMI for the codes
fun_metrics.NMI_continuous_discrete(labels_clean, codes_clean,
nb_classes, [1,2,3], classes_idx)
# calculating the NMI for the dem
fun_metrics.NMI_continuous_discrete(labels_clean, dem_clean[:,None],
nb_classes, [1,2,3], classes_idx)
# calculating the NMI for the rad
fun_metrics.NMI_continuous_discrete(labels_clean, rad_clean[:,None],
nb_classes, [1,2,3], classes_idx)
# calculating the NMI for both inputs
dem_rad = np.concatenate((rad_clean[:,None], dem_clean[:,None]), axis=1)
fun_metrics.NMI_continuous_discrete(labels_clean, dem_rad,
nb_classes, [1,2,3], classes_idx)
print(
"""
Making a linear SVM
""")
## linear svm with the model
conf_mat_model, class_report_model, scores_cv = fun_metrics.svm_accuracy_estimation(codes_clean,
labels_clean)
## linear svm with the dem
conf_mat_dem, class_report_dem, scores_cv = fun_metrics.svm_accuracy_estimation(dem_clean,
labels_clean)
## linear svm with the rad
conf_mat_rad, class_report_rad, scores_cv = fun_metrics.svm_accuracy_estimation(rad_clean,
labels_clean)
### Linear svm but distinct geographical locations
# getting ids for training and validation sets
train_idx, val_idx = train_test_split(list(range(len(gt_change["1954"]))), test_size=0.25)
# loading two dictionary for cross-validation
gt_change_train = {}
gt_change_test = {}
# creating test and train data on distinct locations
for year in gt_change:
gt_change_train[year] = Subset(gt_change[year], train_idx)
gt_change_test[year] = Subset(gt_change[year], val_idx)
# data for train
codes_train, labels_train = fun.prepare_codes_metrics(gt_change_train, args, trained_model)
dem_train = fun.prepare_data_metrics(gt_change_train, 1)
rad_train = fun.prepare_data_metrics(gt_change_train, 2)
# data for test
codes_test, labels_test = fun.prepare_codes_metrics(gt_change_test, args, trained_model)
dem_test = fun.prepare_data_metrics(gt_change_test, 1)
rad_test = fun.prepare_data_metrics(gt_change_test, 2)
## linear svm with the model
conf_mat_model, class_report_model, scores_cv_model = fun_metrics.svm_accuracy_estimation_2(codes_train, codes_test, labels_train, labels_test, cv=False)
## linear svm with the dem
conf_mat_dem, class_report_dem, scores_cv_dem = fun_metrics.svm_accuracy_estimation_2(dem_train, dem_test, labels_train, labels_test, cv=False)
## linear svm with the rad
conf_mat_rad, class_report_rad, scores_cv_rad = fun_metrics.svm_accuracy_estimation_2(rad_train, rad_test, labels_train, labels_test, cv=False)
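# Note: splitting by distinct locations (as above) guards against spatial
# autocorrelation inflating the scores, since neighbouring pixels of one tile
# never end up on both sides of the split. A sklearn-style alternative
# (hypothetical, not used here) would be GroupKFold with one group per tile:
# from sklearn.model_selection import GroupKFold
# folds = GroupKFold(n_splits=4).split(codes_clean, labels_clean, groups=tile_ids)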
## testing with only one year used for training
# getting ids for training and validation sets
gt_change_train = {}
gt_change_test = {}
for year in gt_change:
if year == "1970":
        gt_change_train[year] = gt_change[year]
else:
gt_change_test[year] = gt_change[year]
# data for train
codes_train, labels_train = fun.prepare_codes_metrics(gt_change_train, args, trained_model)
dem_train = fun.prepare_data_metrics(gt_change_train, 1)
rad_train = fun.prepare_data_metrics(gt_change_train, 2)
# data for test
codes_test, labels_test = fun.prepare_codes_metrics(gt_change_test, args, trained_model)
dem_test = fun.prepare_data_metrics(gt_change_test, 1)
rad_test = fun.prepare_data_metrics(gt_change_test, 2)
## linear svm with the model
conf_mat_model, class_report_model, scores_cv_model = fun_metrics.svm_accuracy_estimation_2(codes_train, codes_test, labels_train, labels_test, cv=False)
## linear svm with the dem
conf_mat_dem, class_report_dem, scores_cv_dem = fun_metrics.svm_accuracy_estimation_2(dem_train, dem_test, labels_train, labels_test, cv=False)
## linear svm with the rad
conf_mat_rad, class_report_rad, scores_cv_rad = fun_metrics.svm_accuracy_estimation_2(rad_train, rad_test, labels_train, labels_test, cv=False)
print("""
      Now we do transfer learning (Bayesian model)
""")
## loading the pre-trained model
dict_model = torch.load("evaluation_models/test_transfer_aleo")
dict_model["args"].epochs = 1
dict_model["args"].defiance = 1
dict_model["args"].save = 0
dict_model["args"].load_best_model = 1
dict_model["args"].grad_clip = 0
dict_model["args"].name_model = "bayesian_model"
# updating the args
args = dict_model["args"]
# starting the run
trained_model = train.train_full(args, train_data, gt_change, dict_model)
print("""
Performing change detection with the alternative model (training the model
and then assessing the result)
""")
# list of years
years = ["1954","1966", "1970", "1978", "1989"]
# loading the data
import frejus_dataset
train_data, gt_change, numpy_rasters = frejus_dataset.get_datasets(["1954","1966","1970", "1978", "1989"])
# loading the args of the pre-trained model
dict_model = torch.load("evaluation_models/pre_trained_baseline")
args = dict_model["args"]
# setting the number of epochs
args.epochs = 5
args.save = 0
# getting the year for the first raster
for year1 in years:
# getting the year for the second raster
for year2 in years:
        # checking that the two years are distinct
if year1 != year2 and year2 > year1:
# naming the model
args.name_model = year1+"to"+year2+"_baseline"
# loading the data
train_data, _, numpy_rasters = frejus_dataset.get_datasets([year1,year2])
# taking two years and converting into torch
numpy_rasters[year1] = [fun.torch_raster(raster, cuda=False) for raster in numpy_rasters[year1]]
numpy_rasters[year2] = [fun.torch_raster(raster, cuda=False) for raster in numpy_rasters[year2]]
# training and saving the model
_ = train.train_full_alternative_model(args, numpy_rasters, dict_model)
## evaluating the model
pred, y, classes = eval_model.generate_prediction_baseline_model(gt_change, args)
# ROC (return_thresh=False, so no threshold comes back from this call)
fun_metrics.visualize_roc(y, pred, return_thresh=False)
# accuracy and IoU at a fixed threshold
fun_metrics.iou_accuracy(pred, 0.69, y, classes)
print("""
Visualizing change detection on the ground truth
""")
for i in range(10):
# loading the raster
nb = i
rast1 = gt_change["1954"][nb][None,1:,:,:]
rast2 = gt_change["1970"][nb][None,1:,:,:]
# loading the gt
gts = [gt_change["1954"][nb][None,0,:,:].squeeze(),
gt_change["1970"][nb][None,0,:,:].squeeze()]
fun.change_detection_baseline(rast1, rast2, ["1954", "1970"], args,
visualization=True,
threshold=1.3, gts=gts)
print("""
Estimating correlation between codes, DEM and rad
""")
# getting the index for cross-validation
train_idx, val_idx = train_test_split(list(range(len(gt_change["1954"]))), test_size=0.25)
# empty dicts to store train and test sets
gt_change_train = {}
gt_change_test = {}
# loading train and test sets
for year in gt_change:
gt_change_train[year] = Subset(gt_change[year], train_idx)
gt_change_test[year] = Subset(gt_change[year], val_idx)
# data for train
codes_train, labels_train = fun.prepare_codes_metrics(gt_change_train, args, trained_model)
dem_train = fun.prepare_data_metrics(gt_change_train, 1)
rad_train = fun.prepare_data_metrics(gt_change_train, 2)
# data for test
codes_test, labels_test = fun.prepare_codes_metrics(gt_change_test, args, trained_model)
dem_test = fun.prepare_data_metrics(gt_change_test, 1)
rad_test = fun.prepare_data_metrics(gt_change_test, 2)
# training the model for dem
lr_dem = LinearRegression()
lr_dem.fit(codes_train, dem_train)
pred_dem = lr_dem.predict(codes_test)
mae_dem = sum(abs(pred_dem - dem_test)) / dem_test.shape[0]
r2_dem = sklearn.metrics.r2_score(dem_test, pred_dem)
#print(mae_dem)
print("R2 for dem is %1.2f" % (r2_dem))
print("\n")
print(abs(lr_dem.coef_).mean())
# training the model for rad
lr_rad = LinearRegression()
lr_rad.fit(codes_train, rad_train)
pred_rad = lr_rad.predict(codes_test)
mae_rad = sum(abs(pred_rad - rad_test)) / dem_test.shape[0]
r2_rad = sklearn.metrics.r2_score(rad_test, pred_rad)
#print(mae_rad)
print("R2 for rad is %1.2f" % (r2_rad))
print("\n")
print(abs(lr_rad.coef_).mean())
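## sanity check (uses only variables defined above): r2_score equals 1 - SS_res/SS_tot
ss_res = ((rad_test - pred_rad) ** 2).sum()
ss_tot = ((rad_test - rad_test.mean()) ** 2).sum()
assert np.isclose(r2_rad, 1 - ss_res / ss_tot)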
### computing the MI
# adding test data to train data
codes_train = np.concatenate((codes_train, codes_test), axis=0)
dem_train = np.concatenate((dem_train, dem_test), axis=None)
rad_train = np.concatenate((rad_train, rad_test), axis=None)
## binning the data
# getting the value of the quantiles
values_dem_cut = np.quantile(dem_train, [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
values_rad_cut = np.quantile(rad_train, [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
# binning the data with the quantiles
dem_discrete = np.digitize(dem_train,bins=values_dem_cut)
rad_discrete = np.digitize(rad_train,bins=values_rad_cut)
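# Tiny illustration of the binning: np.digitize returns the index of the bin
# each value falls into, so the 9 decile cut points yield 10 classes (0-9),
# e.g. values below the first decile map to 0 and values above the last to 9.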
# lists to store class related indexes
classes_dem_idx = []
classes_rad_idx = []
# lists to store the number of samples per class
nb_classes_dem = []
nb_classes_rad = []
for i in range(10):
## class related data for DEM
# boolean per class
class_idx = dem_discrete == i
classes_dem_idx.append(class_idx)
# number of sample of the class
nb_classes_dem.append(np.count_nonzero(dem_discrete == i))
    # same operation, for the radiometry
class_idx = rad_discrete == i
classes_rad_idx.append(class_idx)
nb_classes_rad.append(np.count_nonzero(rad_discrete == i))
# calculating the NMI for DEM
mi_dem = fun_metrics.NMI_continuous_discrete(dem_discrete, codes_train,
nb_classes_dem, list(range(10)), classes_dem_idx)
print("%1.2f" % (mi_dem))
# calculating the NMI for rad
mi_rad = fun_metrics.NMI_continuous_discrete(rad_discrete, codes_train,
nb_classes_rad, list(range(10)), classes_rad_idx)
print("%1.2f" % (mi_rad))
print("""
calculating the MI per raster
""")
# getting a random raster from the GT
nb = random.randint(0, 40)
raster = gt_change["1970"][nb]
# getting the MI per raster
print("rad")
fun.MI_raster(raster, "AE_rad")
print("\n")
print("Mmodal")
fun.MI_raster(raster, "AE_Mmodal", visu=True)
print("\n")
print("DAN")
fun.MI_raster(raster, "AE_Mmodal+DAN")
print("\n")
print("""
Doing tsne visualization on the ground truth
""")
# tsne on a single raster with different models
nb = random.randint(0, 40)
raster = gt_change["1970"][nb]
fun.tsne_visualization(raster, trained_model, "AE_rad")
fun.tsne_visualization(raster, trained_model, "AE_rad+DAN")
fun.tsne_visualization(raster, trained_model, "AE_Mmodal")
fun.tsne_visualization(raster, trained_model, "AE_Mmodal+DAN")
# tsne on the whole dataset with different models
fun.tsne_dataset(gt_change, "AE_rad")
fun.tsne_dataset(gt_change, "AE_rad+DAN")
fun.tsne_dataset(gt_change, "AE_Mmodal")
fun.tsne_dataset(gt_change, "AE_Mmodal+DAN")
fun.tsne_dataset(gt_change, "AE_Mmodal+DAN+split")
print(
"""
We now test the results for several models
""")
print("AE_rad")
eval_model.evaluate_model("AE_rad", gt_change)
print("AE_rad+DAN")
eval_model.evaluate_model("AE_rad+DAN", gt_change)
print("AE_Mmodal")
eval_model.evaluate_model("AE_Mmodal", gt_change)
print("AE_Mmodal+DAN")
eval_model.evaluate_model("AE_Mmodal+DAN", gt_change)
print("AE_Mmodal+DAN+split")
eval_model.evaluate_model("AE_Mmodal+DAN+split", gt_change)
print("AE_alt+DAN")
eval_model.evaluate_model("AE_alt+DAN", gt_change)
print("bayesian_model")
eval_model.evaluate_model("bayesian_model", gt_change)
print(
"""
Visualizing some predictions for the autoencoder
""")
# removing the year vectors
datasets = [raster[0] for raster in train_data]
for i in range(10,15):
# visualizing training raster
raster = datasets[i]
fun.visualize(raster, third_dim=False)
# visualizing prediction
pred = trained_model.predict(raster[None,:,:,:].float().cuda(), args)[0].cpu()
pred = fun.numpy_raster(pred)
fun.visualize(pred, third_dim=False, defiance=args.defiance)
# scatter plot for the defiance
if args.defiance:
fun.scatter_aleo(raster[1,:,:], pred[1,:,:], pred[2,:,:])
print(
'''
Now we are going to visualize various embeddings in the model itself
''')
# visualizing the inner embeddings for a random index
fun.view_u(datasets, trained_model, args, random.randint(0, 900))
# visualizing embedding inside the model
nb = random.randint(0, 900)
print(nb)
fun.view_u(numpy_rasters["1989"], trained_model, args, nb)
fun.view_u(numpy_rasters["1970"], trained_model, args, nb)
print(
"""
Performing change detection analysis on some examples
""")
# loading two random rasters
nb = random.randint(0, 900)
print(nb)
rast1 = numpy_rasters["1954"][nb][None,:,:,:]
rast2 = numpy_rasters["1989"][nb][None,:,:,:]
# computing change raster
cmap, dccode, code1, code2 = fun.change_detection(rast1, rast2, trained_model, args,
threshold=threshold, visualization=True)
|
[
"evaluate.evaluate_model",
"evaluate.generate_prediction_baseline",
"evaluate.generate_prediction_model",
"evaluate.generate_prediction_baseline_model"
] |
[((436, 499), 'os.chdir', 'os.chdir', (['"""/home/adminlocal/Bureau/GIT/hiatus_change_detection"""'], {}), "('/home/adminlocal/Bureau/GIT/hiatus_change_detection')\n", (444, 499), False, 'import os\n'), ((648, 681), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (671, 681), False, 'import warnings\n'), ((904, 973), 'frejus_dataset.get_datasets', 'frejus_dataset.get_datasets', (["['1954', '1966', '1970', '1978', '1989']"], {}), "(['1954', '1966', '1970', '1978', '1989'])\n", (931, 973), False, 'import frejus_dataset\n'), ((1042, 1087), 'torch.load', 'torch.load', (["('evaluation_models/' + name_model)"], {}), "('evaluation_models/' + name_model)\n", (1052, 1087), False, 'import torch\n'), ((1128, 1164), 'utils.load_model_from_dict', 'fun.load_model_from_dict', (['dict_model'], {}), '(dict_model)\n', (1152, 1164), True, 'import utils as fun\n'), ((1185, 1219), 'utils.set_seed', 'fun.set_seed', (['args.seed', 'args.cuda'], {}), '(args.seed, args.cuda)\n', (1197, 1219), True, 'import utils as fun\n'), ((1817, 1885), 'evaluate.generate_prediction_model', 'eval_model.generate_prediction_model', (['gt_change', 'trained_model', 'args'], {}), '(gt_change, trained_model, args)\n', (1853, 1885), True, 'import evaluate as eval_model\n'), ((2029, 2079), 'evaluate.generate_prediction_baseline', 'eval_model.generate_prediction_baseline', (['gt_change'], {}), '(gt_change)\n', (2068, 2079), True, 'import evaluate as eval_model\n'), ((2115, 2173), 'metrics.visualize_roc', 'fun_metrics.visualize_roc', (['y', 'pred_alt'], {'return_thresh': '(True)'}), '(y, pred_alt, return_thresh=True)\n', (2140, 2173), True, 'import metrics as fun_metrics\n'), ((2174, 2231), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred_alt', 'threshold', 'y', 'classes'], {}), '(pred_alt, threshold, y, classes)\n', (2198, 2231), True, 'import metrics as fun_metrics\n'), ((2242, 2300), 'metrics.visualize_roc', 'fun_metrics.visualize_roc', (['y', 'pred_rad'], {'return_thresh': '(True)'}), '(y, pred_rad, return_thresh=True)\n', (2267, 2300), True, 'import metrics as fun_metrics\n'), ((2301, 2358), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred_rad', 'threshold', 'y', 'classes'], {}), '(pred_rad, threshold, y, classes)\n', (2325, 2358), True, 'import metrics as fun_metrics\n'), ((2390, 2444), 'metrics.visualize_roc', 'fun_metrics.visualize_roc', (['y', 'pred'], {'return_thresh': '(True)'}), '(y, pred, return_thresh=True)\n', (2415, 2444), True, 'import metrics as fun_metrics\n'), ((2484, 2537), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred', 'threshold', 'y', 'classes'], {}), '(pred, threshold, y, classes)\n', (2508, 2537), True, 'import metrics as fun_metrics\n'), ((3354, 3411), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change', 'args', 'trained_model'], {}), '(gt_change, args, trained_model)\n', (3379, 3411), True, 'import utils as fun\n'), ((3424, 3462), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change', '(1)'], {}), '(gt_change, 1)\n', (3448, 3462), True, 'import utils as fun\n'), ((3475, 3513), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change', '(2)'], {}), '(gt_change, 2)\n', (3499, 3513), True, 'import utils as fun\n'), ((3570, 3605), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels_clean == 1)'], {}), '(labels_clean == 1)\n', (3586, 3605), True, 'import numpy as np\n'), ((3616, 3651), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels_clean == 2)'], {}), '(labels_clean == 2)\n', (3632, 3651), True, 'import numpy as np\n'), ((3663, 3698), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels_clean == 3)'], {}), '(labels_clean == 3)\n', (3679, 3698), True, 'import numpy as np\n'), ((4018, 4120), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'codes_clean', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, codes_clean, nb_classes,\n [1, 2, 3], classes_idx)\n', (4053, 4120), True, 'import metrics as fun_metrics\n'), ((4185, 4294), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'dem_clean[:, None]', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, dem_clean[:, None],\n nb_classes, [1, 2, 3], classes_idx)\n', (4220, 4294), True, 'import metrics as fun_metrics\n'), ((4359, 4468), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'rad_clean[:, None]', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, rad_clean[:, None],\n nb_classes, [1, 2, 3], classes_idx)\n', (4394, 4468), True, 'import metrics as fun_metrics\n'), ((4551, 4615), 'numpy.concatenate', 'np.concatenate', (['(rad_clean[:, None], dem_clean[:, None])'], {'axis': '(1)'}), '((rad_clean[:, None], dem_clean[:, None]), axis=1)\n', (4565, 4615), True, 'import numpy as np\n'), ((4614, 4713), 'metrics.NMI_continuous_discrete', 'fun_metrics.NMI_continuous_discrete', (['labels_clean', 'dem_rad', 'nb_classes', '[1, 2, 3]', 'classes_idx'], {}), '(labels_clean, dem_rad, nb_classes, [1, \n 2, 3], classes_idx)\n', (4649, 4713), True, 'import metrics as fun_metrics\n'), ((4863, 4925), 'metrics.svm_accuracy_estimation', 'fun_metrics.svm_accuracy_estimation', (['codes_clean', 'labels_clean'], {}), '(codes_clean, labels_clean)\n', (4898, 4925), True, 'import metrics as fun_metrics\n'), ((5071, 5131), 'metrics.svm_accuracy_estimation', 'fun_metrics.svm_accuracy_estimation', (['dem_clean', 'labels_clean'], {}), '(dem_clean, labels_clean)\n', (5106, 5131), True, 'import metrics as fun_metrics\n'), ((5277, 5337), 'metrics.svm_accuracy_estimation', 'fun_metrics.svm_accuracy_estimation', (['rad_clean', 'labels_clean'], {}), '(rad_clean, labels_clean)\n', (5312, 5337), True, 'import metrics as fun_metrics\n'), ((5935, 5998), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_train', 'args', 'trained_model'], {}), '(gt_change_train, args, trained_model)\n', (5960, 5998), True, 'import utils as fun\n'), ((6011, 6055), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(1)'], {}), '(gt_change_train, 1)\n', (6035, 6055), True, 'import utils as fun\n'), ((6067, 6111), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(2)'], {}), '(gt_change_train, 2)\n', (6091, 6111), True, 'import utils as fun\n'), ((6155, 6217), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_test', 'args', 'trained_model'], {}), '(gt_change_test, args, trained_model)\n', (6180, 6217), True, 'import utils as fun\n'), ((6229, 6272), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(1)'], {}), '(gt_change_test, 1)\n', (6253, 6272), True, 'import utils as fun\n'), ((6284, 6327), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(2)'], {}), '(gt_change_test, 2)\n', (6308, 6327), True, 'import utils as fun\n'), ((6412, 6515), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['codes_train', 'codes_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(codes_train, codes_test, labels_train,\n labels_test, cv=False)\n', (6449, 6515), True, 'import metrics as fun_metrics\n'), ((6588, 6687), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['dem_train', 'dem_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(dem_train, dem_test, labels_train,\n labels_test, cv=False)\n', (6625, 6687), True, 'import metrics as fun_metrics\n'), ((6760, 6859), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['rad_train', 'rad_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(rad_train, rad_test, labels_train,\n labels_test, cv=False)\n', (6797, 6859), True, 'import metrics as fun_metrics\n'), ((7182, 7245), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_train', 'args', 'trained_model'], {}), '(gt_change_train, args, trained_model)\n', (7207, 7245), True, 'import utils as fun\n'), ((7258, 7302), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(1)'], {}), '(gt_change_train, 1)\n', (7282, 7302), True, 'import utils as fun\n'), ((7314, 7358), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(2)'], {}), '(gt_change_train, 2)\n', (7338, 7358), True, 'import utils as fun\n'), ((7402, 7464), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_test', 'args', 'trained_model'], {}), '(gt_change_test, args, trained_model)\n', (7427, 7464), True, 'import utils as fun\n'), ((7476, 7519), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(1)'], {}), '(gt_change_test, 1)\n', (7500, 7519), True, 'import utils as fun\n'), ((7531, 7574), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(2)'], {}), '(gt_change_test, 2)\n', (7555, 7574), True, 'import utils as fun\n'), ((7659, 7762), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['codes_train', 'codes_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(codes_train, codes_test, labels_train,\n labels_test, cv=False)\n', (7696, 7762), True, 'import metrics as fun_metrics\n'), ((7835, 7934), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['dem_train', 'dem_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(dem_train, dem_test, labels_train,\n labels_test, cv=False)\n', (7872, 7934), True, 'import metrics as fun_metrics\n'), ((8007, 8106), 'metrics.svm_accuracy_estimation_2', 'fun_metrics.svm_accuracy_estimation_2', (['rad_train', 'rad_test', 'labels_train', 'labels_test'], {'cv': '(False)'}), '(rad_train, rad_test, labels_train,\n labels_test, cv=False)\n', (8044, 8106), True, 'import metrics as fun_metrics\n'), ((8218, 8268), 'torch.load', 'torch.load', (['"""evaluation_models/test_transfer_aleo"""'], {}), "('evaluation_models/test_transfer_aleo')\n", (8228, 8268), False, 'import torch\n'), ((8564, 8621), 'train.train_full', 'train.train_full', (['args', 'train_data', 'gt_change', 'dict_model'], {}), '(args, train_data, gt_change, dict_model)\n', (8580, 8621), True, 'import train as train\n'), ((8902, 8971), 'frejus_dataset.get_datasets', 'frejus_dataset.get_datasets', (["['1954', '1966', '1970', '1978', '1989']"], {}), "(['1954', '1966', '1970', '1978', '1989'])\n", (8929, 8971), False, 'import frejus_dataset\n'), ((9028, 9080), 'torch.load', 'torch.load', (['"""evaluation_models/pre_trained_baseline"""'], {}), "('evaluation_models/pre_trained_baseline')\n", (9038, 9080), False, 'import torch\n'), ((10150, 10212), 'evaluate.generate_prediction_baseline_model', 'eval_model.generate_prediction_baseline_model', (['gt_change', 'args'], {}), '(gt_change, args)\n', (10195, 10212), True, 'import evaluate as eval_model\n'), ((10230, 10285), 'metrics.visualize_roc', 'fun_metrics.visualize_roc', (['y', 'pred'], {'return_thresh': '(False)'}), '(y, pred, return_thresh=False)\n', (10255, 10285), True, 'import metrics as fun_metrics\n'), ((10306, 10354), 'metrics.iou_accuracy', 'fun_metrics.iou_accuracy', (['pred', '(0.69)', 'y', 'classes'], {}), '(pred, 0.69, y, classes)\n', (10330, 10354), True, 'import metrics as fun_metrics\n'), ((11503, 11566), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_train', 'args', 'trained_model'], {}), '(gt_change_train, args, trained_model)\n', (11528, 11566), True, 'import utils as fun\n'), ((11579, 11623), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(1)'], {}), '(gt_change_train, 1)\n', (11603, 11623), True, 'import utils as fun\n'), ((11635, 11679), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_train', '(2)'], {}), '(gt_change_train, 2)\n', (11659, 11679), True, 'import utils as fun\n'), ((11723, 11785), 'utils.prepare_codes_metrics', 'fun.prepare_codes_metrics', (['gt_change_test', 'args', 'trained_model'], {}), '(gt_change_test, args, trained_model)\n', (11748, 11785), True, 'import utils as fun\n'), ((11797, 11840), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(1)'], {}), '(gt_change_test, 1)\n', (11821, 11840), True, 'import utils as fun\n'), ((11852, 11895), 'utils.prepare_data_metrics', 'fun.prepare_data_metrics', (['gt_change_test', '(2)'], {}), '(gt_change_test, 2)\n', (11876, 11895), True, 'import utils as fun\n'), ((11935, 11953), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11951, 11953), False, 'from sklearn.linear_model import LinearRegression\n'), ((12104, 12148), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['dem_test', 'pred_dem'], {}), '(dem_test, pred_dem)\n', (12128, 12148), False, 'import sklearn\n'), ((12289, 12307), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12305, 12307), False, 'from sklearn.linear_model import LinearRegression\n'), ((12458, 12502), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['rad_test', 'pred_rad'], {}), '(rad_test, pred_rad)\n', (12482, 12502), False, 'import sklearn\n'), ((12677, 12726), 'numpy.concatenate', 'np.concatenate', (['(codes_train, codes_test)'], {'axis': '(0)'}), '((codes_train, codes_test), axis=0)\n', (12691, 12726), True, 'import numpy as np\n'), ((12739, 12787), 'numpy.concatenate', 'np.concatenate', (['(dem_train, dem_test)'], {'axis': 'None'}), '((dem_train, dem_test), axis=None)\n', (12753, 12787), True, 'import numpy as np\n'), ((12800, 12848), 'numpy.concatenate', 'np.concatenate', (['(rad_train, rad_test)'], {'axis': 'None'}), '((rad_train, rad_test), axis=None)\n', (12814, 12848), True, 'import numpy as np\n'), ((12924, 12993), 'numpy.quantile', 'np.quantile', (['dem_train', '[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]'], {}), '(dem_train, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n', (12935, 12993), True, 'import numpy as np\n'), ((13003, 13072), 'numpy.quantile', 'np.quantile', (['rad_train', '[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]'], {}), '(rad_train, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n', (13014, 13072), True, 'import numpy as np\n'), ((13119, 13162), 'numpy.digitize', 'np.digitize', (['dem_train'], {'bins': 'values_dem_cut'}), '(dem_train, bins=values_dem_cut)\n', (13130, 13162), True, 'import numpy as np\n'), ((13177, 13220), 'numpy.digitize', 'np.digitize', (['rad_train'], {'bins': 'values_rad_cut'}), '(rad_train, bins=values_rad_cut)\n', (13188, 13220), True, 'import numpy as np\n'), ((14386, 14407), 'random.randint', 'random.randint', (['(0)', '(40)'], {}), '(0, 40)\n', (14400, 14407), False, 'import random\n'), ((14481, 14512), 'utils.MI_raster', 'fun.MI_raster', (['raster', '"""AE_rad"""'], {}), "(raster, 'AE_rad')\n", (14494, 14512), True, 'import utils as fun\n'), ((14541, 14586), 'utils.MI_raster', 'fun.MI_raster', (['raster', '"""AE_Mmodal"""'], {'visu': '(True)'}), "(raster, 'AE_Mmodal', visu=True)\n", (14554, 14586), True, 'import utils as fun\n'), ((14612, 14650), 'utils.MI_raster', 'fun.MI_raster', (['raster', '"""AE_Mmodal+DAN"""'], {}), "(raster, 'AE_Mmodal+DAN')\n", (14625, 14650), True, 'import utils as fun\n'), ((14792, 14813), 'random.randint', 'random.randint', (['(0)', '(40)'], {}), '(0, 40)\n', (14806, 14813), False, 'import random\n'), ((14845, 14900), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 'trained_model', '"""AE_rad"""'], {}), "(raster, trained_model, 'AE_rad')\n", (14867, 14900), True, 'import utils as fun\n'), ((14901, 14960), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 'trained_model', '"""AE_rad+DAN"""'], {}), "(raster, trained_model, 'AE_rad+DAN')\n", (14923, 14960), True, 'import utils as fun\n'), ((14961, 15019), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 'trained_model', '"""AE_Mmodal"""'], {}), "(raster, trained_model, 'AE_Mmodal')\n", (14983, 15019), True, 'import utils as fun\n'), ((15020, 15082), 'utils.tsne_visualization', 'fun.tsne_visualization', (['raster', 'trained_model', '"""AE_Mmodal+DAN"""'], {}), "(raster, trained_model, 'AE_Mmodal+DAN')\n", (15042, 15082), True, 'import utils as fun\n'), ((15133, 15170), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_rad"""'], {}), "(gt_change, 'AE_rad')\n", (15149, 15170), True, 'import utils as fun\n'), ((15171, 15212), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_rad+DAN"""'], {}), "(gt_change, 'AE_rad+DAN')\n", (15187, 15212), True, 'import utils as fun\n'), ((15213, 15253), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_Mmodal"""'], {}), "(gt_change, 'AE_Mmodal')\n", (15229, 15253), True, 'import utils as fun\n'), ((15254, 15298), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_Mmodal+DAN"""'], {}), "(gt_change, 'AE_Mmodal+DAN')\n", (15270, 15298), True, 'import utils as fun\n'), ((15299, 15349), 'utils.tsne_dataset', 'fun.tsne_dataset', (['gt_change', '"""AE_Mmodal+DAN+split"""'], {}), "(gt_change, 'AE_Mmodal+DAN+split')\n", (15315, 15349), True, 'import utils as fun\n'), ((15428, 15474), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_rad"""', 'gt_change'], {}), "('AE_rad', gt_change)\n", (15453, 15474), True, 'import evaluate as eval_model\n'), ((15495, 15545), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_rad+DAN"""', 'gt_change'], {}), "('AE_rad+DAN', gt_change)\n", (15520, 15545), True, 'import evaluate as eval_model\n'), ((15565, 15614), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal"""', 'gt_change'], {}), "('AE_Mmodal', gt_change)\n", (15590, 15614), True, 'import evaluate as eval_model\n'), ((15638, 15691), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal+DAN"""', 'gt_change'], {}), "('AE_Mmodal+DAN', gt_change)\n", (15663, 15691), True, 'import evaluate as eval_model\n'), ((15722, 15781), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal+DAN+split"""', 'gt_change'], {}), "('AE_Mmodal+DAN+split', gt_change)\n", (15747, 15781), True, 'import evaluate as eval_model\n'), ((15802, 15852), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_alt+DAN"""', 'gt_change'], {}), "('AE_alt+DAN', gt_change)\n", (15827, 15852), True, 'import evaluate as eval_model\n'), ((15877, 15931), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""bayesian_model"""', 'gt_change'], {}), "('bayesian_model', gt_change)\n", (15902, 15931), True, 'import evaluate as eval_model\n'), ((16845, 16867), 'random.randint', 'random.randint', (['(0)', '(900)'], {}), '(0, 900)\n', (16859, 16867), False, 'import random\n'), ((16879, 16937), 'utils.view_u', 'fun.view_u', (["numpy_rasters['1989']", 'trained_model', 'args', 'nb'], {}), "(numpy_rasters['1989'], trained_model, args, nb)\n", (16889, 16937), True, 'import utils as fun\n'), ((16938, 16996), 'utils.view_u', 'fun.view_u', (["numpy_rasters['1970']", 'trained_model', 'args', 'nb'], {}), "(numpy_rasters['1970'], trained_model, args, nb)\n", (16948, 16996), True, 'import utils as fun\n'), ((17121, 17143), 'random.randint', 'random.randint', (['(0)', '(900)'], {}), '(0, 900)\n', (17135, 17143), False, 'import random\n'), ((17299, 17399), 'utils.change_detection', 'fun.change_detection', (['rast1', 'rast2', 'trained_model', 'args'], {'threshold': 'threshold', 'visualization': '(True)'}), '(rast1, rast2, trained_model, args, threshold=threshold,\n visualization=True)\n', (17319, 17399), True, 'import utils as fun\n'), ((1557, 1591), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'train_idx'], {}), '(gt_change[year], train_idx)\n', (1563, 1591), False, 'from torch.utils.data import Subset\n'), ((2940, 3049), 'utils.change_detection', 'fun.change_detection', (['rast1', 'rast2', 'trained_model', 'args'], {'visualization': '(True)', 'threshold': 'threshold', 'gts': 'gts'}), '(rast1, rast2, trained_model, args, visualization=True,\n threshold=threshold, gts=gts)\n', (2960, 3049), True, 'import utils as fun\n'), ((5794, 5828), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'train_idx'], {}), '(gt_change[year], train_idx)\n', (5800, 5828), False, 'from torch.utils.data import Subset\n'), ((5856, 5888), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'val_idx'], {}), '(gt_change[year], val_idx)\n', (5862, 5888), False, 'from torch.utils.data import Subset\n'), ((10737, 10852), 'utils.change_detection_baseline', 'fun.change_detection_baseline', (['rast1', 'rast2', "['1954', '1970']", 'args'], {'visualization': '(True)', 'threshold': '(1.3)', 'gts': 'gts'}), "(rast1, rast2, ['1954', '1970'], args,\n visualization=True, threshold=1.3, gts=gts)\n", (10766, 10852), True, 'import utils as fun\n'), ((11358, 11392), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'train_idx'], {}), '(gt_change[year], train_idx)\n', (11364, 11392), False, 'from torch.utils.data import Subset\n'), ((11420, 11452), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'val_idx'], {}), '(gt_change[year], val_idx)\n', (11426, 11452), False, 'from torch.utils.data import Subset\n'), ((16180, 16218), 'utils.visualize', 'fun.visualize', (['raster'], {'third_dim': '(False)'}), '(raster, third_dim=False)\n', (16193, 16218), True, 'import utils as fun\n'), ((16347, 16369), 'utils.numpy_raster', 'fun.numpy_raster', (['pred'], {}), '(pred)\n', (16363, 16369), True, 'import utils as fun\n'), ((16374, 16434), 'utils.visualize', 'fun.visualize', (['pred'], {'third_dim': '(False)', 'defiance': 'args.defiance'}), '(pred, third_dim=False, defiance=args.defiance)\n', (16387, 16434), True, 'import utils as fun\n'), ((16774, 16796), 'random.randint', 'random.randint', (['(0)', '(900)'], {}), '(0, 900)\n', (16788, 16796), False, 'import random\n'), ((13610, 13645), 'numpy.count_nonzero', 'np.count_nonzero', (['(dem_discrete == i)'], {}), '(dem_discrete == i)\n', (13626, 13645), True, 'import numpy as np\n'), ((13792, 13827), 'numpy.count_nonzero', 'np.count_nonzero', (['(rad_discrete == i)'], {}), '(rad_discrete == i)\n', (13808, 13827), True, 'import numpy as np\n'), ((16506, 16569), 'utils.scatter_aleo', 'fun.scatter_aleo', (['raster[1, :, :]', 'pred[1, :, :]', 'pred[2, :, :]'], {}), '(raster[1, :, :], pred[1, :, :], pred[2, :, :])\n', (16522, 16569), True, 'import utils as fun\n'), ((9604, 9647), 'frejus_dataset.get_datasets', 'frejus_dataset.get_datasets', (['[year1, year2]'], {}), '([year1, year2])\n', (9631, 9647), False, 'import frejus_dataset\n'), ((10021, 10088), 'train.train_full_alternative_model', 'train.train_full_alternative_model', (['args', 'numpy_rasters', 'dict_model'], {}), '(args, numpy_rasters, dict_model)\n', (10055, 10088), True, 'import train as train\n'), ((9753, 9789), 'utils.torch_raster', 'fun.torch_raster', (['raster'], {'cuda': '(False)'}), '(raster, cuda=False)\n', (9769, 9789), True, 'import utils as fun\n'), ((9862, 9898), 'utils.torch_raster', 'fun.torch_raster', (['raster'], {'cuda': '(False)'}), '(raster, cuda=False)\n', (9878, 9898), True, 'import utils as fun\n')]
|
import logging
import os
import random
import torch
import numpy as np
from dataset import Dataset
from dcparser import Parser
from domain import DomainEngine
from detect import DetectEngine
from repair import RepairEngine
from evaluate import EvalEngine
# if not os.path.exists('examples/hospital.log'):
# with open('examples/hospital.log', 'w'): pass
logging.basicConfig(format="%(asctime)s - [%(levelname)5s] - %(message)s",
datefmt='%H:%M:%S',
filename='usecase1.log')
# filename='adult.log')
root_logger = logging.getLogger()
gensim_logger = logging.getLogger('gensim')
root_logger.setLevel(logging.INFO)
gensim_logger.setLevel(logging.WARNING)
# Arguments for HoloClean
arguments = [
(('-u', '--db_user'),
{'metavar': 'DB_USER',
'dest': 'db_user',
'default': 'holocleanuser',
'type': str,
'help': 'User for DB used to persist state.'}),
(('-p', '--db-pwd', '--pass'),
{'metavar': 'DB_PWD',
'dest': 'db_pwd',
'default': 'abcd<PASSWORD>',
'type': str,
'help': 'Password for DB used to persist state.'}),
(('-h', '--db-host'),
{'metavar': 'DB_HOST',
'dest': 'db_host',
'default': 'localhost',
'type': str,
'help': 'Host for DB used to persist state.'}),
(('-d', '--db_name'),
{'metavar': 'DB_NAME',
'dest': 'db_name',
'default': 'holo',
'type': str,
'help': 'Name of DB used to persist state.'}),
(('-t', '--threads'),
{'metavar': 'THREADS',
'dest': 'threads',
'default': 20,
'type': int,
'help': 'How many threads to use for parallel execution. If <= 1, then no pool workers are used.'}),
(('-dbt', '--timeout'),
{'metavar': 'TIMEOUT',
'dest': 'timeout',
'default': 60000,
'type': int,
'help': 'Timeout for expensive featurization queries.'}),
(('-s', '--seed'),
{'metavar': 'SEED',
'dest': 'seed',
'default': 45,
'type': int,
'help': 'The seed to be used for torch.'}),
(('-l', '--learning-rate'),
{'metavar': 'LEARNING_RATE',
'dest': 'learning_rate',
'default': 0.001,
'type': float,
'help': 'The learning rate used during training.'}),
(('-o', '--optimizer'),
{'metavar': 'OPTIMIZER',
'dest': 'optimizer',
'default': 'adam',
'type': str,
'help': 'Optimizer used for learning.'}),
(('-e', '--epochs'),
{'metavar': 'LEARNING_EPOCHS',
'dest': 'epochs',
'default': 20,
'type': float,
'help': 'Number of epochs used for training.'}),
(('-w', '--weight_decay'),
{'metavar': 'WEIGHT_DECAY',
'dest': 'weight_decay',
'default': 0.01,
'type': float,
'help': 'Weight decay across iterations.'}),
(('-m', '--momentum'),
{'metavar': 'MOMENTUM',
'dest': 'momentum',
'default': 0.0,
'type': float,
'help': 'Momentum for SGD.'}),
(('-b', '--batch-size'),
{'metavar': 'BATCH_SIZE',
'dest': 'batch_size',
'default': 1,
'type': int,
'help': 'The batch size during training.'}),
(('-wlt', '--weak-label-thresh'),
{'metavar': 'WEAK_LABEL_THRESH',
'dest': 'weak_label_thresh',
'default': 0.90,
'type': float,
'help': 'Threshold of posterior probability to assign weak labels.'}),
(('-dt1', '--domain_thresh_1'),
{'metavar': 'DOMAIN_THRESH_1',
'dest': 'domain_thresh_1',
'default': 0.1,
'type': float,
'help': 'Minimum co-occurrence probability threshold required for domain values in the first domain pruning stage. Between 0 and 1.'}),
(('-dt2', '--domain-thresh-2'),
{'metavar': 'DOMAIN_THRESH_2',
'dest': 'domain_thresh_2',
'default': 0,
'type': float,
'help': 'Threshold of posterior probability required for values to be included in the final domain in the second domain pruning stage. Between 0 and 1.'}),
(('-md', '--max-domain'),
{'metavar': 'MAX_DOMAIN',
'dest': 'max_domain',
'default': 1000000,
'type': int,
'help': 'Maximum number of values to include in the domain for a given cell.'}),
(('-cs', '--cor-strength'),
{'metavar': 'COR_STRENGTH',
'dest': 'cor_strength',
'default': 0.05,
'type': float,
'help': 'Correlation threshold (absolute) when selecting correlated attributes for domain pruning.'}),
(('-cs', '--nb-cor-strength'),
{'metavar': 'NB_COR_STRENGTH',
'dest': 'nb_cor_strength',
'default': 0.3,
'type': float,
'help': 'Correlation threshold for correlated attributes when using NaiveBayes estimator.'}),
(('-fn', '--feature-norm'),
{'metavar': 'FEATURE_NORM',
'dest': 'feature_norm',
'default': True,
'type': bool,
'help': 'Normalize the features before training.'}),
(('-wn', '--weight_norm'),
{'metavar': 'WEIGHT_NORM',
'dest': 'weight_norm',
'default': False,
'type': bool,
'help': 'Normalize the weights after every forward pass during training.'}),
(('-ee', '--estimator_epochs'),
{'metavar': 'ESTIMATOR_EPOCHS',
'dest': 'estimator_epochs',
'default': 3,
'type': int,
'help': 'Number of epochs to run the weak labelling and domain generation estimator.'}),
(('-ebs', '--estimator_batch_size'),
{'metavar': 'ESTIMATOR_BATCH_SIZE',
'dest': 'estimator_batch_size',
'default': 32,
'type': int,
'help': 'Size of batch used in SGD in the weak labelling and domain generation estimator.'}),
]
# Flags for Holoclean mode
flags = [
(tuple(['--verbose']),
{'default': False,
'dest': 'verbose',
'action': 'store_true',
'help': 'verbose'}),
(tuple(['--bias']),
{'default': False,
'dest': 'bias',
'action': 'store_true',
'help': 'Use bias term'}),
(tuple(['--printfw']),
{'default': False,
'dest': 'print_fw',
'action': 'store_true',
'help': 'print the weights of featurizers'}),
(tuple(['--debug-mode']),
{'default': False,
'dest': 'debug_mode',
'action': 'store_true',
      'help': 'dump a bunch of debug information to debug/'}),
]
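# Illustrative sketch (not part of the original file): the (names, options)
# tuples above are shaped to be unpacked into argparse. add_help=False would be
# needed since '-h' is reused for --db-host, and the two '-cs' entries would
# need distinct short flags before this could run:
# import argparse
# parser = argparse.ArgumentParser(add_help=False)
# for names, opts in arguments + flags:
#     parser.add_argument(*names, **opts)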
class HoloClean:
"""
Main entry point for HoloClean.
It creates a HoloClean Data Engine
"""
def __init__(self, **kwargs):
"""
Constructor for Holoclean
:param kwargs: arguments for HoloClean
"""
# Initialize default execution arguments
arg_defaults = {}
for arg, opts in arguments:
if 'directory' in arg[0]:
arg_defaults['directory'] = opts['default']
else:
arg_defaults[opts['dest']] = opts['default']
# Initialize default execution flags
for arg, opts in flags:
arg_defaults[opts['dest']] = opts['default']
# check env vars
for arg, opts in arguments:
# if env var is set use that
if opts["metavar"] and opts["metavar"] in os.environ.keys():
logging.debug(
"Overriding {} with env varible {} set to {}".format(
opts['dest'],
opts["metavar"],
os.environ[opts["metavar"]])
)
arg_defaults[opts['dest']] = os.environ[opts["metavar"]]
# Override defaults with manual flags
for key in kwargs:
arg_defaults[key] = kwargs[key]
# Initialize additional arguments
for (arg, default) in arg_defaults.items():
setattr(self, arg, kwargs.get(arg, default))
        # Initialize the session
self.session = Session(arg_defaults)
class Session:
"""
Session class controls the entire pipeline of HC
"""
def __init__(self, env, name="session"):
"""
Constructor for Holoclean session
:param env: Holoclean environment
:param name: Name for the Holoclean session
"""
# use DEBUG logging level if verbose enabled
if env['verbose']:
root_logger.setLevel(logging.DEBUG)
gensim_logger.setLevel(logging.DEBUG)
logging.debug('initiating session with parameters: %s', env)
# Initialize random seeds.
random.seed(env['seed'])
torch.manual_seed(env['seed'])
np.random.seed(seed=env['seed'])
# Initialize members
self.name = name
self.env = env
self.ds = Dataset(name, env)
self.dc_parser = Parser(env, self.ds)
self.domain_engine = DomainEngine(env, self.ds)
self.detect_engine = DetectEngine(env, self.ds)
self.repair_engine = RepairEngine(env, self.ds)
self.eval_engine = EvalEngine(env, self.ds)
def load_data(self, name, fpath, na_values=None, entity_col=None, src_col=None):
"""
load_data takes the filepath to a CSV file to load as the initial dataset.
:param name: (str) name to initialize dataset with.
:param fpath: (str) filepath to CSV file.
:param na_values: (str) value that identifies a NULL value
        :param entity_col: (str) column containing the unique
identifier/ID of an entity. For fusion tasks, rows with
the same ID will be fused together in the output.
If None, assumes every row is a unique entity.
:param src_col: (str) if not None, for fusion tasks
specifies the column containing the source for each "mention" of an
entity.
"""
status, load_time = self.ds.load_data(name,
fpath,
na_values=na_values,
entity_col=entity_col,
src_col=src_col)
logging.info(status)
logging.debug('Time to load dataset: %.2f secs', load_time)
def load_dcs(self, fpath):
"""
load_dcs ingests the Denial Constraints for initialized dataset.
:param fpath: filepath to TXT file where each line contains one denial constraint.
"""
status, load_time = self.dc_parser.load_denial_constraints(fpath)
logging.info(status)
        logging.debug('Time to load denial constraints: %.2f secs', load_time)
def get_dcs(self):
return self.dc_parser.get_dcs()
def detect_errors(self, detect_list):
status, detect_time = self.detect_engine.detect_errors(detect_list)
logging.info(status)
logging.debug('Time to detect errors: %.2f secs', detect_time)
def setup_domain(self):
status, domain_time = self.domain_engine.setup()
logging.info(status)
logging.debug('Time to setup the domain: %.2f secs', domain_time)
def repair_errors(self, featurizers):
status, feat_time = self.repair_engine.setup_featurized_ds(featurizers)
logging.info(status)
logging.debug('Time to featurize data: %.2f secs', feat_time)
status, setup_time = self.repair_engine.setup_repair_model()
logging.info(status)
        logging.debug('Time to setup repair model: %.2f secs', setup_time)
status, fit_time = self.repair_engine.fit_repair_model()
logging.info(status)
logging.debug('Time to fit repair model: %.2f secs', fit_time)
status, infer_time = self.repair_engine.infer_repairs()
logging.info(status)
logging.debug('Time to infer correct cell values: %.2f secs', infer_time)
status, time = self.ds.get_inferred_values()
logging.info(status)
logging.debug('Time to collect inferred values: %.2f secs', time)
status, time = self.ds.get_repaired_dataset()
logging.info(status)
logging.debug('Time to store repaired dataset: %.2f secs', time)
if self.env['print_fw']:
status, time = self.repair_engine.get_featurizer_weights()
logging.info(status)
logging.debug('Time to store featurizer weights: %.2f secs', time)
return status
def evaluate(self, fpath, tid_col, attr_col, val_col, na_values=None):
"""
evaluate generates an evaluation report with metrics (e.g. precision,
recall) given a test set.
:param fpath: (str) filepath to test set (ground truth) CSV file.
:param tid_col: (str) column in CSV that corresponds to the TID.
:param attr_col: (str) column in CSV that corresponds to the attribute.
:param val_col: (str) column in CSV that corresponds to correct value
for the current TID and attribute (i.e. cell).
:param na_values: (Any) how na_values are represented in the data.
Returns an EvalReport named tuple containing the experiment results.
"""
name = self.ds.raw_data.name + '_clean'
status, load_time = self.eval_engine.load_data(name, fpath, tid_col, attr_col, val_col, na_values=na_values)
logging.info(status)
logging.debug('Time to evaluate repairs: %.2f secs', load_time)
status, report_time, eval_report = self.eval_engine.eval_report()
logging.info(status)
logging.debug('Time to generate report: %.2f secs', report_time)
return eval_report
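# Minimal end-to-end usage sketch (hypothetical paths and components; detector
# and featurizer instances are defined outside this file):
# hc = HoloClean(db_name='holo', epochs=20)
# session = hc.session
# session.load_data('hospital', 'data/hospital.csv')
# session.load_dcs('data/hospital_constraints.txt')
# session.detect_errors(detectors)      # e.g. null/violation detectors
# session.setup_domain()
# session.repair_errors(featurizers)    # e.g. co-occurrence featurizers
# report = session.evaluate('data/hospital_clean.csv', 'tid', 'attribute', 'correct_val')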
|
[
"evaluate.EvalEngine"
] |
[((361, 484), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - [%(levelname)5s] - %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'filename': '"""usecase1.log"""'}), "(format='%(asctime)s - [%(levelname)5s] - %(message)s',\n datefmt='%H:%M:%S', filename='usecase1.log')\n", (380, 484), False, 'import logging\n'), ((579, 598), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (596, 598), False, 'import logging\n'), ((615, 642), 'logging.getLogger', 'logging.getLogger', (['"""gensim"""'], {}), "('gensim')\n", (632, 642), False, 'import logging\n'), ((8393, 8453), 'logging.debug', 'logging.debug', (['"""initiating session with parameters: %s"""', 'env'], {}), "('initiating session with parameters: %s', env)\n", (8406, 8453), False, 'import logging\n'), ((8498, 8522), 'random.seed', 'random.seed', (["env['seed']"], {}), "(env['seed'])\n", (8509, 8522), False, 'import random\n'), ((8531, 8561), 'torch.manual_seed', 'torch.manual_seed', (["env['seed']"], {}), "(env['seed'])\n", (8548, 8561), False, 'import torch\n'), ((8570, 8602), 'numpy.random.seed', 'np.random.seed', ([], {'seed': "env['seed']"}), "(seed=env['seed'])\n", (8584, 8602), True, 'import numpy as np\n'), ((8699, 8717), 'dataset.Dataset', 'Dataset', (['name', 'env'], {}), '(name, env)\n', (8706, 8717), False, 'from dataset import Dataset\n'), ((8743, 8763), 'dcparser.Parser', 'Parser', (['env', 'self.ds'], {}), '(env, self.ds)\n', (8749, 8763), False, 'from dcparser import Parser\n'), ((8793, 8819), 'domain.DomainEngine', 'DomainEngine', (['env', 'self.ds'], {}), '(env, self.ds)\n', (8805, 8819), False, 'from domain import DomainEngine\n'), ((8849, 8875), 'detect.DetectEngine', 'DetectEngine', (['env', 'self.ds'], {}), '(env, self.ds)\n', (8861, 8875), False, 'from detect import DetectEngine\n'), ((8905, 8931), 'repair.RepairEngine', 'RepairEngine', (['env', 'self.ds'], {}), '(env, self.ds)\n', (8917, 8931), False, 'from repair import RepairEngine\n'), ((8959, 8983), 'evaluate.EvalEngine', 'EvalEngine', (['env', 'self.ds'], {}), '(env, self.ds)\n', (8969, 8983), False, 'from evaluate import EvalEngine\n'), ((10078, 10098), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (10090, 10098), False, 'import logging\n'), ((10107, 10166), 'logging.debug', 'logging.debug', (['"""Time to load dataset: %.2f secs"""', 'load_time'], {}), "('Time to load dataset: %.2f secs', load_time)\n", (10120, 10166), False, 'import logging\n'), ((10470, 10490), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (10482, 10490), False, 'import logging\n'), ((10499, 10561), 'logging.debug', 'logging.debug', (['"""Time to load dirty data: %.2f secs"""', 'load_time'], {}), "('Time to load dirty data: %.2f secs', load_time)\n", (10512, 10561), False, 'import logging\n'), ((10753, 10773), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (10765, 10773), False, 'import logging\n'), ((10782, 10844), 'logging.debug', 'logging.debug', (['"""Time to detect errors: %.2f secs"""', 'detect_time'], {}), "('Time to detect errors: %.2f secs', detect_time)\n", (10795, 10844), False, 'import logging\n'), ((10939, 10959), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (10951, 10959), False, 'import logging\n'), ((10968, 11033), 'logging.debug', 'logging.debug', (['"""Time to setup the domain: %.2f secs"""', 'domain_time'], {}), "('Time to setup the domain: %.2f secs', domain_time)\n", (10981, 11033), False, 'import logging\n'), ((11165, 11185), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (11177, 11185), False, 'import logging\n'), ((11194, 11255), 'logging.debug', 'logging.debug', (['"""Time to featurize data: %.2f secs"""', 'feat_time'], {}), "('Time to featurize data: %.2f secs', feat_time)\n", (11207, 11255), False, 'import logging\n'), ((11333, 11353), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (11345, 11353), False, 'import logging\n'), ((11362, 11427), 'logging.debug', 'logging.debug', (['"""Time to setup repair model: %.2f secs"""', 'feat_time'], {}), "('Time to setup repair model: %.2f secs', feat_time)\n", (11375, 11427), False, 'import logging\n'), ((11501, 11521), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (11513, 11521), False, 'import logging\n'), ((11530, 11592), 'logging.debug', 'logging.debug', (['"""Time to fit repair model: %.2f secs"""', 'fit_time'], {}), "('Time to fit repair model: %.2f secs', fit_time)\n", (11543, 11592), False, 'import logging\n'), ((11665, 11685), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (11677, 11685), False, 'import logging\n'), ((11694, 11767), 'logging.debug', 'logging.debug', (['"""Time to infer correct cell values: %.2f secs"""', 'infer_time'], {}), "('Time to infer correct cell values: %.2f secs', infer_time)\n", (11707, 11767), False, 'import logging\n'), ((11829, 11849), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (11841, 11849), False, 'import logging\n'), ((11858, 11923), 'logging.debug', 'logging.debug', (['"""Time to collect inferred values: %.2f secs"""', 'time'], {}), "('Time to collect inferred values: %.2f secs', time)\n", (11871, 11923), False, 'import logging\n'), ((11986, 12006), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (11998, 12006), False, 'import logging\n'), ((12015, 12079), 'logging.debug', 'logging.debug', (['"""Time to store repaired dataset: %.2f secs"""', 'time'], {}), "('Time to store repaired dataset: %.2f secs', time)\n", (12028, 12079), False, 'import logging\n'), ((13225, 13245), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (13237, 13245), False, 'import logging\n'), ((13254, 13317), 'logging.debug', 'logging.debug', (['"""Time to evaluate repairs: %.2f secs"""', 'load_time'], {}), "('Time to evaluate repairs: %.2f secs', load_time)\n", (13267, 13317), False, 'import logging\n'), ((13400, 13420), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (13412, 13420), False, 'import logging\n'), ((13429, 13493), 'logging.debug', 'logging.debug', (['"""Time to generate report: %.2f secs"""', 'report_time'], {}), "('Time to generate report: %.2f secs', report_time)\n", (13442, 13493), False, 'import logging\n'), ((12196, 12216), 'logging.info', 'logging.info', (['status'], {}), '(status)\n', (12208, 12216), False, 'import logging\n'), ((12229, 12295), 'logging.debug', 'logging.debug', (['"""Time to store featurizer weights: %.2f secs"""', 'time'], {}), "('Time to store featurizer weights: %.2f secs', time)\n", (12242, 12295), False, 'import logging\n'), ((7211, 7228), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (7226, 7228), False, 'import os\n')]
|
import SimpleITK as sitk
import numpy as np
import tensorflow as tf
from medpy.metric import hd, asd
from config.Defines import Get_Name_By_Index
from dirutil.helper import get_name_wo_suffix
from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std
from excelutil.output2excel import outpu2excel
from tfop import utils as util, layers as layer, losses as loss
from tfop.losses import restore_loss
from learn2reg.challenge_sampler import CHallengeSampler
from learn2reg.loss import NVISimilarity
from learn2reg.sampler import MMSampler
from model.base_model import BaseModelV2
from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs
class MMReg_base(BaseModelV2):
def __init__(self,sess,args):
BaseModelV2.__init__(self, sess, args)
self.train_sampler = MMSampler(self.args, 'train')
self.validate_sampler = MMSampler(self.args, 'validate')
self.minibatch_size = self.args.batch_size
self.image_size = [self.args.image_size, self.args.image_size, self.args.image_size]
self.grid_ref = util.get_reference_grid(self.image_size)
if args.phase == 'train':
self.is_train = True
else:
self.is_train = False
self.build_network()
self.summary()
    def warp_image(self, input_, ddf):
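        # Dense-displacement warp: resample the input at grid_ref + ddf,
        # i.e. out(x) = in(x + ddf(x)), with linear interpolation.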
        return util.resample_linear(input_, self.grid_ref + ddf)
    def _regnet(self, mv_img, mv_lab, fix_img, fix_lab, reuse=False, scope_name="shared_regnet"):
input_layer = tf.concat([layer.resize_volume(mv_img, self.image_size), fix_img], axis=4)
ddf_levels = [0, 1, 2, 3, 4]
self.num_channel_initial = self.args.num_channel_initial
nc = [int(self.num_channel_initial * (2 ** i)) for i in range(5)]
min_level = min(ddf_levels)
        with tf.variable_scope(scope_name, reuse=reuse):
h0, hc0 = layer.downsample_resnet_block(self.is_train, input_layer, 2, nc[0], k_conv0=[7, 7, 7],name='local_down_0')
h1, hc1 = layer.downsample_resnet_block(self.is_train, h0, nc[0], nc[1], name='local_down_1')
h2, hc2 = layer.downsample_resnet_block(self.is_train, h1, nc[1], nc[2], name='local_down_2')
h3, hc3 = layer.downsample_resnet_block(self.is_train, h2, nc[2], nc[3], name='local_down_3')
hm = [layer.conv3_block(self.is_train, h3, nc[3], nc[4], name='local_deep_4')]
hm += [layer.upsample_resnet_block(self.is_train, hm[0], hc3, nc[4], nc[3],name='local_up_3')] if min_level < 4 else []
hm += [layer.upsample_resnet_block(self.is_train, hm[1], hc2, nc[3], nc[2],name='local_up_2')] if min_level < 3 else []
hm += [layer.upsample_resnet_block(self.is_train, hm[2], hc1, nc[2], nc[1],name='local_up_1')] if min_level < 2 else []
hm += [layer.upsample_resnet_block(self.is_train, hm[3], hc0, nc[1], nc[0],name='local_up_0')] if min_level < 1 else []
ddf_list = [layer.ddf_summand(hm[4 - idx], nc[idx], self.image_size, name='ddf1_sum_%d' % idx) for idx in ddf_levels]
ddf_list = tf.stack(ddf_list, axis=5)
ddf_MV_FIX = tf.reduce_sum(ddf_list, axis=5)
ddf_list2 = [layer.ddf_summand(hm[4 - idx], nc[idx], self.image_size, name='ddf2_sum_%d' % idx) for idx in ddf_levels]
ddf_list2 = tf.stack(ddf_list2, axis=5)
ddf_FIX_MV = tf.reduce_sum(ddf_list2, axis=5)
w_mv_img = self.warp_image(mv_img, ddf_MV_FIX)
w_mv_lab = self.warp_image(mv_lab, ddf_MV_FIX)
r_mv_img = self.warp_image(w_mv_img, ddf_FIX_MV)
w_fix_img = self.warp_image(fix_img, ddf_FIX_MV)
w_fix_lab = self.warp_image(fix_lab, ddf_FIX_MV)
r_fix_img = self.warp_image(w_fix_img, ddf_MV_FIX)
return ddf_MV_FIX,ddf_FIX_MV,w_mv_img,w_mv_lab,r_mv_img,w_fix_img,w_fix_lab,r_fix_img
def cal_nvi_loss(self,w_mv_img,i_fix_img,w_fix_img,i_mv_img):
nvi_loss_1 = self.multiScaleNVILoss(w_mv_img, i_fix_img)
nvi_loss_2 = self.multiScaleNVILoss(w_fix_img, i_mv_img)
nvi_loss = nvi_loss_1 + nvi_loss_2
return nvi_loss
def consis_loss(self,i_mv_img,r_mv_img,i_fix_img,r_fix_img):
consistent = (restore_loss(i_mv_img, r_mv_img) + restore_loss(i_fix_img, r_fix_img))
return consistent
def bend_loss(self,ddf_mv_f,ddf_f_mv):
# create loss
ddf1_bend = tf.reduce_mean(loss.local_displacement_energy(ddf_mv_f, 'bending', 1))
ddf2_bend = tf.reduce_mean(loss.local_displacement_energy(ddf_f_mv, 'bending', 1))
ddf_bend = (ddf1_bend + ddf2_bend)
return ddf_bend
def multiScaleNVILoss(self, warped_mv1_img, input_FIX_image):
grad_loss=0
scales=[1,2,3]
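        # accumulate the NVI similarity at several spatial scales and average,
        # so both coarse and fine image structures contribute to the loss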
        for s in scales:
            grad_loss = grad_loss + NVISimilarity(warped_mv1_img, input_FIX_image, s)
return grad_loss/len(scales)
def train(self):
self.is_train=True
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.writer = tf.summary.FileWriter(self.args.log_dir, self.sess.graph)
self.saver = tf.train.Saver()
for glob_step in range(self.args.iteration):
mv_img1s, mv_lab1s, mv_img2s, mv_lab2s, fix_imgs, fix_labs=self.train_sampler.next_sample()
trainFeed = self.create_feed_dict(mv_img1s, mv_lab1s, mv_img2s, mv_lab2s, fix_imgs, fix_labs, is_aug=True)
_,nv_loss,cyc_consis,bend,multi_consis,summary=self.sess.run([self.train_op, self.nvi_loss, self.cycle_consistent, self.ddf_bend, self.multi_consis, self.summary_all], feed_dict=trainFeed)
self.writer.add_summary(summary,glob_step)
self.logger.debug("step %d: nv_loss=%f,cyc_consis=%f,bend=%f,multi_consis=%f"%(glob_step,nv_loss,cyc_consis,bend,multi_consis))
if np.mod(glob_step, self.args.print_freq) == 1:
# self.sample(glob_step)
self.validate_set()
if np.mod(glob_step, self.args.save_freq) == 1:
self.save(self.args.checkpoint_dir, glob_step)
def summary(self):
tf.summary.scalar("nvi_loss_1",self.nvi1)
tf.summary.scalar("nvi_loss_2", self.nvi2)
tf.summary.scalar("ddf1_bend", self.bend1)
tf.summary.scalar("ddf2_bend",self.bend2)
tf.summary.scalar('multi_consis', self.multi_consis)
tf.summary.scalar("cycle_consis", self.cycle_consistent)
# tf.summary.scalar("anti_folding_loss", self.anti_folding_loss)
tf.summary.image("fix_img", tf.expand_dims(self.i_fix_img[:, :, 48, :, 0], -1))
tf.summary.image("warped_fix_img", tf.expand_dims(self.w_fix1_img[:, :, 48, :, 0], -1))
tf.summary.image("mv1_img", tf.expand_dims(self.i_mv1_img[:, :, 48, :, 0], -1))
tf.summary.image("warped_mv1_img", tf.expand_dims(self.w_mv1_img[:, :, 48, :, 0], -1))
tf.summary.image("mv2_img", tf.expand_dims(self.i_mv2_img[:, :, 48, :, 0], -1))
tf.summary.image("warped_mv2_img", tf.expand_dims(self.w_mv2_img[:, :, 48, :, 0], -1))
self.summary_all = tf.summary.merge_all()
def sample(self, iter, write_img=False):
p_img_mv1s, p_lab_mv1s, p_img_mv2s, p_lab_mv2s, p_img_fixs, p_lab_fixs = self.validate_sampler.get_data_path()
img_mv1s, lab_mv1s, img_mv2s, lab_mv2s, img_fixs, lab_fixs = self.validate_sampler.get_batch_data(p_img_mv1s, p_lab_mv1s, p_img_mv2s, p_lab_mv2s, p_img_fixs, p_lab_fixs)
trainFeed = self.create_feed_dict(img_mv1s, lab_mv1s, img_mv2s, lab_mv2s, img_fixs, lab_fixs, is_aug=False)
warped_mv1_lab, warped_mv2_lab, input_mv_lab1, input_mv_lab2, input_fix_lab = self.sess.run([self.w_mv1_lab, self.w_mv2_lab, self.i_mv1_lab, self.i_mv2_lab, self.i_fix_lab], feed_dict=trainFeed)
if write_img:
sitk_write_labs(warped_mv1_lab, None, self.args.sample_dir, '%d_warped_mv1_lab' % (iter))
sitk_write_labs(warped_mv2_lab, None, self.args.sample_dir, '%d_warped_mv2_lab' % (iter))  # fixed: previously wrote warped_mv1_lab under the mv2 name
sitk_write_labs(input_fix_lab, None, self.args.sample_dir, '%d_fixed_lab' % (iter))
warped_mv1_img, warped_mv2_img, input_fix_img, input_mv1_img, input_mv2_img, i_mv1_lab, i_mv2_lab = self.sess.run([self.w_mv1_img, self.w_mv2_img, self.i_fix_img, self.i_mv1_img, self.i_mv2_img, self.i_mv1_lab, self.i_mv2_lab], feed_dict=trainFeed)
sitk_write_images(warped_mv1_img, None, self.args.sample_dir, '%d_warped_mv1_img' % (iter))
sitk_write_images(input_mv1_img, None, self.args.sample_dir, '%d_input_mv1_img' % (iter))
sitk_write_labs(i_mv1_lab, None, self.args.sample_dir, '%d_input_mv1_lab' % (iter))
sitk_write_images(warped_mv2_img, None, self.args.sample_dir, '%d_warped_mv2_img' % (iter))
sitk_write_images(input_mv2_img, None, self.args.sample_dir, '%d_input_mv2_img' % (iter))
sitk_write_labs(i_mv2_lab, None, self.args.sample_dir, '%d_input_mv2_lab' % (iter))
sitk_write_images(input_fix_img, None, self.args.sample_dir, '%d_fixed_img' % (iter))
dice_before_reg1 = calculate_binary_dice(input_mv_lab1, input_fix_lab)
dice_before_reg2 = calculate_binary_dice(input_mv_lab2, input_fix_lab)
warp_mv1_dice = calculate_binary_dice(warped_mv1_lab, input_fix_lab)
warp_mv2_dice = calculate_binary_dice(warped_mv2_lab, input_fix_lab)
para = sitk.ReadImage(p_lab_fixs[0])
mv1_hd = asd(np.squeeze(warped_mv1_lab[0, ...]), np.squeeze(input_fix_lab[0, ...]), voxelspacing=para.GetSpacing())
# assumed copy-paste slip: the second surface distance is measured against the fixed label as well (was input_mv_lab1)
mv2_hd = asd(np.squeeze(warped_mv2_lab[0, ...]), np.squeeze(input_fix_lab[0, ...]), voxelspacing=para.GetSpacing())
# assumed slip: fetch both moving->fixed fields (the original fetched self.ddf_f_mv1 into a variable named ddf_mv2_f)
ddf_mv1_f, ddf_mv2_f = self.sess.run([self.ddf_mv1_f, self.ddf_mv2_f], feed_dict=trainFeed)
_, _, neg_ddf_mv1_f = neg_jac(ddf_mv1_f[0, ...])
_, _, neg_ddf_mv2_f = neg_jac(ddf_mv2_f[0, ...])
self.logger.debug("test_step %d: before_reg_dice=%f, mv1_dice =%f , mv2_dice=%f, mv1_hd=%f, mv2_hd=%f neg_jac %d %d"%(iter,dice_before_reg1,warp_mv1_dice,warp_mv2_dice,mv1_hd,mv2_hd,neg_ddf_mv1_f,neg_ddf_mv2_f))
return dice_before_reg1,dice_before_reg2,warp_mv1_dice,warp_mv2_dice,mv1_hd,mv2_hd,neg_ddf_mv1_f,neg_ddf_mv2_f
def validate(self):
self.is_train = False
init_op = tf.global_variables_initializer()
self.saver = tf.train.Saver()
self.sess.run(init_op)
if self.load(self.args.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# res={'mv1_dice':[],'mv1_hd':[],'mv2_dice':[],'mv2_hd':[],'neg_ddf1':[],'neg_ddf2':[]}
self.validate_set(True)
def validate_set(self ,write_img=False):
res = {'mv1_dice': [], 'mv1_asd': [], 'mv2_dice': [], 'mv2_asd': [], 'bf_reg1': [], 'bf_reg2': []}
for i in range(self.validate_sampler.nb_pairs):
_bf_reg1, _bf_reg2, _mv1_dice, _mv2_dice, _mv1_hd, _mv2_hd, _neg_ddf1, _neg_ddf2 = self.sample(i, write_img)
res["mv1_dice"].append(_mv1_dice)
res["mv2_dice"].append(_mv2_dice)
res["mv1_asd"].append(_mv1_hd)
res["mv2_asd"].append(_mv2_hd)
res["bf_reg1"].append(_bf_reg1)
res["bf_reg2"].append(_bf_reg2)
# res["neg_ddf1"].append(_neg_ddf1)
# res["neg_ddf2"].append(_neg_ddf2)
print(Get_Name_By_Index(self.args.component))
print("=============%s================" % (self.args.mode))
for itr in ['mv1_dice','mv2_dice','mv1_asd','mv2_asd','bf_reg1','bf_reg2']:
print(itr)
outpu2excel(self.args.res_excel, self.args.MODEL_ID + "_" + itr, res[itr])
print_mean_and_std(res[itr], itr)
def test(self):
self.is_train = False
init_op = tf.global_variables_initializer()
self.saver = tf.train.Saver()
self.sess.run(init_op)
if self.load(self.args.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
csample = CHallengeSampler(self.args, self.is_train)
for atlas_ind in range(csample.len_mv):
for tgt_ind in range(csample.len_fix):
fix_imgs, fix_labs, mv_imgs, mv_labs = csample.get_batch_data([atlas_ind], [tgt_ind])
# assumed fix: create_feed_dict expects two moving pairs plus the fixed pair, so the single atlas is fed as both moving inputs
trainFeed = self.create_feed_dict(mv_imgs, mv_labs, mv_imgs, mv_labs, fix_imgs, fix_labs, is_aug=False)
warp_mv_img, warp_mv_label = self.sess.run([self.w_mv1_img, self.w_mv1_lab], feed_dict=trainFeed)  # w_mv1_lab replaces the undefined self.warped_MV_label
p_ata = csample.img_mv[atlas_ind]
p_tgt = csample.img_fix[tgt_ind]
outputdir = self.args.test_dir + "/atlas_%s/" % (get_name_wo_suffix(p_ata))
name = get_name_wo_suffix(p_tgt).replace('image', 'label')
sitk_write_lab(warp_mv_label[0, ...], sitk.ReadImage(p_tgt), outputdir, name)
def create_feed_dict(self, mv_img1s, mv_lab1s, mv_img2s, mv_lab2s, fix_imgs, fix_labs, is_aug=False):
trainFeed = {self.ph_mv_img1: mv_img1s,
self.ph_mv_lab1: mv_lab1s,
self.ph_mv_img2: mv_img2s,
self.ph_mv_lab2: mv_lab2s,
self.ph_fix_img: fix_imgs,
self.ph_fix_lab: fix_labs,
self.ph_fixed_affine: util.random_transform_generator(self.args.batch_size),
self.ph_moving_affine1: util.random_transform_generator(self.args.batch_size, 0.1),
self.ph_moving_affine2: util.random_transform_generator(self.args.batch_size, 0.1),
}
if is_aug:
pass
else:
trainFeed = {self.ph_mv_img1: mv_img1s,
self.ph_mv_lab1: mv_lab1s,
self.ph_mv_img2: mv_img2s,
self.ph_mv_lab2: mv_lab2s,
self.ph_fix_img: fix_imgs,
self.ph_fix_lab: fix_labs,
self.ph_fixed_affine: util.initial_transform_generator(self.args.batch_size),
self.ph_moving_affine1: util.initial_transform_generator(self.args.batch_size),
self.ph_moving_affine2: util.initial_transform_generator(self.args.batch_size),
}
return trainFeed
class MMReg(MMReg_base):
def build_network(self):
self.global_step = tf.Variable(0, trainable=False)
self.learning_rate = tf.train.exponential_decay(self.args.lr, self.global_step, self.args.decay_freq, 0.96, staircase=True)
# input
self.ph_mv_img1 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
self.ph_mv_lab1 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
self.ph_mv_img2 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
self.ph_mv_lab2 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
self.ph_fix_img = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
self.ph_fix_lab = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
self.ph_moving_affine1 = tf.placeholder(tf.float32, [self.args.batch_size] + [1, 12])  # augmentation affine: a 4x4 matrix whose last row is fixed to 0 0 0 1, leaving 12 free parameters
self.ph_moving_affine2 = tf.placeholder(tf.float32, [self.args.batch_size] + [1, 12])  # same 12-parameter affine for the second moving image
self.ph_fixed_affine = tf.placeholder(tf.float32, [self.args.batch_size] + [1, 12])
# data augmentation
self.i_mv1_img, self.i_mv1_lab = util.augment_3Ddata_by_affine(self.ph_mv_img1, self.ph_mv_lab1, self.ph_moving_affine1)
self.i_mv2_img, self.i_mv2_lab = util.augment_3Ddata_by_affine(self.ph_mv_img2, self.ph_mv_lab2, self.ph_moving_affine2)
self.i_fix_img, self.i_fix_lab = util.augment_3Ddata_by_affine(self.ph_fix_img, self.ph_fix_lab, self.ph_fixed_affine)
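# two registration sub-networks in separate variable scopes, one per moving image, both targeting the same fixed image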
self.ddf_mv1_f, self.ddf_f_mv1, self.w_mv1_img, self.w_mv1_lab, self.r_mv1_img, self.w_fix1_img, self.w_fix1_lab, self.r_fix1_img = self._regnet(self.i_mv1_img, self.i_mv1_lab, self.i_fix_img, self.i_fix_lab, scop_name="regA")
self.ddf_mv2_f, self.ddf_f_mv2, self.w_mv2_img, self.w_mv2_lab, self.r_mv2_img, self.w_fix2_img, self.w_fix2_lab, self.r_fix2_img = self._regnet(self.i_mv2_img, self.i_mv2_lab, self.i_fix_img, self.i_fix_lab, scop_name='reg_b')
self.bend1 = self.bend_loss(self.ddf_f_mv1, self.ddf_mv1_f)
self.bend2 = self.bend_loss(self.ddf_f_mv2, self.ddf_mv2_f)
self.ddf_bend = self.bend1 + self.bend2
self.cyc_consis1 = self.consis_loss(self.i_mv1_img, self.r_mv1_img, self.i_fix_img, self.r_fix1_img)
self.cyc_consis2 = self.consis_loss(self.i_mv2_img, self.r_mv2_img, self.i_fix_img, self.r_fix2_img)
self.cycle_consistent = self.cyc_consis1 + self.cyc_consis2
'''
This would duplicate the nvi1 + nvi2 terms below: nvi1 already pulls w_mv1_img towards i_fix_img
and nvi2 pulls w_mv2_img towards i_fix_img, which is equivalent to enforcing w_mv1_img == w_mv2_img.
'''
# self.consis=restore_loss(self.w_mv1_img, self.w_mv2_img)
# self.multi_consis=tf.reduce_mean(loss.multi_scale_loss(self.w_mv1_lab, self.w_mv2_lab, 'dice', [0, 1, 2, 4]))
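# cross-image consistency: route each fixed-space warp through the other inverse field so mv1 and mv2 are compared in each other's space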
_warp_mv1_mv2 = self.warp_image(self.w_mv1_img, self.ddf_f_mv2)
_warp_mv2_mv1 = self.warp_image(self.w_mv2_img, self.ddf_f_mv1)
self.multi_consis = self.cal_nvi_loss(self.i_mv1_img, _warp_mv2_mv1, self.i_mv2_img, _warp_mv1_mv2)
self.nvi1 = self.cal_nvi_loss(self.w_mv1_img, self.i_fix_img, self.w_fix1_img, self.i_mv1_img)
self.nvi2 = self.cal_nvi_loss(self.w_mv2_img, self.i_fix_img, self.w_fix2_img, self.i_mv2_img)
self.nvi_loss = self.nvi1 + self.nvi2
# self.anti_folding_loss = self.args.lambda_anti* (loss.anti_folding(self.ddf_mv1_f) + loss.anti_folding(self.ddf_f_mv1))
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(
    self.nvi_loss
    + self.args.lambda_bend * self.ddf_bend
    + self.args.lambda_cycle_consis * self.cycle_consistent
    + self.args.lambda_multi_consis * self.multi_consis,
    global_step=self.global_step)
self.logger.debug("build network finish")
|
[
"evaluate.metric.print_mean_and_std",
"evaluate.metric.calculate_binary_dice",
"evaluate.metric.neg_jac"
] |
[((760, 798), 'model.base_model.BaseModelV2.__init__', 'BaseModelV2.__init__', (['self', 'sess', 'args'], {}), '(self, sess, args)\n', (780, 798), False, 'from model.base_model import BaseModelV2\n'), ((829, 858), 'learn2reg.sampler.MMSampler', 'MMSampler', (['self.args', '"""train"""'], {}), "(self.args, 'train')\n", (838, 858), False, 'from learn2reg.sampler import MMSampler\n'), ((891, 923), 'learn2reg.sampler.MMSampler', 'MMSampler', (['self.args', '"""validate"""'], {}), "(self.args, 'validate')\n", (900, 923), False, 'from learn2reg.sampler import MMSampler\n'), ((1092, 1132), 'tfop.utils.get_reference_grid', 'util.get_reference_grid', (['self.image_size'], {}), '(self.image_size)\n', (1115, 1132), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((1353, 1402), 'tfop.utils.resample_linear', 'util.resample_linear', (['input_', '(self.grid_ref + ddf)'], {}), '(input_, self.grid_ref + ddf)\n', (1373, 1402), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((4929, 4962), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4960, 4962), True, 'import tensorflow as tf\n'), ((5016, 5073), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.args.log_dir', 'self.sess.graph'], {}), '(self.args.log_dir, self.sess.graph)\n', (5037, 5073), True, 'import tensorflow as tf\n'), ((5095, 5111), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5109, 5111), True, 'import tensorflow as tf\n'), ((6079, 6121), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""nvi_loss_1"""', 'self.nvi1'], {}), "('nvi_loss_1', self.nvi1)\n", (6096, 6121), True, 'import tensorflow as tf\n'), ((6129, 6171), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""nvi_loss_2"""', 'self.nvi2'], {}), "('nvi_loss_2', self.nvi2)\n", (6146, 6171), True, 'import tensorflow as tf\n'), ((6180, 6222), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""ddf1_bend"""', 'self.bend1'], {}), "('ddf1_bend', self.bend1)\n", (6197, 6222), True, 'import tensorflow as tf\n'), ((6231, 6273), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""ddf2_bend"""', 'self.bend2'], {}), "('ddf2_bend', self.bend2)\n", (6248, 6273), True, 'import tensorflow as tf\n'), ((6281, 6333), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""multi_consis"""', 'self.multi_consis'], {}), "('multi_consis', self.multi_consis)\n", (6298, 6333), True, 'import tensorflow as tf\n'), ((6342, 6398), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cycle_consis"""', 'self.cycle_consistent'], {}), "('cycle_consis', self.cycle_consistent)\n", (6359, 6398), True, 'import tensorflow as tf\n'), ((7047, 7069), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (7067, 7069), True, 'import tensorflow as tf\n'), ((9026, 9077), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['input_mv_lab1', 'input_fix_lab'], {}), '(input_mv_lab1, input_fix_lab)\n', (9047, 9077), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9105, 9156), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['input_mv_lab2', 'input_fix_lab'], {}), '(input_mv_lab2, input_fix_lab)\n', (9126, 9156), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9179, 9231), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['warped_mv1_lab', 'input_fix_lab'], {}), '(warped_mv1_lab, input_fix_lab)\n', (9200, 9231), False, 'from 
evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9254, 9306), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['warped_mv2_lab', 'input_fix_lab'], {}), '(warped_mv2_lab, input_fix_lab)\n', (9275, 9306), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9321, 9350), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_lab_fixs[0]'], {}), '(p_lab_fixs[0])\n', (9335, 9350), True, 'import SimpleITK as sitk\n'), ((9711, 9737), 'evaluate.metric.neg_jac', 'neg_jac', (['ddf_mv1_f[0, ...]'], {}), '(ddf_mv1_f[0, ...])\n', (9718, 9737), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9763, 9789), 'evaluate.metric.neg_jac', 'neg_jac', (['ddf_mv2_f[0, ...]'], {}), '(ddf_mv2_f[0, ...])\n', (9770, 9789), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((10199, 10232), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10230, 10232), True, 'import tensorflow as tf\n'), ((10254, 10270), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10268, 10270), True, 'import tensorflow as tf\n'), ((11681, 11714), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11712, 11714), True, 'import tensorflow as tf\n'), ((11736, 11752), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11750, 11752), True, 'import tensorflow as tf\n'), ((11944, 11986), 'learn2reg.challenge_sampler.CHallengeSampler', 'CHallengeSampler', (['self.args', 'self.is_train'], {}), '(self.args, self.is_train)\n', (11960, 11986), False, 'from learn2reg.challenge_sampler import CHallengeSampler\n'), ((14292, 14323), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (14303, 14323), True, 'import tensorflow as tf\n'), ((14353, 14460), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.args.lr', 'self.global_step', 'self.args.decay_freq', '(0.96)'], {'staircase': '(True)'}), '(self.args.lr, self.global_step, self.args.\n decay_freq, 0.96, staircase=True)\n', (14379, 14460), True, 'import tensorflow as tf\n'), ((14498, 14572), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14512, 14572), True, 'import tensorflow as tf\n'), ((14599, 14673), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14613, 14673), True, 'import tensorflow as tf\n'), ((14700, 14774), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14714, 14774), True, 'import tensorflow as tf\n'), ((14801, 14875), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14815, 14875), True, 'import tensorflow as tf\n'), ((14902, 14976), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14916, 14976), True, 'import tensorflow as tf\n'), ((15003, 15077), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (15017, 15077), True, 'import tensorflow as tf\n'), ((15112, 15172), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + [1, 12])'], {}), '(tf.float32, [self.args.batch_size] + [1, 12])\n', (15126, 15172), True, 'import tensorflow as tf\n'), ((15251, 15311), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + [1, 12])'], {}), '(tf.float32, [self.args.batch_size] + [1, 12])\n', (15265, 15311), True, 'import tensorflow as tf\n'), ((15388, 15448), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + [1, 12])'], {}), '(tf.float32, [self.args.batch_size] + [1, 12])\n', (15402, 15448), True, 'import tensorflow as tf\n'), ((15515, 15607), 'tfop.utils.augment_3Ddata_by_affine', 'util.augment_3Ddata_by_affine', (['self.ph_mv_img1', 'self.ph_mv_lab1', 'self.ph_moving_affine1'], {}), '(self.ph_mv_img1, self.ph_mv_lab1, self.\n ph_moving_affine1)\n', (15544, 15607), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((15642, 15734), 'tfop.utils.augment_3Ddata_by_affine', 'util.augment_3Ddata_by_affine', (['self.ph_mv_img2', 'self.ph_mv_lab2', 'self.ph_moving_affine2'], {}), '(self.ph_mv_img2, self.ph_mv_lab2, self.\n ph_moving_affine2)\n', (15671, 15734), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((15769, 15859), 'tfop.utils.augment_3Ddata_by_affine', 'util.augment_3Ddata_by_affine', (['self.ph_fix_img', 'self.ph_fix_lab', 'self.ph_fixed_affine'], {}), '(self.ph_fix_img, self.ph_fix_lab, self.\n ph_fixed_affine)\n', (15798, 15859), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((1819, 1860), 'tensorflow.variable_scope', 'tf.variable_scope', (['scop_name'], {'reuse': 'reuse'}), '(scop_name, reuse=reuse)\n', (1836, 1860), True, 'import tensorflow as tf\n'), ((1883, 1995), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'input_layer', '(2)', 'nc[0]'], {'k_conv0': '[7, 7, 7]', 'name': '"""local_down_0"""'}), "(self.is_train, input_layer, 2, nc[0], k_conv0\n =[7, 7, 7], name='local_down_0')\n", (1912, 1995), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2012, 2100), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'h0', 'nc[0]', 'nc[1]'], {'name': '"""local_down_1"""'}), "(self.is_train, h0, nc[0], nc[1], name=\n 'local_down_1')\n", (2041, 2100), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2118, 2206), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'h1', 'nc[1]', 'nc[2]'], {'name': '"""local_down_2"""'}), "(self.is_train, h1, nc[1], nc[2], name=\n 'local_down_2')\n", (2147, 2206), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2224, 2312), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'h2', 'nc[2]', 'nc[3]'], {'name': '"""local_down_3"""'}), "(self.is_train, h2, nc[2], nc[3], name=\n 'local_down_3')\n", (2253, 2312), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((3080, 3106), 'tensorflow.stack', 'tf.stack', (['ddf_list'], {'axis': '(5)'}), '(ddf_list, axis=5)\n', (3088, 3106), True, 'import tensorflow as tf\n'), ((3132, 3163), 
'tensorflow.reduce_sum', 'tf.reduce_sum', (['ddf_list'], {'axis': '(5)'}), '(ddf_list, axis=5)\n', (3145, 3163), True, 'import tensorflow as tf\n'), ((3320, 3347), 'tensorflow.stack', 'tf.stack', (['ddf_list2'], {'axis': '(5)'}), '(ddf_list2, axis=5)\n', (3328, 3347), True, 'import tensorflow as tf\n'), ((3373, 3405), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ddf_list2'], {'axis': '(5)'}), '(ddf_list2, axis=5)\n', (3386, 3405), True, 'import tensorflow as tf\n'), ((4195, 4227), 'tfop.losses.restore_loss', 'restore_loss', (['i_mv_img', 'r_mv_img'], {}), '(i_mv_img, r_mv_img)\n', (4207, 4227), False, 'from tfop.losses import restore_loss\n'), ((4230, 4264), 'tfop.losses.restore_loss', 'restore_loss', (['i_fix_img', 'r_fix_img'], {}), '(i_fix_img, r_fix_img)\n', (4242, 4264), False, 'from tfop.losses import restore_loss\n'), ((4393, 4447), 'tfop.losses.local_displacement_energy', 'loss.local_displacement_energy', (['ddf_mv_f', '"""bending"""', '(1)'], {}), "(ddf_mv_f, 'bending', 1)\n", (4423, 4447), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((4484, 4538), 'tfop.losses.local_displacement_energy', 'loss.local_displacement_energy', (['ddf_f_mv', '"""bending"""', '(1)'], {}), "(ddf_f_mv, 'bending', 1)\n", (4514, 4538), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((6508, 6558), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.i_fix_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.i_fix_img[:, :, 48, :, 0], -1)\n', (6522, 6558), True, 'import tensorflow as tf\n'), ((6603, 6654), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.w_fix1_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.w_fix1_img[:, :, 48, :, 0], -1)\n', (6617, 6654), True, 'import tensorflow as tf\n'), ((6692, 6742), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.i_mv1_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.i_mv1_img[:, :, 48, :, 0], -1)\n', (6706, 6742), True, 'import tensorflow as tf\n'), ((6787, 6837), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.w_mv1_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.w_mv1_img[:, :, 48, :, 0], -1)\n', (6801, 6837), True, 'import tensorflow as tf\n'), ((6875, 6925), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.i_mv2_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.i_mv2_img[:, :, 48, :, 0], -1)\n', (6889, 6925), True, 'import tensorflow as tf\n'), ((6970, 7020), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.w_mv2_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.w_mv2_img[:, :, 48, :, 0], -1)\n', (6984, 7020), True, 'import tensorflow as tf\n'), ((7756, 7848), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['warped_mv1_lab', 'None', 'self.args.sample_dir', "('%d_warped_mv1_lab' % iter)"], {}), "(warped_mv1_lab, None, self.args.sample_dir, \n '%d_warped_mv1_lab' % iter)\n", (7771, 7848), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((7858, 7950), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['warped_mv1_lab', 'None', 'self.args.sample_dir', "('%d_warped_mv2_lab' % iter)"], {}), "(warped_mv1_lab, None, self.args.sample_dir, \n '%d_warped_mv2_lab' % iter)\n", (7873, 7950), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((7960, 8045), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['input_fix_lab', 'None', 'self.args.sample_dir', "('%d_fixe_lab' % iter)"], {}), "(input_fix_lab, None, self.args.sample_dir, '%d_fixe_lab' % iter\n )\n", (7975, 8045), False, 'from 
sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8309, 8403), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['warped_mv1_img', 'None', 'self.args.sample_dir', "('%d_warped_mv1_img' % iter)"], {}), "(warped_mv1_img, None, self.args.sample_dir, \n '%d_warped_mv1_img' % iter)\n", (8326, 8403), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8413, 8505), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['input_mv1_img', 'None', 'self.args.sample_dir', "('%d_input_mv1_img' % iter)"], {}), "(input_mv1_img, None, self.args.sample_dir, \n '%d_input_mv1_img' % iter)\n", (8430, 8505), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8515, 8600), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['i_mv1_lab', 'None', 'self.args.sample_dir', "('%d_input_mv1_lab' % iter)"], {}), "(i_mv1_lab, None, self.args.sample_dir, '%d_input_mv1_lab' %\n iter)\n", (8530, 8600), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8611, 8705), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['warped_mv2_img', 'None', 'self.args.sample_dir', "('%d_warped_mv2_img' % iter)"], {}), "(warped_mv2_img, None, self.args.sample_dir, \n '%d_warped_mv2_img' % iter)\n", (8628, 8705), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8715, 8807), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['input_mv2_img', 'None', 'self.args.sample_dir', "('%d_input_mv2_img' % iter)"], {}), "(input_mv2_img, None, self.args.sample_dir, \n '%d_input_mv2_img' % iter)\n", (8732, 8807), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8817, 8902), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['i_mv2_lab', 'None', 'self.args.sample_dir', "('%d_input_mv2_lab' % iter)"], {}), "(i_mv2_lab, None, self.args.sample_dir, '%d_input_mv2_lab' %\n iter)\n", (8832, 8902), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8913, 8999), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['input_fix_img', 'None', 'self.args.sample_dir', "('%d_fixe_img' % iter)"], {}), "(input_fix_img, None, self.args.sample_dir, '%d_fixe_img' %\n iter)\n", (8930, 8999), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((9370, 9404), 'numpy.squeeze', 'np.squeeze', (['warped_mv1_lab[0, ...]'], {}), '(warped_mv1_lab[0, ...])\n', (9380, 9404), True, 'import numpy as np\n'), ((9404, 9437), 'numpy.squeeze', 'np.squeeze', (['input_fix_lab[0, ...]'], {}), '(input_fix_lab[0, ...])\n', (9414, 9437), True, 'import numpy as np\n'), ((9488, 9522), 'numpy.squeeze', 'np.squeeze', (['warped_mv2_lab[0, ...]'], {}), '(warped_mv2_lab[0, ...])\n', (9498, 9522), True, 'import numpy as np\n'), ((9522, 9555), 'numpy.squeeze', 'np.squeeze', (['input_mv_lab1[0, ...]'], {}), '(input_mv_lab1[0, ...])\n', (9532, 9555), True, 'import numpy as np\n'), ((11264, 11302), 'config.Defines.Get_Name_By_Index', 'Get_Name_By_Index', (['self.args.component'], {}), '(self.args.component)\n', (11281, 11302), False, 'from config.Defines import Get_Name_By_Index\n'), ((11491, 11565), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', 
"(self.args.MODEL_ID + '_' + itr)", 'res[itr]'], {}), "(self.args.res_excel, self.args.MODEL_ID + '_' + itr, res[itr])\n", (11502, 11565), False, 'from excelutil.output2excel import outpu2excel\n'), ((11578, 11611), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['res[itr]', 'itr'], {}), '(res[itr], itr)\n', (11596, 11611), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((13187, 13240), 'tfop.utils.random_transform_generator', 'util.random_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (13218, 13240), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((13287, 13345), 'tfop.utils.random_transform_generator', 'util.random_transform_generator', (['self.args.batch_size', '(0.1)'], {}), '(self.args.batch_size, 0.1)\n', (13318, 13345), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((13392, 13450), 'tfop.utils.random_transform_generator', 'util.random_transform_generator', (['self.args.batch_size', '(0.1)'], {}), '(self.args.batch_size, 0.1)\n', (13423, 13450), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((1530, 1574), 'tfop.layers.resize_volume', 'layer.resize_volume', (['mv_img', 'self.image_size'], {}), '(mv_img, self.image_size)\n', (1549, 1574), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2326, 2397), 'tfop.layers.conv3_block', 'layer.conv3_block', (['self.is_train', 'h3', 'nc[3]', 'nc[4]'], {'name': '"""local_deep_4"""'}), "(self.is_train, h3, nc[3], nc[4], name='local_deep_4')\n", (2343, 2397), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2951, 3037), 'tfop.layers.ddf_summand', 'layer.ddf_summand', (['hm[4 - idx]', 'nc[idx]', 'self.image_size'], {'name': "('ddf1_sum_%d' % idx)"}), "(hm[4 - idx], nc[idx], self.image_size, name='ddf1_sum_%d' %\n idx)\n", (2968, 3037), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((3190, 3276), 'tfop.layers.ddf_summand', 'layer.ddf_summand', (['hm[4 - idx]', 'nc[idx]', 'self.image_size'], {'name': "('ddf2_sum_%d' % idx)"}), "(hm[4 - idx], nc[idx], self.image_size, name='ddf2_sum_%d' %\n idx)\n", (3207, 3276), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((4775, 4824), 'learn2reg.loss.NVISimilarity', 'NVISimilarity', (['warped_mv1_img', 'input_FIX_image', 's'], {}), '(warped_mv1_img, input_FIX_image, s)\n', (4788, 4824), False, 'from learn2reg.loss import NVISimilarity\n'), ((5801, 5840), 'numpy.mod', 'np.mod', (['glob_step', 'self.args.print_freq'], {}), '(glob_step, self.args.print_freq)\n', (5807, 5840), True, 'import numpy as np\n'), ((5939, 5977), 'numpy.mod', 'np.mod', (['glob_step', 'self.args.save_freq'], {}), '(glob_step, self.args.save_freq)\n', (5945, 5977), True, 'import numpy as np\n'), ((13890, 13944), 'tfop.utils.initial_transform_generator', 'util.initial_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (13922, 13944), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((13995, 14049), 'tfop.utils.initial_transform_generator', 'util.initial_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (14027, 14049), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((14100, 14154), 'tfop.utils.initial_transform_generator', 'util.initial_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (14132, 14154), 
True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((17748, 17790), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (17770, 17790), True, 'import tensorflow as tf\n'), ((2418, 2510), 'tfop.layers.upsample_resnet_block', 'layer.upsample_resnet_block', (['self.is_train', 'hm[0]', 'hc3', 'nc[4]', 'nc[3]'], {'name': '"""local_up_3"""'}), "(self.is_train, hm[0], hc3, nc[4], nc[3], name=\n 'local_up_3')\n", (2445, 2510), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2550, 2642), 'tfop.layers.upsample_resnet_block', 'layer.upsample_resnet_block', (['self.is_train', 'hm[1]', 'hc2', 'nc[3]', 'nc[2]'], {'name': '"""local_up_2"""'}), "(self.is_train, hm[1], hc2, nc[3], nc[2], name=\n 'local_up_2')\n", (2577, 2642), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2682, 2774), 'tfop.layers.upsample_resnet_block', 'layer.upsample_resnet_block', (['self.is_train', 'hm[2]', 'hc1', 'nc[2]', 'nc[1]'], {'name': '"""local_up_1"""'}), "(self.is_train, hm[2], hc1, nc[2], nc[1], name=\n 'local_up_1')\n", (2709, 2774), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2814, 2906), 'tfop.layers.upsample_resnet_block', 'layer.upsample_resnet_block', (['self.is_train', 'hm[3]', 'hc0', 'nc[1]', 'nc[0]'], {'name': '"""local_up_0"""'}), "(self.is_train, hm[3], hc0, nc[1], nc[0], name=\n 'local_up_0')\n", (2841, 2906), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((12711, 12732), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_tgt'], {}), '(p_tgt)\n', (12725, 12732), True, 'import SimpleITK as sitk\n'), ((12559, 12584), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_ata'], {}), '(p_ata)\n', (12577, 12584), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((12607, 12632), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_tgt'], {}), '(p_tgt)\n', (12625, 12632), False, 'from dirutil.helper import get_name_wo_suffix\n')]
|
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Class to train the Neural Network."""
import numpy as np
from config import CFG
from mcts import MonteCarloTreeSearch, TreeNode
from neural_net import NeuralNetworkWrapper
from evaluate import Evaluate
from copy import deepcopy
class Train(object):
"""Class with functions to train the Neural Network using MCTS.
Attributes:
game: An object containing the game state.
net: An object containing the neural network.
"""
def __init__(self, game, net):
"""Initializes Train with the board state and neural network."""
self.game = game
self.net = net
self.eval_net = NeuralNetworkWrapper(game)
def start(self):
"""Main training loop."""
for i in range(CFG.num_iterations):
print("Iteration", i + 1)
training_data = [] # list to store self play states, pis and vs
for j in range(CFG.num_games):
print("Start Training Self-Play Game", j + 1)
game = self.game.clone() # Create a fresh clone for each game.
self.play_game(game, training_data)
# Save the current neural network model.
self.net.save_model()
# Load the recently saved model into the evaluator network.
self.eval_net.load_model()
# Train the network using self play values.
self.net.train(training_data)
# Initialize MonteCarloTreeSearch objects for both networks.
current_mcts = MonteCarloTreeSearch(self.net)
eval_mcts = MonteCarloTreeSearch(self.eval_net)
evaluator = Evaluate(current_mcts=current_mcts, eval_mcts=eval_mcts,
game=self.game)
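# pit the newly trained network against the snapshot that was saved before this round of training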
wins, losses = evaluator.evaluate()
print("wins:", wins)
print("losses:", losses)
num_games = wins + losses
if num_games == 0:
win_rate = 0
else:
win_rate = wins / num_games
print("win rate:", win_rate)
if win_rate > CFG.eval_win_rate:
# Save current model as the best model.
print("New model saved as best model.")
self.net.save_model("best_model")
else:
print("New model discarded and previous model loaded.")
# Discard current model and use previous best model.
self.net.load_model()
def play_game(self, game, training_data):
"""Loop for each self-play game.
Runs MCTS for each game state and plays a move based on the MCTS output.
Stops when the game is over and prints out a winner.
Args:
game: An object containing the game state.
training_data: A list to store self play states, pis and vs.
"""
mcts = MonteCarloTreeSearch(self.net)
game_over = False
value = 0
self_play_data = []
count = 0
node = TreeNode()
# Keep playing until the game is in a terminal state.
while not game_over:
# MCTS simulations to get the best child node.
if count < CFG.temp_thresh:
best_child = mcts.search(game, node, CFG.temp_init)
else:
best_child = mcts.search(game, node, CFG.temp_final)
# Store state, prob and v for training.
self_play_data.append([deepcopy(game.state),
deepcopy(best_child.parent.child_psas),
0])
action = best_child.action
game.play_action(action) # Play the child node's action.
count += 1
game_over, value = game.check_game_over(game.current_player)
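# detach the chosen child from its parent so it can serve as the root of the next search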
best_child.parent = None
node = best_child # Make the child node the root node.
# Update v as the value of the game result.
for game_state in self_play_data:
value = -value
game_state[2] = value
self.augment_data(game_state, training_data, game.row, game.column)
def augment_data(self, game_state, training_data, row, column):
"""Loop for each self-play game.
Runs MCTS for each game state and plays a move based on the MCTS output.
Stops when the game is over and prints out a winner.
Args:
game_state: An object containing the state, pis and value.
training_data: A list to store self play states, pis and vs.
row: An integer indicating the length of the board row.
column: An integer indicating the length of the board column.
"""
state = deepcopy(game_state[0])
psa_vector = deepcopy(game_state[1])
if CFG.game == 2 or CFG.game == 1:
training_data.append([state, psa_vector, game_state[2]])
else:
psa_vector = np.reshape(psa_vector, (row, column))
# Augment data by rotating and flipping the game state.
for i in range(4):
training_data.append([np.rot90(state, i),
np.rot90(psa_vector, i).flatten(),
game_state[2]])
training_data.append([np.fliplr(np.rot90(state, i)),
np.fliplr(
np.rot90(psa_vector, i)).flatten(),
game_state[2]])
|
[
"evaluate.Evaluate"
] |
[((1816, 1842), 'neural_net.NeuralNetworkWrapper', 'NeuralNetworkWrapper', (['game'], {}), '(game)\n', (1836, 1842), False, 'from neural_net import NeuralNetworkWrapper\n'), ((4048, 4078), 'mcts.MonteCarloTreeSearch', 'MonteCarloTreeSearch', (['self.net'], {}), '(self.net)\n', (4068, 4078), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((4186, 4196), 'mcts.TreeNode', 'TreeNode', ([], {}), '()\n', (4194, 4196), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((5898, 5921), 'copy.deepcopy', 'deepcopy', (['game_state[0]'], {}), '(game_state[0])\n', (5906, 5921), False, 'from copy import deepcopy\n'), ((5943, 5966), 'copy.deepcopy', 'deepcopy', (['game_state[1]'], {}), '(game_state[1])\n', (5951, 5966), False, 'from copy import deepcopy\n'), ((2697, 2727), 'mcts.MonteCarloTreeSearch', 'MonteCarloTreeSearch', (['self.net'], {}), '(self.net)\n', (2717, 2727), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((2752, 2787), 'mcts.MonteCarloTreeSearch', 'MonteCarloTreeSearch', (['self.eval_net'], {}), '(self.eval_net)\n', (2772, 2787), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((2813, 2885), 'evaluate.Evaluate', 'Evaluate', ([], {'current_mcts': 'current_mcts', 'eval_mcts': 'eval_mcts', 'game': 'self.game'}), '(current_mcts=current_mcts, eval_mcts=eval_mcts, game=self.game)\n', (2821, 2885), False, 'from evaluate import Evaluate\n'), ((6119, 6156), 'numpy.reshape', 'np.reshape', (['psa_vector', '(row, column)'], {}), '(psa_vector, (row, column))\n', (6129, 6156), True, 'import numpy as np\n'), ((4631, 4651), 'copy.deepcopy', 'deepcopy', (['game.state'], {}), '(game.state)\n', (4639, 4651), False, 'from copy import deepcopy\n'), ((4688, 4726), 'copy.deepcopy', 'deepcopy', (['best_child.parent.child_psas'], {}), '(best_child.parent.child_psas)\n', (4696, 4726), False, 'from copy import deepcopy\n'), ((6295, 6313), 'numpy.rot90', 'np.rot90', (['state', 'i'], {}), '(state, i)\n', (6303, 6313), True, 'import numpy as np\n'), ((6491, 6509), 'numpy.rot90', 'np.rot90', (['state', 'i'], {}), '(state, i)\n', (6499, 6509), True, 'import numpy as np\n'), ((6353, 6376), 'numpy.rot90', 'np.rot90', (['psa_vector', 'i'], {}), '(psa_vector, i)\n', (6361, 6376), True, 'import numpy as np\n'), ((6603, 6626), 'numpy.rot90', 'np.rot90', (['psa_vector', 'i'], {}), '(psa_vector, i)\n', (6611, 6626), True, 'import numpy as np\n')]
|
from skimage.segmentation._watershed import watershed
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
from tqdm import tqdm
def compute_predictions(image_path, mask_path, gt_path, save_path, nr_modalities, class_labels):
image_filenames = utils.load_filenames(image_path)[::nr_modalities]
mask_filenames = utils.load_filenames(mask_path)
for i in tqdm(range(len(image_filenames))):
image, affine, spacing, header = utils.load_nifty(image_filenames[i])
mask, _, _, _ = utils.load_nifty(mask_filenames[i])
labels = np.unique(mask)
# labels = labels[labels > 0]
for label in np.flip(labels):
mask[mask == label] = label + 1
mask = mask.astype(np.uint8)
mask = watershed(image=image, markers=mask)
for label in labels:
mask[mask == label + 1] = label
utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + ".nii.gz"), mask, affine, spacing, header, is_mask=True)
results = evaluate(gt_path, save_path, class_labels)
return results
|
[
"evaluate.evaluate"
] |
[((360, 391), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['mask_path'], {}), '(mask_path)\n', (380, 391), False, 'from i3Deep import utils\n'), ((1059, 1101), 'evaluate.evaluate', 'evaluate', (['gt_path', 'save_path', 'class_labels'], {}), '(gt_path, save_path, class_labels)\n', (1067, 1101), False, 'from evaluate import evaluate\n'), ((288, 320), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['image_path'], {}), '(image_path)\n', (308, 320), False, 'from i3Deep import utils\n'), ((485, 521), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (501, 521), False, 'from i3Deep import utils\n'), ((547, 582), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['mask_filenames[i]'], {}), '(mask_filenames[i])\n', (563, 582), False, 'from i3Deep import utils\n'), ((601, 616), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (610, 616), True, 'import numpy as np\n'), ((678, 693), 'numpy.flip', 'np.flip', (['labels'], {}), '(labels)\n', (685, 693), True, 'import numpy as np\n'), ((794, 830), 'skimage.segmentation._watershed.watershed', 'watershed', ([], {'image': 'image', 'markers': 'mask'}), '(image=image, markers=mask)\n', (803, 830), False, 'from skimage.segmentation._watershed import watershed\n'), ((944, 997), 'os.path.basename', 'os.path.basename', (["(mask_filenames[i][:-12] + '.nii.gz')"], {}), "(mask_filenames[i][:-12] + '.nii.gz')\n", (960, 997), False, 'import os\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ast
import argparse
import warnings
from functools import partial
from data import read, load_dict, convert_example_to_features
from model import ErnieForTokenClassification
from utils import set_seed
from evaluate import evaluate
import paddle
import paddle.nn.functional as F
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup
from paddlenlp.data import Stack, Pad, Tuple
from paddlenlp.metrics import ChunkEvaluator
warnings.filterwarnings("ignore")
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--model_name", type=str, default="trigger", help="The trigger or role model which you wanna train")
parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.")
parser.add_argument("--tag_path", type=str, default=None, help="tag set path")
parser.add_argument("--train_path", type=str, default=None, help="train data")
parser.add_argument("--dev_path", type=str, default=None, help="dev data")
parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
parser.add_argument("--warmup_proportion", type=float, default=0.1, help="Warmup proportion params for warmup strategy")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest seqence.")
parser.add_argument("--eval_step", type=int, default=100, help="evaluation step")
parser.add_argument("--log_step", type=int, default=20, help="log step")
parser.add_argument("--batch_size", type=int, default=32, help="Total examples' number in batch for training.")
parser.add_argument("--checkpoint", type=str, default=None, help="Directory to model checkpoint")
parser.add_argument("--seed", type=int, default=1000, help="random seed for initialization")
parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
args = parser.parse_args()
# yapf: enable
def train():
# set running envir
paddle.set_device(args.device)
world_size = paddle.distributed.get_world_size()
rank = paddle.distributed.get_rank()
if world_size > 1:
paddle.distributed.init_parallel_env()
set_seed(args.seed)
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
model_name = "ernie-1.0"
# load and process data
tag2id, id2tag = load_dict(args.tag_path)
train_ds = load_dataset(read, data_path=args.train_path, lazy=False)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
tokenizer = ErnieTokenizer.from_pretrained(model_name)
trans_func = partial(convert_example_to_features, tokenizer=tokenizer, tag2id=tag2id, max_seq_length=args.max_seq_len, pad_default_tag="O", is_test=False)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
Stack(), # seq len
Pad(axis=0, pad_val=-1) # tag_ids
): fn(samples)
train_batch_sampler = paddle.io.DistributedBatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
dev_batch_sampler = paddle.io.DistributedBatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
train_loader = paddle.io.DataLoader(train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
# configure model training
ernie = ErnieModel.from_pretrained(model_name)
event_model = ErnieForTokenClassification(ernie, num_classes=len(tag2id))
event_model = paddle.DataParallel(event_model)
num_training_steps = len(train_loader) * args.num_epoch
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
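# exclude bias and normalization parameters from weight decay, the usual transformer fine-tuning convention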
decay_params = [p.name for n, p in event_model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=event_model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params)
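# ChunkEvaluator reports span-level precision/recall/F1 over the BIO tag sequences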
metric = ChunkEvaluator(label_list=tag2id.keys(), suffix=False)
# start to train event_model
global_step, best_f1 = 0, 0.
event_model.train()
for epoch in range(1, args.num_epoch+1):
for batch_data in train_loader:
input_ids, token_type_ids, seq_len, tag_ids = batch_data
# logits: [batch_size, seq_len, num_tags] --> [batch_size*seq_len, num_tags]
logits = event_model(input_ids, token_type_ids).reshape([-1, len(tag2id)])
loss = paddle.mean(F.cross_entropy(logits, tag_ids.reshape([-1]), ignore_index=-1))
loss.backward()
lr_scheduler.step()
optimizer.step()
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_step == 0 and rank == 0:
print(f"{args.model_name} - epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
if global_step > 0 and global_step % args.eval_step == 0 and rank == 0:
precision, recall, f1_score = evaluate(event_model, dev_loader, metric)
event_model.train()
if f1_score > best_f1:
print(f"best F1 performence has been updated: {best_f1:.5f} --> {f1_score:.5f}")
best_f1 = f1_score
paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_best.pdparams")
print(f'{args.model_name} evalution result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f} current best {best_f1:.5f}')
global_step += 1
if rank == 0:
paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_final.pdparams")
if __name__=="__main__":
train()
|
[
"evaluate.evaluate"
] |
[((1129, 1162), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1152, 1162), False, 'import warnings\n'), ((1189, 1221), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__doc__'], {}), '(__doc__)\n', (1212, 1221), False, 'import argparse\n'), ((2804, 2834), 'paddle.set_device', 'paddle.set_device', (['args.device'], {}), '(args.device)\n', (2821, 2834), False, 'import paddle\n'), ((2852, 2887), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (2885, 2887), False, 'import paddle\n'), ((2899, 2928), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (2926, 2928), False, 'import paddle\n'), ((3004, 3023), 'utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (3012, 3023), False, 'from utils import set_seed\n'), ((3188, 3212), 'data.load_dict', 'load_dict', (['args.tag_path'], {}), '(args.tag_path)\n', (3197, 3212), False, 'from data import read, load_dict, convert_example_to_features\n'), ((3228, 3285), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['read'], {'data_path': 'args.train_path', 'lazy': '(False)'}), '(read, data_path=args.train_path, lazy=False)\n', (3240, 3285), False, 'from paddlenlp.datasets import load_dataset\n'), ((3299, 3354), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['read'], {'data_path': 'args.dev_path', 'lazy': '(False)'}), '(read, data_path=args.dev_path, lazy=False)\n', (3311, 3354), False, 'from paddlenlp.datasets import load_dataset\n'), ((3372, 3414), 'paddlenlp.transformers.ErnieTokenizer.from_pretrained', 'ErnieTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (3402, 3414), False, 'from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup\n'), ((3432, 3577), 'functools.partial', 'partial', (['convert_example_to_features'], {'tokenizer': 'tokenizer', 'tag2id': 'tag2id', 'max_seq_length': 'args.max_seq_len', 'pad_default_tag': '"""O"""', 'is_test': '(False)'}), "(convert_example_to_features, tokenizer=tokenizer, tag2id=tag2id,\n max_seq_length=args.max_seq_len, pad_default_tag='O', is_test=False)\n", (3439, 3577), False, 'from functools import partial\n'), ((3975, 4064), 'paddle.io.DistributedBatchSampler', 'paddle.io.DistributedBatchSampler', (['train_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_ds, batch_size=args.batch_size,\n shuffle=True)\n', (4008, 4064), False, 'import paddle\n'), ((4085, 4173), 'paddle.io.DistributedBatchSampler', 'paddle.io.DistributedBatchSampler', (['dev_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dev_ds, batch_size=args.batch_size,\n shuffle=False)\n', (4118, 4173), False, 'import paddle\n'), ((4189, 4282), 'paddle.io.DataLoader', 'paddle.io.DataLoader', (['train_ds'], {'batch_sampler': 'train_batch_sampler', 'collate_fn': 'batchify_fn'}), '(train_ds, batch_sampler=train_batch_sampler,\n collate_fn=batchify_fn)\n', (4209, 4282), False, 'import paddle\n'), ((4296, 4386), 'paddle.io.DataLoader', 'paddle.io.DataLoader', (['dev_ds'], {'batch_sampler': 'dev_batch_sampler', 'collate_fn': 'batchify_fn'}), '(dev_ds, batch_sampler=dev_batch_sampler, collate_fn=\n batchify_fn)\n', (4316, 4386), False, 'import paddle\n'), ((4431, 4469), 'paddlenlp.transformers.ErnieModel.from_pretrained', 'ErnieModel.from_pretrained', (['model_name'], {}), '(model_name)\n', (4457, 4469), False, 'from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup\n'), ((4566, 4598), 
'paddle.DataParallel', 'paddle.DataParallel', (['event_model'], {}), '(event_model)\n', (4585, 4598), False, 'import paddle\n'), ((4679, 4769), 'paddlenlp.transformers.LinearDecayWithWarmup', 'LinearDecayWithWarmup', (['args.learning_rate', 'num_training_steps', 'args.warmup_proportion'], {}), '(args.learning_rate, num_training_steps, args.\n warmup_proportion)\n', (4700, 4769), False, 'from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup\n'), ((2960, 2998), 'paddle.distributed.init_parallel_env', 'paddle.distributed.init_parallel_env', ([], {}), '()\n', (2996, 2998), False, 'import paddle\n'), ((3036, 3067), 'os.path.exists', 'os.path.exists', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3050, 3067), False, 'import os\n'), ((3077, 3102), 'os.mkdir', 'os.mkdir', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3085, 3102), False, 'import os\n'), ((3728, 3771), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id'}), '(axis=0, pad_val=tokenizer.pad_token_id)\n', (3731, 3771), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((3793, 3841), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_type_id'}), '(axis=0, pad_val=tokenizer.pad_token_type_id)\n', (3796, 3841), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((3864, 3871), 'paddlenlp.data.Stack', 'Stack', ([], {}), '()\n', (3869, 3871), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((3891, 3914), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': '(-1)'}), '(axis=0, pad_val=-1)\n', (3894, 3914), False, 'from paddlenlp.data import Stack, Pad, Tuple\n'), ((6159, 6200), 'evaluate.evaluate', 'evaluate', (['event_model', 'dev_loader', 'metric'], {}), '(event_model, dev_loader, metric)\n', (6167, 6200), False, 'from evaluate import evaluate\n')]
|
import argparse
from datetime import datetime
from pathlib import Path
from collections import defaultdict
import numpy as np
import torch as th
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN
from dataset import MAESTRO_small, allocate_batch
from evaluate import evaluate
from constants import HOP_SIZE
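# re-iterate a finite DataLoader forever so the training loop can be driven by a fixed step count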
def cycle(iterable):
while True:
for item in iterable:
yield item
def train(model_type, logdir, batch_size, iterations, validation_interval, sequence_length, learning_rate, weight_decay, cnn_unit, fc_unit, debug=False, save_midi=False):
if logdir is None:
logdir = Path('runs') / ('exp_' + datetime.now().strftime('%y%m%d-%H%M%S'))
Path(logdir).mkdir(parents=True, exist_ok=True)
if sequence_length % HOP_SIZE != 0:
adj_length = sequence_length // HOP_SIZE * HOP_SIZE
print(f'sequence_length: {sequence_length} is not divisible by {HOP_SIZE}.\n \
              adjusted to: {adj_length}')
sequence_length = adj_length
if debug:
dataset = MAESTRO_small(groups=['debug'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=True)
valid_dataset = dataset
iterations = 100
validation_interval = 10
else:
dataset = MAESTRO_small(groups=['train'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=True)
valid_dataset = MAESTRO_small(groups=['validation'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=False)
loader = DataLoader(dataset, batch_size, shuffle=True)
device = th.device('cuda') if th.cuda.is_available() else th.device('cpu')
if model_type == 'baseline':
model = Transcriber(cnn_unit=cnn_unit, fc_unit=fc_unit)
elif model_type == 'rnn':
model = Transcriber_RNN(cnn_unit=cnn_unit, fc_unit=fc_unit)
elif model_type == 'crnn':
model = Transcriber_CRNN(cnn_unit=cnn_unit, fc_unit=fc_unit)
elif model_type == 'ONF':
model = Transcriber_ONF(cnn_unit=cnn_unit, fc_unit=fc_unit)
optimizer = th.optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)
scheduler = StepLR(optimizer, step_size=1000, gamma=0.98)
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
loop = tqdm(range(1, iterations+1))
for step, batch in zip(loop, cycle(loader)):
optimizer.zero_grad()
batch = allocate_batch(batch, device)
frame_logit, onset_logit = model(batch['audio'])
frame_loss = criterion(frame_logit, batch['frame'])
onset_loss = criterion(onset_logit, batch['onset'])
loss = onset_loss + frame_loss
loss.mean().backward()
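# Clip each parameter's gradient norm individually to keep updates stable.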
for parameter in model.parameters():
clip_grad_norm_([parameter], 3.0)
optimizer.step()
scheduler.step()
loop.set_postfix_str("loss: {:.3e}".format(loss.mean()))
if step % validation_interval == 0:
model.eval()
with th.no_grad():
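# Reassigning 'loader' here is safe: cycle() above holds its own reference to the training DataLoader.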
loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
metrics = defaultdict(list)
for batch in loader:
batch_results = evaluate(model, batch, device)
for key, value in batch_results.items():
metrics[key].extend(value)
print('')
for key, value in metrics.items():
if key[-2:] == 'f1' or 'loss' in key:
print(f'{key:27} : {np.mean(value):.4f}')
model.train()
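# Checkpoint the weights together with the hyper-parameters needed to rebuild the model.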
th.save({'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'step' : step,
'cnn_unit' : cnn_unit,
'fc_unit' : fc_unit
},
Path(logdir) / f'model-{step}.pt')
del dataset, valid_dataset
test_dataset = MAESTRO_small(groups=['test'], hop_size=HOP_SIZE, random_sample=False)
model.eval()
with th.no_grad():
loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
metrics = defaultdict(list)
for batch in loader:
batch_results = evaluate(model, batch, device, save=save_midi, save_path=logdir)
for key, value in batch_results.items():
metrics[key].extend(value)
print('')
for key, value in metrics.items():
if key[-2:] == 'f1' or 'loss' in key:
print(f'{key} : {np.mean(value)}')
with open(Path(logdir) / 'results.txt', 'w') as f:
for key, values in metrics.items():
_, category, name = key.split('/')
metric_string = f'{category:>32} {name:26}: {np.mean(values):.3f} +- {np.std(values):.3f}'
print(metric_string)
f.write(metric_string + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', default='baseline', type=str)
parser.add_argument('--logdir', default=None, type=str)
parser.add_argument('-v', '--sequence_length', default=102400, type=int)
parser.add_argument('-lr', '--learning_rate', default=6e-4, type=float)
parser.add_argument('-b', '--batch_size', default=16, type=int)
parser.add_argument('-i', '--iterations', default=10000, type=int)
parser.add_argument('-vi', '--validation_interval', default=1000, type=int)
parser.add_argument('-wd', '--weight_decay', default=0)
parser.add_argument('-cnn', '--cnn_unit', default=48, type=int)
parser.add_argument('-fc', '--fc_unit', default=256, type=int)
parser.add_argument('--save_midi', action='store_true')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
train(**vars(args))
|
[
"evaluate.evaluate"
] |
[((1710, 1755), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)'}), '(dataset, batch_size, shuffle=True)\n', (1720, 1755), False, 'from torch.utils.data import DataLoader\n'), ((2338, 2383), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(1000)', 'gamma': '(0.98)'}), '(optimizer, step_size=1000, gamma=0.98)\n', (2344, 2383), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((2400, 2422), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2420, 2422), True, 'import torch.nn as nn\n'), ((4088, 4158), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['test']", 'hop_size': 'HOP_SIZE', 'random_sample': '(False)'}), "(groups=['test'], hop_size=HOP_SIZE, random_sample=False)\n", (4101, 4158), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((5041, 5066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5064, 5066), False, 'import argparse\n'), ((1237, 1345), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['debug']", 'sequence_length': 'sequence_length', 'hop_size': 'HOP_SIZE', 'random_sample': '(True)'}), "(groups=['debug'], sequence_length=sequence_length, hop_size=\n HOP_SIZE, random_sample=True)\n", (1250, 1345), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((1459, 1567), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['train']", 'sequence_length': 'sequence_length', 'hop_size': 'HOP_SIZE', 'random_sample': '(True)'}), "(groups=['train'], sequence_length=sequence_length, hop_size=\n HOP_SIZE, random_sample=True)\n", (1472, 1567), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((1587, 1700), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['validation']", 'sequence_length': 'sequence_length', 'hop_size': 'HOP_SIZE', 'random_sample': '(False)'}), "(groups=['validation'], sequence_length=sequence_length,\n hop_size=HOP_SIZE, random_sample=False)\n", (1600, 1700), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((1791, 1813), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (1811, 1813), True, 'import torch as th\n'), ((1770, 1787), 'torch.device', 'th.device', (['"""cuda"""'], {}), "('cuda')\n", (1779, 1787), True, 'import torch as th\n'), ((1819, 1835), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (1828, 1835), True, 'import torch as th\n'), ((1886, 1933), 'model.Transcriber', 'Transcriber', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (1897, 1933), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((2594, 2623), 'dataset.allocate_batch', 'allocate_batch', (['batch', 'device'], {}), '(batch, device)\n', (2608, 2623), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((4185, 4197), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (4195, 4197), True, 'import torch as th\n'), ((4216, 4269), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(test_dataset, batch_size=1, shuffle=False)\n', (4226, 4269), False, 'from torch.utils.data import DataLoader\n'), ((4288, 4305), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4299, 4305), False, 'from collections import defaultdict\n'), ((816, 828), 'pathlib.Path', 'Path', (['"""runs"""'], {}), "('runs')\n", (820, 828), False, 'from pathlib import Path\n'), ((887, 899), 'pathlib.Path', 'Path', 
(['logdir'], {}), '(logdir)\n', (891, 899), False, 'from pathlib import Path\n'), ((1980, 2031), 'model.Transcriber_RNN', 'Transcriber_RNN', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (1995, 2031), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((2931, 2964), 'torch.nn.utils.clip_grad_norm_', 'clip_grad_norm_', (['[parameter]', '(3.0)'], {}), '([parameter], 3.0)\n', (2946, 2964), False, 'from torch.nn.utils import clip_grad_norm_\n'), ((3998, 4010), 'pathlib.Path', 'Path', (['logdir'], {}), '(logdir)\n', (4002, 4010), False, 'from pathlib import Path\n'), ((4363, 4427), 'evaluate.evaluate', 'evaluate', (['model', 'batch', 'device'], {'save': 'save_midi', 'save_path': 'logdir'}), '(model, batch, device, save=save_midi, save_path=logdir)\n', (4371, 4427), False, 'from evaluate import evaluate\n'), ((2079, 2131), 'model.Transcriber_CRNN', 'Transcriber_CRNN', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (2095, 2131), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((3168, 3180), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (3178, 3180), True, 'import torch as th\n'), ((3207, 3270), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(valid_dataset, batch_size=batch_size, shuffle=False)\n', (3217, 3270), False, 'from torch.utils.data import DataLoader\n'), ((3297, 3314), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3308, 3314), False, 'from collections import defaultdict\n'), ((4685, 4697), 'pathlib.Path', 'Path', (['logdir'], {}), '(logdir)\n', (4689, 4697), False, 'from pathlib import Path\n'), ((2178, 2229), 'model.Transcriber_ONF', 'Transcriber_ONF', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (2193, 2229), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((3388, 3418), 'evaluate.evaluate', 'evaluate', (['model', 'batch', 'device'], {}), '(model, batch, device)\n', (3396, 3418), False, 'from evaluate import evaluate\n'), ((4874, 4889), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (4881, 4889), True, 'import numpy as np\n'), ((4899, 4913), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (4905, 4913), True, 'import numpy as np\n'), ((841, 855), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (853, 855), False, 'from datetime import datetime\n'), ((4652, 4666), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (4659, 4666), True, 'import numpy as np\n'), ((3715, 3729), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (3722, 3729), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import List
import numpy as np
import torch
from torch.utils.data import DataLoader
import evaluate
from accelerate import Accelerator, DistributedType
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from transformers import (
AdamW,
AutoModelForSequenceClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
set_seed,
)
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
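# Batch sizes above MAX_GPU_BATCH_SIZE are emulated with gradient accumulation (see training_function).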
# New Code #
# We need a different `get_dataloaders` function that will build dataloaders from index lists
def get_fold_dataloaders(
accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
"""
Gets a set of train, valid, and test dataloaders for a particular fold
Args:
accelerator (`Accelerator`):
The main `Accelerator` object
train_idxs (list of `int`):
The split indices for the training dataset
valid_idxs (list of `int`):
The split indices for the validation dataset
batch_size (`int`):
The size of the minibatch. Default is 16
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = DatasetDict(
{
"train": dataset["train"].select(train_idxs),
"validation": dataset["train"].select(valid_idxs),
"test": dataset["validation"],
}
)
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
test_dataloader = DataLoader(
tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
# New Code #
test_labels = None
test_predictions = []
# Download the dataset
datasets = load_dataset("glue", "mrpc")
# Create our splits
kfold = StratifiedKFold(n_splits=int(args.num_folds))
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
correct_bias = config["correct_bias"]
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
# New Code #
# Create our folds:
folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
# Iterate over them
for train_idxs, valid_idxs in folds:
train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
accelerator,
datasets,
train_idxs,
valid_idxs,
)
if test_labels is None:
test_labels = datasets["validation"]["label"]
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# We also run predictions on the test set at the very end
fold_predictions = []
for step, batch in enumerate(test_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits
predictions, references = accelerator.gather((predictions, batch["labels"]))
fold_predictions.append(predictions.cpu())
metric.add_batch(
predictions=predictions.argmax(dim=-1),
references=references,
)
test_metric = metric.compute()
# Store this fold's test-set logits so they can be ensembled across folds.
test_predictions.append(torch.cat(fold_predictions, dim=0))
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
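# Soft-voting ensemble: sum the logits across folds, divide by the fold count, then take the argmax.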
preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
test_metric = metric.compute(predictions=preds, references=test_labels)
accelerator.print("Average test metrics from all folds:", test_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
# New Code #
parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "correct_bias": True, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
|
[
"evaluate.load"
] |
[((2841, 2889), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""bert-base-cased"""'], {}), "('bert-base-cased')\n", (2870, 2889), False, 'from transformers import AdamW, AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((4246, 4349), 'torch.utils.data.DataLoader', 'DataLoader', (["tokenized_datasets['train']"], {'shuffle': '(True)', 'collate_fn': 'collate_fn', 'batch_size': 'batch_size'}), "(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn,\n batch_size=batch_size)\n", (4256, 4349), False, 'from torch.utils.data import DataLoader\n'), ((4382, 4497), 'torch.utils.data.DataLoader', 'DataLoader', (["tokenized_datasets['validation']"], {'shuffle': '(False)', 'collate_fn': 'collate_fn', 'batch_size': 'EVAL_BATCH_SIZE'}), "(tokenized_datasets['validation'], shuffle=False, collate_fn=\n collate_fn, batch_size=EVAL_BATCH_SIZE)\n", (4392, 4497), False, 'from torch.utils.data import DataLoader\n'), ((4530, 4638), 'torch.utils.data.DataLoader', 'DataLoader', (["tokenized_datasets['test']"], {'shuffle': '(False)', 'collate_fn': 'collate_fn', 'batch_size': 'EVAL_BATCH_SIZE'}), "(tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn,\n batch_size=EVAL_BATCH_SIZE)\n", (4540, 4638), False, 'from torch.utils.data import DataLoader\n'), ((4859, 4887), 'datasets.load_dataset', 'load_dataset', (['"""glue"""', '"""mrpc"""'], {}), "('glue', 'mrpc')\n", (4871, 4887), False, 'from datasets import DatasetDict, load_dataset\n'), ((5017, 5080), 'accelerate.Accelerator', 'Accelerator', ([], {'cpu': 'args.cpu', 'mixed_precision': 'args.mixed_precision'}), '(cpu=args.cpu, mixed_precision=args.mixed_precision)\n', (5028, 5080), False, 'from accelerate import Accelerator, DistributedType\n'), ((5362, 5391), 'evaluate.load', 'evaluate.load', (['"""glue"""', '"""mrpc"""'], {}), "('glue', 'mrpc')\n", (5375, 5391), False, 'import evaluate\n'), ((5649, 5663), 'transformers.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (5657, 5663), False, 'from transformers import AdamW, AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((10337, 10410), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple example of training script."""'}), "(description='Simple example of training script.')\n", (10360, 10410), False, 'import argparse\n'), ((5730, 5766), 'numpy.zeros', 'np.zeros', (["datasets['train'].num_rows"], {}), "(datasets['train'].num_rows)\n", (5738, 5766), True, 'import numpy as np\n'), ((6271, 6362), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['"""bert-base-cased"""'], {'return_dict': '(True)'}), "('bert-base-cased',\n return_dict=True)\n", (6321, 6362), False, 'from transformers import AdamW, AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n'), ((9812, 9846), 'torch.cat', 'torch.cat', (['fold_predictions'], {'dim': '(0)'}), '(fold_predictions, dim=0)\n', (9821, 9846), False, 'import torch\n'), ((9292, 9307), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9305, 9307), False, 'import torch\n'), ((8431, 8446), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8444, 8446), False, 'import torch\n'), ((10067, 10103), 'torch.stack', 'torch.stack', (['test_predictions'], {'dim': '(0)'}), '(test_predictions, dim=0)\n', (10078, 10103), False, 'import torch\n')]
|
"""
This script demonstrates initialisation, training, evaluation, and forecasting of ForecastNet. The dataset used for the
time-invariance test in section 6.1 of the ForecastNet paper is used for this demonstration.
Paper:
"ForecastNet: A Time-Variant Deep Feed-Forward Neural Network Architecture for Multi-Step-Ahead Time-Series Forecasting"
by <NAME>, <NAME>, and <NAME>
Link to the paper: https://arxiv.org/abs/2002.04155
"""
import numpy as np
import matplotlib.pyplot as plt
from forecastNet import forecastnet
from train import train
from evaluate import evaluate
from demoDataset import generate_data
# Use a fixed seed for reproducible results
np.random.seed(1)
# Generate the dataset
train_data, test_data, valid_data, period = generate_data(T=2750, period=50)
# Model parameters
model_type = 'dense2' #'dense' or 'conv', 'dense2' or 'conv2'
in_seq_length = 2 * period
hidden_dim = 24
out_seq_length = period
learning_rate = 0.0001
n_epochs = 100
# Initialise model
fcstnet = forecastnet(in_seq_length=in_seq_length, out_seq_length=out_seq_length, hidden_dim=hidden_dim,
learning_rate=learning_rate, n_epochs=n_epochs, save_file='./forecastnet3.ckpt', model=model_type)
# Train the model
training_costs, validation_costs = train(fcstnet, train_data, valid_data)
# Plot the training curves
plt.figure()
plt.plot(training_costs)
plt.plot(validation_costs)
# Evaluate the model
mase, smape = evaluate(fcstnet, test_data, return_lists=False)
print('')
print('MASE:', mase)
print('SMAPE:', smape)
# Generate and plot forecasts for various samples from the test dataset
for start_idx in [0, 50, 100]:
test_sample = test_data[:, start_idx:]
# Models with a Gaussian Mixture Density Component output
if model_type == 'dense' or model_type == 'conv':
# Generate a set of n_samples forecasts (Monte Carlo Forecasts)
n_samples = 10 # 100 is a better value, but takes longer to compute
batch_size = test_sample.shape[0]
y_pred = np.zeros((batch_size, fcstnet.out_seq_length, n_samples))
mu = np.zeros((batch_size, fcstnet.out_seq_length, n_samples))
sigma = np.zeros((batch_size, fcstnet.out_seq_length, n_samples))
for i in range(n_samples):
print('Forecast sample', i)
y_pred[:, :, i], mu[:, :, i], sigma[:, :, i] = fcstnet.forecast(test_sample)
# Compute the Monte Carlo estimates of the mean and standard deviation
s_mean = np.mean(y_pred, axis=2)
s_std = np.std(y_pred, axis=2)
botVarLine = s_mean - s_std
topVarLine = s_mean + s_std
# Plot the Monte Carlo mean and standard deviation
plt.figure()
plt.plot(np.arange(0, fcstnet.in_seq_length + fcstnet.out_seq_length),
test_sample[0, 0:fcstnet.in_seq_length + fcstnet.out_seq_length],
'o-', label='test_data')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
s_mean[0, :],
'*-', linewidth=0.7, label='mean')
plt.fill_between(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
botVarLine[0, :],
topVarLine[0, :],
color='gray', alpha=0.3, label='Uncertainty')
# Models with a linear output
elif model_type == 'dense2' or model_type == 'conv2':
# Generate a forecast
y_pred = fcstnet.forecast(test_sample)
# Plot the forecast
plt.figure()
plt.plot(np.arange(0, fcstnet.in_seq_length + fcstnet.out_seq_length),
test_sample[0, 0:fcstnet.in_seq_length + fcstnet.out_seq_length],
'o-', label='test_data')
plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
y_pred[0, :],
'*-', linewidth=0.7, label='mean')
plt.show()
|
[
"evaluate.evaluate"
] |
[((656, 673), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (670, 673), True, 'import numpy as np\n'), ((742, 774), 'demoDataset.generate_data', 'generate_data', ([], {'T': '(2750)', 'period': '(50)'}), '(T=2750, period=50)\n', (755, 774), False, 'from demoDataset import generate_data\n'), ((993, 1194), 'forecastNet.forecastnet', 'forecastnet', ([], {'in_seq_length': 'in_seq_length', 'out_seq_length': 'out_seq_length', 'hidden_dim': 'hidden_dim', 'learning_rate': 'learning_rate', 'n_epochs': 'n_epochs', 'save_file': '"""./forecastnet3.ckpt"""', 'model': 'model_type'}), "(in_seq_length=in_seq_length, out_seq_length=out_seq_length,\n hidden_dim=hidden_dim, learning_rate=learning_rate, n_epochs=n_epochs,\n save_file='./forecastnet3.ckpt', model=model_type)\n", (1004, 1194), False, 'from forecastNet import forecastnet\n'), ((1258, 1296), 'train.train', 'train', (['fcstnet', 'train_data', 'valid_data'], {}), '(fcstnet, train_data, valid_data)\n', (1263, 1296), False, 'from train import train\n'), ((1324, 1336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1334, 1336), True, 'import matplotlib.pyplot as plt\n'), ((1337, 1361), 'matplotlib.pyplot.plot', 'plt.plot', (['training_costs'], {}), '(training_costs)\n', (1345, 1361), True, 'import matplotlib.pyplot as plt\n'), ((1362, 1388), 'matplotlib.pyplot.plot', 'plt.plot', (['validation_costs'], {}), '(validation_costs)\n', (1370, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1425, 1473), 'evaluate.evaluate', 'evaluate', (['fcstnet', 'test_data'], {'return_lists': '(False)'}), '(fcstnet, test_data, return_lists=False)\n', (1433, 1473), False, 'from evaluate import evaluate\n'), ((3951, 3961), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3959, 3961), True, 'import matplotlib.pyplot as plt\n'), ((2014, 2071), 'numpy.zeros', 'np.zeros', (['(batch_size, fcstnet.out_seq_length, n_samples)'], {}), '((batch_size, fcstnet.out_seq_length, n_samples))\n', (2022, 2071), True, 'import numpy as np\n'), ((2085, 2142), 'numpy.zeros', 'np.zeros', (['(batch_size, fcstnet.out_seq_length, n_samples)'], {}), '((batch_size, fcstnet.out_seq_length, n_samples))\n', (2093, 2142), True, 'import numpy as np\n'), ((2159, 2216), 'numpy.zeros', 'np.zeros', (['(batch_size, fcstnet.out_seq_length, n_samples)'], {}), '((batch_size, fcstnet.out_seq_length, n_samples))\n', (2167, 2216), True, 'import numpy as np\n'), ((2478, 2501), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(2)'}), '(y_pred, axis=2)\n', (2485, 2501), True, 'import numpy as np\n'), ((2518, 2540), 'numpy.std', 'np.std', (['y_pred'], {'axis': '(2)'}), '(y_pred, axis=2)\n', (2524, 2540), True, 'import numpy as np\n'), ((2681, 2693), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2691, 2693), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2771), 'numpy.arange', 'np.arange', (['(0)', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(0, fcstnet.in_seq_length + fcstnet.out_seq_length)\n', (2720, 2771), True, 'import numpy as np\n'), ((2915, 3000), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (2924, 3000), True, 'import numpy as np\n'), ((3105, 3190), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (3114, 3190), True, 'import numpy as np\n'), ((3551, 
3563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3561, 3563), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3641), 'numpy.arange', 'np.arange', (['(0)', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(0, fcstnet.in_seq_length + fcstnet.out_seq_length)\n', (3590, 3641), True, 'import numpy as np\n'), ((3785, 3870), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (3794, 3870), True, 'import numpy as np\n')]
|
import os, sys
root_path = os.path.realpath(__file__).split('/evaluate/multipose_keypoint_val.py')[0]
os.chdir(root_path)
sys.path.append(root_path)
from training.batch_processor import batch_processor
from network.posenet import poseNet
from datasets.coco import get_loader
from evaluate.tester import Tester
# Hyper-params
coco_root = '/data/COCO/'
backbone = 'resnet101' # 'resnet50'
data_dir = coco_root+'images/'
mask_dir = coco_root
json_path = coco_root+'COCO.json'
inp_size = 480 # input size 480*480
feat_stride = 4
# Set Training parameters
params = Tester.TestParams()
params.subnet_name = 'keypoint_subnet'
params.gpus = [0]
params.ckpt = './demo/models/ckpt_baseline_resnet101.h5'
params.batch_size = 6 * len(params.gpus)
params.print_freq = 50
# validation data
valid_data = get_loader(json_path, data_dir, mask_dir, inp_size, feat_stride,
preprocess='resnet', batch_size=params.batch_size-2*len(params.gpus), training=False,
shuffle=False, num_workers=4, subnet=params.subnet_name)
print('val dataset len: {}'.format(len(valid_data.dataset)))
# model
if backbone == 'resnet101':
model = poseNet(101)
elif backbone == 'resnet50':
model = poseNet(50)
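# Freeze all parameters; this script only runs validation.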
for name, module in model.named_children():
for para in module.parameters():
para.requires_grad = False
tester = Tester(model, params, batch_processor, valid_data)
tester.val()
|
[
"evaluate.tester.Tester",
"evaluate.tester.Tester.TestParams"
] |
[((102, 121), 'os.chdir', 'os.chdir', (['root_path'], {}), '(root_path)\n', (110, 121), False, 'import os, sys\n'), ((122, 148), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (137, 148), False, 'import os, sys\n'), ((565, 584), 'evaluate.tester.Tester.TestParams', 'Tester.TestParams', ([], {}), '()\n', (582, 584), False, 'from evaluate.tester import Tester\n'), ((1354, 1404), 'evaluate.tester.Tester', 'Tester', (['model', 'params', 'batch_processor', 'valid_data'], {}), '(model, params, batch_processor, valid_data)\n', (1360, 1404), False, 'from evaluate.tester import Tester\n'), ((1161, 1173), 'network.posenet.poseNet', 'poseNet', (['(101)'], {}), '(101)\n', (1168, 1173), False, 'from network.posenet import poseNet\n'), ((1215, 1226), 'network.posenet.poseNet', 'poseNet', (['(50)'], {}), '(50)\n', (1222, 1226), False, 'from network.posenet import poseNet\n'), ((27, 53), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (43, 53), False, 'import os, sys\n')]
|
#!/usr/bin/env python3
import sys
sys.path.append('./lib')
import argparse
import os
import datetime
import numpy as np
import time
import pickle
import torch
from torch import optim
from param_stamp import get_param_stamp, get_param_stamp_from_args
import evaluate
from lib.encoder import Classifier
from lib.vae_models import AutoEncoder
import lib.callbacks as cb
from lib.train import train_cl
from lib.continual_learner import ContinualLearner
from lib.exemplars import ExemplarHandler
from lib.replayer import Replayer
RESULT_DIR = './results'
parser = argparse.ArgumentParser('./main.py', description='Run individual continual learning experiment.')
parser.add_argument('--get-stamp', action='store_true')
parser.add_argument('--no-gpus', action='store_false', dest='cuda')
parser.add_argument('--gpuID', type=int, nargs='+', default=[0, 1, 2, 3], help='GPU #')
parser.add_argument('--savepath', type=str, default='./results', dest='savepath')
parser.add_argument('--vis-cross-methods', action='store_true', dest='cross_methods', help='draw plots for cross methods')
parser.add_argument('--vis-cross-methods-type', nargs='+', default=['spider'], dest='cross_methods_type', help='alternatives=[\'spider\', \'bar\']')
parser.add_argument('--vis-cross-tasks', action='store_true', dest='cross_tasks', help='draw plots for cross tasks')
parser.add_argument('--matrices', type=str, nargs='+', default=['ACC', 'BWT', 'FWT', 'Overall ACC'])
parser.add_argument('--seed', type=int, default=7)
parser.add_argument('--factor', type=str, default='clutter', dest='factor')
parser.add_argument('--cumulative', type=int, default=0, dest='cul')
parser.add_argument('--bce', action='store_true')
parser.add_argument('--tasks', type=int, default=9)
parser.add_argument('--dataset', type=str, default='OpenLORIS-Object', dest='dataset')
parser.add_argument('--fc-layers', type=int, default=3, dest='fc_lay')
parser.add_argument('--fc-units', type=int, default=400, metavar="N")
parser.add_argument('--fc-drop', type=float, default=0.)
parser.add_argument('--fc-bn', type=str, default="no")
parser.add_argument('--fc-nl', type=str, default="relu", choices=["relu", "leakyrelu"])
parser.add_argument('--iters', type=int, default=3000)
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--batch', type=int, default=32)
parser.add_argument('--optimizer', type=str, choices=['adam', 'adam_reset', 'sgd'], default='adam')
parser.add_argument('--feedback', action="store_true")
replay_choices = ['offline', 'exact', 'generative', 'none', 'current', 'exemplars']
parser.add_argument('--replay', type=str, default='none', choices=replay_choices)
parser.add_argument('--distill', action='store_true')
parser.add_argument('--temp', type=float, default=2., dest='temp')
parser.add_argument('--z_dim', type=int, default=100)
parser.add_argument('--g-z-dim', type=int, default=100)
parser.add_argument('--g-fc-lay', type=int)
parser.add_argument('--g-fc-uni', type=int)
parser.add_argument('--g-iters', type=int)
parser.add_argument('--lr-gen', type=float)
parser.add_argument('--ewc', action='store_true')
parser.add_argument('--lambda', type=float, default=5240., dest="ewc_lambda")
parser.add_argument('--fisher-n', type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--gamma', type=float, default=1.)
parser.add_argument('--emp-fi', action='store_true')
parser.add_argument('--si', action='store_true')
parser.add_argument('--c', type=float, default=0.3, dest="si_c")
parser.add_argument('--epsilon', type=float, default=0.2, dest="epsilon")
parser.add_argument('--icarl', action='store_true')
parser.add_argument('--use-exemplars', action='store_true')
parser.add_argument('--add-exemplars', action='store_true')
parser.add_argument('--budget', type=int, default=2500, dest="budget")
parser.add_argument('--herding', action='store_true')
parser.add_argument('--norm-exemplars', action='store_true')
parser.add_argument('--log-per-task', action='store_true')
parser.add_argument('--loss-log', type=int, default=200, metavar="N")
parser.add_argument('--prec-log', type=int, default=200, metavar="N")
parser.add_argument('--prec-n', type=int, default=1024)
parser.add_argument('--sample-log', type=int, default=500, metavar="N")
parser.add_argument('--sample-n', type=int, default=64)
def run(args):
result_path = os.path.join('./benchmarks/results', args.savepath)
savepath = result_path + '/' + str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')) + '.csv'
if not os.path.exists(result_path):
print('results path does not exist; creating it ...')
os.makedirs(result_path, exist_ok=True)
# Set default arguments
args.lr_gen = args.lr if args.lr_gen is None else args.lr_gen
args.g_iters = args.iters if args.g_iters is None else args.g_iters
args.g_fc_lay = args.fc_lay if args.g_fc_lay is None else args.g_fc_lay
args.g_fc_uni = args.fc_units if args.g_fc_uni is None else args.g_fc_uni
# -if [log_per_task], reset all logs
if args.log_per_task:
args.prec_log = args.iters
args.loss_log = args.iters
args.sample_log = args.iters
# -if [iCaRL] is selected, select all accompanying options
if hasattr(args, "icarl") and args.icarl:
args.use_exemplars = True
args.add_exemplars = True
# -if EWC or SI is selected together with 'feedback', give error
if args.feedback and (args.ewc or args.si or args.icarl):
raise NotImplementedError("EWC, SI and iCaRL are not supported with feedback connections.")
# -if binary classification loss is selected together with 'feedback', give error
if args.feedback and args.bce:
raise NotImplementedError("Binary classification loss not supported with feedback connections.")
if not os.path.isdir(RESULT_DIR):
os.mkdir(RESULT_DIR)
# If only want param-stamp, get it printed to screen and exit
if hasattr(args, "get_stamp") and args.get_stamp:
_ = get_param_stamp_from_args(args=args)
exit()
# Use cuda?
cuda = torch.cuda.is_available() and args.cuda
device = "cuda" if cuda else "cpu"
gpu_devices = None
if args.gpuID is None:
if torch.cuda.device_count() > 1:
gpu_devices = ','.join([str(id) for id in range(torch.cuda.device_count())])
print('==> training with CUDA (GPU id: ' + gpu_devices + ') ... <==')
else:
gpu_devices = ','.join([str(id) for id in args.gpuID])
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_devices
print('==> training with CUDA (GPU id: ' + str(args.gpuID) + ') ... <==')
# Set random seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
if args.factor == 'sequence':
args.tasks = 12
# -------------------------------------------------------------------------------------------------#
# ----------------#
# ----- DATA -----#
# ----------------#
# Prepare data for OpenLORIS-Object
if args.dataset == 'OpenLORIS-Object':
with open('./benchmarks/data/OpenLORIS-Object/' + args.factor + '.pk', 'rb') as f:
((train_datasets, test_datasets), config, classes_per_task) = pickle.load(f)
else:
with open('./benchmarks/data/' + args.dataset + '/' + args.dataset + '.pk', 'rb') as f:
((train_datasets, test_datasets), config, classes_per_task) = pickle.load(f)
if args.cul == 1:
for i in range(1, len(train_datasets)):
train_datasets[i].imgs.extend(train_datasets[i - 1].imgs)
train_datasets[i].labels.extend(train_datasets[i - 1].labels)
# -------------------------------------------------------------------------------------------------#
# ------------------------------#
# ----- MODEL (CLASSIFIER) -----#
# ------------------------------#
# Define main model (i.e., classifier, if requested with feedback connections)
if args.feedback:
model = AutoEncoder(
image_size=config['size'], image_channels=config['channels'], classes=config['classes'],
fc_layers=args.fc_lay, fc_units=args.g_fc_uni, z_dim=args.z_dim,
fc_drop=args.fc_drop, fc_bn=True if args.fc_bn == "yes" else False, fc_nl=args.fc_nl,
).to(device)
model.lamda_pl = 1.  # --> so that this VAE is also trained to classify
else:
model = Classifier(
image_size=config['size'], image_channels=config['channels'], classes=config['classes'],
fc_layers=args.fc_lay, fc_units=args.fc_units, fc_drop=args.fc_drop, fc_nl=args.fc_nl,
fc_bn=True if args.fc_bn == "yes" else False, excit_buffer=False,
binaryCE=args.bce
).to(device)
# Define optimizer (only include parameters that "requires_grad")
model.optim_list = [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': args.lr}]
model.optim_type = args.optimizer
if model.optim_type in ("adam", "adam_reset"):
model.optimizer = optim.Adam(model.optim_list, betas=(0.9, 0.999))
elif model.optim_type == "sgd":
model.optimizer = optim.SGD(model.optim_list)
else:
raise ValueError("Unrecognized optimizer, '{}' is not currently a valid option".format(args.optimizer))
# ----------------------------------#
# ----- CL-STRATEGY: EXEMPLARS -----#
# ----------------------------------#
# Store in model whether, how many and in what way to store exemplars
if isinstance(model, ExemplarHandler) and (args.use_exemplars or args.add_exemplars or args.replay == "exemplars"):
model.memory_budget = args.budget
model.norm_exemplars = args.norm_exemplars
model.herding = args.herding
# -----------------------------------#
# ----- CL-STRATEGY: ALLOCATION -----#
# -----------------------------------#
# Elastic Weight Consolidation (EWC)
if isinstance(model, ContinualLearner):
model.ewc_lambda = args.ewc_lambda if args.ewc else 0
if args.ewc:
model.fisher_n = args.fisher_n
model.gamma = args.gamma
model.online = args.online
model.emp_FI = args.emp_fi
# Synaptic Intelligence (SI)
if isinstance(model, ContinualLearner):
model.si_c = args.si_c if args.si else 0
if args.si:
model.epsilon = args.epsilon
# -------------------------------------------------------------------------------------------------#
# -------------------------------#
# ----- CL-STRATEGY: REPLAY -----#
# -------------------------------#
# Use distillation loss (i.e., soft targets) for replayed data? (and set temperature)
if isinstance(model, Replayer):
model.replay_targets = "soft" if args.distill else "hard"
model.KD_temp = args.temp
# If needed, specify separate model for the generator
train_gen = True if (args.replay == "generative" and not args.feedback) else False
if train_gen:
# -specify architecture
generator = AutoEncoder(
image_size=config['size'], image_channels=config['channels'],
fc_layers=args.g_fc_lay, fc_units=args.g_fc_uni, z_dim=args.z_dim, classes=config['classes'],
fc_drop=args.fc_drop, fc_bn=True if args.fc_bn == "yes" else False, fc_nl=args.fc_nl,
).to(device)
# -set optimizer(s)
generator.optim_list = [
{'params': filter(lambda p: p.requires_grad, generator.parameters()), 'lr': args.lr_gen}]
generator.optim_type = args.optimizer
if generator.optim_type in ("adam", "adam_reset"):
generator.optimizer = optim.Adam(generator.optim_list, betas=(0.9, 0.999))
elif generator.optim_type == "sgd":
generator.optimizer = optim.SGD(generator.optim_list)
else:
generator = None
# ---------------------#
# ----- REPORTING -----#
# ---------------------#
# Get parameter-stamp (and print on screen)
param_stamp = get_param_stamp(
args, model.name, verbose=True, replay=True if (not args.replay == "none") else False,
replay_model_name=generator.name if (args.replay == "generative" and not args.feedback) else None,
)
# -define [precision_dict] to keep track of performance during training for storing and for later plotting in pdf
precision_dict = evaluate.initiate_precision_dict(args.tasks)
precision_dict_exemplars = evaluate.initiate_precision_dict(args.tasks) if args.use_exemplars else None
# ---------------------#
# ----- CALLBACKS -----#
# ---------------------#
# Callbacks for reporting on and visualizing loss
generator_loss_cbs = [
cb._VAE_loss_cb(log=args.loss_log, model=model if args.feedback else generator, tasks=args.tasks,
iters_per_task=args.iters if args.feedback else args.g_iters,
replay=False if args.replay == "none" else True)
] if (train_gen or args.feedback) else [None]
solver_loss_cbs = [
cb._solver_loss_cb(log=args.loss_log, model=model, tasks=args.tasks,
iters_per_task=args.iters, replay=False if args.replay == "none" else True)
] if (not args.feedback) else [None]
# Callbacks for evaluating and plotting generated / reconstructed samples
sample_cbs = [
cb._sample_cb(log=args.sample_log, config=config, test_datasets=test_datasets,
sample_size=args.sample_n, iters_per_task=args.iters if args.feedback else args.g_iters)
] if (train_gen or args.feedback) else [None]
# Callbacks for reporting and visualizing accuracy
eval_cb = cb._eval_cb(
log=args.prec_log, test_datasets=test_datasets, precision_dict=None, iters_per_task=args.iters,
test_size=args.prec_n, classes_per_task=classes_per_task
)
# -pdf / reporting: summary plots (i.e, only after each task)
eval_cb_full = cb._eval_cb(
log=args.iters, test_datasets=test_datasets, precision_dict=precision_dict,
iters_per_task=args.iters, classes_per_task=classes_per_task
)
eval_cb_exemplars = cb._eval_cb(
log=args.iters, test_datasets=test_datasets, classes_per_task=classes_per_task,
precision_dict=precision_dict_exemplars, iters_per_task=args.iters,
with_exemplars=True,
) if args.use_exemplars else None
# -collect them in <lists>
eval_cbs = [eval_cb, eval_cb_full]
eval_cbs_exemplars = [eval_cb_exemplars]
# --------------------#
# ----- TRAINING -----#
# --------------------#
print("--> Training:")
# Keep track of training-time
start = time.time()
# Train model
train_cl(
model, train_datasets, test_datasets, replay_mode=args.replay,
classes_per_task=classes_per_task,
iters=args.iters, batch_size=args.batch, savepath=savepath,
generator=generator, gen_iters=args.g_iters, gen_loss_cbs=generator_loss_cbs,
sample_cbs=sample_cbs, eval_cbs=eval_cbs, loss_cbs=generator_loss_cbs if args.feedback else solver_loss_cbs,
eval_cbs_exemplars=eval_cbs_exemplars, use_exemplars=args.use_exemplars, add_exemplars=args.add_exemplars,
)
# -------------------------------------------------------------------------------------------------#
# --------------------#
# -- VISUALIZATION ---#
# --------------------#
matrices_names = args.matrices
method_names = []
if args.cul == 1:
method_names.append('Cumulative')
elif args.cul == 0:
method_names.append('Naive')
if args.replay == 'current':
method_names.append('LwF')
if args.online and args.ewc:
method_names.append('Online EWC')
if args.si:
method_names.append('SI')
if args.replay == "generative" and not args.feedback and not args.distill:
method_names.append('DGR')
if args.replay == "generative" and not args.feedback and args.distill:
method_names.append('DGR with distillation')
if args.replay == "generative" and args.feedback and args.distill:
method_names.append('DGR with feedback')
if args.ewc and not args.online:
method_names.append('EWC')
print('The selected methods are:', method_names)
print('The selected performance matrices are:', matrices_names)
if args.cross_methods:
print('==> Drawing results for cross selected-methods ... <==')
if 'spider' in args.cross_methods_type:
spider = True
if 'bar' in args.cross_methods_type:
bar = True
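# NOTE: 'spider' and 'bar' only select the plot type here; the plotting itself is not implemented in this script.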
if args.cross_tasks:
print('==> Drawing results for cross tasks ... <==')
if __name__ == '__main__':
args = parser.parse_args()
run(args)
|
[
"evaluate.initiate_precision_dict"
] |
[((35, 59), 'sys.path.append', 'sys.path.append', (['"""./lib"""'], {}), "('./lib')\n", (50, 59), False, 'import sys\n'), ((564, 666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""./main.py"""'], {'description': '"""Run individual continual learning experiment."""'}), "('./main.py', description=\n 'Run individual continual learning experiment.')\n", (587, 666), False, 'import argparse\n'), ((4370, 4421), 'os.path.join', 'os.path.join', (['"""./benchmarks/results"""', 'args.savepath'], {}), "('./benchmarks/results', args.savepath)\n", (4382, 4421), False, 'import os\n'), ((6663, 6688), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (6677, 6688), True, 'import numpy as np\n'), ((6693, 6721), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6710, 6721), False, 'import torch\n'), ((12078, 12285), 'param_stamp.get_param_stamp', 'get_param_stamp', (['args', 'model.name'], {'verbose': '(True)', 'replay': "(True if not args.replay == 'none' else False)", 'replay_model_name': "(generator.name if args.replay == 'generative' and not args.feedback else None)"}), "(args, model.name, verbose=True, replay=True if not args.\n replay == 'none' else False, replay_model_name=generator.name if args.\n replay == 'generative' and not args.feedback else None)\n", (12093, 12285), False, 'from param_stamp import get_param_stamp, get_param_stamp_from_args\n'), ((12443, 12487), 'evaluate.initiate_precision_dict', 'evaluate.initiate_precision_dict', (['args.tasks'], {}), '(args.tasks)\n', (12475, 12487), False, 'import evaluate\n'), ((13742, 13916), 'lib.callbacks._eval_cb', 'cb._eval_cb', ([], {'log': 'args.prec_log', 'test_datasets': 'test_datasets', 'precision_dict': 'None', 'iters_per_task': 'args.iters', 'test_size': 'args.prec_n', 'classes_per_task': 'classes_per_task'}), '(log=args.prec_log, test_datasets=test_datasets, precision_dict=\n None, iters_per_task=args.iters, test_size=args.prec_n,\n classes_per_task=classes_per_task)\n', (13753, 13916), True, 'import lib.callbacks as cb\n'), ((14015, 14174), 'lib.callbacks._eval_cb', 'cb._eval_cb', ([], {'log': 'args.iters', 'test_datasets': 'test_datasets', 'precision_dict': 'precision_dict', 'iters_per_task': 'args.iters', 'classes_per_task': 'classes_per_task'}), '(log=args.iters, test_datasets=test_datasets, precision_dict=\n precision_dict, iters_per_task=args.iters, classes_per_task=\n classes_per_task)\n', (14026, 14174), True, 'import lib.callbacks as cb\n'), ((14729, 14740), 'time.time', 'time.time', ([], {}), '()\n', (14738, 14740), False, 'import time\n'), ((14763, 15250), 'lib.train.train_cl', 'train_cl', (['model', 'train_datasets', 'test_datasets'], {'replay_mode': 'args.replay', 'classes_per_task': 'classes_per_task', 'iters': 'args.iters', 'batch_size': 'args.batch', 'savepath': 'savepath', 'generator': 'generator', 'gen_iters': 'args.g_iters', 'gen_loss_cbs': 'generator_loss_cbs', 'sample_cbs': 'sample_cbs', 'eval_cbs': 'eval_cbs', 'loss_cbs': '(generator_loss_cbs if args.feedback else solver_loss_cbs)', 'eval_cbs_exemplars': 'eval_cbs_exemplars', 'use_exemplars': 'args.use_exemplars', 'add_exemplars': 'args.add_exemplars'}), '(model, train_datasets, test_datasets, replay_mode=args.replay,\n classes_per_task=classes_per_task, iters=args.iters, batch_size=args.\n batch, savepath=savepath, generator=generator, gen_iters=args.g_iters,\n gen_loss_cbs=generator_loss_cbs, sample_cbs=sample_cbs, eval_cbs=\n eval_cbs, loss_cbs=generator_loss_cbs if args.feedback else\n 
solver_loss_cbs, eval_cbs_exemplars=eval_cbs_exemplars, use_exemplars=\n args.use_exemplars, add_exemplars=args.add_exemplars)\n', (14771, 15250), False, 'from lib.train import train_cl\n'), ((4536, 4563), 'os.path.exists', 'os.path.exists', (['result_path'], {}), '(result_path)\n', (4550, 4563), False, 'import os\n'), ((4627, 4666), 'os.makedirs', 'os.makedirs', (['result_path'], {'exist_ok': '(True)'}), '(result_path, exist_ok=True)\n', (4638, 4666), False, 'import os\n'), ((5809, 5834), 'os.path.isdir', 'os.path.isdir', (['RESULT_DIR'], {}), '(RESULT_DIR)\n', (5822, 5834), False, 'import os\n'), ((5844, 5864), 'os.mkdir', 'os.mkdir', (['RESULT_DIR'], {}), '(RESULT_DIR)\n', (5852, 5864), False, 'import os\n'), ((5998, 6034), 'param_stamp.get_param_stamp_from_args', 'get_param_stamp_from_args', ([], {'args': 'args'}), '(args=args)\n', (6023, 6034), False, 'from param_stamp import get_param_stamp, get_param_stamp_from_args\n'), ((6078, 6103), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6101, 6103), False, 'import torch\n'), ((6743, 6776), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6765, 6776), False, 'import torch\n'), ((9086, 9134), 'torch.optim.Adam', 'optim.Adam', (['model.optim_list'], {'betas': '(0.9, 0.999)'}), '(model.optim_list, betas=(0.9, 0.999))\n', (9096, 9134), False, 'from torch import optim\n'), ((12519, 12563), 'evaluate.initiate_precision_dict', 'evaluate.initiate_precision_dict', (['args.tasks'], {}), '(args.tasks)\n', (12551, 12563), False, 'import evaluate\n'), ((14211, 14400), 'lib.callbacks._eval_cb', 'cb._eval_cb', ([], {'log': 'args.iters', 'test_datasets': 'test_datasets', 'classes_per_task': 'classes_per_task', 'precision_dict': 'precision_dict_exemplars', 'iters_per_task': 'args.iters', 'with_exemplars': '(True)'}), '(log=args.iters, test_datasets=test_datasets, classes_per_task=\n classes_per_task, precision_dict=precision_dict_exemplars,\n iters_per_task=args.iters, with_exemplars=True)\n', (14222, 14400), True, 'import lib.callbacks as cb\n'), ((6219, 6244), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6242, 6244), False, 'import torch\n'), ((7265, 7279), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7276, 7279), False, 'import pickle\n'), ((7460, 7474), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7471, 7474), False, 'import pickle\n'), ((9197, 9224), 'torch.optim.SGD', 'optim.SGD', (['model.optim_list'], {}), '(model.optim_list)\n', (9206, 9224), False, 'from torch import optim\n'), ((11725, 11777), 'torch.optim.Adam', 'optim.Adam', (['generator.optim_list'], {'betas': '(0.9, 0.999)'}), '(generator.optim_list, betas=(0.9, 0.999))\n', (11735, 11777), False, 'from torch import optim\n'), ((12774, 12991), 'lib.callbacks._VAE_loss_cb', 'cb._VAE_loss_cb', ([], {'log': 'args.loss_log', 'model': '(model if args.feedback else generator)', 'tasks': 'args.tasks', 'iters_per_task': '(args.iters if args.feedback else args.g_iters)', 'replay': "(False if args.replay == 'none' else True)"}), "(log=args.loss_log, model=model if args.feedback else\n generator, tasks=args.tasks, iters_per_task=args.iters if args.feedback\n else args.g_iters, replay=False if args.replay == 'none' else True)\n", (12789, 12991), True, 'import lib.callbacks as cb\n'), ((13113, 13261), 'lib.callbacks._solver_loss_cb', 'cb._solver_loss_cb', ([], {'log': 'args.loss_log', 'model': 'model', 'tasks': 'args.tasks', 'iters_per_task': 'args.iters', 'replay': "(False if args.replay == 
'none' else True)"}), "(log=args.loss_log, model=model, tasks=args.tasks,\n iters_per_task=args.iters, replay=False if args.replay == 'none' else True)\n", (13131, 13261), True, 'import lib.callbacks as cb\n'), ((13432, 13608), 'lib.callbacks._sample_cb', 'cb._sample_cb', ([], {'log': 'args.sample_log', 'config': 'config', 'test_datasets': 'test_datasets', 'sample_size': 'args.sample_n', 'iters_per_task': '(args.iters if args.feedback else args.g_iters)'}), '(log=args.sample_log, config=config, test_datasets=\n test_datasets, sample_size=args.sample_n, iters_per_task=args.iters if\n args.feedback else args.g_iters)\n', (13445, 13608), True, 'import lib.callbacks as cb\n'), ((8032, 8297), 'lib.vae_models.AutoEncoder', 'AutoEncoder', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'classes': "config['classes']", 'fc_layers': 'args.fc_lay', 'fc_units': 'args.g_fc_uni', 'z_dim': 'args.z_dim', 'fc_drop': 'args.fc_drop', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'fc_nl': 'args.fc_nl'}), "(image_size=config['size'], image_channels=config['channels'],\n classes=config['classes'], fc_layers=args.fc_lay, fc_units=args.\n g_fc_uni, z_dim=args.z_dim, fc_drop=args.fc_drop, fc_bn=True if args.\n fc_bn == 'yes' else False, fc_nl=args.fc_nl)\n", (8043, 8297), False, 'from lib.vae_models import AutoEncoder\n'), ((8453, 8738), 'lib.encoder.Classifier', 'Classifier', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'classes': "config['classes']", 'fc_layers': 'args.fc_lay', 'fc_units': 'args.fc_units', 'fc_drop': 'args.fc_drop', 'fc_nl': 'args.fc_nl', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'excit_buffer': '(False)', 'binaryCE': 'args.bce'}), "(image_size=config['size'], image_channels=config['channels'],\n classes=config['classes'], fc_layers=args.fc_lay, fc_units=args.\n fc_units, fc_drop=args.fc_drop, fc_nl=args.fc_nl, fc_bn=True if args.\n fc_bn == 'yes' else False, excit_buffer=False, binaryCE=args.bce)\n", (8463, 8738), False, 'from lib.encoder import Classifier\n'), ((11111, 11377), 'lib.vae_models.AutoEncoder', 'AutoEncoder', ([], {'image_size': "config['size']", 'image_channels': "config['channels']", 'fc_layers': 'args.g_fc_lay', 'fc_units': 'args.g_fc_uni', 'z_dim': 'args.z_dim', 'classes': "config['classes']", 'fc_drop': 'args.fc_drop', 'fc_bn': "(True if args.fc_bn == 'yes' else False)", 'fc_nl': 'args.fc_nl'}), "(image_size=config['size'], image_channels=config['channels'],\n fc_layers=args.g_fc_lay, fc_units=args.g_fc_uni, z_dim=args.z_dim,\n classes=config['classes'], fc_drop=args.fc_drop, fc_bn=True if args.\n fc_bn == 'yes' else False, fc_nl=args.fc_nl)\n", (11122, 11377), False, 'from lib.vae_models import AutoEncoder\n'), ((11856, 11887), 'torch.optim.SGD', 'optim.SGD', (['generator.optim_list'], {}), '(generator.optim_list)\n', (11865, 11887), False, 'from torch import optim\n'), ((4461, 4484), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4482, 4484), False, 'import datetime\n'), ((6310, 6335), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6333, 6335), False, 'import torch\n')]
|
import torch
import torch.nn.utils.prune as prune
import numpy as np
from copy import deepcopy
import sys
sys.path.append('../src')
from train import train
from evaluate import test
class PruneModel:
def __init__(self, network, batch_size, train_loader, val_loader, test_loader, optimizer, epochs, scheduler, device, pruning_rounds=1):
"""
Class for pruning a model.
Args:
network (nn.Module): the network/model to be pruned
            pruning_rounds (int): the number of rounds in iterative pruning (1 for One-Shot pruning)
            (the remaining arguments are standard training plumbing: data loaders,
            optimizer, epoch count, LR scheduler, and device)
        """
self.network = network
self.original_network = deepcopy(network)
print('ORIGINAL NETWORK:')
for name, param in self.original_network.named_parameters():
print(name, param)
break
self.batch_size = batch_size
self.train_loader = train_loader
self.val_loader = val_loader
self.test_loader = test_loader
self.optimizer = optimizer
self.epochs = epochs
self.scheduler = scheduler
self.device = device
self.pruning_rounds = pruning_rounds
self.layers = []
self.masks = {}
        self.p = 0.002  # target fraction of weights still unpruned after all rounds (p as in the paper)
        self.pruning_rate = self.p ** (1/self.pruning_rounds)  # per-round keep rate; see the worked example below __init__
self.percent_remaining_weights_list = []
# "Connections to outputs are pruned at half the rate of the rest of the network"
# todo: not sure if this is done the same as the paper
# should it be self.pruning_rate_output_layer = self.pruning_rate*2.0?
self.p_output_layer = self.p*2.0
self.pruning_rate_output_layer = self.p_output_layer ** (1/self.pruning_rounds)
print(self.pruning_rate_output_layer)
self.percent_remaining_weights_output_layer_list = []
# predetermine % of weights at each pruning iteration
for i in range(self.pruning_rounds+1):
self.percent_remaining_weights_list.append(self.pruning_rate ** i)
self.percent_remaining_weights_output_layer_list.append(self.pruning_rate_output_layer ** i)
# print('\nRemaining weights: {}'.format(self.percent_remaining_weights_list))
# print('Remaining weights output layer: {}'.format(self.percent_remaining_weights_output_layer_list))
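    # Worked example (illustrative values, not from the original code): with
    # p = 0.002 and pruning_rounds = 5, the per-round keep rate is
    # 0.002 ** (1/5) ≈ 0.2885, so each round keeps ~28.9% of the surviving
    # weights and 0.2885 ** 5 ≈ 0.002 of the original weights remain after the
    # final round. The output layer uses 0.004 ** (1/5) ≈ 0.3314 per round,
    # i.e. it is pruned more gently.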
def prune(self):
"""
Prune a network for pruning_rounds # of iterations. This function is the main driver of pruning, calling other
functions such as _compute_masks, _apply_masks, and _retrain.
"""
for pruning_iteration in range(self.pruning_rounds):
print('-'*30)
print('Pruning iteration:', pruning_iteration)
print('-' * 30)
print()
# print('Percent Remain:', self.percent_remaining_weights_list[pruning_iteration])
# print('Percent Remain:', self.percent_remaining_weights_output_layer_list[pruning_iteration])
# compute masks
self.masks = self._compute_masks()
# reinit
self.network = self._reinitialize(random=False)
# apply the masks
self._apply_masks()
# verifying correct amount of parameters were pruned and correct amount is remaining
self._test_pruning(pruning_iteration)
# retrain after prune
self._retrain()
def _compute_masks(self):
"""
Computes masks on self.network for a given iteration
Returns:
            masks (Dict[str, torch.Tensor]): the masks for each layer
a tensor of 0s and 1s having the same dimension as the parameter
"""
masks = {}
for idx, (name, param) in enumerate(self.network.named_parameters()):
# todo: check linear, conv, etc. (isinstance())
# todo: random sparse networks, so prune randomly, not based on magnitude
if 'weight' in name:
# get unpruned weights (nonzero)
unpruned_weights = param[param != 0]
                # not output layer
                # NOTE: this branch assumes the output layer's weight is the last
                # entry in named_parameters(); if the output layer has a bias, the
                # bias is last instead and the halved output rate is never applied
                if idx < len(list(self.network.named_parameters())) - 1:
num_to_keep = int(self.pruning_rate * len(unpruned_weights))
# output layer
else:
num_to_keep = int(self.pruning_rate_output_layer * len(unpruned_weights))
# find largest magnitude weights
topk = torch.topk(torch.abs(param).view(-1), k=num_to_keep, largest=True)
# create mask, keep largest magnitude weights by setting them to 1
# remove smallest magnitude weights by setting them to 0
mask = torch.zeros_like(param)
mask.view(-1)[topk.indices] = 1
masks[name] = mask
return masks
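    # Toy illustration of the masking rule above (values assumed, not from the
    # paper): for param = torch.tensor([0.1, -0.2, 0.3, -0.15, 0.05]) and
    # num_to_keep = 3, the three largest magnitudes are 0.3, -0.2 and -0.15,
    # so the mask comes out as tensor([0., 1., 1., 1., 0.]) and only those
    # three weights survive the prune.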
def _reinitialize(self, random=False):
"""
Reinitialize the parameters. If random=True, reinitialize the parameters randomly
Else: reinitialize parameters to original parameters (theta_0 in the paper)
"""
if random:
# create another instance of the neural network model class (randomly reinit)
network_class = self.network.__class__
new_random_network = network_class().to(self.device)
return new_random_network
else:
# reinit to original weights
return deepcopy(self.original_network)
def _apply_masks(self):
"""
Applies masks to self.network parameters.
        e.g. if this is a parameter [.1, -.2, .3, -.15, .05] and its mask is [0, 1, 0, 1, 1],
        the result is [0, -.2, 0, -.15, .05]
"""
        for name, param in self.network.named_parameters():
            if name in self.masks.keys():
                # temporarily disable requires_grad so the in-place mul_ on a
                # leaf tensor is allowed, then re-enable it for retraining
                param.requires_grad_(requires_grad=False)
                param.mul_(self.masks[name])
                param.requires_grad_(requires_grad=True)
# print(name)
# print(param)
# print(self.masks[name])
# print()
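    # Note (hedged): _apply_masks zeroes pruned weights once, but gradients are
    # not masked during _retrain, so the optimizer can immediately make pruned
    # weights nonzero again. A minimal sketch to pin them at zero, if desired
    # (not part of the original code):
    #
    #     for name, param in self.network.named_parameters():
    #         if name in self.masks:
    #             param.register_hook(lambda grad, m=self.masks[name]: grad * m)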
def _test_pruning(self, pruning_iteration):
"""
Verify correct amount of weights have been pruned
"""
for idx, (name, param) in enumerate(self.network.named_parameters()):
if name in self.masks.keys():
# not output layer
if idx < len(list(self.network.named_parameters())) - 1:
theoretical_unpruned = int(
(self.pruning_rate ** (pruning_iteration + 1) * len(param.view(-1)))
)
# output layer
else:
theoretical_unpruned = int(
(self.pruning_rate_output_layer ** (pruning_iteration + 1) * len(param.view(-1)))
)
actual_unpruned_param = len(param[param != 0])
actual_nonzero_mask = torch.sum(self.masks[name])
# all these should tell us how many weights/params still remain at a given pruning iteration
diff = (theoretical_unpruned - actual_unpruned_param)
assert (abs(diff) < 3)
diff2 = (actual_unpruned_param - actual_nonzero_mask)
assert (abs(diff2) < 3)
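    # Illustrative tolerance check (assumed values): a 300x100 layer has 30,000
    # weights; with a per-round keep rate of ~0.2885, round 1 should leave about
    # int(0.2885 * 30000) = 8655 of them. Because int() truncates at every
    # round, theoretical and actual counts can drift by a weight or two, which
    # is what the small assert tolerances above allow for.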
def _retrain(self):
"""
Retrains the network after pruning and weight reinitialization.
"""
# run the training loop
for epoch in range(1, self.epochs + 1):
stop, stopping_iteration = train(
self.network, self.device, self.train_loader, self.val_loader, self.test_loader, self.optimizer, epoch
)
self.scheduler.step()
# test after each epoch
test(self.network, self.device, self.test_loader)
if stop:
print('Stopped at overall iteration {}\n'.format(
stopping_iteration + ((len(self.train_loader.dataset) / self.batch_size) * (epoch - 1))))
break
# if save_model:
# torch.save(model.state_dict(), model.__class__.__name__ + '_' + dataset + ".pt")
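# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes the repo's train/test helpers in ../src are importable (as the module
# already requires); the dataset/model choices and hyperparameters below are
# placeholders, and the test set is reused as a stand-in validation set.
if __name__ == '__main__':
    import torch.nn as nn
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms

    class LeNetFC(nn.Module):
        # no-arg constructor so _reinitialize(random=True) can rebuild it
        def __init__(self):
            super().__init__()
            self.fc = nn.Sequential(nn.Flatten(), nn.Linear(784, 300), nn.ReLU(),
                                    nn.Linear(300, 100), nn.ReLU(), nn.Linear(100, 10))

        def forward(self, x):
            return self.fc(x)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tfm = transforms.ToTensor()
    train_set = datasets.MNIST('../data', train=True, download=True, transform=tfm)
    test_set = datasets.MNIST('../data', train=False, transform=tfm)
    train_loader = DataLoader(train_set, batch_size=60, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=60)

    network = LeNetFC().to(device)
    optimizer = torch.optim.Adam(network.parameters(), lr=1.2e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=1.0)
    pruner = PruneModel(network, 60, train_loader, test_loader, test_loader,
                        optimizer, epochs=10, scheduler=scheduler,
                        device=device, pruning_rounds=5)
    pruner.prune()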
|
[
"evaluate.test"
] |
[((107, 132), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (122, 132), False, 'import sys\n'), ((644, 661), 'copy.deepcopy', 'deepcopy', (['network'], {}), '(network)\n', (652, 661), False, 'from copy import deepcopy\n'), ((5405, 5436), 'copy.deepcopy', 'deepcopy', (['self.original_network'], {}), '(self.original_network)\n', (5413, 5436), False, 'from copy import deepcopy\n'), ((7528, 7642), 'train.train', 'train', (['self.network', 'self.device', 'self.train_loader', 'self.val_loader', 'self.test_loader', 'self.optimizer', 'epoch'], {}), '(self.network, self.device, self.train_loader, self.val_loader, self.\n test_loader, self.optimizer, epoch)\n', (7533, 7642), False, 'from train import train\n'), ((7752, 7801), 'evaluate.test', 'test', (['self.network', 'self.device', 'self.test_loader'], {}), '(self.network, self.device, self.test_loader)\n', (7756, 7801), False, 'from evaluate import test\n'), ((4692, 4715), 'torch.zeros_like', 'torch.zeros_like', (['param'], {}), '(param)\n', (4708, 4715), False, 'import torch\n'), ((6931, 6958), 'torch.sum', 'torch.sum', (['self.masks[name]'], {}), '(self.masks[name])\n', (6940, 6958), False, 'import torch\n'), ((4456, 4472), 'torch.abs', 'torch.abs', (['param'], {}), '(param)\n', (4465, 4472), False, 'import torch\n')]
|