code | apis
---|---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from network import NN
from evaluate import accuracy

def read_data(fpath):
    iris = pd.read_csv(fpath)
    iris.loc[iris['species'] == 'virginica', 'species'] = 0
    iris.loc[iris['species'] == 'versicolor', 'species'] = 1
    iris.loc[iris['species'] == 'setosa', 'species'] = 2
    iris = iris[iris['species'] != 2]  # keep only virginica and versicolor (binary task)
    return iris[['petal_length', 'petal_width']].values, iris[['species']].values.astype('uint8')

def plot_data(X, y):
    plt.scatter(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)
    plt.title("IRIS DATA | Blue - Versicolor, Red - Virginica")
    plt.xlabel('Petal Length')
    plt.ylabel('Petal Width')
    plt.show()

def train_test_split(X, y, ratio=0.8):
    indices = np.arange(X.shape[0])
    np.random.shuffle(indices)
    train_len = int(X.shape[0] * ratio)
    return X[indices[:train_len]], y[indices[:train_len]], X[indices[train_len:]], y[indices[train_len:]]

if __name__ == '__main__':
    X, y = read_data('iris.csv')
    # comment the following line if you don't need the plot anymore
    plot_data(X, y)
    X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
    nn = NN(len(X[0]), 5, 1)
    output = nn.feedforward(X_train)
    print(output)
    print(f'w1 before backward propagation: \n{nn.w1} \nw2 before backward propagation:\n{nn.w2}')
    nn.backward(X_train, y_train, output)
    print(f'w1 after backward propagation: \n{nn.w1} \nw2 after backward propagation:\n{nn.w2}')
    nn.train(X_train, y_train)
    print("Accuracy:")
    print(accuracy(nn, X_test, y_test))
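# The `network.NN` class is not shown in this row; judging from the calls above
# (NN(n_inputs, n_hidden, n_outputs), feedforward, backward, train, and the
# w1/w2 weight attributes), a minimal stand-in could look like this sketch.
# Assumption only — the real implementation lives in network.py.
class NN:
    def __init__(self, n_inputs, n_hidden, n_outputs, lr=0.1):
        self.lr = lr
        self.w1 = np.random.randn(n_inputs, n_hidden)   # input -> hidden weights
        self.w2 = np.random.randn(n_hidden, n_outputs)  # hidden -> output weights

    @staticmethod
    def _sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def feedforward(self, X):
        self.a1 = self._sigmoid(X @ self.w1)       # hidden activations
        return self._sigmoid(self.a1 @ self.w2)    # output probabilities

    def backward(self, X, y, output):
        # plain backprop for a sigmoid MLP with squared-error loss
        delta2 = (output - y) * output * (1 - output)
        delta1 = (delta2 @ self.w2.T) * self.a1 * (1 - self.a1)
        self.w2 -= self.lr * (self.a1.T @ delta2)
        self.w1 -= self.lr * (X.T @ delta1)

    def train(self, X, y, epochs=1000):
        for _ in range(epochs):
            self.backward(X, y, self.feedforward(X))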
|
[
"evaluate.accuracy"
] |
import datetime
import os
import copy
import json
import numpy as np
from pytz import timezone
from gamified_squad import GamifiedSquad
from agent import CustomAgent
import generic
import evaluate
SAVE_CHECKPOINT = 100000
def train():
    time_1 = datetime.datetime.now()
    config = generic.load_config()
    env = GamifiedSquad(config)
    env.split_reset("train")
    agent = CustomAgent(config, env.has_token_set)
    if config["general"]["visdom"]:
        # visdom
        import visdom
        viz = visdom.Visdom()
        plt_win = None
        eval_plt_win = None
        plt_q_value_win = None
        plt_steps_win = None
        eval_plt_steps_win = None
        viz_avg_ig_acc, viz_avg_qa_acc = [], []
        viz_avg_ig_q_value = []
        viz_eval_ig_acc, viz_eval_qa_acc, viz_eval_steps = [], [], []
        viz_avg_steps = []
    step_in_total = 0
    batch_no = 0
    episode_no = 0
    running_avg_qa_acc = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_acc = generic.HistoryScoreCache(capacity=50)
    running_avg_qa_loss = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_loss = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_q_value = generic.HistoryScoreCache(capacity=50)
    running_avg_steps = generic.HistoryScoreCache(capacity=50)
    output_dir = "."
    data_dir = "."
    json_file_name = agent.experiment_tag.replace(" ", "_")
    best_qa_acc_so_far = 0.0
    prev_performance = 0.0
    i_am_patient = 0
    # abort rather than overwrite an existing checkpoint for this experiment
    if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
        print("checkpoint already exists.")
        exit(0)
    # load pretrained models from checkpoints
    if os.path.exists(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt"):
        agent.load_pretrained_graph_generation_model(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt")
    if agent.load_pretrained:
        if os.path.exists(data_dir + "/" + agent.load_from_tag + ".pt"):
            agent.load_pretrained_model(data_dir + "/" + agent.load_from_tag + ".pt")  # load partial graph
            agent.update_target_net()
    while True:
        if episode_no > agent.max_episode:
            break
        np.random.seed(episode_no)
        env.seed(episode_no)
        obs, infos = env.reset()
        batch_size = len(obs)
        report = agent.report_frequency > 0 and (episode_no % agent.report_frequency <= max(episode_no - batch_size, 0) % agent.report_frequency)
        __save__ = episode_no % SAVE_CHECKPOINT <= max(episode_no - batch_size, 0) % SAVE_CHECKPOINT
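        # The two modulo tests above fire exactly once whenever a whole batch
        # crosses a multiple of the frequency. Worked example (illustrative):
        # with report_frequency = 1000 and batch_size = 8, at episode_no = 1000
        # we get 1000 % 1000 = 0 <= 992 % 1000 = 992 -> True, while at
        # episode_no = 1008 we get 8 <= 0 -> False, so the report triggers once
        # per 1000 episodes even though episode_no advances in steps of 8.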
        if report:
            print("====================================================================================", episode_no)
            print("-- Q: %s" % (agent.bert_tokenizer.decode(infos[0]["q"]).encode('utf-8')))
            print("-- A: %s" % (infos[0]["a_string"][0].encode('utf-8')))
        agent.train()
        agent.init(obs, infos)
        quest_list = agent.get_game_quest_info(infos)
        agent.kg.push_batch_question(quest_list, [item["q_srl"] for item in infos])
        previous_dynamics = None
        previous_belief = None
        input_quest, input_quest_mask, quest_id_list = agent.get_agent_inputs(quest_list)
        tmp_replay_buffer = []
        print_cmds = []
        prev_commands = ["restart" for _ in range(batch_size)]
        belief_buffer = []
        act_randomly = False if agent.noisy_net else episode_no < agent.learn_start_from_this_episode
        for _ in range(agent.max_nb_steps_per_episode):
            # generate commands
            if agent.noisy_net:
                agent.reset_noise()  # Draw a new set of noisy weights
            commands, replay_info, current_dynamics, current_belief = agent.act(obs, infos, input_quest, input_quest_mask, quest_id_list, prev_commands, previous_dynamics, previous_belief, random=act_randomly)
            tmp_replay_buffer.append(replay_info)
            obs, infos = env.step(commands)
            prev_commands = commands
            previous_dynamics = current_dynamics
            previous_belief = current_belief
            belief_buffer.append(current_belief)
            if agent.noisy_net and step_in_total % agent.update_per_k_game_steps == 0:
                agent.reset_noise()  # Draw a new set of noisy weights
            if episode_no >= agent.learn_start_from_this_episode and step_in_total % agent.update_per_k_game_steps == 0:
                interaction_loss, interaction_q_value = agent.update_interaction()
                if interaction_loss is not None:
                    running_avg_ig_loss.push(interaction_loss)
                    running_avg_ig_q_value.push(interaction_q_value)
                qa_loss = agent.update_qa()
                if qa_loss is not None:
                    running_avg_qa_loss.push(qa_loss)
            step_in_total += 1
            still_running = generic.to_np(replay_info[-1])
            print_cmds.append(commands[0] if still_running[0] else "--")
            if np.sum(still_running) == 0:
                break
        if report:
            print(" / ".join(print_cmds).encode('utf-8'))
        # The agent has exhausted all steps, now answer question.
        chosen_head_tails = agent.answer_question_act(agent.naozi.get(), quest_list, current_belief)  # batch
        chosen_head_tails_np = generic.to_np(chosen_head_tails)
        chosen_answer_strings = generic.get_answer_strings(agent.naozi.get(), chosen_head_tails_np, agent.bert_tokenizer, agent.special_token_ids)
        answer_strings = [item["a_string"] for item in infos]
        answer_token_ids = [item["a"] for item in infos]
        qa_reward_np = generic.get_qa_reward(chosen_answer_strings, answer_strings)
        obs_strings = [agent.bert_tokenizer.decode(agent.naozi.get(i)) for i in range(batch_size)]
        ig_reward_np = generic.get_sufficient_info_reward(agent.naozi.get(), answer_token_ids)
        ig_reward = generic.to_pt(ig_reward_np, enable_cuda=False, type='float')  # batch
        # push qa experience into qa replay buffer
        replay_node_vocab = agent.kg.get_node_vocabulary()
        replay_relation_vocab = agent.kg.get_relation_vocabulary()
        replay_triplets = agent.kg.get_triplets()
        for b in range(batch_size):  # data points in batch
            is_prior = qa_reward_np[b] > agent.qa_reward_prior_threshold * agent.qa_replay_memory.avg_rewards()
            # if the agent is not in the correct state, do not push it into replay buffer
            if np.mean(ig_reward_np[b]) == 0.0:
                continue
            agent.qa_replay_memory.push(is_prior, qa_reward_np[b], agent.naozi.get_sentence_lists(b), quest_list[b], replay_node_vocab[b], replay_relation_vocab[b], replay_triplets[b], answer_token_ids[b], belief_buffer[-1][b].cpu() if belief_buffer[-1][b] is not None else None)
        # small positive reward whenever it answers question correctly
        masks_np = [generic.to_np(item[-1]) for item in tmp_replay_buffer]
        command_rewards_np = []
        for i in range(len(tmp_replay_buffer)):
            if i == len(tmp_replay_buffer) - 1:
                r = ig_reward * tmp_replay_buffer[i][-1]
                r_np = ig_reward_np * masks_np[i]
            else:
                # give reward only at that one game step, not all
                r = ig_reward * (tmp_replay_buffer[i][-1] - tmp_replay_buffer[i + 1][-1])
                r_np = ig_reward_np * (masks_np[i] - masks_np[i + 1])
            tmp_replay_buffer[i].append(r)
            command_rewards_np.append(r_np)
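        # Worked example (illustrative): if a game in the batch runs for 3 of 5
        # steps, its mask column is [1, 1, 1, 0, 0]. The differences
        # masks_np[i] - masks_np[i + 1] are [0, 0, 1, 0], so ig_reward is paid
        # exactly once, at the game's final running step; the very last step
        # uses the raw mask because there is no following step to diff against.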
        command_rewards_np = np.array(command_rewards_np)
        if report:
            print(command_rewards_np[:, 0])
        # push experience into replay buffer
        for b in range(len(ig_reward_np)):
            is_prior = np.sum(command_rewards_np, 0)[b] > 0.0
            mem = []
            for i in range(len(tmp_replay_buffer)):
                batch_description_list, batch_chosen_indices, batch_chosen_ctrlf_indices, batch_graph_node_vocabulary, batch_graph_relation_vocabulary, batch_graph_triplets, _, batch_rewards = tmp_replay_buffer[i]
                mem.append([copy.deepcopy(batch_description_list[b]),
                            copy.deepcopy(quest_list[b]),
                            batch_chosen_indices[b],
                            batch_chosen_ctrlf_indices[b],
                            copy.deepcopy(batch_graph_node_vocabulary[b]),
                            copy.deepcopy(batch_graph_relation_vocabulary[b]),
                            copy.deepcopy(batch_graph_triplets[b]),
                            copy.deepcopy(belief_buffer[i][b].cpu()) if belief_buffer[i][b] is not None else None,
                            batch_rewards[b]])
                if masks_np[i][b] == 0.0:
                    break
            agent.replay_memory.push(is_prior, mem)
        qa_acc = np.mean(qa_reward_np)
        ig_acc = np.mean(ig_reward_np)
        step_masks_np = np.sum(np.array(masks_np), 0)  # batch
        for i in range(len(qa_reward_np)):
            # if the answer is totally wrong, we assume it used all steps
            if qa_reward_np[i] == 0.0:
                step_masks_np[i] = agent.max_nb_steps_per_episode
        used_steps = np.mean(step_masks_np)
        running_avg_qa_acc.push(qa_acc)
        running_avg_ig_acc.push(ig_acc)
        running_avg_steps.push(used_steps)
        print_rewards = np.sum(np.mean(command_rewards_np, -1))
        if report:
            print("-- OBS: %s" % (obs_strings[0].encode('utf-8')))
            print("-- PRED: %s" % (chosen_answer_strings[0].encode('utf-8')))
        # finish game
        agent.finish_of_episode(episode_no, batch_no, batch_size)
        time_2 = datetime.datetime.now()
        eastern_time = datetime.datetime.now(timezone('US/Eastern')).strftime("%b %d %Y %H:%M:%S")
        if report:
            print("Episode: {:3d} | {:s} | time spent: {:s} | interaction loss: {:2.3f} | interaction qvalue: {:2.3f} | qa loss: {:2.3f} | rewards: {:2.3f} | qa acc: {:2.3f}/{:2.3f} | sufficient info: {:2.3f}/{:2.3f} | used steps: {:2.3f}".format(episode_no, eastern_time, str(time_2 - time_1).rsplit(".")[0], running_avg_ig_loss.get_avg(), running_avg_ig_q_value.get_avg(), running_avg_qa_loss.get_avg(), print_rewards, qa_acc, running_avg_qa_acc.get_avg(), ig_acc, running_avg_ig_acc.get_avg(), running_avg_steps.get_avg()))
        if __save__:
            agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_ep" + str(episode_no) + "_model.pt")
        if not report or episode_no < agent.learn_start_from_this_episode:
            episode_no += batch_size
            batch_no += 1
            continue
        eval_qa_acc, eval_ig_acc, eval_used_steps = 0.0, 0.0, 0.0
        # evaluate
        if agent.run_eval:
            eval_qa_acc, eval_ig_acc, eval_used_steps = evaluate.evaluate(env, agent, "valid")
            env.split_reset("train")
            # if eval is enabled, save the model by eval accuracy
            if eval_qa_acc >= best_qa_acc_so_far:
                best_qa_acc_so_far = eval_qa_acc
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
            curr_performance = eval_qa_acc
        else:
            if running_avg_qa_acc.get_avg() >= best_qa_acc_so_far:
                best_qa_acc_so_far = running_avg_qa_acc.get_avg()
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
            curr_performance = running_avg_qa_acc.get_avg()
        if prev_performance <= curr_performance:
            i_am_patient = 0
        else:
            i_am_patient += 1
        prev_performance = curr_performance
        # if patience is exhausted, resume from the best checkpoint
        if agent.patience > 0 and i_am_patient >= agent.patience:
            if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
                print('reload from a good checkpoint...')
                agent.load_pretrained_model(output_dir + "/" + agent.experiment_tag + "_model.pt", load_partial_graph=False)
                agent.update_target_net()
            i_am_patient = 0
        # plot using visdom
        if config["general"]["visdom"] and not agent.debug_mode:
            viz_avg_ig_acc.append(running_avg_ig_acc.get_avg())
            viz_avg_qa_acc.append(running_avg_qa_acc.get_avg())
            viz_avg_ig_q_value.append(running_avg_ig_q_value.get_avg())
            viz_eval_ig_acc.append(eval_ig_acc)
            viz_eval_qa_acc.append(eval_qa_acc)
            viz_eval_steps.append(eval_used_steps)
            viz_avg_steps.append(running_avg_steps.get_avg())
            viz_x = np.arange(len(viz_avg_ig_acc)).tolist()
            if plt_win is None:
                plt_win = viz.line(X=viz_x, Y=viz_avg_ig_acc,
                                   opts=dict(title=agent.experiment_tag + "_train"),
                                   name="sufficient info")
                viz.line(X=viz_x, Y=viz_avg_qa_acc,
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win, update='append', name="qa")
            else:
                viz.line(X=[len(viz_avg_ig_acc) - 1], Y=[viz_avg_ig_acc[-1]],
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win,
                         update='append', name="sufficient info")
                viz.line(X=[len(viz_avg_qa_acc) - 1], Y=[viz_avg_qa_acc[-1]],
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win,
                         update='append', name="qa")
            if plt_q_value_win is None:
                plt_q_value_win = viz.line(X=viz_x, Y=viz_avg_ig_q_value,
                                           opts=dict(title=agent.experiment_tag + "_train_q_value"),
                                           name="sufficient info")
            else:
                viz.line(X=[len(viz_avg_ig_q_value) - 1], Y=[viz_avg_ig_q_value[-1]],
                         opts=dict(title=agent.experiment_tag + "_train_q_value"),
                         win=plt_q_value_win,
                         update='append', name="sufficient info")
            if plt_steps_win is None:
                plt_steps_win = viz.line(X=viz_x, Y=viz_avg_steps,
                                         opts=dict(title=agent.experiment_tag + "_train_step"),
                                         name="used steps")
            else:
                viz.line(X=[len(viz_avg_steps) - 1], Y=[viz_avg_steps[-1]],
                         opts=dict(title=agent.experiment_tag + "_train_step"),
                         win=plt_steps_win,
                         update='append', name="used steps")
            if agent.run_eval:
                if eval_plt_win is None:
                    eval_plt_win = viz.line(X=viz_x, Y=viz_eval_ig_acc,
                                            opts=dict(title=agent.experiment_tag + "_eval"),
                                            name="sufficient info")
                    viz.line(X=viz_x, Y=viz_eval_qa_acc,
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win, update='append', name="qa")
                else:
                    viz.line(X=[len(viz_eval_ig_acc) - 1], Y=[viz_eval_ig_acc[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win,
                             update='append', name="sufficient info")
                    viz.line(X=[len(viz_eval_qa_acc) - 1], Y=[viz_eval_qa_acc[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win,
                             update='append', name="qa")
                if eval_plt_steps_win is None:
                    eval_plt_steps_win = viz.line(X=viz_x, Y=viz_eval_steps,
                                                  opts=dict(title=agent.experiment_tag + "_eval_step"),
                                                  name="used steps")
                else:
                    # note: the original indexed this by len(viz_avg_steps); the eval
                    # series is the one being appended to, so index it instead
                    viz.line(X=[len(viz_eval_steps) - 1], Y=[viz_eval_steps[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval_step"),
                             win=eval_plt_steps_win,
                             update='append', name="used steps")
        # write accuracies down into file
        _s = json.dumps({"time spent": str(time_2 - time_1).rsplit(".")[0],
                        "sufficient info": str(running_avg_ig_acc.get_avg()),
                        "qa": str(running_avg_qa_acc.get_avg()),
                        "sufficient qvalue": str(running_avg_ig_q_value.get_avg()),
                        "eval sufficient info": str(eval_ig_acc),
                        "eval qa": str(eval_qa_acc),
                        "eval steps": str(eval_used_steps),
                        "used steps": str(running_avg_steps.get_avg())})
        with open(output_dir + "/" + json_file_name + '.json', 'a+') as outfile:
            outfile.write(_s + '\n')
            outfile.flush()
        episode_no += batch_size
        batch_no += 1

if __name__ == '__main__':
    train()
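# `generic.HistoryScoreCache` is not part of this file; judging from its use
# above (push scalar scores, read a running average over the most recent
# `capacity` entries), a minimal stand-in could look like this sketch.
# Assumption only — the real class lives in generic.py.
from collections import deque

class HistoryScoreCache:
    def __init__(self, capacity=50):
        self.cache = deque(maxlen=capacity)  # oldest entries drop out beyond capacity

    def push(self, value):
        self.cache.append(float(value))

    def get_avg(self):
        # mean of the retained window; 0.0 before anything has been pushed
        return sum(self.cache) / len(self.cache) if self.cache else 0.0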
|
[
"evaluate.evaluate"
] |
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import shutil
import time
import torch
from utils import util
from evaluate import MultiWozEvaluator
from model.model import Model
parser = argparse.ArgumentParser(description='S2S')
parser.add_argument('--no_cuda', type=util.str2bool, nargs='?', const=True, default=True, help='disables CUDA training (default: True, i.e. run on CPU)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--no_models', type=int, default=20, help='how many models to evaluate')
parser.add_argument('--original', type=str, default='model/model/', help='Original path.')
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--use_emb', type=str, default='False')
parser.add_argument('--beam_width', type=int, default=10, help='Beam width used in beamsearch')
parser.add_argument('--write_n_best', type=util.str2bool, nargs='?', const=True, default=False, help='Write n-best list (n=beam_width)')
parser.add_argument('--model_path', type=str, default='model/model/translate.ckpt', help='Path to a specific model checkpoint.')
parser.add_argument('--model_dir', type=str, default='model/')
parser.add_argument('--model_name', type=str, default='translate.ckpt')
parser.add_argument('--valid_output', type=str, default='model/data/val_dials/', help='Validation Decoding output dir path')
parser.add_argument('--decode_output', type=str, default='model/data/test_dials/', help='Decoding output dir path')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
def load_config(args):
    config = util.unicode_to_utf8(
        json.load(open('%s.json' % args.model_path, 'rb')))
    # overlay command-line arguments onto the loaded config
    # (the original `args.__args.items()` raises AttributeError; an argparse
    # namespace exposes its attributes via vars())
    for key, value in vars(args).items():
        config[key] = value
    return config
def loadModelAndData(num):
    # Load dictionaries
    with open('data/input_lang.index2word.json') as f:
        input_lang_index2word = json.load(f)
    with open('data/input_lang.word2index.json') as f:
        input_lang_word2index = json.load(f)
    with open('data/output_lang.index2word.json') as f:
        output_lang_index2word = json.load(f)
    with open('data/output_lang.word2index.json') as f:
        output_lang_word2index = json.load(f)
    # Reload existing checkpoint
    model = Model(args, input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index)
    if args.load_param:
        model.loadModel(iter=num)
    # Recreate empty output directories
    if os.path.exists(args.decode_output):
        shutil.rmtree(args.decode_output)
        os.makedirs(args.decode_output)
    else:
        os.makedirs(args.decode_output)
    if os.path.exists(args.valid_output):
        shutil.rmtree(args.valid_output)
        os.makedirs(args.valid_output)
    else:
        os.makedirs(args.valid_output)
    # Load validation file list:
    with open('data/val_dials.json') as outfile:
        val_dials = json.load(outfile)
    # Load test file list:
    with open('data/test_dials.json') as outfile:
        test_dials = json.load(outfile)
    return model, val_dials, test_dials
def decode(num=1):
    model, val_dials, test_dials = loadModelAndData(num)
    evaluator_valid = MultiWozEvaluator("valid")
    evaluator_test = MultiWozEvaluator("test")
    start_time = time.time()
    for ii in range(2):
        if ii == 0:
            print(50 * '-' + 'GREEDY')
            model.beam_search = False
        else:
            print(50 * '-' + 'BEAM')
            model.beam_search = True
        # VALIDATION
        val_dials_gen = {}
        valid_loss = 0
        for name, val_file in val_dials.items():
            input_tensor = []; target_tensor = []; bs_tensor = []; db_tensor = []
            input_tensor, target_tensor, bs_tensor, db_tensor = util.loadDialogue(model, val_file, input_tensor, target_tensor, bs_tensor, db_tensor)
            # create an empty matrix with padding tokens
            input_tensor, input_lengths = util.padSequence(input_tensor)
            target_tensor, target_lengths = util.padSequence(target_tensor)
            bs_tensor = torch.tensor(bs_tensor, dtype=torch.float, device=device)
            db_tensor = torch.tensor(db_tensor, dtype=torch.float, device=device)
            output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                        db_tensor, bs_tensor)
            valid_loss += 0  # per-sentence loss is computed but deliberately not accumulated here
            val_dials_gen[name] = output_words
        print('Current VALID LOSS:', valid_loss)
        with open(args.valid_output + 'val_dials_gen.json', 'w') as outfile:
            json.dump(val_dials_gen, outfile)
        evaluator_valid.evaluateModel(val_dials_gen, val_dials, mode='valid')
        # TESTING
        test_dials_gen = {}
        test_loss = 0
        for name, test_file in test_dials.items():
            input_tensor = []; target_tensor = []; bs_tensor = []; db_tensor = []
            input_tensor, target_tensor, bs_tensor, db_tensor = util.loadDialogue(model, test_file, input_tensor, target_tensor, bs_tensor, db_tensor)
            # create an empty matrix with padding tokens
            input_tensor, input_lengths = util.padSequence(input_tensor)
            target_tensor, target_lengths = util.padSequence(target_tensor)
            bs_tensor = torch.tensor(bs_tensor, dtype=torch.float, device=device)
            db_tensor = torch.tensor(db_tensor, dtype=torch.float, device=device)
            output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                        db_tensor, bs_tensor)
            test_loss += 0  # same: the loss term is discarded, so the printed average stays 0
            test_dials_gen[name] = output_words
        test_loss /= len(test_dials)
        print('Current TEST LOSS:', test_loss)
        with open(args.decode_output + 'test_dials_gen.json', 'w') as outfile:
            json.dump(test_dials_gen, outfile)
        evaluator_test.evaluateModel(test_dials_gen, test_dials, mode='test')
    print('TIME:', time.time() - start_time)
def decodeWrapper():
    # Load config file
    with open(args.model_path + '.config') as f:
        add_args = json.load(f)
        for k, v in add_args.items():
            setattr(args, k, v)
    args.mode = 'test'
    args.load_param = True
    args.dropout = 0.0
    assert args.dropout == 0.0
    # Start going through models
    args.original = args.model_path
    for ii in range(1, args.no_models + 1):
        print(70 * '-' + 'EVALUATING EPOCH %s' % ii)
        args.model_path = args.model_path + '-' + str(ii)
        try:
            decode(ii)
        except:
            print('cannot decode')
        args.model_path = args.original

if __name__ == '__main__':
    decodeWrapper()
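# `util.padSequence` is not included in this row; from its call sites above it
# takes a list of variable-length token sequences and returns a padded matrix
# plus the original lengths. A hypothetical sketch (the real code lives in
# utils/util.py and may differ, e.g. in pad token or sorting):
def padSequence(sequences, pad_token=0):
    lengths = [len(s) for s in sequences]
    max_len = max(lengths)
    padded = torch.full((len(sequences), max_len), pad_token, dtype=torch.long)
    for i, s in enumerate(sequences):
        padded[i, :lengths[i]] = torch.as_tensor(s, dtype=torch.long)
    return padded, lengths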
|
[
"evaluate.MultiWozEvaluator"
] |
import argparse
import os
import torch
import yaml
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils.model import get_vocoder, get_param_num
from utils.tools import to_device, log, synth_one_sample
from model import FastSpeech2Loss, FastSpeech2
from dataset import Dataset
from evaluate import evaluate
import wandb
import pandas as pd
from pytorch_lamb import Lamb, log_lamb_rs
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args, configs):
    print("Prepare training ...")
    preprocess_config, model_config, train_config = configs
    # Get dataset
    dataset = Dataset(
        "train.txt", preprocess_config, train_config, sort=True, drop_last=True
    )
    batch_size = args.batch_size if args.batch_size else train_config["optimizer"]["batch_size"]
    group_size = 1  # Set this larger than 1 to enable sorting in Dataset
    print(f"Number of rows in training dataset: {len(dataset)}")
    assert batch_size * group_size < len(dataset)
    loader = DataLoader(
        dataset,
        batch_size=batch_size * group_size,
        shuffle=True,
        collate_fn=dataset.collate_fn,
    )
    # Prepare model
    model = FastSpeech2(preprocess_config, model_config).to(device)
    model.train()  # sets the model into training mode
    step_size = train_config["optimizer"]["step_size"]
    weight_decay = args.wd if args.wd else train_config["optimizer"]["weight_decay"]
    betas = train_config["optimizer"]["betas"]
    eps = train_config["optimizer"]["eps"]
    # Reading the LAMB paper you might think 'lr' is not needed, but the lr
    # parameter is essentially an Adam learning rate, which is then multiplied
    # by the per-layer trust ratio to get the actual step size at each layer.
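    # Illustrative sketch of the per-layer LAMB step (hedged; see pytorch_lamb
    # for the real implementation details):
    #   update = adam_update + weight_decay * w      # Adam direction + decay
    #   trust_ratio = ||w|| / ||update||             # layer-wise rescaling
    #   w <- w - lr * trust_ratio * update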
    optimizer = Lamb(model.parameters(), lr=step_size, weight_decay=weight_decay, betas=betas, eps=eps)
    model = nn.DataParallel(model)
    num_param = get_param_num(model)
    Loss = FastSpeech2Loss(preprocess_config, model_config).to(device)
    print("Number of FastSpeech2 Parameters:", num_param)
    if args.wandb:
        log_config = {}
        for key, val in pd.json_normalize(model_config["transformer"]).iloc[0].items():
            log_config[f"transformer.{key}"] = str(val)
        log_config["multi_speaker"] = model_config["multi_speaker"]
        log_config["vocoder"] = model_config["vocoder"]
        log_config["sample_rate"] = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
        log_config["train.batch_size"] = batch_size
        log_config["train.optimizer"] = "lamb"
        log_config["train.step_size"] = step_size
        log_config["train.weight_decay"] = weight_decay
        log_config["train.betas"] = str(betas)
        log_config["train.eps"] = eps
        log_config["num_params"] = num_param
        log_config["len(dataset)"] = len(dataset)
        print(log_config)
        wandb.init(project="synthesis-fastspeech2", entity="papa-reo", config=log_config)
    # Load vocoder
    vocoder = get_vocoder(model_config, device)
    # Init logger
    for p in train_config["path"].values():
        os.makedirs(p, exist_ok=True)
    train_log_path = os.path.join(train_config["path"]["log_path"], "train")
    val_log_path = os.path.join(train_config["path"]["log_path"], "val")
    os.makedirs(train_log_path, exist_ok=True)
    os.makedirs(val_log_path, exist_ok=True)
    train_logger = SummaryWriter(train_log_path)
    val_logger = SummaryWriter(val_log_path)
    # Training
    step = args.restore_step + 1
    epoch = 1
    grad_acc_step = train_config["optimizer"]["grad_acc_step"]
    grad_clip_thresh = train_config["optimizer"]["grad_clip_thresh"]
    total_step = train_config["step"]["total_step"]
    log_step = train_config["step"]["log_step"]
    save_step = train_config["step"]["save_step"]
    synth_step = train_config["step"]["synth_step"]
    val_step = train_config["step"]["val_step"]
    outer_bar = tqdm(total=total_step, desc="Training", position=0)
    outer_bar.n = args.restore_step
    outer_bar.update()
    while True:
        inner_bar = tqdm(total=len(loader), desc="Epoch {}".format(epoch), position=1)
        for batchs in loader:
            for batch in batchs:
                batch = to_device(batch, device)
                # Forward
                output = model(*(batch[2:]))
                # Cal Loss
                losses = Loss(batch, output)
                total_loss = losses[0]
                # Backward
                total_loss = total_loss / grad_acc_step
                total_loss.backward()
                if step % grad_acc_step == 0:
                    # Clipping gradients to avoid gradient explosion
                    nn.utils.clip_grad_norm_(model.parameters(), grad_clip_thresh)
                    # Update weights
                    optimizer.step()
                    optimizer.zero_grad()
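                # Worked example (illustrative): with grad_acc_step = 4, each
                # backward() contributes loss/4 to the accumulated gradients and
                # optimizer.step() fires once every 4 batches, approximating a
                # single update on a 4x larger batch.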
                if step % log_step == 0:
                    losses = [l.item() for l in losses]
                    message1 = "Step {}/{}, ".format(step, total_step)
                    message2 = "Total Loss: {:.4f}, Mel Loss: {:.4f}, Mel PostNet Loss: {:.4f}, Pitch Loss: {:.4f}, Energy Loss: {:.4f}, Duration Loss: {:.4f}".format(
                        *losses
                    )
                    with open(os.path.join(train_log_path, "log.txt"), "a") as f:
                        f.write(message1 + message2 + "\n")
                    outer_bar.write(message1 + message2)
                    log(train_logger, step, losses=losses)
                    log_lamb_rs(optimizer, train_logger, step)
                    if args.wandb:
                        wandb.log({"train_loss": total_loss})
                        wandb.watch(model)
                if step % synth_step == 0:
                    fig, wav_reconstruction, wav_prediction, tag = synth_one_sample(
                        batch,
                        output,
                        vocoder,
                        model_config,
                        preprocess_config,
                    )
                    log(
                        train_logger,
                        fig=fig,
                        tag="Training/step_{}_{}".format(step, tag),
                    )
                    sampling_rate = preprocess_config["preprocessing"]["audio"][
                        "sampling_rate"
                    ]
                    log(
                        train_logger,
                        audio=wav_reconstruction,
                        sampling_rate=sampling_rate,
                        tag="Training/step_{}_{}_reconstructed".format(step, tag),
                    )
                    log(
                        train_logger,
                        audio=wav_prediction,
                        sampling_rate=sampling_rate,
                        tag="Training/step_{}_{}_synthesized".format(step, tag),
                    )
                if step % val_step == 0:
                    model.eval()
                    message = evaluate(model, step, configs, val_logger, vocoder, log_to_wandb=args.wandb)
                    with open(os.path.join(val_log_path, "log.txt"), "a") as f:
                        f.write(message + "\n")
                    outer_bar.write(message)
                    model.train()
                if step % save_step == 0:
                    torch.save(
                        {
                            "model": model.module.state_dict(),
                            "optimizer": optimizer.state_dict(),
                        },
                        os.path.join(
                            train_config["path"]["ckpt_path"],
                            "{}.pth.tar".format(step),
                        ),
                    )
                if step == total_step:
                    quit()
                step += 1
                outer_bar.update(1)
            inner_bar.update(1)
        epoch += 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--restore_step", type=int, default=0)
    parser.add_argument(
        "-p",
        "--preprocess_config",
        type=str,
        required=True,
        help="path to preprocess.yaml",
    )
    parser.add_argument(
        "-m", "--model_config", type=str, required=True, help="path to model.yaml"
    )
    parser.add_argument(
        "-t", "--train_config", type=str, required=True, help="path to train.yaml"
    )
    parser.add_argument(
        '--batch-size', type=int, default=None, metavar='N', help='input batch size for training (for default val see config/../train.yaml)'
    )
    parser.add_argument(
        '--wd', type=float, default=None, metavar='WD', help='weight decay (for default val see config/../train.yaml)'
    )
    # note: argparse's type=bool treats any non-empty string as True, so
    # "--wandb False" still enables logging; only the bare default is False
    parser.add_argument(
        '--wandb', type=bool, default=False, help='log to wandb'
    )
    args = parser.parse_args()
    # Read Config
    preprocess_config = yaml.load(
        open(args.preprocess_config, "r"), Loader=yaml.FullLoader
    )
    model_config = yaml.load(open(args.model_config, "r"), Loader=yaml.FullLoader)
    train_config = yaml.load(open(args.train_config, "r"), Loader=yaml.FullLoader)
    configs = (preprocess_config, model_config, train_config)
    main(args, configs)
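# Hypothetical invocation matching the flags above (the config paths are
# placeholders, not files shipped with this row):
#   python train.py -p config/LJSpeech/preprocess.yaml \
#       -m config/LJSpeech/model.yaml -t config/LJSpeech/train.yaml \
#       --batch-size 16 --restore_step 0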
|
[
"evaluate.evaluate"
] |
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
from skimage.segmentation.random_walker_segmentation import random_walker
from tqdm import tqdm
import torchio
import torch
def compute_predictions(image_path, mask_path, gt_path, save_path, nr_modalities, class_labels, resize=True, beta=10):
    image_filenames = utils.load_filenames(image_path)[::nr_modalities]
    mask_filenames = utils.load_filenames(mask_path)
    target_shape = (256, 256, 200)  # (256, 256, 100)
    for i in tqdm(range(len(image_filenames))):
        is_resized = False  # reset per case so a previous case's resize does not leak into this one
        image, affine, spacing, header = utils.load_nifty(image_filenames[i])
        mask, _, _, _ = utils.load_nifty(mask_filenames[i])
        if resize and image.size > np.prod(target_shape):
            is_resized = True
            print("Resized: ", os.path.basename(image_filenames[i]))
            original_shape = image.shape
            image = utils.interpolate(image, (target_shape[0], target_shape[1], original_shape[2]))
            mask = utils.interpolate(mask, (target_shape[0], target_shape[1], original_shape[2]), mask=True)
        image = utils.normalize(image)
        labels = np.unique(mask)
        # labels = labels[labels > 0]
        for label in np.flip(labels):
            mask[mask == label] = label + 1
        mask = mask.astype(np.uint8)
        mask = random_walker(data=image, labels=mask, beta=beta, mode='cg_mg')
        for label in labels:
            mask[mask == label + 1] = label
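        # The +1/-1 shift exists because skimage's random_walker treats label 0
        # as "unlabeled, to be estimated", so class 0 (background) must sit at a
        # positive id during the walk and be mapped back afterwards. Iterating
        # np.flip(labels) (descending) keeps the in-place +1 shift collision-free.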
        if is_resized:
            mask = utils.interpolate(mask, original_shape, mask=True)
        utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + ".nii.gz"), mask, affine, spacing, header, is_mask=True)
    results = evaluate(gt_path, save_path, class_labels)
    return results
# def compute_predictions(image_path, mask_path, gt_path, save_path):
#     image_filenames = utils.load_filenames(image_path)
#     mask_filenames = utils.load_filenames(mask_path)
#
#     for i in tqdm(range(len(image_filenames))):
#         _, affine, spacing, header = utils.load_nifty(mask_filenames[i])
#         subject = torchio.Subject(image=torchio.ScalarImage(image_filenames[i]), mask=torchio.LabelMap(mask_filenames[i]))
#         sampler = torchio.inference.GridSampler(subject, patch_size=(20, 20, 10), padding_mode='edge')
#         aggregator = torchio.inference.GridAggregator(sampler)
#         for patch in sampler:
#             image = patch["image"][torchio.DATA].numpy()[0]
#             image = utils.normalize(image)
#             mask = patch["mask"][torchio.DATA].numpy()[0]
#             location = torch.tensor(patch[torchio.LOCATION]).unsqueeze(0)
#             if not (image.max() <= 0 or mask.max() == 0):
#                 # image[image < 0] = 0
#                 mask = mask.astype(np.int32)
#                 mask = random_walker(data=image, labels=mask, mode='cg_j')
#             mask = torch.tensor(mask).unsqueeze(0).unsqueeze(0)
#             aggregator.add_batch(mask, location)
#         mask = aggregator.get_output_tensor()
#         utils.save_nifty(save_path + os.path.basename(mask_filenames[i]), mask, affine, spacing, header, is_mask=True)
#     mean_dice_score, median_dice_score = evaluate(gt_path, save_path)
#     return mean_dice_score, median_dice_score
|
[
"evaluate.evaluate"
] |
import random
import os
import sys
from models.bert import BERT_Model
from models.bilstm_crf_ import BiLSTM_CRF_Model
from data import build_corpus
from config import ModelPathConfig, ResultPathConfig
from datetime import datetime
from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf
from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd
from tabulate import tabulate
import pandas as pd
def bert_test():
    model_is_existed = os.path.exists(ModelPathConfig.bert)
    print("upload data!")
    word_lists, tag_lists, word2id, tag2id = build_corpus("train")
    test_word_lists, test_tag_lists, _, _ = build_corpus("test")
    labels = list(tag2id.keys())
    dev_indices = random.sample(range(len(word_lists)), len(word_lists) // 5)
    train_indices = [i for i in range(len(word_lists)) if i not in dev_indices]
    dev_word_lists = [word_lists[ind] for ind in dev_indices]
    dev_tag_lists = [tag_lists[ind] for ind in dev_indices]
    train_word_lists = [word_lists[ind] for ind in train_indices]
    train_tag_lists = [tag_lists[ind] for ind in train_indices]
    bert_tag2id = extend_map_bert(tag2id)
    if not model_is_existed:
        print('start training')
        start = datetime.now()
        vocab_size = len(word2id)
        out_size = len(bert_tag2id)
        bert_model = BERT_Model(vocab_size, out_size)
        bert_model.train(train_word_lists, train_tag_lists,
                         word2id, bert_tag2id, dev_word_lists, dev_tag_lists)
        deltatime = datetime.now() - start
        print("Training is finished, {} seconds".format(deltatime.seconds))
        try:
            print("Save the model")
            save_model(bert_model, ModelPathConfig.bert)
        except:
            print("fail to save model")
    else:
        try:
            print("load model")
            bert_model = load_model(ModelPathConfig.bert)
        except:
            print("fail to load model")
            sys.exit(0)
    print("test the model")
    pred_tag_lists = bert_model.test(test_word_lists, test_tag_lists, word2id, bert_tag2id)
    label_tag_lists = test_tag_lists
    units = evaluate_entity_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig.bert_entity)
    print(tabulate(df, headers='keys', tablefmt='psql'))
    units = evaluate_single_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig.bert_model)
    print(tabulate(df, headers='keys', tablefmt='psql'))
def _bilstm_crf_test(if_train=True):
    model_is_existed = os.path.exists(ModelPathConfig._bilstm_crf)
    print("upload data!")
    word_lists, tag_lists, word2id, tag2id = build_corpus("train")
    test_word_lists, test_tag_lists, _, _ = build_corpus("test")
    labels = list(tag2id.keys())
    dev_indices = random.sample(range(len(word_lists)), len(word_lists) // 5)
    train_indices = [i for i in range(len(word_lists)) if i not in dev_indices]
    dev_word_lists = [word_lists[ind] for ind in dev_indices]
    dev_tag_lists = [tag_lists[ind] for ind in dev_indices]
    train_word_lists = [word_lists[ind] for ind in train_indices]
    train_tag_lists = [tag_lists[ind] for ind in train_indices]
    # bilstm_crf_word2id, bilstm_crf_tag2id = extend_map(word2id, tag2id, crf=True)
    bilstm_crf_word2id, bilstm_crf_tag2id = extend_map(word2id, tag2id, crf=False)
    if if_train or not model_is_existed:
        print('start training')
        # sample_print_test(train_word_lists, train_tag_lists)
        start = datetime.now()
        vocab_size = len(bilstm_crf_word2id)
        out_size = len(tag2id)
        bilstm_model = BiLSTM_CRF_Model(vocab_size, out_size)
        bilstm_model.train(train_word_lists, train_tag_lists,
                           word2id, bilstm_crf_tag2id, dev_word_lists, dev_tag_lists)
        deltatime = datetime.now() - start
        print("Training is finished, {} seconds".format(deltatime.seconds))
        save_model(bilstm_model, ModelPathConfig._bilstm_crf)
        print("Save the model")
    else:
        print("load model")
        bilstm_model = load_model(ModelPathConfig._bilstm_crf)
    print("test the model")
    pred_tag_lists, label_tag_lists = bilstm_model.test(test_word_lists, test_tag_lists, word2id, tag2id)
    units = evaluate_entity_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig._bilstm_crf_entity)
    print(tabulate(df, headers='keys', tablefmt='psql'))
    units = evaluate_single_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig._bilstm_crf_model)
    print(tabulate(df, headers='keys', tablefmt='psql'))
def mrc_bert_test(if_train=True):
    pass

if __name__ == '__main__':
    pass
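# Nothing runs under __main__ yet; a hypothetical entry point (an assumption,
# not part of the original file) would simply call one of the drivers, e.g.:
#     bert_test()
#     _bilstm_crf_test(if_train=True)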
|
[
"evaluate.unitstopd",
"evaluate.evaluate_single_label",
"evaluate.evaluate_entity_label"
] |
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
# from raft import RAFT
from core.ours import RAFT
import evaluate
import datasets
import flow_vis
import random
from torch.utils.tensorboard import SummaryWriter
from utils.scheduler import CosineAnnealingWarmupRestarts
try:
from torch.cuda.amp import GradScaler
except:
# dummy GradScaler for PyTorch < 1.6
class GradScaler:
def __init__(self):
pass
def scale(self, loss):
return loss
def unscale_(self, optimizer):
pass
def step(self, optimizer):
optimizer.step()
def update(self):
pass
# exclude extremely large displacements
MAX_FLOW = 400
SUM_FREQ = 100
VAL_FREQ = 5000
def sequence_loss(flow_preds, flow_gt, valid, sparse_lambda=1.0, gamma=0.8, max_flow=MAX_FLOW):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds[0])
flow_loss = 0.0
sparse_loss = 0.0
    # exclude invalid pixels and extremely large displacements
mag = torch.sum(flow_gt ** 2, dim=1).sqrt()
dense_valid = (valid >= 0.5) & (mag < max_flow)
bs, _, I_H, I_W = flow_gt.shape
for i in range(n_predictions):
# i_weight = gamma ** (n_predictions - i - 1)
i_weight = 1.0
i_loss = (flow_preds[0][i] - flow_gt).abs()
# i_loss = (flow_preds[0][i] - flow_gt).square()
flow_loss += i_weight * (dense_valid[:, None] * i_loss).mean()
if sparse_lambda > 0.0:
ref, sparse_flow, _, _ = flow_preds[1][i]
scale = torch.tensor((I_W - 1, I_H - 1), dtype=torch.float32).view(1, 1, 2).to(sparse_flow.device)
flatten_gt = flow_gt.flatten(2).permute(0, 2, 1)
flatten_valid = valid.flatten(1)
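            # map the normalized reference points back to integer pixel coordinates
            # so the ground-truth flow can be gathered at those sparse locations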
coords = torch.round(ref * scale).long()
            coords = torch.clamp_max(coords[..., 1] * I_W + coords[..., 0], I_H * I_W - 1)  # row-major flat index: y * W + x
sparse_gt = torch.gather(flatten_gt, 1, coords.unsqueeze(-1).repeat(1, 1, 2))
sparse_valid = torch.gather(flatten_valid, 1, coords)
sparse_valid = (sparse_valid >= 0.5) & (torch.sum(sparse_gt ** 2, dim=-1).sqrt() < max_flow)
sparse_i_loss = (sparse_flow * scale - sparse_gt).abs()
# sparse_i_loss = (sparse_flow * scale - sparse_gt).square()
sparse_loss += i_weight * (sparse_valid[..., None] * sparse_i_loss).mean()
loss = flow_loss + sparse_loss * sparse_lambda
epe = torch.sum((flow_preds[0][-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[dense_valid.view(-1)]
metrics = {
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
'loss': loss,
'flow_loss': flow_loss,
'sparse_loss': sparse_loss
}
return loss, metrics
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.wdecay)
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, round(args.num_steps * 0.8))
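    # StepLR decays the learning rate by its default factor of 0.1 once 80% of
    # the training steps have elapsed; the schedulers below are kept for reference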
# scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
# pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
# scheduler = torch.optim.lr_scheduler.OneCycleLR(
# optimizer, args.lr,
# args.num_steps + 10,
# pct_start=0.05,
# cycle_momentum=False,
# anneal_strategy='cos')
# scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
# optimizer, 1000, T_mult=1, eta_min=0, last_epoch=- 1, verbose=False)
return optimizer, scheduler
class Logger:
def __init__(self, model, scheduler):
self.model = model
self.scheduler = scheduler
self.total_steps = 0
self.running_loss = {}
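        # running_loss accumulates each metric over SUM_FREQ steps;
        # _print_training_status averages, logs and resets it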
self.writer = None
def _print_training_status(self):
metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
# print the training status
print(training_str + metrics_str)
if self.writer is None:
self.writer = SummaryWriter()
for k in self.running_loss:
self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
self.running_loss[k] = 0.0
def push(self, metrics):
self.total_steps += 1
for key in metrics:
if key not in self.running_loss:
self.running_loss[key] = 0.0
self.running_loss[key] += metrics[key]
if self.total_steps % SUM_FREQ == SUM_FREQ-1:
self._print_training_status()
self.running_loss = {}
def write_dict(self, results):
if self.writer is None:
self.writer = SummaryWriter()
for key in results:
self.writer.add_scalar(key, results[key], self.total_steps)
def write_image(self, image1, image2, target, pred, phase="T", idx=0):
if self.writer is None:
self.writer = SummaryWriter()
_, I_H, I_W = image1.shape
scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 2).to(image1.device)
image1 = image1.detach().cpu().numpy()
image1 = np.transpose(image1, (1, 2, 0))
image2 = image2.detach().cpu().numpy()
image2 = np.transpose(image2, (1, 2, 0))
target = target.detach().cpu().numpy()
target = np.transpose(target, (1, 2, 0))
target_img = flow_vis.flow_to_color(target, convert_to_bgr=False)
pred_img = list()
for p_i in range(len(pred[0])):
ref, sparse_flow, masks, scores = pred[1][p_i]
coords = torch.round(ref.squeeze(0) * scale).long()
coords = coords.cpu().numpy()
confidence = np.squeeze(scores.squeeze(0).cpu().numpy())
ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
for k_i in range(len(coords)):
coord = coords[k_i]
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
pred_img.append(ref_img)
this_pred = pred[0][p_i].squeeze(0).detach().cpu().numpy()
this_pred = np.transpose(this_pred, (1, 2, 0))
this_pred = flow_vis.flow_to_color(this_pred, convert_to_bgr=False)
pred_img.append(this_pred)
mask_img = list()
top_k = len(pred[0])
# top_k_indices = np.argsort(-confidence)[:top_k]
masks = masks.squeeze(0).cpu()
# masks = masks.reshape(self.num_keypoints, 1, H, W)
masks = F.interpolate(masks, size=(I_H, I_W), mode="bilinear", align_corners=False).numpy()
masks = masks.squeeze(1)
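        # show only the masks with the largest spatial support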
top_k_indices = np.argsort(-np.sum(masks, axis=(1, 2)))[:top_k]
for m_i in top_k_indices:
coord = coords[m_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[m_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
mask_img.append(ref_img)
masked_flow = np.expand_dims(masks[m_i], axis=-1) * this_pred
mask_img.append(masked_flow)
pred_img = np.concatenate(pred_img, axis=1)
mask_img = np.concatenate(mask_img, axis=1)
image = np.concatenate((np.concatenate((image1, image2, target_img, pred_img), axis=1),
np.concatenate((image1, image2, target_img, mask_img), axis=1)), axis=0)
image = image.astype(np.uint8)
self.writer.add_image("{}_Image_{:02d}".format(phase, idx + 1), image, self.total_steps, dataformats='HWC')
def write_images(self, image1, image2, targets, preds, phase="T"):
if self.writer is None:
self.writer = SummaryWriter()
_, _, I_H, I_W = image1.shape
scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 1, 2).to(image1.device)
image1 = image1.detach().cpu().numpy()
image1 = np.transpose(image1, (0, 2, 3, 1))
image2 = image2.detach().cpu().numpy()
image2 = np.transpose(image2, (0, 2, 3, 1))
targets = targets.detach().cpu().numpy()
targets = np.transpose(targets, (0, 2, 3, 1))
sampled_indices = random.sample(range(len(targets)), min(10, len(targets)))
for i_i, n_i in enumerate(sampled_indices):
this_image1 = image1[n_i]
this_image2 = image2[n_i]
target_img = flow_vis.flow_to_color(targets[n_i], convert_to_bgr=False)
pred_img = list()
for p_i in range(len(preds[0])):
ref, sparse_flow, masks, scores = preds[1][p_i]
coords = torch.round(ref * scale).long()
coords = coords.cpu().numpy()[n_i]
confidence = np.squeeze(scores.cpu().numpy()[n_i])
ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
for k_i in range(len(coords)):
coord = coords[k_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
pred_img.append(ref_img)
this_pred = preds[0][p_i].detach().cpu().numpy()[n_i]
this_pred = np.transpose(this_pred, (1, 2, 0))
this_pred = flow_vis.flow_to_color(this_pred, convert_to_bgr=False)
pred_img.append(this_pred)
mask_img = list()
top_k = len(preds[0])
# top_k_indices = np.argsort(-confidence)[:top_k]
masks = masks[n_i].cpu()
masks = F.interpolate(masks, size=(I_H, I_W), mode="bilinear", align_corners=False).numpy()
masks = masks.squeeze(1)
top_k_indices = np.argsort(-np.sum(masks, axis=(1, 2)))[:top_k]
for m_i in top_k_indices:
coord = coords[m_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[m_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
mask_img.append(ref_img)
masked_flow = np.expand_dims(masks[m_i], axis=-1) * this_pred
mask_img.append(masked_flow)
pred_img = np.concatenate(pred_img, axis=1)
mask_img = np.concatenate(mask_img, axis=1)
image = np.concatenate((np.concatenate((this_image1, this_image2, target_img, pred_img), axis=1),
np.concatenate((this_image1, this_image2, target_img, mask_img), axis=1)), axis=0)
image = image.astype(np.uint8)
self.writer.add_image("{}_Image_{:02d}".format(phase, i_i + 1), image, self.total_steps, dataformats='HWC')
def write_seg_images(self, image1, image2, targets, preds, phase="T"):
if self.writer is None:
self.writer = SummaryWriter()
_, _, I_H, I_W = image1.shape
scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 1, 2).to(image1.device)
image1 = image1.detach().cpu().numpy()
image1 = np.transpose(image1, (0, 2, 3, 1))
image2 = image2.detach().cpu().numpy()
image2 = np.transpose(image2, (0, 2, 3, 1))
targets = targets.detach().cpu().numpy()
targets = np.transpose(targets, (0, 2, 3, 1))
for n_i in range(len(targets)):
this_image1 = image1[n_i]
this_image2 = image2[n_i]
target_img = flow_vis.flow_to_color(targets[n_i], convert_to_bgr=False)
pred_img = list()
for p_i in range(len(preds[0])):
ref, sparse_flow, masks, scores = preds[1][p_i]
coords = torch.round(ref * scale).long()
coords = coords.detach().cpu().numpy()[n_i]
confidence = np.squeeze(scores.detach().cpu().numpy()[n_i])
ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
for k_i in range(len(coords)):
coord = coords[k_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
pred_img.append(ref_img)
this_pred = preds[0][p_i].detach().cpu().numpy()[n_i]
this_pred = np.transpose(this_pred, (1, 2, 0))
pred_img.append(flow_vis.flow_to_color(this_pred, convert_to_bgr=False))
pred_img = np.concatenate(pred_img, axis=1)
image = np.concatenate((this_image1, this_image2, target_img, pred_img), axis=1)
image = image.astype(np.uint8)
self.writer.add_image("{}_Image_{:02d}".format(phase, n_i + 1), image, self.total_steps, dataformats='HWC')
def close(self):
self.writer.close()
def train(args):
model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
print("Parameter Count: %d" % count_parameters(model))
if args.restore_ckpt is not None:
model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
model.cuda()
model.train()
# if args.stage != 'chairs':
# model.module.freeze_bn()
train_loader = datasets.fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = 0
# scaler = GradScaler(enabled=args.mixed_precision)
logger = Logger(model, scheduler)
VAL_FREQ = 5000
# VAL_FREQ = 10
IMAGE_FREQ = 5000
add_noise = True
should_keep_training = True
while should_keep_training:
for i_batch, data_blob in enumerate(train_loader):
optimizer.zero_grad()
image1, image2, flow, valid = [x.cuda() for x in data_blob]
if args.add_noise:
stdv = np.random.uniform(0.0, 5.0)
image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
flow_predictions = model(image1, image2, iters=args.iters)
sparse_lambda = 1.0 if total_steps < 20000 else 0.0
# sparse_lambda = 1.0
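            # supervise the sparse predictions only for the first 20k steps;
            # after that the loss reduces to the dense flow term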
loss, metrics = sequence_loss(flow_predictions, flow, valid, sparse_lambda, args.gamma)
# scaler.scale(loss).backward()
# scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
loss.backward()
optimizer.step()
# scaler.step(optimizer)
scheduler.step()
# scaler.update()
logger.push(metrics)
if total_steps % IMAGE_FREQ == IMAGE_FREQ - 1:
logger.write_images(image1, image2, flow, flow_predictions, phase="T")
if total_steps % VAL_FREQ == VAL_FREQ - 1:
PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
torch.save(model.state_dict(), PATH)
results = {}
for val_dataset in args.validation:
if val_dataset == 'chairs':
results.update(evaluate.validate_chairs(model.module, logger=logger, iters=args.iters))
elif val_dataset == 'sintel':
results.update(evaluate.validate_sintel(model.module, iters=args.iters))
elif val_dataset == 'kitti':
results.update(evaluate.validate_kitti(model.module, iters=args.iters))
logger.write_dict(results)
model.train()
if args.stage != 'chairs':
model.module.freeze_bn()
total_steps += 1
if total_steps > args.num_steps:
should_keep_training = False
break
logger.close()
PATH = 'checkpoints/%s.pth' % args.name
torch.save(model.state_dict(), PATH)
return PATH
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='raft', help="name your experiment")
parser.add_argument('--stage', help="determines which dataset to use for training")
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--validation', type=str, nargs='+')
parser.add_argument('--lr', type=float, default=0.0002)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--iters', type=int, default=3)
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
parser.add_argument('--add_noise', action='store_true')
args = parser.parse_args()
torch.manual_seed(2022)
np.random.seed(2022)
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
train(args)
|
[
"evaluate.validate_chairs",
"evaluate.validate_kitti",
"evaluate.validate_sintel"
] |
[((59, 82), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (74, 82), False, 'import sys\n'), ((14695, 14726), 'datasets.fetch_dataloader', 'datasets.fetch_dataloader', (['args'], {}), '(args)\n', (14720, 14726), False, 'import datasets\n'), ((17465, 17490), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17488, 17490), False, 'import argparse\n'), ((18792, 18815), 'torch.manual_seed', 'torch.manual_seed', (['(2022)'], {}), '(2022)\n', (18809, 18815), False, 'import torch\n'), ((18820, 18840), 'numpy.random.seed', 'np.random.seed', (['(2022)'], {}), '(2022)\n', (18834, 18840), True, 'import numpy as np\n'), ((5981, 6012), 'numpy.transpose', 'np.transpose', (['image1', '(1, 2, 0)'], {}), '(image1, (1, 2, 0))\n', (5993, 6012), True, 'import numpy as np\n'), ((6077, 6108), 'numpy.transpose', 'np.transpose', (['image2', '(1, 2, 0)'], {}), '(image2, (1, 2, 0))\n', (6089, 6108), True, 'import numpy as np\n'), ((6173, 6204), 'numpy.transpose', 'np.transpose', (['target', '(1, 2, 0)'], {}), '(target, (1, 2, 0))\n', (6185, 6204), True, 'import numpy as np\n'), ((6227, 6279), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['target'], {'convert_to_bgr': '(False)'}), '(target, convert_to_bgr=False)\n', (6249, 6279), False, 'import flow_vis\n'), ((8223, 8255), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (8237, 8255), True, 'import numpy as np\n'), ((8275, 8307), 'numpy.concatenate', 'np.concatenate', (['mask_img'], {'axis': '(1)'}), '(mask_img, axis=1)\n', (8289, 8307), True, 'import numpy as np\n'), ((9010, 9044), 'numpy.transpose', 'np.transpose', (['image1', '(0, 2, 3, 1)'], {}), '(image1, (0, 2, 3, 1))\n', (9022, 9044), True, 'import numpy as np\n'), ((9109, 9143), 'numpy.transpose', 'np.transpose', (['image2', '(0, 2, 3, 1)'], {}), '(image2, (0, 2, 3, 1))\n', (9121, 9143), True, 'import numpy as np\n'), ((9211, 9246), 'numpy.transpose', 'np.transpose', (['targets', '(0, 2, 3, 1)'], {}), '(targets, (0, 2, 3, 1))\n', (9223, 9246), True, 'import numpy as np\n'), ((12457, 12491), 'numpy.transpose', 'np.transpose', (['image1', '(0, 2, 3, 1)'], {}), '(image1, (0, 2, 3, 1))\n', (12469, 12491), True, 'import numpy as np\n'), ((12556, 12590), 'numpy.transpose', 'np.transpose', (['image2', '(0, 2, 3, 1)'], {}), '(image2, (0, 2, 3, 1))\n', (12568, 12590), True, 'import numpy as np\n'), ((12658, 12693), 'numpy.transpose', 'np.transpose', (['targets', '(0, 2, 3, 1)'], {}), '(targets, (0, 2, 3, 1))\n', (12670, 12693), True, 'import numpy as np\n'), ((14363, 14373), 'core.ours.RAFT', 'RAFT', (['args'], {}), '(args)\n', (14367, 14373), False, 'from core.ours import RAFT\n'), ((18853, 18881), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (18866, 18881), False, 'import os\n'), ((18891, 18914), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (18899, 18914), False, 'import os\n'), ((1332, 1362), 'torch.sum', 'torch.sum', (['(flow_gt ** 2)'], {'dim': '(1)'}), '(flow_gt ** 2, dim=1)\n', (1341, 1362), False, 'import torch\n'), ((2130, 2193), 'torch.clamp_max', 'torch.clamp_max', (['(coords[..., 1] * coords[..., 0])', '(I_H * I_W - 1)'], {}), '(coords[..., 1] * coords[..., 0], I_H * I_W - 1)\n', (2145, 2193), False, 'import torch\n'), ((2311, 2349), 'torch.gather', 'torch.gather', (['flatten_valid', '(1)', 'coords'], {}), '(flatten_valid, 1, coords)\n', (2323, 2349), False, 'import torch\n'), ((2746, 2798), 'torch.sum', (['((flow_preds[0][-1] - flow_gt) ** 2)'], {'dim': '(1)'}), '((flow_preds[0][-1] - flow_gt) ** 2, dim=1)\n', (2755, 2798), False, 'import torch\n'), ((4886, 4901), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (4899, 4901), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5522, 5537), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5535, 5537), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5773, 5788), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5786, 5788), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((7068, 7102), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (7080, 7102), True, 'import numpy as np\n'), ((7127, 7182), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (7149, 7182), False, 'import flow_vis\n'), ((8796, 8811), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (8809, 8811), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((9484, 9542), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['targets[n_i]'], {'convert_to_bgr': '(False)'}), '(targets[n_i], convert_to_bgr=False)\n', (9506, 9542), False, 'import flow_vis\n'), ((11627, 11659), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (11641, 11659), True, 'import numpy as np\n'), ((11683, 11715), 'numpy.concatenate', 'np.concatenate', (['mask_img'], {'axis': '(1)'}), '(mask_img, axis=1)\n', (11697, 11715), True, 'import numpy as np\n'), ((12243, 12258), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (12256, 12258), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((12835, 12893), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['targets[n_i]'], {'convert_to_bgr': '(False)'}), '(targets[n_i], convert_to_bgr=False)\n', (12857, 12893), False, 'import flow_vis\n'), ((13974, 14006), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (13988, 14006), True, 'import numpy as np\n'), ((14027, 14099), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, pred_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, pred_img), axis=1)\n', (14041, 14099), True, 'import numpy as np\n'), ((14525, 14554), 'torch.load', 'torch.load', (['args.restore_ckpt'], {}), '(args.restore_ckpt)\n', (14535, 14554), False, 'import torch\n'), ((6615, 6647), 'numpy.array', 'np.array', (['image1'], {'dtype': 'np.uint8'}), '(image1, dtype=np.uint8)\n', (6623, 6647), True, 'import numpy as np\n'), ((6881, 6914), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (6889, 6914), True, 'import numpy as np\n'), ((7452, 7527), 'torch.nn.functional.interpolate', 'F.interpolate', (['masks'], {'size': '(I_H, I_W)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(masks, size=(I_H, I_W), mode='bilinear', align_corners=False)\n", (7465, 7527), True, 'import torch.nn.functional as F\n'), ((7814, 7846), 'numpy.array', 'np.array', (['image1'], {'dtype': 'np.uint8'}), '(image1, dtype=np.uint8)\n', (7822, 7846), True, 'import numpy as np\n'), ((7997, 8030), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (8005, 8030), True, 'import numpy as np\n'), ((8114, 8149), 'numpy.expand_dims', 'np.expand_dims', (['masks[m_i]'], {'axis': '(-1)'}), '(masks[m_i], axis=-1)\n', (8128, 8149), True, 'import numpy as np\n'), ((8340, 8402), 'numpy.concatenate', 'np.concatenate', (['(image1, image2, target_img, pred_img)'], {'axis': '(1)'}), '((image1, image2, target_img, pred_img), axis=1)\n', (8354, 8402), True, 'import numpy as np\n'), ((8436, 8498), 'numpy.concatenate', 'np.concatenate', (['(image1, image2, target_img, mask_img)'], {'axis': '(1)'}), '((image1, image2, target_img, mask_img), axis=1)\n', (8450, 8498), True, 'import numpy as np\n'), ((10457, 10491), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (10469, 10491), True, 'import numpy as np\n'), ((10520, 10575), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (10542, 10575), False, 'import flow_vis\n'), ((13826, 13860), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (13838, 13860), True, 'import numpy as np\n'), ((15267, 15294), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(5.0)'], {}), '(0.0, 5.0)\n', (15284, 15294), True, 'import numpy as np\n'), ((2077, 2101), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (2088, 2101), False, 'import torch\n'), ((7605, 7631), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (7611, 7631), True, 'import numpy as np\n'), ((9896, 9933), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (9904, 9933), True, 'import numpy as np\n'), ((10263, 10296), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (10271, 10296), True, 'import numpy as np\n'), ((10803, 10878), 'torch.nn.functional.interpolate', 'F.interpolate', (['masks'], {'size': '(I_H, I_W)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(masks, size=(I_H, I_W), mode='bilinear', align_corners=False)\n", (10816, 10878), True, 'import torch.nn.functional as F\n'), ((11189, 11226), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (11197, 11226), True, 'import numpy as np\n'), ((11385, 11418), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (11393, 11418), True, 'import numpy as np\n'), ((11510, 11545), 'numpy.expand_dims', 'np.expand_dims', (['masks[m_i]'], {'axis': '(-1)'}), '(masks[m_i], axis=-1)\n', (11524, 11545), True, 'import numpy as np\n'), ((11752, 11824), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, pred_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, pred_img), axis=1)\n', (11766, 11824), True, 'import numpy as np\n'), ((11862, 11934), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, mask_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, mask_img), axis=1)\n', (11876, 11934), True, 'import numpy as np\n'), ((13265, 13302), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (13273, 13302), True, 'import numpy as np\n'), ((13632, 13665), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (13640, 13665), True, 'import numpy as np\n'), ((13893, 13948), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (13915, 13948), False, 'import flow_vis\n'), ((5841, 5886), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (5853, 5886), False, 'import torch\n'), ((8867, 8912), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (8879, 8912), False, 'import torch\n'), ((9707, 9731), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (9718, 9731), False, 'import torch\n'), ((10964, 10990), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (10970, 10990), True, 'import numpy as np\n'), ((12314, 12359), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (12326, 12359), False, 'import torch\n'), ((13058, 13082), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (13069, 13082), False, 'import torch\n'), ((1859, 1912), 'torch.tensor', 'torch.tensor', (['(I_W - 1, I_H - 1)'], {'dtype': 'torch.float32'}), '((I_W - 1, I_H - 1), dtype=torch.float32)\n', (1871, 1912), False, 'import torch\n'), ((2402, 2435), 'torch.sum', 'torch.sum', (['(sparse_gt ** 2)'], {'dim': '(-1)'}), '(sparse_gt ** 2, dim=-1)\n', (2411, 2435), False, 'import torch\n'), ((16602, 16673), 'evaluate.validate_chairs', 'evaluate.validate_chairs', (['model.module'], {'logger': 'logger', 'iters': 'args.iters'}), '(model.module, logger=logger, iters=args.iters)\n', (16626, 16673), False, 'import evaluate\n'), ((16764, 16820), 'evaluate.validate_sintel', 'evaluate.validate_sintel', (['model.module'], {'iters': 'args.iters'}), '(model.module, iters=args.iters)\n', (16788, 16820), False, 'import evaluate\n'), ((16910, 16965), 'evaluate.validate_kitti', 'evaluate.validate_kitti', (['model.module'], {'iters': 'args.iters'}), '(model.module, iters=args.iters)\n', (16933, 16965), False, 'import evaluate\n'), ((15337, 15363), 'torch.randn', 'torch.randn', (['*image1.shape'], {}), '(*image1.shape)\n', (15348, 15363), False, 'import torch\n'), ((15432, 15458), 'torch.randn', 'torch.randn', (['*image2.shape'], {}), '(*image2.shape)\n', (15443, 15458), False, 'import torch\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from evaluate import evaluate
from utils import get_data, tf_melspectogram
from shallow_nn import shallow_nn
from deep_nn import deep_nn
from shallow_nn_improve import shallow_nn as shallow_nn_improve
from deep_nn_improve import deep_nn as deep_nn_improve
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('epochs', 100,
'Number of mini-batches to train on. (default: %(default)d)')
tf.app.flags.DEFINE_integer('network', 0,
'Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)')
tf.app.flags.DEFINE_integer('improve', 0,
'Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)')
tf.app.flags.DEFINE_float('decay', 0,
'Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d')
tf.app.flags.DEFINE_integer('log_frequency', 100,
'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('augment', 0,
'Use augmentation, 0 for off, 1 for on (default: %(default)d)')
tf.app.flags.DEFINE_integer('num_parallel_calls', 1,
'Number of cpu cores to use to preprocess data')
tf.app.flags.DEFINE_integer('save_model', 1000,
'Number of steps between model saves (default: %(default)d)')
tf.app.flags.DEFINE_integer('save_images', 0,
'Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)')
# Optimisation hyperparameters
tf.app.flags.DEFINE_integer(
'batch_size', 16, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float(
'learning_rate', 5e-5, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer(
"input_width", 80, "Input width (default: %(default)d)")
tf.app.flags.DEFINE_integer(
"input_height", 80, "Input height (default: %(default)d)")
tf.app.flags.DEFINE_integer(
"input_channels", 1, "Input channels (default: %(default)d)"
)
tf.app.flags.DEFINE_integer(
"num_classes", 10, "Number of classes (default: %(default)d)"
)
tf.app.flags.DEFINE_string(
"log_dir",
"{cwd}/logs/".format(cwd=os.getcwd()),
"Directory where to write event logs and checkpoint. (default: %(default)s)",
)
run_log_dir = os.path.join(FLAGS.log_dir, 'exp_lr_{learning_rate}_decay_{decay}_bs_{batch_size}_e_{epochs}_{network}_improve_{improve}_augment_{augment}'.format(
learning_rate=FLAGS.learning_rate, decay=FLAGS.decay, batch_size=FLAGS.batch_size, epochs=FLAGS.epochs, network='shallow' if (FLAGS.network == 0) else 'deep', improve=FLAGS.improve, augment=FLAGS.augment))
def model(iterator, is_training, nn):
next_x, next_y = iterator.get_next()
with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
y_out, img_summary = nn(next_x, is_training)
# Compute categorical loss
with tf.variable_scope("cross_entropy"):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=next_y, logits=y_out)
)
# L1 regularise
regularization_penalty = tf.losses.get_regularization_loss(
name="total_regularization_loss"
)
regularized_loss = cross_entropy + regularization_penalty
accuracy, accuracy_op = tf.metrics.accuracy(
tf.argmax(next_y, axis=1), tf.argmax(y_out, axis=1), name="accuracy_train")
return regularized_loss, img_summary, accuracy, accuracy_op
def calc_accuracy(iterator, is_training, nn):
next_x, next_y = iterator.get_next()
with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
y_out, _ = nn(next_x, is_training)
accuracy, accuracy_op = tf.metrics.accuracy(
tf.argmax(next_y, axis=1), tf.argmax(y_out, axis=1), name="accuracy_test")
return accuracy, accuracy_op
def accumulate_results(iterator, is_training, nn):
x, y, i = iterator.get_next()
with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
y_out, _ = nn(x, is_training)
return (x, y, y_out, i)
def _preprocess(features, label):
label = tf.one_hot(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)
return features, label
def main(_):
(
train_set_data,
train_set_labels,
_,
test_set_data,
test_set_labels,
test_set_track_ids,
) = get_data()
print("Making TF graph")
start = time.time()
is_training_placeholder = tf.placeholder_with_default(False, shape=())
features_placeholder = tf.placeholder(
tf.float32, (None, np.shape(train_set_data)[1])
)
labels_placeholder = tf.placeholder(tf.uint8, (None))
track_ids_placeholder = tf.placeholder(tf.uint8, (None))
shuffle_buffer_size = len(train_set_data)
dataset = tf.data.Dataset.from_tensor_slices(
(features_placeholder, labels_placeholder)
)
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
_preprocess, FLAGS.batch_size, num_parallel_calls=FLAGS.num_parallel_calls)
)
dataset = dataset.prefetch(1)
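    # two initializable iterators over the same pipeline let the train and test
    # passes each be (re-)initialized with their own feed_dict every epoch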
train_iterator = dataset.make_initializable_iterator()
test_iterator = dataset.make_initializable_iterator()
eval_dataset = tf.data.Dataset.from_tensor_slices(
(features_placeholder, labels_placeholder, track_ids_placeholder)
)
eval_dataset = eval_dataset.map(
lambda features, label, track_id: (
features,
tf.one_hot(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8),
track_id,
)
)
eval_dataset = eval_dataset.batch(1)
eval_iterator = eval_dataset.make_initializable_iterator()
if (FLAGS.network == 0):
if (FLAGS.improve == 0):
print("Using Shallow network")
nn = shallow_nn
else:
print("Using Shallow Improved network")
nn = shallow_nn_improve
else:
if (FLAGS.improve == 0):
print("Using Deep Network")
nn = deep_nn
else:
print("Using Deep Improved Network")
nn = deep_nn_improve
loss, _, train_acc, train_acc_op = model(
train_iterator, is_training_placeholder, nn)
global_step = tf.Variable(0, trainable=False)
if (FLAGS.decay > 0):
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate, global_step, 15000, FLAGS.decay)
else:
learning_rate = FLAGS.learning_rate
# Adam Optimiser
    # default values match those in the paper
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimiser = tf.train.AdamOptimizer(
learning_rate, name="AdamOpt").minimize(loss, global_step=global_step)
validation_accuracy, acc_op = calc_accuracy(
test_iterator, is_training_placeholder, nn)
evaluator = accumulate_results(
eval_iterator, is_training_placeholder, nn)
loss_summary = tf.summary.scalar("Loss", loss)
acc_summary = tf.summary.scalar("Accuracy", validation_accuracy)
train_acc_summary = tf.summary.scalar("Accuracy", train_acc)
training_summary = tf.summary.merge([loss_summary, train_acc_summary])
validation_summary = tf.summary.merge([acc_summary])
# Isolate the variables stored behind the scenes by the metric operation
running_vars = tf.get_collection(
tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy_test")
train_running_vars = tf.get_collection(
tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy_train")
# Define initializer to initialize/reset running variables
running_vars_initializer = tf.variables_initializer(
var_list=running_vars)
train_running_vars_initializer = tf.variables_initializer(
var_list=train_running_vars)
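    # tf.metrics.accuracy keeps its running totals in local variables; re-running
    # these initializers at the start of each epoch resets the streaming accuracies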
end = time.time()
print("Time to prep TF ops: {:.2f}s".format(end - start))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.graph.finalize()
summary_writer = tf.summary.FileWriter(
run_log_dir + "_train", sess.graph)
summary_writer_validation = tf.summary.FileWriter(
run_log_dir + "_validate", sess.graph
)
for epoch in range(FLAGS.epochs):
sess.run(running_vars_initializer)
sess.run(train_running_vars_initializer)
sess.run(train_iterator.initializer, feed_dict={
features_placeholder: train_set_data, labels_placeholder: train_set_labels})
# Run until all samples done
while True:
try:
_, acc_train, summary_str = sess.run([optimiser, train_acc_op, training_summary], feed_dict={
is_training_placeholder: True})
except tf.errors.OutOfRangeError:
break
summary_writer.add_summary(summary_str, epoch)
sess.run(test_iterator.initializer, feed_dict={
features_placeholder: test_set_data, labels_placeholder: test_set_labels})
while True:
try:
acc, acc_summary_str = sess.run(
[acc_op, validation_summary])
except tf.errors.OutOfRangeError:
break
summary_writer_validation.add_summary(acc_summary_str, epoch)
print("Accuracy after epoch {} - Training: {:.2f}% Validation: {:.2f}%".format(
str(epoch), acc_train * 100.0, acc * 100.0))
sess.run(eval_iterator.initializer, feed_dict={
features_placeholder: test_set_data, labels_placeholder: test_set_labels, track_ids_placeholder: test_set_track_ids})
results = [None] * np.shape(test_set_data)[0]
count = 0
while True:
try:
evaluated = sess.run(evaluator)
results[count] = evaluated
count += 1
except tf.errors.OutOfRangeError:
break
raw_probability, maximum_probability, majority_vote = evaluate(results)
print("-----===== Summary =====-----")
print("Raw Probability: {:.2f}%".format(raw_probability * 100.0))
print("Maximum Probability: {:.2f}%".format(
maximum_probability * 100.0))
print("Majority Vote: {:.2f}%".format(majority_vote * 100))
if __name__ == "__main__":
tf.app.run(main=main)
|
[
"evaluate.evaluate"
] |
[((461, 569), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epochs"""', '(100)', '"""Number of mini-batches to train on. (default: %(default)d)"""'], {}), "('epochs', 100,\n 'Number of mini-batches to train on. (default: %(default)d)')\n", (488, 569), True, 'import tensorflow as tf\n'), ((594, 721), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""network"""', '(0)', '"""Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)"""'], {}), "('network', 0,\n 'Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)'\n )\n", (621, 721), True, 'import tensorflow as tf\n'), ((741, 880), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""improve"""', '(0)', '"""Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)"""'], {}), "('improve', 0,\n 'Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)'\n )\n", (768, 880), True, 'import tensorflow as tf\n'), ((900, 1034), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""decay"""', '(0)', '"""Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d"""'], {}), "('decay', 0,\n 'Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d'\n )\n", (925, 1034), True, 'import tensorflow as tf\n'), ((1052, 1212), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""log_frequency"""', '(100)', '"""Number of steps between logging results to the console and saving summaries (default: %(default)d)"""'], {}), "('log_frequency', 100,\n 'Number of steps between logging results to the console and saving summaries (default: %(default)d)'\n )\n", (1079, 1212), True, 'import tensorflow as tf\n'), ((1232, 1341), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""augment"""', '(0)', '"""Use augmentation, 0 for off, 1 for on (default: %(default)d)"""'], {}), "('augment', 0,\n 'Use augmentation, 0 for off, 1 for on (default: %(default)d)')\n", (1259, 1341), True, 'import tensorflow as tf\n'), ((1366, 1471), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_parallel_calls"""', '(1)', '"""Number of cpu cores to use to preprocess data"""'], {}), "('num_parallel_calls', 1,\n 'Number of cpu cores to use to preprocess data')\n", (1393, 1471), True, 'import tensorflow as tf\n'), ((1496, 1609), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_model"""', '(1000)', '"""Number of steps between model saves (default: %(default)d)"""'], {}), "('save_model', 1000,\n 'Number of steps between model saves (default: %(default)d)')\n", (1523, 1609), True, 'import tensorflow as tf\n'), ((1634, 1776), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_images"""', '(0)', '"""Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)"""'], {}), "('save_images', 0,\n 'Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)'\n )\n", (1661, 1776), True, 'import tensorflow as tf\n'), ((1828, 1937), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(16)', '"""Number of examples per mini-batch (default: %(default)d)"""'], {}), "('batch_size', 16,\n 'Number of examples per mini-batch (default: %(default)d)')\n", (1855, 1937), True, 'import tensorflow as tf\n'), ((1939, 2032), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(5e-05)', '"""Learning rate (default: %(default)d)"""'], {}), "('learning_rate', 5e-05,\n 'Learning rate (default: %(default)d)')\n", (1964, 2032), True, 'import tensorflow as tf\n'), ((2033, 2121), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_width"""', '(80)', '"""Input width (default: %(default)d)"""'], {}), "('input_width', 80,\n 'Input width (default: %(default)d)')\n", (2060, 2121), True, 'import tensorflow as tf\n'), ((2123, 2213), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_height"""', '(80)', '"""Input height (default: %(default)d)"""'], {}), "('input_height', 80,\n 'Input height (default: %(default)d)')\n", (2150, 2213), True, 'import tensorflow as tf\n'), ((2215, 2308), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_channels"""', '(1)', '"""Input channels (default: %(default)d)"""'], {}), "('input_channels', 1,\n 'Input channels (default: %(default)d)')\n", (2242, 2308), True, 'import tensorflow as tf\n'), ((2311, 2405), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_classes"""', '(10)', '"""Number of classes (default: %(default)d)"""'], {}), "('num_classes', 10,\n 'Number of classes (default: %(default)d)')\n", (2338, 2405), True, 'import tensorflow as tf\n'), ((3423, 3490), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {'name': '"""total_regularization_loss"""'}), "(name='total_regularization_loss')\n", (3456, 3490), True, 'import tensorflow as tf\n'), ((4385, 4451), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'label', 'depth': 'FLAGS.num_classes', 'dtype': 'tf.uint8'}), '(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)\n', (4395, 4451), True, 'import tensorflow as tf\n'), ((4646, 4656), 'utils.get_data', 'get_data', ([], {}), '()\n', (4654, 4656), False, 'from utils import get_data, tf_melspectogram\n'), ((4699, 4710), 'time.time', 'time.time', ([], {}), '()\n', (4708, 4710), False, 'import time\n'), ((4742, 4786), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()'}), '(False, shape=())\n', (4769, 4786), True, 'import tensorflow as tf\n'), ((4918, 4948), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'None'], {}), '(tf.uint8, None)\n', (4932, 4948), True, 'import tensorflow as tf\n'), ((4979, 5009), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'None'], {}), '(tf.uint8, None)\n', (4993, 5009), True, 'import tensorflow as tf\n'), ((5073, 5151), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(features_placeholder, labels_placeholder)'], {}), '((features_placeholder, labels_placeholder))\n', (5107, 5151), True, 'import tensorflow as tf\n'), ((5568, 5673), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(features_placeholder, labels_placeholder, track_ids_placeholder)'], {}), '((features_placeholder,\n labels_placeholder, track_ids_placeholder))\n', (5602, 5673), True, 'import tensorflow as tf\n'), ((6568, 6599), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (6579, 6599), True, 'import tensorflow as tf\n'), ((6877, 6919), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (6894, 6919), True, 'import tensorflow as tf\n'), ((7304, 7335), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'loss'], {}), "('Loss', loss)\n", (7321, 7335), True, 'import tensorflow as tf\n'), ((7354, 7404), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'validation_accuracy'], {}), "('Accuracy', validation_accuracy)\n", (7371, 7404), True, 'import tensorflow as tf\n'), ((7429, 7469), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'train_acc'], {}), "('Accuracy', train_acc)\n", (7446, 7469), True, 'import tensorflow as tf\n'), ((7494, 7545), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, train_acc_summary]'], {}), '([loss_summary, train_acc_summary])\n', (7510, 7545), True, 'import tensorflow as tf\n'), ((7571, 7602), 'tensorflow.summary.merge', 'tf.summary.merge', (['[acc_summary]'], {}), '([acc_summary])\n', (7587, 7602), True, 'import tensorflow as tf\n'), ((7700, 7770), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.LOCAL_VARIABLES'], {'scope': '"""accuracy_test"""'}), "(tf.GraphKeys.LOCAL_VARIABLES, scope='accuracy_test')\n", (7717, 7770), True, 'import tensorflow as tf\n'), ((7805, 7876), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.LOCAL_VARIABLES'], {'scope': '"""accuracy_train"""'}), "(tf.GraphKeys.LOCAL_VARIABLES, scope='accuracy_train')\n", (7822, 7876), True, 'import tensorflow as tf\n'), ((7981, 8028), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'running_vars'}), '(var_list=running_vars)\n', (8005, 8028), True, 'import tensorflow as tf\n'), ((8075, 8128), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'train_running_vars'}), '(var_list=train_running_vars)\n', (8099, 8128), True, 'import tensorflow as tf\n'), ((8149, 8160), 'time.time', 'time.time', ([], {}), '()\n', (8158, 8160), False, 'import time\n'), ((10739, 10760), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main'}), '(main=main)\n', (10749, 10760), True, 'import tensorflow as tf\n'), ((3043, 3090), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (3060, 3090), True, 'import tensorflow as tf\n'), ((3186, 3220), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cross_entropy"""'], {}), "('cross_entropy')\n", (3203, 3220), True, 'import tensorflow as tf\n'), ((3625, 3650), 'tensorflow.argmax', 'tf.argmax', (['next_y'], {'axis': '(1)'}), '(next_y, axis=1)\n', (3634, 3650), True, 'import tensorflow as tf\n'), ((3652, 3676), 'tensorflow.argmax', 'tf.argmax', (['y_out'], {'axis': '(1)'}), '(y_out, axis=1)\n', (3661, 3676), True, 'import tensorflow as tf\n'), ((3865, 3912), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (3882, 3912), True, 'import tensorflow as tf\n'), ((4015, 4040), 'tensorflow.argmax', 'tf.argmax', (['next_y'], {'axis': '(1)'}), '(next_y, axis=1)\n', (4024, 4040), True, 'import tensorflow as tf\n'), ((4042, 4066), 'tensorflow.argmax', 'tf.argmax', (['y_out'], {'axis': '(1)'}), '(y_out, axis=1)\n', (4051, 4066), True, 'import tensorflow as tf\n'), ((4221, 4268), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (4238, 4268), True, 'import tensorflow as tf\n'), ((5266, 5380), 'tensorflow.data.experimental.map_and_batch', 'tf.data.experimental.map_and_batch', (['_preprocess', 'FLAGS.batch_size'], {'num_parallel_calls': 'FLAGS.num_parallel_calls'}), '(_preprocess, FLAGS.batch_size,\n num_parallel_calls=FLAGS.num_parallel_calls)\n', (5300, 5380), True, 'import tensorflow as tf\n'), ((6650, 6735), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['FLAGS.learning_rate', 'global_step', '(15000)', 'FLAGS.decay'], {}), '(FLAGS.learning_rate, global_step, 15000, FLAGS.decay\n )\n', (6676, 6735), True, 'import tensorflow as tf\n'), ((6929, 6964), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (6952, 6964), True, 'import tensorflow as tf\n'), ((8233, 8245), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8243, 8245), True, 'import tensorflow as tf\n'), ((8363, 8420), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_train')", 'sess.graph'], {}), "(run_log_dir + '_train', sess.graph)\n", (8384, 8420), True, 'import tensorflow as tf\n'), ((8470, 8530), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_validate')", 'sess.graph'], {}), "(run_log_dir + '_validate', sess.graph)\n", (8491, 8530), True, 'import tensorflow as tf\n'), ((10403, 10420), 'evaluate.evaluate', 'evaluate', (['results'], {}), '(results)\n', (10411, 10420), False, 'from evaluate import evaluate\n'), ((2480, 2491), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2489, 2491), False, 'import os\n'), ((3274, 3345), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'next_y', 'logits': 'y_out'}), '(labels=next_y, logits=y_out)\n', (3316, 3345), True, 'import tensorflow as tf\n'), ((8272, 8305), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8303, 8305), True, 'import tensorflow as tf\n'), ((4858, 4882), 'numpy.shape', 'np.shape', (['train_set_data'], {}), '(train_set_data)\n', (4866, 4882), True, 'import numpy as np\n'), ((5799, 5865), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'label', 'depth': 'FLAGS.num_classes', 'dtype': 'tf.uint8'}), '(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)\n', (5809, 5865), True, 'import tensorflow as tf\n'), ((6986, 7039), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""AdamOpt"""'}), "(learning_rate, name='AdamOpt')\n", (7008, 7039), True, 'import tensorflow as tf\n'), ((10071, 10094), 'numpy.shape', 'np.shape', (['test_set_data'], {}), '(test_set_data)\n', (10079, 10094), True, 'import numpy as np\n')]
|
from implicit_neural_networks import IMLP
import torch
import torch.optim as optim
import numpy as np
from evaluate import evaluate_model
from datetime import datetime
from loss_utils import get_gradient_loss, get_rigidity_loss, \
get_optical_flow_loss, get_optical_flow_alpha_loss
from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow
import sys
from torch.utils.tensorboard import SummaryWriter
import logging
import json
from pathlib import Path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(config):
maximum_number_of_frames = config["maximum_number_of_frames"]
resx = np.int64(config["resx"])
resy = np.int64(config["resy"])
iters_num = config["iters_num"]
    # batch size:
    samples = config["samples_batch"]
    # evaluation frequency (in number of iterations)
evaluate_every = np.int64(config["evaluate_every"])
# optionally it is possible to load a checkpoint
load_checkpoint = config["load_checkpoint"] # set to true to continue from a checkpoint
checkpoint_path = config["checkpoint_path"]
# a data folder that contains folders named "[video_name]","[video_name]_flow","[video_name]_maskrcnn"
data_folder = Path(config["data_folder"])
    results_folder_name = config["results_folder_name"] # the folder (under the code's folder) where the experiments will be saved.
add_to_experiment_folder_name = config["add_to_experiment_folder_name"] # for each experiment folder (saved inside "results_folder_name") add this string
# boolean variables for determining if a pretraining is used:
pretrain_mapping1 = config["pretrain_mapping1"]
pretrain_mapping2 = config["pretrain_mapping2"]
pretrain_iter_number = config["pretrain_iter_number"]
# the scale of the atlas uv coordinates relative to frame's xy coordinates
uv_mapping_scale = config["uv_mapping_scale"]
# M_\alpha's hyper parameters:
positional_encoding_num_alpha = config["positional_encoding_num_alpha"]
number_of_channels_alpha = config["number_of_channels_alpha"]
number_of_layers_alpha = config["number_of_layers_alpha"]
# M_f's hyper parameters
use_positional_encoding_mapping1 = config["use_positional_encoding_mapping1"]
number_of_positional_encoding_mapping1 = config["number_of_positional_encoding_mapping1"]
number_of_layers_mapping1 = config["number_of_layers_mapping1"]
number_of_channels_mapping1 = config["number_of_channels_mapping1"]
# M_b's hyper parameters
use_positional_encoding_mapping2 = config["use_positional_encoding_mapping2"]
number_of_positional_encoding_mapping2 = config["number_of_positional_encoding_mapping2"]
number_of_layers_mapping2 = config["number_of_layers_mapping2"]
number_of_channels_mapping2 = config["number_of_channels_mapping2"]
# Atlas MLP's hyper parameters
number_of_channels_atlas = config["number_of_channels_atlas"]
number_of_layers_atlas = config["number_of_layers_atlas"]
positional_encoding_num_atlas = config[
"positional_encoding_num_atlas"]
# bootstrapping configuration:
alpha_bootstrapping_factor = config["alpha_bootstrapping_factor"]
stop_bootstrapping_iteration = config["stop_bootstrapping_iteration"]
# coefficients for the different loss terms
rgb_coeff = config["rgb_coeff"] # coefficient for rgb loss term:
alpha_flow_factor = config["alpha_flow_factor"]
sparsity_coeff = config["sparsity_coeff"]
# optical flow loss term coefficient (beta_f in the paper):
optical_flow_coeff = config["optical_flow_coeff"]
use_gradient_loss = config["use_gradient_loss"]
gradient_loss_coeff = config["gradient_loss_coeff"]
rigidity_coeff = config["rigidity_coeff"] # coefficient for the rigidity loss term
derivative_amount = config["derivative_amount"] # For finite differences gradient computation:
# for using global (in addition to the current local) rigidity loss:
include_global_rigidity_loss = config["include_global_rigidity_loss"]
# Finite differences parameters for the global rigidity terms:
global_rigidity_derivative_amount_fg = config["global_rigidity_derivative_amount_fg"]
global_rigidity_derivative_amount_bg = config["global_rigidity_derivative_amount_bg"]
global_rigidity_coeff_fg = config["global_rigidity_coeff_fg"]
global_rigidity_coeff_bg = config["global_rigidity_coeff_bg"]
stop_global_rigidity = config["stop_global_rigidity"]
use_optical_flow = True
vid_name = data_folder.name
vid_root = data_folder.parent
results_folder = Path(
f'./{results_folder_name}/{vid_name}_{datetime.utcnow().strftime("%m_%d_%Y__%H_%M_%S_%f")}{add_to_experiment_folder_name}')
results_folder.mkdir(parents=True, exist_ok=True)
with open('%s/config.json' % results_folder, 'w') as json_file:
json.dump(config, json_file, indent=4)
logging.basicConfig(
filename='%s/log.log' % results_folder,
level=logging.INFO,
format='%(asctime)s %(message)s')
logging.info('Started')
writer = SummaryWriter(log_dir=str(results_folder))
optical_flows_mask, video_frames, optical_flows_reverse_mask, mask_frames, video_frames_dx, video_frames_dy, optical_flows_reverse, optical_flows = load_input_data(
resy, resx, maximum_number_of_frames, data_folder, True, True, vid_root, vid_name)
    number_of_frames = video_frames.shape[3]
    # save a video showing the masked part of the forward optical flows
save_mask_flow(optical_flows_mask, video_frames, results_folder)
model_F_mapping1 = IMLP(
input_dim=3,
output_dim=2,
hidden_dim=number_of_channels_mapping1,
use_positional=use_positional_encoding_mapping1,
positional_dim=number_of_positional_encoding_mapping1,
num_layers=number_of_layers_mapping1,
skip_layers=[]).to(device)
model_F_mapping2 = IMLP(
input_dim=3,
output_dim=2,
hidden_dim=number_of_channels_mapping2,
use_positional=use_positional_encoding_mapping2,
positional_dim=number_of_positional_encoding_mapping2,
num_layers=number_of_layers_mapping2,
skip_layers=[]).to(device)
model_F_atlas = IMLP(
input_dim=2,
output_dim=3,
hidden_dim=number_of_channels_atlas,
use_positional=True,
positional_dim=positional_encoding_num_atlas,
num_layers=number_of_layers_atlas,
skip_layers=[4, 7]).to(device)
model_alpha = IMLP(
input_dim=3,
output_dim=1,
hidden_dim=number_of_channels_alpha,
use_positional=True,
positional_dim=positional_encoding_num_alpha,
num_layers=number_of_layers_alpha,
skip_layers=[]).to(device)
start_iteration = 0
optimizer_all = optim.Adam(
[{'params': list(model_F_mapping1.parameters())},
{'params': list(model_F_mapping2.parameters())},
{'params': list(model_alpha.parameters())},
{'params': list(model_F_atlas.parameters())}], lr=0.0001)
larger_dim = np.maximum(resx, resy)
if not load_checkpoint:
if pretrain_mapping1:
model_F_mapping1 = pre_train_mapping(model_F_mapping1, number_of_frames, uv_mapping_scale, resx=resx, resy=resy,
larger_dim=larger_dim,device=device, pretrain_iters=pretrain_iter_number)
if pretrain_mapping2:
model_F_mapping2 = pre_train_mapping(model_F_mapping2, number_of_frames, uv_mapping_scale, resx=resx, resy=resy,
larger_dim=larger_dim, device=device,pretrain_iters=pretrain_iter_number)
else:
init_file = torch.load(checkpoint_path)
model_F_atlas.load_state_dict(init_file["F_atlas_state_dict"])
model_F_mapping1.load_state_dict(init_file["model_F_mapping1_state_dict"])
model_F_mapping2.load_state_dict(init_file["model_F_mapping2_state_dict"])
model_alpha.load_state_dict(init_file["model_F_alpha_state_dict"])
optimizer_all.load_state_dict(init_file["optimizer_all_state_dict"])
start_iteration = init_file["iteration"]
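    # jif_all holds the (x, y, frame) coordinates of every video pixel (indexed below as [0]=x, [1]=y, [2]=frame)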
jif_all = get_tuples(number_of_frames, video_frames)
# Start training!
for i in range(start_iteration, iters_num):
if i > stop_bootstrapping_iteration:
alpha_bootstrapping_factor = 0
if i > stop_global_rigidity:
global_rigidity_coeff_fg = 0
global_rigidity_coeff_bg = 0
print(i)
logging.info('Iteration %d' % i)
# randomly choose indices for the current batch
inds_foreground = torch.randint(jif_all.shape[1],
(np.int64(samples * 1.0), 1))
jif_current = jif_all[:, inds_foreground] # size (3, batch, 1)
rgb_current = video_frames[jif_current[1, :], jif_current[0, :], :,
jif_current[2, :]].squeeze(1).to(device)
# the correct alpha according to the precomputed maskrcnn
alpha_maskrcnn = mask_frames[jif_current[1, :], jif_current[0, :],
jif_current[2, :]].squeeze(1).to(device).unsqueeze(-1)
# normalize coordinates to be in [-1,1]
xyt_current = torch.cat(
(jif_current[0, :] / (larger_dim / 2) - 1, jif_current[1, :] / (larger_dim / 2) - 1,
jif_current[2, :] / (number_of_frames / 2.0) - 1),
dim=1).to(device) # size (batch, 3)
# get the atlas UV coordinates from the two mapping networks;
uv_foreground1 = model_F_mapping1(xyt_current)
uv_foreground2 = model_F_mapping2(xyt_current)
# map tanh output of the alpha network to the range (0,1) :
alpha = 0.5 * (model_alpha(xyt_current) + 1.0)
# prevent a situation of alpha=0, or alpha=1 (for the BCE loss that uses log(alpha),log(1-alpha) below)
alpha = alpha * 0.99
alpha = alpha + 0.001
# Sample atlas values. Foreground colors are sampled from [0,1]x[0,1] and background colors are sampled from [-1,0]x[-1,0]
        # Note that the original [u,v] coordinates are in [-1,1]x[-1,1] for both networks
rgb_output1 = (model_F_atlas(uv_foreground1 * 0.5 + 0.5) + 1.0) * 0.5
rgb_output2 = (model_F_atlas(
uv_foreground2 * 0.5 - 0.5) + 1.0) * 0.5
# Reconstruct final colors from the two layers (using alpha)
rgb_output_foreground = rgb_output1 * alpha + rgb_output2 * (1.0 - alpha)
if use_gradient_loss:
gradient_loss = get_gradient_loss(video_frames_dx, video_frames_dy, jif_current,
model_F_mapping1, model_F_mapping2, model_F_atlas,
rgb_output_foreground,device,resx,number_of_frames,model_alpha)
else:
gradient_loss = 0.0
print("gradient_loss:")
print(gradient_loss)
rgb_output_foreground_not = rgb_output1 * (1.0 - alpha)
rgb_loss = (torch.norm(rgb_output_foreground - rgb_current, dim=1) ** 2).mean()
rgb_loss_sparsity = (torch.norm(rgb_output_foreground_not, dim=1) ** 2).mean()
rigidity_loss1 = get_rigidity_loss(
jif_current,
derivative_amount,
larger_dim,
number_of_frames,
model_F_mapping1,
uv_foreground1,device,
uv_mapping_scale=uv_mapping_scale)
rigidity_loss2 = get_rigidity_loss(
jif_current,
derivative_amount,
larger_dim,
number_of_frames,
model_F_mapping2,
uv_foreground2,device,
uv_mapping_scale=uv_mapping_scale)
if include_global_rigidity_loss and i <= stop_global_rigidity:
global_rigidity_loss1 = get_rigidity_loss(
jif_current,
global_rigidity_derivative_amount_fg,
larger_dim,
number_of_frames,
model_F_mapping1,
uv_foreground1,device,
uv_mapping_scale=uv_mapping_scale)
global_rigidity_loss2 = get_rigidity_loss(
jif_current,
global_rigidity_derivative_amount_bg,
larger_dim,
number_of_frames,
model_F_mapping2,
uv_foreground2,device,
uv_mapping_scale=uv_mapping_scale)
flow_loss1 = get_optical_flow_loss(
jif_current, uv_foreground1, optical_flows_reverse, optical_flows_reverse_mask, larger_dim,
number_of_frames, model_F_mapping1, optical_flows, optical_flows_mask, uv_mapping_scale,device, use_alpha=True,
alpha=alpha)
flow_loss2 = get_optical_flow_loss(
jif_current, uv_foreground2, optical_flows_reverse, optical_flows_reverse_mask, larger_dim,
number_of_frames, model_F_mapping2, optical_flows, optical_flows_mask, uv_mapping_scale,device, use_alpha=True,
alpha=1 - alpha)
flow_alpha_loss = get_optical_flow_alpha_loss(model_alpha,
jif_current, alpha, optical_flows_reverse,
optical_flows_reverse_mask, larger_dim,
number_of_frames, optical_flows,
optical_flows_mask, device)
print("flow alpha loss:")
print(flow_alpha_loss)
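        # binary cross-entropy between the predicted alpha and the Mask-RCNN mask (alpha bootstrapping)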
alpha_bootstrapping_loss = torch.mean(
-alpha_maskrcnn * torch.log(alpha) - (1 - alpha_maskrcnn) * torch.log(1 - alpha))
print("alpha_balancing_loss")
print(alpha_bootstrapping_loss)
if include_global_rigidity_loss and i <= stop_global_rigidity:
loss = rigidity_coeff * (
rigidity_loss1 + rigidity_loss2) + global_rigidity_coeff_fg * global_rigidity_loss1 + global_rigidity_coeff_bg * global_rigidity_loss2 + \
rgb_loss * rgb_coeff + optical_flow_coeff * (
flow_loss1 + flow_loss2) + alpha_bootstrapping_loss * alpha_bootstrapping_factor + flow_alpha_loss * alpha_flow_factor + rgb_loss_sparsity * sparsity_coeff + gradient_loss * gradient_loss_coeff
else:
loss = rigidity_coeff * (rigidity_loss1 + rigidity_loss2) + rgb_loss * rgb_coeff + optical_flow_coeff * (
flow_loss1 + flow_loss2) + alpha_bootstrapping_loss * alpha_bootstrapping_factor + flow_alpha_loss * alpha_flow_factor + rgb_loss_sparsity * sparsity_coeff + gradient_loss * gradient_loss_coeff
optimizer_all.zero_grad()
loss.backward()
optimizer_all.step()
try:
if use_optical_flow:
print("of_loss1:%f" % flow_loss1.detach())
print("of_loss2:%f" % flow_loss2.detach())
logging.info("of_loss1:%f" % flow_loss1.detach())
writer.add_scalar('Loss/train_of1', flow_loss1.detach(), i)
logging.info("of_loss2:%f" % flow_loss2.detach())
writer.add_scalar('Loss/train_of2', flow_loss2.detach(), i)
        except Exception:
pass
logging.info("flow_alpha_loss: %f", flow_alpha_loss.detach())
logging.info("rgb_loss:%f" % rgb_loss.detach())
logging.info("total_loss:%f" % loss.detach())
logging.info("rigidity_loss1:%f" % rigidity_loss1.detach())
logging.info("rigidity_loss2:%f" % rigidity_loss2.detach())
logging.info('rgb_loss_negative %f' % rgb_loss_sparsity.detach())
logging.info('-------------------------------')
print("rgb_loss:%f" % rgb_loss.detach())
print('rgb_loss_negative %f' % rgb_loss_sparsity.detach())
print("total_loss:%f" % loss.detach())
print("rigidity_loss1:%f" % rigidity_loss1.detach())
print("rigidity_loss2:%f" % rigidity_loss2.detach())
print("alpha_mean:%f" % alpha.mean().detach())
logging.info("alpha_mean:%f" % alpha.mean().detach())
print("alpha_mean_1:%f" % alpha[alpha > 0.5].mean().detach())
logging.info("alpha_mean_1:%f" % alpha[alpha > 0.5].mean().detach())
print("alpha_mean_0:%f" % alpha[alpha < 0.5].mean().detach())
logging.info("alpha_mean_0:%f" % alpha[alpha < 0.5].mean().detach())
print(f'------------{results_folder.name}------------------')
writer.add_scalar('Loss/alpha_mean', alpha.mean().detach(), i)
writer.add_scalar('Loss/rgb_loss', rgb_loss.detach(), i)
writer.add_scalar('Loss/rigidity_loss1', rigidity_loss1.detach(), i)
writer.add_scalar('Loss/rigidity_loss2', rigidity_loss2.detach(), i)
try:
# render and evaluate videos every N iterations
if i % evaluate_every == 0 and i > start_iteration:
evaluate_model(model_F_atlas, resx, resy, number_of_frames, model_F_mapping1,
model_F_mapping2, model_alpha,
video_frames, results_folder, i, mask_frames, optimizer_all,
writer, vid_name, derivative_amount, uv_mapping_scale,
optical_flows,
optical_flows_mask,device)
rgb_img = video_frames[:, :, :, 0].numpy()
writer.add_image('Input/rgb_0', rgb_img, i, dataformats='HWC')
model_F_atlas.train()
model_F_mapping1.train()
model_F_mapping2.train()
model_alpha.train()
except Exception:
pass
if __name__ == "__main__":
with open(sys.argv[1]) as f:
main(json.load(f))
|
[
"evaluate.evaluate_model"
] |
[((659, 683), 'numpy.int64', 'np.int64', (["config['resx']"], {}), "(config['resx'])\n", (667, 683), True, 'import numpy as np\n'), ((695, 719), 'numpy.int64', 'np.int64', (["config['resy']"], {}), "(config['resy'])\n", (703, 719), True, 'import numpy as np\n'), ((893, 927), 'numpy.int64', 'np.int64', (["config['evaluate_every']"], {}), "(config['evaluate_every'])\n", (901, 927), True, 'import numpy as np\n'), ((1248, 1275), 'pathlib.Path', 'Path', (["config['data_folder']"], {}), "(config['data_folder'])\n", (1252, 1275), False, 'from pathlib import Path\n'), ((4931, 5049), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "('%s/log.log' % results_folder)", 'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(filename='%s/log.log' % results_folder, level=logging.\n INFO, format='%(asctime)s %(message)s')\n", (4950, 5049), False, 'import logging\n'), ((5074, 5097), 'logging.info', 'logging.info', (['"""Started"""'], {}), "('Started')\n", (5086, 5097), False, 'import logging\n'), ((5306, 5409), 'unwrap_utils.load_input_data', 'load_input_data', (['resy', 'resx', 'maximum_number_of_frames', 'data_folder', '(True)', '(True)', 'vid_root', 'vid_name'], {}), '(resy, resx, maximum_number_of_frames, data_folder, True, \n True, vid_root, vid_name)\n', (5321, 5409), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((5535, 5599), 'unwrap_utils.save_mask_flow', 'save_mask_flow', (['optical_flows_mask', 'video_frames', 'results_folder'], {}), '(optical_flows_mask, video_frames, results_folder)\n', (5549, 5599), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((7110, 7132), 'numpy.maximum', 'np.maximum', (['resx', 'resy'], {}), '(resx, resy)\n', (7120, 7132), True, 'import numpy as np\n'), ((8228, 8270), 'unwrap_utils.get_tuples', 'get_tuples', (['number_of_frames', 'video_frames'], {}), '(number_of_frames, video_frames)\n', (8238, 8270), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((524, 549), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (547, 549), False, 'import torch\n'), ((4888, 4926), 'json.dump', 'json.dump', (['config', 'json_file'], {'indent': '(4)'}), '(config, json_file, indent=4)\n', (4897, 4926), False, 'import json\n'), ((7747, 7774), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7757, 7774), False, 'import torch\n'), ((8575, 8607), 'logging.info', 'logging.info', (["('Iteration %d' % i)"], {}), "('Iteration %d' % i)\n", (8587, 8607), False, 'import logging\n'), ((11256, 11420), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'derivative_amount', 'larger_dim', 'number_of_frames', 'model_F_mapping1', 'uv_foreground1', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, derivative_amount, larger_dim,\n number_of_frames, model_F_mapping1, uv_foreground1, device,\n uv_mapping_scale=uv_mapping_scale)\n', (11273, 11420), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((11522, 11686), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'derivative_amount', 'larger_dim', 'number_of_frames', 'model_F_mapping2', 'uv_foreground2', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, derivative_amount, larger_dim,\n number_of_frames, model_F_mapping2, uv_foreground2, device,\n uv_mapping_scale=uv_mapping_scale)\n', (11539, 11686), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((12505, 12756), 'loss_utils.get_optical_flow_loss', 'get_optical_flow_loss', (['jif_current', 'uv_foreground1', 'optical_flows_reverse', 'optical_flows_reverse_mask', 'larger_dim', 'number_of_frames', 'model_F_mapping1', 'optical_flows', 'optical_flows_mask', 'uv_mapping_scale', 'device'], {'use_alpha': '(True)', 'alpha': 'alpha'}), '(jif_current, uv_foreground1, optical_flows_reverse,\n optical_flows_reverse_mask, larger_dim, number_of_frames,\n model_F_mapping1, optical_flows, optical_flows_mask, uv_mapping_scale,\n device, use_alpha=True, alpha=alpha)\n', (12526, 12756), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((12803, 13058), 'loss_utils.get_optical_flow_loss', 'get_optical_flow_loss', (['jif_current', 'uv_foreground2', 'optical_flows_reverse', 'optical_flows_reverse_mask', 'larger_dim', 'number_of_frames', 'model_F_mapping2', 'optical_flows', 'optical_flows_mask', 'uv_mapping_scale', 'device'], {'use_alpha': '(True)', 'alpha': '(1 - alpha)'}), '(jif_current, uv_foreground2, optical_flows_reverse,\n optical_flows_reverse_mask, larger_dim, number_of_frames,\n model_F_mapping2, optical_flows, optical_flows_mask, uv_mapping_scale,\n device, use_alpha=True, alpha=1 - alpha)\n', (12824, 13058), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((13110, 13302), 'loss_utils.get_optical_flow_alpha_loss', 'get_optical_flow_alpha_loss', (['model_alpha', 'jif_current', 'alpha', 'optical_flows_reverse', 'optical_flows_reverse_mask', 'larger_dim', 'number_of_frames', 'optical_flows', 'optical_flows_mask', 'device'], {}), '(model_alpha, jif_current, alpha,\n optical_flows_reverse, optical_flows_reverse_mask, larger_dim,\n number_of_frames, optical_flows, optical_flows_mask, device)\n', (13137, 13302), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((15681, 15728), 'logging.info', 'logging.info', (['"""-------------------------------"""'], {}), "('-------------------------------')\n", (15693, 15728), False, 'import logging\n'), ((5624, 5867), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(3)', 'output_dim': '(2)', 'hidden_dim': 'number_of_channels_mapping1', 'use_positional': 'use_positional_encoding_mapping1', 'positional_dim': 'number_of_positional_encoding_mapping1', 'num_layers': 'number_of_layers_mapping1', 'skip_layers': '[]'}), '(input_dim=3, output_dim=2, hidden_dim=number_of_channels_mapping1,\n use_positional=use_positional_encoding_mapping1, positional_dim=\n number_of_positional_encoding_mapping1, num_layers=\n number_of_layers_mapping1, skip_layers=[])\n', (5628, 5867), False, 'from implicit_neural_networks import IMLP\n'), ((5946, 6189), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(3)', 'output_dim': '(2)', 'hidden_dim': 'number_of_channels_mapping2', 'use_positional': 'use_positional_encoding_mapping2', 'positional_dim': 'number_of_positional_encoding_mapping2', 'num_layers': 'number_of_layers_mapping2', 'skip_layers': '[]'}), '(input_dim=3, output_dim=2, hidden_dim=number_of_channels_mapping2,\n use_positional=use_positional_encoding_mapping2, positional_dim=\n number_of_positional_encoding_mapping2, num_layers=\n number_of_layers_mapping2, skip_layers=[])\n', (5950, 6189), False, 'from implicit_neural_networks import IMLP\n'), ((6265, 6463), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(2)', 'output_dim': '(3)', 'hidden_dim': 'number_of_channels_atlas', 'use_positional': '(True)', 'positional_dim': 'positional_encoding_num_atlas', 'num_layers': 'number_of_layers_atlas', 'skip_layers': '[4, 7]'}), '(input_dim=2, output_dim=3, hidden_dim=number_of_channels_atlas,\n use_positional=True, positional_dim=positional_encoding_num_atlas,\n num_layers=number_of_layers_atlas, skip_layers=[4, 7])\n', (6269, 6463), False, 'from implicit_neural_networks import IMLP\n'), ((6543, 6737), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(3)', 'output_dim': '(1)', 'hidden_dim': 'number_of_channels_alpha', 'use_positional': '(True)', 'positional_dim': 'positional_encoding_num_alpha', 'num_layers': 'number_of_layers_alpha', 'skip_layers': '[]'}), '(input_dim=3, output_dim=1, hidden_dim=number_of_channels_alpha,\n use_positional=True, positional_dim=positional_encoding_num_alpha,\n num_layers=number_of_layers_alpha, skip_layers=[])\n', (6547, 6737), False, 'from implicit_neural_networks import IMLP\n'), ((7222, 7398), 'unwrap_utils.pre_train_mapping', 'pre_train_mapping', (['model_F_mapping1', 'number_of_frames', 'uv_mapping_scale'], {'resx': 'resx', 'resy': 'resy', 'larger_dim': 'larger_dim', 'device': 'device', 'pretrain_iters': 'pretrain_iter_number'}), '(model_F_mapping1, number_of_frames, uv_mapping_scale,\n resx=resx, resy=resy, larger_dim=larger_dim, device=device,\n pretrain_iters=pretrain_iter_number)\n', (7239, 7398), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((7500, 7676), 'unwrap_utils.pre_train_mapping', 'pre_train_mapping', (['model_F_mapping2', 'number_of_frames', 'uv_mapping_scale'], {'resx': 'resx', 'resy': 'resy', 'larger_dim': 'larger_dim', 'device': 'device', 'pretrain_iters': 'pretrain_iter_number'}), '(model_F_mapping2, number_of_frames, uv_mapping_scale,\n resx=resx, resy=resy, larger_dim=larger_dim, device=device,\n pretrain_iters=pretrain_iter_number)\n', (7517, 7676), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((10608, 10799), 'loss_utils.get_gradient_loss', 'get_gradient_loss', (['video_frames_dx', 'video_frames_dy', 'jif_current', 'model_F_mapping1', 'model_F_mapping2', 'model_F_atlas', 'rgb_output_foreground', 'device', 'resx', 'number_of_frames', 'model_alpha'], {}), '(video_frames_dx, video_frames_dy, jif_current,\n model_F_mapping1, model_F_mapping2, model_F_atlas,\n rgb_output_foreground, device, resx, number_of_frames, model_alpha)\n', (10625, 10799), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((11871, 12054), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'global_rigidity_derivative_amount_fg', 'larger_dim', 'number_of_frames', 'model_F_mapping1', 'uv_foreground1', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, global_rigidity_derivative_amount_fg,\n larger_dim, number_of_frames, model_F_mapping1, uv_foreground1, device,\n uv_mapping_scale=uv_mapping_scale)\n', (11888, 12054), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((12195, 12378), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'global_rigidity_derivative_amount_bg', 'larger_dim', 'number_of_frames', 'model_F_mapping2', 'uv_foreground2', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, global_rigidity_derivative_amount_bg,\n larger_dim, number_of_frames, model_F_mapping2, uv_foreground2, device,\n uv_mapping_scale=uv_mapping_scale)\n', (12212, 12378), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((17837, 17849), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17846, 17849), False, 'import json\n'), ((8764, 8787), 'numpy.int64', 'np.int64', (['(samples * 1.0)'], {}), '(samples * 1.0)\n', (8772, 8787), True, 'import numpy as np\n'), ((9311, 9472), 'torch.cat', 'torch.cat', (['(jif_current[0, :] / (larger_dim / 2) - 1, jif_current[1, :] / (larger_dim /\n 2) - 1, jif_current[2, :] / (number_of_frames / 2.0) - 1)'], {'dim': '(1)'}), '((jif_current[0, :] / (larger_dim / 2) - 1, jif_current[1, :] / (\n larger_dim / 2) - 1, jif_current[2, :] / (number_of_frames / 2.0) - 1),\n dim=1)\n', (9320, 9472), False, 'import torch\n'), ((16941, 17224), 'evaluate.evaluate_model', 'evaluate_model', (['model_F_atlas', 'resx', 'resy', 'number_of_frames', 'model_F_mapping1', 'model_F_mapping2', 'model_alpha', 'video_frames', 'results_folder', 'i', 'mask_frames', 'optimizer_all', 'writer', 'vid_name', 'derivative_amount', 'uv_mapping_scale', 'optical_flows', 'optical_flows_mask', 'device'], {}), '(model_F_atlas, resx, resy, number_of_frames,\n model_F_mapping1, model_F_mapping2, model_alpha, video_frames,\n results_folder, i, mask_frames, optimizer_all, writer, vid_name,\n derivative_amount, uv_mapping_scale, optical_flows, optical_flows_mask,\n device)\n', (16955, 17224), False, 'from evaluate import evaluate_model\n'), ((11074, 11128), 'torch.norm', 'torch.norm', (['(rgb_output_foreground - rgb_current)'], {'dim': '(1)'}), '(rgb_output_foreground - rgb_current, dim=1)\n', (11084, 11128), False, 'import torch\n'), ((11172, 11216), 'torch.norm', 'torch.norm', (['rgb_output_foreground_not'], {'dim': '(1)'}), '(rgb_output_foreground_not, dim=1)\n', (11182, 11216), False, 'import torch\n'), ((13655, 13671), 'torch.log', 'torch.log', (['alpha'], {}), '(alpha)\n', (13664, 13671), False, 'import torch\n'), ((13697, 13717), 'torch.log', 'torch.log', (['(1 - alpha)'], {}), '(1 - alpha)\n', (13706, 13717), False, 'import torch\n'), ((4671, 4688), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4686, 4688), False, 'from datetime import datetime\n')]
|
'''Train CIFAR10/100 with PyTorch using standard Contrastive Learning. This script tunes the L2 reg weight of the
final classifier.'''
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import math
import os
import argparse
from models import *
from configs import get_datasets
from evaluate import train_clf, encode_feature_averaging
parser = argparse.ArgumentParser(description='Final evaluation with feature averaging.')
parser.add_argument("--num-workers", type=int, default=2, help='Number of threads for data loaders')
parser.add_argument("--load-from", type=str, default='ckpt.pth', help='File to load from')
parser.add_argument("--num-passes", type=int, default=10, help='Number of passes to average')
parser.add_argument("--reg-lower", type=float, default=-7, help='Minimum log regularization parameter (base 10)')
parser.add_argument("--reg-upper", type=float, default=-3, help='Maximum log regularization parameter (base 10)')
parser.add_argument("--num-steps", type=int, default=10, help='Number of log-linearly spaced reg parameters to try')
args = parser.parse_args()
# Load checkpoint.
print('==> Loading settings from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
resume_from = os.path.join('./checkpoint', args.load_from)
checkpoint = torch.load(resume_from)
args.dataset = checkpoint['args']['dataset']
args.arch = checkpoint['args']['arch']
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Data
print('==> Preparing data..')
_, testset, clftrainset, num_classes, stem = get_datasets(args.dataset, test_as_train=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=1000, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
clftrainloader = torch.utils.data.DataLoader(clftrainset, batch_size=1000, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
# Model
print('==> Building model..')
##############################################################
# Encoder
##############################################################
if args.arch == 'resnet18':
net = ResNet18(stem=stem)
elif args.arch == 'resnet34':
net = ResNet34(stem=stem)
elif args.arch == 'resnet50':
net = ResNet50(stem=stem)
else:
raise ValueError("Bad architecture specification")
net = net.to(device)
if device == 'cuda':
repr_dim = net.representation_dim
net = torch.nn.DataParallel(net)
net.representation_dim = repr_dim
cudnn.benchmark = True
print('==> Loading encoder from checkpoint..')
net.load_state_dict(checkpoint['net'])
best_acc = 0
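# Encode train/test features, averaging each example's representation over num_passes passes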
X, y = encode_feature_averaging(clftrainloader, device, net, num_passes=args.num_passes)
X_test, y_test = encode_feature_averaging(testloader, device, net, num_passes=args.num_passes)
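# Try num_steps log-linearly spaced L2 regularization weights in [10^reg_lower, 10^reg_upper]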
for reg_weight in torch.exp(math.log(10) * torch.linspace(args.reg_lower, args.reg_upper, args.num_steps,
dtype=torch.float, device=device)):
clf = train_clf(X, y, net.representation_dim, num_classes, device, reg_weight=reg_weight)
raw_scores = clf(X_test)
_, predicted = raw_scores.max(1)
correct = predicted.eq(y_test).sum().item()
acc = 100 * correct / predicted.shape[0]
print('Test accuracy', acc, '%')
if acc > best_acc:
best_acc = acc
print("Best test accuracy", best_acc, "%")
|
[
"evaluate.encode_feature_averaging",
"evaluate.train_clf"
] |
[((368, 447), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Final evaluation with feature averaging."""'}), "(description='Final evaluation with feature averaging.')\n", (391, 447), False, 'import argparse\n'), ((1181, 1208), 'os.path.isdir', 'os.path.isdir', (['"""checkpoint"""'], {}), "('checkpoint')\n", (1194, 1208), False, 'import os\n'), ((1264, 1308), 'os.path.join', 'os.path.join', (['"""./checkpoint"""', 'args.load_from'], {}), "('./checkpoint', args.load_from)\n", (1276, 1308), False, 'import os\n'), ((1322, 1345), 'torch.load', 'torch.load', (['resume_from'], {}), '(resume_from)\n', (1332, 1345), False, 'import torch\n'), ((1570, 1616), 'configs.get_datasets', 'get_datasets', (['args.dataset'], {'test_as_train': '(True)'}), '(args.dataset, test_as_train=True)\n', (1582, 1616), False, 'from configs import get_datasets\n'), ((1631, 1750), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(testset, batch_size=1000, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n', (1658, 1750), False, 'import torch\n'), ((1805, 1928), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['clftrainset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(clftrainset, batch_size=1000, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n', (1832, 1928), False, 'import torch\n'), ((2676, 2762), 'evaluate.encode_feature_averaging', 'encode_feature_averaging', (['clftrainloader', 'device', 'net'], {'num_passes': 'args.num_passes'}), '(clftrainloader, device, net, num_passes=args.\n num_passes)\n', (2700, 2762), False, 'from evaluate import train_clf, encode_feature_averaging\n'), ((2775, 2852), 'evaluate.encode_feature_averaging', 'encode_feature_averaging', (['testloader', 'device', 'net'], {'num_passes': 'args.num_passes'}), '(testloader, device, net, num_passes=args.num_passes)\n', (2799, 2852), False, 'from evaluate import train_clf, encode_feature_averaging\n'), ((1450, 1475), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1473, 1475), False, 'import torch\n'), ((2475, 2501), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (2496, 2501), False, 'import torch\n'), ((3063, 3151), 'evaluate.train_clf', 'train_clf', (['X', 'y', 'net.representation_dim', 'num_classes', 'device'], {'reg_weight': 'reg_weight'}), '(X, y, net.representation_dim, num_classes, device, reg_weight=\n reg_weight)\n', (3072, 3151), False, 'from evaluate import train_clf, encode_feature_averaging\n'), ((2881, 2893), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (2889, 2893), False, 'import math\n'), ((2896, 2997), 'torch.linspace', 'torch.linspace', (['args.reg_lower', 'args.reg_upper', 'args.num_steps'], {'dtype': 'torch.float', 'device': 'device'}), '(args.reg_lower, args.reg_upper, args.num_steps, dtype=torch.\n float, device=device)\n', (2910, 2997), False, 'import torch\n')]
|
import os, sys
root_path = os.path.realpath(__file__).split('/evaluate/multipose_coco_eval.py')[0]
os.chdir(root_path)
sys.path.append(root_path)
from network.posenet import poseNet
from evaluate.tester import Tester
backbone = 'resnet101'
# Set testing parameters
params = Tester.TestParams()
params.subnet_name = 'both'
params.inp_size = 480 # input picture size = (inp_size, inp_size)
params.coeff = 2
params.in_thres = 0.21
params.coco_root = '/mnt/hdd10tb/Datasets/COCO2017/'
params.testresult_write_json = True # Whether to write json result
params.coco_result_filename = './demo/multipose_coco2017_results.json'
params.ckpt = '/home/vietnguyen/MultiPoseNet/extra/models/res50_detection_subnet/ckpt_39_0.59604.h5.best'
# model
model = poseNet(backbone)
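# Freeze all model parameters; the network is used for evaluation only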
for name, module in model.named_children():
for para in module.parameters():
para.requires_grad = False
tester = Tester(model, params)
tester.coco_eval() # pic_test
|
[
"evaluate.tester.Tester",
"evaluate.tester.Tester.TestParams"
] |
[((99, 118), 'os.chdir', 'os.chdir', (['root_path'], {}), '(root_path)\n', (107, 118), False, 'import os, sys\n'), ((119, 145), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (134, 145), False, 'import os, sys\n'), ((278, 297), 'evaluate.tester.Tester.TestParams', 'Tester.TestParams', ([], {}), '()\n', (295, 297), False, 'from evaluate.tester import Tester\n'), ((748, 765), 'network.posenet.poseNet', 'poseNet', (['backbone'], {}), '(backbone)\n', (755, 765), False, 'from network.posenet import poseNet\n'), ((893, 914), 'evaluate.tester.Tester', 'Tester', (['model', 'params'], {}), '(model, params)\n', (899, 914), False, 'from evaluate.tester import Tester\n'), ((27, 53), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (43, 53), False, 'import os, sys\n')]
|
import torch
from torch import nn
from Fashion_Mnist import load_data_fashion_mnist
from evaluate import Accumulator, accurate_num, evaluate_accuracy
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 512), nn.BatchNorm1d(512),
                    nn.ReLU(),
                    nn.Linear(512, 256), nn.BatchNorm1d(256),
                    nn.ReLU(),
                    nn.Linear(256, 128), nn.BatchNorm1d(128),
                    nn.ReLU(),
                    nn.Linear(128, 64), nn.BatchNorm1d(64),
                    nn.ReLU(),
                    nn.Linear(64, 32), nn.BatchNorm1d(32),
                    nn.ReLU(),
                    nn.Linear(32, 10))
def init_weights(m):
if type(m) == nn.Linear:
nn.init.kaiming_uniform_(m.weight)
net.apply(init_weights)
batch_size, lr, num_epochs = 256, 0.2, 10
loss = nn.CrossEntropyLoss()
trainer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.75)
train_iter, test_iter = load_data_fashion_mnist(batch_size)
def train_epoch(net, train_iter, loss, updater):
if isinstance(net, torch.nn.Module):
net.train()
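    # metric accumulates: sum of training loss, number of correct predictions, number of examples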
metric = Accumulator(3)
for X, y in train_iter:
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, torch.optim.Optimizer):
updater.zero_grad()
l.sum().backward()
updater.step()
else:
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accurate_num(y_hat, y), y.numel())
return metric[0] / metric[2], metric[1] / metric[2]
def train(net, train_iter, test_iter, loss, num_epochs, updater):
for epoch in range(num_epochs):
train_loss, train_acc = train_epoch(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
        print(f'epoch {epoch + 1}: train loss: {train_loss:.5f} train acc: {train_acc:.2%} test acc: {test_acc:.2%}')
train(net, train_iter, test_iter, loss, num_epochs, trainer)
|
[
"evaluate.Accumulator",
"evaluate.evaluate_accuracy",
"evaluate.accurate_num"
] |
[((874, 895), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (893, 895), False, 'from torch import nn\n'), ((988, 1023), 'Fashion_Mnist.load_data_fashion_mnist', 'load_data_fashion_mnist', (['batch_size'], {}), '(batch_size)\n', (1011, 1023), False, 'from Fashion_Mnist import load_data_fashion_mnist\n'), ((176, 188), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (186, 188), False, 'from torch import nn\n'), ((211, 230), 'torch.nn.Linear', 'nn.Linear', (['(784)', '(512)'], {}), '(784, 512)\n', (220, 230), False, 'from torch import nn\n'), ((231, 250), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (245, 250), False, 'from torch import nn\n'), ((273, 282), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (280, 282), False, 'from torch import nn\n'), ((305, 324), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (314, 324), False, 'from torch import nn\n'), ((326, 345), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (340, 345), False, 'from torch import nn\n'), ((368, 377), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (375, 377), False, 'from torch import nn\n'), ((400, 419), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (409, 419), False, 'from torch import nn\n'), ((421, 440), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (435, 440), False, 'from torch import nn\n'), ((463, 472), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (470, 472), False, 'from torch import nn\n'), ((495, 513), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (504, 513), False, 'from torch import nn\n'), ((515, 533), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (529, 533), False, 'from torch import nn\n'), ((556, 565), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (563, 565), False, 'from torch import nn\n'), ((588, 605), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (597, 605), False, 'from torch import nn\n'), ((607, 625), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (621, 625), False, 'from torch import nn\n'), ((648, 657), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (655, 657), False, 'from torch import nn\n'), ((680, 697), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(10)'], {}), '(32, 10)\n', (689, 697), False, 'from torch import nn\n'), ((1153, 1167), 'evaluate.Accumulator', 'Accumulator', (['(3)'], {}), '(3)\n', (1164, 1167), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n'), ((759, 793), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {}), '(m.weight)\n', (783, 793), False, 'from torch import nn\n'), ((1809, 1842), 'evaluate.evaluate_accuracy', 'evaluate_accuracy', (['net', 'test_iter'], {}), '(net, test_iter)\n', (1826, 1842), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n'), ((1514, 1536), 'evaluate.accurate_num', 'accurate_num', (['y_hat', 'y'], {}), '(y_hat, y)\n', (1526, 1536), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n')]
|
import argparse, os
import matplotlib
matplotlib.use('Agg')
import torch
from evaluate import evaluate_synthesis, evaluate_projection
import numpy as np
from synth.synthesize import create_synth
from utils.data import get_external_sounds
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default="/fast-1/philippe/flow_results_final/32par/models/vae_flow_mel_mse_cnn_mlp_iaf_1.model", help='')
parser.add_argument('--real_output', type=str, default="/fast-1/naotake", help='')
parser.add_argument('--batch_evals', type=int, default=16, help='')
parser.add_argument('--epochs', type=int, default=200, help='')
parser.add_argument('--device', type=str, default='cpu', help='Device for CUDA')
parser.add_argument('--project', action="store_true", help='')
parser.add_argument('--dataset', type=str, default='32par', help='')
parser.add_argument('--n_classes', type=int, default=32, help='')
parser.add_argument('--batch_out', type=int, default=3, help='')
parser.add_argument('--test_sounds', type=str, default='/fast-2/datasets/flow_synth_test', help='')
parser.add_argument('--nbworkers', type=int, default=0, help='')
args = parser.parse_args()
args.output = os.path.split(args.model_path)[0] #.../models
args.output = os.path.split(args.output)[0]
args.synthesize = True
model_name = os.path.splitext(os.path.basename(args.model_path))[0]
args.base_model = args.real_output + '/models/' + model_name
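# Parse the model type, data representation and loss name from the checkpoint file name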
model_pars = model_name.split("_")
if model_pars[0]+model_pars[1] in ["vaeflow", "gatedcnn", "gatedmlp", "rescnn"]:
args.model = model_pars[0] + "_" + model_pars[1]
idx = 2
else:
args.model = model_pars[0]
idx = 1
if model_pars[idx+1] == "mfcc": #mel_mfcc
args.data="mel_mfcc"
idx += 1
else:
args.data = model_pars[idx]
args.loss = model_pars[idx+1]
base_img = '{0}/images/{1}_re'.format(args.real_output, model_name)
base_audio = '{0}/audio/{1}_re'.format(args.real_output, model_name)
args.cuda = not args.device == 'cpu' and torch.cuda.is_available()
args.device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
args.model_name, args.base_img, args.base_audio = model_name, base_img, base_audio
ref_split = '/fast-2/datasets/diva_dataset' + '/reference_split_' + args.dataset+ "_" +args.data + '.npz'
data = np.load(ref_split)['arr_0']
train_loader, valid_loader, test_loader = data[0], data[1], data[2]
args.batch_size = test_loader.batch_size
args.output_size = train_loader.dataset.output_size
args.input_size = train_loader.dataset.input_size
model = torch.load(args.model_path, map_location=args.device)
model.to(args.device)
args.engine, args.generator, args.param_defaults, args.rev_idx = create_synth(args.dataset)
if not args.project:
evaluate_synthesis(model, test_loader, args, train=False)
else:
test_sounds = get_external_sounds(args.test_sounds, test_loader.dataset, args)
evaluate_projection(model, test_sounds, args, train=False)
|
[
"evaluate.evaluate_projection",
"evaluate.evaluate_synthesis"
] |
[((38, 59), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (52, 59), False, 'import matplotlib\n'), ((247, 272), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (270, 272), False, 'import argparse, os\n'), ((2635, 2688), 'torch.load', 'torch.load', (['args.model_path'], {'map_location': 'args.device'}), '(args.model_path, map_location=args.device)\n', (2645, 2688), False, 'import torch\n'), ((2776, 2802), 'synth.synthesize.create_synth', 'create_synth', (['args.dataset'], {}), '(args.dataset)\n', (2788, 2802), False, 'from synth.synthesize import create_synth\n'), ((1279, 1309), 'os.path.split', 'os.path.split', (['args.model_path'], {}), '(args.model_path)\n', (1292, 1309), False, 'import argparse, os\n'), ((1339, 1365), 'os.path.split', 'os.path.split', (['args.output'], {}), '(args.output)\n', (1352, 1365), False, 'import argparse, os\n'), ((2084, 2109), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2107, 2109), False, 'import torch\n'), ((2388, 2406), 'numpy.load', 'np.load', (['ref_split'], {}), '(ref_split)\n', (2395, 2406), True, 'import numpy as np\n'), ((2828, 2885), 'evaluate.evaluate_synthesis', 'evaluate_synthesis', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (2846, 2885), False, 'from evaluate import evaluate_synthesis, evaluate_projection\n'), ((2910, 2974), 'utils.data.get_external_sounds', 'get_external_sounds', (['args.test_sounds', 'test_loader.dataset', 'args'], {}), '(args.test_sounds, test_loader.dataset, args)\n', (2929, 2974), False, 'from utils.data import get_external_sounds\n'), ((2979, 3037), 'evaluate.evaluate_projection', 'evaluate_projection', (['model', 'test_sounds', 'args'], {'train': '(False)'}), '(model, test_sounds, args, train=False)\n', (2998, 3037), False, 'from evaluate import evaluate_synthesis, evaluate_projection\n'), ((1421, 1454), 'os.path.basename', 'os.path.basename', (['args.model_path'], {}), '(args.model_path)\n', (1437, 1454), False, 'import argparse, os\n'), ((2152, 2177), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2175, 2177), False, 'import torch\n')]
|
import evaluate
import pandas as pd
import sys
import glob
sys.path.append('../gopher')
import utils
import numpy as np
import json
def get_runs(glob_pattern):
bin_run = {}
for run_dir in glob.glob(glob_pattern):
config = utils.get_config(run_dir)
if config['loss_fn']['value'] == 'poisson':
bin_run[config['bin_size']['value']] = run_dir
return bin_run
result_base_dir = utils.make_dir('inter_results')
# get datasets
testset, targets = utils.collect_whole_testset('../datasets/quantitative_data/testset/')
model_run_pattern = {'binloss_basenji_v2': '../trained_models/basenji_v2/binloss_basenji_v2/run*',
'bpnet_bin_loss_40': '../trained_models/bpnet/bin_loss_40/run*'}
for model_label, run_pattern in model_run_pattern.items():
bin_run = get_runs(run_pattern)
# get performance metrics for various evaluation bin sizes
result_path = result_base_dir + '/{}_triangle_plot.txt'.format(model_label)
bin_sizes = sorted(list(bin_run.keys()))
performance_per_resolution = []
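    # evaluate each model (trained at raw_bin_size) at every coarser evaluation bin size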
for raw_bin_size in bin_sizes:
model, _ = utils.read_model(bin_run[raw_bin_size])
all_true, all_pred = utils.get_true_pred(model, raw_bin_size, testset)
for eval_bin_size in bin_sizes:
if eval_bin_size >= raw_bin_size:
print(raw_bin_size, '--->', eval_bin_size)
true_for_eval = evaluate.change_resolution(all_true, raw_bin_size, eval_bin_size)
pred_for_eval = evaluate.change_resolution(all_pred, raw_bin_size, eval_bin_size)
performance = evaluate.get_performance(true_for_eval, pred_for_eval, targets, 'whole')
performance_per_resolution.append([raw_bin_size, eval_bin_size] + list(performance.mean().values))
metric = 'pr_corr'
label = '<NAME>'
sorted_personr = pd.DataFrame(performance_per_resolution,
columns=['train', 'eval'] + list(performance.columns[:-1].values)).sort_values(
['train', 'eval'])[['train', 'eval', metric]]
padded_values = []
for train_bin, df in sorted_personr.groupby('train'):
pr_values = list(df[metric].values)
add_N = len(bin_sizes) - len(pr_values)
if add_N > 0:
pr_values = [np.nan for n in range(add_N)] + pr_values
padded_values.append(pr_values)
with open(result_path, 'w') as f:
f.write(json.dumps(padded_values))
|
[
"evaluate.change_resolution",
"evaluate.get_performance"
] |
[((59, 87), 'sys.path.append', 'sys.path.append', (['"""../gopher"""'], {}), "('../gopher')\n", (74, 87), False, 'import sys\n'), ((415, 446), 'utils.make_dir', 'utils.make_dir', (['"""inter_results"""'], {}), "('inter_results')\n", (429, 446), False, 'import utils\n'), ((481, 550), 'utils.collect_whole_testset', 'utils.collect_whole_testset', (['"""../datasets/quantitative_data/testset/"""'], {}), "('../datasets/quantitative_data/testset/')\n", (508, 550), False, 'import utils\n'), ((198, 221), 'glob.glob', 'glob.glob', (['glob_pattern'], {}), '(glob_pattern)\n', (207, 221), False, 'import glob\n'), ((240, 265), 'utils.get_config', 'utils.get_config', (['run_dir'], {}), '(run_dir)\n', (256, 265), False, 'import utils\n'), ((1110, 1149), 'utils.read_model', 'utils.read_model', (['bin_run[raw_bin_size]'], {}), '(bin_run[raw_bin_size])\n', (1126, 1149), False, 'import utils\n'), ((1179, 1228), 'utils.get_true_pred', 'utils.get_true_pred', (['model', 'raw_bin_size', 'testset'], {}), '(model, raw_bin_size, testset)\n', (1198, 1228), False, 'import utils\n'), ((2419, 2444), 'json.dumps', 'json.dumps', (['padded_values'], {}), '(padded_values)\n', (2429, 2444), False, 'import json\n'), ((1406, 1471), 'evaluate.change_resolution', 'evaluate.change_resolution', (['all_true', 'raw_bin_size', 'eval_bin_size'], {}), '(all_true, raw_bin_size, eval_bin_size)\n', (1432, 1471), False, 'import evaluate\n'), ((1504, 1569), 'evaluate.change_resolution', 'evaluate.change_resolution', (['all_pred', 'raw_bin_size', 'eval_bin_size'], {}), '(all_pred, raw_bin_size, eval_bin_size)\n', (1530, 1569), False, 'import evaluate\n'), ((1600, 1672), 'evaluate.get_performance', 'evaluate.get_performance', (['true_for_eval', 'pred_for_eval', 'targets', '"""whole"""'], {}), "(true_for_eval, pred_for_eval, targets, 'whole')\n", (1624, 1672), False, 'import evaluate\n')]
|
import logging
import numpy as np
import torch
from torch import nn
from anchor_based import anchor_helper
from anchor_based.dsnet import DSNet
from anchor_based.losses import calc_cls_loss, calc_loc_loss
from evaluate import evaluate
from helpers import data_helper, vsumm_helper, bbox_helper
logger = logging.getLogger()
def xavier_init(module):
cls_name = module.__class__.__name__
if 'Linear' in cls_name or 'Conv' in cls_name:
nn.init.xavier_uniform_(module.weight, gain=np.sqrt(2.0))
if module.bias is not None:
nn.init.constant_(module.bias, 0.1)
def train(args, split, save_path):
model = DSNet(base_model=args.base_model, num_feature=args.num_feature,
num_hidden=args.num_hidden, anchor_scales=args.anchor_scales,
num_head=args.num_head)
model = model.to(args.device)
model.apply(xavier_init)
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(parameters, lr=args.lr,
weight_decay=args.weight_decay)
max_val_fscore = -1
train_set = data_helper.VideoDataset(split['train_keys'])
train_loader = data_helper.DataLoader(train_set, shuffle=True)
val_set = data_helper.VideoDataset(split['test_keys'])
val_loader = data_helper.DataLoader(val_set, shuffle=False)
for epoch in range(args.max_epoch):
model.train()
stats = data_helper.AverageMeter('loss', 'cls_loss', 'loc_loss')
for _, seq, gtscore, cps, n_frames, nfps, picks, _ in train_loader:
keyshot_summ = vsumm_helper.get_keyshot_summ(
gtscore, cps, n_frames, nfps, picks)
target = vsumm_helper.downsample_summ(keyshot_summ)
if not target.any():
continue
target_bboxes = bbox_helper.seq2bbox(target)
target_bboxes = bbox_helper.lr2cw(target_bboxes)
anchors = anchor_helper.get_anchors(target.size, args.anchor_scales)
# Get class and location label for positive samples
cls_label, loc_label = anchor_helper.get_pos_label(
anchors, target_bboxes, args.pos_iou_thresh)
# Get negative samples
num_pos = cls_label.sum()
cls_label_neg, _ = anchor_helper.get_pos_label(
anchors, target_bboxes, args.neg_iou_thresh)
cls_label_neg = anchor_helper.get_neg_label(
cls_label_neg, int(args.neg_sample_ratio * num_pos))
# Get incomplete samples
cls_label_incomplete, _ = anchor_helper.get_pos_label(
anchors, target_bboxes, args.incomplete_iou_thresh)
cls_label_incomplete[cls_label_neg != 1] = 1
cls_label_incomplete = anchor_helper.get_neg_label(
cls_label_incomplete,
int(args.incomplete_sample_ratio * num_pos))
cls_label[cls_label_neg == -1] = -1
cls_label[cls_label_incomplete == -1] = -1
cls_label = torch.tensor(cls_label, dtype=torch.float32).to(args.device)
loc_label = torch.tensor(loc_label, dtype=torch.float32).to(args.device)
seq = torch.tensor(seq, dtype=torch.float32).unsqueeze(0).to(args.device)
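            # predict per-anchor classification scores and location offsets for the sequence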
pred_cls, pred_loc = model(seq)
loc_loss = calc_loc_loss(pred_loc, loc_label, cls_label)
cls_loss = calc_cls_loss(pred_cls, cls_label)
loss = cls_loss + args.lambda_reg * loc_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
stats.update(loss=loss.item(), cls_loss=cls_loss.item(),
loc_loss=loc_loss.item())
val_fscore, _ = evaluate(model, val_loader, args.nms_thresh, args.device)
if max_val_fscore < val_fscore:
max_val_fscore = val_fscore
torch.save(model.state_dict(), str(save_path))
logger.info(f'Epoch: {epoch}/{args.max_epoch} '
f'Loss: {stats.cls_loss:.4f}/{stats.loc_loss:.4f}/{stats.loss:.4f} '
f'F-score cur/max: {val_fscore:.4f}/{max_val_fscore:.4f}')
return max_val_fscore
|
[
"evaluate.evaluate"
] |
[((306, 325), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (323, 325), False, 'import logging\n'), ((644, 798), 'anchor_based.dsnet.DSNet', 'DSNet', ([], {'base_model': 'args.base_model', 'num_feature': 'args.num_feature', 'num_hidden': 'args.num_hidden', 'anchor_scales': 'args.anchor_scales', 'num_head': 'args.num_head'}), '(base_model=args.base_model, num_feature=args.num_feature, num_hidden=\n args.num_hidden, anchor_scales=args.anchor_scales, num_head=args.num_head)\n', (649, 798), False, 'from anchor_based.dsnet import DSNet\n'), ((979, 1051), 'torch.optim.Adam', 'torch.optim.Adam', (['parameters'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay'}), '(parameters, lr=args.lr, weight_decay=args.weight_decay)\n', (995, 1051), False, 'import torch\n'), ((1127, 1172), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['train_keys']"], {}), "(split['train_keys'])\n", (1151, 1172), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1192, 1239), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True)\n', (1214, 1239), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1255, 1299), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['test_keys']"], {}), "(split['test_keys'])\n", (1279, 1299), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1317, 1363), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['val_set'], {'shuffle': '(False)'}), '(val_set, shuffle=False)\n', (1339, 1363), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1443, 1499), 'helpers.data_helper.AverageMeter', 'data_helper.AverageMeter', (['"""loss"""', '"""cls_loss"""', '"""loc_loss"""'], {}), "('loss', 'cls_loss', 'loc_loss')\n", (1467, 1499), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((3745, 3802), 'evaluate.evaluate', 'evaluate', (['model', 'val_loader', 'args.nms_thresh', 'args.device'], {}), '(model, val_loader, args.nms_thresh, args.device)\n', (3753, 3802), False, 'from evaluate import evaluate\n'), ((559, 594), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.bias', '(0.1)'], {}), '(module.bias, 0.1)\n', (576, 594), False, 'from torch import nn\n'), ((1604, 1670), 'helpers.vsumm_helper.get_keyshot_summ', 'vsumm_helper.get_keyshot_summ', (['gtscore', 'cps', 'n_frames', 'nfps', 'picks'], {}), '(gtscore, cps, n_frames, nfps, picks)\n', (1633, 1670), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1709, 1751), 'helpers.vsumm_helper.downsample_summ', 'vsumm_helper.downsample_summ', (['keyshot_summ'], {}), '(keyshot_summ)\n', (1737, 1751), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1840, 1868), 'helpers.bbox_helper.seq2bbox', 'bbox_helper.seq2bbox', (['target'], {}), '(target)\n', (1860, 1868), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1897, 1929), 'helpers.bbox_helper.lr2cw', 'bbox_helper.lr2cw', (['target_bboxes'], {}), '(target_bboxes)\n', (1914, 1929), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1952, 2010), 'anchor_based.anchor_helper.get_anchors', 'anchor_helper.get_anchors', (['target.size', 'args.anchor_scales'], {}), '(target.size, args.anchor_scales)\n', (1977, 2010), False, 'from anchor_based import anchor_helper\n'), ((2110, 2182), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.pos_iou_thresh'], {}), '(anchors, target_bboxes, args.pos_iou_thresh)\n', (2137, 2182), False, 'from anchor_based import anchor_helper\n'), ((2305, 2377), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.neg_iou_thresh'], {}), '(anchors, target_bboxes, args.neg_iou_thresh)\n', (2332, 2377), False, 'from anchor_based import anchor_helper\n'), ((2597, 2676), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.incomplete_iou_thresh'], {}), '(anchors, target_bboxes, args.incomplete_iou_thresh)\n', (2624, 2676), False, 'from anchor_based import anchor_helper\n'), ((3345, 3390), 'anchor_based.losses.calc_loc_loss', 'calc_loc_loss', (['pred_loc', 'loc_label', 'cls_label'], {}), '(pred_loc, loc_label, cls_label)\n', (3358, 3390), False, 'from anchor_based.losses import calc_cls_loss, calc_loc_loss\n'), ((3414, 3448), 'anchor_based.losses.calc_cls_loss', 'calc_cls_loss', (['pred_cls', 'cls_label'], {}), '(pred_cls, cls_label)\n', (3427, 3448), False, 'from anchor_based.losses import calc_cls_loss, calc_loc_loss\n'), ((497, 509), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (504, 509), True, 'import numpy as np\n'), ((3043, 3087), 'torch.tensor', 'torch.tensor', (['cls_label'], {'dtype': 'torch.float32'}), '(cls_label, dtype=torch.float32)\n', (3055, 3087), False, 'import torch\n'), ((3128, 3172), 'torch.tensor', 'torch.tensor', (['loc_label'], {'dtype': 'torch.float32'}), '(loc_label, dtype=torch.float32)\n', (3140, 3172), False, 'import torch\n'), ((3208, 3246), 'torch.tensor', 'torch.tensor', (['seq'], {'dtype': 'torch.float32'}), '(seq, dtype=torch.float32)\n', (3220, 3246), False, 'import torch\n')]
|
import chess
import chess.pgn
from evaluate import Evaluator
from weightsHandler import WeightsHandler
import config
def main():
# Open PGN file with games database
gamesFile = open(config.GAMES_FILE_NAME)
# Initialize counter
gamesCounter = 0
# Initialize move selection module
# moveSelector = MoveSelector(config.MAX_ITER_MTD, config.MAX_DEPTH, config.MAX_SCORE)
evaluator = Evaluator(config.MAX_ITER_MTD,
config.MAX_DEPTH, config.MAX_SCORE)
# Initialize weight-handling module
weightsHandler = WeightsHandler("weights.py")
# Initialize learning rate
learningRate = config.ALPHA_INIT
current_game_num = weightsHandler.getGameNum()
# Loop over recorded games in file until counter reaches limit
while gamesCounter < config.MAX_GAMES + current_game_num:
# Get a game
game = chess.pgn.read_game(gamesFile)
if gamesCounter < current_game_num:
gamesCounter += 1
continue
try:
game.variation(0)
except KeyError:
continue
if not game:
break
# Find the winner
whitePnt = game.headers["Result"]
if whitePnt == "1-0":
winColor = chess.WHITE
elif whitePnt == "0-1":
winColor = chess.BLACK
else:
continue
print("\nGame ", gamesCounter + 1)
# Clear transposition table
evaluator.clearTranspositionTable()
# Play as both black and white
for color in range(2):
# Use local copy of game
state = game
# Get board object from game
board = state.board()
# Initialize list of board and board scores
scores = []
boards = [board.copy()]
# Initialize features list
featuresInit = []
featuresFinal = []
# Initialize turn counter
turnCounter = 0
if color:
print("White")
else:
print("Black")
# Loop through game, move by move
while not state.is_end():
# Get next board position
state = state.variation(0)
board = state.board()
# If computer's turn to move
if board.turn == color:
# Get score of board and position that computer aims to reach
_, score, finalBoard = evaluator.selectMove(board)
# Store score, finalBoard and features of finalBoard
scores.append(score)
boards.append(finalBoard)
fI, fF = evaluator.findFeatures(finalBoard, color)
featuresInit.append(fI)
featuresFinal.append(fF)
turnCounter = turnCounter + 1
print("Turn ", turnCounter, '\r', end='')
print('\n', end='')
# Depending on winner, store final score
if winColor == color:
scores.append(config.MAX_SCORE)
else:
scores.append(-config.MAX_SCORE)
# Learn weights
initPosWeights, finalPosWeights = weightsHandler.getWeights()
initPosWeights, finalPosWeights = learn(
initPosWeights,
finalPosWeights,
featuresInit,
featuresFinal,
scores,
learningRate,
config.LAMBDA,
config.MAX_POSITION_SCORE
)
# Store weights
weightsHandler.setWeights(initPosWeights, finalPosWeights)
weightsHandler.setGameNum(gamesCounter)
weightsHandler.writeWeightsToFile()
# Decrease learning rate
learningRate /= config.ALPHA_DEC_FACTOR
# Debug info
# print scores
# print featuresInit[10]
# print featuresFinal[10]
# print moveSelector._transTable.hits
# print moveSelector._transTable.notHits
# print moveSelector._transTable.size
# print initPosWeights
# Done learning from one game, so increment game counter
gamesCounter = gamesCounter + 1
# Close file handlers when learning is complete
weightsHandler.closeWeightsFile()
gamesFile.close()
def learn(wRawInit, wRawFin, fInit, fFinal, J, alpha, lambdaDecay, clampVal):
wInit = []
wFin = []
sizeJ = len(J)
# Unrolling parameters into vector
for j in range(6):
for i in range(64):
wInit.append(wRawInit[j][i])
wFin.append(wRawFin[j][i])
sizeW = len(wInit)
# Calculate update amount (with sign) for parameters
updateMagInit = [0 for i in range(sizeW)]
updateMagFinal = [0 for i in range(sizeW)]
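    # Sketch of the intended update (a TD(lambda)-style temporal-difference
    # rule; the exact derivation is an assumption read off the code below):
    #   delta_w[i] = alpha * sum_t f_t[i] * sum_{j>=t} lambda^(j-t) * (J[j+1] - J[j])
    # where f_t are the feature values of the position reached at move t and
    # J holds the position scores, ending in +/-MAX_SCORE for the game result.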
for t in range(sizeJ - 1):
propTempDiff = 0 # Propagated temporal difference
for j in range(t, sizeJ - 1):
propTempDiff += lambdaDecay**(j - t) * (J[j + 1] - J[j])
updateMagInit = [updateMagInit[i] +
(propTempDiff * fInit[t][i]) for i in range(sizeW)]
updateMagFinal = [updateMagFinal[i] +
(propTempDiff * fFinal[t][i]) for i in range(sizeW)]
# Update parameters
for i in range(len(wInit)):
wInit[i] += alpha * updateMagInit[i]
wFin[i] += alpha * updateMagFinal[i]
# Rolling parameter vector
wRawInit = [[max(min(int(round(wInit[i + 64*j])), clampVal), -clampVal)
for i in range(0, 64)] for j in range(0, 6)]
wRawFin = [[max(min(int(round(wFin[i + 64*j])), clampVal), -clampVal)
for i in range(0, 64)] for j in range(0, 6)]
# print(wRawInit, wRawFin)
# Return final weights
return (wRawInit, wRawFin)
if __name__ == "__main__":
main()
|
[
"evaluate.Evaluator"
] |
[((409, 475), 'evaluate.Evaluator', 'Evaluator', (['config.MAX_ITER_MTD', 'config.MAX_DEPTH', 'config.MAX_SCORE'], {}), '(config.MAX_ITER_MTD, config.MAX_DEPTH, config.MAX_SCORE)\n', (418, 475), False, 'from evaluate import Evaluator\n'), ((563, 591), 'weightsHandler.WeightsHandler', 'WeightsHandler', (['"""weights.py"""'], {}), "('weights.py')\n", (577, 591), False, 'from weightsHandler import WeightsHandler\n'), ((878, 908), 'chess.pgn.read_game', 'chess.pgn.read_game', (['gamesFile'], {}), '(gamesFile)\n', (897, 908), False, 'import chess\n')]
|
import evaluate
from formulas import jaccard, ochiai, tarantula, ample, wong1, wong2, wong3, op1, op2, gp_list, gpif, gpasgn, gpcall, gpseq
import math
import sys
def compare_formula(spectra_list, f1, f2):
f1_list = list(map(lambda sp : f1(sp[0], sp[1], sp[2], sp[3]), spectra_list))
f2_list = list(map(lambda sp : f2(sp[0], sp[1], sp[2], sp[3]), spectra_list))
f1_sorted = sorted(f1_list, reverse = True)
f2_sorted = sorted(f2_list, reverse = True)
f1_rank = list(map(lambda x : f1_sorted.index(x), f1_list))
f2_rank = list(map(lambda x : f2_sorted.index(x), f2_list))
return measure_similarity(f1_rank, f2_rank)
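# measure_similarity below is (ties aside) the Spearman footrule distance:
# the L1 distance between two rank vectors, so 0 means identical rankings.
# Worked example: ranks [0, 1, 2] vs. [2, 1, 0] give |0-2| + |1-1| + |2-0| = 4.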
def measure_similarity(order1, order2):
res = 0
for (i1, i2) in zip(order1, order2):
res += abs(i1 - i2)
return res
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
if b == 0:
return 1
else:
return a / b
def sqrt(a):
return math.sqrt(abs(a))
def parse_formula(fun_str):
return lambda ep, ef, np, nf : eval(fun_str)
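# The generated lambda closes over the four spectrum counts supplied by
# compare_formula; assuming the usual SBFL convention: ep/ef = passed/failed
# tests covering the element, np/nf = passed/failed tests not covering it.
# Hypothetical example using the helper functions defined above:
#   ochiai_like = parse_formula("div(ef, sqrt(mul(add(ef, nf), add(ef, ep))))")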
def read_formulas(file_path):
formula_list = []
with open(file_path, 'r') as file:
while True:
line = file.readline()
if '"' not in line:
continue
if not line:
break
fun_str = line.split('"')[1]
formula_list.append(parse_formula(fun_str))
return formula_list
def file_test(formula_path, data_path, write_path):
    # Use the arguments passed in rather than re-reading sys.argv here
    formula_list = read_formulas(formula_path)
human_list = [("jaccard", jaccard), ("ochiai", ochiai), ("tarantula", tarantula),
("ample", ample), ("wong1", wong1), ("wong2", wong2), ("wong3", wong3), ("op1", op1), ("op2", op2)]
spectra_list = evaluate.spectra_list([data_path])[0]["spectra_list"]
with open(write_path, 'w') as out:
for i in range(1, 31):
eval_formula = gp_list[i - 1]
formula_name = "gp" + str(i)
sim_list = list(map(lambda f : str(compare_formula(spectra_list, f, eval_formula)), formula_list))
out.write(formula_name + "," + ','.join(sim_list) + '\n')
for name, eval_formula in human_list:
sim_list = list(map(lambda f : str(compare_formula(spectra_list, f, eval_formula)), formula_list))
out.write(name + "," + ','.join(sim_list) + '\n')
def form_test(data_path, write_path):
formula_list = [gpif, gpasgn, gpcall, gpseq]
human_list = [("jaccard", jaccard), ("ochiai", ochiai), ("tarantula", tarantula),
("ample", ample), ("wong1", wong1), ("wong2", wong2), ("wong3", wong3), ("op1", op1), ("op2", op2)]
spectra_list = evaluate.spectra_list([data_path])[0]["spectra_list"]
with open(write_path, 'w') as out:
for i in range(1, 31):
eval_formula = gp_list[i - 1]
formula_name = "gp" + str(i)
sim_list = list(map(lambda f : str(compare_formula(spectra_list, f, eval_formula)), formula_list))
out.write(formula_name + "," + ','.join(sim_list) + '\n')
for name, eval_formula in human_list:
sim_list = list(map(lambda f : str(compare_formula(spectra_list, f, eval_formula)), formula_list))
out.write(name + "," + ','.join(sim_list) + '\n')
if __name__ == "__main__":
if len(sys.argv) == 4:
file_test(sys.argv[1], sys.argv[2], sys.argv[3])
else:
form_test(sys.argv[1], sys.argv[2])
|
[
"evaluate.spectra_list"
] |
[((1864, 1898), 'evaluate.spectra_list', 'evaluate.spectra_list', (['[data_path]'], {}), '([data_path])\n', (1885, 1898), False, 'import evaluate\n'), ((2784, 2818), 'evaluate.spectra_list', 'evaluate.spectra_list', (['[data_path]'], {}), '([data_path])\n', (2805, 2818), False, 'import evaluate\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2018/11/4 15:34
# @Author : QuietWoods
# @FileName: eval_full_model.py
# @Software: PyCharm
""" Evaluate the baselines ont ROUGE/METEOR"""
import argparse
import json
import os
from os.path import join, exists
from evaluate import eval_meteor, eval_rouge
try:
_DATA_DIR = os.environ['DATA']
except KeyError:
print('please use environment variable to specify data directories')
def main(args):
dec_dir = args.decode_file
ref_dir = args.ref_file
assert exists(ref_dir)
if args.rouge:
output = eval_rouge(dec_dir, ref_dir)
metric = 'rouge'
else:
output = eval_meteor(dec_dir, ref_dir)
metric = 'meteor'
print(output)
with open('{}.txt'.format(metric), 'w') as f:
f.write(output)
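# Hypothetical invocation (the paths are placeholders):
#   python eval_full_model.py --rouge --decode_file ./decoded --ref_file ./refs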
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluate the output files for the RL full models')
# choose metric to evaluate
metric_opt = parser.add_mutually_exclusive_group(required=True)
metric_opt.add_argument('--rouge', action='store_true',
help='ROUGE evaluation')
metric_opt.add_argument('--meteor', action='store_true',
help='METEOR evaluation')
parser.add_argument('--decode_file', action='store', required=True,
help='directory of decoded summaries')
    parser.add_argument('--ref_file', action='store', required=True,
                        help='directory of reference summaries')
args = parser.parse_args()
main(args)
|
[
"evaluate.eval_meteor",
"evaluate.eval_rouge"
] |
[((515, 530), 'os.path.exists', 'exists', (['ref_dir'], {}), '(ref_dir)\n', (521, 530), False, 'from os.path import join, exists\n'), ((839, 931), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate the output files for the RL full models"""'}), "(description=\n 'Evaluate the output files for the RL full models')\n", (862, 931), False, 'import argparse\n'), ((568, 596), 'evaluate.eval_rouge', 'eval_rouge', (['dec_dir', 'ref_dir'], {}), '(dec_dir, ref_dir)\n', (578, 596), False, 'from evaluate import eval_meteor, eval_rouge\n'), ((649, 678), 'evaluate.eval_meteor', 'eval_meteor', (['dec_dir', 'ref_dir'], {}), '(dec_dir, ref_dir)\n', (660, 678), False, 'from evaluate import eval_meteor, eval_rouge\n')]
|
import argparse
import os
import torch
import yaml
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils.model import get_model, get_vocoder, get_param_num
from utils.tools import get_configs_of, to_device, log, synth_one_sample
from model import FastSpeech2Loss
from dataset import Dataset
from evaluate import evaluate
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args, configs):
print("Prepare training ...")
preprocess_config, model_config, train_config = configs
# Get dataset
dataset = Dataset(
"train.txt", preprocess_config, train_config, sort=True, drop_last=True
)
batch_size = train_config["optimizer"]["batch_size"]
group_size = 4 # Set this larger than 1 to enable sorting in Dataset
assert batch_size * group_size < len(dataset)
loader = DataLoader(
dataset,
batch_size=batch_size * group_size,
shuffle=True,
collate_fn=dataset.collate_fn,
)
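    # Each item yielded by this loader is a *group* of group_size batches
    # (hence "for batch in batchs" below); presumably Dataset sorts samples
    # within a group when sort=True so padding inside each real batch shrinks.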
# Prepare model
model, optimizer = get_model(args, configs, device, train=True)
model = nn.DataParallel(model)
num_param = get_param_num(model)
Loss = FastSpeech2Loss(preprocess_config, model_config).to(device)
print("Number of FastSpeech2 Parameters:", num_param)
# Load vocoder
vocoder = get_vocoder(model_config, device)
# Init logger
for p in train_config["path"].values():
os.makedirs(p, exist_ok=True)
train_log_path = os.path.join(train_config["path"]["log_path"], "train")
val_log_path = os.path.join(train_config["path"]["log_path"], "val")
os.makedirs(train_log_path, exist_ok=True)
os.makedirs(val_log_path, exist_ok=True)
train_logger = SummaryWriter(train_log_path)
val_logger = SummaryWriter(val_log_path)
# Training
step = args.restore_step + 1
epoch = 1
grad_acc_step = train_config["optimizer"]["grad_acc_step"]
grad_clip_thresh = train_config["optimizer"]["grad_clip_thresh"]
total_step = train_config["step"]["total_step"]
log_step = train_config["step"]["log_step"]
save_step = train_config["step"]["save_step"]
synth_step = train_config["step"]["synth_step"]
val_step = train_config["step"]["val_step"]
outer_bar = tqdm(total=total_step, desc="Training", position=0)
outer_bar.n = args.restore_step
outer_bar.update()
while True:
inner_bar = tqdm(total=len(loader), desc="Epoch {}".format(epoch), position=1)
for batchs in loader:
for batch in batchs:
                batch = to_device(batch, device)
                # Forward (no torch.no_grad() here: it would detach the graph,
                # and the requires_grad_(True) workaround it forced gives zero
                # gradients to the model parameters, so training silently stalls)
                output = model(*(batch[2:]))
                # Cal Loss
                losses = Loss(batch, output)
                total_loss = losses[0]
                # Backward; divide by grad_acc_step so accumulated grads average
                total_loss = total_loss / grad_acc_step
                total_loss.backward()
if step % grad_acc_step == 0:
# Clipping gradients to avoid gradient explosion
nn.utils.clip_grad_norm_(model.parameters(), grad_clip_thresh)
# Update weights
optimizer.step_and_update_lr()
optimizer.zero_grad()
if step % log_step == 0:
losses = [l.item() for l in losses]
message1 = "Step {}/{}, ".format(step, total_step)
message2 = "Total Loss: {:.4f}, Mel Loss: {:.4f}, Mel PostNet Loss: {:.4f}, Pitch Loss: {:.4f}, Energy Loss: {:.4f}, Duration Loss: {:.4f}".format(
*losses
)
with open(os.path.join(train_log_path, "log.txt"), "a") as f:
f.write(message1 + message2 + "\n")
outer_bar.write(message1 + message2)
log(train_logger, step, losses=losses)
if step % synth_step == 0:
fig, wav_reconstruction, wav_prediction, tag = synth_one_sample(
batch,
output,
vocoder,
model_config,
preprocess_config,
)
log(
train_logger,
fig=fig,
tag="Training/step_{}_{}".format(step, tag),
)
sampling_rate = preprocess_config["preprocessing"]["audio"][
"sampling_rate"
]
log(
train_logger,
audio=wav_reconstruction,
sampling_rate=sampling_rate,
tag="Training/step_{}_{}_reconstructed".format(step, tag),
)
log(
train_logger,
audio=wav_prediction,
sampling_rate=sampling_rate,
tag="Training/step_{}_{}_synthesized".format(step, tag),
)
if step % val_step == 0:
model.eval()
message = evaluate(model, step, configs, val_logger, vocoder)
with open(os.path.join(val_log_path, "log.txt"), "a") as f:
f.write(message + "\n")
outer_bar.write(message)
model.train()
if step % save_step == 0:
torch.save(
{
"model": model.module.state_dict(),
"optimizer": optimizer._optimizer.state_dict(),
},
os.path.join(
train_config["path"]["ckpt_path"],
"{}.pth.tar".format(step),
),
)
if step == total_step:
quit()
step += 1
outer_bar.update(1)
inner_bar.update(1)
epoch += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--restore_step", type=int, default=0)
parser.add_argument("--FineTune", type=bool, default=False)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="name of dataset",
)
args = parser.parse_args()
# Read Config
preprocess_config, model_config, train_config = get_configs_of(
args.dataset)
configs = (preprocess_config, model_config, train_config)
main(args, configs)
|
[
"evaluate.evaluate"
] |
[((641, 726), 'dataset.Dataset', 'Dataset', (['"""train.txt"""', 'preprocess_config', 'train_config'], {'sort': '(True)', 'drop_last': '(True)'}), "('train.txt', preprocess_config, train_config, sort=True, drop_last=True\n )\n", (648, 726), False, 'from dataset import Dataset\n'), ((930, 1034), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(batch_size * group_size)', 'shuffle': '(True)', 'collate_fn': 'dataset.collate_fn'}), '(dataset, batch_size=batch_size * group_size, shuffle=True,\n collate_fn=dataset.collate_fn)\n', (940, 1034), False, 'from torch.utils.data import DataLoader\n'), ((1114, 1158), 'utils.model.get_model', 'get_model', (['args', 'configs', 'device'], {'train': '(True)'}), '(args, configs, device, train=True)\n', (1123, 1158), False, 'from utils.model import get_model, get_vocoder, get_param_num\n'), ((1171, 1193), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (1186, 1193), True, 'import torch.nn as nn\n'), ((1210, 1230), 'utils.model.get_param_num', 'get_param_num', (['model'], {}), '(model)\n', (1223, 1230), False, 'from utils.model import get_model, get_vocoder, get_param_num\n'), ((1394, 1427), 'utils.model.get_vocoder', 'get_vocoder', (['model_config', 'device'], {}), '(model_config, device)\n', (1405, 1427), False, 'from utils.model import get_model, get_vocoder, get_param_num\n'), ((1550, 1605), 'os.path.join', 'os.path.join', (["train_config['path']['log_path']", '"""train"""'], {}), "(train_config['path']['log_path'], 'train')\n", (1562, 1605), False, 'import os\n'), ((1625, 1678), 'os.path.join', 'os.path.join', (["train_config['path']['log_path']", '"""val"""'], {}), "(train_config['path']['log_path'], 'val')\n", (1637, 1678), False, 'import os\n'), ((1683, 1725), 'os.makedirs', 'os.makedirs', (['train_log_path'], {'exist_ok': '(True)'}), '(train_log_path, exist_ok=True)\n', (1694, 1725), False, 'import os\n'), ((1730, 1770), 'os.makedirs', 'os.makedirs', (['val_log_path'], {'exist_ok': '(True)'}), '(val_log_path, exist_ok=True)\n', (1741, 1770), False, 'import os\n'), ((1790, 1819), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['train_log_path'], {}), '(train_log_path)\n', (1803, 1819), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1837, 1864), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['val_log_path'], {}), '(val_log_path)\n', (1850, 1864), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2327, 2378), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_step', 'desc': '"""Training"""', 'position': '(0)'}), "(total=total_step, desc='Training', position=0)\n", (2331, 2378), False, 'from tqdm import tqdm\n'), ((6286, 6311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6309, 6311), False, 'import argparse\n'), ((6666, 6694), 'utils.tools.get_configs_of', 'get_configs_of', (['args.dataset'], {}), '(args.dataset)\n', (6680, 6694), False, 'from utils.tools import get_configs_of, to_device, log, synth_one_sample\n'), ((448, 473), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (471, 473), False, 'import torch\n'), ((1499, 1528), 'os.makedirs', 'os.makedirs', (['p'], {'exist_ok': '(True)'}), '(p, exist_ok=True)\n', (1510, 1528), False, 'import os\n'), ((1242, 1290), 'model.FastSpeech2Loss', 'FastSpeech2Loss', (['preprocess_config', 'model_config'], {}), '(preprocess_config, model_config)\n', (1257, 1290), False, 'from model import FastSpeech2Loss\n'), ((2629, 2653), 'utils.tools.to_device', 'to_device', (['batch', 'device'], {}), '(batch, device)\n', (2638, 2653), False, 'from utils.tools import get_configs_of, to_device, log, synth_one_sample\n'), ((2702, 2717), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2715, 2717), False, 'import torch\n'), ((3992, 4030), 'utils.tools.log', 'log', (['train_logger', 'step'], {'losses': 'losses'}), '(train_logger, step, losses=losses)\n', (3995, 4030), False, 'from utils.tools import get_configs_of, to_device, log, synth_one_sample\n'), ((4142, 4215), 'utils.tools.synth_one_sample', 'synth_one_sample', (['batch', 'output', 'vocoder', 'model_config', 'preprocess_config'], {}), '(batch, output, vocoder, model_config, preprocess_config)\n', (4158, 4215), False, 'from utils.tools import get_configs_of, to_device, log, synth_one_sample\n'), ((5330, 5381), 'evaluate.evaluate', 'evaluate', (['model', 'step', 'configs', 'val_logger', 'vocoder'], {}), '(model, step, configs, val_logger, vocoder)\n', (5338, 5381), False, 'from evaluate import evaluate\n'), ((3801, 3840), 'os.path.join', 'os.path.join', (['train_log_path', '"""log.txt"""'], {}), "(train_log_path, 'log.txt')\n", (3813, 3840), False, 'import os\n'), ((5412, 5449), 'os.path.join', 'os.path.join', (['val_log_path', '"""log.txt"""'], {}), "(val_log_path, 'log.txt')\n", (5424, 5449), False, 'import os\n')]
|
import random
import numpy as np
import os
import logging
import torch
from utilities import get_device, current_utc_time
import pandas as pd
from importlib import reload  # "imp" is deprecated and removed in Python 3.12
from data_loader import get_loader, prepare_dataset
from transformers import AdamW, get_linear_schedule_with_warmup
from models import get_model
from trainer import train_model
from evaluate import evaluate_model
import pickle
from datetime import datetime
reload(logging)
# Parameters
model_name = "BERT"
seed = 57
epochs = 15
batch_size = 16
learning_rate = 2e-4
epsilon = 1e-8
golden_1 = pd.read_excel("./data/P1-Golden.xlsx")
SAVE_MODEL = True
output_dir = "./models/"
# Set up log file
current_time = current_utc_time()
logging.basicConfig(
filename=f"{os.getcwd()}/bert-p1.log",
filemode="a",
format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
datefmt="%H:%M:%S",
level=logging.INFO,
)
device = get_device()
# Set the seed value all over the place to make this reproducible.
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Prepare dataset
all_input_ids, all_attention_masks, all_labels = prepare_dataset(golden_1)
# Shuffle data and separate evaluation dataset
print("Separating test and train data")
while True:
indices = np.arange(all_input_ids.shape[0])
np.random.shuffle(indices)
all_input_ids = all_input_ids[indices]
all_attention_masks = all_attention_masks[indices]
all_labels = all_labels[indices]
val_labels = all_labels[:50]
# Ensure that we do not have too much bias in validation dataset
bias_ratio = np.count_nonzero(val_labels == 1) / np.count_nonzero(val_labels == 0)
if 0.9 < bias_ratio < 1.1:
val_input_ids = all_input_ids[:50]
val_attention_masks = all_attention_masks[:50]
break
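# The loop above reshuffles until the 50-sample validation split is roughly
# class-balanced: it keeps redrawing until the positive/negative ratio falls
# inside (0.9, 1.1), then freezes that split for evaluation.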
val_dataloader = get_loader(
val_input_ids, val_attention_masks, val_labels, batch_size=batch_size, loader_type="VALIDATE"
)
input_ids = all_input_ids[50:]
attention_masks = all_attention_masks[50:]
labels = all_labels[50:]
logging.info(f"Number of train samples: {len(input_ids)}")
logging.info(f"Number of validation samples: {len(val_input_ids)}")
# Measure the total training time for the whole run.
start_time = datetime.now()
# ========================================
# Training
# ========================================
# Prepare dataloader
train_dataloader = get_loader(input_ids, attention_masks, labels, batch_size=batch_size)
# model
model = get_model(model_name).to(device)
# Optimizer
optimizer = AdamW(
model.parameters(),
lr=learning_rate, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps=epsilon, # args.adam_epsilon - default is 1e-8.
)
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0, # Default value in run_glue.py
num_training_steps=total_steps,
)
model, stats = train_model(
model, train_dataloader, val_dataloader, optimizer, scheduler, seed=seed, epochs=epochs
)
# ========================================
# Evaluation
# ========================================
train_time = (datetime.now() - start_time).total_seconds()
eval_time_start = datetime.now()
eval_report = evaluate_model(model, val_dataloader)
eval_time = (datetime.now() - eval_time_start).total_seconds()
training_stats = {
"train_size": len(labels),
"val_size": len(val_labels),
"training_stats": stats,
"evaluation_report": eval_report,
"train_time": train_time,
"eval_time": eval_time,
}
logging.info(f"Training Stats: \n {training_stats}")
print(f"Evaluation Report: \n {eval_report}")
# Save report
with open("bert-p1.pkl", "wb") as f:
pickle.dump(training_stats, f)
if SAVE_MODEL:
# Create output directory if needed
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print(f"Saving model to {output_dir}")
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
|
[
"evaluate.evaluate_model"
] |
[((439, 454), 'imp.reload', 'reload', (['logging'], {}), '(logging)\n', (445, 454), False, 'from imp import reload\n'), ((583, 621), 'pandas.read_excel', 'pd.read_excel', (['"""./data/P1-Golden.xlsx"""'], {}), "('./data/P1-Golden.xlsx')\n", (596, 621), True, 'import pandas as pd\n'), ((704, 722), 'utilities.current_utc_time', 'current_utc_time', ([], {}), '()\n', (720, 722), False, 'from utilities import get_device, current_utc_time\n'), ((945, 957), 'utilities.get_device', 'get_device', ([], {}), '()\n', (955, 957), False, 'from utilities import get_device, current_utc_time\n'), ((1027, 1044), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1038, 1044), False, 'import random\n'), ((1046, 1066), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1060, 1066), True, 'import numpy as np\n'), ((1068, 1091), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1085, 1091), False, 'import torch\n'), ((1093, 1125), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1119, 1125), False, 'import torch\n'), ((1197, 1222), 'data_loader.prepare_dataset', 'prepare_dataset', (['golden_1'], {}), '(golden_1)\n', (1212, 1222), False, 'from data_loader import get_loader, prepare_dataset\n'), ((1907, 2017), 'data_loader.get_loader', 'get_loader', (['val_input_ids', 'val_attention_masks', 'val_labels'], {'batch_size': 'batch_size', 'loader_type': '"""VALIDATE"""'}), "(val_input_ids, val_attention_masks, val_labels, batch_size=\n batch_size, loader_type='VALIDATE')\n", (1917, 2017), False, 'from data_loader import get_loader, prepare_dataset\n'), ((2326, 2340), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2338, 2340), False, 'from datetime import datetime\n'), ((2501, 2570), 'data_loader.get_loader', 'get_loader', (['input_ids', 'attention_masks', 'labels'], {'batch_size': 'batch_size'}), '(input_ids, attention_masks, labels, batch_size=batch_size)\n', (2511, 2570), False, 'from data_loader import get_loader, prepare_dataset\n'), ((3084, 3182), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'total_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=total_steps)\n', (3115, 3182), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((3248, 3352), 'trainer.train_model', 'train_model', (['model', 'train_dataloader', 'val_dataloader', 'optimizer', 'scheduler'], {'seed': 'seed', 'epochs': 'epochs'}), '(model, train_dataloader, val_dataloader, optimizer, scheduler,\n seed=seed, epochs=epochs)\n', (3259, 3352), False, 'from trainer import train_model\n'), ((3554, 3568), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3566, 3568), False, 'from datetime import datetime\n'), ((3584, 3621), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'val_dataloader'], {}), '(model, val_dataloader)\n', (3598, 3621), False, 'from evaluate import evaluate_model\n'), ((3909, 3964), 'logging.info', 'logging.info', (['f"""Training Stats: \n {training_stats}"""'], {}), '(f"""Training Stats: \n {training_stats}""")\n', (3921, 3964), False, 'import logging\n'), ((1342, 1375), 'numpy.arange', 'np.arange', (['all_input_ids.shape[0]'], {}), '(all_input_ids.shape[0])\n', (1351, 1375), True, 'import numpy as np\n'), ((1381, 1407), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1398, 1407), True, 'import numpy as np\n'), ((4069, 4099), 'pickle.dump', 'pickle.dump', (['training_stats', 'f'], {}), '(training_stats, f)\n', (4080, 4099), False, 'import pickle\n'), ((1670, 1703), 'numpy.count_nonzero', 'np.count_nonzero', (['(val_labels == 1)'], {}), '(val_labels == 1)\n', (1686, 1703), True, 'import numpy as np\n'), ((1706, 1739), 'numpy.count_nonzero', 'np.count_nonzero', (['(val_labels == 0)'], {}), '(val_labels == 0)\n', (1722, 1739), True, 'import numpy as np\n'), ((2591, 2612), 'models.get_model', 'get_model', (['model_name'], {}), '(model_name)\n', (2600, 2612), False, 'from models import get_model\n'), ((4171, 4197), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (4185, 4197), False, 'import os\n'), ((4208, 4231), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4219, 4231), False, 'import os\n'), ((3490, 3504), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3502, 3504), False, 'from datetime import datetime\n'), ((3636, 3650), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3648, 3650), False, 'from datetime import datetime\n'), ((762, 773), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (771, 773), False, 'import os\n')]
|
from pathlib import Path
import random
from fire import Fire
from munch import Munch
import torch
import numpy as np
from config import config, debug_options
from dataset import get_iterator
from utils import wait_for_key, suppress_stdout
from train import train
from evaluate import evaluate
from infer import infer
class Cli:
def __init__(self):
self.defaults = config
self.debug = debug_options
def _default_args(self, **kwargs):
args = self.defaults
if 'debug' in kwargs:
args.update(self.debug)
args.update(kwargs)
        # Resolve paths on the merged args so kwarg overrides are respected,
        # rather than on the module-level config
        args.update(resolve_paths(args))
args.update(fix_seed(args))
args.update(get_device(args))
print(args)
return Munch(args)
def check_dataloader(self, **kwargs):
args = self._default_args(**kwargs)
iters, vocab = get_iterator(args)
for batch in iters['train']:
import ipdb; ipdb.set_trace() # XXX DEBUG
def train(self, **kwargs):
args = self._default_args(**kwargs)
train(args)
wait_for_key()
def evaluate(self, **kwargs):
args = self._default_args(**kwargs)
evaluate(args)
wait_for_key()
def infer(self, **kwargs):
with suppress_stdout():
args = self._default_args(**kwargs)
ans = infer(args)
print(ans)
def resolve_paths(config):
paths = [k for k in config.keys() if k.endswith('_path')]
res = {}
for path in paths:
res[path] = Path(config[path])
return res
def fix_seed(args):
if 'random_seed' not in args:
args['random_seed'] = 0
random.seed(args['random_seed'])
np.random.seed(args['random_seed'])
torch.manual_seed(args['random_seed'])
torch.cuda.manual_seed_all(args['random_seed'])
return args
def get_device(args):
if hasattr(args, 'device'):
device = args.device
else:
device = "cuda" if torch.cuda.is_available() else "cpu"
return {'device': device}
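# Fire exposes each public Cli method as a subcommand, so (assuming this file
# is saved as cli.py) `python cli.py train --debug` runs Cli.train with the
# debug config overrides merged in by _default_args.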
if __name__ == "__main__":
Fire(Cli)
|
[
"evaluate.evaluate"
] |
[((1658, 1690), 'random.seed', 'random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (1669, 1690), False, 'import random\n'), ((1695, 1730), 'numpy.random.seed', 'np.random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (1709, 1730), True, 'import numpy as np\n'), ((1735, 1773), 'torch.manual_seed', 'torch.manual_seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (1752, 1773), False, 'import torch\n'), ((1778, 1825), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["args['random_seed']"], {}), "(args['random_seed'])\n", (1804, 1825), False, 'import torch\n'), ((2064, 2073), 'fire.Fire', 'Fire', (['Cli'], {}), '(Cli)\n', (2068, 2073), False, 'from fire import Fire\n'), ((739, 750), 'munch.Munch', 'Munch', (['args'], {}), '(args)\n', (744, 750), False, 'from munch import Munch\n'), ((862, 880), 'dataset.get_iterator', 'get_iterator', (['args'], {}), '(args)\n', (874, 880), False, 'from dataset import get_iterator\n'), ((1058, 1069), 'train.train', 'train', (['args'], {}), '(args)\n', (1063, 1069), False, 'from train import train\n'), ((1079, 1093), 'utils.wait_for_key', 'wait_for_key', ([], {}), '()\n', (1091, 1093), False, 'from utils import wait_for_key, suppress_stdout\n'), ((1182, 1196), 'evaluate.evaluate', 'evaluate', (['args'], {}), '(args)\n', (1190, 1196), False, 'from evaluate import evaluate\n'), ((1206, 1220), 'utils.wait_for_key', 'wait_for_key', ([], {}), '()\n', (1218, 1220), False, 'from utils import wait_for_key, suppress_stdout\n'), ((1531, 1549), 'pathlib.Path', 'Path', (['config[path]'], {}), '(config[path])\n', (1535, 1549), False, 'from pathlib import Path\n'), ((943, 959), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (957, 959), False, 'import ipdb\n'), ((1266, 1283), 'utils.suppress_stdout', 'suppress_stdout', ([], {}), '()\n', (1281, 1283), False, 'from utils import wait_for_key, suppress_stdout\n'), ((1352, 1363), 'infer.infer', 'infer', (['args'], {}), '(args)\n', (1357, 1363), False, 'from infer import infer\n'), ((1437, 1450), 'config.config.keys', 'config.keys', ([], {}), '()\n', (1448, 1450), False, 'from config import config, debug_options\n'), ((1964, 1989), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1987, 1989), False, 'import torch\n')]
|
import subprocess as sp
import datetime
import os
from math import isclose
import sys
import pytest
import json
import argparse
sys.path.append("../../../DeepSpeedExamples/BingBertSquad")
import evaluate as eval
squad_dir = "/data/BingBertSquad"
base_dir = "../../../DeepSpeedExamples/BingBertSquad"
script_file_name = "run_squad_deepspeed.sh"
model_file_name = "training_state_checkpoint_162.tar"
eval_file_name = "dev-v1.1.json"
pred_file_name = "predictions.json"
num_gpus = "4"
timeout_sec = 5 * 60 * 60 # 5 hours
eval_version = "1.1"
def create_config_file(tmpdir, zeroenabled=False):
config_dict = {
"train_batch_size": 24,
"train_micro_batch_size_per_gpu": 6,
"steps_per_print": 10,
"optimizer": {
"type": "Adam",
"params": {
"lr": 3e-5,
"weight_decay": 0.0,
"bias_correction": False
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
}
}
config_dict["zero_optimization"] = zeroenabled
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
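# The two tests below differ only in whether ZeRO is enabled in the generated
# DeepSpeed config and in the expected SQuAD v1.1 metrics, which come from the
# previously recorded reference runs noted in the comments.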
def test_e2e_squad_deepspeed_base(tmpdir):
config_file = create_config_file(tmpdir)
# base run results => {"exact_match": 83.9829706717124, "f1": 90.71138132004097}
expected_exact_match = 83.98
expected_f1 = 90.71
model_file = os.path.join(squad_dir, model_file_name)
eval_file = os.path.join(squad_dir, eval_file_name)
output_dir = os.path.join(tmpdir, "output")
pred_file = os.path.join(output_dir, pred_file_name)
proc = sp.Popen([
"bash",
script_file_name,
num_gpus,
model_file,
squad_dir,
output_dir,
config_file
],
cwd=base_dir)
try:
proc.communicate(timeout=timeout_sec)
if os.path.exists(pred_file):
eval_result = eval.evaluate(eval_version, eval_file, pred_file)
print("evaluation result: ", json.dumps(eval_result))
assert isclose(eval_result["exact_match"],
expected_exact_match,
abs_tol=1e-2)
assert isclose(eval_result["f1"], expected_f1, abs_tol=1e-2)
else:
pytest.fail("Error: Run Failed")
except sp.TimeoutExpired:
proc.kill()
pytest.fail("Error: Timeout")
except sp.CalledProcessError:
pytest.fail("Error: Run Failed")
def test_e2e_squad_deepspeed_zero(tmpdir):
config_file = create_config_file(tmpdir, True)
# base run results => {"exact_match": 84.1438032166509, "f1": 90.89776136505441}
expected_exact_match = 84.14
expected_f1 = 90.89
model_file = os.path.join(squad_dir, model_file_name)
eval_file = os.path.join(squad_dir, eval_file_name)
output_dir = os.path.join(tmpdir, "output")
pred_file = os.path.join(output_dir, pred_file_name)
proc = sp.Popen([
"bash",
script_file_name,
num_gpus,
model_file,
squad_dir,
output_dir,
config_file
],
cwd=base_dir)
try:
proc.communicate(timeout=timeout_sec)
if os.path.exists(pred_file):
eval_result = eval.evaluate(eval_version, eval_file, pred_file)
print("evaluation result: ", json.dumps(eval_result))
assert isclose(eval_result["exact_match"],
expected_exact_match,
abs_tol=1e-2)
assert isclose(eval_result["f1"], expected_f1, abs_tol=1e-2)
else:
pytest.fail("Error: Run Failed")
except sp.TimeoutExpired:
proc.kill()
pytest.fail("Error: Timeout")
except sp.CalledProcessError:
pytest.fail("Error: Run Failed")
|
[
"evaluate.evaluate"
] |
[((129, 188), 'sys.path.append', 'sys.path.append', (['"""../../../DeepSpeedExamples/BingBertSquad"""'], {}), "('../../../DeepSpeedExamples/BingBertSquad')\n", (144, 188), False, 'import sys\n'), ((1098, 1138), 'os.path.join', 'os.path.join', (['tmpdir', '"""temp_config.json"""'], {}), "(tmpdir, 'temp_config.json')\n", (1110, 1138), False, 'import os\n'), ((1487, 1527), 'os.path.join', 'os.path.join', (['squad_dir', 'model_file_name'], {}), '(squad_dir, model_file_name)\n', (1499, 1527), False, 'import os\n'), ((1544, 1583), 'os.path.join', 'os.path.join', (['squad_dir', 'eval_file_name'], {}), '(squad_dir, eval_file_name)\n', (1556, 1583), False, 'import os\n'), ((1602, 1632), 'os.path.join', 'os.path.join', (['tmpdir', '"""output"""'], {}), "(tmpdir, 'output')\n", (1614, 1632), False, 'import os\n'), ((1649, 1689), 'os.path.join', 'os.path.join', (['output_dir', 'pred_file_name'], {}), '(output_dir, pred_file_name)\n', (1661, 1689), False, 'import os\n'), ((1702, 1814), 'subprocess.Popen', 'sp.Popen', (["['bash', script_file_name, num_gpus, model_file, squad_dir, output_dir,\n config_file]"], {'cwd': 'base_dir'}), "(['bash', script_file_name, num_gpus, model_file, squad_dir,\n output_dir, config_file], cwd=base_dir)\n", (1710, 1814), True, 'import subprocess as sp\n'), ((2831, 2871), 'os.path.join', 'os.path.join', (['squad_dir', 'model_file_name'], {}), '(squad_dir, model_file_name)\n', (2843, 2871), False, 'import os\n'), ((2888, 2927), 'os.path.join', 'os.path.join', (['squad_dir', 'eval_file_name'], {}), '(squad_dir, eval_file_name)\n', (2900, 2927), False, 'import os\n'), ((2946, 2976), 'os.path.join', 'os.path.join', (['tmpdir', '"""output"""'], {}), "(tmpdir, 'output')\n", (2958, 2976), False, 'import os\n'), ((2993, 3033), 'os.path.join', 'os.path.join', (['output_dir', 'pred_file_name'], {}), '(output_dir, pred_file_name)\n', (3005, 3033), False, 'import os\n'), ((3046, 3158), 'subprocess.Popen', 'sp.Popen', (["['bash', script_file_name, num_gpus, model_file, squad_dir, output_dir,\n config_file]"], {'cwd': 'base_dir'}), "(['bash', script_file_name, num_gpus, model_file, squad_dir,\n output_dir, config_file], cwd=base_dir)\n", (3054, 3158), True, 'import subprocess as sp\n'), ((1186, 1212), 'json.dump', 'json.dump', (['config_dict', 'fd'], {}), '(config_dict, fd)\n', (1195, 1212), False, 'import json\n'), ((1961, 1986), 'os.path.exists', 'os.path.exists', (['pred_file'], {}), '(pred_file)\n', (1975, 1986), False, 'import os\n'), ((3305, 3330), 'os.path.exists', 'os.path.exists', (['pred_file'], {}), '(pred_file)\n', (3319, 3330), False, 'import os\n'), ((2014, 2063), 'evaluate.evaluate', 'eval.evaluate', (['eval_version', 'eval_file', 'pred_file'], {}), '(eval_version, eval_file, pred_file)\n', (2027, 2063), True, 'import evaluate as eval\n'), ((2151, 2222), 'math.isclose', 'isclose', (["eval_result['exact_match']", 'expected_exact_match'], {'abs_tol': '(0.01)'}), "(eval_result['exact_match'], expected_exact_match, abs_tol=0.01)\n", (2158, 2222), False, 'from math import isclose\n'), ((2296, 2349), 'math.isclose', 'isclose', (["eval_result['f1']", 'expected_f1'], {'abs_tol': '(0.01)'}), "(eval_result['f1'], expected_f1, abs_tol=0.01)\n", (2303, 2349), False, 'from math import isclose\n'), ((2377, 2409), 'pytest.fail', 'pytest.fail', (['"""Error: Run Failed"""'], {}), "('Error: Run Failed')\n", (2388, 2409), False, 'import pytest\n'), ((2469, 2498), 'pytest.fail', 'pytest.fail', (['"""Error: Timeout"""'], {}), "('Error: Timeout')\n", (2480, 2498), False, 'import pytest\n'), ((2541, 2573), 'pytest.fail', 'pytest.fail', (['"""Error: Run Failed"""'], {}), "('Error: Run Failed')\n", (2552, 2573), False, 'import pytest\n'), ((3358, 3407), 'evaluate.evaluate', 'eval.evaluate', (['eval_version', 'eval_file', 'pred_file'], {}), '(eval_version, eval_file, pred_file)\n', (3371, 3407), True, 'import evaluate as eval\n'), ((3495, 3566), 'math.isclose', 'isclose', (["eval_result['exact_match']", 'expected_exact_match'], {'abs_tol': '(0.01)'}), "(eval_result['exact_match'], expected_exact_match, abs_tol=0.01)\n", (3502, 3566), False, 'from math import isclose\n'), ((3640, 3693), 'math.isclose', 'isclose', (["eval_result['f1']", 'expected_f1'], {'abs_tol': '(0.01)'}), "(eval_result['f1'], expected_f1, abs_tol=0.01)\n", (3647, 3693), False, 'from math import isclose\n'), ((3721, 3753), 'pytest.fail', 'pytest.fail', (['"""Error: Run Failed"""'], {}), "('Error: Run Failed')\n", (3732, 3753), False, 'import pytest\n'), ((3813, 3842), 'pytest.fail', 'pytest.fail', (['"""Error: Timeout"""'], {}), "('Error: Timeout')\n", (3824, 3842), False, 'import pytest\n'), ((3885, 3917), 'pytest.fail', 'pytest.fail', (['"""Error: Run Failed"""'], {}), "('Error: Run Failed')\n", (3896, 3917), False, 'import pytest\n'), ((2106, 2129), 'json.dumps', 'json.dumps', (['eval_result'], {}), '(eval_result)\n', (2116, 2129), False, 'import json\n'), ((3450, 3473), 'json.dumps', 'json.dumps', (['eval_result'], {}), '(eval_result)\n', (3460, 3473), False, 'import json\n')]
|
"""
This tests how asymmetric window sizes affect the evaluation results on the development set.
"""
from config import base
import evaluate as e
config = base.get_config()
config['test_filepath'] = 'resources/test/teddev/data-with-doc.csv'
window_sizes = [(4, 0), (4, 1), (4, 2), (4, 3), (3, 4), (2, 4), (1, 4), (0, 4)]
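# Each (left, right) pair is the number of context tokens taken before and
# after the target token. no_embeddings below is derived from the window so
# the model input width matches; the exact breakdown of 2*(l+r) + 1 + 2*n_tags
# depends on the feature encoding in config.base (an assumption here).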
for window_size in window_sizes:
print("Running {}".format(window_size))
config['window_size'] = window_size
config['no_embeddings'] = 2 * (window_size[0] + window_size[1]) + 1 + config['n_tags'] * 2
predictions = e.evaluate(config)
test_data = e.load_data(config['test_filepath'])
e.output(predictions, test_data, config['classes'],
'results/base.dev.window_size.{}+{}.txt'.format(window_size[0], window_size[1]))
print("Saving {}".format(window_size))
|
[
"evaluate.load_data",
"evaluate.evaluate"
] |
[((158, 175), 'config.base.get_config', 'base.get_config', ([], {}), '()\n', (173, 175), False, 'from config import base\n'), ((556, 574), 'evaluate.evaluate', 'e.evaluate', (['config'], {}), '(config)\n', (566, 574), True, 'import evaluate as e\n'), ((591, 627), 'evaluate.load_data', 'e.load_data', (["config['test_filepath']"], {}), "(config['test_filepath'])\n", (602, 627), True, 'import evaluate as e\n')]
|
import os
from matplotlib.colors import from_levels_and_colors
import numpy as np
import argparse
from config import config
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu_num']
from Models import Resnet3DBuilder, resnet3d_model, densenet3d_model
from Models.training import get_callbacks
from Models.generator import convertData, preprocess, DataGeneratorNew
from keras.models import Sequential
from keras.optimizers import SGD, Adam
from keras.regularizers import l2
from keras.layers import Activation, Dense, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.utils import np_utils
from evaluate import evaluate
from keras import backend as K
from skimage.transform import resize
from math import ceil
# A bare comparison has no effect; assert the expected data format instead
assert K.image_data_format() == "channels_last"
#%%
class_weight = None
dimz = config['dimz']
dimx = config['dimx']
dimy = config['dimy']
channelNum = config['channelNum']
#%%
def get_model_new(baseModeType, num_outputs, optType, learning_rate, reg_factor=1e-4):
if baseModeType == 'resori':
model = Resnet3DBuilder.build_resnet_18((dimz, dimx, dimy, channelNum), num_outputs,
reg_factor = reg_factor, ifbase=False)
elif baseModeType == 'resnew':
model = resnet3d_model(input_shape=(dimz, dimx, dimy, channelNum), num_outputs=num_outputs,
n_base_filters=64, depth=3, dropout_rate=0.3, kernel_reg_factor = reg_factor, ifbase=False)
elif baseModeType == 'dense':
model = densenet3d_model(input_shape=(dimz, dimx, dimy, channelNum), num_outputs=num_outputs,
n_base_filters=64, depth=3, dropout_rate=0.3, kernel_reg_factor = reg_factor, ifbase=False)
# Name layers
for i, layer in enumerate(model.layers):
layer.name = 'layer_' + str(i)
if optType == 'adam':
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
elif optType == 'sgd':
opt = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
# model.summary()
return model
# train
def train_and_predict(projectRoot, args):
print('-'*30)
print('Loading and preprocessing train data...')
print('-'*30)
inputDict = dict()
inputDict['MSA'] = os.path.join(projectRoot, 'MSA.npy')
inputDict['PID'] = os.path.join(projectRoot, 'PID.npy')
inputDict['PSP'] = os.path.join(projectRoot, 'PSP.npy')
features=dict()
features['MSA'] = np.load(inputDict['MSA'])
features['PID'] = np.load(inputDict['PID'])
features['PSP'] = np.load(inputDict['PSP'])
itemNum = int(0)
for key, value in features.items():
itemNum = itemNum+value.shape[0]
steps_per_epoch = ceil(itemNum / args.batch_size)
labelDef = config['labelDef']
labelPercentage = dict()
labelPercentage['MSA'] = 1/float(3)
labelPercentage['PSP'] = 1/float(3)
labelPercentage['PID'] = 1/float(3)
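    # Assumption: DataGeneratorNew draws each class with the probability given
    # here, i.e. batches are balanced across the three diagnoses (MSA/PSP/PID)
    # regardless of how many scans each class contributes.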
X_test = np.load(os.path.join(projectRoot,'testFeatures.npy'))
y_test = np.load(os.path.join(projectRoot,'testLabels.npy'))
X_test,Y_test = convertData(X_test,y_test, config)
validation_data = (X_test,Y_test)
TrainData = DataGeneratorNew(features, labelDef, labelPercentage, args.batch_size)
print('-'*30)
print('Creating and compiling model...')
print('-'*30)
#---------------------------------#
model = get_model_new(args.baseModeType, args.num_outputs, args.optType, args.learning_rate, reg_factor=1e-4)
#---------------------------------#
weightDir = os.path.join(projectRoot, 'Weights.h5')
callbackList = get_callbacks(weightDir, args.learning_rate, args.learning_rate_drop, args.learning_rate_patience,
learning_rate_epochs=None, logging_file="training.log", verbosity=1,
early_stopping_patience=args.early_stopping_patience)
print('-'*30)
print('Fitting model...')
print('-'*30)
if os.path.exists(args.pretrained_weight_dir) and args.UseWeight == True:
model.load_weights(args.pretrained_weight_dir)
if class_weight != None:
train_history = model.fit_generator(TrainData.generator(),
steps_per_epoch=steps_per_epoch,
epochs=args.epochs,
verbose=1,
callbacks=callbackList,
validation_data=validation_data,
validation_steps=1,
class_weight=class_weight,
max_queue_size=10,
workers=1,
use_multiprocessing=False)
else:
train_history = model.fit_generator(TrainData.generator(),
steps_per_epoch=steps_per_epoch,
epochs=args.epochs,
verbose=1,
callbacks=callbackList,
validation_data=validation_data,
validation_steps=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False)
loss = train_history.history['loss']
val_loss = train_history.history['val_loss']
np.save(os.path.join(projectRoot,'loss.npy'),loss)
np.save(os.path.join(projectRoot,'val_loss.npy'),val_loss)
print('-'*30)
print('Loading and preprocessing test data...')
print('-'*30)
X_test = np.load(os.path.join(projectRoot,'testFeatures.npy'))
y_test = np.load(os.path.join(projectRoot,'testLabels.npy'))
X_test, Y_test = convertData(X_test,y_test, config)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
print('-'*30)
print('Loading saved weights...')
print('-'*30)
model.load_weights(weightDir)
print('-'*30)
print('Predicting masks on test data...')
print('-'*30)
Y_predict = model.predict(X_test, verbose=1)
np.save(os.path.join(projectRoot,'Y_predict.npy'), Y_predict)
# num_outputs/learning_rate defaults mirror the argparse defaults used in main()
def value_predict(X_test, baseModeType, optType, load_weight_dir, outputDir=None,
                  num_outputs=3, learning_rate=1e-4):
print('-'*30)
print('Loading and preprocessing test data...')
print('-'*30)
X_test = preprocess(X_test,dimz,dimx,dimy,channelNum)
X_test = X_test.astype('float32')
print('-'*30)
print('Loading saved weights...')
print('-'*30)
#---------------------------------#
    # The original call dropped num_outputs/learning_rate and raised a TypeError
    model = get_model_new(baseModeType, num_outputs, optType, learning_rate, reg_factor=1e-4)
#---------------------------------#
model.load_weights(load_weight_dir)
print('-'*30)
print('Predicting masks on test data...')
print('-'*30)
Y_predict = model.predict(X_test, verbose=1)
if outputDir != None:
if not os.path.exists(outputDir):
os.mkdir(outputDir)
np.save(os.path.join(outputDir,'Y_predict.npy'), Y_predict)
return Y_predict
def main():
parser = argparse.ArgumentParser(description = "PDDNET command line tool")
parser.add_argument("--project_folder", type=str, help = "project folder to save the output data.")
parser.add_argument("--baseModeType", type=str, default='resnew', help = "network type: 'resori', 'resnew', 'dense'")
parser.add_argument("--num_outputs", type=int, default=3, help = "class number")
parser.add_argument("--optType", type=str, default='adam', help = "optimizer type: 'adam', 'sgd'")
parser.add_argument("--epochs", type=int, default=30, help = "training epochs")
parser.add_argument("--batch_size", type=int, default=4, help = "class number")
parser.add_argument('--CV_fold', type=int, default=6, help='cross validation fold')
parser.add_argument('--currentFold', type=int, default=1, help='current training fold')
parser.add_argument("--UseWeight", help = "Whether conduct data normalization.", action = 'store_true')
parser.add_argument("--pretrained_weight_dir", type=str, default=None, help = "pretrained weights as the starting point, usefull if UseWeight=True")
parser.add_argument('--learning_rate', type=float, default=1e-4, help='learning rate')
parser.add_argument('--learning_rate_drop', type=float, default=0.5, help='learning rate drop')
parser.add_argument('--learning_rate_patience', type=int, default=10, help='learning rate drop patience')
parser.add_argument('--early_stopping_patience', type=int, default=30, help='early stopping patience')
args = parser.parse_args()
currentOpePath = os.path.realpath(__file__)
print(currentOpePath)
    # Positional placeholders need positional arguments (keyword args here raised IndexError)
    projectRoot = os.path.join(args.project_folder, 'data_{0}_{1}'.format(args.CV_fold, args.currentFold))
train_and_predict(projectRoot, args)
evaluate(projectRoot)
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate"
] |
[((782, 803), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (801, 803), True, 'from keras import backend as K\n'), ((2403, 2439), 'os.path.join', 'os.path.join', (['projectRoot', '"""MSA.npy"""'], {}), "(projectRoot, 'MSA.npy')\n", (2415, 2439), False, 'import os\n'), ((2463, 2499), 'os.path.join', 'os.path.join', (['projectRoot', '"""PID.npy"""'], {}), "(projectRoot, 'PID.npy')\n", (2475, 2499), False, 'import os\n'), ((2523, 2559), 'os.path.join', 'os.path.join', (['projectRoot', '"""PSP.npy"""'], {}), "(projectRoot, 'PSP.npy')\n", (2535, 2559), False, 'import os\n'), ((2603, 2628), 'numpy.load', 'np.load', (["inputDict['MSA']"], {}), "(inputDict['MSA'])\n", (2610, 2628), True, 'import numpy as np\n'), ((2651, 2676), 'numpy.load', 'np.load', (["inputDict['PID']"], {}), "(inputDict['PID'])\n", (2658, 2676), True, 'import numpy as np\n'), ((2699, 2724), 'numpy.load', 'np.load', (["inputDict['PSP']"], {}), "(inputDict['PSP'])\n", (2706, 2724), True, 'import numpy as np\n'), ((2854, 2885), 'math.ceil', 'ceil', (['(itemNum / args.batch_size)'], {}), '(itemNum / args.batch_size)\n', (2858, 2885), False, 'from math import ceil\n'), ((3228, 3263), 'Models.generator.convertData', 'convertData', (['X_test', 'y_test', 'config'], {}), '(X_test, y_test, config)\n', (3239, 3263), False, 'from Models.generator import convertData, preprocess, DataGeneratorNew\n'), ((3319, 3389), 'Models.generator.DataGeneratorNew', 'DataGeneratorNew', (['features', 'labelDef', 'labelPercentage', 'args.batch_size'], {}), '(features, labelDef, labelPercentage, args.batch_size)\n', (3335, 3389), False, 'from Models.generator import convertData, preprocess, DataGeneratorNew\n'), ((3686, 3725), 'os.path.join', 'os.path.join', (['projectRoot', '"""Weights.h5"""'], {}), "(projectRoot, 'Weights.h5')\n", (3698, 3725), False, 'import os\n'), ((3749, 3985), 'Models.training.get_callbacks', 'get_callbacks', (['weightDir', 'args.learning_rate', 'args.learning_rate_drop', 'args.learning_rate_patience'], {'learning_rate_epochs': 'None', 'logging_file': '"""training.log"""', 'verbosity': '(1)', 'early_stopping_patience': 'args.early_stopping_patience'}), "(weightDir, args.learning_rate, args.learning_rate_drop, args.\n learning_rate_patience, learning_rate_epochs=None, logging_file=\n 'training.log', verbosity=1, early_stopping_patience=args.\n early_stopping_patience)\n", (3762, 3985), False, 'from Models.training import get_callbacks\n'), ((6148, 6183), 'Models.generator.convertData', 'convertData', (['X_test', 'y_test', 'config'], {}), '(X_test, y_test, config)\n', (6159, 6183), False, 'from Models.generator import convertData, preprocess, DataGeneratorNew\n'), ((6825, 6873), 'Models.generator.preprocess', 'preprocess', (['X_test', 'dimz', 'dimx', 'dimy', 'channelNum'], {}), '(X_test, dimz, dimx, dimy, channelNum)\n', (6835, 6873), False, 'from Models.generator import convertData, preprocess, DataGeneratorNew\n'), ((7530, 7593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PDDNET command line tool"""'}), "(description='PDDNET command line tool')\n", (7553, 7593), False, 'import argparse\n'), ((9080, 9106), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (9096, 9106), False, 'import os\n'), ((9303, 9324), 'evaluate.evaluate', 'evaluate', (['projectRoot'], {}), '(projectRoot)\n', (9311, 9324), False, 'from evaluate import evaluate\n'), ((1093, 1210), 'Models.Resnet3DBuilder.build_resnet_18', 'Resnet3DBuilder.build_resnet_18', (['(dimz, dimx, dimy, channelNum)', 'num_outputs'], {'reg_factor': 'reg_factor', 'ifbase': '(False)'}), '((dimz, dimx, dimy, channelNum), num_outputs,\n reg_factor=reg_factor, ifbase=False)\n', (1124, 1210), False, 'from Models import Resnet3DBuilder, resnet3d_model, densenet3d_model\n'), ((1907, 1970), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)'}), '(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n', (1911, 1970), False, 'from keras.optimizers import SGD, Adam\n'), ((3097, 3142), 'os.path.join', 'os.path.join', (['projectRoot', '"""testFeatures.npy"""'], {}), "(projectRoot, 'testFeatures.npy')\n", (3109, 3142), False, 'import os\n'), ((3164, 3207), 'os.path.join', 'os.path.join', (['projectRoot', '"""testLabels.npy"""'], {}), "(projectRoot, 'testLabels.npy')\n", (3176, 3207), False, 'import os\n'), ((4110, 4152), 'os.path.exists', 'os.path.exists', (['args.pretrained_weight_dir'], {}), '(args.pretrained_weight_dir)\n', (4124, 4152), False, 'import os\n'), ((5791, 5828), 'os.path.join', 'os.path.join', (['projectRoot', '"""loss.npy"""'], {}), "(projectRoot, 'loss.npy')\n", (5803, 5828), False, 'import os\n'), ((5846, 5887), 'os.path.join', 'os.path.join', (['projectRoot', '"""val_loss.npy"""'], {}), "(projectRoot, 'val_loss.npy')\n", (5858, 5887), False, 'import os\n'), ((6016, 6061), 'os.path.join', 'os.path.join', (['projectRoot', '"""testFeatures.npy"""'], {}), "(projectRoot, 'testFeatures.npy')\n", (6028, 6061), False, 'import os\n'), ((6083, 6126), 'os.path.join', 'os.path.join', (['projectRoot', '"""testLabels.npy"""'], {}), "(projectRoot, 'testLabels.npy')\n", (6095, 6126), False, 'import os\n'), ((6574, 6616), 'os.path.join', 'os.path.join', (['projectRoot', '"""Y_predict.npy"""'], {}), "(projectRoot, 'Y_predict.npy')\n", (6586, 6616), False, 'import os\n'), ((1309, 1491), 'Models.resnet3d_model', 'resnet3d_model', ([], {'input_shape': '(dimz, dimx, dimy, channelNum)', 'num_outputs': 'num_outputs', 'n_base_filters': '(64)', 'depth': '(3)', 'dropout_rate': '(0.3)', 'kernel_reg_factor': 'reg_factor', 'ifbase': '(False)'}), '(input_shape=(dimz, dimx, dimy, channelNum), num_outputs=\n num_outputs, n_base_filters=64, depth=3, dropout_rate=0.3,\n kernel_reg_factor=reg_factor, ifbase=False)\n', (1323, 1491), False, 'from Models import Resnet3DBuilder, resnet3d_model, densenet3d_model\n'), ((2012, 2075), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=learning_rate, decay=1e-06, momentum=0.9, nesterov=True)\n', (2015, 2075), False, 'from keras.optimizers import SGD, Adam\n'), ((7351, 7376), 'os.path.exists', 'os.path.exists', (['outputDir'], {}), '(outputDir)\n', (7365, 7376), False, 'import os\n'), ((7390, 7409), 'os.mkdir', 'os.mkdir', (['outputDir'], {}), '(outputDir)\n', (7398, 7409), False, 'import os\n'), ((1556, 1740), 'Models.densenet3d_model', 'densenet3d_model', ([], {'input_shape': '(dimz, dimx, dimy, channelNum)', 'num_outputs': 'num_outputs', 'n_base_filters': '(64)', 'depth': '(3)', 'dropout_rate': '(0.3)', 'kernel_reg_factor': 'reg_factor', 'ifbase': '(False)'}), '(input_shape=(dimz, dimx, dimy, channelNum), num_outputs=\n num_outputs, n_base_filters=64, depth=3, dropout_rate=0.3,\n kernel_reg_factor=reg_factor, ifbase=False)\n', (1572, 1740), False, 'from Models import Resnet3DBuilder, resnet3d_model, densenet3d_model\n'), ((7430, 7470), 'os.path.join', 'os.path.join', (['outputDir', '"""Y_predict.npy"""'], {}), "(outputDir, 'Y_predict.npy')\n", (7442, 7470), False, 'import os\n')]
|
import yaml
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
from transformers.models.bert.tokenization_bert import BertTokenizer
from train_util import create_masks, create_train_data, seed_everything
from dataset import ChatDataSet, SampledDataLoader
from torch.optim import AdamW
from model.transformer import Transformer
from evaluate import evaluate
def train(epoch, config, device, data_loader, toker, model, optimizer, criterion):
# set model to train mode
model.train()
with tqdm(total=len(data_loader), desc=f"Epoch {epoch + 1}") as pbar:
for i, batch in enumerate(data_loader):
batch = tuple(t.to(device) for t in batch)
x, y = batch
target = y[:, :-1]
target_y = y[:, 1:]
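            # Teacher forcing: the decoder input is the reply without its last
            # token and the target is the same reply shifted left by one, so
            # position t is trained to predict token t+1.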
source_mask, target_mask = create_masks(x, target, toker.pad_token_id)
out = model(x, source_mask, target, target_mask)
            optimizer.zero_grad()
            loss = criterion(out.transpose(1, 2), target_y).mean()
            loss.backward()
            # Clip gradients between backward() and step(); clipping after the
            # update has no effect on the step just taken
            clip_grad_norm_(model.parameters(), config['max_grad_norm'])
            optimizer.step()
pbar.update(1)
pbar.set_postfix_str(f"loss: {loss.item():.5f}")
# Save model for each epoch with a different name
torch.save(
{
"epoch": epoch,
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
},
f"{config['data_dir']}/{config['fn']}_{epoch}.pth",
)
# Save the final model
torch.save(
{
"epoch": epoch,
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
},
f"{config['data_dir']}/{config['fn']}.pth",
)
print("--------------------------------")
print("Model Saved")
print("--------------------------------")
def main():
with open('config.yaml') as file:
config = yaml.load(file, Loader=yaml.FullLoader)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
seed_everything(config['seed'])
toker = BertTokenizer.from_pretrained(config['bert_model_name'])
data = create_train_data(config, toker, True)
dataset = ChatDataSet(data)
data_loader = SampledDataLoader(
dataset, batch_size=config['batch_size'], padding=toker.pad_token_id
)
model = Transformer(config)
model = model.to(device)
    adam_optim = AdamW(
model.parameters(), lr=config['learning_rate'], betas=config['betas'], eps=1e-9
)
criterion = nn.CrossEntropyLoss(ignore_index=toker.pad_token_id, reduction="none")
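    # Per-token losses (reduction="none"); positions equal to pad_token_id contribute zero loss.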
start_epoch = 0
if config['load']:
        state_dict = torch.load(config['ckpt_path'], map_location=device)
        model.load_state_dict(state_dict["model"])
        adam_optim.load_state_dict(state_dict["optimizer"])
        # Resume from the epoch recorded in the checkpoint rather than a hard-coded value.
        start_epoch = state_dict["epoch"] + 1
for epoch in range(start_epoch, config['n_epochs']):
        train(epoch, config, device, data_loader, toker, model, adam_optim, criterion)
evaluate(
config,
"if you accomplish your task, it is great then",
toker,
model,
device,
False,
)
print("Training Finished")
if __name__ == "__main__":
main()
|
[
"evaluate.evaluate"
] |
[((2101, 2132), 'train_util.seed_everything', 'seed_everything', (["config['seed']"], {}), "(config['seed'])\n", (2116, 2132), False, 'from train_util import create_masks, create_train_data, seed_everything\n'), ((2145, 2201), 'transformers.models.bert.tokenization_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (["config['bert_model_name']"], {}), "(config['bert_model_name'])\n", (2174, 2201), False, 'from transformers.models.bert.tokenization_bert import BertTokenizer\n'), ((2213, 2251), 'train_util.create_train_data', 'create_train_data', (['config', 'toker', '(True)'], {}), '(config, toker, True)\n', (2230, 2251), False, 'from train_util import create_masks, create_train_data, seed_everything\n'), ((2266, 2283), 'dataset.ChatDataSet', 'ChatDataSet', (['data'], {}), '(data)\n', (2277, 2283), False, 'from dataset import ChatDataSet, SampledDataLoader\n'), ((2302, 2394), 'dataset.SampledDataLoader', 'SampledDataLoader', (['dataset'], {'batch_size': "config['batch_size']", 'padding': 'toker.pad_token_id'}), "(dataset, batch_size=config['batch_size'], padding=toker.\n pad_token_id)\n", (2319, 2394), False, 'from dataset import ChatDataSet, SampledDataLoader\n'), ((2417, 2436), 'model.transformer.Transformer', 'Transformer', (['config'], {}), '(config)\n', (2428, 2436), False, 'from model.transformer import Transformer\n'), ((2601, 2671), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'toker.pad_token_id', 'reduction': '"""none"""'}), "(ignore_index=toker.pad_token_id, reduction='none')\n", (2620, 2671), True, 'import torch.nn as nn\n'), ((1973, 2012), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (1982, 2012), False, 'import yaml\n'), ((2762, 2814), 'torch.load', 'torch.load', (["config['ckpt_path']"], {'map_location': 'device'}), "(config['ckpt_path'], map_location=device)\n", (2772, 2814), False, 'import torch\n'), ((3077, 3175), 'evaluate.evaluate', 'evaluate', (['config', '"""if you accomplish your task, it is great then"""', 'toker', 'model', 'device', '(False)'], {}), "(config, 'if you accomplish your task, it is great then', toker,\n model, device, False)\n", (3085, 3175), False, 'from evaluate import evaluate\n'), ((846, 889), 'train_util.create_masks', 'create_masks', (['x', 'target', 'toker.pad_token_id'], {}), '(x, target, toker.pad_token_id)\n', (858, 889), False, 'from train_util import create_masks, create_train_data, seed_everything\n'), ((2058, 2083), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2081, 2083), False, 'import torch\n')]
|
from typing import List
from numpy import ndarray
import matplotlib.pyplot as plt
import threading
# import tensorflow as tf
from PIL import Image, ImageTk
import PySimpleGUI as sg
from config import *
import evaluate
import utility
# Global variables
input_values = [] # Input values for evaluator (ex. image paths)
ml_datas = [] # Output values from evaluator (ex. evaluated raw data)
display_image = None # Image displayed.
window = None # main window.
eval_event = threading.Event() # Set to trigger the evaluator thread; cleared when it finishes.
def initialize_window() -> sg.Window:
sg.theme(WINDOW_THEME)
sg_image = sg.Image(size=(900, 9000), key=KEY_CANVAS, expand_x=True, expand_y=True)
image_column = [[sg.pin(
sg.Column([[sg_image]], key=KEY_IMAGE_COLUMN, visible=False, scrollable=True, expand_x=True, expand_y=True)
)]]
output_text = sg.Text(TEXT_OUTPUT_INITIAL, key=KEY_OUTPUT)
control_group = [[sg.Input(key='_FILES_'), sg.FilesBrowse(key=KEY_BROWSE_FILES, file_types=IMAGE_FILETYPES, initial_folder=DIR_PATH, disabled=True)],
[sg.OK(button_text=BUTTON_DETECT, key=BUTTON_DETECT, disabled=True),
sg.FileSaveAs(
key=BUTTON_SAVE_FILE,
target=KEY_SAVE_FILE,
file_types=IMAGE_FILETYPES,
default_extension=".jpg",
disabled=True
),
sg.In(key=KEY_SAVE_FILE, enable_events=True, visible=False)],
[sg.Submit(KEY_RERENDER, key=KEY_RERENDER, disabled=True)],
[output_text]]
output_group = [[sg.Output(size=(200, 10))]]
# output_group = [[sg.Text("__")]] # Dummy group for evaluation
layout = [[sg.Column(control_group, key=KEY_CONTROL_GROUP), sg.Column(output_group, key=KEY_OUTPUT_GROUP)],
[image_column]]
window = sg.Window('Example GUI for ML Project', resizable=True, auto_size_text=True, size=(900, 800), finalize=True).Layout(layout)
return window
def create_thread(window:sg.Window, result_values:List[evaluate.MLData], eval_event:threading.Event):
evaluator = evaluate.Evaluator(window)
print("evaluator ready")
while True:
eval_event.wait()
print("starting...")
evaluator.try_evaluate(input_values, result_values)
eval_event.clear()
# Do evaluator clearing action here
def excepthook(args):
print(args)
# TODO: reactivate buttons in main thread when exception happens in other thread
# disable_buttons(window, False)
eval_event.clear()
print("Unknown problem while evaluating. Run inside console to see tensorflow debug messages.")
print("Possibly: Your GPU may not have enough memory to run model. Try running it in a CPU mode.")
threading.excepthook = excepthook
def main() -> None:
# Initialize Window
window = initialize_window()
# Set memory growth for all GPUs to use least VRAM possible
# gpus = tf.config.experimental.list_physical_devices('GPU')
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# Initialize evaluator with default window
eval_thread = threading.Thread(target=create_thread, args=(window, ml_datas, eval_event), daemon=True)
eval_thread.start()
# Main loop
while True:
event, values = window.Read()
if event == sg.WIN_CLOSED or event == 'Cancel' or event == BUTTON_EXIT: # if user closes window or clicks cancel
break
if (event == KEY_RERENDER): # if Rerender without evaluating
display_image = draw_on_image(window, ml_datas)
if (event.startswith(BUTTON_DETECT)): # if user presses Detect button
window[KEY_IMAGE_COLUMN].Update(visible = False)
input_values.clear()
input_values.extend(values['_FILES_'].split(';'))
print(input_values)
try:
# physical_devices = tf.config.list_physical_devices('GPU')
# print("GPU Available: ", len(physical_devices))
disable_buttons(window, True)
eval_event.set()
except Exception as e:
eval_event.clear()
disable_buttons(window, False)
print(e)
print("Unknown problem while evaluating. Run inside console to see tensorflow debug messages.")
print("Possibly: Your GPU may not have enough memory to run model. Try running it in a CPU mode.")
if (event.startswith(KEY_SAVE_FILE)): # if user closes file save dialog
print("Try Saving file...")
try:
filename = values['filename']
print(filename)
display_image.convert('RGB').save(filename)
print("Image saved")
except Exception as e:
print(e)
if (event == THREAD_EVENT): # if try_evaluate signals THREAD_EVENT
if values[THREAD_EVENT] == EVAL_READY:
disable_buttons(window, False)
window[KEY_OUTPUT].Update(value=TEXT_EVAL_READY)
if values[THREAD_EVENT] == EVAL_START:
window[KEY_OUTPUT].Update(value=TEXT_EVAL_START)
if values[THREAD_EVENT] == EVAL_COMPLETE:
window[KEY_OUTPUT].Update(value=TEXT_EVAL_COMPLETE)
disable_buttons(window, False)
display_image = draw_on_image(window, ml_datas)
window.close()
def disable_buttons(window, disabled):
window[BUTTON_DETECT].Update(disabled=disabled)
window[KEY_SAVE_FILE].Update(disabled=disabled)
window[KEY_BROWSE_FILES].Update(disabled=disabled)
window[BUTTON_SAVE_FILE].Update(disabled=disabled)
window[KEY_RERENDER].Update(disabled=disabled)
def draw_on_image(window: sg.Window, ml_datas:List[evaluate.MLData]) -> Image.Image:
"""
Draw contents of RESULTS inside WINDOW.
"""
print(f"drawing {len(ml_datas)} image(s)...")
sg_image = window[KEY_CANVAS]
display_image = None
try:
fig, axs = plt.subplots(nrows=len(ml_datas), ncols=1)
for i, ml_data in enumerate(ml_datas):
            ax = axs[i] if isinstance(axs, ndarray) else axs
ax.set_anchor('N')
            # Use a drawing function that might come with your ML package,
            # or simply draw the image and data on ax using ml_data.
ax.text(0, 0, ml_data.result["key"])
ax.imshow(ml_data.image)
fig.set_dpi(120)
fig.subplots_adjust(left=0, right=1, wspace=0.01, hspace=0.15)
        # FIXME: instead of the magic number, grow the figure size in proportion to any extra legend.
height = 2.3 * len(ml_datas)
fig.set_size_inches(7, height, forward=True)
fig.tight_layout()
display_image = Image.fromarray(utility.figure_to_array(fig))
result_image = ImageTk.PhotoImage(image=display_image)
# display image in main screen
sg_image.update(data=result_image)
window.refresh()
window[KEY_IMAGE_COLUMN].contents_changed()
window[KEY_IMAGE_COLUMN].Update(visible = True)
except ValueError as e:
print(e)
print("length of data detected is ", len(ml_datas))
finally:
return display_image
if __name__ == "__main__":
main()
|
[
"evaluate.Evaluator"
] |
[((473, 490), 'threading.Event', 'threading.Event', ([], {}), '()\n', (488, 490), False, 'import threading\n'), ((534, 556), 'PySimpleGUI.theme', 'sg.theme', (['WINDOW_THEME'], {}), '(WINDOW_THEME)\n', (542, 556), True, 'import PySimpleGUI as sg\n'), ((572, 644), 'PySimpleGUI.Image', 'sg.Image', ([], {'size': '(900, 9000)', 'key': 'KEY_CANVAS', 'expand_x': '(True)', 'expand_y': '(True)'}), '(size=(900, 9000), key=KEY_CANVAS, expand_x=True, expand_y=True)\n', (580, 644), True, 'import PySimpleGUI as sg\n'), ((820, 864), 'PySimpleGUI.Text', 'sg.Text', (['TEXT_OUTPUT_INITIAL'], {'key': 'KEY_OUTPUT'}), '(TEXT_OUTPUT_INITIAL, key=KEY_OUTPUT)\n', (827, 864), True, 'import PySimpleGUI as sg\n'), ((2091, 2117), 'evaluate.Evaluator', 'evaluate.Evaluator', (['window'], {}), '(window)\n', (2109, 2117), False, 'import evaluate\n'), ((3140, 3232), 'threading.Thread', 'threading.Thread', ([], {'target': 'create_thread', 'args': '(window, ml_datas, eval_event)', 'daemon': '(True)'}), '(target=create_thread, args=(window, ml_datas, eval_event),\n daemon=True)\n', (3156, 3232), False, 'import threading\n'), ((6840, 6879), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'display_image'}), '(image=display_image)\n', (6858, 6879), False, 'from PIL import Image, ImageTk\n'), ((887, 910), 'PySimpleGUI.Input', 'sg.Input', ([], {'key': '"""_FILES_"""'}), "(key='_FILES_')\n", (895, 910), True, 'import PySimpleGUI as sg\n'), ((912, 1020), 'PySimpleGUI.FilesBrowse', 'sg.FilesBrowse', ([], {'key': 'KEY_BROWSE_FILES', 'file_types': 'IMAGE_FILETYPES', 'initial_folder': 'DIR_PATH', 'disabled': '(True)'}), '(key=KEY_BROWSE_FILES, file_types=IMAGE_FILETYPES,\n initial_folder=DIR_PATH, disabled=True)\n', (926, 1020), True, 'import PySimpleGUI as sg\n'), ((1036, 1102), 'PySimpleGUI.OK', 'sg.OK', ([], {'button_text': 'BUTTON_DETECT', 'key': 'BUTTON_DETECT', 'disabled': '(True)'}), '(button_text=BUTTON_DETECT, key=BUTTON_DETECT, disabled=True)\n', (1041, 1102), True, 'import PySimpleGUI as sg\n'), ((1121, 1252), 'PySimpleGUI.FileSaveAs', 'sg.FileSaveAs', ([], {'key': 'BUTTON_SAVE_FILE', 'target': 'KEY_SAVE_FILE', 'file_types': 'IMAGE_FILETYPES', 'default_extension': '""".jpg"""', 'disabled': '(True)'}), "(key=BUTTON_SAVE_FILE, target=KEY_SAVE_FILE, file_types=\n IMAGE_FILETYPES, default_extension='.jpg', disabled=True)\n", (1134, 1252), True, 'import PySimpleGUI as sg\n'), ((1386, 1445), 'PySimpleGUI.In', 'sg.In', ([], {'key': 'KEY_SAVE_FILE', 'enable_events': '(True)', 'visible': '(False)'}), '(key=KEY_SAVE_FILE, enable_events=True, visible=False)\n', (1391, 1445), True, 'import PySimpleGUI as sg\n'), ((1465, 1521), 'PySimpleGUI.Submit', 'sg.Submit', (['KEY_RERENDER'], {'key': 'KEY_RERENDER', 'disabled': '(True)'}), '(KEY_RERENDER, key=KEY_RERENDER, disabled=True)\n', (1474, 1521), True, 'import PySimpleGUI as sg\n'), ((1576, 1601), 'PySimpleGUI.Output', 'sg.Output', ([], {'size': '(200, 10)'}), '(size=(200, 10))\n', (1585, 1601), True, 'import PySimpleGUI as sg\n'), ((1687, 1734), 'PySimpleGUI.Column', 'sg.Column', (['control_group'], {'key': 'KEY_CONTROL_GROUP'}), '(control_group, key=KEY_CONTROL_GROUP)\n', (1696, 1734), True, 'import PySimpleGUI as sg\n'), ((1736, 1781), 'PySimpleGUI.Column', 'sg.Column', (['output_group'], {'key': 'KEY_OUTPUT_GROUP'}), '(output_group, key=KEY_OUTPUT_GROUP)\n', (1745, 1781), True, 'import PySimpleGUI as sg\n'), ((1829, 1941), 'PySimpleGUI.Window', 'sg.Window', (['"""Example GUI for ML Project"""'], {'resizable': '(True)', 'auto_size_text': '(True)', 'size': '(900, 800)', 
'finalize': '(True)'}), "('Example GUI for ML Project', resizable=True, auto_size_text=True,\n size=(900, 800), finalize=True)\n", (1838, 1941), True, 'import PySimpleGUI as sg\n'), ((6787, 6815), 'utility.figure_to_array', 'utility.figure_to_array', (['fig'], {}), '(fig)\n', (6810, 6815), False, 'import utility\n'), ((682, 794), 'PySimpleGUI.Column', 'sg.Column', (['[[sg_image]]'], {'key': 'KEY_IMAGE_COLUMN', 'visible': '(False)', 'scrollable': '(True)', 'expand_x': '(True)', 'expand_y': '(True)'}), '([[sg_image]], key=KEY_IMAGE_COLUMN, visible=False, scrollable=\n True, expand_x=True, expand_y=True)\n', (691, 794), True, 'import PySimpleGUI as sg\n')]
|
import argparse, torch, gc, os, random, json
from data import MyDataset, load_data, my_collate_fn, device, word2sememe, label_multihot, word2POS, word2chara, word2Cn, my_collate_fn_test
from model import Encoder
from tqdm import tqdm
from evaluate import evaluate, evaluate_test
import numpy as np
def main(epoch_num, batch_size, verbose, UNSEEN, SEEN, MODE):
[hownet_file, sememe_file, word_index_file, word_vector_file, dictionary_file, word_cilinClass_file] = ['hownet.json', 'sememe.json', 'word_index.json', 'word_vector.npy', 'dictionary_sense.json', 'word_cilinClass.json']
word2index, index2word, word2vec, sememe_num, label_size, label_size_chara, word_defi_idx_all = load_data(hownet_file, sememe_file, word_index_file, word_vector_file, dictionary_file, word_cilinClass_file)
(word_defi_idx_TrainDev, word_defi_idx_seen, word_defi_idx_test2000, word_defi_idx_test200, word_defi_idx_test272) = word_defi_idx_all
index2word = np.array(index2word)
length = len(word_defi_idx_TrainDev)
valid_dataset = MyDataset(word_defi_idx_TrainDev[int(0.9*length):])
test_dataset = MyDataset(word_defi_idx_test2000 + word_defi_idx_test200 + word_defi_idx_test272)
if SEEN:
mode = 'S_'+MODE
print('*METHOD: Seen defi.')
print('*TRAIN: [Train + allSeen(2000+200+272)]')
print('*TEST: [2000rand1 + 200desc + 272desc]')
train_dataset = MyDataset(word_defi_idx_TrainDev[:int(0.9*length)] + word_defi_idx_seen)
elif UNSEEN:
mode = 'U_'+MODE
print('*METHOD: Unseen All words and defi.')
print('*TRAIN: [Train]')
print('*TEST: [2000rand1 + 200desc + 272desc]')
        train_dataset = MyDataset(word_defi_idx_TrainDev[:int(0.9*length)])
    else:
        raise ValueError('Either SEEN or UNSEEN must be set.')
    print('*MODE: [%s]'%mode)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=my_collate_fn)
valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, collate_fn=my_collate_fn)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=my_collate_fn_test)
print('Train dataset: ', len(train_dataset))
print('Valid dataset: ', len(valid_dataset))
print('Test dataset: ', len(test_dataset))
word_defi_idx = word_defi_idx_TrainDev + word_defi_idx_seen
wd2sem = word2sememe(word_defi_idx, len(word2index), sememe_num)
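    # Expand each word's sememe index list into a multi-hot target vector.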
wd_sems = label_multihot(wd2sem, sememe_num)
wd_sems = torch.from_numpy(np.array(wd_sems[:label_size])).to(device)
wd_POSs = label_multihot(word2POS(word_defi_idx, len(word2index), 13), 13)
wd_POSs = torch.from_numpy(np.array(wd_POSs[:label_size])).to(device)
wd_charas = label_multihot(word2chara(word_defi_idx, len(word2index), label_size_chara), label_size_chara)
wd_charas = torch.from_numpy(np.array(wd_charas[:label_size])).to(device)
wd2Cilin1 = word2Cn(word_defi_idx, len(word2index), 'C1', 13)
    wd_C1 = label_multihot(wd2Cilin1, 13)  # Cilin class counts per level: 13 / 96 / 1426 / 4098
wd_C1 = torch.from_numpy(np.array(wd_C1[:label_size])).to(device)
wd_C2 = label_multihot(word2Cn(word_defi_idx, len(word2index), 'C2', 96), 96)
wd_C2 = torch.from_numpy(np.array(wd_C2[:label_size])).to(device)
wd_C3 = label_multihot(word2Cn(word_defi_idx, len(word2index), 'C3', 1426), 1426)
wd_C3 = torch.from_numpy(np.array(wd_C3[:label_size])).to(device)
wd_C4 = label_multihot(word2Cn(word_defi_idx, len(word2index), 'C4', 4098), 4098)
wd_C4 = torch.from_numpy(np.array(wd_C4[:label_size])).to(device)
'''wd2Cilin = word2Cn(word_defi_idx, len(word2index), 'C', 5633)
wd_C0 = label_multihot(wd2Cilin, 5633)
wd_C0 = torch.from_numpy(np.array(wd_C0[:label_size])).to(device)
wd_C = [wd_C1, wd_C2, wd_C3, wd_C4, wd_C0]
'''
wd_C = [wd_C1, wd_C2, wd_C3, wd_C4]
#----------mask of no sememes
print('calculating mask of no sememes...')
mask_s = torch.zeros(label_size, dtype=torch.float32, device=device)
for i in range(label_size):
sems = set(wd2sem[i].detach().cpu().numpy().tolist())-set([sememe_num])
if len(sems)==0:
mask_s[i] = 1
mask_c = torch.zeros(label_size, dtype=torch.float32, device=device)
for i in range(label_size):
cc = set(wd2Cilin1[i].detach().cpu().numpy().tolist())-set([13])
if len(cc)==0:
mask_c[i] = 1
model = Encoder(vocab_size=len(word2index), embed_dim=word2vec.shape[1], hidden_dim=200, layers=1, class_num=label_size, sememe_num=sememe_num, chara_num=label_size_chara)
model.embedding.weight.data = torch.from_numpy(word2vec)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001) # Adam
best_valid_accu = 0
DEF_UPDATE = True
for epoch in range(epoch_num):
print('epoch: ', epoch)
model.train()
train_loss = 0
label_list = list()
pred_list = list()
for words_t, sememes_t, definition_words_t, POS_t, sememes, POSs, charas_t, C, C_t in tqdm(train_dataloader, disable=verbose):
optimizer.zero_grad()
loss, _, indices = model('train', x=definition_words_t, w=words_t, ws=wd_sems, wP=wd_POSs, wc=wd_charas, wC=wd_C, msk_s=mask_s, msk_c=mask_c, mode=MODE)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
predicted = indices[:, :100].detach().cpu().numpy().tolist()
train_loss += loss.item()
label_list.extend(words_t.detach().cpu().numpy())
pred_list.extend(predicted)
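        # evaluate ranks the gold word within the top-100 predictions (accuracy@1/10/100).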
train_accu_1, train_accu_10, train_accu_100 = evaluate(label_list, pred_list)
del label_list
del pred_list
gc.collect()
print('train_loss: ', train_loss/len(train_dataset))
print('train_accu(1/10/100): %.2f %.2F %.2f'%(train_accu_1, train_accu_10, train_accu_100))
model.eval()
with torch.no_grad():
valid_loss = 0
label_list = []
pred_list = []
for words_t, sememes_t, definition_words_t, POS_t, sememes, POSs, charas_t, C, C_t in tqdm(valid_dataloader, disable=verbose):
loss, _, indices = model('train', x=definition_words_t, w=words_t, ws=wd_sems, wP=wd_POSs, wc=wd_charas, wC=wd_C, msk_s=mask_s, msk_c=mask_c, mode=MODE)
predicted = indices[:, :100].detach().cpu().numpy().tolist()
valid_loss += loss.item()
label_list.extend(words_t.detach().cpu().numpy())
pred_list.extend(predicted)
valid_accu_1, valid_accu_10, valid_accu_100 = evaluate(label_list, pred_list)
print('valid_loss: ', valid_loss/len(valid_dataset))
print('valid_accu(1/10/100): %.2f %.2F %.2f'%(valid_accu_1, valid_accu_10, valid_accu_100))
del label_list
del pred_list
gc.collect()
if valid_accu_10>best_valid_accu:
best_valid_accu = valid_accu_10
print('-----best_valid_accu-----')
#torch.save(model, 'saved.model')
label_list = []
pred_list = []
for words_t, definition_words_t in tqdm(test_dataloader, disable=verbose):
indices = model('test', x=definition_words_t, w=words_t, ws=wd_sems, wP=wd_POSs, wc=wd_charas, wC=wd_C, msk_s=mask_s, msk_c=mask_c, mode=MODE)
predicted = indices[:, :1000].detach().cpu().numpy().tolist()
label_list.extend(words_t.detach().cpu().numpy())
pred_list.extend(predicted)
test_accu_1, test_accu_10, test_accu_100, median, variance = evaluate_test(label_list, pred_list)
print('test_accu(1/10/100): %.2f %.2F %.2f %.1f %.2f'%(test_accu_1, test_accu_10, test_accu_100, median, variance))
if epoch>10:
json.dump((index2word[label_list]).tolist(), open(mode+'_label_list.json', 'w'))
json.dump((index2word[np.array(pred_list)]).tolist(), open(mode+'_pred_list.json', 'w'))
del label_list
del pred_list
gc.collect()
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epoch_num', type=int, default=50)
parser.add_argument('-v', '--verbose',default=True, action='store_false')
parser.add_argument('-g', '--gpu', type=str, default='0')
parser.add_argument('-b', '--batch_size', type=int, default=128) # 256
parser.add_argument('-u', '--unseen', default=False, action='store_true')
parser.add_argument('-s', '--seen', default=False, action='store_true')
parser.add_argument('-m', '--mode', type=str, default='b')
parser.add_argument('-sd', '--seed', type=int, default=543624)
args = parser.parse_args()
setup_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
main(args.epoch_num, args.batch_size, args.verbose, args.unseen, args.seen, args.mode)
|
[
"evaluate.evaluate_test",
"evaluate.evaluate"
] |
[((695, 808), 'data.load_data', 'load_data', (['hownet_file', 'sememe_file', 'word_index_file', 'word_vector_file', 'dictionary_file', 'word_cilinClass_file'], {}), '(hownet_file, sememe_file, word_index_file, word_vector_file,\n dictionary_file, word_cilinClass_file)\n', (704, 808), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2sememe, label_multihot, word2POS, word2chara, word2Cn, my_collate_fn_test\n'), ((963, 983), 'numpy.array', 'np.array', (['index2word'], {}), '(index2word)\n', (971, 983), True, 'import numpy as np\n'), ((1119, 1204), 'data.MyDataset', 'MyDataset', (['(word_defi_idx_test2000 + word_defi_idx_test200 + word_defi_idx_test272)'], {}), '(word_defi_idx_test2000 + word_defi_idx_test200 +\n word_defi_idx_test272)\n', (1128, 1204), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2sememe, label_multihot, word2POS, word2chara, word2Cn, my_collate_fn_test\n'), ((1815, 1925), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'collate_fn': 'my_collate_fn'}), '(train_dataset, batch_size=batch_size, shuffle=\n True, collate_fn=my_collate_fn)\n', (1842, 1925), False, 'import argparse, torch, gc, os, random, json\n'), ((1945, 2055), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'collate_fn': 'my_collate_fn'}), '(valid_dataset, batch_size=batch_size, shuffle=\n True, collate_fn=my_collate_fn)\n', (1972, 2055), False, 'import argparse, torch, gc, os, random, json\n'), ((2074, 2189), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'collate_fn': 'my_collate_fn_test'}), '(test_dataset, batch_size=batch_size, shuffle=\n False, collate_fn=my_collate_fn_test)\n', (2101, 2189), False, 'import argparse, torch, gc, os, random, json\n'), ((2499, 2533), 'data.label_multihot', 'label_multihot', (['wd2sem', 'sememe_num'], {}), '(wd2sem, sememe_num)\n', (2513, 2533), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2sememe, label_multihot, word2POS, word2chara, word2Cn, my_collate_fn_test\n'), ((3035, 3064), 'data.label_multihot', 'label_multihot', (['wd2Cilin1', '(13)'], {}), '(wd2Cilin1, 13)\n', (3049, 3064), False, 'from data import MyDataset, load_data, my_collate_fn, device, word2sememe, label_multihot, word2POS, word2chara, word2Cn, my_collate_fn_test\n'), ((4007, 4066), 'torch.zeros', 'torch.zeros', (['label_size'], {'dtype': 'torch.float32', 'device': 'device'}), '(label_size, dtype=torch.float32, device=device)\n', (4018, 4066), False, 'import argparse, torch, gc, os, random, json\n'), ((4250, 4309), 'torch.zeros', 'torch.zeros', (['label_size'], {'dtype': 'torch.float32', 'device': 'device'}), '(label_size, dtype=torch.float32, device=device)\n', (4261, 4309), False, 'import argparse, torch, gc, os, random, json\n'), ((4686, 4712), 'torch.from_numpy', 'torch.from_numpy', (['word2vec'], {}), '(word2vec)\n', (4702, 4712), False, 'import argparse, torch, gc, os, random, json\n'), ((8414, 8437), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (8431, 8437), False, 'import argparse, torch, gc, os, random, json\n'), ((8443, 8475), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (8469, 8475), False, 'import argparse, torch, gc, os, random, json\n'), ((8481, 8501), 'numpy.random.seed', 'np.random.seed', 
(['seed'], {}), '(seed)\n', (8495, 8501), True, 'import numpy as np\n'), ((8507, 8524), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (8518, 8524), False, 'import argparse, torch, gc, os, random, json\n'), ((8624, 8649), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8647, 8649), False, 'import argparse, torch, gc, os, random, json\n'), ((5124, 5163), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'disable': 'verbose'}), '(train_dataloader, disable=verbose)\n', (5128, 5163), False, 'from tqdm import tqdm\n'), ((5764, 5795), 'evaluate.evaluate', 'evaluate', (['label_list', 'pred_list'], {}), '(label_list, pred_list)\n', (5772, 5795), False, 'from evaluate import evaluate, evaluate_test\n'), ((5852, 5864), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5862, 5864), False, 'import argparse, torch, gc, os, random, json\n'), ((6064, 6079), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6077, 6079), False, 'import argparse, torch, gc, os, random, json\n'), ((6265, 6304), 'tqdm.tqdm', 'tqdm', (['valid_dataloader'], {'disable': 'verbose'}), '(valid_dataloader, disable=verbose)\n', (6269, 6304), False, 'from tqdm import tqdm\n'), ((6768, 6799), 'evaluate.evaluate', 'evaluate', (['label_list', 'pred_list'], {}), '(label_list, pred_list)\n', (6776, 6799), False, 'from evaluate import evaluate, evaluate_test\n'), ((7039, 7051), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7049, 7051), False, 'import argparse, torch, gc, os, random, json\n'), ((2566, 2596), 'numpy.array', 'np.array', (['wd_sems[:label_size]'], {}), '(wd_sems[:label_size])\n', (2574, 2596), True, 'import numpy as np\n'), ((2721, 2751), 'numpy.array', 'np.array', (['wd_POSs[:label_size]'], {}), '(wd_POSs[:label_size])\n', (2729, 2751), True, 'import numpy as np\n'), ((2910, 2942), 'numpy.array', 'np.array', (['wd_charas[:label_size]'], {}), '(wd_charas[:label_size])\n', (2918, 2942), True, 'import numpy as np\n'), ((3112, 3140), 'numpy.array', 'np.array', (['wd_C1[:label_size]'], {}), '(wd_C1[:label_size])\n', (3120, 3140), True, 'import numpy as np\n'), ((3267, 3295), 'numpy.array', 'np.array', (['wd_C2[:label_size]'], {}), '(wd_C2[:label_size])\n', (3275, 3295), True, 'import numpy as np\n'), ((3426, 3454), 'numpy.array', 'np.array', (['wd_C3[:label_size]'], {}), '(wd_C3[:label_size])\n', (3434, 3454), True, 'import numpy as np\n'), ((3585, 3613), 'numpy.array', 'np.array', (['wd_C4[:label_size]'], {}), '(wd_C4[:label_size])\n', (3593, 3613), True, 'import numpy as np\n'), ((7382, 7420), 'tqdm.tqdm', 'tqdm', (['test_dataloader'], {'disable': 'verbose'}), '(test_dataloader, disable=verbose)\n', (7386, 7420), False, 'from tqdm import tqdm\n'), ((7867, 7903), 'evaluate.evaluate_test', 'evaluate_test', (['label_list', 'pred_list'], {}), '(label_list, pred_list)\n', (7880, 7903), False, 'from evaluate import evaluate, evaluate_test\n'), ((8359, 8371), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8369, 8371), False, 'import argparse, torch, gc, os, random, json\n'), ((8212, 8231), 'numpy.array', 'np.array', (['pred_list'], {}), '(pred_list)\n', (8220, 8231), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from evaluate import evaluate
from models import CRISSWrapper, LexiconInducer
cos = nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.save_path = configs.save_path.format(src=configs.src_lang, trg=configs.trg_lang)
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']] # only focus on inter based alignment
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
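            # coocc counts every source-target pair; semi_matched_coocc counts aligned pairs; matched_coocc only one-to-one links.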
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
def load_lexicon(path):
lexicon = [regex.split(r'\t| ', x.strip()) for x in open(path)]
return set([tuple(x) for x in lexicon])
def get_test_lexicon(test_lexicon, info):
induced_lexicon = list()
coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg = info
for tsw in tqdm(set([x[0] for x in test_lexicon])):
ssw = to_simplified(tsw)
candidates = list()
for stw in matched_coocc[ssw]:
ttw = to_traditional(stw)
candidates.append([tsw, ttw, matched_coocc[ssw][stw] / (coocc[ssw][stw] + 20)])
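            # The +20 additive smoothing downweights pairs with few total co-occurrences.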
if len(candidates) == 0:
continue
candidates = sorted(candidates, key=lambda x:-x[-1])
induced_lexicon.append(candidates[0][:2])
eval_result = evaluate(induced_lexicon, test_lexicon)
return induced_lexicon, eval_result
def test(configs, logging_steps=50000):
setup_configs(configs)
# prepare feature extractor
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path, configs.src_lang, configs.trg_lang, configs.reversed
)
# dataset
test_lexicon = load_lexicon(configs.test_set)
induced_test_lexicon, test_eval = get_test_lexicon(test_lexicon, info)
with open(configs.save_path + '/induced.fullyunsup.dict', 'w') as fout:
for item in induced_test_lexicon:
fout.write('\t'.join([str(x) for x in item]) + '\n')
return induced_test_lexicon, test_eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-te', '--test', type=str, help='path to test lexicon')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'test_set': args.test,
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 50,
'device': args.device,
'hiddens': [8]
}
)
res = test(configs)
print(res[-1])
|
[
"evaluate.evaluate"
] |
[((402, 429), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(-1)'}), '(dim=-1)\n', (421, 429), True, 'import torch.nn as nn\n'), ((801, 827), 'os.path.exists', 'os.path.exists', (['stats_path'], {}), '(stats_path)\n', (815, 827), False, 'import os\n'), ((2770, 2795), 'os.path.exists', 'os.path.exists', (['freq_path'], {}), '(freq_path)\n', (2784, 2795), False, 'import os\n'), ((4634, 4673), 'evaluate.evaluate', 'evaluate', (['induced_lexicon', 'test_lexicon'], {}), '(induced_lexicon, test_lexicon)\n', (4642, 4673), False, 'from evaluate import evaluate\n'), ((5404, 5429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5427, 5429), False, 'import argparse\n'), ((6088, 6288), 'dotdict.DotDict', 'dotdict.DotDict', (["{'test_set': args.test, 'align_path': args.align, 'bitext_path': args.\n bitext, 'save_path': args.output, 'batch_size': 128, 'epochs': 50,\n 'device': args.device, 'hiddens': [8]}"], {}), "({'test_set': args.test, 'align_path': args.align,\n 'bitext_path': args.bitext, 'save_path': args.output, 'batch_size': 128,\n 'epochs': 50, 'device': args.device, 'hiddens': [8]})\n", (6103, 6288), False, 'import dotdict\n'), ((880, 902), 'torch.load', 'torch.load', (['stats_path'], {}), '(stats_path)\n', (890, 902), False, 'import torch\n'), ((929, 973), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (952, 973), False, 'import collections\n'), ((1003, 1047), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (1026, 1047), False, 'import collections\n'), ((1072, 1116), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (1095, 1116), False, 'import collections\n'), ((1134, 1163), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1161, 1163), False, 'import tempfile\n'), ((1172, 1230), 'os.system', 'os.system', (['f"""cat {bitext_path} > {tmpdir.name}/bitext.txt"""'], {}), "(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')\n", (1181, 1230), False, 'import os\n'), ((1239, 1296), 'os.system', 'os.system', (['f"""cat {align_path} > {tmpdir.name}/aligns.txt"""'], {}), "(f'cat {align_path} > {tmpdir.name}/aligns.txt')\n", (1248, 1296), False, 'import os\n'), ((1504, 1516), 'tqdm.tqdm', 'tqdm', (['bitext'], {}), '(bitext)\n', (1508, 1516), False, 'from tqdm import tqdm\n'), ((2696, 2762), 'torch.save', 'torch.save', (['(coocc, semi_matched_coocc, matched_coocc)', 'stats_path'], {}), '((coocc, semi_matched_coocc, matched_coocc), stats_path)\n', (2706, 2762), False, 'import torch\n'), ((2826, 2847), 'torch.load', 'torch.load', (['freq_path'], {}), '(freq_path)\n', (2836, 2847), False, 'import torch\n'), ((2877, 2898), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2896, 2898), False, 'import collections\n'), ((2918, 2939), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2937, 2939), False, 'import collections\n'), ((2957, 2986), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2984, 2986), False, 'import tempfile\n'), ((2995, 3053), 'os.system', 'os.system', (['f"""cat {bitext_path} > {tmpdir.name}/bitext.txt"""'], {}), "(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')\n", (3004, 3053), False, 'import os\n'), ((3156, 3168), 'tqdm.tqdm', 'tqdm', (['bitext'], {}), '(bitext)\n', (3160, 3168), False, 'from tqdm import tqdm\n'), ((3762, 3805), 'torch.save', 'torch.save', 
(['(freq_src, freq_trg)', 'freq_path'], {}), '((freq_src, freq_trg), freq_path)\n', (3772, 3805), False, 'import torch\n'), ((4235, 4253), 'chinese_converter.to_simplified', 'to_simplified', (['tsw'], {}), '(tsw)\n', (4248, 4253), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((2228, 2270), 'collections.Counter', 'collections.Counter', (['[x[0] for x in align]'], {}), '([x[0] for x in align])\n', (2247, 2270), False, 'import collections\n'), ((2293, 2335), 'collections.Counter', 'collections.Counter', (['[x[1] for x in align]'], {}), '([x[1] for x in align])\n', (2312, 2335), False, 'import collections\n'), ((4339, 4358), 'chinese_converter.to_traditional', 'to_traditional', (['stw'], {}), '(stw)\n', (4353, 4358), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((1997, 2020), 'chinese_converter.to_simplified', 'to_simplified', (['src_sent'], {}), '(src_sent)\n', (2010, 2020), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((2084, 2107), 'chinese_converter.to_simplified', 'to_simplified', (['trg_sent'], {}), '(trg_sent)\n', (2097, 2107), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((3499, 3522), 'chinese_converter.to_simplified', 'to_simplified', (['src_sent'], {}), '(src_sent)\n', (3512, 3522), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((3586, 3609), 'chinese_converter.to_simplified', 'to_simplified', (['trg_sent'], {}), '(trg_sent)\n', (3599, 3609), False, 'from chinese_converter import to_traditional, to_simplified\n'), ((1818, 1839), 'json.loads', 'json.loads', (['aligns[i]'], {}), '(aligns[i])\n', (1828, 1839), False, 'import json\n')]
|
#!/usr/bin/env python
# <NAME> (<EMAIL>)
# April 2018
import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(BASE_DIR, '..'))
from datasets import dataset
from datetime import datetime
from generate_outputs import *
from network import Network
from network_sem_seg import NetworkSemSeg
from train_util import validate, train
import argparse
import numpy as np
import evaluate
import evaluate_keypoints
import evaluate_obj_det
import evaluate_sem_seg
import random
import tensorflow as tf
'''
parser = argparse.ArgumentParser()
parser.add_argument('--exp_type', type=str, default='ours',\
choices=['ours', 'sem_seg'])
parser.add_argument('--eval_type', action='append', default=['eval'],
help='[eval, eval_keypoints, eval_obj_det, save_dict]')
parser.add_argument('--net_options', action='append', default=['softmax', 'use_stn'],
help='[softmax, column_softmax, sigmoid, clip_A, use_stn, pointnet2]')
parser.add_argument('--in_model_dirs', type=str, default='', help='')
parser.add_argument('--in_model_scopes', type=str, default='', help='')
parser.add_argument('--out_model_dir', type=str, default='model', help='')
parser.add_argument('--out_dir', type=str, default='outputs', help='')
parser.add_argument('--log_dir', type=str, default='log', help='')
parser.add_argument('--train', action="store_true", help='')
parser.add_argument('--init_learning_rate', type=float, default=0.001,\
help='Initial learning rate [default: 0.001]')
parser.add_argument('--decay_step', type=int, default=200000,\
help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7,\
help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--bn_decay_step', type=int, default=200000,\
help='Decay step for bn decay [default: 200000]')
parser.add_argument('--n_epochs', type=int, default=1000,\
help='Number of epochs')
parser.add_argument('--batch_size', type=int, default=32,\
help='Batch size')
parser.add_argument('--snapshot_epoch', type=int, default=100,\
help='Interval of snapshot')
parser.add_argument('--validation_epoch', type=int, default=10,\
help='Interval of validation')
parser.add_argument('--K', type=int, default=10,\
help='Number of predicted basis functions [default: 10]')
parser.add_argument('--l21_norm_weight', type=float, default=0.0,
help='L2,1 norm regularizer weight [default: 0.0]')
parser.add_argument('--part_removal_fraction', type=float, default=0.0,
help='Fraction of parts to be removed [default: 0.0]')
parser.add_argument('--indicator_noise_probability', type=float, default=0.0,
help='Probability of adding noise in indicator functions [default: 0.0]')
args = parser.parse_args()
'''
def load_model(sess, in_model_dir, include=''):
# Read variables names in checkpoint.
var_names = [x for x,_ in tf.contrib.framework.list_variables(in_model_dir)]
# Find variables with given names.
# HACK:
# Convert unicode to string and remove postfix ':0'.
var_list = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\
if str(x.name)[:-2] in var_names]
if include != '':
var_list = [x for x in var_list if include in x.name]
#print([x.name for x in var_list])
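    # Restore only variables present in both the checkpoint and the current graph.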
saver = tf.train.Saver(var_list)
ckpt = tf.train.get_checkpoint_state(in_model_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print ("Loaded '{}'.".format(ckpt.model_checkpoint_path))
else:
print ("Failed to loaded '{}'.".format(in_model_dir))
return False
return True
def run(args, train_data, val_data, test_data):
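    # Pin TF, NumPy, and Python RNG seeds for reproducible runs.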
tf.set_random_seed(1234)
np.random.seed(1234)
random.seed(1234)
print('\n==== PARAMS ====')
for arg in vars(args):
print('{}={}'.format(arg, getattr(args, arg)))
print('========\n')
if args.exp_type == 'ours':
net = Network(train_data.n_points, train_data.n_dim,
test_data.n_seg_ids, args.K, args.batch_size,
args.init_learning_rate, args.decay_step, args.decay_rate,
args.bn_decay_step, args.l21_norm_weight, args.net_options)
elif args.exp_type == 'sem_seg':
print("## Sementic Segmentation ##")
net = NetworkSemSeg(train_data.n_points, train_data.n_dim,
train_data.n_labels, args.batch_size, args.init_learning_rate,
args.decay_step, args.decay_rate, args.bn_decay_step,
args.net_options)
else:
assert(False)
config = tf.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.allow_growth = True
with tf.Session(config=config, graph=net.graph) as sess:
sess.run(tf.global_variables_initializer(), {net.is_training: True})
if args.in_model_dirs:
include = ''
for in_model_dir in args.in_model_dirs.split(','):
assert(load_model(sess, in_model_dir, include))
if args.train:
train(sess, net, args.exp_type, train_data, val_data,
n_epochs=args.n_epochs, snapshot_epoch=args.snapshot_epoch,
validation_epoch=args.validation_epoch,
model_dir=args.out_model_dir, log_dir=args.log_dir,
data_name=train_data.name, output_generator=None)
"""
train_loss, _ = validate(sess, net, args.exp_type, train_data)
test_loss, _ = validate(sess, net, args.exp_type, test_data)
msg = "|| Train Loss: {:6f}".format(train_loss)
msg += " | Test Loss: {:6f}".format(test_loss)
msg += " ||"
print(msg)
if args.train:
# Save training result.
if not os.path.exists(args.out_dir): os.makedirs(args.out_dir)
out_file = os.path.join(args.out_dir, '{}.txt'.format(
datetime.now().strftime("%Y-%m-%d_%H-%M-%S")))
with open(out_file, 'w') as f:
f.write(msg + '\n')
print("Saved '{}'.".format(out_file))
"""
if args.exp_type == 'ours':
if 'eval' in args.eval_type:
evaluate.evaluate(sess, net, test_data, args.out_dir)
if 'eval_keypoints' in args.eval_type:
evaluate_keypoints.evaluate(sess, net, test_data, args.out_dir)
if 'eval_obj_det' in args.eval_type:
evaluate_obj_det.evaluate(sess, net, test_data, args.out_dir)
if 'save_dict' in args.eval_type:
P = test_data.point_clouds
A = predict_A(P, sess, net)
out_file = os.path.join(args.out_dir, 'dictionary.npy')
np.save(out_file, A)
print("Saved '{}'".format(out_file))
elif args.exp_type == 'sem_seg':
evaluate_sem_seg.evaluate(sess, net, test_data, args.out_dir)
|
[
"evaluate.evaluate"
] |
[((181, 209), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""'], {}), "(BASE_DIR, '..')\n", (193, 209), False, 'import os, sys\n'), ((3418, 3442), 'tensorflow.train.Saver', 'tf.train.Saver', (['var_list'], {}), '(var_list)\n', (3432, 3442), True, 'import tensorflow as tf\n'), ((3455, 3498), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['in_model_dir'], {}), '(in_model_dir)\n', (3484, 3498), True, 'import tensorflow as tf\n'), ((3828, 3852), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (3846, 3852), True, 'import tensorflow as tf\n'), ((3857, 3877), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (3871, 3877), True, 'import numpy as np\n'), ((3882, 3899), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (3893, 3899), False, 'import random\n'), ((4726, 4742), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4740, 4742), True, 'import tensorflow as tf\n'), ((4087, 4307), 'network.Network', 'Network', (['train_data.n_points', 'train_data.n_dim', 'test_data.n_seg_ids', 'args.K', 'args.batch_size', 'args.init_learning_rate', 'args.decay_step', 'args.decay_rate', 'args.bn_decay_step', 'args.l21_norm_weight', 'args.net_options'], {}), '(train_data.n_points, train_data.n_dim, test_data.n_seg_ids, args.K,\n args.batch_size, args.init_learning_rate, args.decay_step, args.\n decay_rate, args.bn_decay_step, args.l21_norm_weight, args.net_options)\n', (4094, 4307), False, 'from network import Network\n'), ((4835, 4877), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config', 'graph': 'net.graph'}), '(config=config, graph=net.graph)\n', (4845, 4877), True, 'import tensorflow as tf\n'), ((136, 161), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os, sys\n'), ((2998, 3047), 'tensorflow.contrib.framework.list_variables', 'tf.contrib.framework.list_variables', (['in_model_dir'], {}), '(in_model_dir)\n', (3033, 3047), True, 'import tensorflow as tf\n'), ((3185, 3233), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (3202, 3233), True, 'import tensorflow as tf\n'), ((4443, 4639), 'network_sem_seg.NetworkSemSeg', 'NetworkSemSeg', (['train_data.n_points', 'train_data.n_dim', 'train_data.n_labels', 'args.batch_size', 'args.init_learning_rate', 'args.decay_step', 'args.decay_rate', 'args.bn_decay_step', 'args.net_options'], {}), '(train_data.n_points, train_data.n_dim, train_data.n_labels,\n args.batch_size, args.init_learning_rate, args.decay_step, args.\n decay_rate, args.bn_decay_step, args.net_options)\n', (4456, 4639), False, 'from network_sem_seg import NetworkSemSeg\n'), ((4904, 4937), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4935, 4937), True, 'import tensorflow as tf\n'), ((5184, 5453), 'train_util.train', 'train', (['sess', 'net', 'args.exp_type', 'train_data', 'val_data'], {'n_epochs': 'args.n_epochs', 'snapshot_epoch': 'args.snapshot_epoch', 'validation_epoch': 'args.validation_epoch', 'model_dir': 'args.out_model_dir', 'log_dir': 'args.log_dir', 'data_name': 'train_data.name', 'output_generator': 'None'}), '(sess, net, args.exp_type, train_data, val_data, n_epochs=args.\n n_epochs, snapshot_epoch=args.snapshot_epoch, validation_epoch=args.\n validation_epoch, model_dir=args.out_model_dir, log_dir=args.log_dir,\n data_name=train_data.name, output_generator=None)\n', (5189, 5453), False, 
'from train_util import validate, train\n'), ((6323, 6376), 'evaluate.evaluate', 'evaluate.evaluate', (['sess', 'net', 'test_data', 'args.out_dir'], {}), '(sess, net, test_data, args.out_dir)\n', (6340, 6376), False, 'import evaluate\n'), ((6444, 6507), 'evaluate_keypoints.evaluate', 'evaluate_keypoints.evaluate', (['sess', 'net', 'test_data', 'args.out_dir'], {}), '(sess, net, test_data, args.out_dir)\n', (6471, 6507), False, 'import evaluate_keypoints\n'), ((6573, 6634), 'evaluate_obj_det.evaluate', 'evaluate_obj_det.evaluate', (['sess', 'net', 'test_data', 'args.out_dir'], {}), '(sess, net, test_data, args.out_dir)\n', (6598, 6634), False, 'import evaluate_obj_det\n'), ((6795, 6839), 'os.path.join', 'os.path.join', (['args.out_dir', '"""dictionary.npy"""'], {}), "(args.out_dir, 'dictionary.npy')\n", (6807, 6839), False, 'import os, sys\n'), ((6856, 6876), 'numpy.save', 'np.save', (['out_file', 'A'], {}), '(out_file, A)\n', (6863, 6876), True, 'import numpy as np\n'), ((6983, 7044), 'evaluate_sem_seg.evaluate', 'evaluate_sem_seg.evaluate', (['sess', 'net', 'test_data', 'args.out_dir'], {}), '(sess, net, test_data, args.out_dir)\n', (7008, 7044), False, 'import evaluate_sem_seg\n')]
|
# Lint as: python3
"""Pipeline for training and evaluating a loudness predictor."""
import os
from typing import List
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
from data_processing import get_datasets, get_testdata
from evaluate import evaluate, write_predictions
from model import LoudnessPredictor
import train
import helpers
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string(
"mode",
"train",
"Whether to train, test, or predict.")
flags.DEFINE_string(
"loudness_traindata_proto_file_pattern",
"../../data/preprocessed_data/all_dl_data/traindataraw_*",
"Proto file to read loudness data from.")
flags.DEFINE_string(
"extra_loudness_traindata_proto_file_pattern",
"../../data/preprocessed_data/all_dl_data/extratraindataraw_*",
"Proto file to read loudness data from.")
flags.DEFINE_string(
"loudness_testdata_proto_file_pattern",
"../../data/preprocessed_data/all_dl_data/traindataraw_*",
"Proto file to read loudness data from.")
flags.DEFINE_bool("use_carfac", True,
"Whether to use CARFAC features or not.")
flags.DEFINE_string(
"logs_dir",
"logs/loudness_predictor",
"Directory to put summary logs.")
flags.DEFINE_string(
"load_from_checkpoint",
"",
"Which checkpoint from log dir to load when starting (e.g., cp.ckpt).")
flags.DEFINE_integer(
"num_rows_channel_kernel",
5,
"Number of rows on the kernel that will convolve the input across "
"CARFAC channels; it will summarize num_rows_channel_kernel channels.")
flags.DEFINE_integer(
"num_cols_channel_kernel",
1,
"Number of cols on the kernel that will convolve the input across "
"CARFAC channels; it will summarize num_cols_channel_kernel freq. bins.")
flags.DEFINE_integer(
"num_filters_channels",
5,
"Number of filters when convolving channels across bins.")
flags.DEFINE_integer(
"num_rows_bin_kernel",
1,
"Number of rows on the kernel that will convolve the input across "
"frequency bins; it will summarize num_rows_bin_kernel CARFAC channels.")
flags.DEFINE_integer(
"num_cols_bin_kernel",
1,
"Number of rows on the kernel that will convolve the input across "
"frequency bins; it will summarize num_cols_bin_kernel frequency bins.")
flags.DEFINE_integer(
"num_filters_bins",
5,
"Number of filters when convolving bins across channels.")
flags.DEFINE_integer(
"batch_size",
1,
"Batch size when training.")
flags.DEFINE_integer(
"epochs",
150,
"Number of epochs (full passes of the training data) to train the model.")
flags.DEFINE_string(
"save_model_to_dir",
"saved_model",
"Destination directory for saving the model before early exit.")
flags.DEFINE_string(
"load_model_from_file",
"",
"A saved model to eval once and then return.")
flags.DEFINE_string(
"save_predictions_file",
"predictions.txt",
"A saved model to eval once and then return.")
flags.DEFINE_integer(
"seed",
4,
"TensorFlow random seed.")
flags.DEFINE_float("learning_rate", 1e-3, "The initial learning rate for Adam.")
flags.DEFINE_float("dropout_p", 0., "Dropout to apply to the fc layers.")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
tf.random.set_seed(FLAGS.seed)
if FLAGS.loudness_traindata_proto_file_pattern is None:
raise app.UsageError("Must provide --loudness_data_proto_file_pattern.")
log_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), FLAGS.logs_dir)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
logging.info("TensorFlow seed: %d", FLAGS.seed)
input_shape = None
if FLAGS.mode == "test":
raise NotImplementedError("Did not implement mode test.")
data = get_datasets(FLAGS.loudness_testdata_proto_file_pattern, 1,
carfac=FLAGS.use_carfac)
logging.info("Created testing datasets")
model = tf.keras.models.load_model(FLAGS.load_model_from_file)
logging.info("Loaded model")
elif FLAGS.mode == "train":
data = get_datasets(FLAGS.loudness_traindata_proto_file_pattern,
FLAGS.batch_size, carfac=FLAGS.use_carfac,
extra_file_pattern=FLAGS.extra_loudness_traindata_proto_file_pattern)
frequency_bins = None
for example in data["train"].take(1):
input_example, target_example = example
input_shape = input_example.shape
carfac_channels = input_example.shape[1]
frequency_bins = input_example.shape[2]
logging.info("Created model")
elif FLAGS.mode == "eval_once":
data = get_testdata(FLAGS.loudness_testdata_proto_file_pattern,
carfac=FLAGS.use_carfac)
frequency_bins = None
for example in data["test"].take(1):
input_example, target_example, _ = example
input_shape = input_example.shape
carfac_channels = input_example.shape[1]
frequency_bins = input_example.shape[2]
model = LoudnessPredictor(
frequency_bins=frequency_bins,
carfac_channels=carfac_channels,
num_rows_channel_kernel=FLAGS.num_rows_channel_kernel,
num_cols_channel_kernel=FLAGS.num_cols_channel_kernel,
num_filters_channels=FLAGS.num_filters_channels,
num_rows_bin_kernel=FLAGS.num_rows_bin_kernel,
num_cols_bin_kernel=FLAGS.num_cols_bin_kernel,
num_filters_bins=FLAGS.num_filters_bins,
dropout_p=FLAGS.dropout_p,
use_channels=FLAGS.use_carfac,
seed=FLAGS.seed)
if FLAGS.load_from_checkpoint:
path_to_load = os.path.join(log_dir, FLAGS.load_from_checkpoint)
logging.info("Attempting to load model from %s", path_to_load)
loaded = False
try:
model.load_weights(path_to_load)
loaded = True
logging.info("Loaded model")
except Exception as err:
logging.info("Unable to load log dir checkpoint %s, trying "
"'load_from_checkpoint' flag: %s", path_to_load, err)
path_to_load = FLAGS.load_from_checkpoint
try:
model.load_weights(path_to_load)
loaded = True
except Exception as err:
logging.info("Unable to load flag checkpoint %s: %s",
path_to_load, err)
else:
loaded = False
example_image_batch = []
if FLAGS.mode == "train":
data_key = "train"
for example in data[data_key].take(4):
input_example, target = example
input_shape = input_example.shape
tf.print("(start train) input shape: ", input_shape)
tf.print("(start train) target phons shape: ", target.shape)
input_example = tf.expand_dims(input_example[0], axis=0)
example_image_batch.append(
[input_example, target])
elif FLAGS.mode == "eval_once":
data_key = "test"
for example in data[data_key].take(4):
input_example, target, _ = example
input_shape = input_example.shape
tf.print("(start eval) input shape: ", input_shape)
tf.print("(start eval) target phons shape: ", target.shape)
input_example = tf.expand_dims(input_example[0], axis=0)
example_image_batch.append(
[input_example, target])
callbacks = [helpers.StepIncrementingCallback()]
callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=log_dir,
histogram_freq=1,
update_freq="batch",
write_graph=True))
model.build(input_shape)
logging.info("Model summary")
model.summary()
if FLAGS.extra_loudness_traindata_proto_file_pattern:
extra_data = True
else:
extra_data = False
save_ckpt = log_dir + "/cp_carfac{}_extradata{}".format(
FLAGS.use_carfac, extra_data) + "_{epoch:04d}.ckpt"
logging.info("Save checkpoint to: %s" % save_ckpt)
callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=save_ckpt,
save_weights_only=True,
verbose=1,
period=5))
if FLAGS.mode == "train":
logging.info("Starting training for %d epochs" % FLAGS.epochs)
if FLAGS.extra_loudness_traindata_proto_file_pattern:
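      # 317 and 639 appear to be hard-coded sizes of the base and extra training sets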
steps_per_epoch = (317 + 639) // FLAGS.batch_size
else:
steps_per_epoch = 317 // FLAGS.batch_size
train(model, data["train"], data["validate"], FLAGS.learning_rate,
FLAGS.epochs, steps_per_epoch, callbacks)
elif FLAGS.mode == "test":
raise NotImplementedError("Mode test not implemented.")
evaluate(model, data["test"], batch_size=FLAGS.eval_batch_size)
elif FLAGS.mode == "eval_once":
if not loaded:
raise ValueError("Trying to eval. a model with unitialized weights.")
save_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), log_dir)
write_predictions(model, data["test"], batch_size=1,
save_directory=save_dir,
save_file=FLAGS.save_predictions_file)
return
else:
raise ValueError("Specified value for '--mode' (%s) unknown", FLAGS.mode)
if __name__ == "__main__":
app.run(main)
|
[
"evaluate.evaluate",
"evaluate.write_predictions"
] |
[((392, 415), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (413, 415), True, 'import tensorflow.compat.v2 as tf\n'), ((438, 513), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""mode"""', '"""train"""', '"""Whether to train, test, or predict."""'], {}), "('mode', 'train', 'Whether to train, test, or predict.')\n", (457, 513), False, 'from absl import flags\n'), ((527, 696), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""loudness_traindata_proto_file_pattern"""', '"""../../data/preprocessed_data/all_dl_data/traindataraw_*"""', '"""Proto file to read loudness data from."""'], {}), "('loudness_traindata_proto_file_pattern',\n '../../data/preprocessed_data/all_dl_data/traindataraw_*',\n 'Proto file to read loudness data from.')\n", (546, 696), False, 'from absl import flags\n'), ((702, 882), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""extra_loudness_traindata_proto_file_pattern"""', '"""../../data/preprocessed_data/all_dl_data/extratraindataraw_*"""', '"""Proto file to read loudness data from."""'], {}), "('extra_loudness_traindata_proto_file_pattern',\n '../../data/preprocessed_data/all_dl_data/extratraindataraw_*',\n 'Proto file to read loudness data from.')\n", (721, 882), False, 'from absl import flags\n'), ((888, 1056), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""loudness_testdata_proto_file_pattern"""', '"""../../data/preprocessed_data/all_dl_data/traindataraw_*"""', '"""Proto file to read loudness data from."""'], {}), "('loudness_testdata_proto_file_pattern',\n '../../data/preprocessed_data/all_dl_data/traindataraw_*',\n 'Proto file to read loudness data from.')\n", (907, 1056), False, 'from absl import flags\n'), ((1062, 1141), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""use_carfac"""', '(True)', '"""Whether to use CARFAC features or not."""'], {}), "('use_carfac', True, 'Whether to use CARFAC features or not.')\n", (1079, 1141), False, 'from absl import flags\n'), ((1160, 1256), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""logs_dir"""', '"""logs/loudness_predictor"""', '"""Directory to put summary logs."""'], {}), "('logs_dir', 'logs/loudness_predictor',\n 'Directory to put summary logs.')\n", (1179, 1256), False, 'from absl import flags\n'), ((1266, 1389), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""load_from_checkpoint"""', '""""""', '"""Which checkpoint from log dir to load when starting (e.g., cp.ckpt)."""'], {}), "('load_from_checkpoint', '',\n 'Which checkpoint from log dir to load when starting (e.g., cp.ckpt).')\n", (1285, 1389), False, 'from absl import flags\n'), ((1399, 1595), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_rows_channel_kernel"""', '(5)', '"""Number of rows on the kernel that will convolve the input across CARFAC channels; it will summarize num_rows_channel_kernel channels."""'], {}), "('num_rows_channel_kernel', 5,\n 'Number of rows on the kernel that will convolve the input across CARFAC channels; it will summarize num_rows_channel_kernel channels.'\n )\n", (1419, 1595), False, 'from absl import flags\n'), ((1607, 1805), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_cols_channel_kernel"""', '(1)', '"""Number of cols on the kernel that will convolve the input across CARFAC channels; it will summarize num_cols_channel_kernel freq. bins."""'], {}), "('num_cols_channel_kernel', 1,\n 'Number of cols on the kernel that will convolve the input across CARFAC channels; it will summarize num_cols_channel_kernel freq. bins.'\n )\n", (1627, 1805), False, 'from absl import flags\n'), ((1817, 1927), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_filters_channels"""', '(5)', '"""Number of filters when convolving channels across bins."""'], {}), "('num_filters_channels', 5,\n 'Number of filters when convolving channels across bins.')\n", (1837, 1927), False, 'from absl import flags\n'), ((1937, 2131), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_rows_bin_kernel"""', '(1)', '"""Number of rows on the kernel that will convolve the input across frequency bins; it will summarize num_rows_bin_kernel CARFAC channels."""'], {}), "('num_rows_bin_kernel', 1,\n 'Number of rows on the kernel that will convolve the input across frequency bins; it will summarize num_rows_bin_kernel CARFAC channels.'\n )\n", (1957, 2131), False, 'from absl import flags\n'), ((2143, 2336), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_cols_bin_kernel"""', '(1)', '"""Number of rows on the kernel that will convolve the input across frequency bins; it will summarize num_cols_bin_kernel frequency bins."""'], {}), "('num_cols_bin_kernel', 1,\n 'Number of rows on the kernel that will convolve the input across frequency bins; it will summarize num_cols_bin_kernel frequency bins.'\n )\n", (2163, 2336), False, 'from absl import flags\n'), ((2348, 2454), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_filters_bins"""', '(5)', '"""Number of filters when convolving bins across channels."""'], {}), "('num_filters_bins', 5,\n 'Number of filters when convolving bins across channels.')\n", (2368, 2454), False, 'from absl import flags\n'), ((2464, 2530), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""', '(1)', '"""Batch size when training."""'], {}), "('batch_size', 1, 'Batch size when training.')\n", (2484, 2530), False, 'from absl import flags\n'), ((2544, 2658), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""epochs"""', '(150)', '"""Number of epochs (full passes of the training data) to train the model."""'], {}), "('epochs', 150,\n 'Number of epochs (full passes of the training data) to train the model.')\n", (2564, 2658), False, 'from absl import flags\n'), ((2668, 2792), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_model_to_dir"""', '"""saved_model"""', '"""Destination directory for saving the model before early exit."""'], {}), "('save_model_to_dir', 'saved_model',\n 'Destination directory for saving the model before early exit.')\n", (2687, 2792), False, 'from absl import flags\n'), ((2802, 2900), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""load_model_from_file"""', '""""""', '"""A saved model to eval once and then return."""'], {}), "('load_model_from_file', '',\n 'A saved model to eval once and then return.')\n", (2821, 2900), False, 'from absl import flags\n'), ((2910, 3024), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_predictions_file"""', '"""predictions.txt"""', '"""A saved model to eval once and then return."""'], {}), "('save_predictions_file', 'predictions.txt',\n 'A saved model to eval once and then return.')\n", (2929, 3024), False, 'from absl import flags\n'), ((3034, 3092), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""', '(4)', '"""TensorFlow random seed."""'], {}), "('seed', 4, \n    'TensorFlow random seed.')\n", (3054, 3092), False, 'from absl import flags\n'), ((3106, 3191), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(0.001)', '"""The initial learning rate for Adam."""'], {}), "('learning_rate', 0.001,\n 'The initial learning rate for Adam.')\n", (3124, 3191), False, 'from absl import flags\n'), ((3187, 3261), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""dropout_p"""', '(0.0)', '"""Dropout to apply to the fc layers."""'], {}), "('dropout_p', 0.0, 'Dropout to apply to the fc layers.')\n", (3205, 3261), False, 'from absl import flags\n'), ((3365, 3395), 'tensorflow.compat.v2.random.set_seed', 'tf.random.set_seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (3383, 3395), True, 'import tensorflow.compat.v2 as tf\n'), ((3685, 3732), 'absl.logging.info', 'logging.info', (['"""TensorFlow seed: %d"""', 'FLAGS.seed'], {}), "('TensorFlow seed: %d', FLAGS.seed)\n", (3697, 3732), False, 'from absl import logging\n'), ((5060, 5538), 'model.LoudnessPredictor', 'LoudnessPredictor', ([], {'frequency_bins': 'frequency_bins', 'carfac_channels': 'carfac_channels', 'num_rows_channel_kernel': 'FLAGS.num_rows_channel_kernel', 'num_cols_channel_kernel': 'FLAGS.num_cols_channel_kernel', 'num_filters_channels': 'FLAGS.num_filters_channels', 'num_rows_bin_kernel': 'FLAGS.num_rows_bin_kernel', 'num_cols_bin_kernel': 'FLAGS.num_cols_bin_kernel', 'num_filters_bins': 'FLAGS.num_filters_bins', 'dropout_p': 'FLAGS.dropout_p', 'use_channels': 'FLAGS.use_carfac', 'seed': 'FLAGS.seed'}), '(frequency_bins=frequency_bins, carfac_channels=\n carfac_channels, num_rows_channel_kernel=FLAGS.num_rows_channel_kernel,\n num_cols_channel_kernel=FLAGS.num_cols_channel_kernel,\n num_filters_channels=FLAGS.num_filters_channels, num_rows_bin_kernel=\n FLAGS.num_rows_bin_kernel, num_cols_bin_kernel=FLAGS.\n num_cols_bin_kernel, num_filters_bins=FLAGS.num_filters_bins, dropout_p\n =FLAGS.dropout_p, use_channels=FLAGS.use_carfac, seed=FLAGS.seed)\n', (5077, 5538), False, 'from model import LoudnessPredictor\n'), ((7593, 7622), 'absl.logging.info', 'logging.info', (['"""Model summary"""'], {}), "('Model summary')\n", (7605, 7622), False, 'from absl import logging\n'), ((7870, 7920), 'absl.logging.info', 'logging.info', (["('Save checkpoint to: %s' % save_ckpt)"], {}), "('Save checkpoint to: %s' % save_ckpt)\n", (7882, 7920), False, 'from absl import logging\n'), ((9263, 9276), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (9270, 9276), False, 'from absl import app\n'), ((3311, 3361), 'absl.app.UsageError', 'app.UsageError', (['"""Too many command-line arguments."""'], {}), "('Too many command-line arguments.')\n", (3325, 3361), False, 'from absl import app\n'), ((3465, 3531), 'absl.app.UsageError', 'app.UsageError', (['"""Must provide --loudness_data_proto_file_pattern."""'], {}), "('Must provide --loudness_data_proto_file_pattern.')\n", (3479, 3531), False, 'from absl import app\n'), ((3635, 3658), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (3649, 3658), False, 'import os\n'), ((3664, 3681), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (3672, 3681), False, 'import os\n'), ((3855, 3944), 'data_processing.get_datasets', 'get_datasets', (['FLAGS.loudness_testdata_proto_file_pattern', '(1)'], {'carfac': 'FLAGS.use_carfac'}), '(FLAGS.loudness_testdata_proto_file_pattern, 1, carfac=FLAGS.\n use_carfac)\n', (3867, 3944), False, 'from data_processing import get_datasets, get_testdata\n'), ((3968, 4008), 'absl.logging.info', 'logging.info', (['"""Created testing datasets"""'], {}), "('Created testing datasets')\n", (3980, 4008), False, 'from absl import logging\n'), ((4021, 4075), 'tensorflow.compat.v2.keras.models.load_model', 'tf.keras.models.load_model', (['FLAGS.load_model_from_file'], {}), '(FLAGS.load_model_from_file)\n', (4047, 4075), True, 'import tensorflow.compat.v2 as tf\n'), ((4080, 4108), 'absl.logging.info', 'logging.info', (['"""Loaded model"""'], {}), "('Loaded model')\n", (4092, 4108), False, 'from absl import logging\n'), ((5652, 5701), 'os.path.join', 'os.path.join', (['log_dir', 'FLAGS.load_from_checkpoint'], {}), '(log_dir, FLAGS.load_from_checkpoint)\n', (5664, 5701), False, 'import os\n'), ((5706, 5768), 'absl.logging.info', 'logging.info', (['"""Attempting to load model from %s"""', 'path_to_load'], {}), "('Attempting to load model from %s', path_to_load)\n", (5718, 5768), False, 'from absl import logging\n'), ((7253, 7287), 'helpers.StepIncrementingCallback', 'helpers.StepIncrementingCallback', ([], {}), '()\n', (7285, 7287), False, 'import helpers\n'), ((7308, 7416), 'tensorflow.compat.v2.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)', 'update_freq': '"""batch"""', 'write_graph': '(True)'}), "(log_dir=log_dir, histogram_freq=1,\n update_freq='batch', write_graph=True)\n", (7338, 7416), True, 'import tensorflow.compat.v2 as tf\n'), ((7940, 8044), 'tensorflow.compat.v2.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'save_ckpt', 'save_weights_only': '(True)', 'verbose': '(1)', 'period': '(5)'}), '(filepath=save_ckpt, save_weights_only=\n True, verbose=1, period=5)\n', (7974, 8044), True, 'import tensorflow.compat.v2 as tf\n'), ((8236, 8298), 'absl.logging.info', 'logging.info', (["('Starting training for %d epochs' % FLAGS.epochs)"], {}), "('Starting training for %d epochs' % FLAGS.epochs)\n", (8248, 8298), False, 'from absl import logging\n'), ((8475, 8588), 'train', 'train', (['model', "data['train']", "data['validate']", 'FLAGS.learning_rate', 'FLAGS.epochs', 'steps_per_epoch', 'callbacks'], {}), "(model, data['train'], data['validate'], FLAGS.learning_rate, FLAGS.\n epochs, steps_per_epoch, callbacks)\n", (8480, 8588), False, 'import train\n'), ((3581, 3607), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3597, 3607), False, 'import os\n'), ((4150, 4329), 'data_processing.get_datasets', 'get_datasets', (['FLAGS.loudness_traindata_proto_file_pattern', 'FLAGS.batch_size'], {'carfac': 'FLAGS.use_carfac', 'extra_file_pattern': 'FLAGS.extra_loudness_traindata_proto_file_pattern'}), '(FLAGS.loudness_traindata_proto_file_pattern, FLAGS.batch_size,\n carfac=FLAGS.use_carfac, extra_file_pattern=FLAGS.\n extra_loudness_traindata_proto_file_pattern)\n', (4162, 4329), False, 'from data_processing import get_datasets, get_testdata\n'), ((4620, 4649), 'absl.logging.info', 'logging.info', (['"""Created model"""'], {}), "('Created model')\n", (4632, 4649), False, 'from absl import logging\n'), ((5862, 5890), 'absl.logging.info', 'logging.info', (['"""Loaded model"""'], {}), "('Loaded model')\n", (5874, 5890), False, 'from absl import logging\n'), ((6548, 6600), 'tensorflow.compat.v2.print', 'tf.print', (['"""(start train) input shape: """', 'input_shape'], {}), "('(start train) input shape: ', input_shape)\n", (6556, 6600), True, 'import tensorflow.compat.v2 as tf\n'), ((6607, 6667), 'tensorflow.compat.v2.print', 'tf.print', (['"""(start train) target phons shape: """', 'target.shape'], {}), "('(start train) target phons shape: ', target.shape)\n", (6615, 6667), True, 'import tensorflow.compat.v2 as tf\n'), ((6690, 6730), 'tensorflow.compat.v2.expand_dims', 'tf.expand_dims', (['input_example[0]'], {'axis': '(0)'}), '(input_example[0], axis=0)\n', (6704, 6730), True, 'import tensorflow.compat.v2 as tf\n'), ((8687, 8750), 'evaluate.evaluate', 'evaluate', (['model', "data['test']"], {'batch_size': 'FLAGS.eval_batch_size'}), "(model, data['test'], batch_size=FLAGS.eval_batch_size)\n", (8695, 8750), False, 'from evaluate import evaluate, write_predictions\n'), ((4695, 4781), 'data_processing.get_testdata', 'get_testdata', (['FLAGS.loudness_testdata_proto_file_pattern'], {'carfac': 'FLAGS.use_carfac'}), '(FLAGS.loudness_testdata_proto_file_pattern, carfac=FLAGS.\n use_carfac)\n', (4707, 4781), False, 'from data_processing import get_datasets, get_testdata\n'), ((5926, 6047), 'absl.logging.info', 'logging.info', (['"""Unable to load log dir checkpoint %s, trying \'load_from_checkpoint\' flag: %s"""', 'path_to_load', 'err'], {}), '(\n "Unable to load log dir checkpoint %s, trying \'load_from_checkpoint\' flag: %s"\n , path_to_load, err)\n', (5938, 6047), False, 'from absl import logging\n'), ((6987, 7038), 'tensorflow.compat.v2.print', 'tf.print', (['"""(start eval) input shape: """', 'input_shape'], {}), "('(start eval) input shape: ', input_shape)\n", (6995, 7038), True, 'import tensorflow.compat.v2 as tf\n'), ((7045, 7104), 'tensorflow.compat.v2.print', 'tf.print', (['"""(start eval) target phons shape: """', 'target.shape'], {}), "('(start eval) target phons shape: ', target.shape)\n", (7053, 7104), True, 'import tensorflow.compat.v2 as tf\n'), ((7127, 7167), 'tensorflow.compat.v2.expand_dims', 'tf.expand_dims', (['input_example[0]'], {'axis': '(0)'}), '(input_example[0], axis=0)\n', (7141, 7167), True, 'import tensorflow.compat.v2 as tf\n'), ((8975, 9096), 'evaluate.write_predictions', 'write_predictions', (['model', "data['test']"], {'batch_size': '(1)', 'save_directory': 'save_dir', 'save_file': 'FLAGS.save_predictions_file'}), "(model, data['test'], batch_size=1, save_directory=\n save_dir, save_file=FLAGS.save_predictions_file)\n", (8992, 9096), False, 'from evaluate import evaluate, write_predictions\n'), ((6221, 6293), 'absl.logging.info', 'logging.info', (['"""Unable to load flag checkpoint %s: %s"""', 'path_to_load', 'err'], {}), "('Unable to load flag checkpoint %s: %s', path_to_load, err)\n", (6233, 6293), False, 'from absl import logging\n'), ((8933, 8959), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (8949, 8959), False, 'import os\n')]
|
import keras.backend as K
from keras.models import load_model  # assumed Keras loader; run_model calls load_model() when a pre-trained model is configured
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from sacred.observers import FileStorageObserver
from utils.util import prepare_dataset, split_data
from train import train
from evaluate import evaluate
import tensorflow as tf
import GPy
import GPyOpt
import functools
import json
import os
import importlib
# reset tensorflow graph
tf.reset_default_graph()
# remove unnecessary tensorflow output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# set dimension ordering to tensorflow
K.set_image_dim_ordering('tf')
# set bounds for hyperparameter optimization
bounds = [
{ 'name': 'learning_rate', 'type': 'continuous', 'domain': (0.0, 0.0006) },
{ 'name': 'freeze_layers', 'type': 'discrete', 'domain': range(0, 594) }
]
def optimize( config, config_path=None ):
if config_path is not None:
experiment.add_artifact( config_path )
# optimize hyperparameters
op_function = functools.partial(run_model, config=config, experiment=experiment)
hyperparameter_optimizer = GPyOpt.methods.BayesianOptimization( f=op_function, domain=bounds )
hyperparameter_optimizer.run_optimization( max_iter=10 )
# print best hyperparameters and achieved score
for index, param in enumerate(bounds):
print(f'{ bounds[index]["name"] }: { hyperparameter_optimizer.x_opt[index] }')
print(f'optimized score: { hyperparameter_optimizer.fx_opt }')
def run_model( params, config, experiment ):
file_identifier = []
for index, param in enumerate(params[0]):
print(f'{ bounds[index]["name"] } => { param }')
file_identifier.extend((bounds[index]['name'], str(param)))
file_identifier = '_'.join(file_identifier)
# load dataset configuration
if config['dataset'].get('link', True):
dataset_config_path = f'../configs/datasets/{ config["dataset"]["link"] }'
experiment.add_artifact(dataset_config_path)
config['dataset'].update( json.load( open( dataset_config_path ) ) )
# split dataset into k split
split_dirs = split_data(config['dataset']['split'], config['dataset']['path'])
# build training and validation directory
training_directory, validation_directory = prepare_dataset(split_dirs, 0, len(split_dirs))
# optionally load pre-trainied model
if config['model'].get('load_model', False):
model = load_model(config['model']['load_model'])
else:
model_builder = importlib.import_module(f'models.{ config["model"]["build_file"] }')
model = model_builder.build(config)
# modify config to use optimization parameters
config['hyper_parameters']['learning_rate'] = float(params[:, 0])
config['hyper_parameters']['freeze_layers'] = int(params[:, 1])
# train model using optimized hyper parameters
model = train(model, config, experiment, training_directory, validation_directory, file_identifier)
    # evaluate model using standard metrics
evaluation = evaluate(model, config, experiment, validation_directory, file_identifier)
return evaluation['f1']
if __name__ == '__main__':
# list configs in active directory
configs = os.listdir( '../configs/active' )
# iterate over each config and perform experiment
for config_file in configs:
# set config path
config_path = f'../configs/active/{ config_file }'
# load config file
config = json.load( open( config_path ) )
# get experiment path
experiment_name = config['experiment']['name']
experiment_path = f'../experiments/{ experiment_name }'
# initialize experiment
experiment = Experiment(experiment_name)
experiment.captured_out_filter = apply_backspaces_and_linefeeds
experiment.observers.append(FileStorageObserver.create(experiment_path))
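        # sacred's automain expects a zero-argument entry point, so close over config here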
def wrapper():
return optimize(config, config_path)
# run experiment
experiment.automain(wrapper)
|
[
"evaluate.evaluate"
] |
[((402, 426), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (424, 426), True, 'import tensorflow as tf\n'), ((548, 578), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (572, 578), True, 'import keras.backend as K\n'), ((968, 1034), 'functools.partial', 'functools.partial', (['run_model'], {'config': 'config', 'experiment': 'experiment'}), '(run_model, config=config, experiment=experiment)\n', (985, 1034), False, 'import functools\n'), ((1066, 1131), 'GPyOpt.methods.BayesianOptimization', 'GPyOpt.methods.BayesianOptimization', ([], {'f': 'op_function', 'domain': 'bounds'}), '(f=op_function, domain=bounds)\n', (1101, 1131), False, 'import GPyOpt\n'), ((2089, 2154), 'utils.util.split_data', 'split_data', (["config['dataset']['split']", "config['dataset']['path']"], {}), "(config['dataset']['split'], config['dataset']['path'])\n", (2099, 2154), False, 'from utils.util import prepare_dataset, split_data\n'), ((2852, 2947), 'train.train', 'train', (['model', 'config', 'experiment', 'training_directory', 'validation_directory', 'file_identifier'], {}), '(model, config, experiment, training_directory, validation_directory,\n file_identifier)\n', (2857, 2947), False, 'from train import train\n'), ((3006, 3080), 'evaluate.evaluate', 'evaluate', (['model', 'config', 'experiment', 'validation_directory', 'file_identifier'], {}), '(model, config, experiment, validation_directory, file_identifier)\n', (3014, 3080), False, 'from evaluate import evaluate\n'), ((3192, 3223), 'os.listdir', 'os.listdir', (['"""../configs/active"""'], {}), "('../configs/active')\n", (3202, 3223), False, 'import os\n'), ((2485, 2551), 'importlib.import_module', 'importlib.import_module', (['f"""models.{config[\'model\'][\'build_file\']}"""'], {}), '(f"models.{config[\'model\'][\'build_file\']}")\n', (2508, 2551), False, 'import importlib\n'), ((3681, 3708), 'sacred.Experiment', 'Experiment', (['experiment_name'], {}), '(experiment_name)\n', (3691, 3708), False, 'from sacred import Experiment\n'), ((3817, 3860), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['experiment_path'], {}), '(experiment_path)\n', (3843, 3860), False, 'from sacred.observers import FileStorageObserver\n')]
|
from __future__ import print_function
import os
import sys
import importlib
import mxnet as mx
from dataset.iterator import DetRecordIter
from config.config import cfg
from evaluate.eval_metric import MApMetric, VOC07MApMetric
import logging
from symbol.symbol_factory import get_symbol
def evaluate_net(net, path_imgrec, num_classes, mean_pixels, data_shape,
model_prefix, epoch, ctx=mx.cpu(), batch_size=1,
path_imglist="", nms_thresh=0.45, force_nms=False,
ovp_thresh=0.5, use_difficult=False, class_names=None,
voc07_metric=False):
"""
    evaluate network given the validation record file
Parameters:
----------
net : str or None
Network name or use None to load from json without modifying
path_imgrec : str
path to the record validation file
path_imglist : str
path to the list file to replace labels in record file, optional
num_classes : int
number of classes, not including background
mean_pixels : tuple
(mean_r, mean_g, mean_b)
data_shape : tuple or int
(3, height, width) or height/width
model_prefix : str
model prefix of saved checkpoint
epoch : int
load model epoch
ctx : mx.ctx
mx.gpu() or mx.cpu()
batch_size : int
validation batch size
nms_thresh : float
non-maximum suppression threshold
force_nms : boolean
whether suppress different class objects
ovp_thresh : float
        AP overlap threshold for true/false positives
use_difficult : boolean
whether to use difficult objects in evaluation if applicable
class_names : comma separated str
class names in string, must correspond to num_classes if set
voc07_metric : boolean
        whether to use 11-point evaluation as in VOC07 competition
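
    Example
    -------
    Illustrative call only; the network name, paths and class count below are
    assumptions, not taken from this repository:
        evaluate_net('vgg16_reduced', 'data/val.rec', 20, (123, 117, 104), 300,
                     'model/ssd', 0, ctx=mx.gpu(0), voc07_metric=True)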
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# args
if isinstance(data_shape, int):
data_shape = (3, data_shape, data_shape)
assert len(data_shape) == 3 and data_shape[0] == 3
model_prefix += '_' + str(data_shape[1])
# iterator
eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape,
path_imglist=path_imglist, **cfg.valid)
# model params
load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
# network
if net is None:
net = load_net
else:
net = get_symbol(net, data_shape[1], num_classes=num_classes,
nms_thresh=nms_thresh, force_suppress=force_nms)
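    # make sure a label symbol exists so the module can bind label shapes for scoring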
    if 'label' not in net.list_arguments():
label = mx.sym.Variable(name='label')
net = mx.sym.Group([net, label])
# init module
mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
fixed_param_names=net.list_arguments())
mod.bind(data_shapes=eval_iter.provide_data, label_shapes=eval_iter.provide_label)
mod.set_params(args, auxs, allow_missing=False, force_init=True)
# run evaluation
if voc07_metric:
metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names)
else:
metric = MApMetric(ovp_thresh, use_difficult, class_names)
results = mod.score(eval_iter, metric, num_batch=None)
for k, v in results:
print("{}: {}".format(k, v))
|
[
"evaluate.eval_metric.VOC07MApMetric",
"evaluate.eval_metric.MApMetric"
] |
[((403, 411), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (409, 411), True, 'import mxnet as mx\n'), ((1895, 1916), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1914, 1916), False, 'import logging\n'), ((1930, 1949), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1947, 1949), False, 'import logging\n'), ((2213, 2308), 'dataset.iterator.DetRecordIter', 'DetRecordIter', (['path_imgrec', 'batch_size', 'data_shape'], {'path_imglist': 'path_imglist'}), '(path_imgrec, batch_size, data_shape, path_imglist=\n path_imglist, **cfg.valid)\n', (2226, 2308), False, 'from dataset.iterator import DetRecordIter\n'), ((2380, 2425), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (['model_prefix', 'epoch'], {}), '(model_prefix, epoch)\n', (2404, 2425), True, 'import mxnet as mx\n'), ((2507, 2616), 'symbol.symbol_factory.get_symbol', 'get_symbol', (['net', 'data_shape[1]'], {'num_classes': 'num_classes', 'nms_thresh': 'nms_thresh', 'force_suppress': 'force_nms'}), '(net, data_shape[1], num_classes=num_classes, nms_thresh=\n nms_thresh, force_suppress=force_nms)\n', (2517, 2616), False, 'from symbol.symbol_factory import get_symbol\n'), ((2684, 2713), 'mxnet.sym.Variable', 'mx.sym.Variable', ([], {'name': '"""label"""'}), "(name='label')\n", (2699, 2713), True, 'import mxnet as mx\n'), ((2728, 2754), 'mxnet.sym.Group', 'mx.sym.Group', (['[net, label]'], {}), '([net, label])\n', (2740, 2754), True, 'import mxnet as mx\n'), ((3119, 3173), 'evaluate.eval_metric.VOC07MApMetric', 'VOC07MApMetric', (['ovp_thresh', 'use_difficult', 'class_names'], {}), '(ovp_thresh, use_difficult, class_names)\n', (3133, 3173), False, 'from evaluate.eval_metric import MApMetric, VOC07MApMetric\n'), ((3201, 3250), 'evaluate.eval_metric.MApMetric', 'MApMetric', (['ovp_thresh', 'use_difficult', 'class_names'], {}), '(ovp_thresh, use_difficult, class_names)\n', (3210, 3250), False, 'from evaluate.eval_metric import MApMetric, VOC07MApMetric\n')]
|
from evaluate import get_env, get_state_action_size, evaluate
from policy import NeuroevoPolicy
from evolutionary_methods import ga, ga_transfer_learning
from argparse import ArgumentParser
import numpy as np
import logging
import sys
use_tqdm = False
if "tqdm" in sys.modules:
use_tqdm = True
from tqdm import tqdm
def fitness(x, s, a, env, params):
policy = NeuroevoPolicy(s, a)
policy.set_params(x)
return evaluate(env, params, policy)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-e', '--env', help='environment', default='small', type=str)
parser.add_argument('-g', '--gens', help='number of generations', default=100, type=int)
parser.add_argument('-p', '--pop', help='population size (lambda for the 1+lambda ES)', default=10, type=int)
parser.add_argument('-s', '--seed', help='seed for evolution', default=0, type=int)
parser.add_argument('--log', help='log file', default='evolution.log', type=str)
parser.add_argument('--weights', help='filename to save policy weights', default='weights', type=str)
args = parser.parse_args()
logging.basicConfig(filename=args.log, level=logging.DEBUG,
format='%(asctime)s %(message)s')
# starting point
env, params = get_env(args.env)
s, a = get_state_action_size(env)
policy = NeuroevoPolicy(s, a)
# evolution
rng = np.random.default_rng(args.seed)
#start = rng.normal(size=(len(policy.get_params(),)))
start = policy.get_params()
def fit(x):
return fitness(x, s, a, env, params)
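    # negated fitness for minimizers such as CMA-ES (see the commented-out call below)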
def fit_inv(x):
return -fitness(x, s, a, env, params)
print(len(start))
# x_best = mu_lambda(start, fit, args.gens, args.pop, rng=rng)#cmaES_strategy(start, fit_inv)# oneplus_lambda(start, fit, args.gens, args.pop, rng=rng) #
# x_best = ga(s,a, fit, std_init=0.5, n_elites=1, pop_size=10, gen=100, mutation_rate=0.1,scheduler=True, rng=rng)# 1/len(start),rng=rng)#
    x_best = ga_transfer_learning(s, a, fitness, std_init=0.5, n_elites=1, pop_size=10, gen=100, mutation_rate=0.1, scheduler=True, rng=rng)
# Evaluation
policy.set_params(x_best)
policy.save(args.weights)
best_eval = evaluate(env, params, policy)
print('Best individual: ', x_best[:5])
print('Fitness: ', best_eval)
|
[
"evaluate.get_state_action_size",
"evaluate.evaluate",
"evaluate.get_env"
] |
[((375, 395), 'policy.NeuroevoPolicy', 'NeuroevoPolicy', (['s', 'a'], {}), '(s, a)\n', (389, 395), False, 'from policy import NeuroevoPolicy\n'), ((432, 461), 'evaluate.evaluate', 'evaluate', (['env', 'params', 'policy'], {}), '(env, params, policy)\n', (440, 461), False, 'from evaluate import get_env, get_state_action_size, evaluate\n'), ((504, 520), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (518, 520), False, 'from argparse import ArgumentParser\n'), ((1128, 1226), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'args.log', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(message)s"""'}), "(filename=args.log, level=logging.DEBUG, format=\n '%(asctime)s %(message)s')\n", (1147, 1226), False, 'import logging\n'), ((1286, 1303), 'evaluate.get_env', 'get_env', (['args.env'], {}), '(args.env)\n', (1293, 1303), False, 'from evaluate import get_env, get_state_action_size, evaluate\n'), ((1315, 1341), 'evaluate.get_state_action_size', 'get_state_action_size', (['env'], {}), '(env)\n', (1336, 1341), False, 'from evaluate import get_env, get_state_action_size, evaluate\n'), ((1355, 1375), 'policy.NeuroevoPolicy', 'NeuroevoPolicy', (['s', 'a'], {}), '(s, a)\n', (1369, 1375), False, 'from policy import NeuroevoPolicy\n'), ((1403, 1435), 'numpy.random.default_rng', 'np.random.default_rng', (['args.seed'], {}), '(args.seed)\n', (1424, 1435), True, 'import numpy as np\n'), ((1991, 2122), 'evolutionary_methods.ga_transfer_learning', 'ga_transfer_learning', (['s', 'a', 'fitness'], {'std_init': '(0.5)', 'n_elites': '(1)', 'pop_size': '(10)', 'gen': '(100)', 'mutation_rate': '(0.1)', 'scheduler': '(True)', 'rng': 'rng'}), '(s, a, fitness, std_init=0.5, n_elites=1, pop_size=10,\n gen=100, mutation_rate=0.1, scheduler=True, rng=rng)\n', (2011, 2122), False, 'from evolutionary_methods import ga, ga_transfer_learning\n'), ((2210, 2239), 'evaluate.evaluate', 'evaluate', (['env', 'params', 'policy'], {}), '(env, params, policy)\n', (2218, 2239), False, 'from evaluate import get_env, get_state_action_size, evaluate\n')]
|
#!/usr/bin/env python2
# Copyright (C) <2019> Intel Corporation
# SPDX-License-Identifier: MIT
# Author: <NAME>
import sys
import argparse
import os
import copy
import math
import pickle
import matplotlib.pyplot as plt
import evaluate
def update_results(path, gt_dict, seqs):
result_items = ['ate', 'ate_rmse', 'ate_num', 'c_ate_rmse', 'c_ate_num', 'reloc_time', 'reloc_correct', 'track_time', 'track_cr']
results = {}
scenes = ['office']
algorithms = os.listdir(path)
for alg in algorithms:
try:
files = os.listdir(path + '/' + alg)
except OSError:
continue
auto_scale = alg == 'svo' or alg == 'dso'
results[alg] = {}
for file in files:
if not file.endswith('.txt'): continue
if not '1-%d-1-%d' % (seqs[0], seqs[1]) in file.replace('_', '-'): continue
filename = path + '/' + alg + '/' + file
print('-----------------------------------')
print(filename + ':')
try:
info, sequences = evaluate.parse_input(filename, args.remove_repeat)
scene = [s for s in scenes if s in info['scene']][0]
gts = copy.deepcopy(gt_dict[scene]['gts'])
gt_info = gt_dict[scene]['info']
evaluate.evaluate(sequences, info, gts, gt_info, args.ate_threshold, args.max_pose_interval, auto_scale)
except Exception as ex:
print('Invalid result file')
print(ex)
else:
for seq in sequences:
if scene in results[alg] and seq in results[alg][scene]:
exit('Multiple results found for %s %s-%d' % (alg, scene, seq))
for item in result_items:
results[alg].setdefault(scene, {}).setdefault(seq, {})[item] = sequences[seq][item]
return results
def evaluate_all_per_seq(path, print_per_seq, print_per_scene, print_mean, latex_format, plot):
"""
path can either be a folder with algorithm outputs or a pickle file with dumped results
"""
scenes = ['office']
seqs = [2, 7]
scene = scenes[0]
if path.endswith('pkl'):
with open(path) as f:
results = pickle.load(f)
else:
gt_dict = {}
gt_file = 'data/gt_%s_1.txt' % scene
gt_info, gts = evaluate.parse_input(gt_file)
gt_dict.setdefault(scene, {})['gts'] = gts
gt_dict[scene]['info'] = gt_info
results = update_results(path, gt_dict, seqs)
with open('office-%d-%d.pkl' % (seqs[0], seqs[1]), 'wb') as output:
pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)
algorithms = sorted(results.keys())
algorithms = ['orbslam2_t265', 'maslam', 'ds-slam', \
'vins_mono_color_imu_with_loop', 'vins_mono_fisheye_imu_with_loop', 'infinitam_aligned_depth',]
alg_names = ['ORB-s', 'ORB-d', 'DS-SLAM',\
'VINS-c', 'VINS-f', 'ITAM']
is_pose_correct = lambda ate: ate <= args.ate_threshold
max_alg_len = max([len(alg) for alg in algorithms])
column1_fmt = '%%-%ds' % max_alg_len
if latex_format:
num_fmt = ' & %6.3f'
str_fmt = ' & %6s'
perc_fmt = ' &%5.1f\\%%'
line_end = ' \\\\\n'
else:
num_fmt = ' %8f'
str_fmt = ' %8s'
perc_fmt = ' %8.3f%%'
line_end = '\n'
res_reloc = '%d-%d ' % (seqs[0], seqs[1])
res_score = res_reloc
for alg in algorithms:
if not (alg in results and 'office' in results[alg] and seqs[1] in results[alg]['office']):
res_reloc += str_fmt % 'NO!'
res_score += str_fmt % 'NO!'
continue
res = results[alg]['office'][seqs[1]]
reloc_time = res['reloc_time'] if (len(res['ate'].keys()) > 0 and is_pose_correct(res['ate'][min(res['ate'].keys())])) else float('inf')
res_reloc += num_fmt % reloc_time
res_score += num_fmt % math.exp(reloc_time / (-60.))
continue
print('==============================')
print(' ' + ''.join([str_fmt % alg[:6] for alg in alg_names]))
print(res_reloc)
print(res_score)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--remove-repeated-pose', help='ignore repeated poses', dest='remove_repeat', action='store_true')
parser.add_argument('-k', '--keep-repeated-pose', help='keep repeated poses', dest='remove_repeat', action='store_false')
parser.add_argument('-t', '--ate-threshold', type=float, help='ATE threshold of correctness', default=0.3)
parser.add_argument('-m', '--max-pose-interval', type=float, help='consider lost after no pose for such time (sec)', default=1.)
parser.add_argument('--print-seq', help='print each seq', dest='print_seq', action='store_true')
parser.add_argument('--no-print-seq', help='print each seq', dest='print_seq', action='store_false')
parser.add_argument('--print-scene', help='print each scene', dest='print_scene', action='store_true')
parser.add_argument('--no-print-scene', help='print each scene', dest='print_scene', action='store_false')
parser.add_argument('--print-mean', help='print total mean', dest='print_mean', action='store_true')
parser.add_argument('--no-print-mean', help='print total mean', dest='print_mean', action='store_false')
parser.add_argument('-l', '--latex', help='print in latex table format', dest='latex', action='store_true')
parser.add_argument('-p', '--plot', help='plot trajectories', dest='plot', action='store_true')
parser.add_argument('-np', '--no-plot', help='not plot trajectories', dest='plot', action='store_false')
parser.set_defaults(plot=True)
parser.set_defaults(remove_repeat=True)
parser.set_defaults(print_seq=True)
parser.set_defaults(print_scene=True)
parser.set_defaults(print_mean=True)
parser.set_defaults(latex=False)
args, left = parser.parse_known_args()
evaluate_all_per_seq(left[0], args.print_seq, args.print_scene, args.print_mean, args.latex, args.plot)
|
[
"evaluate.evaluate",
"evaluate.parse_input"
] |
[((471, 487), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (481, 487), False, 'import os\n'), ((4206, 4231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4229, 4231), False, 'import argparse\n'), ((2373, 2402), 'evaluate.parse_input', 'evaluate.parse_input', (['gt_file'], {}), '(gt_file)\n', (2393, 2402), False, 'import evaluate\n'), ((548, 576), 'os.listdir', 'os.listdir', (["(path + '/' + alg)"], {}), "(path + '/' + alg)\n", (558, 576), False, 'import os\n'), ((2259, 2273), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2270, 2273), False, 'import pickle\n'), ((2637, 2690), 'pickle.dump', 'pickle.dump', (['results', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(results, output, pickle.HIGHEST_PROTOCOL)\n', (2648, 2690), False, 'import pickle\n'), ((3961, 3989), 'math.exp', 'math.exp', (['(reloc_time / -60.0)'], {}), '(reloc_time / -60.0)\n', (3969, 3989), False, 'import math\n'), ((1059, 1109), 'evaluate.parse_input', 'evaluate.parse_input', (['filename', 'args.remove_repeat'], {}), '(filename, args.remove_repeat)\n', (1079, 1109), False, 'import evaluate\n'), ((1201, 1237), 'copy.deepcopy', 'copy.deepcopy', (["gt_dict[scene]['gts']"], {}), "(gt_dict[scene]['gts'])\n", (1214, 1237), False, 'import copy\n'), ((1303, 1412), 'evaluate.evaluate', 'evaluate.evaluate', (['sequences', 'info', 'gts', 'gt_info', 'args.ate_threshold', 'args.max_pose_interval', 'auto_scale'], {}), '(sequences, info, gts, gt_info, args.ate_threshold, args.\n max_pose_interval, auto_scale)\n', (1320, 1412), False, 'import evaluate\n')]
|
import os
import json
from exp import ex
from args import get_args
from train import _train
from utils import wait_for_key, count_parameters
from evaluate import _evaluate
from infer import _infer
from vis_tsne import _tsne, _silhouette
from distance import _distance
from extract_keyword import extract_and_save_all
from model import get_model_options
from ckpt import get_model_ckpt
from loss.loss import get_loss
from optimizer import get_optimizer
from data.dataloader import get_dataloaders
from logger import get_logger
from scripts import run_script
@ex.capture
def prepare_model(model_name):
return get_model_ckpt(model_name)
def prepare(no_logger=False):
logger = get_logger(log_file=no_logger)
model, tokenizer, ckpt, datasets, epoch = prepare_model()
dataloaders = get_dataloaders(datasets, model.make_batch, tokenizer)
'''
args.batch_per_epoch = {}
for key in dataloaders.keys():
args.batch_per_epoch[key] = \
math.ceil(len(dataloaders[key]) / args.batch_sizes[key])
'''
loss_fn = get_loss(padding_idx=tokenizer.pad_id)
optimizers = get_optimizer(model, dataloaders)
model.ckpt_epoch = epoch
return model, loss_fn, optimizers, tokenizer, dataloaders, logger
@ex.command
def train():
all_args = prepare()
res = _train(*all_args)
logger = all_args[-1]
# hold process to keep tensorboard alive
if 'tfboard' in logger.logger_dests:
wait_for_key()
return res
@ex.command
def evaluate(log_path):
all_args = prepare(no_logger=True)
stats, _, texts = _evaluate(*all_args, key='val', print_output=False)
print(stats)
model = all_args[0]
assert hasattr(model, 'ckpt_path'), "no ckpt loaded"
path = model.ckpt_path
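    # store eval outputs under <run_root>/evals/<checkpoint_dir>, mirroring the checkpoint layout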
parent = path.parent.parent.parent
dir_name = path.parent.stem
parent = parent / "evals" / dir_name
os.makedirs(parent, exist_ok=True)
with open(parent / 'eval_stats.json', 'w') as f:
json.dump(stats, f)
with open(parent / 'eval_text.json', 'w') as f:
json.dump(texts, f)
@ex.command
def tsne(log_path, test_path):
# all_args = prepare({'use_data': 'val', 'sample': True})
all_args = prepare()
_tsne(*all_args, key='test')
@ex.command
def silhouette(log_path):
# all_args = prepare({'use_data': 'val', 'sample': True})
all_args = prepare()
_silhouette(*all_args, key='test')
@ex.command
def distance(log_path):
# all_args = prepare({'use_data': 'val', 'sample': True})
all_args = prepare()
_distance(*all_args, key='val')
@ex.command
def infer():
#all_args = prepare({'use_data': 'val'})
all_args = prepare()
texts = _infer(*all_args)
@ex.command
def model_stats():
#all_args = prepare({'use_data': 'val'})
all_args = prepare(no_logger=True)
model = all_args[0]
stats = {}
stats['parameter_counts'] = count_parameters(model)
print(stats)
@ex.command
def extract():
model, _, _, tokenizer, \
dataloaders, _ = prepare()
for dataloader in dataloaders.values():
dataloader.training = False
extract_and_save_all(model, tokenizer, dataloaders)
@ex.command
def scripts(script):
run_script(script)
@ex.command
def print_models():
print(sorted(get_model_options()))
@ex.option_hook
def update_args(options):
args = get_args(options)
print(sorted(args.items()))
ex.add_config(args)
@ex.automain
def run():
train()
|
[
"evaluate._evaluate"
] |
[((615, 641), 'ckpt.get_model_ckpt', 'get_model_ckpt', (['model_name'], {}), '(model_name)\n', (629, 641), False, 'from ckpt import get_model_ckpt\n'), ((687, 717), 'logger.get_logger', 'get_logger', ([], {'log_file': 'no_logger'}), '(log_file=no_logger)\n', (697, 717), False, 'from logger import get_logger\n'), ((799, 853), 'data.dataloader.get_dataloaders', 'get_dataloaders', (['datasets', 'model.make_batch', 'tokenizer'], {}), '(datasets, model.make_batch, tokenizer)\n', (814, 853), False, 'from data.dataloader import get_dataloaders\n'), ((1056, 1094), 'loss.loss.get_loss', 'get_loss', ([], {'padding_idx': 'tokenizer.pad_id'}), '(padding_idx=tokenizer.pad_id)\n', (1064, 1094), False, 'from loss.loss import get_loss\n'), ((1112, 1145), 'optimizer.get_optimizer', 'get_optimizer', (['model', 'dataloaders'], {}), '(model, dataloaders)\n', (1125, 1145), False, 'from optimizer import get_optimizer\n'), ((1308, 1325), 'train._train', '_train', (['*all_args'], {}), '(*all_args)\n', (1314, 1325), False, 'from train import _train\n'), ((1577, 1628), 'evaluate._evaluate', '_evaluate', (['*all_args'], {'key': '"""val"""', 'print_output': '(False)'}), "(*all_args, key='val', print_output=False)\n", (1586, 1628), False, 'from evaluate import _evaluate\n'), ((1870, 1904), 'os.makedirs', 'os.makedirs', (['parent'], {'exist_ok': '(True)'}), '(parent, exist_ok=True)\n', (1881, 1904), False, 'import os\n'), ((2202, 2230), 'vis_tsne._tsne', '_tsne', (['*all_args'], {'key': '"""test"""'}), "(*all_args, key='test')\n", (2207, 2230), False, 'from vis_tsne import _tsne, _silhouette\n'), ((2362, 2396), 'vis_tsne._silhouette', '_silhouette', (['*all_args'], {'key': '"""test"""'}), "(*all_args, key='test')\n", (2373, 2396), False, 'from vis_tsne import _tsne, _silhouette\n'), ((2526, 2557), 'distance._distance', '_distance', (['*all_args'], {'key': '"""val"""'}), "(*all_args, key='val')\n", (2535, 2557), False, 'from distance import _distance\n'), ((2668, 2685), 'infer._infer', '_infer', (['*all_args'], {}), '(*all_args)\n', (2674, 2685), False, 'from infer import _infer\n'), ((2875, 2898), 'utils.count_parameters', 'count_parameters', (['model'], {}), '(model)\n', (2891, 2898), False, 'from utils import wait_for_key, count_parameters\n'), ((3096, 3147), 'extract_keyword.extract_and_save_all', 'extract_and_save_all', (['model', 'tokenizer', 'dataloaders'], {}), '(model, tokenizer, dataloaders)\n', (3116, 3147), False, 'from extract_keyword import extract_and_save_all\n'), ((3187, 3205), 'scripts.run_script', 'run_script', (['script'], {}), '(script)\n', (3197, 3205), False, 'from scripts import run_script\n'), ((3334, 3351), 'args.get_args', 'get_args', (['options'], {}), '(options)\n', (3342, 3351), False, 'from args import get_args\n'), ((3388, 3407), 'exp.ex.add_config', 'ex.add_config', (['args'], {}), '(args)\n', (3401, 3407), False, 'from exp import ex\n'), ((1447, 1461), 'utils.wait_for_key', 'wait_for_key', ([], {}), '()\n', (1459, 1461), False, 'from utils import wait_for_key, count_parameters\n'), ((1966, 1985), 'json.dump', 'json.dump', (['stats', 'f'], {}), '(stats, f)\n', (1975, 1985), False, 'import json\n'), ((2046, 2065), 'json.dump', 'json.dump', (['texts', 'f'], {}), '(texts, f)\n', (2055, 2065), False, 'import json\n'), ((3257, 3276), 'model.get_model_options', 'get_model_options', ([], {}), '()\n', (3274, 3276), False, 'from model import get_model_options\n')]
|
import torch
import torch.nn as nn
from tqdm import tqdm
import numpy as np
import logging
from evaluate import eval_func, re_rank
from evaluate import euclidean_dist
from utils import AvgerageMeter
import os.path as osp
import os
from model import convert_model
from optim import make_optimizer, WarmupMultiStepLR
try:
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
import apex
except:
pass
class BaseTrainer(object):
def __init__(self, cfg, model, train_dl, val_dl,
loss_func, num_query, num_gpus):
self.cfg = cfg
self.model = model
self.train_dl = train_dl
self.val_dl = val_dl
self.loss_func = loss_func
self.num_query = num_query
self.loss_avg = AvgerageMeter()
self.acc_avg = AvgerageMeter()
self.train_epoch = 1
self.batch_cnt = 0
self.logger = logging.getLogger('reid_baseline.train')
self.log_period = cfg.SOLVER.LOG_PERIOD
self.checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
self.eval_period = cfg.SOLVER.EVAL_PERIOD
self.output_dir = cfg.OUTPUT_DIR
self.device = cfg.MODEL.DEVICE
self.epochs = cfg.SOLVER.MAX_EPOCHS
if num_gpus > 1:
# convert to use sync_bn
self.logger.info('More than one gpu used, convert model to use SyncBN.')
if cfg.SOLVER.FP16:
# TODO: Multi-GPU model with FP16
raise NotImplementedError
self.logger.info('Using apex to perform SyncBN and FP16 training')
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
self.model = apex.parallel.convert_syncbn_model(self.model)
else:
# Multi-GPU model without FP16
self.model = nn.DataParallel(self.model)
self.model = convert_model(self.model)
self.model.cuda()
self.logger.info('Using pytorch SyncBN implementation')
self.optim = make_optimizer(cfg, self.model, num_gpus)
self.scheduler = WarmupMultiStepLR(self.optim,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
cfg.SOLVER.WARMUP_FACTOR,
cfg.SOLVER.WARMUP_ITERS,
cfg.SOLVER.WARMUP_METHOD)
self.scheduler.step()
self.mix_precision = False
self.logger.info('Trainer Built')
return
else:
# Single GPU model
self.model.cuda()
self.optim = make_optimizer(cfg, self.model, num_gpus)
self.scheduler = WarmupMultiStepLR(self.optim, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
cfg.SOLVER.WARMUP_FACTOR,
cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)
self.scheduler.step()
self.mix_precision = False
if cfg.SOLVER.FP16:
# Single model using FP16
self.model, self.optim = amp.initialize(self.model, self.optim,
opt_level='O1')
self.mix_precision = True
self.logger.info('Using fp16 training')
self.logger.info('Trainer Built')
return
# TODO: Multi-GPU model with FP16
raise NotImplementedError
self.model.to(self.device)
self.optim = make_optimizer(cfg, self.model, num_gpus)
self.scheduler = WarmupMultiStepLR(self.optim, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
cfg.SOLVER.WARMUP_FACTOR,
cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)
self.scheduler.step()
self.model, self.optim = amp.initialize(self.model, self.optim,
opt_level='O1')
self.mix_precision = True
self.logger.info('Using fp16 training')
self.model = DDP(self.model, delay_allreduce=True)
self.logger.info('Convert model using apex')
self.logger.info('Trainer Built')
def handle_new_batch(self):
self.batch_cnt += 1
if self.batch_cnt % self.cfg.SOLVER.LOG_PERIOD == 0:
self.logger.info('Epoch[{}] Iteration[{}/{}] Loss: {:.3f},'
'Acc: {:.3f}, Base Lr: {:.2e}'
.format(self.train_epoch, self.batch_cnt,
len(self.train_dl), self.loss_avg.avg,
self.acc_avg.avg, self.scheduler.get_lr()[0]))
def handle_new_epoch(self):
self.batch_cnt = 1
self.scheduler.step()
self.logger.info('Epoch {} done'.format(self.train_epoch))
self.logger.info('-' * 20)
if self.train_epoch % self.checkpoint_period == 0:
self.save()
if self.train_epoch % self.eval_period == 0:
self.evaluate()
self.train_epoch += 1
def step(self, batch):
self.model.train()
self.optim.zero_grad()
img, target = batch
img, target = img.cuda(), target.cuda()
score, feat = self.model(img)
loss = self.loss_func(score, feat, target)
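        # with apex amp, scale the loss before backward to avoid fp16 gradient underflow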
if self.mix_precision:
with amp.scale_loss(loss, self.optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
self.optim.step()
acc = (score.max(1)[1] == target).float().mean()
self.loss_avg.update(loss.cpu().item())
self.acc_avg.update(acc.cpu().item())
return self.loss_avg.avg, self.acc_avg.avg
def evaluate(self):
self.model.eval()
num_query = self.num_query
feats, pids, camids = [], [], []
with torch.no_grad():
for batch in tqdm(self.val_dl, total=len(self.val_dl),
leave=False):
data, pid, camid, _ = batch
data = data.cuda()
feat = self.model(data).detach().cpu()
feats.append(feat)
pids.append(pid)
camids.append(camid)
feats = torch.cat(feats, dim=0)
pids = torch.cat(pids, dim=0)
camids = torch.cat(camids, dim=0)
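        # the first num_query embeddings are the query set; the remainder form the gallery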
query_feat = feats[:num_query]
query_pid = pids[:num_query]
query_camid = camids[:num_query]
gallery_feat = feats[num_query:]
gallery_pid = pids[num_query:]
gallery_camid = camids[num_query:]
distmat = euclidean_dist(query_feat, gallery_feat)
cmc, mAP, _ = eval_func(distmat.numpy(), query_pid.numpy(), gallery_pid.numpy(),
query_camid.numpy(), gallery_camid.numpy(),
use_cython=self.cfg.SOLVER.CYTHON)
self.logger.info('Validation Result:')
for r in self.cfg.TEST.CMC:
self.logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r-1]))
self.logger.info('mAP: {:.2%}'.format(mAP))
self.logger.info('-' * 20)
def save(self):
torch.save(self.model.state_dict(), osp.join(self.output_dir,
self.cfg.MODEL.NAME + '_epoch' + str(self.train_epoch) + '.pth'))
torch.save(self.optim.state_dict(), osp.join(self.output_dir,
self.cfg.MODEL.NAME + '_epoch'+ str(self.train_epoch) + '_optim.pth'))
class PCBTrainer(BaseTrainer):
def __init__(self, cfg, model, train_dl, val_dl,
loss_func, num_query, num_gpus):
super().__init__(cfg, model, train_dl, val_dl,
loss_func, num_query, num_gpus)
def step(self, batch):
self.model.train()
self.optim.zero_grad()
img, target = batch
img, target = img.cuda(), target.cuda()
feat_list, score_list = self.model(img)
loss = torch.mean(torch.cat([
self.loss_func(score, feat, target).view(1) \
for score, feat in zip(score_list, feat_list)]))
if self.mix_precision:
with amp.scale_loss(loss, self.optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
self.optim.step()
acc = torch.mean(torch.cat([
(score.max(1)[1] == target).float() \
for score in score_list]))
self.loss_avg.update(loss.cpu().item())
self.acc_avg.update(acc.cpu().item())
return self.loss_avg.avg, self.acc_avg.avg
def evaluate(self):
self.model.eval()
num_query = self.num_query
feats, pids, camids = [], [], []
with torch.no_grad():
for batch in tqdm(self.val_dl, total=len(self.val_dl),
leave=False):
data, pid, camid, _ = batch
data = data.cuda()
local_feat_list = self.model(data)
feat = torch.cat([lf.data.cpu() for lf in local_feat_list], dim=1)
feats.append(feat)
pids.append(pid)
camids.append(camid)
feats = torch.cat(feats, dim=0)
pids = torch.cat(pids, dim=0)
camids = torch.cat(camids, dim=0)
query_feat = feats[:num_query]
query_pid = pids[:num_query]
query_camid = camids[:num_query]
gallery_feat = feats[num_query:]
gallery_pid = pids[num_query:]
gallery_camid = camids[num_query:]
distmat = euclidean_dist(query_feat, gallery_feat)
cmc, mAP, _ = eval_func(distmat.numpy(), query_pid.numpy(), gallery_pid.numpy(),
query_camid.numpy(), gallery_camid.numpy(),
use_cython=self.cfg.SOLVER.CYTHON)
self.logger.info('Validation Result:')
for r in self.cfg.TEST.CMC:
self.logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r-1]))
self.logger.info('mAP: {:.2%}'.format(mAP))
self.logger.info('-' * 20)
|
[
"evaluate.euclidean_dist"
] |
[((782, 797), 'utils.AvgerageMeter', 'AvgerageMeter', ([], {}), '()\n', (795, 797), False, 'from utils import AvgerageMeter\n'), ((821, 836), 'utils.AvgerageMeter', 'AvgerageMeter', ([], {}), '()\n', (834, 836), False, 'from utils import AvgerageMeter\n'), ((916, 956), 'logging.getLogger', 'logging.getLogger', (['"""reid_baseline.train"""'], {}), "('reid_baseline.train')\n", (933, 956), False, 'import logging\n'), ((3726, 3767), 'optim.make_optimizer', 'make_optimizer', (['cfg', 'self.model', 'num_gpus'], {}), '(cfg, self.model, num_gpus)\n', (3740, 3767), False, 'from optim import make_optimizer, WarmupMultiStepLR\n'), ((3793, 3940), 'optim.WarmupMultiStepLR', 'WarmupMultiStepLR', (['self.optim', 'cfg.SOLVER.STEPS', 'cfg.SOLVER.GAMMA', 'cfg.SOLVER.WARMUP_FACTOR', 'cfg.SOLVER.WARMUP_ITERS', 'cfg.SOLVER.WARMUP_METHOD'], {}), '(self.optim, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.\n SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n', (3810, 3940), False, 'from optim import make_optimizer, WarmupMultiStepLR\n'), ((4076, 4130), 'apex.amp.initialize', 'amp.initialize', (['self.model', 'self.optim'], {'opt_level': '"""O1"""'}), "(self.model, self.optim, opt_level='O1')\n", (4090, 4130), False, 'from apex import amp\n'), ((4283, 4320), 'apex.parallel.DistributedDataParallel', 'DDP', (['self.model'], {'delay_allreduce': '(True)'}), '(self.model, delay_allreduce=True)\n', (4286, 4320), True, 'from apex.parallel import DistributedDataParallel as DDP\n'), ((6473, 6496), 'torch.cat', 'torch.cat', (['feats'], {'dim': '(0)'}), '(feats, dim=0)\n', (6482, 6496), False, 'import torch\n'), ((6512, 6534), 'torch.cat', 'torch.cat', (['pids'], {'dim': '(0)'}), '(pids, dim=0)\n', (6521, 6534), False, 'import torch\n'), ((6552, 6576), 'torch.cat', 'torch.cat', (['camids'], {'dim': '(0)'}), '(camids, dim=0)\n', (6561, 6576), False, 'import torch\n'), ((6846, 6886), 'evaluate.euclidean_dist', 'euclidean_dist', (['query_feat', 'gallery_feat'], {}), '(query_feat, gallery_feat)\n', (6860, 6886), False, 'from evaluate import euclidean_dist\n'), ((9393, 9416), 'torch.cat', 'torch.cat', (['feats'], {'dim': '(0)'}), '(feats, dim=0)\n', (9402, 9416), False, 'import torch\n'), ((9432, 9454), 'torch.cat', 'torch.cat', (['pids'], {'dim': '(0)'}), '(pids, dim=0)\n', (9441, 9454), False, 'import torch\n'), ((9472, 9496), 'torch.cat', 'torch.cat', (['camids'], {'dim': '(0)'}), '(camids, dim=0)\n', (9481, 9496), False, 'import torch\n'), ((9766, 9806), 'evaluate.euclidean_dist', 'euclidean_dist', (['query_feat', 'gallery_feat'], {}), '(query_feat, gallery_feat)\n', (9780, 9806), False, 'from evaluate import euclidean_dist\n'), ((2833, 2874), 'optim.make_optimizer', 'make_optimizer', (['cfg', 'self.model', 'num_gpus'], {}), '(cfg, self.model, num_gpus)\n', (2847, 2874), False, 'from optim import make_optimizer, WarmupMultiStepLR\n'), ((2904, 3051), 'optim.WarmupMultiStepLR', 'WarmupMultiStepLR', (['self.optim', 'cfg.SOLVER.STEPS', 'cfg.SOLVER.GAMMA', 'cfg.SOLVER.WARMUP_FACTOR', 'cfg.SOLVER.WARMUP_ITERS', 'cfg.SOLVER.WARMUP_METHOD'], {}), '(self.optim, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.\n SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n', (2921, 3051), False, 'from optim import make_optimizer, WarmupMultiStepLR\n'), ((6091, 6106), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6104, 6106), False, 'import torch\n'), ((8932, 8947), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8945, 8947), False, 'import torch\n'), ((1612, 1686), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (1648, 1686), False, 'import torch\n'), ((1770, 1816), 'apex.parallel.convert_syncbn_model', 'apex.parallel.convert_syncbn_model', (['self.model'], {}), '(self.model)\n', (1804, 1816), False, 'import apex\n'), ((1911, 1938), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.model'], {}), '(self.model)\n', (1926, 1938), True, 'import torch.nn as nn\n'), ((1968, 1993), 'model.convert_model', 'convert_model', (['self.model'], {}), '(self.model)\n', (1981, 1993), False, 'from model import convert_model\n'), ((2130, 2171), 'optim.make_optimizer', 'make_optimizer', (['cfg', 'self.model', 'num_gpus'], {}), '(cfg, self.model, num_gpus)\n', (2144, 2171), False, 'from optim import make_optimizer, WarmupMultiStepLR\n'), ((2205, 2352), 'optim.WarmupMultiStepLR', 'WarmupMultiStepLR', (['self.optim', 'cfg.SOLVER.STEPS', 'cfg.SOLVER.GAMMA', 'cfg.SOLVER.WARMUP_FACTOR', 'cfg.SOLVER.WARMUP_ITERS', 'cfg.SOLVER.WARMUP_METHOD'], {}), '(self.optim, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.\n SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)\n', (2222, 2352), False, 'from optim import make_optimizer, WarmupMultiStepLR\n'), ((3319, 3373), 'apex.amp.initialize', 'amp.initialize', (['self.model', 'self.optim'], {'opt_level': '"""O1"""'}), "(self.model, self.optim, opt_level='O1')\n", (3333, 3373), False, 'from apex import amp\n'), ((5582, 5614), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'self.optim'], {}), '(loss, self.optim)\n', (5596, 5614), False, 'from apex import amp\n'), ((8354, 8386), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'self.optim'], {}), '(loss, self.optim)\n', (8368, 8386), False, 'from apex import amp\n')]
|
#libraries for transformers training (text)
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from transformers import AutoConfig, AutoTokenizer
from arguments import args
from dataset import TextDataset
from evaluate import evaluate
from modelling.text_modelling import (AlbertForSentimentClassification,
BertForSentimentClassification,
DistilBertForSentimentClassification,
RobertaForSentimentClassification)
#libraries for 2D CNN training (audio)
import os
import numpy as np
import pandas as pd
import pickle
from dataset import AudioDataset
from modelling.audio_modelling import get_2d_conv_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import tensorflow as tf
from dataset import prepare_data
#libraries for wav2vec2 training (audio)
from transformers import Wav2Vec2Processor
import torchaudio
from transformers import EvalPrediction
from modelling.audio_modelling import Wav2Vec2ForSpeechClassification, DataCollatorCTCWithPadding, CTCTrainer
from transformers import TrainingArguments
from datasets import Dataset
'''For training transformers (text)'''
def train(model, criterion, optimizer, train_loader, val_loader, args):
best_acc = 0
for epoch in trange(args.num_eps, desc="Epoch"):
model.train()
for i, (input_ids, attention_mask, labels, FileID) in enumerate(tqdm(iterable=train_loader, desc="Training")):
optimizer.zero_grad()
input_ids, attention_mask, labels = input_ids.to(device), attention_mask.to(device), labels.to(device)
outputs = model(input_ids=input_ids, attention_mask=attention_mask)
loss = criterion(input=outputs, target=labels)
loss.backward()
optimizer.step()
        val_acc, val_loss = evaluate(model=model, criterion=criterion, dataloader=val_loader, device=device)  # validate once per epoch
print("Epoch {} complete! Validation Accuracy : {}, Validation Loss : {}".format(epoch, val_acc, val_loss))
if val_acc > best_acc:
print("Best validation accuracy improved from {} to {}, saving model...".format(best_acc, val_acc))
best_acc = val_acc
model.save_pretrained(save_directory=f'models/{args.output_dir}/')
config.save_pretrained(save_directory=f'models/{args.output_dir}/')
tokenizer.save_pretrained(save_directory=f'models/{args.output_dir}/')
'''preprocessing for wav2vec2 (audio)'''
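# NOTE: the wav2vec2 helpers below rely on module-level globals (processor,
# target_sampling_rate, input_column, output_column, label_list, is_regression)
# that are assigned in the wav2vec2 branch of the __main__ block further down.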
def speech_file_to_array_fn(path):
speech_array, sampling_rate = torchaudio.load(path)
resampler = torchaudio.transforms.Resample(sampling_rate, target_sampling_rate)
speech = resampler(speech_array).squeeze().numpy()
return speech
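# map a string label to its index in label_list; labels missing from the list map to -1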
def label_to_id(label, label_list):
if len(label_list) > 0:
return label_list.index(label) if label in label_list else -1
return label
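# batched map() hook: load and resample each clip, then extract wav2vec2 input values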
def preprocess_function(examples):
speech_list = [speech_file_to_array_fn(path) for path in examples[input_column]]
target_list = [label_to_id(label, label_list) for label in examples[output_column]]
result = processor(speech_list, sampling_rate=target_sampling_rate)
result["labels"] = list(target_list)
return result
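# metric hook for the HF Trainer: accuracy for classification, MSE when is_regression is set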
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
#remove audio files longer than max_seconds (41 seconds by default)
def remove_long_common_voicedata(dataset, max_seconds=41):
    dftest = dataset.to_pandas()
    dftest['len'] = dftest['input_values'].apply(len)
    maxLength = max_seconds * 16000
    dftest = dftest[dftest['len'] < maxLength]
    dftest = dftest.drop(columns='len')
    dataset = dataset.from_pandas(dftest)
    del dftest
    return dataset
if __name__ == "__main__":
#training text
if args.data_format == 'text':
if args.model_name_or_path is None:
args.model_name_or_path = 'roberta-base'
#Configuration for the desired transformer model
config = AutoConfig.from_pretrained(args.model_name_or_path)
#Tokenizer for the desired transformer model
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
#Create the model with the desired transformer model
if config.model_type == 'bert':
model = BertForSentimentClassification.from_pretrained(args.model_name_or_path, config=config)
elif config.model_type == 'albert':
model = AlbertForSentimentClassification.from_pretrained(args.model_name_or_path, config=config)
elif config.model_type == 'distilbert':
model = DistilBertForSentimentClassification.from_pretrained(args.model_name_or_path, config=config)
elif config.model_type == 'roberta':
model = RobertaForSentimentClassification.from_pretrained(args.model_name_or_path, config=config)
else:
raise ValueError('This transformer model is not supported yet.')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
#Takes as the input the logits of the positive class and computes the cross-entropy
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=model.parameters(), lr=args.lr)
train_set = TextDataset(filename='data/text/train_meld.tsv', maxlen=args.maxlen_train, tokenizer=tokenizer)
val_set = TextDataset(filename='data/text/dev_meld.tsv', maxlen=args.maxlen_val, tokenizer=tokenizer)
train_loader = DataLoader(dataset=train_set, batch_size=args.batch_size, num_workers=args.num_threads)
val_loader = DataLoader(dataset=val_set, batch_size=args.batch_size, num_workers=args.num_threads)
train(model=model, criterion=criterion, optimizer=optimizer, train_loader=train_loader, val_loader=val_loader, args=args)
#training audio
elif args.data_format == 'audio_2dcnn':
train_set = AudioDataset(filename='data/text/train_meld.tsv',filepath='data/audio/audio_meld/train', extension='wav')
train_set2 = AudioDataset(filename='data/text/train_crema.tsv',filepath='data/audio/audio_crema', extension='wav')
train_set3 = AudioDataset(filename='data/text/train_ravdess.tsv',filepath='data/audio/audio_ravdess', extension='wav')
train_set4 = AudioDataset(filename='data/text/train_emo.tsv',filepath='data/audio/audio_emo', extension='wav')
dev_set = AudioDataset(filename='data/text/dev_meld.tsv',filepath='data/audio/audio_meld/dev', extension='wav')
        #combine the train and dev sets into one dataframe before feature extraction - the train/test split happens below via train_test_split
train_set = train_set.__addpath__()
train_set2 = train_set2.__addpath__()
train_set3 = train_set3.__addpath__()
train_set4 = train_set4.__addpath__()
dev_set = dev_set.__addpath__()
dfs = [train_set, train_set2, train_set3, train_set4, dev_set]
#dfs = [train_set, dev_set]
        data = pd.concat(dfs, ignore_index=True)
print(data)
if args.feature == 'mfcc':
dirpath = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'models', 'my_model_audio', '2dcnn'))
#mfcc extraction as denoted by "mfcc=1"
mfcc = prepare_data(df=data, n=args.n_mfcc, mfcc=1, array_cols=args.array_cols, sampling_rate=args.sampling_rate,
audio_duration=args.audio_duration, n_mfcc=args.n_mfcc, n_melspec=args.n_melspec)
#split between train and test
X_train, X_test, y_train, y_test = train_test_split(mfcc, data.Sentiment, train_size=args.train_size,test_size=args.test_size, shuffle=True)
#one hot encode the target
lb = LabelEncoder()
y_train = np_utils.to_categorical(lb.fit_transform(y_train))
y_test = np_utils.to_categorical(lb.fit_transform(y_test))
#normalization as per the standard NN process
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean)/std
X_test = (X_test - mean)/std
#saving mean and std variables
pickle.dump([mean, std], open(str(dirpath) + "/norm_vals.pkl", 'wb'))
#run CNN model training
with tf.device("/gpu:0"):
model = get_2d_conv_model(n=args.n_mfcc, array_cols=args.array_cols)
                model_history = model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=args.batch_size, verbose=2, epochs=args.num_eps)
save_model_path = dirpath
#saving model to path
model.save(save_model_path)
print("Model saved to: ", save_model_path)
elif args.feature == 'melspec':
dirpath = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'models', 'my_model_audio', '2dcnn'))
#melspec extraction as denoted by "mfcc=0"
specgram = prepare_data(df=data, n=args.n_melspec, mfcc=0, array_cols=args.array_cols, sampling_rate=args.sampling_rate,
audio_duration=args.audio_duration, n_mfcc=args.n_mfcc, n_melspec=args.n_melspec)
#split between train and test
X_train, X_test, y_train, y_test = train_test_split(specgram, data.Sentiment, train_size=args.train_size,test_size=args.test_size, shuffle=False)
#one hot encode the target
lb = LabelEncoder()
y_train = np_utils.to_categorical(lb.fit_transform(y_train))
y_test = np_utils.to_categorical(lb.fit_transform(y_test))
#normalization as per the standard NN process
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean)/std
X_test = (X_test - mean)/std
#saving mean and std variables
pickle.dump([mean, std], open(str(dirpath) + "/norm_vals.pkl", 'wb'))
#run CNN model training
with tf.device("/gpu:0"):
model = get_2d_conv_model(n=args.n_melspec, array_cols=args.array_cols)
                model_history = model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=args.batch_size, verbose=2, epochs=args.num_eps)
save_model_path = dirpath
#save model to path
model.save(save_model_path)
print("Model saved to: ", save_model_path)
else:
raise ValueError("Please input the argument 'feature' and try again")
elif args.data_format == 'wav2vec2':
train_set = AudioDataset(filename='data/text/train_meld.tsv',filepath='data/audio/audio_meld/train', extension='wav')
train_set2 = AudioDataset(filename='data/text/train_crema.tsv',filepath='data/audio/audio_crema', extension='wav')
train_set3 = AudioDataset(filename='data/text/train_ravdess.tsv',filepath='data/audio/audio_ravdess', extension='wav')
train_set4 = AudioDataset(filename='data/text/train_emo.tsv',filepath='data/audio/audio_emo', extension='wav')
dev_set = AudioDataset(filename='data/text/dev_meld.tsv',filepath='data/audio/audio_meld/dev', extension='wav')
        #combine the extra training corpora into a single training dataframe; the dev set is kept separate for evaluation
train_set = train_set.__addpath__()
dev_set = dev_set.__addpath__()
train_set2 = train_set2.__addpath__()
train_set3 = train_set3.__addpath__()
train_set4 = train_set4.__addpath__()
dfs = [train_set, train_set2, train_set3, train_set4]
        train_set = pd.concat(dfs, ignore_index=True)
#pd.set_option('display.max_colwidth', None)
print(train_set)
print(dev_set)
encode_map = {0: 'negative', 1: 'neutral', 2: 'positive'}
train_set['Sentiment'].replace(encode_map, inplace=True)
dev_set['Sentiment'].replace(encode_map, inplace=True)
train_set = Dataset.from_pandas(train_set[:args.train_size])
dev_set = Dataset.from_pandas(dev_set[:args.test_size])
#specifying the input and output column
input_column = "path"
output_column = "Sentiment"
#distinguishing the unique labels in our SER dataset
label_list = train_set.unique(output_column)
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
print(f"A classification problem with {num_labels} classes: {label_list}")
#specifying model name, config and processor
model_name_or_path = args.model_name_or_path
if args.model_name_or_path is None:
model_name_or_path = "facebook/wav2vec2-base-960h"
pooling_mode = "mean"
config = AutoConfig.from_pretrained(
model_name_or_path,
num_labels=num_labels,
mask_time_prob=0.00,
label2id={label: i for i, label in enumerate(label_list)},
id2label={i: label for i, label in enumerate(label_list)},
finetuning_task="wav2vec2_clf",
)
setattr(config, 'pooling_mode', pooling_mode)
processor = Wav2Vec2Processor.from_pretrained(model_name_or_path)
target_sampling_rate = processor.feature_extractor.sampling_rate
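        # map() runs preprocess_function in batches of 100, loading and resampling every clip up front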
train_set = train_set.map(preprocess_function,batch_size=100,batched=True,num_proc=1)
dev_set = dev_set.map(preprocess_function,batch_size=100,batched=True,num_proc=1)
train_set = remove_long_common_voicedata(train_set, max_seconds=20)
dev_set = remove_long_common_voicedata(dev_set, max_seconds=20)
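        # the collator pads each batch to its longest clip (padding=True, HF-style dynamic padding)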
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
is_regression = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Wav2Vec2ForSpeechClassification.from_pretrained(model_name_or_path,config=config).to(device)
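        # freeze the convolutional feature extractor; only the transformer layers and classifier head are fine-tuned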
model.freeze_feature_extractor()
training_args = TrainingArguments(
output_dir=os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'models', 'my_model_audio', 'wav2vec2')),
overwrite_output_dir=True,
per_device_train_batch_size=args.per_device_train_batch_size,
per_device_eval_batch_size=args.per_device_eval_batch_size,
gradient_accumulation_steps=2,
evaluation_strategy="steps",
num_train_epochs=args.num_eps,
gradient_checkpointing=True,
save_steps=args.eval_steps,
eval_steps=args.eval_steps,
logging_steps=args.logging_steps,
learning_rate=args.lr,
weight_decay=0.005,
warmup_steps=args.warmup_steps,
save_total_limit=1,
load_best_model_at_end= True
)
trainer = CTCTrainer(
model=model,
data_collator=data_collator,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_set,
eval_dataset=dev_set,
tokenizer=processor.feature_extractor,
)
trainer.train()
else:
raise ValueError("Please input the argument 'data_format' and try again")
|
[
"evaluate.evaluate"
] |
[((1506, 1540), 'tqdm.trange', 'trange', (['args.num_eps'], {'desc': '"""Epoch"""'}), "(args.num_eps, desc='Epoch')\n", (1512, 1540), False, 'from tqdm import tqdm, trange\n'), ((2817, 2838), 'torchaudio.load', 'torchaudio.load', (['path'], {}), '(path)\n', (2832, 2838), False, 'import torchaudio\n'), ((2856, 2923), 'torchaudio.transforms.Resample', 'torchaudio.transforms.Resample', (['sampling_rate', 'target_sampling_rate'], {}), '(sampling_rate, target_sampling_rate)\n', (2886, 2923), False, 'import torchaudio\n'), ((2068, 2153), 'evaluate.evaluate', 'evaluate', ([], {'model': 'model', 'criterion': 'criterion', 'dataloader': 'val_loader', 'device': 'device'}), '(model=model, criterion=criterion, dataloader=val_loader, device=device\n )\n', (2076, 2153), False, 'from evaluate import evaluate\n'), ((3652, 3669), 'numpy.squeeze', 'np.squeeze', (['preds'], {}), '(preds)\n', (3662, 3669), True, 'import numpy as np\n'), ((3692, 3716), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (3701, 3716), True, 'import numpy as np\n'), ((4601, 4652), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4627, 4652), False, 'from transformers import AutoConfig, AutoTokenizer\n'), ((4730, 4784), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4759, 4784), False, 'from transformers import AutoConfig, AutoTokenizer\n'), ((5800, 5821), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5819, 5821), True, 'import torch.nn as nn\n'), ((5918, 6017), 'dataset.TextDataset', 'TextDataset', ([], {'filename': '"""data/text/train_meld.tsv"""', 'maxlen': 'args.maxlen_train', 'tokenizer': 'tokenizer'}), "(filename='data/text/train_meld.tsv', maxlen=args.maxlen_train,\n tokenizer=tokenizer)\n", (5929, 6017), False, 'from dataset import TextDataset\n'), ((6033, 6128), 'dataset.TextDataset', 'TextDataset', ([], {'filename': '"""data/text/dev_meld.tsv"""', 'maxlen': 'args.maxlen_val', 'tokenizer': 'tokenizer'}), "(filename='data/text/dev_meld.tsv', maxlen=args.maxlen_val,\n tokenizer=tokenizer)\n", (6044, 6128), False, 'from dataset import TextDataset\n'), ((6151, 6243), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_threads'}), '(dataset=train_set, batch_size=args.batch_size, num_workers=args.\n num_threads)\n', (6161, 6243), False, 'from torch.utils.data import DataLoader\n'), ((6261, 6351), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_set', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_threads'}), '(dataset=val_set, batch_size=args.batch_size, num_workers=args.\n num_threads)\n', (6271, 6351), False, 'from torch.utils.data import DataLoader\n'), ((1638, 1682), 'tqdm.tqdm', 'tqdm', ([], {'iterable': 'train_loader', 'desc': '"""Training"""'}), "(iterable=train_loader, desc='Training')\n", (1642, 1682), False, 'from tqdm import tqdm, trange\n'), ((4911, 5001), 'modelling.text_modelling.BertForSentimentClassification.from_pretrained', 'BertForSentimentClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.model_name_or_path,\n config=config)\n', (4957, 5001), False, 'from modelling.text_modelling import AlbertForSentimentClassification, BertForSentimentClassification, DistilBertForSentimentClassification, 
RobertaForSentimentClassification\n'), ((6587, 6698), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_meld.tsv"""', 'filepath': '"""data/audio/audio_meld/train"""', 'extension': '"""wav"""'}), "(filename='data/text/train_meld.tsv', filepath=\n 'data/audio/audio_meld/train', extension='wav')\n", (6599, 6698), False, 'from dataset import AudioDataset\n'), ((6715, 6822), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_crema.tsv"""', 'filepath': '"""data/audio/audio_crema"""', 'extension': '"""wav"""'}), "(filename='data/text/train_crema.tsv', filepath=\n 'data/audio/audio_crema', extension='wav')\n", (6727, 6822), False, 'from dataset import AudioDataset\n'), ((6839, 6950), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_ravdess.tsv"""', 'filepath': '"""data/audio/audio_ravdess"""', 'extension': '"""wav"""'}), "(filename='data/text/train_ravdess.tsv', filepath=\n 'data/audio/audio_ravdess', extension='wav')\n", (6851, 6950), False, 'from dataset import AudioDataset\n'), ((6967, 7070), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_emo.tsv"""', 'filepath': '"""data/audio/audio_emo"""', 'extension': '"""wav"""'}), "(filename='data/text/train_emo.tsv', filepath=\n 'data/audio/audio_emo', extension='wav')\n", (6979, 7070), False, 'from dataset import AudioDataset\n'), ((7084, 7191), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/dev_meld.tsv"""', 'filepath': '"""data/audio/audio_meld/dev"""', 'extension': '"""wav"""'}), "(filename='data/text/dev_meld.tsv', filepath=\n 'data/audio/audio_meld/dev', extension='wav')\n", (7096, 7191), False, 'from dataset import AudioDataset\n'), ((7666, 7699), 'pandas.concat', 'pd.concat', (['dfs'], {'ignore_index': '(True)'}), '(dfs, ignore_index=True)\n', (7675, 7699), True, 'import pandas as pd\n'), ((5064, 5156), 'modelling.text_modelling.AlbertForSentimentClassification.from_pretrained', 'AlbertForSentimentClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.model_name_or_path,\n config=config)\n', (5112, 5156), False, 'from modelling.text_modelling import AlbertForSentimentClassification, BertForSentimentClassification, DistilBertForSentimentClassification, RobertaForSentimentClassification\n'), ((5611, 5636), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5634, 5636), False, 'import torch\n'), ((8003, 8199), 'dataset.prepare_data', 'prepare_data', ([], {'df': 'data', 'n': 'args.n_mfcc', 'mfcc': '(1)', 'array_cols': 'args.array_cols', 'sampling_rate': 'args.sampling_rate', 'audio_duration': 'args.audio_duration', 'n_mfcc': 'args.n_mfcc', 'n_melspec': 'args.n_melspec'}), '(df=data, n=args.n_mfcc, mfcc=1, array_cols=args.array_cols,\n sampling_rate=args.sampling_rate, audio_duration=args.audio_duration,\n n_mfcc=args.n_mfcc, n_melspec=args.n_melspec)\n', (8015, 8199), False, 'from dataset import prepare_data\n'), ((8298, 8408), 'sklearn.model_selection.train_test_split', 'train_test_split', (['mfcc', 'data.Sentiment'], {'train_size': 'args.train_size', 'test_size': 'args.test_size', 'shuffle': '(True)'}), '(mfcc, data.Sentiment, train_size=args.train_size,\n test_size=args.test_size, shuffle=True)\n', (8314, 8408), False, 'from sklearn.model_selection import train_test_split\n'), ((8463, 8477), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (8475, 8477), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((8703, 8727), 
'numpy.mean', 'np.mean', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (8710, 8727), True, 'import numpy as np\n'), ((8747, 8770), 'numpy.std', 'np.std', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (8753, 8770), True, 'import numpy as np\n'), ((11449, 11560), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_meld.tsv"""', 'filepath': '"""data/audio/audio_meld/train"""', 'extension': '"""wav"""'}), "(filename='data/text/train_meld.tsv', filepath=\n 'data/audio/audio_meld/train', extension='wav')\n", (11461, 11560), False, 'from dataset import AudioDataset\n'), ((11577, 11684), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_crema.tsv"""', 'filepath': '"""data/audio/audio_crema"""', 'extension': '"""wav"""'}), "(filename='data/text/train_crema.tsv', filepath=\n 'data/audio/audio_crema', extension='wav')\n", (11589, 11684), False, 'from dataset import AudioDataset\n'), ((11701, 11812), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_ravdess.tsv"""', 'filepath': '"""data/audio/audio_ravdess"""', 'extension': '"""wav"""'}), "(filename='data/text/train_ravdess.tsv', filepath=\n 'data/audio/audio_ravdess', extension='wav')\n", (11713, 11812), False, 'from dataset import AudioDataset\n'), ((11829, 11932), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/train_emo.tsv"""', 'filepath': '"""data/audio/audio_emo"""', 'extension': '"""wav"""'}), "(filename='data/text/train_emo.tsv', filepath=\n 'data/audio/audio_emo', extension='wav')\n", (11841, 11932), False, 'from dataset import AudioDataset\n'), ((11946, 12053), 'dataset.AudioDataset', 'AudioDataset', ([], {'filename': '"""data/text/dev_meld.tsv"""', 'filepath': '"""data/audio/audio_meld/dev"""', 'extension': '"""wav"""'}), "(filename='data/text/dev_meld.tsv', filepath=\n 'data/audio/audio_meld/dev', extension='wav')\n", (11958, 12053), False, 'from dataset import AudioDataset\n'), ((12488, 12521), 'pandas.concat', 'pd.concat', (['dfs'], {'ignore_index': '(True)'}), '(dfs, ignore_index=True)\n', (12497, 12521), True, 'import pandas as pd\n'), ((12891, 12939), 'datasets.Dataset.from_pandas', 'Dataset.from_pandas', (['train_set[:args.train_size]'], {}), '(train_set[:args.train_size])\n', (12910, 12939), False, 'from datasets import Dataset\n'), ((12959, 13004), 'datasets.Dataset.from_pandas', 'Dataset.from_pandas', (['dev_set[:args.test_size]'], {}), '(dev_set[:args.test_size])\n', (12978, 13004), False, 'from datasets import Dataset\n'), ((14103, 14156), 'transformers.Wav2Vec2Processor.from_pretrained', 'Wav2Vec2Processor.from_pretrained', (['model_name_or_path'], {}), '(model_name_or_path)\n', (14136, 14156), False, 'from transformers import Wav2Vec2Processor\n'), ((14606, 14667), 'modelling.audio_modelling.DataCollatorCTCWithPadding', 'DataCollatorCTCWithPadding', ([], {'processor': 'processor', 'padding': '(True)'}), '(processor=processor, padding=True)\n', (14632, 14667), False, 'from modelling.audio_modelling import Wav2Vec2ForSpeechClassification, DataCollatorCTCWithPadding, CTCTrainer\n'), ((15841, 16041), 'modelling.audio_modelling.CTCTrainer', 'CTCTrainer', ([], {'model': 'model', 'data_collator': 'data_collator', 'args': 'training_args', 'compute_metrics': 'compute_metrics', 'train_dataset': 'train_set', 'eval_dataset': 'dev_set', 'tokenizer': 'processor.feature_extractor'}), '(model=model, data_collator=data_collator, args=training_args,\n compute_metrics=compute_metrics, train_dataset=train_set, 
eval_dataset=\n dev_set, tokenizer=processor.feature_extractor)\n', (15851, 16041), False, 'from modelling.audio_modelling import Wav2Vec2ForSpeechClassification, DataCollatorCTCWithPadding, CTCTrainer\n'), ((5223, 5320), 'modelling.text_modelling.DistilBertForSentimentClassification.from_pretrained', 'DistilBertForSentimentClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.\n model_name_or_path, config=config)\n', (5275, 5320), False, 'from modelling.text_modelling import AlbertForSentimentClassification, BertForSentimentClassification, DistilBertForSentimentClassification, RobertaForSentimentClassification\n'), ((9039, 9058), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (9048, 9058), True, 'import tensorflow as tf\n'), ((9085, 9145), 'modelling.audio_modelling.get_2d_conv_model', 'get_2d_conv_model', ([], {'n': 'args.n_mfcc', 'array_cols': 'args.array_cols'}), '(n=args.n_mfcc, array_cols=args.array_cols)\n', (9102, 9145), False, 'from modelling.audio_modelling import get_2d_conv_model\n'), ((9757, 9956), 'dataset.prepare_data', 'prepare_data', ([], {'df': 'data', 'n': 'args.n_melspec', 'mfcc': '(0)', 'array_cols': 'args.array_cols', 'sampling_rate': 'args.sampling_rate', 'audio_duration': 'args.audio_duration', 'n_mfcc': 'args.n_mfcc', 'n_melspec': 'args.n_melspec'}), '(df=data, n=args.n_melspec, mfcc=0, array_cols=args.array_cols,\n sampling_rate=args.sampling_rate, audio_duration=args.audio_duration,\n n_mfcc=args.n_mfcc, n_melspec=args.n_melspec)\n', (9769, 9956), False, 'from dataset import prepare_data\n'), ((10055, 10170), 'sklearn.model_selection.train_test_split', 'train_test_split', (['specgram', 'data.Sentiment'], {'train_size': 'args.train_size', 'test_size': 'args.test_size', 'shuffle': '(False)'}), '(specgram, data.Sentiment, train_size=args.train_size,\n test_size=args.test_size, shuffle=False)\n', (10071, 10170), False, 'from sklearn.model_selection import train_test_split\n'), ((10225, 10239), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (10237, 10239), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((10465, 10489), 'numpy.mean', 'np.mean', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (10472, 10489), True, 'import numpy as np\n'), ((10509, 10532), 'numpy.std', 'np.std', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (10515, 10532), True, 'import numpy as np\n'), ((5383, 5476), 'modelling.text_modelling.RobertaForSentimentClassification.from_pretrained', 'RobertaForSentimentClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.model_name_or_path,\n config=config)\n', (5432, 5476), False, 'from modelling.text_modelling import AlbertForSentimentClassification, BertForSentimentClassification, DistilBertForSentimentClassification, RobertaForSentimentClassification\n'), ((7841, 7866), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7856, 7866), False, 'import os\n'), ((10801, 10820), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (10810, 10820), True, 'import tensorflow as tf\n'), ((10847, 10910), 'modelling.audio_modelling.get_2d_conv_model', 'get_2d_conv_model', ([], {'n': 'args.n_melspec', 'array_cols': 'args.array_cols'}), '(n=args.n_melspec, array_cols=args.array_cols)\n', (10864, 10910), False, 'from modelling.audio_modelling import get_2d_conv_model\n'), ((14750, 14775), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14773, 
14775), False, 'import torch\n'), ((14805, 14892), 'modelling.audio_modelling.Wav2Vec2ForSpeechClassification.from_pretrained', 'Wav2Vec2ForSpeechClassification.from_pretrained', (['model_name_or_path'], {'config': 'config'}), '(model_name_or_path, config=\n config)\n', (14852, 14892), False, 'from modelling.audio_modelling import Wav2Vec2ForSpeechClassification, DataCollatorCTCWithPadding, CTCTrainer\n'), ((9588, 9613), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9603, 9613), False, 'import os\n'), ((15042, 15067), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15057, 15067), False, 'import os\n')]
|
import torch
from collections import OrderedDict
from evaluate.coco_eval import run_eval
from lib.network.rtpose_vgg import get_model, use_vgg
with torch.autograd.no_grad():
weight_name = './network/weight/best_pose.pth'
state_dict = torch.load(weight_name)
new_state_dict = OrderedDict()
for k,v in state_dict.items():
name = k[7:]
new_state_dict[name]=v
model = get_model(trunk='vgg19')
model.load_state_dict(new_state_dict)
model.eval()
model.float()
model = model.cuda()
run_eval(image_dir= 'E:/Deep_learning_Final/dataset/images/infant_val', anno_file = 'E:/Deep_learning_Final/dataset/annotations/annotation_val.json', vis_dir = 'E:/Deep_learning_Final/dataset/images/vis_data', model=model, preprocess='vgg')
|
[
"evaluate.coco_eval.run_eval"
] |
[((245, 269), 'torch.autograd.no_grad', 'torch.autograd.no_grad', ([], {}), '()\n', (267, 269), False, 'import torch\n'), ((339, 362), 'torch.load', 'torch.load', (['weight_name'], {}), '(weight_name)\n', (349, 362), False, 'import torch\n'), ((389, 402), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (400, 402), False, 'from collections import OrderedDict\n'), ((511, 535), 'lib.network.rtpose_vgg.get_model', 'get_model', ([], {'trunk': '"""vgg19"""'}), "(trunk='vgg19')\n", (520, 535), False, 'from lib.network.rtpose_vgg import get_model, use_vgg\n'), ((648, 900), 'evaluate.coco_eval.run_eval', 'run_eval', ([], {'image_dir': '"""E:/Deep_learning_Final/dataset/images/infant_val"""', 'anno_file': '"""E:/Deep_learning_Final/dataset/annotations/annotation_val.json"""', 'vis_dir': '"""E:/Deep_learning_Final/dataset/images/vis_data"""', 'model': 'model', 'preprocess': '"""vgg"""'}), "(image_dir='E:/Deep_learning_Final/dataset/images/infant_val',\n anno_file=\n 'E:/Deep_learning_Final/dataset/annotations/annotation_val.json',\n vis_dir='E:/Deep_learning_Final/dataset/images/vis_data', model=model,\n preprocess='vgg')\n", (656, 900), False, 'from evaluate.coco_eval import run_eval\n')]
|
import argparse
from args import init_parser, post_processing
import numpy as np
from envs import make_env
# find the carla module
import os
import math
import random
import time
import torch
import shutil
parser = argparse.ArgumentParser(description='SPC')
init_parser(parser) # See `args.py` for default arguments
args = parser.parse_args()
args = post_processing(args)
CARLA8_TIMEOUT = 100000
CARLA9_TIMEOUT = 20.0
def init_dirs(dir_list):
for path in dir_list:
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
def setup_dirs(args):
save_path = args.save_path
model_path = os.path.join(save_path, 'model')
optim_path = os.path.join(save_path, 'optimizer')
init_dirs([model_path, optim_path])
def create_carla9_env(args):
from envs.CARLA.carla9 import World
import carla # here the carla is installed by pip/conda
try:
import glob
import sys
sys.path.append(glob.glob('**/*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
client = carla.Client("localhost", args.port)
client.set_timeout(CARLA9_TIMEOUT)
carla_world = client.get_world()
settings = carla_world.get_settings()
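    # synchronous mode: the simulator advances only when the client ticks, keeping rollouts deterministic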
settings.synchronous_mode = True
client.get_world().apply_settings(settings)
env = World(args, carla_world)
return env
def main():
if not args.resume and os.path.isdir(args.save_path):
print("the save path has already existed!")
exit(0)
setup_dirs(args)
script_path = os.path.join(args.save_path, 'scripts')
if not os.path.isdir(script_path):
shutil.copytree('scripts', script_path)
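    # seed torch, numpy and random together so runs are reproducible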
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
env = None # placeholder
if 'carla9' in args.env:
# select CARLA v0.9.x as the platform
env = create_carla9_env(args)
elif 'carla8' in args.env:
# select CARLA v0.8.x as the platform
from envs.CARLA.carla_lib.carla.client import make_carla_client
from envs.CARLA.carla_env import CarlaEnv
client = make_carla_client('localhost', args.port, CARLA8_TIMEOUT)
env = CarlaEnv(client, args)
else:
# select PyTorcs or GTAV as the platform
# which is basically inherited from SPC, not fully supported in IPC
env = make_env(args)
if args.eval:
from evaluate import evaluate_policy
evaluate_policy(args, env)
else:
from train import train_policy
train_policy(args, env)
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate_policy"
] |
[((219, 261), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SPC"""'}), "(description='SPC')\n", (242, 261), False, 'import argparse\n'), ((262, 281), 'args.init_parser', 'init_parser', (['parser'], {}), '(parser)\n', (273, 281), False, 'from args import init_parser, post_processing\n'), ((355, 376), 'args.post_processing', 'post_processing', (['args'], {}), '(args)\n', (370, 376), False, 'from args import init_parser, post_processing\n'), ((629, 661), 'os.path.join', 'os.path.join', (['save_path', '"""model"""'], {}), "(save_path, 'model')\n", (641, 661), False, 'import os\n'), ((679, 715), 'os.path.join', 'os.path.join', (['save_path', '"""optimizer"""'], {}), "(save_path, 'optimizer')\n", (691, 715), False, 'import os\n'), ((1183, 1219), 'carla.Client', 'carla.Client', (['"""localhost"""', 'args.port'], {}), "('localhost', args.port)\n", (1195, 1219), False, 'import carla\n'), ((1433, 1457), 'envs.CARLA.carla9.World', 'World', (['args', 'carla_world'], {}), '(args, carla_world)\n', (1438, 1457), False, 'from envs.CARLA.carla9 import World\n'), ((1658, 1697), 'os.path.join', 'os.path.join', (['args.save_path', '"""scripts"""'], {}), "(args.save_path, 'scripts')\n", (1670, 1697), False, 'import os\n'), ((1789, 1817), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1806, 1817), False, 'import torch\n'), ((1822, 1847), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1836, 1847), True, 'import numpy as np\n'), ((1852, 1874), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1863, 1874), False, 'import random\n'), ((1514, 1543), 'os.path.isdir', 'os.path.isdir', (['args.save_path'], {}), '(args.save_path)\n', (1527, 1543), False, 'import os\n'), ((1709, 1735), 'os.path.isdir', 'os.path.isdir', (['script_path'], {}), '(script_path)\n', (1722, 1735), False, 'import os\n'), ((1745, 1784), 'shutil.copytree', 'shutil.copytree', (['"""scripts"""', 'script_path'], {}), "('scripts', script_path)\n", (1760, 1784), False, 'import shutil\n'), ((2564, 2590), 'evaluate.evaluate_policy', 'evaluate_policy', (['args', 'env'], {}), '(args, env)\n', (2579, 2590), False, 'from evaluate import evaluate_policy\n'), ((2648, 2671), 'train.train_policy', 'train_policy', (['args', 'env'], {}), '(args, env)\n', (2660, 2671), False, 'from train import train_policy\n'), ((491, 510), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (504, 510), False, 'import os\n'), ((524, 556), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (535, 556), False, 'import os\n'), ((2233, 2290), 'envs.CARLA.carla_lib.carla.client.make_carla_client', 'make_carla_client', (['"""localhost"""', 'args.port', 'CARLA8_TIMEOUT'], {}), "('localhost', args.port, CARLA8_TIMEOUT)\n", (2250, 2290), False, 'from envs.CARLA.carla_lib.carla.client import make_carla_client\n'), ((2305, 2327), 'envs.CARLA.carla_env.CarlaEnv', 'CarlaEnv', (['client', 'args'], {}), '(client, args)\n', (2313, 2327), False, 'from envs.CARLA.carla_env import CarlaEnv\n'), ((2477, 2491), 'envs.make_env', 'make_env', (['args'], {}), '(args)\n', (2485, 2491), False, 'from envs import make_env\n'), ((959, 1096), 'glob.glob', 'glob.glob', (["('**/*%d.%d-%s.egg' % (sys.version_info.major, sys.version_info.minor, \n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))"], {}), "('**/*%d.%d-%s.egg' % (sys.version_info.major, sys.version_info.\n minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))\n", (968, 1096), 
False, 'import glob\n')]
|
from evaluate import Evaluation
from collections import Counter
#class representing a five-card poker hand
class PokerHand:
def __init__(
self,
hand,
first_card=None,
second_card=None,
third_card=None,
fourth_card=None,
fifth_card=None,
):
self.hand = hand
self.first_card = first_card
self.second_card = second_card
self.third_card = third_card
self.fourth_card = fourth_card
self.fifth_card = fifth_card
    #return the hand's five cards as a list
def get_cards(self):
cards = self.hand.split()
self.first_card = cards[0]
self.second_card = cards[1]
self.third_card = cards[2]
self.fourth_card = cards[3]
self.fifth_card = cards[4]
        list_cards = [
            self.first_card,
            self.second_card,
            self.third_card,
            self.fourth_card,
            self.fifth_card,
        ]
        return list_cards
    # score the hand by delegating to the Evaluation class from evaluate.py
def evaluate_hand(self,list_cards):
evaluation = Evaluation(list_cards).evaluate_hand()
return evaluation
# Here we compare two hands
def compare_with(self,poker_hand2):
cards1 = self.get_cards()
cards2 = poker_hand2.get_cards()
score1 = self.evaluate_hand(cards1)
score2 = self.evaluate_hand(cards2)
if score1 > score2:
ans = "WIN"
elif score1 < score2:
ans = "LOSS"
else :
aux = self.check_high_card(cards1,cards2)
if aux == 1:
ans = "WIN"
elif aux == 2:
ans = "LOSS"
else :
ans = "tie"
return ans
    # When the scores tie, compare card ranks (grouped by frequency) to break the tie
@staticmethod
def check_high_card(cards1,cards2):
A = [card[0] for card in cards1]
B = [card[0] for card in cards2]
dic1 = {}
dic2 = {}
#detection of a lower hand
if set(A) == set(["A", "2", "3", "4", "5"]):
dic1 = {"T":'10',"J":'11',"Q":'12',"K":'13',"A":'1'}
else:
dic1 = {"T":'10',"J":'11',"Q":'12',"K":'13',"A":'14'}
if set(B) == set(["A", "2", "3", "4", "5"]):
dic2 = {"T":'10',"J":'11',"Q":'12',"K":'13',"A":'1'}
else:
dic2 = {"T":'10',"J":'11',"Q":'12',"K":'13',"A":'14'}
values1 = [dic1.get(n, n) for n in A]
values2 = [dic2.get(n, n) for n in B]
        # sort the card values by repetition count, then by rank, so pairs/trips are
        # compared before kickers and equal-count cards are compared high to low
        counts1 = Counter(values1)
        new_list1 = sorted(counts1, key=lambda x: (counts1[x], int(x)), reverse=True)
        counts2 = Counter(values2)
        new_list2 = sorted(counts2, key=lambda x: (counts2[x], int(x)), reverse=True)
winner_hand = 0
for(a,b) in zip(new_list1,new_list2):
if winner_hand == 0:
if int(a) > int(b):
winner_hand = 1
elif int(b) > int(a):
winner_hand = 2
else:
pass
return winner_hand
|
[
"evaluate.Evaluation"
] |
[((2339, 2355), 'collections.Counter', 'Counter', (['values1'], {}), '(values1)\n', (2346, 2355), False, 'from collections import Counter\n'), ((2441, 2457), 'collections.Counter', 'Counter', (['values2'], {}), '(values2)\n', (2448, 2457), False, 'from collections import Counter\n'), ((1053, 1075), 'evaluate.Evaluation', 'Evaluation', (['list_cards'], {}), '(list_cards)\n', (1063, 1075), False, 'from evaluate import Evaluation\n')]
|
import numpy as np
import random
import itertools as it
import pygame as pg
# Local import
import env
import agentsEnv as ag
import reward
import policyGradient as PG
from model import GeneratePolicyNet
from evaluate import Evaluate
from visualize import draw
def main():
# action space
actionSpace = [[10, 0], [7, 7], [0, 10], [-7, 7], [-10, 0], [-7, -7], [0, -10], [7, -7]]
numActionSpace = len(actionSpace)
# state space
numStateSpace = 4
xBoundary = [0, 360]
yBoundary = [0, 360]
checkBoundaryAndAdjust = ag.CheckBoundaryAndAdjust(xBoundary, yBoundary)
initSheepPositionMean = np.array([180, 180])
initWolfPositionMean = np.array([180, 180])
initSheepPositionNoise = np.array([120, 120])
initWolfPositionNoise = np.array([60, 60])
sheepPositionReset = ag.SheepPositionReset(initSheepPositionMean, initSheepPositionNoise, checkBoundaryAndAdjust)
wolfPositionReset = ag.WolfPositionReset(initWolfPositionMean, initWolfPositionNoise, checkBoundaryAndAdjust)
numOneAgentState = 2
positionIndex = [0, 1]
sheepPositionTransition = ag.SheepPositionTransition(numOneAgentState, positionIndex, checkBoundaryAndAdjust)
wolfPositionTransition = ag.WolfPositionTransition(numOneAgentState, positionIndex, checkBoundaryAndAdjust)
numAgent = 2
sheepId = 0
wolfId = 1
transitionFunction = env.TransitionFunction(sheepId, wolfId, sheepPositionReset, wolfPositionReset,
sheepPositionTransition, wolfPositionTransition)
minDistance = 15
isTerminal = env.IsTerminal(sheepId, wolfId, numOneAgentState, positionIndex, minDistance)
screen = pg.display.set_mode([xBoundary[1], yBoundary[1]])
screenColor = [255, 255, 255]
circleColorList = [[50, 255, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50],
[50, 50, 50], [50, 50, 50], [50, 50, 50]]
circleSize = 8
saveImage = False
saveImageFile = 'image'
render = env.Render(numAgent, numOneAgentState, positionIndex, screen, screenColor, circleColorList, circleSize,
saveImage, saveImageFile)
    aliveBonus = -1
    deathPenalty = 20
    rewardDecay = 0.99
    rewardFunction = reward.TerminalPenalty(sheepId, wolfId, numOneAgentState, positionIndex, aliveBonus, deathPenalty, isTerminal)
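    # AccumulateRewards turns per-step rewards into discounted returns with decay rewardDecay = 0.99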
accumulateRewards = PG.AccumulateRewards(rewardDecay, rewardFunction)
maxTimeStep = 150
sampleTrajectory = PG.SampleTrajectory(maxTimeStep, transitionFunction, isTerminal)
approximatePolicy = PG.ApproximatePolicy(actionSpace)
trainPG = PG.TrainTensorflow(actionSpace)
numTrajectory = 20
maxEpisode = 1000
# Generate models.
learningRate = 1e-4
hiddenNeuronNumbers = [128, 256, 512, 1024]
hiddenDepths = [2, 4, 8]
# hiddenNeuronNumbers = [128]
# hiddenDepths = [2]
generateModel = GeneratePolicyNet(numStateSpace, numActionSpace, learningRate)
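    # sweep (total neurons, depth) pairs; the per-layer width round(n / d) keeps the neuron budget roughly fixed across depths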
models = {(n, d): generateModel(d, round(n / d)) for n, d in it.product(hiddenNeuronNumbers, hiddenDepths)}
print("Models generated")
# Train.
policyGradient = PG.PolicyGradient(numTrajectory, maxEpisode, render)
trainModel = lambda model: policyGradient(model, approximatePolicy,
sampleTrajectory,
accumulateRewards,
trainPG)
trainedModels = {key: trainModel(model) for key, model in models.items()}
print("Finished training")
# Evaluate
modelEvaluate = Evaluate(numTrajectory, approximatePolicy, sampleTrajectory, rewardFunction)
meanEpisodeRewards = {key: modelEvaluate(model) for key, model in trainedModels.items()}
print("Finished evaluating")
# print(meanEpisodeRewards)
# Visualize
independentVariableNames = ['NeuroTotalNumber', 'layerNumber']
draw(meanEpisodeRewards, independentVariableNames)
print("Finished visualizing", meanEpisodeRewards)
if __name__ == "__main__":
main()
|
[
"evaluate.Evaluate"
] |
[((546, 593), 'agentsEnv.CheckBoundaryAndAdjust', 'ag.CheckBoundaryAndAdjust', (['xBoundary', 'yBoundary'], {}), '(xBoundary, yBoundary)\n', (571, 593), True, 'import agentsEnv as ag\n'), ((623, 643), 'numpy.array', 'np.array', (['[180, 180]'], {}), '([180, 180])\n', (631, 643), True, 'import numpy as np\n'), ((671, 691), 'numpy.array', 'np.array', (['[180, 180]'], {}), '([180, 180])\n', (679, 691), True, 'import numpy as np\n'), ((721, 741), 'numpy.array', 'np.array', (['[120, 120]'], {}), '([120, 120])\n', (729, 741), True, 'import numpy as np\n'), ((770, 788), 'numpy.array', 'np.array', (['[60, 60]'], {}), '([60, 60])\n', (778, 788), True, 'import numpy as np\n'), ((814, 910), 'agentsEnv.SheepPositionReset', 'ag.SheepPositionReset', (['initSheepPositionMean', 'initSheepPositionNoise', 'checkBoundaryAndAdjust'], {}), '(initSheepPositionMean, initSheepPositionNoise,\n checkBoundaryAndAdjust)\n', (835, 910), True, 'import agentsEnv as ag\n'), ((931, 1024), 'agentsEnv.WolfPositionReset', 'ag.WolfPositionReset', (['initWolfPositionMean', 'initWolfPositionNoise', 'checkBoundaryAndAdjust'], {}), '(initWolfPositionMean, initWolfPositionNoise,\n checkBoundaryAndAdjust)\n', (951, 1024), True, 'import agentsEnv as ag\n'), ((1105, 1192), 'agentsEnv.SheepPositionTransition', 'ag.SheepPositionTransition', (['numOneAgentState', 'positionIndex', 'checkBoundaryAndAdjust'], {}), '(numOneAgentState, positionIndex,\n checkBoundaryAndAdjust)\n', (1131, 1192), True, 'import agentsEnv as ag\n'), ((1218, 1304), 'agentsEnv.WolfPositionTransition', 'ag.WolfPositionTransition', (['numOneAgentState', 'positionIndex', 'checkBoundaryAndAdjust'], {}), '(numOneAgentState, positionIndex,\n checkBoundaryAndAdjust)\n', (1243, 1304), True, 'import agentsEnv as ag\n'), ((1375, 1506), 'env.TransitionFunction', 'env.TransitionFunction', (['sheepId', 'wolfId', 'sheepPositionReset', 'wolfPositionReset', 'sheepPositionTransition', 'wolfPositionTransition'], {}), '(sheepId, wolfId, sheepPositionReset,\n wolfPositionReset, sheepPositionTransition, wolfPositionTransition)\n', (1397, 1506), False, 'import env\n'), ((1589, 1666), 'env.IsTerminal', 'env.IsTerminal', (['sheepId', 'wolfId', 'numOneAgentState', 'positionIndex', 'minDistance'], {}), '(sheepId, wolfId, numOneAgentState, positionIndex, minDistance)\n', (1603, 1666), False, 'import env\n'), ((1681, 1730), 'pygame.display.set_mode', 'pg.display.set_mode', (['[xBoundary[1], yBoundary[1]]'], {}), '([xBoundary[1], yBoundary[1]])\n', (1700, 1730), True, 'import pygame as pg\n'), ((2020, 2153), 'env.Render', 'env.Render', (['numAgent', 'numOneAgentState', 'positionIndex', 'screen', 'screenColor', 'circleColorList', 'circleSize', 'saveImage', 'saveImageFile'], {}), '(numAgent, numOneAgentState, positionIndex, screen, screenColor,\n circleColorList, circleSize, saveImage, saveImageFile)\n', (2030, 2153), False, 'import env\n'), ((2261, 2375), 'reward.TerminalPenalty', 'reward.TerminalPenalty', (['sheepId', 'wolfId', 'numOneAgentState', 'positionIndex', 'aliveBouns', 'deathPenalty', 'isTerminal'], {}), '(sheepId, wolfId, numOneAgentState, positionIndex,\n aliveBouns, deathPenalty, isTerminal)\n', (2283, 2375), False, 'import reward\n'), ((2396, 2445), 'policyGradient.AccumulateRewards', 'PG.AccumulateRewards', (['rewardDecay', 'rewardFunction'], {}), '(rewardDecay, rewardFunction)\n', (2416, 2445), True, 'import policyGradient as PG\n'), ((2492, 2556), 'policyGradient.SampleTrajectory', 'PG.SampleTrajectory', (['maxTimeStep', 'transitionFunction', 'isTerminal'], {}), '(maxTimeStep, 
transitionFunction, isTerminal)\n', (2511, 2556), True, 'import policyGradient as PG\n'), ((2582, 2615), 'policyGradient.ApproximatePolicy', 'PG.ApproximatePolicy', (['actionSpace'], {}), '(actionSpace)\n', (2602, 2615), True, 'import policyGradient as PG\n'), ((2630, 2661), 'policyGradient.TrainTensorflow', 'PG.TrainTensorflow', (['actionSpace'], {}), '(actionSpace)\n', (2648, 2661), True, 'import policyGradient as PG\n'), ((2912, 2974), 'model.GeneratePolicyNet', 'GeneratePolicyNet', (['numStateSpace', 'numActionSpace', 'learningRate'], {}), '(numStateSpace, numActionSpace, learningRate)\n', (2929, 2974), False, 'from model import GeneratePolicyNet\n'), ((3152, 3204), 'policyGradient.PolicyGradient', 'PG.PolicyGradient', (['numTrajectory', 'maxEpisode', 'render'], {}), '(numTrajectory, maxEpisode, render)\n', (3169, 3204), True, 'import policyGradient as PG\n'), ((3651, 3727), 'evaluate.Evaluate', 'Evaluate', (['numTrajectory', 'approximatePolicy', 'sampleTrajectory', 'rewardFunction'], {}), '(numTrajectory, approximatePolicy, sampleTrajectory, rewardFunction)\n', (3659, 3727), False, 'from evaluate import Evaluate\n'), ((3974, 4024), 'visualize.draw', 'draw', (['meanEpisodeRewards', 'independentVariableNames'], {}), '(meanEpisodeRewards, independentVariableNames)\n', (3978, 4024), False, 'from visualize import draw\n'), ((3040, 3085), 'itertools.product', 'it.product', (['hiddenNeuronNumbers', 'hiddenDepths'], {}), '(hiddenNeuronNumbers, hiddenDepths)\n', (3050, 3085), True, 'import itertools as it\n')]
|
import os
import logging
import tempfile
import argparse
import numpy as np
import torch
import torchmodels
import torch.optim as op
import torch.utils.data as td
import utils
import models
import models.jlu
import dataset
import evaluate
import inference
from . import embeds
MODES = ["word", "label", "intent"]
parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument("--debug", action="store_true", default=False)
group = parser.add_argument_group("Logging Options")
utils.add_logging_arguments(group, "train")
group.add_argument("--show-progress", action="store_true", default=False)
group = parser.add_argument_group("Model Options")
group.add_argument("--model-path", required=True)
group.add_argument("--hidden-dim", type=int, default=300)
group.add_argument("--word-dim", type=int, default=300)
group = parser.add_argument_group("Data Options")
for mode in MODES:
group.add_argument(f"--{mode}-path", type=str, required=True)
group.add_argument(f"--{mode}-vocab", type=str, default=None)
group.add_argument("--vocab-limit", type=int, default=None)
group.add_argument("--data-workers", type=int, default=8)
group.add_argument("--pin-memory", action="store_true", default=False)
group.add_argument("--shuffle", action="store_true", default=False)
group.add_argument("--max-length", type=int, default=None)
group.add_argument("--seed", type=int, default=None)
group.add_argument("--unk", type=str, default="<unk>")
group.add_argument("--eos", type=str, default="<eos>")
group.add_argument("--bos", type=str, default="<bos>")
group = parser.add_argument_group("Training Options")
group.add_argument("--save-dir", type=str, required=True)
group.add_argument("--save-period", type=int, default=None)
group.add_argument("--batch-size", type=int, default=32)
group.add_argument("--epochs", type=int, default=12)
group.add_argument("--optimizer", type=str, default="adam",
choices=["adam", "adamax", "adagrad", "adadelta", "sgd"])
group.add_argument("--loss-alpha", type=float, default=1.0)
group.add_argument("--loss-beta", type=float, default=1.0)
group.add_argument("--early-stop", action="store_true", default=False)
group.add_argument("--early-stop-patience", type=int, default=5)
group.add_argument("--early-stop-save", action="store_true", default=False)
group.add_argument("--early-stop-criterion", default="acc-label")
group.add_argument("--learning-rate", type=float, default=None)
group.add_argument("--weight-decay", type=float, default=None)
group.add_argument("--samples", type=int, default=None)
group.add_argument("--log-stats", action="store_true", default=False)
group.add_argument("--tensorboard", action="store_true", default=False)
group.add_argument("--resume-from")
group.add_argument("--gpu", type=int, action="append", default=[])
group = parser.add_argument_group("Validation Options")
group.add_argument("--validate", action="store_true", default=False)
for mode in MODES:
group.add_argument(f"--val-{mode}-path")
group = parser.add_argument_group("Word Embeddings Options")
embeds.add_embed_arguments(group)
def create_dataloader(args, vocabs=None, val=False):
argvalpfx = "val_" if val else ""
paths = [getattr(args, f"{argvalpfx}{mode}_path") for mode in MODES]
if vocabs is None:
vocabs = [getattr(args, f"{mode}_vocab") for mode in MODES]
vocabs = [utils.load_pkl(v) if v is not None else None for v in vocabs]
dset = dataset.TextSequenceDataset(
paths=paths,
feats=["string", "tensor"],
vocabs=vocabs,
vocab_limit=args.vocab_limit,
pad_eos=args.eos,
pad_bos=args.bos,
unk=args.unk,
)
    # the dataset builds a vocab for any mode where none was supplied
    vocabs = dset.vocabs
collator = dataset.TextSequenceBatchCollator(
pad_idxs=[len(v) for v in vocabs]
)
return td.DataLoader(
dataset=dset,
batch_size=args.batch_size,
shuffle=False if val else args.shuffle,
num_workers=args.data_workers,
collate_fn=collator,
pin_memory=args.pin_memory
)
def prepare_model(args, vocabs, resume_from=None):
if resume_from is None:
resume_from = dict()
model_path = args.model_path
if resume_from.get("model_args") is not None:
temp_path = tempfile.mkstemp()[1]
utils.dump_yaml(resume_from["model_args"], temp_path)
torchmodels.register_packages(models)
mdl_cls = torchmodels.create_model_cls(models.jlu, model_path)
mdl = mdl_cls(
hidden_dim=args.hidden_dim,
word_dim=args.word_dim,
num_words=len(vocabs[0]),
num_slots=len(vocabs[1]),
num_intents=len(vocabs[2])
)
mdl.reset_parameters()
if resume_from.get("model") is not None:
mdl.load_state_dict(resume_from["model"])
else:
embeds.load_embeddings(args, vocabs[0], mdl.embeddings())
return mdl
def get_optimizer_cls(args):
kwargs = dict()
if args.learning_rate is not None:
kwargs["lr"] = args.learning_rate
if args.weight_decay is not None:
kwargs["weight_decay"] = args.weight_decay
return utils.map_val(args.optimizer, {
"sgd": lambda p: op.SGD(p, **kwargs),
"adam": lambda p: op.Adam(p, **kwargs),
"adamax": lambda p: op.Adamax(p, **kwargs),
"adagrad": lambda p: op.Adagrad(p, **kwargs),
"adadelta": lambda p: op.Adadelta(p, **kwargs)
}, "optimizer")
def normalize(x):
return x / sum(x)
def randidx(x, size):
"""x is either integer or array-like probability distribution"""
if isinstance(x, int):
return torch.randint(0, x, size)
else:
return np.random.choice(np.arange(len(x)), p=x, size=size)
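# Validator runs the model in eval mode over the validation set, collects the gold
# slot labels and intents, and reports slot-filling / intent-classification metrics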
class Validator(inference.LoggableLossInferencer,
inference.PredictingLossInferencer):
def __init__(self, *args, **kwargs):
super(Validator, self).__init__(
*args,
name="valid",
tensorboard=False,
persistant_steps=False,
**kwargs
)
def on_run_started(self, dataloader):
super(Validator, self).on_run_started(dataloader)
self.labels_gold, self.intents_gold = list(), list()
def on_batch_started(self, batch):
super(Validator, self).on_batch_started(batch)
self.model.train(False)
def on_loss_calculated(self, batch, data, model_outputs, losses, stats):
super(Validator, self)\
.on_loss_calculated(batch, data, model_outputs, losses, stats)
# dataset might produce bos/eos-padded strings
labels_gold = [self.trim(x[1]) for x in batch["string"]]
intents_gold = [self.trim(x[2]) for x in batch["string"]]
self.labels_gold.extend(labels_gold)
self.intents_gold.extend(intents_gold)
def on_run_finished(self, stats):
preds = super(Validator, self).on_run_finished(stats)
assert preds is not None, "polymorphism gone wrong?"
lpreds, ipreds = [x[0][0] for x in preds], [x[1][0] for x in preds]
res = evaluate.evaluate(
gold_labels=self.labels_gold,
gold_intents=self.intents_gold,
pred_labels=lpreds,
pred_intents=ipreds
)
tasks = ["slot-labeling", "intent-classification"]
stats = {
f"val-{measure}-{mode}": v
for mode, task in zip(MODES[1:], tasks)
for measure, v in res[task]["overall"].items()
}
stats["val-acc-sent"] = res["sentence-understanding"]
msg = utils.join_dict(
{k: f"{v:.4f}" for k, v in stats.items()},
item_dlm=", ", kvp_dlm="="
)
self.log(msg, tag="eval")
return stats
class Trainer(inference.LoggableLossInferencer):
def __init__(self, *args, epochs=10, optimizer_cls=op.Adam, model_path=None,
save_period=None, samples=None, validator=None,
early_stop=False, early_stop_patience=5,
early_stop_criterion="val-acc-sent", **kwargs):
super(Trainer, self).__init__(
*args,
name="train",
persistant_steps=True,
**kwargs
)
self.epochs = epochs
self.optimizer_cls = optimizer_cls
self.show_samples = samples is not None
self.num_samples = samples
self.should_validate = validator is not None
self.validator = validator
self.should_save_periodically = save_period is not None
self.save_period = save_period
self.model_path = model_path
self.early_stop = early_stop
self.early_stop_patience = early_stop_patience
self.early_stop_criterion = early_stop_criterion
self.global_step = 0
self.eidx = 0
self.optimizer = optimizer_cls(self.trainable_params())
self.early_stop_best = {
"crit": None,
"eidx": -1,
"sd": None,
"stats": None
}
def trainable_params(self):
for p in self.model.parameters():
if p.requires_grad:
yield p
def save_snapshot(self, state_dict, tag=None):
if tag is None:
tag = ""
else:
tag = f"-{tag}"
eidx = state_dict["eidx"]
path = os.path.join(self.save_dir, f"checkpoint-e{eidx:02d}{tag}")
torch.save(state_dict, path)
logging.info(f"checkpoint saved to '{path}'.")
def snapshot(self, stats=None):
exp_state_dict = {
"eidx": self.eidx,
"model": {
k: v.detach().cpu()
for k, v in self.module.state_dict().items()
},
"global_step": self.global_step,
"optimizer": self.optimizer.state_dict(),
}
if self.model_path is not None:
exp_state_dict["model_args"] = utils.load_yaml(self.model_path)
if stats is not None:
exp_state_dict["stats"] = stats
return exp_state_dict
def load_snapshot(self, state_dict):
if self.optimizer is not None and "optimizer" in state_dict:
self.optimizer.load_state_dict(state_dict["optimizer"])
if "global_step" in state_dict:
self.global_step = state_dict["global_step"]
if "eidx" in state_dict:
self.eidx = state_dict["eidx"]
def should_stop(self, eidx, stats):
if not self.early_stop:
return False
assert self.early_stop_criterion in stats, \
f"early stop criterion not found in training stats: " \
f"{self.early_stop_criterion} not in {stats.keys()}"
crit = stats.get(self.early_stop_criterion)
if self.early_stop_best["crit"] is None or crit > self.early_stop_best["crit"]:
self.early_stop_best["crit"] = crit
self.early_stop_best["sd"] = self.snapshot(stats)
return self.early_stop_best["sd"]["eidx"] is not None and \
eidx >= self.early_stop_best["sd"]["eidx"] + \
self.early_stop_patience
def report_early_stop(self, eidx):
stats_str = {k: f"{v:.4f}" for k, v in
self.early_stop_best["sd"]["stats"].items()}
logging.info(f"early stopping at {eidx} epoch as criterion "
f"({self.early_stop_criterion}) remains unchallenged "
f"for {self.early_stop_patience} epochs.")
logging.info(f"best stats so far:")
logging.info(f"[{eidx - self.early_stop_patience}] "
f"{utils.join_dict(stats_str, ', ', '=')}")
def on_batch_started(self, batch):
super(Trainer, self).on_batch_started(batch)
self.model.train(True)
self.optimizer.zero_grad()
def on_loss_calculated(self, batch, data, model_outputs, losses, stats):
ret = super(Trainer, self)\
.on_loss_calculated(batch, data, model_outputs, losses, stats)
loss = losses["loss-total"]
loss.backward()
self.optimizer.step()
return ret
def train(self, dataloader, val_dataloader=None):
if self.should_validate:
assert val_dataloader is not None, \
"must provide validation data if i need to validate"
self.optimizer = self.optimizer_cls(self.trainable_params())
self.progress_global = utils.tqdm(
total=self.epochs,
desc=f"training {self.epochs} epochs",
disable=not self.show_progress
)
self.progress_global.update(self.eidx)
for self.eidx in range(self.eidx + 1, self.epochs + 1):
self.progress_global.update(1)
self.progress_global.set_description(f"training e{self.eidx:02d}")
stats = self.inference(dataloader)
if self.should_validate:
with torch.no_grad():
valstats = self.validator.inference(val_dataloader)
stats.update(valstats)
if self.should_save_periodically \
and self.eidx % self.save_period == 0:
self.save_snapshot(self.snapshot())
if self.should_stop(self.eidx, stats):
self.save_snapshot(
state_dict=self.early_stop_best["sd"],
tag=f"best-{self.early_stop_best['crit']:.4f}"
)
self.report_early_stop(self.eidx)
break
self.progress_global.close()
def report_model(trainer):
params = sum(np.prod(p.size()) for p in trainer.trainable_params())
logging.info(f"Number of parameters: {params:,}")
logging.info(f"{trainer.module}")
def train(args):
devices = utils.get_devices(args.gpu)
if args.seed is not None:
utils.manual_seed(args.seed)
logging.info("Loading data...")
dataloader = create_dataloader(args)
vocabs = dataloader.dataset.vocabs
if args.validate:
val_dataloader = create_dataloader(args, vocabs, True)
else:
val_dataloader = None
fnames = [f"{mode}.vocab" for mode in MODES]
for vocab, fname in zip(vocabs, fnames):
utils.save_pkl(vocab, os.path.join(args.save_dir, fname))
logging.info("Initializing training environment...")
resume_from = dict()
if args.resume_from is not None:
resume_from = torch.load(args.resume_from)
mdl = prepare_model(args, vocabs, resume_from)
mdl = utils.to_device(mdl, devices)
optimizer_cls = get_optimizer_cls(args)
validator = None
if args.validate:
validator = Validator(
model=mdl,
device=devices[0],
vocabs=vocabs,
bos=args.bos,
eos=args.eos,
unk=args.unk,
alpha=args.loss_alpha,
beta=args.loss_beta,
progress=args.show_progress,
batch_stats=args.log_stats
)
trainer = Trainer(
model=mdl,
model_path=args.model_path,
alpha=args.loss_alpha,
beta=args.loss_beta,
device=devices[0],
vocabs=vocabs,
epochs=args.epochs,
save_dir=args.save_dir,
save_period=args.save_period,
optimizer_cls=optimizer_cls,
samples=args.samples,
tensorboard=args.tensorboard,
progress=args.show_progress,
validator=validator,
batch_stats=args.log_stats,
early_stop=args.early_stop,
early_stop_criterion=args.early_stop_criterion,
early_stop_patience=args.early_stop_patience
)
trainer.load_snapshot(resume_from)
report_model(trainer)
logging.info("Commencing training joint-lu...")
trainer.train(dataloader, val_dataloader)
logging.info("Done!")
if __name__ == "__main__":
train(utils.initialize_script(parser))
|
[
"evaluate.evaluate"
] |
[((326, 376), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'fromfile_prefix_chars': '"""@"""'}), "(fromfile_prefix_chars='@')\n", (349, 376), False, 'import argparse\n'), ((498, 541), 'utils.add_logging_arguments', 'utils.add_logging_arguments', (['group', '"""train"""'], {}), "(group, 'train')\n", (525, 541), False, 'import utils\n'), ((3447, 3620), 'dataset.TextSequenceDataset', 'dataset.TextSequenceDataset', ([], {'paths': 'paths', 'feats': "['string', 'tensor']", 'vocabs': 'vocabs', 'vocab_limit': 'args.vocab_limit', 'pad_eos': 'args.eos', 'pad_bos': 'args.bos', 'unk': 'args.unk'}), "(paths=paths, feats=['string', 'tensor'], vocabs\n =vocabs, vocab_limit=args.vocab_limit, pad_eos=args.eos, pad_bos=args.\n bos, unk=args.unk)\n", (3474, 3620), False, 'import dataset\n'), ((3835, 4019), 'torch.utils.data.DataLoader', 'td.DataLoader', ([], {'dataset': 'dset', 'batch_size': 'args.batch_size', 'shuffle': '(False if val else args.shuffle)', 'num_workers': 'args.data_workers', 'collate_fn': 'collator', 'pin_memory': 'args.pin_memory'}), '(dataset=dset, batch_size=args.batch_size, shuffle=False if\n val else args.shuffle, num_workers=args.data_workers, collate_fn=\n collator, pin_memory=args.pin_memory)\n', (3848, 4019), True, 'import torch.utils.data as td\n'), ((4368, 4405), 'torchmodels.register_packages', 'torchmodels.register_packages', (['models'], {}), '(models)\n', (4397, 4405), False, 'import torchmodels\n'), ((4420, 4472), 'torchmodels.create_model_cls', 'torchmodels.create_model_cls', (['models.jlu', 'model_path'], {}), '(models.jlu, model_path)\n', (4448, 4472), False, 'import torchmodels\n'), ((13570, 13619), 'logging.info', 'logging.info', (['f"""Number of parameters: {params:,}"""'], {}), "(f'Number of parameters: {params:,}')\n", (13582, 13619), False, 'import logging\n'), ((13624, 13657), 'logging.info', 'logging.info', (['f"""{trainer.module}"""'], {}), "(f'{trainer.module}')\n", (13636, 13657), False, 'import logging\n'), ((13691, 13718), 'utils.get_devices', 'utils.get_devices', (['args.gpu'], {}), '(args.gpu)\n', (13708, 13718), False, 'import utils\n'), ((13791, 13822), 'logging.info', 'logging.info', (['"""Loading data..."""'], {}), "('Loading data...')\n", (13803, 13822), False, 'import logging\n'), ((14193, 14245), 'logging.info', 'logging.info', (['"""Initializing training environment..."""'], {}), "('Initializing training environment...')\n", (14205, 14245), False, 'import logging\n'), ((14420, 14449), 'utils.to_device', 'utils.to_device', (['mdl', 'devices'], {}), '(mdl, devices)\n', (14435, 14449), False, 'import utils\n'), ((15599, 15646), 'logging.info', 'logging.info', (['"""Commencing training joint-lu..."""'], {}), "('Commencing training joint-lu...')\n", (15611, 15646), False, 'import logging\n'), ((15698, 15719), 'logging.info', 'logging.info', (['"""Done!"""'], {}), "('Done!')\n", (15710, 15719), False, 'import logging\n'), ((4309, 4362), 'utils.dump_yaml', 'utils.dump_yaml', (["resume_from['model_args']", 'temp_path'], {}), "(resume_from['model_args'], temp_path)\n", (4324, 4362), False, 'import utils\n'), ((5598, 5623), 'torch.randint', 'torch.randint', (['(0)', 'x', 'size'], {}), '(0, x, size)\n', (5611, 5623), False, 'import torch\n'), ((7036, 7161), 'evaluate.evaluate', 'evaluate.evaluate', ([], {'gold_labels': 'self.labels_gold', 'gold_intents': 'self.intents_gold', 'pred_labels': 'lpreds', 'pred_intents': 'ipreds'}), '(gold_labels=self.labels_gold, gold_intents=self.\n intents_gold, pred_labels=lpreds, pred_intents=ipreds)\n', (7053, 7161), False, 'import evaluate\n'), ((9285, 9344), 'os.path.join', 'os.path.join', (['self.save_dir', 'f"""checkpoint-e{eidx:02d}{tag}"""'], {}), "(self.save_dir, f'checkpoint-e{eidx:02d}{tag}')\n", (9297, 9344), False, 'import os\n'), ((9353, 9381), 'torch.save', 'torch.save', (['state_dict', 'path'], {}), '(state_dict, path)\n', (9363, 9381), False, 'import torch\n'), ((9390, 9436), 'logging.info', 'logging.info', (['f"""checkpoint saved to \'{path}\'."""'], {}), '(f"checkpoint saved to \'{path}\'.")\n', (9402, 9436), False, 'import logging\n'), ((11221, 11381), 'logging.info', 'logging.info', (['f"""early stopping at {eidx} epoch as criterion ({self.early_stop_criterion}) remains unchallenged for {self.early_stop_patience} epochs."""'], {}), "(\n f'early stopping at {eidx} epoch as criterion ({self.early_stop_criterion}) remains unchallenged for {self.early_stop_patience} epochs.'\n )\n", (11233, 11381), False, 'import logging\n'), ((11430, 11465), 'logging.info', 'logging.info', (['f"""best stats so far:"""'], {}), "(f'best stats so far:')\n", (11442, 11465), False, 'import logging\n'), ((12355, 12459), 'utils.tqdm', 'utils.tqdm', ([], {'total': 'self.epochs', 'desc': 'f"""training {self.epochs} epochs"""', 'disable': '(not self.show_progress)'}), "(total=self.epochs, desc=f'training {self.epochs} epochs',\n disable=not self.show_progress)\n", (12365, 12459), False, 'import utils\n'), ((13757, 13785), 'utils.manual_seed', 'utils.manual_seed', (['args.seed'], {}), '(args.seed)\n', (13774, 13785), False, 'import utils\n'), ((14330, 14358), 'torch.load', 'torch.load', (['args.resume_from'], {}), '(args.resume_from)\n', (14340, 14358), False, 'import torch\n'), ((15759, 15790), 'utils.initialize_script', 'utils.initialize_script', (['parser'], {}), '(parser)\n', (15782, 15790), False, 'import utils\n'), ((4279, 4297), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (4295, 4297), False, 'import tempfile\n'), ((9859, 9891), 'utils.load_yaml', 'utils.load_yaml', (['self.model_path'], {}), '(self.model_path)\n', (9874, 9891), False, 'import utils\n'), ((14152, 14186), 'os.path.join', 'os.path.join', (['args.save_dir', 'fname'], {}), '(args.save_dir, fname)\n', (14164, 14186), False, 'import os\n'), ((3374, 3391), 'utils.load_pkl', 'utils.load_pkl', (['v'], {}), '(v)\n', (3388, 3391), False, 'import utils\n'), ((5171, 5190), 'torch.optim.SGD', 'op.SGD', (['p'], {}), '(p, **kwargs)\n', (5177, 5190), True, 'import torch.optim as op\n'), ((5218, 5238), 'torch.optim.Adam', 'op.Adam', (['p'], {}), '(p, **kwargs)\n', (5225, 5238), True, 'import torch.optim as op\n'), ((5268, 5290), 'torch.optim.Adamax', 'op.Adamax', (['p'], {}), '(p, **kwargs)\n', (5277, 5290), True, 'import torch.optim as op\n'), ((5321, 5344), 'torch.optim.Adagrad', 'op.Adagrad', (['p'], {}), '(p, **kwargs)\n', (5331, 5344), True, 'import torch.optim as op\n'), ((5376, 5400), 'torch.optim.Adadelta', 'op.Adadelta', (['p'], {}), '(p, **kwargs)\n', (5387, 5400), True, 'import torch.optim as op\n'), ((11551, 11588), 'utils.join_dict', 'utils.join_dict', (['stats_str', '""", """', '"""="""'], {}), "(stats_str, ', ', '=')\n", (11566, 11588), False, 'import utils\n'), ((12841, 12856), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12854, 12856), False, 'import torch\n')]
|
import tensorflow as tf
from PIL import Image
import numpy as np
import time
from tqdm import tqdm
from nltk.translate import bleu
from nltk.translate.bleu_score import sentence_bleu
from evaluate import evaluate, bleu_score
from model import Attention, CNN_Encoder, RNN_Decoder
#from loss import loss_function
from preprocessing import datalimit, train_test_split
from utils import load_image, standardize, map_func, plot_attention
from data_download import data_download
from train import train_step
BATCH_SIZE = 64
BUFFER_SIZE = 1000
embedding_dim = 256
units = 512
features_shape = 2048
attention_features_shape = 64
limit = 1000 # number of images to use for training
def main(start=True):
annotation_file, PATH = data_download(data=True) # download data
train_captions, img_name_vector = datalimit(limit, annotation_file, PATH)
image_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet') # download model
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
encode_train = sorted(set(img_name_vector))
image_dataset = tf.data.Dataset.from_tensor_slices(encode_train)
image_dataset = image_dataset.map(load_image, num_parallel_calls=tf.data.AUTOTUNE).batch(16)
for img, path in tqdm(image_dataset):
batch_features = image_features_extract_model(img)
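        # flatten the spatial grid into (batch, positions, channels) vectors for the attention module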
batch_features = tf.reshape(batch_features,(batch_features.shape[0], -1, batch_features.shape[3]))
for bf, p in zip(batch_features, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
caption_dataset = tf.data.Dataset.from_tensor_slices(train_captions)
max_length = 50
vocabulary_size = 5000
tokenizer = tf.keras.layers.TextVectorization(max_tokens=vocabulary_size,standardize=standardize,output_sequence_length=max_length)
tokenizer.adapt(caption_dataset)
cap_vector = caption_dataset.map(lambda x: tokenizer(x))
word_to_index = tf.keras.layers.StringLookup(mask_token="",vocabulary=tokenizer.get_vocabulary())
index_to_word = tf.keras.layers.StringLookup(mask_token="",vocabulary=tokenizer.get_vocabulary(),invert=True)
img_name_train, cap_train, img_name_val, cap_val = train_test_split(img_name_vector, cap_vector)
num_steps = len(img_name_train) // BATCH_SIZE
dataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))
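    # map_func (wrapped in tf.numpy_function below) is expected to load the cached .npy features saved above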
dataset = dataset.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.int64]),num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, tokenizer.vocabulary_size())
optimizer = tf.keras.optimizers.Adam()
#loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(encoder=encoder,decoder=decoder,optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
if ckpt_manager.latest_checkpoint:
start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])
ckpt.restore(ckpt_manager.latest_checkpoint)
EPOCHS = 20 # number of epochs to train the model
loss_plot=[] # list to store the loss value for each epoch
for epoch in range(start_epoch, EPOCHS):
start = time.time()
total_loss = 0
for (batch, (img_tensor, target)) in enumerate(dataset):
batch_loss, t_loss = train_step(img_tensor, target)
total_loss += t_loss
if batch % 100 == 0:
average_batch_loss = batch_loss.numpy()/int(target.shape[1])
print(f'Epoch {epoch+1} Batch {batch} Loss {average_batch_loss:.4f}')
loss_plot.append(total_loss / num_steps)
if epoch % 5 == 0:
ckpt_manager.save()
print(f'Epoch {epoch+1} Loss {total_loss/num_steps:.6f}')
print(f'Time taken for 1 epoch {time.time()-start:.2f} sec\n')
# Validation Dataset testing
rid = np.random.randint(0, len(img_name_val))
image = img_name_val[rid]
real_caption = ' '.join([tf.compat.as_text(index_to_word(i).numpy()) for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image,encoder,decoder,word_to_index,index_to_word,max_length, image_features_extract_model,attention_features_shape)
print('Real Caption:', real_caption)
print('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# Unseen Data testing
for i in range(1,3):
        image_path = 'data/unseen_image/'+str(i)+'.jpg' # path to the user's image
        result, attention_plot = evaluate(image_path,encoder,decoder,word_to_index,index_to_word,max_length, image_features_extract_model,attention_features_shape)
print('Prediction Caption:', ' '.join(result))
plot_attention(image_path, result, attention_plot)
# opening the image
Image.open(image_path)
    # Calculate the average bleu score
    total_bleu_score = 0 # initialize the running bleu score (avoids shadowing the imported bleu_score)
image_limit=1000 # number of images to test
for i in tqdm(range(image_limit)):
rid1 = np.random.randint(0, len(img_name_val))
image1 = img_name_val[rid1]
        real_caption1 = ' '.join([tf.compat.as_text(index_to_word(w).numpy()) for w in cap_val[rid1] if w not in [0]])
result1, attention_plot1 = evaluate(image1,encoder,decoder,word_to_index,index_to_word,max_length, image_features_extract_model,attention_features_shape)
        caption_bleu_score = sentence_bleu([real_caption1.split()], result1) # bleu expects a list of tokenized references
        total_bleu_score += caption_bleu_score
    print("The average bleu score : ", total_bleu_score/image_limit)
if __name__ == "__main__":
start=True
main(start)
|
[
"evaluate.evaluate"
] |
[((728, 752), 'data_download.data_download', 'data_download', ([], {'data': '(True)'}), '(data=True)\n', (741, 752), False, 'from data_download import data_download\n'), ((807, 846), 'preprocessing.datalimit', 'datalimit', (['limit', 'annotation_file', 'PATH'], {}), '(limit, annotation_file, PATH)\n', (816, 846), False, 'from preprocessing import datalimit, train_test_split\n'), ((871, 943), 'tensorflow.keras.applications.InceptionV3', 'tf.keras.applications.InceptionV3', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (904, 943), True, 'import tensorflow as tf\n'), ((1080, 1119), 'tensorflow.keras.Model', 'tf.keras.Model', (['new_input', 'hidden_layer'], {}), '(new_input, hidden_layer)\n', (1094, 1119), True, 'import tensorflow as tf\n'), ((1190, 1238), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['encode_train'], {}), '(encode_train)\n', (1224, 1238), True, 'import tensorflow as tf\n'), ((1358, 1377), 'tqdm.tqdm', 'tqdm', (['image_dataset'], {}), '(image_dataset)\n', (1362, 1377), False, 'from tqdm import tqdm\n'), ((1710, 1760), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_captions'], {}), '(train_captions)\n', (1744, 1760), True, 'import tensorflow as tf\n'), ((1826, 1952), 'tensorflow.keras.layers.TextVectorization', 'tf.keras.layers.TextVectorization', ([], {'max_tokens': 'vocabulary_size', 'standardize': 'standardize', 'output_sequence_length': 'max_length'}), '(max_tokens=vocabulary_size, standardize=\n standardize, output_sequence_length=max_length)\n', (1859, 1952), True, 'import tensorflow as tf\n'), ((2317, 2362), 'preprocessing.train_test_split', 'train_test_split', (['img_name_vector', 'cap_vector'], {}), '(img_name_vector, cap_vector)\n', (2333, 2362), False, 'from preprocessing import datalimit, train_test_split\n'), ((2429, 2492), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(img_name_train, cap_train)'], {}), '((img_name_train, cap_train))\n', (2463, 2492), True, 'import tensorflow as tf\n'), ((2783, 2809), 'model.CNN_Encoder', 'CNN_Encoder', (['embedding_dim'], {}), '(embedding_dim)\n', (2794, 2809), False, 'from model import Attention, CNN_Encoder, RNN_Decoder\n'), ((2903, 2929), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (2927, 2929), True, 'import tensorflow as tf\n'), ((3087, 3161), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'encoder': 'encoder', 'decoder': 'decoder', 'optimizer': 'optimizer'}), '(encoder=encoder, decoder=decoder, optimizer=optimizer)\n', (3106, 3161), True, 'import tensorflow as tf\n'), ((3179, 3243), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'checkpoint_path'], {'max_to_keep': '(5)'}), '(ckpt, checkpoint_path, max_to_keep=5)\n', (3205, 3243), True, 'import tensorflow as tf\n'), ((4565, 4700), 'evaluate.evaluate', 'evaluate.evaluate', (['image', 'encoder', 'decoder', 'word_to_index', 'index_to_word', 'max_length', 'image_features_extract_model', 'attention_features_shape'], {}), '(image, encoder, decoder, word_to_index, index_to_word, max_length,\n image_features_extract_model, attention_features_shape)\n', (4573, 4700), False, 'from evaluate import evaluate, bleu_score\n'), ((4787, 4832), 'utils.plot_attention', 'plot_attention', (['image', 'result', 'attention_plot'], {}), '(image, result, attention_plot)\n', (4801, 4832), False, 'from utils import load_image, standardize, map_func, plot_attention\n'), ((1463, 1550), 'tensorflow.reshape', 'tf.reshape', (['batch_features', '(batch_features.shape[0], -1, batch_features.shape[3])'], {}), '(batch_features, (batch_features.shape[0], -1, batch_features.\n shape[3]))\n', (1473, 1550), True, 'import tensorflow as tf\n'), ((3656, 3667), 'time.time', 'time.time', ([], {}), '()\n', (3665, 3667), False, 'import time\n'), ((4992, 5127), 'evaluate.evaluate', 'evaluate.evaluate', (['image', 'encoder', 'decoder', 'word_to_index', 'index_to_word', 'max_length', 'image_features_extract_model', 'attention_features_shape'], {}), '(image, encoder, decoder, word_to_index, index_to_word, max_length,\n image_features_extract_model, attention_features_shape)\n', (5000, 5127), False, 'from evaluate import evaluate, bleu_score\n'), ((5181, 5231), 'utils.plot_attention', 'plot_attention', (['image_path', 'result', 'attention_plot'], {}), '(image_path, result, attention_plot)\n', (5195, 5231), False, 'from utils import load_image, standardize, map_func, plot_attention\n'), ((5268, 5290), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (5278, 5290), False, 'from PIL import Image\n'), ((5709, 5845), 'evaluate.evaluate', 'evaluate.evaluate', (['image1', 'encoder', 'decoder', 'word_to_index', 'index_to_word', 'max_length', 'image_features_extract_model', 'attention_features_shape'], {}), '(image1, encoder, decoder, word_to_index, index_to_word, max_length,\n image_features_extract_model, attention_features_shape)\n', (5717, 5845), False, 'from evaluate import evaluate, bleu_score\n'), ((2540, 2607), 'tensorflow.numpy_function', 'tf.numpy_function', (['map_func', '[item1, item2]', '[tf.float32, tf.int64]'], {}), '(map_func, [item1, item2], [tf.float32, tf.int64])\n', (2557, 2607), True, 'import tensorflow as tf\n'), ((3789, 3819), 'train.train_step', 'train_step', (['img_tensor', 'target'], {}), '(img_tensor, target)\n', (3799, 3819), False, 'from train import train_step\n'), ((4274, 4285), 'time.time', 'time.time', ([], {}), '()\n', (4283, 4285), False, 'import time\n')]
|
'''train the model'''
import argparse
import logging
import os
import glob
from model.data_loader import *
from model.x2net import x2Net
from model.x3net import x3Net
from model.x4net import x4Net
from model.loss_fn import loss_fn
from model.metrics import metrics
from evaluate import evaluate
import utils
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/x3/', help="Directory containing the dataset")
parser.add_argument('--model', default='x3net', help='The model to train and test')
parser.add_argument('--model_dir', default='experiments/x3net/', help="Directory containing params.json")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training")
def train(model, optimizer, loss_fn, dataloader, metrics, params):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
"""
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
model = model.to(params.device)
for i, (X_batch, y_batch) in enumerate(dataloader):
# move to GPU if available
# if params.cuda:
X_batch, y_batch = X_batch.to(params.device), y_batch.to(params.device)
# compute model output and loss
y_pred = model(X_batch)
loss = loss_fn(y_pred, y_batch)
# clear previous gradients, compute gradients of all variables w.r.t. loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# move output and ground truth to cpu, convert to numpy arrays
y_pred = y_pred.detach().cpu()
y_batch = y_batch.detach().cpu()
# compute all metrics on this batch
summary_batch = {metric : metrics[metric](y_pred, y_batch) for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# update the average loss
loss_avg.update(loss.item())
# compute mean of all metrics in summary
metrics_mean = {metric : np.mean([x[metric] for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.5f}".format(k, v) for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer, loss_fn, metrics, params, model_dir,
restore_file=None, lr_scheduler=None):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
train_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
val_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches validation data
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
model_dir: (string) directory containing config, weights and log
restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)
lr_scheduler: (optim.lr_scheduler) learning rate scheduler
"""
# reload weights from restore_file if specified
if restore_file is not None:
        restore_path = os.path.join(model_dir, restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
utils.load_checkpoint(restore_path, model, optimizer)
    best_val_metric = 0 # validation metric is PSNR (higher is better), so start from 0
for epoch in range(params.num_epochs):
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# learning rate scheduler
if lr_scheduler:
lr_scheduler.step()
# compute number of batches in one epoch (one full pass over the training set)
train(model, optimizer, loss_fn, train_dataloader, metrics, params)
# Evaluate for one epoch on validation set
val_metrics = evaluate(model, loss_fn, val_dataloader, metrics, params)
val_metric = val_metrics['psnr']
is_best = val_metric >= best_val_metric
# Save weights
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict' : optimizer.state_dict()},
is_best=is_best,
checkpoint=model_dir)
# If best_eval, best_save_path
if is_best:
logging.info("- Found new best validation metric")
best_val_metric = val_metric
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
utils.save_dict_to_json(val_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(model_dir, "metrics_val_last_weights.json")
utils.save_dict_to_json(val_metrics, last_json_path)
if __name__ == '__main__':
# set thread number to 1
torch.set_num_threads(1)
# Load the parameters from json file
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# use GPU if available
params.cuda = torch.cuda.is_available()
params.device = torch.device("cuda:2" if params.cuda else "cpu")
# cudnn.benchmark = True
# Set the random seed for reproducible experiments
torch.manual_seed(590)
if params.cuda: torch.cuda.manual_seed(590)
# Set the logger
utils.set_logger(os.path.join(args.model_dir, 'train.log'))
# Create the input data pipeline
logging.info("Loading the datasets...")
# fetch dataloaders
dataloaders = fetch_dataloaders(['training', 'validation', 'test'], args.data_dir, params)
train_dl = dataloaders['training']
val_dl = dataloaders['validation']
logging.info("- done.")
# Define the model and optimizer
model_name = args.model
if model_name == 'x2net':
model = x2Net().to(params.device)
elif model_name == 'x3net':
model = x3Net().to(params.device)
elif model_name == 'x4net':
model = x4Net().to(params.device)
else:
print('not implemented')
exit()
# use 2 GPUs for training
if params.cuda:
model = nn.DataParallel(model, device_ids=[2,3])
    # define optimizer
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
# learning rate scheduler
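    # MultiStepLR decays the learning rate by gamma=0.1 at epochs 5, 30 and 60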
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, 30, 60], gamma=0.1)
# # fetch loss function and metrics
# loss_fn = net.loss_fn
# metrics = net.metrics
# Train the model
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
train_and_evaluate(model, train_dl, val_dl, optimizer, loss_fn, metrics, params, args.model_dir, args.restore_file, lr_scheduler=scheduler)
|
[
"evaluate.evaluate"
] |
[((403, 428), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (426, 428), False, 'import argparse\n'), ((1673, 1695), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1693, 1695), False, 'import utils\n'), ((3073, 3123), 'logging.info', 'logging.info', (["('- Train metrics: ' + metrics_string)"], {}), "('- Train metrics: ' + metrics_string)\n", (3085, 3123), False, 'import logging\n'), ((6205, 6229), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (6226, 6229), False, 'import torch\n'), ((6319, 6362), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (6331, 6362), False, 'import os\n'), ((6374, 6399), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (6388, 6399), False, 'import os\n'), ((6473, 6496), 'utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (6485, 6496), False, 'import utils\n'), ((6543, 6568), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6566, 6568), False, 'import torch\n'), ((6589, 6637), 'torch.device', 'torch.device', (["('cuda:2' if params.cuda else 'cpu')"], {}), "('cuda:2' if params.cuda else 'cpu')\n", (6601, 6637), False, 'import torch\n'), ((6727, 6749), 'torch.manual_seed', 'torch.manual_seed', (['(590)'], {}), '(590)\n', (6744, 6749), False, 'import torch\n'), ((6926, 6965), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (6938, 6965), False, 'import logging\n'), ((7170, 7193), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (7182, 7193), False, 'import logging\n'), ((7798, 7874), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': '[5, 30, 60]', 'gamma': '(0.1)'}), '(optimizer, milestones=[5, 30, 60], gamma=0.1)\n', (7828, 7874), True, 'import torch.optim as optim\n'), ((2032, 2056), 'model.loss_fn.loss_fn', 'loss_fn', (['y_pred', 'y_batch'], {}), '(y_pred, y_batch)\n', (2039, 2056), False, 'from model.loss_fn import loss_fn\n'), ((2917, 2951), 'numpy.mean', 'np.mean', (['[x[metric] for x in summ]'], {}), '([x[metric] for x in summ])\n', (2924, 2951), True, 'import numpy as np\n'), ((4304, 4364), 'os.path.join', 'os.path.join', (['args.model_dir', "(args.restore_file + '.pth.tar')"], {}), "(args.model_dir, args.restore_file + '.pth.tar')\n", (4316, 4364), False, 'import os\n'), ((4447, 4500), 'utils.load_checkpoint', 'utils.load_checkpoint', (['restore_path', 'model', 'optimizer'], {}), '(restore_path, model, optimizer)\n', (4468, 4500), False, 'import utils\n'), ((5075, 5132), 'evaluate.evaluate', 'evaluate.evaluate', (['model', 'loss_fn', 'val_dataloader', 'metrics', 'params'], {}), '(model, loss_fn, val_dataloader, metrics, params)\n', (5083, 5132), False, 'from evaluate import evaluate\n'), ((6023, 6079), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_last_weights.json"""'], {}), "(model_dir, 'metrics_val_last_weights.json')\n", (6035, 6079), False, 'import os\n'), ((6088, 6140), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'last_json_path'], {}), '(val_metrics, last_json_path)\n', (6111, 6140), False, 'import utils\n'), ((6770, 6797), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(590)'], {}), '(590)\n', (6792, 6797), False, 'import torch\n'), ((6841, 6882), 'os.path.join', 'os.path.join', (['args.model_dir', '"""train.log"""'], {}), "(args.model_dir, 'train.log')\n", (6853, 6882), False, 'import os\n'), ((7609, 7650), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {'device_ids': '[2, 3]'}), '(model, device_ids=[2, 3])\n', (7624, 7650), True, 'import torch.nn as nn\n'), ((5607, 5657), 'logging.info', 'logging.info', (['"""- Found new best validation metric"""'], {}), "('- Found new best validation metric')\n", (5619, 5657), False, 'import logging\n'), ((5803, 5859), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_best_weights.json"""'], {}), "(model_dir, 'metrics_val_best_weights.json')\n", (5815, 5859), False, 'import os\n'), ((5872, 5924), 'utils.save_dict_to_json', 'utils.save_dict_to_json', (['val_metrics', 'best_json_path'], {}), '(val_metrics, best_json_path)\n', (5895, 5924), False, 'import utils\n'), ((7306, 7313), 'model.x2net.x2Net', 'x2Net', ([], {}), '()\n', (7311, 7313), False, 'from model.x2net import x2Net\n'), ((7380, 7387), 'model.x3net.x3Net', 'x3Net', ([], {}), '()\n', (7385, 7387), False, 'from model.x3net import x3Net\n'), ((7454, 7461), 'model.x4net.x4Net', 'x4Net', ([], {}), '()\n', (7459, 7461), False, 'from model.x4net import x4Net\n')]
|
import numpy as np
import matplotlib as mpl
import pandas as pd
import sys
sys.path.append("..")
mpl.use('tkagg') # issues with Big Sur; the backend must be set before pyplot is imported
import matplotlib.pyplot as plt
from strategy.williams_R import williamsR
from backtest import Backtest
from evaluate import SharpeRatio, MaxDrawdown, CAGR
# load data
df = pd.read_csv('../../database/microeconomic_data/hkex_ticks_day/hkex_0005.csv', header=0, index_col='Date', parse_dates=True)
# select time range
df = df.loc[pd.Timestamp('2017-01-01'):pd.Timestamp('2019-01-01')]
ticker = "0005.HK"
# William's R
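# Williams %R oscillates between 0 and -100; above -20 is commonly read as overbought, below -80 as oversold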
wr = williamsR(df)
wr_fig = wr.plot_wr()
wr_fig.suptitle('HK.0005 - Williams %R', fontsize=14)
wr_fig.savefig('./figures/momentum/08-williamsR-plot')
plt.show()
signals = wr.gen_signals()
signal_fig = wr.plot_signals(signals)
signal_fig.suptitle('Williams %R - Signals', fontsize=14)
signal_fig.savefig('./figures/momentum/08-williamsR_signals')
plt.show()
# Backtesting
portfolio, backtest_fig = Backtest(ticker, signals, df)
print("Final total value: {value:.4f} ".format(value = portfolio['total'][-1]))
print("Total return: {value:.4f}%".format(value = (portfolio['total'][-1] - portfolio['total'][0])/portfolio['total'][-1]*100))
# for analysis
print("No. of trade: {value}".format(value = len(signals[signals.positions == 1])))
backtest_fig.suptitle('Williams %R - Portfolio value', fontsize=14)
backtest_fig.savefig('./figures/momentum/08-williamsR_portfolio-value')
plt.show()
# Evaluate strategy
# 1. Sharpe ratio
sharpe_ratio = SharpeRatio(portfolio)
print("Sharpe ratio: {ratio:.4f} ".format(ratio = sharpe_ratio))
# 2. Maximum drawdown
maxDrawdown_fig, max_daily_drawdown, daily_drawdown = MaxDrawdown(df)
maxDrawdown_fig.suptitle('Williams %R - Maximum drawdown', fontsize=14)
maxDrawdown_fig.savefig('./figures/momentum/08-williamsR_maximum-drawdown')
plt.show()
# 3. Compound Annual Growth Rate
cagr = CAGR(portfolio)
print("CAGR: {cagr:.4f} ".format(cagr = cagr))
|
[
"evaluate.MaxDrawdown",
"evaluate.CAGR",
"evaluate.SharpeRatio"
] |
[((107, 128), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (122, 128), False, 'import sys\n'), ((129, 145), 'matplotlib.use', 'mpl.use', (['"""tkagg"""'], {}), "('tkagg')\n", (136, 145), True, 'import matplotlib as mpl\n'), ((344, 471), 'pandas.read_csv', 'pd.read_csv', (['"""../../database/microeconomic_data/hkex_ticks_day/hkex_0005.csv"""'], {'header': '(0)', 'index_col': '"""Date"""', 'parse_dates': '(True)'}), "('../../database/microeconomic_data/hkex_ticks_day/hkex_0005.csv',\n header=0, index_col='Date', parse_dates=True)\n", (355, 471), True, 'import pandas as pd\n'), ((597, 610), 'strategy.williams_R.williamsR', 'williamsR', (['df'], {}), '(df)\n', (606, 610), False, 'from strategy.williams_R import williamsR\n'), ((742, 752), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (750, 752), True, 'import matplotlib.pyplot as plt\n'), ((940, 950), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (948, 950), True, 'import matplotlib.pyplot as plt\n'), ((993, 1022), 'backtest.Backtest', 'Backtest', (['ticker', 'signals', 'df'], {}), '(ticker, signals, df)\n', (1001, 1022), False, 'from backtest import Backtest\n'), ((1471, 1481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1479, 1481), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1559), 'evaluate.SharpeRatio', 'SharpeRatio', (['portfolio'], {}), '(portfolio)\n', (1548, 1559), False, 'from evaluate import SharpeRatio, MaxDrawdown, CAGR\n'), ((1702, 1717), 'evaluate.MaxDrawdown', 'MaxDrawdown', (['df'], {}), '(df)\n', (1713, 1717), False, 'from evaluate import SharpeRatio, MaxDrawdown, CAGR\n'), ((1866, 1876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1874, 1876), True, 'import matplotlib.pyplot as plt\n'), ((1918, 1933), 'evaluate.CAGR', 'CAGR', (['portfolio'], {}), '(portfolio)\n', (1922, 1933), False, 'from evaluate import SharpeRatio, MaxDrawdown, CAGR\n'), ((501, 527), 'pandas.Timestamp', 'pd.Timestamp', (['"""2017-01-01"""'], {}), "('2017-01-01')\n", (513, 527), True, 'import pandas as pd\n'), ((528, 554), 'pandas.Timestamp', 'pd.Timestamp', (['"""2019-01-01"""'], {}), "('2019-01-01')\n", (540, 554), True, 'import pandas as pd\n')]
|
import tensorflow as tf
import utilities
import visualize
import evaluate
if __name__ == '__main__':
train_dataset, test_dataset, encoder = utilities.load_data()
model = tf.keras.Sequential([
encoder,
tf.keras.layers.Embedding(
input_dim=len(encoder.get_vocabulary()),
output_dim=64,
mask_zero=True),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
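        # the Bidirectional wrapper concatenates forward and backward LSTM outputs (64 units each)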
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1)
])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
history = model.fit(train_dataset, epochs=1,
validation_data=test_dataset,
validation_steps=30)
test_loss, test_acc = model.evaluate(test_dataset)
visualize.display_results(test_loss, test_acc, history)
evaluate.predict(model)
|
[
"evaluate.predict"
] |
[((146, 167), 'utilities.load_data', 'utilities.load_data', ([], {}), '()\n', (165, 167), False, 'import utilities\n'), ((911, 966), 'visualize.display_results', 'visualize.display_results', (['test_loss', 'test_acc', 'history'], {}), '(test_loss, test_acc, history)\n', (936, 966), False, 'import visualize\n'), ((972, 995), 'evaluate.predict', 'evaluate.predict', (['model'], {}), '(model)\n', (988, 995), False, 'import evaluate\n'), ((437, 481), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (458, 481), True, 'import tensorflow as tf\n'), ((491, 515), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (512, 515), True, 'import tensorflow as tf\n'), ((547, 599), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (581, 599), True, 'import tensorflow as tf\n'), ((629, 661), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (653, 661), True, 'import tensorflow as tf\n'), ((402, 426), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(64)'], {}), '(64)\n', (422, 426), True, 'import tensorflow as tf\n')]
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utilities import (create_folder, get_filename, create_logging,
load_scalar, get_labels)
from data_generator import DataGenerator
from models import Cnn_5layers_AvgPooling, Cnn_9layers_MaxPooling, Cnn_9layers_AvgPooling, Cnn_13layers_AvgPooling
from losses import binary_cross_entropy
from evaluate import Evaluator, StatisticsContainer
from pytorch_utils import move_data_to_gpu
import config
def train(args):
'''Training. Model will be saved after several iterations.
Args:
dataset_dir: string, directory of dataset
workspace: string, directory of workspace
taxonomy_level: 'fine' | 'coarse'
model_type: string, e.g. 'Cnn_9layers_MaxPooling'
holdout_fold: '1' | 'None', where '1' indicates using validation and
'None' indicates using full data for training
batch_size: int
cuda: bool
mini_data: bool, set True for debugging on a small part of data
'''
# Arugments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
taxonomy_level = args.taxonomy_level
model_type = args.model_type
holdout_fold = args.holdout_fold
batch_size = args.batch_size
cuda = args.cuda and torch.cuda.is_available()
mini_data = args.mini_data
filename = args.filename
mel_bins = config.mel_bins
frames_per_second = config.frames_per_second
max_iteration = 10 # Number of mini-batches to evaluate on training data
reduce_lr = True
labels = get_labels(taxonomy_level)
classes_num = len(labels)
# Paths
if mini_data:
prefix = 'minidata_'
else:
prefix = ''
train_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train.h5')
validate_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'validate.h5')
scalar_path = os.path.join(workspace, 'scalars',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train.h5')
checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'taxonomy_level={}'.format(taxonomy_level),
'holdout_fold={}'.format(holdout_fold), model_type)
create_folder(checkpoints_dir)
_temp_submission_path = os.path.join(workspace, '_temp_submissions', filename,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'taxonomy_level={}'.format(taxonomy_level),
'holdout_fold={}'.format(holdout_fold), model_type, '_submission.csv')
create_folder(os.path.dirname(_temp_submission_path))
validate_statistics_path = os.path.join(workspace, 'statistics', filename,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'taxonomy_level={}'.format(taxonomy_level),
'holdout_fold={}'.format(holdout_fold), model_type,
'validate_statistics.pickle')
create_folder(os.path.dirname(validate_statistics_path))
annotation_path = os.path.join(dataset_dir, 'annotations.csv')
yaml_path = os.path.join(dataset_dir, 'dcase-ust-taxonomy.yaml')
logs_dir = os.path.join(workspace, 'logs', filename, args.mode,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'taxonomy_level={}'.format(taxonomy_level),
'holdout_fold={}'.format(holdout_fold), model_type)
create_logging(logs_dir, 'w')
logging.info(args)
if cuda:
logging.info('Using GPU.')
else:
logging.info('Using CPU. Set --cuda flag to use GPU.')
# Load scalar
scalar = load_scalar(scalar_path)
# Model
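    # eval maps the model_type string (e.g. 'Cnn_9layers_AvgPooling') to the imported model class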
Model = eval(model_type)
model = Model(classes_num)
if cuda:
model.cuda()
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
eps=1e-08, weight_decay=0., amsgrad=True)
# Data generator
data_generator = DataGenerator(
train_hdf5_path=train_hdf5_path,
validate_hdf5_path=validate_hdf5_path,
holdout_fold=holdout_fold,
scalar=scalar,
batch_size=batch_size)
# Evaluator
evaluator = Evaluator(
model=model,
data_generator=data_generator,
taxonomy_level=taxonomy_level,
cuda=cuda,
verbose=False)
# Statistics
validate_statistics_container = StatisticsContainer(validate_statistics_path)
train_bgn_time = time.time()
iteration = 0
# Train on mini batches
for batch_data_dict in data_generator.generate_train():
# Evaluate
if iteration % 200 == 0:
logging.info('------------------------------------')
logging.info('Iteration: {}, {} level statistics:'.format(
iteration, taxonomy_level))
train_fin_time = time.time()
# Evaluate on training data
if mini_data:
raise Exception('`mini_data` flag must be set to False to use '
'the official evaluation tool!')
train_statistics = evaluator.evaluate(
data_type='train',
max_iteration=None)
# Evaluate on validation data
if holdout_fold != 'none':
validate_statistics = evaluator.evaluate(
data_type='validate',
submission_path=_temp_submission_path,
annotation_path=annotation_path,
yaml_path=yaml_path,
max_iteration=None)
validate_statistics_container.append_and_dump(
iteration, validate_statistics)
train_time = train_fin_time - train_bgn_time
validate_time = time.time() - train_fin_time
logging.info(
'Train time: {:.3f} s, validate time: {:.3f} s'
''.format(train_time, validate_time))
train_bgn_time = time.time()
# Save model
if iteration % 1000 == 0 and iteration > 0:
checkpoint = {
'iteration': iteration,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}
checkpoint_path = os.path.join(
checkpoints_dir, '{}_iterations.pth'.format(iteration))
torch.save(checkpoint, checkpoint_path)
logging.info('Model saved to {}'.format(checkpoint_path))
# Reduce learning rate
if reduce_lr and iteration % 200 == 0 and iteration > 0:
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.9
# Move data to GPU
for key in batch_data_dict.keys():
if key in ['feature', 'fine_target', 'coarse_target']:
batch_data_dict[key] = move_data_to_gpu(
batch_data_dict[key], cuda)
# Train
model.train()
batch_output = model(batch_data_dict['feature'])
# loss
batch_target = batch_data_dict['{}_target'.format(taxonomy_level)]
loss = binary_cross_entropy(batch_output, batch_target)
# Backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Stop learning
if iteration == 5000:
break
iteration += 1
def inference_validation(args):
'''Inference and calculate metrics on validation data.
Args:
dataset_dir: string, directory of dataset
workspace: string, directory of workspace
taxonomy_level: 'fine' | 'coarse'
model_type: string, e.g. 'Cnn_9layers_MaxPooling'
iteration: int
holdout_fold: '1', which means using validation data
batch_size: int
cuda: bool
mini_data: bool, set True for debugging on a small part of data
visualize: bool
'''
# Arugments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
taxonomy_level = args.taxonomy_level
model_type = args.model_type
iteration = args.iteration
holdout_fold = args.holdout_fold
batch_size = args.batch_size
cuda = args.cuda and torch.cuda.is_available()
mini_data = args.mini_data
visualize = args.visualize
filename = args.filename
mel_bins = config.mel_bins
frames_per_second = config.frames_per_second
labels = get_labels(taxonomy_level)
classes_num = len(labels)
# Paths
if mini_data:
prefix = 'minidata_'
else:
prefix = ''
train_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train.h5')
validate_hdf5_path = os.path.join(workspace, 'features',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'validate.h5')
scalar_path = os.path.join(workspace, 'scalars',
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'train.h5')
checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'taxonomy_level={}'.format(taxonomy_level),
'holdout_fold={}'.format(holdout_fold), model_type,
'{}_iterations.pth'.format(iteration))
submission_path = os.path.join(workspace, 'submissions', filename,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'taxonomy_level={}'.format(taxonomy_level),
'holdout_fold={}'.format(holdout_fold), model_type, 'submission.csv')
create_folder(os.path.dirname(submission_path))
annotation_path = os.path.join(dataset_dir, 'annotations.csv')
yaml_path = os.path.join(dataset_dir, 'dcase-ust-taxonomy.yaml')
logs_dir = os.path.join(workspace, 'logs', filename, args.mode,
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins),
'taxonomy_level={}'.format(taxonomy_level),
'holdout_fold={}'.format(holdout_fold), model_type)
create_logging(logs_dir, 'w')
logging.info(args)
# Load scalar
scalar = load_scalar(scalar_path)
# Load model
Model = eval(model_type)
model = Model(classes_num)
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model'])
if cuda:
model.cuda()
# Data generator
data_generator = DataGenerator(
train_hdf5_path=train_hdf5_path,
validate_hdf5_path=validate_hdf5_path,
holdout_fold=holdout_fold,
scalar=scalar,
batch_size=batch_size)
# Evaluator
evaluator = Evaluator(
model=model,
data_generator=data_generator,
taxonomy_level=taxonomy_level,
cuda=cuda,
verbose=True)
# Evaluate on validation data
evaluator.evaluate(
data_type='validate',
submission_path=submission_path,
annotation_path=annotation_path,
yaml_path=yaml_path,
max_iteration=None)
# Visualize
if visualize:
evaluator.visualize(data_type='validate')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')
parser_train.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')
parser_train.add_argument('--taxonomy_level', type=str, choices=['fine', 'coarse'], required=True)
parser_train.add_argument('--model_type', type=str, required=True, help='E.g., Cnn_9layers_AvgPooling.')
parser_train.add_argument('--holdout_fold', type=str, choices=['1', 'none'], required=True)
parser_train.add_argument('--batch_size', type=int, required=True)
parser_train.add_argument('--cuda', action='store_true', default=False)
parser_train.add_argument('--mini_data', action='store_true', default=False, help='Set True for debugging on a small part of data.')
parser_inference_validation = subparsers.add_parser('inference_validation')
parser_inference_validation.add_argument('--dataset_dir', type=str, required=True)
parser_inference_validation.add_argument('--workspace', type=str, required=True)
parser_inference_validation.add_argument('--taxonomy_level', type=str, choices=['fine', 'coarse'], required=True)
parser_inference_validation.add_argument('--model_type', type=str, required=True, help='E.g., Cnn_9layers_AvgPooling.')
parser_inference_validation.add_argument('--holdout_fold', type=str, choices=['1'], required=True)
parser_inference_validation.add_argument('--iteration', type=int, required=True, help='Load model of this iteration.')
parser_inference_validation.add_argument('--batch_size', type=int, required=True)
parser_inference_validation.add_argument('--cuda', action='store_true', default=False)
parser_inference_validation.add_argument('--visualize', action='store_true', default=False, help='Visualize log mel spectrogram of different sound classes.')
parser_inference_validation.add_argument('--mini_data', action='store_true', default=False, help='Set True for debugging on a small part of data.')
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
elif args.mode == 'inference_validation':
inference_validation(args)
else:
raise Exception('Error argument!')
|
[
"evaluate.Evaluator",
"evaluate.StatisticsContainer"
] |
[((40, 77), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../utils"""'], {}), "(sys.path[0], '../utils')\n", (52, 77), False, 'import os\n'), ((1793, 1819), 'utilities.get_labels', 'get_labels', (['taxonomy_level'], {}), '(taxonomy_level)\n', (1803, 1819), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((2739, 2769), 'utilities.create_folder', 'create_folder', (['checkpoints_dir'], {}), '(checkpoints_dir)\n', (2752, 2769), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((3542, 3586), 'os.path.join', 'os.path.join', (['dataset_dir', '"""annotations.csv"""'], {}), "(dataset_dir, 'annotations.csv')\n", (3554, 3586), False, 'import os\n'), ((3608, 3660), 'os.path.join', 'os.path.join', (['dataset_dir', '"""dcase-ust-taxonomy.yaml"""'], {}), "(dataset_dir, 'dcase-ust-taxonomy.yaml')\n", (3620, 3660), False, 'import os\n'), ((3936, 3965), 'utilities.create_logging', 'create_logging', (['logs_dir', '"""w"""'], {}), "(logs_dir, 'w')\n", (3950, 3965), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((3970, 3988), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (3982, 3988), False, 'import logging\n'), ((4143, 4167), 'utilities.load_scalar', 'load_scalar', (['scalar_path'], {}), '(scalar_path)\n', (4154, 4167), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((4474, 4633), 'data_generator.DataGenerator', 'DataGenerator', ([], {'train_hdf5_path': 'train_hdf5_path', 'validate_hdf5_path': 'validate_hdf5_path', 'holdout_fold': 'holdout_fold', 'scalar': 'scalar', 'batch_size': 'batch_size'}), '(train_hdf5_path=train_hdf5_path, validate_hdf5_path=\n validate_hdf5_path, holdout_fold=holdout_fold, scalar=scalar,\n batch_size=batch_size)\n', (4487, 4633), False, 'from data_generator import DataGenerator\n'), ((4707, 4822), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model', 'data_generator': 'data_generator', 'taxonomy_level': 'taxonomy_level', 'cuda': 'cuda', 'verbose': '(False)'}), '(model=model, data_generator=data_generator, taxonomy_level=\n taxonomy_level, cuda=cuda, verbose=False)\n', (4716, 4822), False, 'from evaluate import Evaluator, StatisticsContainer\n'), ((4925, 4970), 'evaluate.StatisticsContainer', 'StatisticsContainer', (['validate_statistics_path'], {}), '(validate_statistics_path)\n', (4944, 4970), False, 'from evaluate import Evaluator, StatisticsContainer\n'), ((4997, 5008), 'time.time', 'time.time', ([], {}), '()\n', (5006, 5008), False, 'import time\n'), ((9045, 9071), 'utilities.get_labels', 'get_labels', (['taxonomy_level'], {}), '(taxonomy_level)\n', (9055, 9071), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((10410, 10454), 'os.path.join', 'os.path.join', (['dataset_dir', '"""annotations.csv"""'], {}), "(dataset_dir, 'annotations.csv')\n", (10422, 10454), False, 'import os\n'), ((10476, 10528), 'os.path.join', 'os.path.join', (['dataset_dir', '"""dcase-ust-taxonomy.yaml"""'], {}), "(dataset_dir, 'dcase-ust-taxonomy.yaml')\n", (10488, 10528), False, 'import os\n'), ((10804, 10833), 'utilities.create_logging', 'create_logging', (['logs_dir', '"""w"""'], {}), "(logs_dir, 'w')\n", (10818, 10833), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((10838, 10856), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (10850, 10856), False, 'import logging\n'), ((10897, 10921), 'utilities.load_scalar', 'load_scalar', (['scalar_path'], {}), '(scalar_path)\n', (10908, 10921), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((11017, 11044), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (11027, 11044), False, 'import torch\n'), ((11182, 11341), 'data_generator.DataGenerator', 'DataGenerator', ([], {'train_hdf5_path': 'train_hdf5_path', 'validate_hdf5_path': 'validate_hdf5_path', 'holdout_fold': 'holdout_fold', 'scalar': 'scalar', 'batch_size': 'batch_size'}), '(train_hdf5_path=train_hdf5_path, validate_hdf5_path=\n validate_hdf5_path, holdout_fold=holdout_fold, scalar=scalar,\n batch_size=batch_size)\n', (11195, 11341), False, 'from data_generator import DataGenerator\n'), ((11415, 11529), 'evaluate.Evaluator', 'Evaluator', ([], {'model': 'model', 'data_generator': 'data_generator', 'taxonomy_level': 'taxonomy_level', 'cuda': 'cuda', 'verbose': '(True)'}), '(model=model, data_generator=data_generator, taxonomy_level=\n taxonomy_level, cuda=cuda, verbose=True)\n', (11424, 11529), False, 'from evaluate import Evaluator, StatisticsContainer\n'), ((11941, 11999), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Example of parser. """'}), "(description='Example of parser. ')\n", (11964, 11999), False, 'import argparse\n'), ((14176, 14198), 'utilities.get_filename', 'get_filename', (['__file__'], {}), '(__file__)\n', (14188, 14198), False, 'from utilities import create_folder, get_filename, create_logging, load_scalar, get_labels\n'), ((1501, 1526), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1524, 1526), False, 'import torch\n'), ((3093, 3131), 'os.path.dirname', 'os.path.dirname', (['_temp_submission_path'], {}), '(_temp_submission_path)\n', (3108, 3131), False, 'import os\n'), ((3472, 3513), 'os.path.dirname', 'os.path.dirname', (['validate_statistics_path'], {}), '(validate_statistics_path)\n', (3487, 3513), False, 'import os\n'), ((4011, 4037), 'logging.info', 'logging.info', (['"""Using GPU."""'], {}), "('Using GPU.')\n", (4023, 4037), False, 'import logging\n'), ((4056, 4110), 'logging.info', 'logging.info', (['"""Using CPU. Set --cuda flag to use GPU."""'], {}), "('Using CPU. Set --cuda flag to use GPU.')\n", (4068, 4110), False, 'import logging\n'), ((7739, 7787), 'losses.binary_cross_entropy', 'binary_cross_entropy', (['batch_output', 'batch_target'], {}), '(batch_output, batch_target)\n', (7759, 7787), False, 'from losses import binary_cross_entropy\n'), ((8825, 8850), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8848, 8850), False, 'import torch\n'), ((10349, 10381), 'os.path.dirname', 'os.path.dirname', (['submission_path'], {}), '(submission_path)\n', (10364, 10381), False, 'import os\n'), ((5193, 5245), 'logging.info', 'logging.info', (['"""------------------------------------"""'], {}), "('------------------------------------')\n", (5205, 5245), False, 'import logging\n'), ((5391, 5402), 'time.time', 'time.time', ([], {}), '()\n', (5400, 5402), False, 'import time\n'), ((6556, 6567), 'time.time', 'time.time', ([], {}), '()\n', (6565, 6567), False, 'import time\n'), ((6955, 6994), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (6965, 6994), False, 'import torch\n'), ((6352, 6363), 'time.time', 'time.time', ([], {}), '()\n', (6361, 6363), False, 'import time\n'), ((7455, 7499), 'pytorch_utils.move_data_to_gpu', 'move_data_to_gpu', (['batch_data_dict[key]', 'cuda'], {}), '(batch_data_dict[key], cuda)\n', (7471, 7499), False, 'from pytorch_utils import move_data_to_gpu\n')]
|
#!/usr/bin/env python
from __future__ import print_function, division
from evaluate import Evaluation
import math
import heapq
import logging
def logger():
return logging.getLogger(__name__)
def expectimax(game, placements, feature_weights, beam=1, return_all=False):
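    """Expectimax over upcoming placements: maximise over the root player's
    moves while averaging (chance nodes) over each later ply's candidate pieces.

    With 0 < beam < 1 and more than one ply, moves are pre-scored with a
    one-ply snapshot evaluation and only the top `beam` fraction is expanded.
    Returns (best_move, value); with return_all=True, also returns every
    scored (move, value) pair.
    """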
eval = Evaluation(game, feature_weights)
if not placements:
return None, eval.value()
def _expectimax(game, placements):
if not placements:
return eval.value()
value = 0
for p in placements[0]:
best = float('-inf')
moves = game.own.field.moves(p)
if moves and game.own.skips:
moves.append(None)
for m in moves:
eval.update(m, *game.own.move(m))
v = _expectimax(game, placements[1:])
eval.rollback(*game.own.undo())
if v > best:
best = v
value += best
return value / len(placements[0])
best = None, float('-inf')
all = [] if return_all else None
moves = game.own.field.moves(placements[0][0])
if moves and game.own.skips:
moves.append(None)
if beam < 1 and len(placements) > 1:
def _snap_eval(m):
eval.update(m, *game.own.move(m))
v = eval.value()
eval.rollback(*game.own.undo())
return v
num_beam = int(math.ceil(beam * len(moves)))
moves = heapq.nlargest(num_beam, moves, key=_snap_eval)
for m in moves:
eval.update(m, *game.own.move(m))
v = _expectimax(game, placements[1:])
eval.rollback(*game.own.undo())
if v > best[1]:
best = m, v
if all is not None:
all.append((m, v))
return (best, all) if return_all else best
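# Illustrative, self-contained sketch of the beam-pruning step above (the move
# list and snapshot scorer are hypothetical stand-ins, not part of the game engine):
if __name__ == '__main__':
    candidate_moves = list(range(10))
    def snapshot_score(m):
        # stand-in for the one-ply evaluation done by _snap_eval
        return -abs(m - 4)
    beam_fraction = 0.3
    num_beam = int(math.ceil(beam_fraction * len(candidate_moves)))
    print(heapq.nlargest(num_beam, candidate_moves, key=snapshot_score))  # [4, 3, 5]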
|
[
"evaluate.Evaluation"
] |
[((178, 205), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (195, 205), False, 'import logging\n'), ((302, 335), 'evaluate.Evaluation', 'Evaluation', (['game', 'feature_weights'], {}), '(game, feature_weights)\n', (312, 335), False, 'from evaluate import Evaluation\n'), ((1336, 1383), 'heapq.nlargest', 'heapq.nlargest', (['num_beam', 'moves'], {'key': '_snap_eval'}), '(num_beam, moves, key=_snap_eval)\n', (1350, 1383), False, 'import heapq\n')]
|
import os
import sys
import time
import json
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, sampler
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import cv2
import math
from argument import get_args
from backbone import darknet53
from dataset import BOP_Dataset, collate_fn
from model import PoseModule
from scheduler import WarmupScheduler
import transform
from evaluate import evaluate
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
DistributedSampler,
all_gather,
)
from utils import (
load_bop_meshes,
visualize_pred,
print_accuracy_per_class,
)
from tensorboardX import SummaryWriter
# reproducibility: https://pytorch.org/docs/stable/notes/randomness.html
torch.manual_seed(0)
np.random.seed(0)
# close shared memory of pytorch
if True:
# https://github.com/huaweicloud/dls-example/issues/26
from torch.utils.data import dataloader
from torch.multiprocessing import reductions
from multiprocessing.reduction import ForkingPickler
default_collate_func = dataloader.default_collate
def default_collate_override(batch):
dataloader._use_shared_memory = False
return default_collate_func(batch)
setattr(dataloader, 'default_collate', default_collate_override)
for t in torch._storage_classes:
if sys.version_info[0] == 2:
if t in ForkingPickler.dispatch:
del ForkingPickler.dispatch[t]
else:
if t in ForkingPickler._extra_reducers:
del ForkingPickler._extra_reducers[t]
def accumulate_dicts(data):
all_data = all_gather(data)
if get_rank() != 0:
return
data = {}
for d in all_data:
data.update(d)
return data
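# Note: every rank enters all_gather above, but only rank 0 receives the merged
# dict; the remaining ranks return None and skip the evaluation that follows.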
@torch.no_grad()
def valid(cfg, epoch, loader, model, device, logger=None):
torch.cuda.empty_cache()
model.eval()
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
preds = {}
meshes, _ = load_bop_meshes(cfg['DATASETS']['MESH_DIR'])
for idx, (images, targets, meta_infos) in pbar:
model.zero_grad()
images = images.to(device)
targets = [target.to(device) for target in targets]
pred, aux = model(images, targets=targets)
if get_rank() == 0 and idx % 10 == 0:
bIdx = 0
imgpath, imgname = os.path.split(meta_infos[bIdx]['path'])
name_prefix = imgpath.replace(os.sep, '_').replace('.', '') + '_' + os.path.splitext(imgname)[0]
rawImg, visImg, gtImg = visualize_pred(images.tensors[bIdx], targets[bIdx], pred[bIdx],
cfg['INPUT']['PIXEL_MEAN'], cfg['INPUT']['PIXEL_STD'], meshes)
# cv2.imwrite(cfg['RUNTIME']['WORKING_DIR'] + name_prefix + '.png', rawImg)
cv2.imwrite(cfg['RUNTIME']['WORKING_DIR'] + name_prefix + '_pred.png', visImg)
cv2.imwrite(cfg['RUNTIME']['WORKING_DIR'] + name_prefix + '_gt.png', gtImg)
# pred = [p.to('cpu') for p in pred]
for m, p in zip(meta_infos, pred):
preds.update({m['path']: {
'meta': m,
'pred': p
}})
preds = accumulate_dicts(preds)
if get_rank() != 0:
return
accuracy_adi_per_class, accuracy_rep_per_class, accuracy_adi_per_depth, accuracy_rep_per_depth, depth_range \
= evaluate(cfg, preds)
print_accuracy_per_class(accuracy_adi_per_class, accuracy_rep_per_class)
# writing log to tensorboard
if logger:
classNum = cfg['DATASETS']['N_CLASS'] - 1 # get rid of background class
assert (len(accuracy_adi_per_class) == classNum)
assert (len(accuracy_rep_per_class) == classNum)
all_adi = {}
all_rep = {}
validClassNum = 0
for i in range(classNum):
className = ('class_%02d' % i)
logger.add_scalars('ADI/' + className, accuracy_adi_per_class[i], epoch)
logger.add_scalars('REP/' + className, accuracy_rep_per_class[i], epoch)
#
assert (len(accuracy_adi_per_class[i]) == len(accuracy_rep_per_class[i]))
if len(accuracy_adi_per_class[i]) > 0:
for key, val in accuracy_adi_per_class[i].items():
if key in all_adi:
all_adi[key] += val
else:
all_adi[key] = val
for key, val in accuracy_rep_per_class[i].items():
if key in all_rep:
all_rep[key] += val
else:
all_rep[key] = val
validClassNum += 1
# averaging
for key, val in all_adi.items():
all_adi[key] = val / validClassNum
for key, val in all_rep.items():
all_rep[key] = val / validClassNum
logger.add_scalars('ADI/all_class', all_adi, epoch)
logger.add_scalars('REP/all_class', all_rep, epoch)
return accuracy_adi_per_class, accuracy_rep_per_class, accuracy_adi_per_depth, accuracy_rep_per_depth, depth_range
def train(cfg, epoch, max_epoch, loader, model, optimizer, scheduler, device, logger=None):
model.train()
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
for idx, (images, targets, _) in pbar:
model.zero_grad()
images = images.to(device)
targets = [target.to(device) for target in targets]
_, loss_dict = model(images, targets=targets)
loss_cls = loss_dict['loss_cls'].mean()
loss_reg = loss_dict['loss_reg'].mean()
loss = loss_cls + loss_reg
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 5.0)
optimizer.step()
scheduler.step()
loss_reduced = reduce_loss_dict(loss_dict)
loss_cls = loss_reduced['loss_cls'].mean().item()
loss_reg = loss_reduced['loss_reg'].mean().item()
if get_rank() == 0:
current_lr = optimizer.param_groups[0]['lr']
pbar_str = (("epoch: %d/%d, lr:%.6f, cls:%.4f, reg:%.4f") % (
epoch + 1, max_epoch, current_lr, loss_cls, loss_reg))
pbar.set_description(pbar_str)
# writing log to tensorboard
if logger and idx % 10 == 0:
# totalStep = (epoch * len(loader) + idx) * args.batch * args.n_gpu
totalStep = (epoch * len(loader) + idx) * cfg['SOLVER']['IMS_PER_BATCH']
logger.add_scalar('training/learning_rate', current_lr, totalStep)
logger.add_scalar('training/loss_cls', loss_cls, totalStep)
logger.add_scalar('training/loss_reg', loss_reg, totalStep)
logger.add_scalar('training/loss_all', (loss_cls + loss_reg), totalStep)
def data_sampler(dataset, shuffle, distributed):
if distributed:
return DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return sampler.RandomSampler(dataset)
else:
return sampler.SequentialSampler(dataset)
if __name__ == '__main__':
cfg = get_args()
n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
cfg['RUNTIME']['N_GPU'] = n_gpu
cfg['RUNTIME']['DISTRIBUTED'] = n_gpu > 1
if cfg['RUNTIME']['DISTRIBUTED']:
torch.cuda.set_device(cfg['RUNTIME']['LOCAL_RANK'])
torch.distributed.init_process_group(backend='gloo', init_method='env://')
synchronize()
# device = 'cuda'
device = cfg['RUNTIME']['RUNNING_DEVICE']
internal_K = np.array(cfg['INPUT']['INTERNAL_K']).reshape(3, 3)
train_trans = transform.Compose(
[
transform.Resize(
cfg['INPUT']['INTERNAL_WIDTH'],
cfg['INPUT']['INTERNAL_HEIGHT'], internal_K),
transform.RandomShiftScaleRotate(
cfg['SOLVER']['AUGMENTATION_SHIFT'],
cfg['SOLVER']['AUGMENTATION_SCALE'],
cfg['SOLVER']['AUGMENTATION_ROTATION'],
cfg['INPUT']['INTERNAL_WIDTH'],
cfg['INPUT']['INTERNAL_HEIGHT'],
internal_K),
transform.Normalize(
cfg['INPUT']['PIXEL_MEAN'],
cfg['INPUT']['PIXEL_STD']),
transform.ToTensor(),
]
)
valid_trans = transform.Compose(
[
transform.Resize(
cfg['INPUT']['INTERNAL_WIDTH'],
cfg['INPUT']['INTERNAL_HEIGHT'],
internal_K),
transform.Normalize(
cfg['INPUT']['PIXEL_MEAN'],
cfg['INPUT']['PIXEL_STD']),
transform.ToTensor(),
]
)
train_set = BOP_Dataset(
cfg['DATASETS']['TRAIN'],
cfg['DATASETS']['MESH_DIR'],
cfg['DATASETS']['BBOX_FILE'],
train_trans,
cfg['SOLVER']['STEPS_PER_EPOCH'] * cfg['SOLVER']['IMS_PER_BATCH'],
training=True)
valid_set = BOP_Dataset(
cfg['DATASETS']['VALID'],
cfg['DATASETS']['MESH_DIR'],
cfg['DATASETS']['BBOX_FILE'],
valid_trans,
training=False)
if cfg['MODEL']['BACKBONE'] == 'darknet53':
backbone = darknet53(pretrained=True)
else:
print("unsupported backbone!")
assert 0
model = PoseModule(cfg, backbone)
model = model.to(device)
start_epoch = 0
# https://discuss.pytorch.org/t/is-average-the-correct-way-for-the-gradient-in-distributeddataparallel-with-multi-nodes/34260/13
base_lr = cfg['SOLVER']['BASE_LR'] / cfg['RUNTIME']['N_GPU']
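    # Illustrative arithmetic for the scaling above (numbers are hypothetical):
    # with BASE_LR=0.01 and N_GPU=4, each process steps with lr=0.0025; see the
    # thread linked above for why the rate is divided by the GPU count when
    # DistributedDataParallel averages gradients across ranks.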
optimizer = optim.SGD(
model.parameters(),
lr=0, # the learning rate will be taken care by scheduler
momentum=0.9,
weight_decay=0.0001,
nesterov=True,
)
batch_size_per_gpu = int(cfg['SOLVER']['IMS_PER_BATCH'] / cfg['RUNTIME']['N_GPU'])
max_epoch = math.ceil(cfg['SOLVER']['MAX_ITER'] * cfg['SOLVER']['IMS_PER_BATCH'] / len(train_set))
scheduler_batch = WarmupScheduler(
optimizer, base_lr,
cfg['SOLVER']['MAX_ITER'], cfg['SOLVER']['SCHEDULER_POLICY'], cfg['SOLVER']['SCHEDULER_PARAMS'])
if cfg['RUNTIME']['DISTRIBUTED']:
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[cfg['RUNTIME']['LOCAL_RANK']],
output_device=cfg['RUNTIME']['LOCAL_RANK'],
broadcast_buffers=False,
)
model = model.module
# load weight and create working_dir dynamically
timestr = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
name_wo_ext = os.path.splitext(os.path.split(cfg['RUNTIME']['CONFIG_FILE'])[1])[0]
working_dir = 'working_dirs' + '/' + name_wo_ext + '/' + timestr + '/'
cfg['RUNTIME']['WORKING_DIR'] = working_dir
if os.path.exists(cfg['RUNTIME']['WEIGHT_FILE']):
try:
chkpt = torch.load(cfg['RUNTIME']['WEIGHT_FILE'], map_location='cpu') # load checkpoint
if 'model' in chkpt:
assert ('steps' in chkpt and 'optim' in chkpt)
scheduler_batch.step_multiple(chkpt['steps'])
start_epoch = int(chkpt['steps'] * cfg['SOLVER']['IMS_PER_BATCH'] / len(train_set))
model.load_state_dict(chkpt['model'])
optimizer.load_state_dict(chkpt['optim'])
# update working dir
cfg['RUNTIME']['WORKING_DIR'] = os.path.split(cfg['RUNTIME']['WEIGHT_FILE'])[0] + '/'
                print('Weights and optimizer are loaded from ' + cfg['RUNTIME']['WEIGHT_FILE'])
else:
model.load_state_dict(chkpt)
                print('Weights are loaded from ' + cfg['RUNTIME']['WEIGHT_FILE'])
        except Exception as e:
            print('Failed to load weights from ' + cfg['RUNTIME']['WEIGHT_FILE'] + ': ' + str(e))
else:
pass
#
print("working directory: " + cfg['RUNTIME']['WORKING_DIR'])
if get_rank() == 0:
os.makedirs(cfg['RUNTIME']['WORKING_DIR'], exist_ok=True)
logger = SummaryWriter(cfg['RUNTIME']['WORKING_DIR'])
# compute model size
total_params_count = sum(p.numel() for p in model.parameters())
print("Model size: %d parameters" % total_params_count)
train_loader = DataLoader(
train_set,
batch_size=batch_size_per_gpu,
sampler=data_sampler(train_set, shuffle=True, distributed=cfg['RUNTIME']['DISTRIBUTED']),
num_workers=cfg['RUNTIME']['NUM_WORKERS'],
collate_fn=collate_fn(cfg['INPUT']['SIZE_DIVISIBLE']),
)
valid_loader = DataLoader(
valid_set,
batch_size=batch_size_per_gpu,
sampler=data_sampler(valid_set, shuffle=False, distributed=cfg['RUNTIME']['DISTRIBUTED']),
num_workers=cfg['RUNTIME']['NUM_WORKERS'],
collate_fn=collate_fn(cfg['INPUT']['SIZE_DIVISIBLE']),
)
# write cfg to working_dir
with open(cfg['RUNTIME']['WORKING_DIR'] + 'cfg.json', 'w') as f:
json.dump(cfg, f, indent=4, sort_keys=True)
for epoch in range(start_epoch, max_epoch):
train(cfg, epoch, max_epoch, train_loader, model, optimizer, scheduler_batch, device, logger=logger)
valid(cfg, epoch, valid_loader, model, device, logger=logger)
if get_rank() == 0:
torch.save({
'steps': (epoch + 1) * int(len(train_set) / cfg['SOLVER']['IMS_PER_BATCH']),
'model': model.state_dict(),
'optim': optimizer.state_dict(),
},
cfg['RUNTIME']['WORKING_DIR'] + 'latest.pth',
)
if epoch == (max_epoch - 1):
torch.save(model.state_dict(), cfg['RUNTIME']['WORKING_DIR'] + 'final.pth')
# output final info
if get_rank() == 0:
timestr = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
commandstr = ' '.join([str(elem) for elem in sys.argv])
final_msg = ("finished at: %s\nworking_dir: %s\ncommands:%s" % (
timestr, cfg['RUNTIME']['WORKING_DIR'], commandstr))
with open(cfg['RUNTIME']['WORKING_DIR'] + 'info.txt', 'w') as f:
f.write(final_msg)
print(final_msg)
|
[
"evaluate.evaluate"
] |
[((781, 801), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (798, 801), False, 'import torch\n'), ((802, 819), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (816, 819), True, 'import numpy as np\n'), ((1800, 1815), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1813, 1815), False, 'import torch\n'), ((1661, 1677), 'distributed.all_gather', 'all_gather', (['data'], {}), '(data)\n', (1671, 1677), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((1879, 1903), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1901, 1903), False, 'import torch\n'), ((2101, 2145), 'utils.load_bop_meshes', 'load_bop_meshes', (["cfg['DATASETS']['MESH_DIR']"], {}), "(cfg['DATASETS']['MESH_DIR'])\n", (2116, 2145), False, 'from utils import load_bop_meshes, visualize_pred, print_accuracy_per_class\n'), ((3502, 3522), 'evaluate.evaluate', 'evaluate', (['cfg', 'preds'], {}), '(cfg, preds)\n', (3510, 3522), False, 'from evaluate import evaluate\n'), ((3528, 3600), 'utils.print_accuracy_per_class', 'print_accuracy_per_class', (['accuracy_adi_per_class', 'accuracy_rep_per_class'], {}), '(accuracy_adi_per_class, accuracy_rep_per_class)\n', (3552, 3600), False, 'from utils import load_bop_meshes, visualize_pred, print_accuracy_per_class\n'), ((7280, 7290), 'argument.get_args', 'get_args', ([], {}), '()\n', (7288, 7290), False, 'from argument import get_args\n'), ((8880, 9080), 'dataset.BOP_Dataset', 'BOP_Dataset', (["cfg['DATASETS']['TRAIN']", "cfg['DATASETS']['MESH_DIR']", "cfg['DATASETS']['BBOX_FILE']", 'train_trans', "(cfg['SOLVER']['STEPS_PER_EPOCH'] * cfg['SOLVER']['IMS_PER_BATCH'])"], {'training': '(True)'}), "(cfg['DATASETS']['TRAIN'], cfg['DATASETS']['MESH_DIR'], cfg[\n 'DATASETS']['BBOX_FILE'], train_trans, cfg['SOLVER']['STEPS_PER_EPOCH'] *\n cfg['SOLVER']['IMS_PER_BATCH'], training=True)\n", (8891, 9080), False, 'from dataset import BOP_Dataset, collate_fn\n'), ((9137, 9267), 'dataset.BOP_Dataset', 'BOP_Dataset', (["cfg['DATASETS']['VALID']", "cfg['DATASETS']['MESH_DIR']", "cfg['DATASETS']['BBOX_FILE']", 'valid_trans'], {'training': '(False)'}), "(cfg['DATASETS']['VALID'], cfg['DATASETS']['MESH_DIR'], cfg[\n 'DATASETS']['BBOX_FILE'], valid_trans, training=False)\n", (9148, 9267), False, 'from dataset import BOP_Dataset, collate_fn\n'), ((9477, 9502), 'model.PoseModule', 'PoseModule', (['cfg', 'backbone'], {}), '(cfg, backbone)\n', (9487, 9502), False, 'from model import PoseModule\n'), ((10167, 10304), 'scheduler.WarmupScheduler', 'WarmupScheduler', (['optimizer', 'base_lr', "cfg['SOLVER']['MAX_ITER']", "cfg['SOLVER']['SCHEDULER_POLICY']", "cfg['SOLVER']['SCHEDULER_PARAMS']"], {}), "(optimizer, base_lr, cfg['SOLVER']['MAX_ITER'], cfg['SOLVER'\n ]['SCHEDULER_POLICY'], cfg['SOLVER']['SCHEDULER_PARAMS'])\n", (10182, 10304), False, 'from scheduler import WarmupScheduler\n'), ((10960, 11005), 'os.path.exists', 'os.path.exists', (["cfg['RUNTIME']['WEIGHT_FILE']"], {}), "(cfg['RUNTIME']['WEIGHT_FILE'])\n", (10974, 11005), False, 'import os\n'), ((1686, 1696), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (1694, 1696), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((1930, 1940), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (1938, 1940), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((3345, 3355), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (3353, 3355), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((5344, 5354), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (5352, 5354), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((5992, 6019), 'distributed.reduce_loss_dict', 'reduce_loss_dict', (['loss_dict'], {}), '(loss_dict)\n', (6008, 6019), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((7072, 7116), 'distributed.DistributedSampler', 'DistributedSampler', (['dataset'], {'shuffle': 'shuffle'}), '(dataset, shuffle=shuffle)\n', (7090, 7116), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((7149, 7179), 'torch.utils.data.sampler.RandomSampler', 'sampler.RandomSampler', (['dataset'], {}), '(dataset)\n', (7170, 7179), False, 'from torch.utils.data import DataLoader, sampler\n'), ((7206, 7240), 'torch.utils.data.sampler.SequentialSampler', 'sampler.SequentialSampler', (['dataset'], {}), '(dataset)\n', (7231, 7240), False, 'from torch.utils.data import DataLoader, sampler\n'), ((7500, 7551), 'torch.cuda.set_device', 'torch.cuda.set_device', (["cfg['RUNTIME']['LOCAL_RANK']"], {}), "(cfg['RUNTIME']['LOCAL_RANK'])\n", (7521, 7551), False, 'import torch\n'), ((7560, 7634), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""gloo"""', 'init_method': '"""env://"""'}), "(backend='gloo', init_method='env://')\n", (7596, 7634), False, 'import torch\n'), ((7643, 7656), 'distributed.synchronize', 'synchronize', ([], {}), '()\n', (7654, 7656), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((9372, 9398), 'backbone.darknet53', 'darknet53', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9381, 9398), False, 'from backbone import darknet53\n'), ((10372, 10535), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['model'], {'device_ids': "[cfg['RUNTIME']['LOCAL_RANK']]", 'output_device': "cfg['RUNTIME']['LOCAL_RANK']", 'broadcast_buffers': '(False)'}), "(model, device_ids=[cfg['RUNTIME'][\n 'LOCAL_RANK']], output_device=cfg['RUNTIME']['LOCAL_RANK'],\n broadcast_buffers=False)\n", (10407, 10535), False, 'from torch import nn, optim\n'), ((12010, 12020), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (12018, 12020), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((12035, 12092), 'os.makedirs', 'os.makedirs', (["cfg['RUNTIME']['WORKING_DIR']"], {'exist_ok': '(True)'}), "(cfg['RUNTIME']['WORKING_DIR'], exist_ok=True)\n", (12046, 12092), False, 'import os\n'), ((12110, 12154), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["cfg['RUNTIME']['WORKING_DIR']"], {}), "(cfg['RUNTIME']['WORKING_DIR'])\n", (12123, 12154), False, 'from tensorboardX import SummaryWriter\n'), ((13034, 13077), 'json.dump', 'json.dump', (['cfg', 'f'], {'indent': '(4)', 'sort_keys': '(True)'}), '(cfg, f, indent=4, sort_keys=True)\n', (13043, 13077), False, 'import json\n'), ((13804, 13814), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (13812, 13814), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((2472, 2511), 'os.path.split', 'os.path.split', (["meta_infos[bIdx]['path']"], {}), "(meta_infos[bIdx]['path'])\n", (2485, 2511), False, 'import os\n'), ((2658, 2789), 'utils.visualize_pred', 'visualize_pred', (['images.tensors[bIdx]', 'targets[bIdx]', 'pred[bIdx]', "cfg['INPUT']['PIXEL_MEAN']", "cfg['INPUT']['PIXEL_STD']", 'meshes'], {}), "(images.tensors[bIdx], targets[bIdx], pred[bIdx], cfg['INPUT'\n ]['PIXEL_MEAN'], cfg['INPUT']['PIXEL_STD'], meshes)\n", (2672, 2789), False, 'from utils import load_bop_meshes, visualize_pred, print_accuracy_per_class\n'), ((2936, 3014), 'cv2.imwrite', 'cv2.imwrite', (["(cfg['RUNTIME']['WORKING_DIR'] + name_prefix + '_pred.png')", 'visImg'], {}), "(cfg['RUNTIME']['WORKING_DIR'] + name_prefix + '_pred.png', visImg)\n", (2947, 3014), False, 'import cv2\n'), ((3027, 3102), 'cv2.imwrite', 'cv2.imwrite', (["(cfg['RUNTIME']['WORKING_DIR'] + name_prefix + '_gt.png')", 'gtImg'], {}), "(cfg['RUNTIME']['WORKING_DIR'] + name_prefix + '_gt.png', gtImg)\n", (3038, 3102), False, 'import cv2\n'), ((6148, 6158), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (6156, 6158), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((7744, 7780), 'numpy.array', 'np.array', (["cfg['INPUT']['INTERNAL_K']"], {}), "(cfg['INPUT']['INTERNAL_K'])\n", (7752, 7780), True, 'import numpy as np\n'), ((7855, 7953), 'transform.Resize', 'transform.Resize', (["cfg['INPUT']['INTERNAL_WIDTH']", "cfg['INPUT']['INTERNAL_HEIGHT']", 'internal_K'], {}), "(cfg['INPUT']['INTERNAL_WIDTH'], cfg['INPUT'][\n 'INTERNAL_HEIGHT'], internal_K)\n", (7871, 7953), False, 'import transform\n'), ((7995, 8232), 'transform.RandomShiftScaleRotate', 'transform.RandomShiftScaleRotate', (["cfg['SOLVER']['AUGMENTATION_SHIFT']", "cfg['SOLVER']['AUGMENTATION_SCALE']", "cfg['SOLVER']['AUGMENTATION_ROTATION']", "cfg['INPUT']['INTERNAL_WIDTH']", "cfg['INPUT']['INTERNAL_HEIGHT']", 'internal_K'], {}), "(cfg['SOLVER']['AUGMENTATION_SHIFT'], cfg[\n 'SOLVER']['AUGMENTATION_SCALE'], cfg['SOLVER']['AUGMENTATION_ROTATION'],\n cfg['INPUT']['INTERNAL_WIDTH'], cfg['INPUT']['INTERNAL_HEIGHT'], internal_K\n )\n", (8027, 8232), False, 'import transform\n'), ((8329, 8403), 'transform.Normalize', 'transform.Normalize', (["cfg['INPUT']['PIXEL_MEAN']", "cfg['INPUT']['PIXEL_STD']"], {}), "(cfg['INPUT']['PIXEL_MEAN'], cfg['INPUT']['PIXEL_STD'])\n", (8348, 8403), False, 'import transform\n'), ((8450, 8470), 'transform.ToTensor', 'transform.ToTensor', ([], {}), '()\n', (8468, 8470), False, 'import transform\n'), ((8548, 8646), 'transform.Resize', 'transform.Resize', (["cfg['INPUT']['INTERNAL_WIDTH']", "cfg['INPUT']['INTERNAL_HEIGHT']", 'internal_K'], {}), "(cfg['INPUT']['INTERNAL_WIDTH'], cfg['INPUT'][\n 'INTERNAL_HEIGHT'], internal_K)\n", (8564, 8646), False, 'import transform\n'), ((8704, 8778), 'transform.Normalize', 'transform.Normalize', (["cfg['INPUT']['PIXEL_MEAN']", "cfg['INPUT']['PIXEL_STD']"], {}), "(cfg['INPUT']['PIXEL_MEAN'], cfg['INPUT']['PIXEL_STD'])\n", (8723, 8778), False, 'import transform\n'), ((8825, 8845), 'transform.ToTensor', 'transform.ToTensor', ([], {}), '()\n', (8843, 8845), False, 'import transform\n'), ((10729, 10740), 'time.time', 'time.time', ([], {}), '()\n', (10738, 10740), False, 'import time\n'), ((11040, 11101), 'torch.load', 'torch.load', (["cfg['RUNTIME']['WEIGHT_FILE']"], {'map_location': '"""cpu"""'}), "(cfg['RUNTIME']['WEIGHT_FILE'], map_location='cpu')\n", (11050, 11101), False, 'import torch\n'), ((12567, 12609), 'dataset.collate_fn', 'collate_fn', (["cfg['INPUT']['SIZE_DIVISIBLE']"], {}), "(cfg['INPUT']['SIZE_DIVISIBLE'])\n", (12577, 12609), False, 'from dataset import BOP_Dataset, collate_fn\n'), ((12875, 12917), 'dataset.collate_fn', 'collate_fn', (["cfg['INPUT']['SIZE_DIVISIBLE']"], {}), "(cfg['INPUT']['SIZE_DIVISIBLE'])\n", (12885, 12917), False, 'from dataset import BOP_Dataset, collate_fn\n'), ((13319, 13329), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (13327, 13329), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((2385, 2395), 'distributed.get_rank', 'get_rank', ([], {}), '()\n', (2393, 2395), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, DistributedSampler, all_gather\n'), ((10778, 10822), 'os.path.split', 'os.path.split', (["cfg['RUNTIME']['CONFIG_FILE']"], {}), "(cfg['RUNTIME']['CONFIG_FILE'])\n", (10791, 10822), False, 'import os\n'), ((13885, 13896), 'time.time', 'time.time', ([], {}), '()\n', (13894, 13896), False, 'import time\n'), ((2592, 2617), 'os.path.splitext', 'os.path.splitext', (['imgname'], {}), '(imgname)\n', (2608, 2617), False, 'import os\n'), ((11576, 11620), 'os.path.split', 'os.path.split', (["cfg['RUNTIME']['WEIGHT_FILE']"], {}), "(cfg['RUNTIME']['WEIGHT_FILE'])\n", (11589, 11620), False, 'import os\n')]
|
from data import dev_data, sample_num
from optimizer import *
from feature import Features, FeatureVector
from model import write_predictions
from evaluate import evaluate
def train(
data,
feature_names,
tagset,
epochs,
optimizer,
score_func=perceptron_score,
step_size=1,
):
"""
Trains the model on the data and returns the parameters
:param data: Array of dictionaries representing the data. One dictionary for each data point (as created by the
make_data_point function).
:param feature_names: Array of Strings. The list of feature names.
:param tagset: Array of Strings. The list of tags.
:param epochs: Int. The number of epochs to train
    :param optimizer: Callable. The optimization routine that runs the training loop.
    :param score_func: Callable. Scoring function used for decoding (defaults to perceptron_score).
    :param step_size: Number. The step size passed to the optimizer.
    :return: FeatureVector. The learned parameters.
"""
parameters = FeatureVector({}) # creates a zero vector
gradient = get_gradient(
data, feature_names, tagset, parameters, score_func
)
def training_observer(epoch, parameters):
"""
        Evaluates the current parameters on the development data and returns the F1 score.
:param epoch: int. The epoch
:param parameters: Feature Vector. The current parameters
:return: Double. F1 on the development data
"""
(_, _, f1) = evaluate(
dev_data, parameters, feature_names, tagset, score_func
)
return f1
# return the final parameters
return optimizer(
sample_num,
epochs,
gradient,
parameters,
training_observer,
step_size=step_size,
)
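# Minimal usage sketch (the feature names, tags, and optimizer below are
# hypothetical placeholders; the real caller supplies them from its own modules):
#   params = train(train_data, ['tag', 'tag+word'], ['O', 'B-PER', 'I-PER'],
#                  epochs=5, optimizer=sgd, step_size=1)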
|
[
"evaluate.evaluate"
] |
[((786, 803), 'feature.FeatureVector', 'FeatureVector', (['{}'], {}), '({})\n', (799, 803), False, 'from feature import Features, FeatureVector\n'), ((1337, 1402), 'evaluate.evaluate', 'evaluate', (['dev_data', 'parameters', 'feature_names', 'tagset', 'score_func'], {}), '(dev_data, parameters, feature_names, tagset, score_func)\n', (1345, 1402), False, 'from evaluate import evaluate\n')]
|
from typing import Iterable, List, Optional
import pysam
from evaluate.classification import (
Classification,
RecallClassification,
PrecisionClassification,
)
class Classifier:
    def __init__(self, sam: Optional[Iterable[pysam.AlignedSegment]] = None, name: str = ""):
if sam is None:
sam = []
self.sam = sam
self.name = name
def classify(self) -> List[Classification]:
classifications = []
for record in self.sam:
classification = self.make_classification(record=record)
classifications.append(classification)
return classifications
def make_classification(self, record):
return Classification(record)
class RecallClassifier(Classifier):
def make_classification(self, record):
return RecallClassification(record)
class PrecisionClassifier(Classifier):
def make_classification(self, record):
return PrecisionClassification(record)
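# Minimal usage sketch, assuming an alignment file opened with pysam (the path
# is hypothetical):
if __name__ == "__main__":
    with pysam.AlignmentFile("example.sam") as sam:
        recall_classifier = RecallClassifier(sam=sam, name="sample1")
        print(len(recall_classifier.classify()))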
|
[
"evaluate.classification.RecallClassification",
"evaluate.classification.Classification",
"evaluate.classification.PrecisionClassification"
] |
[((682, 704), 'evaluate.classification.Classification', 'Classification', (['record'], {}), '(record)\n', (696, 704), False, 'from evaluate.classification import Classification, RecallClassification, PrecisionClassification\n'), ((801, 829), 'evaluate.classification.RecallClassification', 'RecallClassification', (['record'], {}), '(record)\n', (821, 829), False, 'from evaluate.classification import Classification, RecallClassification, PrecisionClassification\n'), ((929, 960), 'evaluate.classification.PrecisionClassification', 'PrecisionClassification', (['record'], {}), '(record)\n', (952, 960), False, 'from evaluate.classification import Classification, RecallClassification, PrecisionClassification\n')]
|
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import torch.nn.utils.prune as prune
import numpy as np
import gin
import sys
sys.path.append('../src')
from models import LeNetFC, LeNetConv, Conv2
from train import train
from evaluate import test
from prepare_data import load_mnist, load_cifar10
from pruning import PruneModel
@gin.configurable
def main(
model=LeNetFC,
dataset='mnist',
batch_size=64,
train_size=None,
test_batch_size=1000,
epochs=3,
lr=1.0,
gamma=0.7,
no_cuda=False,
rand_seed=42,
save_model=False,
):
"""
This is the main script which trains and tests the model
Args:
model (torch.nn.Module): which model to use for the experiment
dataset (str): which dataset to use for the experiment
batch_size (int): size of training mini-batch
train_size (int): size of train set, not necessary to specify with 'mnist'
test_batch_size (int): size of testing batch
epochs (int): num epochs
lr (float): learning rate
gamma (float): rate at which to adjust lr with scheduler
no_cuda (bool): cuda or not
rand_seed (int): random seed
save_model (bool): whether to save pytorch model
"""
# view model
print(model)
if dataset == 'mnist':
train_loader, val_loader, test_loader, use_cuda = load_mnist(batch_size, test_batch_size, no_cuda, rand_seed)
elif dataset == 'cifar10':
train_loader, val_loader, test_loader, use_cuda = load_cifar10(
batch_size, train_size, test_batch_size, no_cuda, rand_seed
        )
    else:
        raise ValueError('unsupported dataset: ' + dataset)
print(len(train_loader.dataset))
# setup device, model, optimizer, and lr scheduler
device = torch.device('cuda' if use_cuda else 'cpu')
print('device:', device)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
# run the training loop
for epoch in range(1, epochs + 1):
stop, stopping_iteration = train(model, device, train_loader, val_loader, test_loader, optimizer, epoch)
scheduler.step()
# test after each epoch
test(model, device, test_loader)
if stop:
print('Stopped at overall iteration {}\n'.format(stopping_iteration + ((len(train_loader.dataset)/batch_size) * (epoch-1))))
break
if save_model:
torch.save(model.state_dict(), model.__class__.__name__ + '_' + dataset + ".pt")
print('\nPruning...\n')
prune_model = PruneModel(
model, batch_size, train_loader, val_loader, test_loader, optimizer, epochs, scheduler, device, pruning_rounds=7
)
prune_model.prune()
# # now predict w/ pruned network
# test(model, device, test_loader)
if __name__ == '__main__':
gin.parse_config_file('../config/mnist_config.gin')
main()
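    # Note: any main() parameter can also be bound in the gin file, e.g.
    # `main.epochs = 10` (standard gin binding syntax, shown as an illustration).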
|
[
"evaluate.test"
] |
[((164, 189), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (179, 189), False, 'import sys\n'), ((1900, 1943), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (1912, 1943), False, 'import torch\n'), ((2072, 2115), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': 'gamma'}), '(optimizer, step_size=1, gamma=gamma)\n', (2078, 2115), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((2727, 2855), 'pruning.PruneModel', 'PruneModel', (['model', 'batch_size', 'train_loader', 'val_loader', 'test_loader', 'optimizer', 'epochs', 'scheduler', 'device'], {'pruning_rounds': '(7)'}), '(model, batch_size, train_loader, val_loader, test_loader,\n optimizer, epochs, scheduler, device, pruning_rounds=7)\n', (2737, 2855), False, 'from pruning import PruneModel\n'), ((3001, 3052), 'gin.parse_config_file', 'gin.parse_config_file', (['"""../config/mnist_config.gin"""'], {}), "('../config/mnist_config.gin')\n", (3022, 3052), False, 'import gin\n'), ((1548, 1607), 'prepare_data.load_mnist', 'load_mnist', (['batch_size', 'test_batch_size', 'no_cuda', 'rand_seed'], {}), '(batch_size, test_batch_size, no_cuda, rand_seed)\n', (1558, 1607), False, 'from prepare_data import load_mnist, load_cifar10\n'), ((2219, 2296), 'train.train', 'train', (['model', 'device', 'train_loader', 'val_loader', 'test_loader', 'optimizer', 'epoch'], {}), '(model, device, train_loader, val_loader, test_loader, optimizer, epoch)\n', (2224, 2296), False, 'from train import train\n'), ((2364, 2396), 'evaluate.test', 'test', (['model', 'device', 'test_loader'], {}), '(model, device, test_loader)\n', (2368, 2396), False, 'from evaluate import test\n'), ((1697, 1770), 'prepare_data.load_cifar10', 'load_cifar10', (['batch_size', 'train_size', 'test_batch_size', 'no_cuda', 'rand_seed'], {}), '(batch_size, train_size, test_batch_size, no_cuda, rand_seed)\n', (1709, 1770), False, 'from prepare_data import load_mnist, load_cifar10\n')]
|
""" Evaluate the baselines on ROUGE"""
import json
import os
from os.path import join, exists
import argparse
from evaluate import eval_rouge
def main(dec_dir, ref_dir):
dec_pattern = r'test-(\d+).txt'
ref_pattern = 'test-#ID#.txt'
output = eval_rouge(dec_pattern, dec_dir, ref_pattern, ref_dir)
metric = 'rouge'
print(output)
with open(join(dec_dir, '{}.txt'.format(metric)), 'w') as f:
f.write(output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluate summarization models')
parser.add_argument('--decode_dir', action='store', required=True,
help='directory of decoded summaries')
parser.add_argument('--ref_dir', action='store', required=True,
help='directory of reference summaries')
args = parser.parse_args()
dec_dir = args.decode_dir
ref_dir = args.ref_dir
main(dec_dir, ref_dir)
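    # Example invocation (script name and paths are hypothetical):
    #   python this_script.py --decode_dir out/decoded --ref_dir data/refs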
|
[
"evaluate.eval_rouge"
] |
[((254, 308), 'evaluate.eval_rouge', 'eval_rouge', (['dec_pattern', 'dec_dir', 'ref_pattern', 'ref_dir'], {}), '(dec_pattern, dec_dir, ref_pattern, ref_dir)\n', (264, 308), False, 'from evaluate import eval_rouge\n'), ((479, 547), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate summarization models"""'}), "(description='Evaluate summarization models')\n", (502, 547), False, 'import argparse\n')]
|
import sys
import numpy as np
from os.path import join
from copy import deepcopy
import torch
from torch.nn.functional import softmax
from torch.nn.utils import clip_grad_norm_
from transformers import BertTokenizer, BertForQuestionAnswering
from utils import AdamW
from data import get_dataloader
from evaluate import f1_score, exact_match_score, metric_max_over_ground_truths
from datedur import DateDurationQA
np.random.seed(42)
torch.manual_seed(42)
norm_tokenizer = BertTokenizer.from_pretrained('/home/M10815022/Models/bert-wwm-ext')
def validate_dataset(model, split, tokenizer, device, topk=1, prefix=None):
assert split in ('dev', 'test')
dataloader = get_dataloader('bert', split, tokenizer, bwd=False, \
batch_size=1, num_workers=16, prefix=prefix)
em, f1, count = 0, 0, 0
ddqa = DateDurationQA(tokenizer, model, device)
for batch in dataloader:
passage, question, answer = batch[0]
preds, _ = ddqa.direct_predict(passage, question, topk=topk)
count += 1
if len(preds) > 0:
norm_preds_tokens = [norm_tokenizer.basic_tokenizer.tokenize(pred) for pred in preds]
norm_preds = [norm_tokenizer.convert_tokens_to_string(norm_pred_tokens) for norm_pred_tokens in norm_preds_tokens]
norm_answer_tokens = [norm_tokenizer.basic_tokenizer.tokenize(ans) for ans in answer]
norm_answer = [norm_tokenizer.convert_tokens_to_string(ans_tokens) for ans_tokens in norm_answer_tokens]
em += max(metric_max_over_ground_truths(exact_match_score, norm_pred, norm_answer) for norm_pred in norm_preds)
f1 += max(metric_max_over_ground_truths(f1_score, norm_pred, norm_answer) for norm_pred in norm_preds)
ddqa.tokenizer = None
ddqa.model = None
ddqa.device = None
del ddqa, dataloader
return em, f1, count
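# Note on validate_dataset: both predictions and gold answers are re-tokenized
# with the reference tokenizer (norm_tokenizer) before scoring, so EM/F1 are
# computed over whitespace-normalized strings rather than raw model output.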
def validate(model, tokenizer, device, topk=1, prefix=None):
if prefix:
print('---- Validation results on %s dataset ----' % prefix)
# Valid set
val_em, val_f1, val_count = validate_dataset(model, 'dev', tokenizer, device, topk, prefix)
val_avg_em = 100 * val_em / val_count
val_avg_f1 = 100 * val_f1 / val_count
# Test set
test_em, test_f1, test_count = validate_dataset(model, 'test', tokenizer, device, topk, prefix)
test_avg_em = 100 * test_em / test_count
test_avg_f1 = 100 * test_f1 / test_count
print('%d-best | val_em=%.5f, val_f1=%.5f | test_em=%.5f, test_f1=%.5f' \
% (topk, val_avg_em, val_avg_f1, test_avg_em, test_avg_f1))
    return val_avg_em  # exact match on the dev set serves as the model-selection score
if __name__ == '__main__':
if len(sys.argv) != 4:
print('Usage: python3 train_bert.py cuda:<n> <model_path> <save_path>')
exit(1)
# Config
lr = 3e-5
batch_size = 4
accumulate_batch_size = 64
assert accumulate_batch_size % batch_size == 0
update_stepsize = accumulate_batch_size // batch_size
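    # Illustrative arithmetic: with batch_size=4 and accumulate_batch_size=64,
    # update_stepsize is 16, so optimizer.step() fires once every 16 backward
    # passes and gradients accumulate to an effective batch of 64.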
model_path = sys.argv[2]
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForQuestionAnswering.from_pretrained(model_path)
device = torch.device(sys.argv[1])
model.to(device)
optimizer = AdamW(model.parameters(), lr=lr)
optimizer.zero_grad()
step = 0
patience, best_val = 0, 0
best_state_dict = model.state_dict()
dataloader = get_dataloader('bert', 'train', tokenizer, batch_size=batch_size, num_workers=16)
n_step_per_epoch = len(dataloader)
n_step_per_validation = n_step_per_epoch // 20
print('%d steps per epoch.' % n_step_per_epoch)
print('%d steps per validation.' % n_step_per_validation)
print('Start training...')
while True:
for batch in dataloader:
input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
input_ids = input_ids.cuda(device=device)
attention_mask = attention_mask.cuda(device=device)
token_type_ids = token_type_ids.cuda(device=device)
start_positions = start_positions.cuda(device=device)
end_positions = end_positions.cuda(device=device)
model.train()
loss = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, \
start_positions=start_positions, end_positions=end_positions)[0]
loss.backward()
step += 1
print('step %d | Training...\r' % step, end='')
if step % update_stepsize == 0:
optimizer.step()
optimizer.zero_grad()
if step % n_step_per_validation == 0:
print("step %d | Validating..." % step)
                val_em = validate(model, tokenizer, device, topk=1)  # validate() returns dev exact match
                if val_em > best_val:
                    patience = 0
                    best_val = val_em
best_state_dict = deepcopy(model.state_dict())
save_path = join(sys.argv[3], 'state_dict.pt')
torch.save(best_state_dict, save_path)
else:
patience += 1
if patience >= 40 or step >= 200000:
print('Finish training. Scoring 1-5 best results...')
save_path = join(sys.argv[3], 'state_dict.pt')
torch.save(best_state_dict, save_path)
model.load_state_dict(best_state_dict)
for k in range(1, 6):
validate(model, tokenizer, device, topk=k)
del model, dataloader
exit(0)
|
[
"evaluate.metric_max_over_ground_truths"
] |
[((417, 435), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (431, 435), True, 'import numpy as np\n'), ((436, 457), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (453, 457), False, 'import torch\n'), ((476, 544), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""/home/M10815022/Models/bert-wwm-ext"""'], {}), "('/home/M10815022/Models/bert-wwm-ext')\n", (505, 544), False, 'from transformers import BertTokenizer, BertForQuestionAnswering\n'), ((676, 776), 'data.get_dataloader', 'get_dataloader', (['"""bert"""', 'split', 'tokenizer'], {'bwd': '(False)', 'batch_size': '(1)', 'num_workers': '(16)', 'prefix': 'prefix'}), "('bert', split, tokenizer, bwd=False, batch_size=1,\n num_workers=16, prefix=prefix)\n", (690, 776), False, 'from data import get_dataloader\n'), ((843, 883), 'datedur.DateDurationQA', 'DateDurationQA', (['tokenizer', 'model', 'device'], {}), '(tokenizer, model, device)\n', (857, 883), False, 'from datedur import DateDurationQA\n'), ((3010, 3051), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_path'], {}), '(model_path)\n', (3039, 3051), False, 'from transformers import BertTokenizer, BertForQuestionAnswering\n'), ((3064, 3116), 'transformers.BertForQuestionAnswering.from_pretrained', 'BertForQuestionAnswering.from_pretrained', (['model_path'], {}), '(model_path)\n', (3104, 3116), False, 'from transformers import BertTokenizer, BertForQuestionAnswering\n'), ((3131, 3156), 'torch.device', 'torch.device', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3143, 3156), False, 'import torch\n'), ((3356, 3441), 'data.get_dataloader', 'get_dataloader', (['"""bert"""', '"""train"""', 'tokenizer'], {'batch_size': 'batch_size', 'num_workers': '(16)'}), "('bert', 'train', tokenizer, batch_size=batch_size,\n num_workers=16)\n", (3370, 3441), False, 'from data import get_dataloader\n'), ((5276, 5310), 'os.path.join', 'join', (['sys.argv[3]', '"""state_dict.pt"""'], {}), "(sys.argv[3], 'state_dict.pt')\n", (5280, 5310), False, 'from os.path import join\n'), ((5327, 5365), 'torch.save', 'torch.save', (['best_state_dict', 'save_path'], {}), '(best_state_dict, save_path)\n', (5337, 5365), False, 'import torch\n'), ((1537, 1609), 'evaluate.metric_max_over_ground_truths', 'metric_max_over_ground_truths', (['exact_match_score', 'norm_pred', 'norm_answer'], {}), '(exact_match_score, norm_pred, norm_answer)\n', (1566, 1609), False, 'from evaluate import f1_score, exact_match_score, metric_max_over_ground_truths\n'), ((1661, 1724), 'evaluate.metric_max_over_ground_truths', 'metric_max_over_ground_truths', (['f1_score', 'norm_pred', 'norm_answer'], {}), '(f1_score, norm_pred, norm_answer)\n', (1690, 1724), False, 'from evaluate import f1_score, exact_match_score, metric_max_over_ground_truths\n'), ((4978, 5012), 'os.path.join', 'join', (['sys.argv[3]', '"""state_dict.pt"""'], {}), "(sys.argv[3], 'state_dict.pt')\n", (4982, 5012), False, 'from os.path import join\n'), ((5033, 5071), 'torch.save', 'torch.save', (['best_state_dict', 'save_path'], {}), '(best_state_dict, save_path)\n', (5043, 5071), False, 'import torch\n')]
|
'''
* @author Waileinsamkeit
* @email <EMAIL>
* @create date 2020-08-07 15:52:06
* @modify date 2020-08-07 15:52:06
'''
import random
import pandas as pd
import time
import os
from tabulate import tabulate
from utils import extend_map,add_label_for_lstmcrf,save_model,flatten_lists,load_model
from models.hmm import HMM
from models.standard import HMM_standard
from models.bilstm_crf import BiLSTM_CRF_Model
from data import build_corpus
from datetime import datetime
from evaluate import Eval_unit,evaluate_entity_label,evaluate_single_label,evaluate_multiclass,unitstopd
from config import ModelPathConfig,ResultPathConfig
import torch
from evaluating import bert_test,_bilstm_crf_test
if torch.cuda.is_available():
torch.cuda.set_device(1)
cwd=os.getcwd()
def sample_print_test(word_list,tag_list,sample_num=5):
indices=random.sample(range(len(word_list)),sample_num)
print_word_list=[word_list[i] for i in indices]
print_tag_list=[tag_list[i] for i in indices]
for i in range(sample_num):
s=' '.join(print_word_list[i])
s+='\n'
s+=' '.join(print_tag_list[i])
print(s)
def bilstm_crf_test(if_train=False):
model_is_existed=os.path.exists(ModelPathConfig.bilstm_crf)
print("upload data!")
word_lists,tag_lists,word2id,tag2id=build_corpus("train")
test_word_lists,test_tag_lists,_,_=build_corpus("test")
labels=list(tag2id.keys())
dev_indices=random.sample(range(len(word_lists)),len(word_lists)//5)
train_indices=[i for i in range(len(word_lists)) if i not in dev_indices]
dev_word_lists=[ word_lists[ind] for ind in dev_indices]
dev_tag_lists=[tag_lists[ind] for ind in dev_indices]
train_word_lists=[word_lists[ind] for ind in train_indices]
train_tag_lists=[tag_lists[ind] for ind in train_indices]
test_word_lists,test_tag_lists=add_label_for_lstmcrf(test_word_lists,test_tag_lists,test=True)
bilstm_crf_word2id,bilstm_crf_tag2id=extend_map(word2id,tag2id,crf=True)
if if_train or not model_is_existed:
        print('start training')
train_word_lists,train_tag_lists=add_label_for_lstmcrf(train_word_lists,train_tag_lists,test=False)
dev_word_lists,dev_tag_lists=add_label_for_lstmcrf(dev_word_lists,dev_tag_lists,test=False)
# sample_print_test(train_word_lists,train_tag_lists)
start=datetime.now()
vocab_size=len(bilstm_crf_word2id)
out_size=len(tag2id)
bilstm_model=BiLSTM_CRF_Model(vocab_size,out_size,crf=True)
bilstm_model.train(train_word_lists,train_tag_lists,\
bilstm_crf_word2id,bilstm_crf_tag2id,dev_word_lists,dev_tag_lists)
deltatime=datetime.now()-start
print("Training is finished, {} second".format(deltatime.seconds))
save_model(bilstm_model,ModelPathConfig.bilstm_crf)
print("Save the model")
else:
print("load model")
bilstm_model=load_model(ModelPathConfig.bilstm_crf)
print("test the model")
    pred_tag_lists, label_tag_lists = bilstm_model.test(test_word_lists,test_tag_lists,bilstm_crf_word2id,bilstm_crf_tag2id)
units=evaluate_entity_label(pred_tag_lists,label_tag_lists,labels)
df=unitstopd(units)
df.to_csv(ResultPathConfig.bilstm_crf_entity)
print(tabulate(df,headers='keys',tablefmt='psql'))
units=evaluate_single_label(pred_tag_lists,label_tag_lists,labels)
df=unitstopd(units)
df.to_csv(ResultPathConfig.bilstm_crf_model)
print(tabulate(df,headers='keys',tablefmt='psql'))
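# Note: each test produces two metric tables: evaluate_entity_label scores
# whole entity spans while evaluate_single_label scores individual tag labels;
# both are written to CSV and printed as psql-style tables via tabulate.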
def HMM_test(if_train=True):
model_is_existed=os.path.exists(ModelPathConfig.hmm)
print("upload data!")
word_lists,tag_lists,word2id,tag2id=build_corpus("train")
test_word_lists,test_tag_lists,_,_=build_corpus("test")
# word_lists,tag_lists,word2id,tag2id=build_corpus("train",data_dir=os.path.join(os.getcwd(),"data",'ResumeNER'))
# test_word_lists,test_tag_lists,_,_=build_corpus("test",data_dir=os.path.join(os.getcwd(),"data",'ResumeNER'))
hmm_model=HMM(len(tag2id),len(word2id))
if if_train or not model_is_existed:
print("start to training")
hmm_model.train(word_lists,tag_lists,word2id,tag2id)
print("save the model")
save_model(hmm_model,ModelPathConfig.hmm)
else:
print("load model")
hmm_model=load_model(ModelPathConfig.hmm)
pred_tag_lists=hmm_model.test(test_word_lists,_,word2id,tag2id)
label_tag_lists=test_tag_lists
units=evaluate_entity_label(pred_tag_lists,label_tag_lists,list(tag2id.keys()))
df=unitstopd(units)
df.to_csv(ResultPathConfig.hmm_entity)
print(tabulate(df,headers='keys',tablefmt='psql'))
units=evaluate_single_label(pred_tag_lists,label_tag_lists,list(tag2id.keys()))
df=unitstopd(units)
df.to_csv(ResultPathConfig.hmm_model)
print(tabulate(df,headers='keys',tablefmt='psql'))
def HMM_test_standard(if_train=True):
model_is_existed=os.path.exists(ModelPathConfig.hmm_standard)
print("upload data!")
word_lists,tag_lists,word2id,tag2id=build_corpus("train",data_dir=os.path.join(os.getcwd(),"data",'ResumeNER'))
test_word_lists,test_tag_lists,_,_=build_corpus("test",data_dir=os.path.join(os.getcwd(),"data",'ResumeNER'))
hmm_model=HMM_standard(len(tag2id),len(word2id))
if if_train or not model_is_existed:
print("start to training")
hmm_model.train(word_lists,tag_lists,word2id,tag2id)
print("save the model")
save_model(hmm_model,ModelPathConfig.hmm_standard)
else:
print("load model")
hmm_model=load_model(ModelPathConfig.hmm_standard)
pred_tag_lists=hmm_model.test(test_word_lists,word2id,tag2id)
label_tag_lists=test_tag_lists
units=evaluate_entity_label(pred_tag_lists,label_tag_lists,list(tag2id.keys()))
df=unitstopd(units)
df.to_csv(ResultPathConfig.hmm_entity_standard)
print(tabulate(df,headers='keys',tablefmt='psql'))
units=evaluate_single_label(pred_tag_lists,label_tag_lists,list(tag2id.keys()))
df=unitstopd(units)
df.to_csv(ResultPathConfig.hmm_model_standard)
print(tabulate(df,headers='keys',tablefmt='psql'))
if __name__=='__main__':
bilstm_crf_test(if_train=True)
# _bilstm_crf_test(if_train=True)
|
[
"evaluate.unitstopd",
"evaluate.evaluate_single_label",
"evaluate.evaluate_entity_label"
] |
[((701, 726), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (724, 726), False, 'import torch\n'), ((767, 778), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (776, 778), False, 'import os\n'), ((732, 756), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(1)'], {}), '(1)\n', (753, 756), False, 'import torch\n'), ((1202, 1244), 'os.path.exists', 'os.path.exists', (['ModelPathConfig.bilstm_crf'], {}), '(ModelPathConfig.bilstm_crf)\n', (1216, 1244), False, 'import os\n'), ((1312, 1333), 'data.build_corpus', 'build_corpus', (['"""train"""'], {}), "('train')\n", (1324, 1333), False, 'from data import build_corpus\n'), ((1373, 1393), 'data.build_corpus', 'build_corpus', (['"""test"""'], {}), "('test')\n", (1385, 1393), False, 'from data import build_corpus\n'), ((1857, 1922), 'utils.add_label_for_lstmcrf', 'add_label_for_lstmcrf', (['test_word_lists', 'test_tag_lists'], {'test': '(True)'}), '(test_word_lists, test_tag_lists, test=True)\n', (1878, 1922), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((1962, 1999), 'utils.extend_map', 'extend_map', (['word2id', 'tag2id'], {'crf': '(True)'}), '(word2id, tag2id, crf=True)\n', (1972, 1999), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((3132, 3194), 'evaluate.evaluate_entity_label', 'evaluate_entity_label', (['pred_tag_lists', 'label_tag_lists', 'labels'], {}), '(pred_tag_lists, label_tag_lists, labels)\n', (3153, 3194), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((3200, 3216), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (3209, 3216), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((3333, 3395), 'evaluate.evaluate_single_label', 'evaluate_single_label', (['pred_tag_lists', 'label_tag_lists', 'labels'], {}), '(pred_tag_lists, label_tag_lists, labels)\n', (3354, 3395), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((3401, 3417), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (3410, 3417), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((3574, 3609), 'os.path.exists', 'os.path.exists', (['ModelPathConfig.hmm'], {}), '(ModelPathConfig.hmm)\n', (3588, 3609), False, 'import os\n'), ((3677, 3698), 'data.build_corpus', 'build_corpus', (['"""train"""'], {}), "('train')\n", (3689, 3698), False, 'from data import build_corpus\n'), ((3738, 3758), 'data.build_corpus', 'build_corpus', (['"""test"""'], {}), "('test')\n", (3750, 3758), False, 'from data import build_corpus\n'), ((4541, 4557), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (4550, 4557), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((4748, 4764), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (4757, 4764), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((4923, 4967), 'os.path.exists', 'os.path.exists', (['ModelPathConfig.hmm_standard'], {}), '(ModelPathConfig.hmm_standard)\n', (4937, 4967), False, 'import os\n'), ((5799, 5815), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (5808, 5815), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((6015, 6031), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (6024, 6031), False, 'from evaluate import Eval_unit, evaluate_entity_label, evaluate_single_label, evaluate_multiclass, unitstopd\n'), ((2120, 2188), 'utils.add_label_for_lstmcrf', 'add_label_for_lstmcrf', (['train_word_lists', 'train_tag_lists'], {'test': '(False)'}), '(train_word_lists, train_tag_lists, test=False)\n', (2141, 2188), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((2224, 2288), 'utils.add_label_for_lstmcrf', 'add_label_for_lstmcrf', (['dev_word_lists', 'dev_tag_lists'], {'test': '(False)'}), '(dev_word_lists, dev_tag_lists, test=False)\n', (2245, 2288), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((2365, 2379), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2377, 2379), False, 'from datetime import datetime\n'), ((2474, 2522), 'models.bilstm_crf.BiLSTM_CRF_Model', 'BiLSTM_CRF_Model', (['vocab_size', 'out_size'], {'crf': '(True)'}), '(vocab_size, out_size, crf=True)\n', (2490, 2522), False, 'from models.bilstm_crf import BiLSTM_CRF_Model\n'), ((2785, 2837), 'utils.save_model', 'save_model', (['bilstm_model', 'ModelPathConfig.bilstm_crf'], {}), '(bilstm_model, ModelPathConfig.bilstm_crf)\n', (2795, 2837), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((2928, 2966), 'utils.load_model', 'load_model', (['ModelPathConfig.bilstm_crf'], {}), '(ModelPathConfig.bilstm_crf)\n', (2938, 2966), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((3277, 3322), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (3285, 3322), False, 'from tabulate import tabulate\n'), ((3477, 3522), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (3485, 3522), False, 'from tabulate import tabulate\n'), ((4216, 4258), 'utils.save_model', 'save_model', (['hmm_model', 'ModelPathConfig.hmm'], {}), '(hmm_model, ModelPathConfig.hmm)\n', (4226, 4258), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((4314, 4345), 'utils.load_model', 'load_model', (['ModelPathConfig.hmm'], {}), '(ModelPathConfig.hmm)\n', (4324, 4345), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((4611, 4656), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (4619, 4656), False, 'from tabulate import tabulate\n'), ((4817, 4862), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (4825, 4862), False, 'from tabulate import tabulate\n'), ((5457, 5508), 'utils.save_model', 'save_model', (['hmm_model', 'ModelPathConfig.hmm_standard'], {}), '(hmm_model, ModelPathConfig.hmm_standard)\n', (5467, 5508), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((5564, 5604), 'utils.load_model', 'load_model', (['ModelPathConfig.hmm_standard'], {}), '(ModelPathConfig.hmm_standard)\n', (5574, 5604), False, 'from utils import extend_map, add_label_for_lstmcrf, save_model, flatten_lists, load_model\n'), ((5878, 5923), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (5886, 5923), False, 'from tabulate import tabulate\n'), ((6093, 6138), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (6101, 6138), False, 'from tabulate import tabulate\n'), ((2680, 2694), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2692, 2694), False, 'from datetime import datetime\n'), ((5078, 5089), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5087, 5089), False, 'import os\n'), ((5192, 5203), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5201, 5203), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
last mod 7/2/18
usage for new detector:
first disable metrics check
min_sensor_prob to <<0
use simple scoreToProb
use the plots to figure out a good scoreToProb function
then you can run metrics check
current avg precisions:
.5 iou -- .855, .783, .774 --> .863, .834, .837
.7 iou -- .538, .597, .619
n ground truths: 2608 easy, 6890 moderate, 8307 hard?
monogrnet at .3 IoU:: .815, .72, .653
voxeljones at .3: .91, .79, .76
"""
import numpy as np
from sklearn.neighbors import KernelDensity
from sklearn.linear_model import LinearRegression
from config import grndstart, grndstep, grndlen
min_sensor_prob_to_report = .03
dataformat = '/home/m2/Data/kitti/estimates/detectionsBT630/{:02d}f{:04d}.npy'
#dataformat = '/home/m2/Data/kitti/estimates/detectionsMGR/{:02d}f{:04d}.npy'
#def scoreToProb(score): return score+30 ### default before you've checked performance
#def scoreToProb(score): # BT323
# score = 1/(1+np.exp(-.3*score+1))
# if score < .25: return 0
# return max(0, min(1, 0.10 - 1.05*score + 2.04*score*score))*.8 + .1
#def scoreToProb(score): # BT528
# return np.minimum(.2 + score*.09 - score*score*.003, 1)
#def scoreToProb(score): # BT620
# return np.maximum(np.minimum(.2 + .11*score - .0025*score*score, .95), .05)
#def scoreToProb(score): # BT630 pre 8/14/19
# out = np.where(score < -3, score*.0025 + .07,
# .33 + .11*score - .01*score*score)
# return np.maximum(np.minimum(out, .99), .01)
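# Piecewise calibration: a shallow linear ramp for very low scores (< -3),
# a logistic curve elsewhere; the result is clipped to [0.01, 0.99] below.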
def scoreToProb(score): # BT630 post 8/14/19
out = np.where(score < -3, score*.0025 + .07, 1/(1+np.exp(1.-score*.82)))
return np.maximum(np.minimum(out, .99), .01)
def getMsmts(sceneidx, fileidx):
data = np.load(dataformat.format(sceneidx, fileidx))
if data.shape == (0,):
data = np.zeros((0,6))
data[data[:,2]>np.pi, 2] -= 2*np.pi
data[:,5] = scoreToProb(data[:,5])
data = data[data[:,5] > min_sensor_prob_to_report]
return data
def getMsmtsInTile(msmts, tilex, tiley):
xlo = (tilex + grndstart[0])*grndstep[0]
xhi = xlo + grndstep[0]
ylo = (tiley + grndstart[1])*grndstep[1]
yhi = ylo + grndstep[1]
intile = ((msmts[:,0] >= xlo) & (msmts[:,0] < xhi) &
(msmts[:,1] >= ylo) & (msmts[:,1] < yhi))
assert sum(intile) <= 2 # for this simulation
return msmts[intile].copy()
if __name__ == '__main__':
# analyze score distribution for true and false detections
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
from evaluate import MetricAvgPrec, soMetricIoU
from analyzeGT import readGroundTruthFileTracking
from trackinginfo import sceneranges
gt_files = '/home/m2/Data/kitti/tracking_gt/{:04d}.txt'
scene_idxs = list(range(10))
scoresmatch = []
scorescrop = []
scoresmiss = []
nmissed = 0
nmissedcrop = 0
metric = MetricAvgPrec()
for scene_idx in scene_idxs:
startfileidx, endfileidx = sceneranges[scene_idx]
with open(gt_files.format(scene_idx), 'r') as fd: gtfilestr = fd.read()
gt_all, gtdontcares = readGroundTruthFileTracking(gtfilestr, ('Car', 'Van'))
selfposT = None # isn't actually used
for fileidx in range(startfileidx, endfileidx):
gt = gt_all[fileidx]
gtscored = np.array([gtobj['scored'] for gtobj in gt])
gtboxes = np.array([gtobj['box'] for gtobj in gt])
gtdiff = np.array([gtobj['difficulty'] for gtobj in gt])
msmts = getMsmts(scene_idx, fileidx)
ngt = gtscored.shape[0]
nmsmts = msmts.shape[0]
matches = np.zeros((ngt, nmsmts))
for gtidx, msmtidx in np.ndindex(ngt, nmsmts):
gtbox = gtboxes[gtidx]
msmt = msmts[msmtidx]
#closeness = np.hypot(*(gtbox[:2]-msmt[:2])) * .4
#closeness += ((gtbox[2]-msmt[2]+np.pi)%(2*np.pi)-np.pi) * 1.
#closeness += np.hypot(*(gtbox[3:]-msmt[3:5])) * .3
#closeness -= 1
#closeness = np.hypot(*(gtbox[:2]-msmt[:2])) - 1.5
closeness = soMetricIoU(gtbox, msmt, cutoff=.1)
matches[gtidx, msmtidx] = min(closeness, 0)
matchesnonmiss = matches < 0
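            # Hungarian assignment: pair each ground truth with at most one detection,
            # minimizing the total matching cost (entries are negative for candidate matches)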
rowpairs, colpairs = linear_sum_assignment(matches)
msmtsmissed = np.ones(nmsmts, dtype=bool)
for rowidx, colidx in zip(rowpairs, colpairs):
nonmiss = matchesnonmiss[rowidx, colidx]
noncrop = gtscored[rowidx]
if nonmiss:
msmtsmissed[colidx] = False
if noncrop:
scoresmatch.append(msmts[colidx,5])
else:
scorescrop.append(msmts[colidx,5])
else:
nmissed += 1
if noncrop:
nmissedcrop += 1
for msmtidx in range(nmsmts):
if msmtsmissed[msmtidx]:
scoresmiss.append(msmts[msmtidx,5])
metric.add(gtboxes, gtscored, gtdiff, msmts[:,:5], msmts[:,5])
scoresmatch.sort()
scorescrop.sort()
scoresmiss.sort()
nmatches = len(scoresmatch)
nmisses = len(scoresmiss)
relmatches = float(nmatches) / (nmatches + nmisses)
allscores = scoresmatch + scorescrop + scoresmiss
minscore = np.percentile(allscores, .5)
maxscore = np.percentile(allscores, 99.5)
scorearray = np.linspace(minscore, maxscore, 100)
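    # Gaussian KDEs of the score distributions for matched vs. missed detections;
    # weighting each density by its class frequency makes the ratio below an
    # estimate of P(true detection | score)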
kd = KernelDensity(bandwidth = (maxscore-minscore)/50, kernel='gaussian')
scoreT = kd.fit(np.array(scoresmatch)[:,None]).score_samples(
scorearray[:,None])
scoreT = np.exp(scoreT) * relmatches
scoreF = kd.fit(np.array(scoresmiss)[:,None]).score_samples(
scorearray[:,None])
scoreF = np.exp(scoreF) * (1-relmatches)
ratio = scoreT / np.maximum(scoreT + scoreF, 1e-8)
# fit a quadratic model to the ratio of true to false
X = np.column_stack((scorearray, scorearray**2))
lm = LinearRegression(fit_intercept=True, normalize=True).fit(X, ratio)
coefs = (lm.intercept_, lm.coef_[0], lm.coef_[1])
print(coefs)
ests = coefs[0] + coefs[1]*scorearray + coefs[2]*scorearray**2
plt.plot(scorearray, ratio, 'b', scorearray, ests, 'g--')
avgprec = metric.calc()
|
[
"evaluate.soMetricIoU",
"evaluate.MetricAvgPrec"
] |
[((2937, 2952), 'evaluate.MetricAvgPrec', 'MetricAvgPrec', ([], {}), '()\n', (2950, 2952), False, 'from evaluate import MetricAvgPrec, soMetricIoU\n'), ((5510, 5539), 'numpy.percentile', 'np.percentile', (['allscores', '(0.5)'], {}), '(allscores, 0.5)\n', (5523, 5539), True, 'import numpy as np\n'), ((5554, 5584), 'numpy.percentile', 'np.percentile', (['allscores', '(99.5)'], {}), '(allscores, 99.5)\n', (5567, 5584), True, 'import numpy as np\n'), ((5602, 5638), 'numpy.linspace', 'np.linspace', (['minscore', 'maxscore', '(100)'], {}), '(minscore, maxscore, 100)\n', (5613, 5638), True, 'import numpy as np\n'), ((5648, 5718), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': '((maxscore - minscore) / 50)', 'kernel': '"""gaussian"""'}), "(bandwidth=(maxscore - minscore) / 50, kernel='gaussian')\n", (5661, 5718), False, 'from sklearn.neighbors import KernelDensity\n'), ((6143, 6189), 'numpy.column_stack', 'np.column_stack', (['(scorearray, scorearray ** 2)'], {}), '((scorearray, scorearray ** 2))\n', (6158, 6189), True, 'import numpy as np\n'), ((6406, 6463), 'matplotlib.pyplot.plot', 'plt.plot', (['scorearray', 'ratio', '"""b"""', 'scorearray', 'ests', '"""g--"""'], {}), "(scorearray, ratio, 'b', scorearray, ests, 'g--')\n", (6414, 6463), True, 'import matplotlib.pyplot as plt\n'), ((1682, 1703), 'numpy.minimum', 'np.minimum', (['out', '(0.99)'], {}), '(out, 0.99)\n', (1692, 1703), True, 'import numpy as np\n'), ((1842, 1858), 'numpy.zeros', 'np.zeros', (['(0, 6)'], {}), '((0, 6))\n', (1850, 1858), True, 'import numpy as np\n'), ((3168, 3222), 'analyzeGT.readGroundTruthFileTracking', 'readGroundTruthFileTracking', (['gtfilestr', "('Car', 'Van')"], {}), "(gtfilestr, ('Car', 'Van'))\n", (3195, 3222), False, 'from analyzeGT import readGroundTruthFileTracking\n'), ((5840, 5854), 'numpy.exp', 'np.exp', (['scoreT'], {}), '(scoreT)\n', (5846, 5854), True, 'import numpy as np\n'), ((5990, 6004), 'numpy.exp', 'np.exp', (['scoreF'], {}), '(scoreF)\n', (5996, 6004), True, 'import numpy as np\n'), ((6043, 6077), 'numpy.maximum', 'np.maximum', (['(scoreT + scoreF)', '(1e-08)'], {}), '(scoreT + scoreF, 1e-08)\n', (6053, 6077), True, 'import numpy as np\n'), ((3390, 3433), 'numpy.array', 'np.array', (["[gtobj['scored'] for gtobj in gt]"], {}), "([gtobj['scored'] for gtobj in gt])\n", (3398, 3433), True, 'import numpy as np\n'), ((3456, 3496), 'numpy.array', 'np.array', (["[gtobj['box'] for gtobj in gt]"], {}), "([gtobj['box'] for gtobj in gt])\n", (3464, 3496), True, 'import numpy as np\n'), ((3518, 3565), 'numpy.array', 'np.array', (["[gtobj['difficulty'] for gtobj in gt]"], {}), "([gtobj['difficulty'] for gtobj in gt])\n", (3526, 3565), True, 'import numpy as np\n'), ((3722, 3745), 'numpy.zeros', 'np.zeros', (['(ngt, nmsmts)'], {}), '((ngt, nmsmts))\n', (3730, 3745), True, 'import numpy as np\n'), ((3780, 3803), 'numpy.ndindex', 'np.ndindex', (['ngt', 'nmsmts'], {}), '(ngt, nmsmts)\n', (3790, 3803), True, 'import numpy as np\n'), ((4391, 4421), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['matches'], {}), '(matches)\n', (4412, 4421), False, 'from scipy.optimize import linear_sum_assignment\n'), ((4448, 4475), 'numpy.ones', 'np.ones', (['nmsmts'], {'dtype': 'bool'}), '(nmsmts, dtype=bool)\n', (4455, 4475), True, 'import numpy as np\n'), ((6197, 6249), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)', 'normalize': '(True)'}), '(fit_intercept=True, normalize=True)\n', (6213, 6249), False, 'from sklearn.linear_model import LinearRegression\n'), ((1637, 1663), 'numpy.exp', 'np.exp', (['(1.0 - score * 0.82)'], {}), '(1.0 - score * 0.82)\n', (1643, 1663), True, 'import numpy as np\n'), ((4221, 4257), 'evaluate.soMetricIoU', 'soMetricIoU', (['gtbox', 'msmt'], {'cutoff': '(0.1)'}), '(gtbox, msmt, cutoff=0.1)\n', (4232, 4257), False, 'from evaluate import MetricAvgPrec, soMetricIoU\n'), ((5737, 5758), 'numpy.array', 'np.array', (['scoresmatch'], {}), '(scoresmatch)\n', (5745, 5758), True, 'import numpy as np\n'), ((5888, 5908), 'numpy.array', 'np.array', (['scoresmiss'], {}), '(scoresmiss)\n', (5896, 5908), True, 'import numpy as np\n')]
|
import torch
import time
import numpy as np
from torch import nn, optim
import torch.utils.data as data
import torch.nn.utils.rnn as rnn_utils
from data_process import MyDataset
from evaluate import valid_evaluate, test_evaluate
def train_epoch(model, train_data, loss_weights, optimizer, epoch, config, padded_quotes, quote_lens, padded_exp, exp_lens):
start = time.time()
model.train()
print('Train Epoch: %d start!' % epoch)
avg_loss = 0.0
train_loader = data.DataLoader(train_data, collate_fn=train_data.my_collate, batch_size=config.batch_size, num_workers=0, shuffle=True)
for batch_idx, batch in enumerate(train_loader):
convs, conv_lens, conv_turn_lens, labels = batch[0], batch[1], batch[2], batch[3]
if torch.cuda.is_available() and config.use_gpu: # run in GPU
convs = convs.cuda()
labels = labels.cuda()
padded_quotes = padded_quotes.cuda()
predictions = model(convs, conv_lens, conv_turn_lens, padded_quotes, quote_lens, padded_exp, exp_lens, labels=labels)
if config.quote_query_loss_weight > 0 or config.hist_query_loss_weight > 0:
preds, query_reps, quote_reps, hist_reps = predictions[0], predictions[1], predictions[2], predictions[3]
loss = nn.CrossEntropyLoss()(preds, labels.long())
if batch_idx == 0:
loss_print_1 = loss.data
loss_print_2 = 0
loss_print_3 = 0
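            # auxiliary objective: summed MSE pulling the query representation toward
            # the matched quote representation (the hist-query branch only logs 0 here)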
if config.quote_query_loss_weight > 0:
loss += config.quote_query_loss_weight * nn.MSELoss(reduction='sum')(query_reps, quote_reps) / config.batch_size
if batch_idx == 0:
loss_print_2 = (nn.MSELoss(reduction='sum')(query_reps, quote_reps) / config.batch_size).data
if batch_idx == 0:
print('loss, quote_query, hist_query', loss_print_1, loss_print_2, loss_print_3)
else:
loss = nn.CrossEntropyLoss()(predictions, labels.long())
avg_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss /= len(train_data)
end = time.time()
print('Train Epoch: %d done! Train avg_loss: %g! Using time: %.2f minutes!' % (epoch, avg_loss, (end - start) / 60))
return avg_loss
def define_att_weight(model, train_data, config):
model.eval()
print('Begin to find objective attention weights!')
train_loader = data.DataLoader(train_data, collate_fn=train_data.my_collate, batch_size=config.batch_size, num_workers=0)
for batch_idx, batch in enumerate(train_loader):
convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens, labels = batch[0], batch[1], batch[2], batch[3], batch[4], batch[5], batch[6]
if torch.cuda.is_available() and config.use_gpu: # run in GPU
convs = convs.cuda()
users = users.cuda()
labels = labels.cuda()
important_turn = torch.zeros_like(convs[:, :, 0]).float()
predictions, _ = model(convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens)
for t in range(max(conv_lens)-1):
have_masked = []
for c in range(len(convs)):
if conv_lens[c] - 1 > t:
convs[c, t] = torch.ones_like(convs[c, t])
have_masked.append(c)
new_predictions, _ = model(convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens)
current_importance = torch.ge(torch.abs(new_predictions-labels), torch.abs(predictions-labels)).long()
for c in have_masked:
important_turn[c, t] = current_importance[c]
for turn_weight in important_turn:
if torch.sum(turn_weight) == 0:
turn_weight = 1 - turn_weight
turn_weight /= torch.sum(turn_weight)
train_data.att_labels.append(turn_weight)
train_data.att = True
train_data.att_labels = rnn_utils.pad_sequence(train_data.att_labels, batch_first=True)
print('Finish finding objective attention weights!')
return train_data
def define_att_weight_idf(corp, config):
print('Begin to find objective attention weights based on IDF!')
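    # global IDF of word w: log10(#conversations / (#conversations containing w + 1))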
global_idf = np.zeros(corp.wordNum)
for w in range(corp.wordNum):
global_idf[w] = float(corp.convNum) / (len(corp.global_word_record[w]) + 1)
global_idf = np.log10(global_idf)
for c in range(len(corp.convs)):
local_idf = np.ones(corp.wordNum)
current_turn_num = len(corp.convs[c])
for w in corp.local_word_record[c]:
local_idf[w] = float(current_turn_num) / (len(corp.local_word_record[c][w]) + 1)
local_idf = np.log10(local_idf)
current_att_weights = []
for turn in corp.convs[c]:
word_idf = [global_idf[w]+config.idf_tradeoff*local_idf[w] for w in turn[1]]
current_att_weights.append(sum(word_idf) / np.log(len(turn[1])+1))
current_att_weights = torch.Tensor(current_att_weights)
if torch.cuda.is_available() and config.use_gpu: # run in GPU
current_att_weights = current_att_weights.cuda()
current_att_weights /= torch.sum(current_att_weights)
corp.train_data.att_labels.append(current_att_weights)
corp.train_data.att = True
corp.train_data.att_labels = rnn_utils.pad_sequence(corp.train_data.att_labels, batch_first=True)
print('Finish finding objective attention weights!')
return corp.train_data
def train(corp, model, config):
train_optimizer = optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.l2_weight)
# train_data: conv_vecs, conv_lens, conv_turn_lens, my_labels
train_data = corp.train_data
padded_quotes = corp.padded_quotes
quote_lens = corp.quote_lens
padded_exp = corp.padded_exp
exp_lens = corp.exp_lens
corp.test_corpus(config.valid_file, mode='VALID')
valid_data = MyDataset(corp, config, 'VALID')
valid_loader = data.DataLoader(valid_data, collate_fn=valid_data.my_collate, batch_size=config.batch_size,
num_workers=0)
best_state = None
best_valid_thr = 0.0
best_valid_map = -1.0
best_valid_loss = 999999.99
no_improve = 0
for epoch in range(config.max_epoch):
train_epoch(model, train_data, config.loss_weights, train_optimizer, epoch, config, padded_quotes, quote_lens, padded_exp, exp_lens)
valid_map, valid_loss = valid_evaluate(model, valid_loader, config, padded_quotes, quote_lens, padded_exp, exp_lens)
if best_valid_map < valid_map or best_valid_loss > valid_loss:
no_improve = 0
best_state = model.state_dict()
if best_valid_map < valid_map:
best_valid_map = valid_map
print('New Best MAP Valid Result!!! Valid MAP: %g, Valid Loss: %g' % (valid_map, valid_loss))
if best_valid_loss > valid_loss:
best_valid_loss = valid_loss
print('New Best Loss Valid Result!!! Valid MAP: %g, Valid Loss: %g' % (valid_map, valid_loss))
else:
no_improve += 1
print(
                'No improve! Current Valid MAP: %g, Best Valid MAP: %g; Current Valid Loss: %g, Best Valid Loss: %g' % (
valid_map, best_valid_map, valid_loss, best_valid_loss))
if no_improve == 5:
break
model.load_state_dict(best_state)
# Final step: Evaluate the model
corp.test_corpus(config.test_file, mode='TEST')
test_data = MyDataset(corp, config, 'TEST')
test_loader = data.DataLoader(test_data, collate_fn=test_data.my_collate, batch_size=config.batch_size, num_workers=0)
res = test_evaluate(model, test_loader, config, padded_quotes, quote_lens, padded_exp, exp_lens)
    print('Result in test set: MAP %g, P@1 %g, P@3 %g, nDCG@5 %g, nDCG@10 %g' % (res[0], res[1], res[2], res[3], res[4]))
torch.save(model.state_dict(), config.path + 'map%.4f_p@1%.4f_best_seed%d.model' % (res[0], res[1], config.random_seed))
with open(config.path + 'map%.4f_p@1%.4f_best_seed%d.res' % (res[0], res[1], config.random_seed), 'w') as f:
f.write('MAP %g \t P@1 %g \t P@3 %g \t nDCG@5 %g\t nDCG@10 %g\n'% (res[0], res[1], res[2], res[3], res[4]))
f.write('\n\nParameters:\n')
for key in config.__dict__:
f.write('%s : %s\n' % (key, config.__dict__[key]))
|
[
"evaluate.test_evaluate",
"evaluate.valid_evaluate"
] |
[((368, 379), 'time.time', 'time.time', ([], {}), '()\n', (377, 379), False, 'import time\n'), ((480, 605), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'collate_fn': 'train_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)', 'shuffle': '(True)'}), '(train_data, collate_fn=train_data.my_collate, batch_size=\n config.batch_size, num_workers=0, shuffle=True)\n', (495, 605), True, 'import torch.utils.data as data\n'), ((2154, 2165), 'time.time', 'time.time', ([], {}), '()\n', (2163, 2165), False, 'import time\n'), ((2451, 2562), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'collate_fn': 'train_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)'}), '(train_data, collate_fn=train_data.my_collate, batch_size=\n config.batch_size, num_workers=0)\n', (2466, 2562), True, 'import torch.utils.data as data\n'), ((3967, 4030), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['train_data.att_labels'], {'batch_first': '(True)'}), '(train_data.att_labels, batch_first=True)\n', (3989, 4030), True, 'import torch.nn.utils.rnn as rnn_utils\n'), ((4239, 4261), 'numpy.zeros', 'np.zeros', (['corp.wordNum'], {}), '(corp.wordNum)\n', (4247, 4261), True, 'import numpy as np\n'), ((4397, 4417), 'numpy.log10', 'np.log10', (['global_idf'], {}), '(global_idf)\n', (4405, 4417), True, 'import numpy as np\n'), ((5341, 5409), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['corp.train_data.att_labels'], {'batch_first': '(True)'}), '(corp.train_data.att_labels, batch_first=True)\n', (5363, 5409), True, 'import torch.nn.utils.rnn as rnn_utils\n'), ((5930, 5962), 'data_process.MyDataset', 'MyDataset', (['corp', 'config', '"""VALID"""'], {}), "(corp, config, 'VALID')\n", (5939, 5962), False, 'from data_process import MyDataset\n'), ((5982, 6093), 'torch.utils.data.DataLoader', 'data.DataLoader', (['valid_data'], {'collate_fn': 'valid_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)'}), '(valid_data, collate_fn=valid_data.my_collate, batch_size=\n config.batch_size, num_workers=0)\n', (5997, 6093), True, 'import torch.utils.data as data\n'), ((7551, 7582), 'data_process.MyDataset', 'MyDataset', (['corp', 'config', '"""TEST"""'], {}), "(corp, config, 'TEST')\n", (7560, 7582), False, 'from data_process import MyDataset\n'), ((7601, 7710), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_data'], {'collate_fn': 'test_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)'}), '(test_data, collate_fn=test_data.my_collate, batch_size=\n config.batch_size, num_workers=0)\n', (7616, 7710), True, 'import torch.utils.data as data\n'), ((7716, 7810), 'evaluate.test_evaluate', 'test_evaluate', (['model', 'test_loader', 'config', 'padded_quotes', 'quote_lens', 'padded_exp', 'exp_lens'], {}), '(model, test_loader, config, padded_quotes, quote_lens,\n padded_exp, exp_lens)\n', (7729, 7810), False, 'from evaluate import valid_evaluate, test_evaluate\n'), ((4475, 4496), 'numpy.ones', 'np.ones', (['corp.wordNum'], {}), '(corp.wordNum)\n', (4482, 4496), True, 'import numpy as np\n'), ((4700, 4719), 'numpy.log10', 'np.log10', (['local_idf'], {}), '(local_idf)\n', (4708, 4719), True, 'import numpy as np\n'), ((4986, 5019), 'torch.Tensor', 'torch.Tensor', (['current_att_weights'], {}), '(current_att_weights)\n', (4998, 5019), False, 'import torch\n'), ((5183, 5213), 'torch.sum', 'torch.sum', (['current_att_weights'], {}), '(current_att_weights)\n', (5192, 5213), False, 'import torch\n'), ((6464, 6560), 'evaluate.valid_evaluate', 'valid_evaluate', (['model', 'valid_loader', 'config', 'padded_quotes', 'quote_lens', 'padded_exp', 'exp_lens'], {}), '(model, valid_loader, config, padded_quotes, quote_lens,\n padded_exp, exp_lens)\n', (6478, 6560), False, 'from evaluate import valid_evaluate, test_evaluate\n'), ((755, 780), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (778, 780), False, 'import torch\n'), ((2776, 2801), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2799, 2801), False, 'import torch\n'), ((3836, 3858), 'torch.sum', 'torch.sum', (['turn_weight'], {}), '(turn_weight)\n', (3845, 3858), False, 'import torch\n'), ((5031, 5056), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5054, 5056), False, 'import torch\n'), ((1279, 1300), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1298, 1300), False, 'from torch import nn, optim\n'), ((1951, 1972), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1970, 1972), False, 'from torch import nn, optim\n'), ((2962, 2994), 'torch.zeros_like', 'torch.zeros_like', (['convs[:, :, 0]'], {}), '(convs[:, :, 0])\n', (2978, 2994), False, 'import torch\n'), ((3734, 3756), 'torch.sum', 'torch.sum', (['turn_weight'], {}), '(turn_weight)\n', (3743, 3756), False, 'import torch\n'), ((3288, 3316), 'torch.ones_like', 'torch.ones_like', (['convs[c, t]'], {}), '(convs[c, t])\n', (3303, 3316), False, 'import torch\n'), ((3508, 3543), 'torch.abs', 'torch.abs', (['(new_predictions - labels)'], {}), '(new_predictions - labels)\n', (3517, 3543), False, 'import torch\n'), ((3543, 3574), 'torch.abs', 'torch.abs', (['(predictions - labels)'], {}), '(predictions - labels)\n', (3552, 3574), False, 'import torch\n'), ((1569, 1596), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1579, 1596), False, 'from torch import nn, optim\n'), ((1712, 1739), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1722, 1739), False, 'from torch import nn, optim\n')]
|
import torch
from torch.utils import data
from utils import config
from network.model import Model
from network import dataset
from evaluate import evaluate
from sklearn.metrics import recall_score
from focal_loss.focal_loss import FocalLoss
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
def train(model, train_dataloader, val_dataloader, epochs):
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
writer = SummaryWriter(comment=f"LR_{config.LR}_BATCH_{config.BATCH_SIZE}")
# criterion = nn.BCELoss()
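    # focal loss down-weights well-classified examples; gamma sets the focusing
    # strength and alpha rebalances the classes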
criterion = FocalLoss(alpha=2, gamma=5)
optimizer = torch.optim.Adam(model.parameters(), lr=config.LR)
train_loss_history = []
train_accuracy_history = []
recall_history = []
val_loss_history = []
val_accuracy_history = []
val_recall_history = []
val_max_score = 0.0
for epoch in range(1, epochs + 1):
train_loss = 0.0
train_accuracy = 0.0
y_preds = []
y_labels = []
for field, candidate, words, positions, masks, labels in tqdm(train_dataloader, desc="Epoch %s" % epoch):
field = field.to(device)
candidate = candidate.to(device)
words = words.to(device)
positions = positions.to(device)
masks = masks.to(device)
labels = labels.to(device)
outputs = model(field, candidate, words, positions, masks)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
preds = outputs.round()
y_preds.extend(list(preds.cpu().detach().numpy().reshape(1, -1)[0]))
y_labels.extend(list(labels.cpu().detach().numpy().reshape(1, -1)[0]))
train_accuracy += torch.sum(preds == labels).item()
train_loss += loss.item()
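        # for-else: this branch runs once per epoch, after the batch loop finishes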
else:
val_accuracy, val_loss, val_recall = evaluate(model, val_dataloader, criterion)
train_loss = train_loss / train_dataloader.sampler.num_samples
train_accuracy = train_accuracy / train_dataloader.sampler.num_samples
recall = recall_score(y_labels, y_preds)
train_loss_history.append(train_loss)
train_accuracy_history.append(train_accuracy)
recall_history.append(recall)
val_loss_history.append(val_loss)
val_accuracy_history.append(val_accuracy)
val_recall_history.append(val_recall)
writer.add_scalar('Recall/train', recall, epoch)
writer.add_scalar('Loss/train', train_loss, epoch)
writer.add_scalar('Accuracy/train', train_accuracy, epoch)
writer.add_scalar('Recall/validation', val_recall, epoch)
writer.add_scalar('Loss/validation', val_loss, epoch)
writer.add_scalar('Accuracy/validation', val_accuracy, epoch)
if val_recall > val_max_score: # Saving the best model
print('saving model....')
val_max_score = val_recall
torch.save(model, 'output/model.pth')
print(f"Metrics for Epoch {epoch}: Loss:{round(train_loss, 4)} \
Recall: {round(recall, 4)} \
Validation Loss: {round(val_loss, 4)} \
Validation Recall: {round(val_recall, 4)}")
writer.flush()
writer.close()
return {
'training_loss': train_loss_history,
'training_accuracy': train_accuracy_history,
'training_recall': recall_history,
'validation_loss': val_loss_history,
'validation_accuracy': val_accuracy_history,
        'validation_recall': val_recall_history
}
if __name__ == '__main__':
    # split name must match the split filename, e.g. for train.txt -> train
train_data = dataset.DocumentsDataset(split_name='train')
val_data = dataset.DocumentsDataset(split_name='val')
VOCAB_SIZE = len(train_data.vocab)
training_data = data.DataLoader(train_data, batch_size=config.BATCH_SIZE, shuffle=True)
validation_data = data.DataLoader(val_data, batch_size=config.BATCH_SIZE, shuffle=True)
relie = Model(VOCAB_SIZE, config.EMBEDDING_SIZE, config.NEIGHBOURS, config.HEADS)
# relie = torch.load('output/model.pth')
history = train(relie, training_data, validation_data, config.EPOCHS)
print(history)
|
[
"evaluate.evaluate"
] |
[((495, 561), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'comment': 'f"""LR_{config.LR}_BATCH_{config.BATCH_SIZE}"""'}), "(comment=f'LR_{config.LR}_BATCH_{config.BATCH_SIZE}')\n", (508, 561), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((609, 636), 'focal_loss.focal_loss.FocalLoss', 'FocalLoss', ([], {'alpha': '(2)', 'gamma': '(5)'}), '(alpha=2, gamma=5)\n', (618, 636), False, 'from focal_loss.focal_loss import FocalLoss\n'), ((3848, 3892), 'network.dataset.DocumentsDataset', 'dataset.DocumentsDataset', ([], {'split_name': '"""train"""'}), "(split_name='train')\n", (3872, 3892), False, 'from network import dataset\n'), ((3908, 3950), 'network.dataset.DocumentsDataset', 'dataset.DocumentsDataset', ([], {'split_name': '"""val"""'}), "(split_name='val')\n", (3932, 3950), False, 'from network import dataset\n'), ((4012, 4083), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'batch_size': 'config.BATCH_SIZE', 'shuffle': '(True)'}), '(train_data, batch_size=config.BATCH_SIZE, shuffle=True)\n', (4027, 4083), False, 'from torch.utils import data\n'), ((4106, 4175), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_data'], {'batch_size': 'config.BATCH_SIZE', 'shuffle': '(True)'}), '(val_data, batch_size=config.BATCH_SIZE, shuffle=True)\n', (4121, 4175), False, 'from torch.utils import data\n'), ((4189, 4262), 'network.model.Model', 'Model', (['VOCAB_SIZE', 'config.EMBEDDING_SIZE', 'config.NEIGHBOURS', 'config.HEADS'], {}), '(VOCAB_SIZE, config.EMBEDDING_SIZE, config.NEIGHBOURS, config.HEADS)\n', (4194, 4262), False, 'from network.model import Model\n'), ((1100, 1147), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': "('Epoch %s' % epoch)"}), "(train_dataloader, desc='Epoch %s' % epoch)\n", (1104, 1147), False, 'from tqdm import tqdm\n'), ((415, 440), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (438, 440), False, 'import torch\n'), ((1968, 2010), 'evaluate.evaluate', 'evaluate', (['model', 'val_dataloader', 'criterion'], {}), '(model, val_dataloader, criterion)\n', (1976, 2010), False, 'from evaluate import evaluate\n'), ((2191, 2222), 'sklearn.metrics.recall_score', 'recall_score', (['y_labels', 'y_preds'], {}), '(y_labels, y_preds)\n', (2203, 2222), False, 'from sklearn.metrics import recall_score\n'), ((3098, 3135), 'torch.save', 'torch.save', (['model', '"""output/model.pth"""'], {}), "(model, 'output/model.pth')\n", (3108, 3135), False, 'import torch\n'), ((1832, 1858), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (1841, 1858), False, 'import torch\n')]
|
import utils
import logging
import torch
import torch.nn as nn
from optimization import BertAdam
from tqdm import trange
from evaluate import evaluate
from dataloader import get_dataloader
from utils.logger import save_result_to_csv_and_json
def train(model, config):
    # Prepare optimizer (original note: code not fully understood, leave it for now)
param_optimizer = list(model.named_parameters())
param_pre = [(n, p) for n, p in param_optimizer if 'bert' in n]
param_downstream = [(n, p) for n, p in param_optimizer if 'bert' not in n]
no_decay = ['bias', 'LayerNorm', 'layer_norm']
optimizer_grouped_parameters = [
# pretrain model param
{'params': [p for n, p in param_pre if not any(nd in n for nd in no_decay)],
'weight_decay': config.weight_decay_rate, 'lr': config.fin_tuning_lr
},
{'params': [p for n, p in param_pre if any(nd in n for nd in no_decay)],
'weight_decay': 0.0, 'lr': config.fin_tuning_lr
},
# downstream model
{'params': [p for n, p in param_downstream if not any(nd in n for nd in no_decay)],
'weight_decay': config.weight_decay_rate, 'lr': config.downs_en_lr
},
{'params': [p for n, p in param_downstream if any(nd in n for nd in no_decay)],
'weight_decay': 0.0, 'lr': config.downs_en_lr
}
]
model.to(config.device)
# dataset
train_data = get_dataloader(config, 'train') # 5019
test_data = get_dataloader(config, 'test') # 703
val_data = get_dataloader(config, 'val') # 500
# optimizer
num_steps = len(train_data) * config.args.epoch_num
optimizer = BertAdam(optimizer_grouped_parameters, warmup=config.warmup_prop, schedule="warmup_cosine",
t_total=num_steps)
best_f1 = 0.0
early_stop = 0
for epoch in range(1, config.args.epoch_num+1):
logging.info("Epoch {}/{}".format(epoch, config.args.epoch_num))
loss, tab, rel, ent, cor = _train_model(model, train_data, optimizer, config)
logging.info("Training Avg Loss: {:.6f}, tab({:.6f})*{}, rel({:.6f})*{}, ent({:.6f})*{}, cor({:.6f})*{}".format(
loss, tab, config.args.tab_loss,
rel, config.args.rel_loss,
ent, config.args.ent_loss,
cor, config.args.cor_loss))
f1, p, r = evaluate(model, val_data, config, eval_type='val')
if f1 > best_f1:
best_f1 = f1
early_stop = 0
logging.warning("Find new best F1 >>> ({:.4f})".format(best_f1))
else:
early_stop += 1
if early_stop >= config.early_stop:
logging.warning("Early stoping in epoch {} and the best F1 is ({:.6f})!!!!".format(epoch, best_f1))
break
logging.info("Final evaluate in 'test set':")
f1, p, r, items = evaluate(model, test_data, config, eval_type='test')
logging.warning("\n\n\nFinal Result: \nVal Best F1 Score is ({:.6f}), Test F1 Score is ({:.6f})".format(best_f1, f1))
save_result_to_csv_and_json(items, config.log_dir, "{}".format(int(f1*1e4)))
def _train_model(model, dataloader, optimizer, config):
""" Training model in one epoch."""
model.train()
loss_avg = utils.RunningAverage()
tab_loss_avg = utils.RunningAverage()
rel_loss_avg = utils.RunningAverage()
ent_loss_avg = utils.RunningAverage()
cor_loss_avg = utils.RunningAverage()
loss_func = nn.BCELoss(reduction='mean')
t = trange(len(dataloader), ascii=True)
for step, _ in enumerate(t):
batch = next(iter(dataloader))
batch = tuple(t.to(config.device) for t in batch)
ids, masks, table_tags, neg_masks, cor_tags, rel_tags, ent_tags = batch
table, cors, rels, ents = model(ids, masks) # bsz, L, L, |R|
if config.args.use_negative_mask:
table = table * neg_masks
tab_loss = loss_func(table, table_tags)
rel_loss = loss_func(rels, rel_tags)
ent_loss = loss_func(ents, ent_tags)
cor_loss = loss_func(cors, cor_tags)
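        # total loss: weighted sum of the four objectives (table, relation, entity, cor)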
loss = config.args.tab_loss * tab_loss \
+ config.args.rel_loss * rel_loss \
+ config.args.ent_loss * ent_loss \
+ config.args.cor_loss * cor_loss
loss.backward()
optimizer.step()
model.zero_grad()
loss_avg.update(loss.item())
tab_loss_avg.update(tab_loss.item())
rel_loss_avg.update(rel_loss.item())
ent_loss_avg.update(ent_loss.item())
cor_loss_avg.update(cor_loss.item())
t.set_postfix(loss='{:.6f}/{:.6f}'.format(loss_avg(), loss.item()))
return loss_avg(), tab_loss_avg(), rel_loss_avg(), ent_loss_avg(), cor_loss_avg()
|
[
"evaluate.evaluate"
] |
[((1377, 1408), 'dataloader.get_dataloader', 'get_dataloader', (['config', '"""train"""'], {}), "(config, 'train')\n", (1391, 1408), False, 'from dataloader import get_dataloader\n'), ((1435, 1465), 'dataloader.get_dataloader', 'get_dataloader', (['config', '"""test"""'], {}), "(config, 'test')\n", (1449, 1465), False, 'from dataloader import get_dataloader\n'), ((1492, 1521), 'dataloader.get_dataloader', 'get_dataloader', (['config', '"""val"""'], {}), "(config, 'val')\n", (1506, 1521), False, 'from dataloader import get_dataloader\n'), ((1624, 1739), 'optimization.BertAdam', 'BertAdam', (['optimizer_grouped_parameters'], {'warmup': 'config.warmup_prop', 'schedule': '"""warmup_cosine"""', 't_total': 'num_steps'}), "(optimizer_grouped_parameters, warmup=config.warmup_prop, schedule=\n 'warmup_cosine', t_total=num_steps)\n", (1632, 1739), False, 'from optimization import BertAdam\n'), ((2746, 2791), 'logging.info', 'logging.info', (['"""Final evaluate in \'test set\':"""'], {}), '("Final evaluate in \'test set\':")\n', (2758, 2791), False, 'import logging\n'), ((2814, 2866), 'evaluate.evaluate', 'evaluate', (['model', 'test_data', 'config'], {'eval_type': '"""test"""'}), "(model, test_data, config, eval_type='test')\n", (2822, 2866), False, 'from evaluate import evaluate\n'), ((3203, 3225), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (3223, 3225), False, 'import utils\n'), ((3245, 3267), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (3265, 3267), False, 'import utils\n'), ((3287, 3309), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (3307, 3309), False, 'import utils\n'), ((3329, 3351), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (3349, 3351), False, 'import utils\n'), ((3371, 3393), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (3391, 3393), False, 'import utils\n'), ((3410, 3438), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (3420, 3438), True, 'import torch.nn as nn\n'), ((2318, 2368), 'evaluate.evaluate', 'evaluate', (['model', 'val_data', 'config'], {'eval_type': '"""val"""'}), "(model, val_data, config, eval_type='val')\n", (2326, 2368), False, 'from evaluate import evaluate\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Version : Python 3.6
import os
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from transformers import WEIGHTS_NAME, CONFIG_NAME
from config import Config
from utils import RelationLoader, SemEvalDataLoader
from model import R_BERT
from evaluate import Eval
class Runner(object):
def __init__(self, id2rel, loader, user_config):
self.class_num = len(id2rel)
self.id2rel = id2rel
self.loader = loader
self.user_config = user_config
self.model = R_BERT(self.class_num, user_config)
self.model = self.model.to(user_config.device)
self.eval_tool = Eval(user_config)
def train(self):
train_loader, dev_loader, _ = self.loader
num_training_steps = len(train_loader) // self.user_config.\
gradient_accumulation_steps * self.user_config.epoch
num_warmup_steps = int(num_training_steps *
self.user_config.warmup_proportion)
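        # the downstream layers get their own learning rate (other_lr), separate
        # from the learning rate used for the pretrained BERT encoder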
bert_params = list(map(id, self.model.bert.parameters()))
rest_params = filter(lambda p: id(
p) not in bert_params, self.model.parameters())
optimizer_grouped_parameters = [
{'params': self.model.bert.parameters()},
{'params': rest_params, 'lr': self.user_config.other_lr},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.user_config.lr,
eps=self.user_config.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
)
print('--------------------------------------')
        print('training model parameters (except PLM layers):')
for name, param in self.model.named_parameters():
if id(param) in bert_params:
continue
if param.requires_grad:
print('%s : %s' % (name, str(param.data.shape)))
print('--------------------------------------')
print('start to train the model ...')
max_f1 = -float('inf')
for epoch in range(1, 1+self.user_config.epoch):
train_loss = 0.0
data_iterator = tqdm(train_loader, desc='Train')
for _, (data, label) in enumerate(data_iterator):
self.model.train()
data = data.to(self.user_config.device)
label = label.to(self.user_config.device)
optimizer.zero_grad()
loss, _ = self.model(data, label)
train_loss += loss.item()
loss.backward()
nn.utils.clip_grad_norm_(
self.model.parameters(),
max_norm=self.user_config.max_grad_norm
)
optimizer.step()
scheduler.step()
train_loss = train_loss / len(train_loader)
f1, dev_loss, _ = self.eval_tool.evaluate(self.model, dev_loader)
print('[%03d] train_loss: %.3f | dev_loss: %.3f | micro f1 on dev: %.4f'
% (epoch, train_loss, dev_loss, f1), end=' ')
if f1 > max_f1:
max_f1 = f1
model_to_save = self.model.module if hasattr(
self.model, 'module') else self.model
output_model_file = os.path.join(
self.user_config.model_dir, WEIGHTS_NAME)
output_config_file = os.path.join(
self.user_config.model_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.bert.config.to_json_file(output_config_file)
print('>>> save models!')
else:
print()
def test(self):
print('--------------------------------------')
print('start load model ...')
if not os.path.exists(self.user_config.model_dir):
raise Exception('no pre-trained model exists!')
state_dict = torch.load(
os.path.join(self.user_config.model_dir, WEIGHTS_NAME),
map_location=self.user_config.device
)
self.model.load_state_dict(state_dict)
print('--------------------------------------')
print('start test ...')
_, _, test_loader = self.loader
f1, test_loss, predict_label = self.eval_tool.evaluate(self.model, test_loader)
print('test_loss: %.3f | micro f1 on test: %.4f' % (test_loss, f1))
return predict_label
def print_result(predict_label, id2rel, start_idx=8001):
des_file = './eval/predicted_result.txt'
with open(des_file, 'w', encoding='utf-8') as fw:
for i in range(0, predict_label.shape[0]):
fw.write('{}\t{}\n'.format(
start_idx+i, id2rel[int(predict_label[i])]))
if __name__ == '__main__':
user_config = Config()
print('--------------------------------------')
print('some config:')
user_config.print_config()
print('--------------------------------------')
print('start to load data ...')
rel2id, id2rel, class_num = RelationLoader(user_config).get_relation()
loader = SemEvalDataLoader(rel2id, user_config)
train_loader, dev_loader, test_loader = None, None, None
if user_config.mode == 0: # train mode
train_loader = loader.get_train()
dev_loader = loader.get_dev()
test_loader = loader.get_test()
elif user_config.mode == 1:
test_loader = loader.get_test()
loader = [train_loader, dev_loader, test_loader]
print('finish!')
runner = Runner(id2rel, loader, user_config)
if user_config.mode == 0: # train mode
runner.train()
predict_label = runner.test()
elif user_config.mode == 1:
predict_label = runner.test()
else:
raise ValueError('invalid train mode!')
print_result(predict_label, id2rel)
|
[
"evaluate.Eval"
] |
[((5095, 5103), 'config.Config', 'Config', ([], {}), '()\n', (5101, 5103), False, 'from config import Config\n'), ((5390, 5428), 'utils.SemEvalDataLoader', 'SemEvalDataLoader', (['rel2id', 'user_config'], {}), '(rel2id, user_config)\n', (5407, 5428), False, 'from utils import RelationLoader, SemEvalDataLoader\n'), ((671, 706), 'model.R_BERT', 'R_BERT', (['self.class_num', 'user_config'], {}), '(self.class_num, user_config)\n', (677, 706), False, 'from model import R_BERT\n'), ((787, 804), 'evaluate.Eval', 'Eval', (['user_config'], {}), '(user_config)\n', (791, 804), False, 'from evaluate import Eval\n'), ((1497, 1596), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'self.user_config.lr', 'eps': 'self.user_config.adam_epsilon'}), '(optimizer_grouped_parameters, lr=self.user_config.lr, eps=self.\n user_config.adam_epsilon)\n', (1502, 1596), False, 'from transformers import AdamW\n'), ((1658, 1779), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'num_warmup_steps', 'num_training_steps': 'num_training_steps'}), '(optimizer, num_warmup_steps=\n num_warmup_steps, num_training_steps=num_training_steps)\n', (1689, 1779), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((2416, 2448), 'tqdm.tqdm', 'tqdm', (['train_loader'], {'desc': '"""Train"""'}), "(train_loader, desc='Train')\n", (2420, 2448), False, 'from tqdm import tqdm\n'), ((4103, 4145), 'os.path.exists', 'os.path.exists', (['self.user_config.model_dir'], {}), '(self.user_config.model_dir)\n', (4117, 4145), False, 'import os\n'), ((4253, 4307), 'os.path.join', 'os.path.join', (['self.user_config.model_dir', 'WEIGHTS_NAME'], {}), '(self.user_config.model_dir, WEIGHTS_NAME)\n', (4265, 4307), False, 'import os\n'), ((5334, 5361), 'utils.RelationLoader', 'RelationLoader', (['user_config'], {}), '(user_config)\n', (5348, 5361), False, 'from utils import RelationLoader, SemEvalDataLoader\n'), ((3552, 3606), 'os.path.join', 'os.path.join', (['self.user_config.model_dir', 'WEIGHTS_NAME'], {}), '(self.user_config.model_dir, WEIGHTS_NAME)\n', (3564, 3606), False, 'import os\n'), ((3665, 3718), 'os.path.join', 'os.path.join', (['self.user_config.model_dir', 'CONFIG_NAME'], {}), '(self.user_config.model_dir, CONFIG_NAME)\n', (3677, 3718), False, 'import os\n')]
|
from __future__ import print_function
from __future__ import division
import threading
import os
import argparse
import time
import tensorflow as tf
import itertools
from atari_env import make_env, S_DIM, A_DIM
from net import Net
from worker import Worker
from utils import print_params_nums
from evaluate import Evaluate
def main(args):
if args.save_path is not None and not os.path.exists(args.save_path):
os.makedirs(args.save_path)
summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
global_steps_counter = itertools.count() # thread-safe
global_net = Net(S_DIM, A_DIM, 'global', args)
num_workers = args.threads
workers = []
# create workers
for i in range(1, num_workers + 1):
        # workers are numbered from 1, so give the first worker the summary writer
        worker_summary_writer = summary_writer if i == 1 else None
worker = Worker(i, make_env(args), global_steps_counter,
worker_summary_writer, args)
workers.append(worker)
saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
coord = tf.train.Coordinator()
if args.model_path is not None:
print('Loading model...\n')
ckpt = tf.train.get_checkpoint_state(args.model_path)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('Initializing a new model...\n')
sess.run(tf.global_variables_initializer())
print_params_nums()
        # Start the work process for each worker in a separate thread
worker_threads = []
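        # A3C pattern: each worker thread rolls out its own environment copy and
        # asynchronously updates the shared global network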
for worker in workers:
t = threading.Thread(target=lambda: worker.run(sess, coord, saver))
t.start()
time.sleep(0.5)
worker_threads.append(t)
if args.eval_every > 0:
evaluator = Evaluate(
global_net, summary_writer, global_steps_counter, args)
evaluate_thread = threading.Thread(
target=lambda: evaluator.run(sess, coord))
evaluate_thread.start()
coord.join(worker_threads)
def args_parse():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path', default=None, type=str,
help='Whether to use a saved model. (*None|model path)')
parser.add_argument(
'--save_path', default='/tmp/a3c', type=str,
help='Path to save a model during training.')
parser.add_argument(
'--max_steps', default=int(1e8), type=int, help='Max training steps')
parser.add_argument(
'--start_time', default=None, type=str, help='Time to start training')
parser.add_argument(
'--threads', default=16, type=int,
help='Numbers of parallel threads. [num_cpu_cores] by default')
# evaluate
parser.add_argument(
'--eval_every', default=500, type=int,
help='Evaluate the global policy every N seconds')
parser.add_argument(
'--record_video', default=True, type=bool,
help='Whether to save videos when evaluating')
parser.add_argument(
'--eval_episodes', default=5, type=int,
help='Numbers of episodes per evaluation')
# hyperparameters
parser.add_argument(
'--init_learning_rate', default=7e-4, type=float,
help='Learning rate of the optimizer')
parser.add_argument(
'--decay', default=0.99, type=float,
help='decay factor of the RMSProp optimizer')
parser.add_argument(
'--smooth', default=1e-7, type=float,
help='epsilon of the RMSProp optimizer')
parser.add_argument(
'--gamma', default=0.99, type=float,
        help='Discount factor of rewards and advantages')
parser.add_argument('--tmax', default=5, type=int, help='Rollout size')
parser.add_argument(
'--entropy_ratio', default=0.01, type=float,
help='Initial weight of entropy loss')
parser.add_argument(
'--clip_grads', default=40, type=float,
help='global norm gradients clipping')
parser.add_argument(
'--epsilon', default=1e-5, type=float,
help='epsilon of rmsprop optimizer')
return parser.parse_args()
if __name__ == '__main__':
# ignore warnings by tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# make GPU invisible
os.environ['CUDA_VISIBLE_DEVICES'] = ''
args = args_parse()
main(args)
|
[
"evaluate.Evaluate"
] |
[((561, 578), 'itertools.count', 'itertools.count', ([], {}), '()\n', (576, 578), False, 'import itertools\n'), ((612, 645), 'net.Net', 'Net', (['S_DIM', 'A_DIM', '"""global"""', 'args'], {}), "(S_DIM, A_DIM, 'global', args)\n", (615, 645), False, 'from net import Net\n'), ((985, 1014), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(5)'}), '(max_to_keep=5)\n', (999, 1014), True, 'import tensorflow as tf\n'), ((2085, 2110), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2108, 2110), False, 'import argparse\n'), ((425, 452), 'os.makedirs', 'os.makedirs', (['args.save_path'], {}), '(args.save_path)\n', (436, 452), False, 'import os\n'), ((497, 532), 'os.path.join', 'os.path.join', (['args.save_path', '"""log"""'], {}), "(args.save_path, 'log')\n", (509, 532), False, 'import os\n'), ((1025, 1037), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1035, 1037), True, 'import tensorflow as tf\n'), ((1063, 1085), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (1083, 1085), True, 'import tensorflow as tf\n'), ((1421, 1440), 'utils.print_params_nums', 'print_params_nums', ([], {}), '()\n', (1438, 1440), False, 'from utils import print_params_nums\n'), ((385, 415), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (399, 415), False, 'import os\n'), ((850, 864), 'atari_env.make_env', 'make_env', (['args'], {}), '(args)\n', (858, 864), False, 'from atari_env import make_env, S_DIM, A_DIM\n'), ((1185, 1231), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['args.model_path'], {}), '(args.model_path)\n', (1214, 1231), True, 'import tensorflow as tf\n'), ((1681, 1696), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1691, 1696), False, 'import time\n'), ((1791, 1855), 'evaluate.Evaluate', 'Evaluate', (['global_net', 'summary_writer', 'global_steps_counter', 'args'], {}), '(global_net, summary_writer, global_steps_counter, args)\n', (1799, 1855), False, 'from evaluate import Evaluate\n'), ((1378, 1411), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1409, 1411), True, 'import tensorflow as tf\n')]
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import copy
import argparse
import logging
import gc
import datetime
import pprint
from collections import OrderedDict, defaultdict
from functools import partial
from torch.optim import Adam
try:
from torch.utils.tensorboard import SummaryWriter
except BaseException as e:
from tensorboardX import SummaryWriter
from model import BMGFModel
from dataset import Dataset, Sampler
from evaluate import evaluate_accuracy, evaluate_precision_recall_f1
from util import *
INF = 1e30
_INF = -1e30
def eval_epoch(args, logger, writer, model, data_type, data_loader, device, epoch):
model.eval()
epoch_step = len(data_loader)
total_step = args.epochs * epoch_step
total_cnt = 0
total_ce = 0.0
total_mlce = 0.0
total_loss = 0.0
results = {"data": {"id": list(), "relation": list(), "prefered_relation": list()},
"prediction": {"prob": list(), "pred": list()},
"error": {"ce": list(), "mlce": list(), "mean_ce": INF, "mean_mlce": INF},
"evaluation": {"accuracy": dict(), "precision_recall_f1": dict()}}
with torch.no_grad():
for batch_id, batch in enumerate(data_loader):
step = epoch*epoch_step+batch_id
_id, arg1, arg1_mask, arg2, arg2_mask, relation, prefered_relation = batch
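            # recompute the preferred (binary) sense from the soft label vector:
            # class 1 iff its probability is at least 0.5, overriding the batch value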
prefered_relation = (relation[:, 1] >= 0.5).long()
bsz = len(_id)
total_cnt += bsz
results["data"]["id"].extend(_id)
results["data"]["relation"].extend(relation)
results["data"]["prefered_relation"].extend(prefered_relation)
arg1 = arg1.to(device)
arg2 = arg2.to(device)
if arg1_mask is not None:
arg1_mask = arg1_mask.to(device)
if arg2_mask is not None:
arg2_mask = arg2_mask.to(device)
relation = relation.to(device)
prefered_relation = prefered_relation.to(device)
output = model(arg1, arg2, arg1_mask, arg2_mask)
logp = F.log_softmax(output, dim=-1)
prob = logp.exp()
results["prediction"]["prob"].extend(prob.cpu().detach())
results["prediction"]["pred"].extend(prob.cpu().argmax(dim=1).detach())
ce = F.nll_loss(logp, prefered_relation, reduction="none")
mlce = F.multilabel_soft_margin_loss(output, relation, reduction="none")
results["error"]["ce"].extend(ce.cpu().detach())
results["error"]["mlce"].extend(mlce.cpu().detach())
if args.loss == "ce":
loss = ce
elif args.loss == "mlce":
loss = mlce
else:
raise NotImplementedError("Error: loss=%s is not supported now." % (args.loss))
avg_ce = ce.mean()
avg_mlce = mlce.mean()
avg_loss = loss.mean()
total_ce += avg_ce.item() * bsz
total_mlce += avg_mlce.item() * bsz
total_loss += avg_loss.item() * bsz
if writer:
writer.add_scalar("%s/pdtb-loss" % (data_type), avg_loss.item(), step)
writer.add_scalar("%s/pdtb-ce" % (data_type), avg_ce.item(), step)
writer.add_scalar("%s/pdtb-mlce" % (data_type), avg_mlce.item(), step)
if logger and batch_id == epoch_step-1:
logger.info(
"epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}\tbatch: {:0>5d}/{:0>5d}".format(
epoch, args.epochs, data_type, batch_id, epoch_step) + "\n" +
"\tpdtb-loss: {:10.4f}\tpdtb-ce: {:10.4f}\tpdtb-mlce: {:10.4f}".format(
avg_loss.item(), avg_ce.item(), avg_mlce.item()) + "\n" +
"\tpdtb-gold: {}".format(results["data"]["relation"][-1]) + "\n" +
"\tpdtb-pred: {}".format(results["prediction"]["prob"][-1]))
mean_ce = total_ce / (total_cnt + 1e-6)
mean_mlce = total_mlce / (total_cnt + 1e-6)
mean_loss = total_loss / (total_cnt + 1e-6)
pred = np.array(results["prediction"]["pred"])
target = torch.cat(results["data"]["relation"], dim=0).view(total_cnt, -1).int().numpy()
prefered_target = np.array(results["data"]["prefered_relation"])
results["error"]["mean_ce"] = mean_ce
results["error"]["mean_mlce"] = mean_mlce
results["evaluation"]["accuracy"] = evaluate_accuracy(pred, target, prefered_target)
results["evaluation"]["precision_recall_f1"] = evaluate_precision_recall_f1(pred, target, prefered_target, "binary")
if writer:
writer.add_scalar("%s/pdtb-loss-epoch" % (data_type), mean_loss, epoch)
writer.add_scalar("%s/pdtb-ce-epoch" % (data_type), mean_ce, epoch)
writer.add_scalar("%s/pdtb-mlce-epoch" % (data_type), mean_mlce, epoch)
if logger:
logger.info(
"epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}".format(
epoch, args.epochs, data_type) + "\n" +
"\tpdtb-loss-epoch: {:10.4f}\tpdtb-ce-epoch: {:10.4f}\tpdtb-mlce-epoch: {:10.4f}".format(
mean_loss, mean_ce, mean_mlce) + "\n" +
"\tpdtb-accuray: {}".format(
pprint.pformat(results["evaluation"]["accuracy"]).replace("\n", "\n\t\t")) + "\n" +
"\tpdtb-precision_recall_f1: {}".format(
pprint.pformat(results["evaluation"]["precision_recall_f1"]).replace("\n", "\n\t\t")))
gc.collect()
return mean_loss, results
def train_epoch(args, logger, writer, model, optimizer, data_type, data_loader, device, epoch):
model.train()
epoch_step = len(data_loader)
total_step = args.epochs * epoch_step
total_cnt = 0
total_ce = 0.0
total_mlce = 0.0
total_loss = 0.0
results = {"data": {"id": list(), "relation": list(), "prefered_relation": list()},
"prediction": {"prob": list(), "pred": list()},
"error": {"ce": list(), "mlce": list(), "mean_ce": INF, "mean_mlce": INF},
"evaluation": {"accuracy": dict(), "precision_recall_f1": dict()}}
for batch_id, batch in enumerate(data_loader):
step = epoch*epoch_step+batch_id
_id, arg1, arg1_mask, arg2, arg2_mask, relation, prefered_relation = batch
prefered_relation = (relation[:, 1] >= 0.5).long()
bsz = len(_id)
total_cnt += bsz
results["data"]["id"].extend(_id)
results["data"]["relation"].extend(relation)
results["data"]["prefered_relation"].extend(prefered_relation)
arg1 = arg1.to(device)
arg2 = arg2.to(device)
if arg1_mask is not None:
arg1_mask = arg1_mask.to(device)
if arg2_mask is not None:
arg2_mask = arg2_mask.to(device)
relation = relation.to(device)
prefered_relation = prefered_relation.to(device)
output = model(arg1, arg2, arg1_mask, arg2_mask)
logp = F.log_softmax(output, dim=1)
prob = logp.exp()
results["prediction"]["prob"].extend(prob.cpu().detach())
results["prediction"]["pred"].extend(prob.cpu().argmax(dim=1).detach())
ce = F.nll_loss(logp, prefered_relation, reduction="none")
mlce = F.multilabel_soft_margin_loss(output, relation, reduction="none")
results["error"]["ce"].extend(ce.cpu().detach())
results["error"]["mlce"].extend(mlce.cpu().detach())
if args.loss == "ce":
loss = ce
elif args.loss == "mlce":
loss = mlce
else:
raise NotImplementedError("Error: loss=%s is not supported now." % (args.loss))
avg_ce = ce.mean()
avg_mlce = mlce.mean()
avg_loss = loss.mean()
total_ce += avg_ce.item() * bsz
total_mlce += avg_mlce.item() * bsz
total_loss += avg_loss.item() * bsz
avg_loss.backward()
if args.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if writer:
writer.add_scalar("%s/pdtb-loss" % (data_type), avg_loss.item(), step)
writer.add_scalar("%s/pdtb-ce" % (data_type), avg_ce.item(), step)
writer.add_scalar("%s/pdtb-mlce" % (data_type), avg_mlce.item(), step)
if logger and (batch_id%args.print_every == 0 or batch_id == epoch_step-1):
logger.info(
"epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}\tbatch: {:0>5d}/{:0>5d}".format(
epoch, args.epochs, data_type, batch_id, epoch_step) + "\n" +
"\tpdtb-loss: {:10.4f}\tpdtb-ce: {:10.4f}\tpdtb-mlce: {:10.4f}".format(
avg_loss.item(), avg_ce.item(), avg_mlce.item()) + "\n" +
"\tpdtb-gold: {}".format(results["data"]["relation"][-1]) + "\n" +
"\tpdtb-pred: {}".format(results["prediction"]["prob"][-1]))
mean_ce = total_ce / (total_cnt + 1e-6)
mean_mlce = total_mlce / (total_cnt + 1e-6)
mean_loss = total_loss / (total_cnt + 1e-6)
pred = np.array(results["prediction"]["pred"])
target = torch.cat(results["data"]["relation"], dim=0).view(total_cnt, -1).int().numpy()
prefered_relation = np.array(results["data"]["prefered_relation"])
results["error"]["mean_ce"] = mean_ce
results["error"]["mean_mlce"] = mean_mlce
results["evaluation"]["accuracy"] = evaluate_accuracy(pred, target, prefered_relation)
results["evaluation"]["precision_recall_f1"] = evaluate_precision_recall_f1(pred, target, prefered_relation, "binary")
if writer:
writer.add_scalar("%s/pdtb-loss-epoch" % (data_type), mean_loss, epoch)
writer.add_scalar("%s/pdtb-ce-epoch" % (data_type), mean_ce, epoch)
writer.add_scalar("%s/pdtb-mlce-epoch" % (data_type), mean_mlce, epoch)
if logger:
logger.info(
"epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}".format(
epoch, args.epochs, data_type) + "\n" +
"\tpdtb-loss-epoch: {:10.4f}\tpdtb-ce-epoch: {:10.4f}\tpdtb-mlce-epoch: {:10.4f}".format(
mean_loss, mean_ce, mean_mlce) + "\n" +
"\tpdtb-accuray: {}".format(
pprint.pformat(results["evaluation"]["accuracy"]).replace("\n", "\n\t\t")) + "\n" +
"\tpdtb-precision_recall_f1: {}".format(
pprint.pformat(results["evaluation"]["precision_recall_f1"]).replace("\n", "\n\t\t")))
gc.collect()
return mean_loss, results
def train(args, logger, writer):
# set device
if args.gpu_ids is None:
device = torch.device("cpu")
else:
if isinstance(args.gpu_ids, int):
args.gpu_ids = [args.gpu_ids]
device = torch.device("cuda:%d" % args.gpu_ids[0])
torch.cuda.set_device(device)
args.num_rels = 2 # for binary classification
if args.pretrained_model_path:
# load pretrained model
config = load_config(os.path.join(args.pretrained_model_path, "BMGFModel.config"))
for by in ["accf1", "f1", "accuracy", "loss"]:
best_epochs = get_best_epochs(os.path.join(args.pretrained_model_path, "BMGFModel.log"), by=by)
if len(best_epochs) > 0:
break
logger.info("retrieve the best epochs for BMGFModel: %s" % (best_epochs))
if len(best_epochs) > 0:
model = BMGFModel(**(config._asdict()))
if "test" in best_epochs:
model.load_state_dict(torch.load(
os.path.join(args.pretrained_model_path, "epoch%d.pt" % (best_epochs["test"])),
map_location=device))
elif "valid" in best_epochs:
model.load_state_dict(torch.load(
os.path.join(args.pretrained_model_path, "epoch%d.pt" % (best_epochs["valid"])),
map_location=device))
else:
model.load_state_dict(torch.load(
os.path.join(args.pretrained_model_path, "epoch%d.pt" % (best_epochs["train"])),
map_location=device))
if config.dropout != args.dropout:
change_dropout_rate(model, args.dropout)
else:
raise ValueError("Error: cannot load BMGFModel from %s." % (args.pretrained_model_path))
else:
# build model
model = BMGFModel(**vars(args))
model.set_finetune(args.finetune)
if args.gpu_ids and len(args.gpu_ids) > 1:
model = nn.DataParallel(model, device_ids=args.gpu_ids)
model = model.to(device)
logger.info(model)
logger.info("num of trainable parameters: %d" % (
sum(p.numel() for p in model.parameters() if p.requires_grad)))
# load data
datasets = OrderedDict({
"train": Dataset().load_pt(args.train_dataset_path),
"valid": Dataset().load_pt(args.valid_dataset_path),
"test": Dataset().load_pt(args.test_dataset_path)})
if args.explicit_dataset_path != "":
explicit_dataset = Dataset().load_pt(args.explicit_dataset_path)
datasets["train"].data.extend(explicit_dataset.data)
del explicit_dataset
logger.info("train:valid:test = %d:%d:%d" % (len(datasets["train"]), len(datasets["valid"]), len(datasets["test"])))
rel_map = defaultdict(int)
for r in args.relations:
for k in Dataset.rel_map_4.keys():
if k.startswith(r):
rel_map[k] = 1
assert len(rel_map) > 0
if args.encoder == "roberta":
pad_id = 1
else:
pad_id = 0
data_loaders = OrderedDict()
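# freeze the collate arguments so each DataLoader only has to pass the raw batch to Dataset.batchify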
batchify = partial(Dataset.batchify,
rel_map=rel_map, min_arg=args.min_arg, max_arg=args.max_arg, pad_id=pad_id)
for data_type in datasets:
sampler = Sampler(datasets[data_type],
group_by=["arg1", "arg2"], batch_size=args.batch_size,
shuffle=data_type=="train", drop_last=False)
data_loaders[data_type] = data.DataLoader(datasets[data_type],
batch_sampler=sampler,
collate_fn=batchify,
pin_memory=data_type=="train")
# optimizer and losses
optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
optimizer.zero_grad()
best_losses = {dataset: INF for dataset in datasets}
best_loss_epochs = {dataset: -1 for dataset in datasets}
best_accs = {dataset: _INF for dataset in datasets}
best_acc_epochs = {dataset: -1 for dataset in datasets}
best_f1s = {dataset: _INF for dataset in datasets}
best_f1_epochs = {dataset: -1 for dataset in datasets}
best_accf1s = {dataset: _INF for dataset in datasets}
best_accf1_epochs = {dataset: -1 for dataset in datasets}
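# track the best epoch per split under each criterion (loss / accuracy / f1 / acc+f1)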
for epoch in range(args.epochs):
for data_type, data_loader in data_loaders.items():
if data_type == "train":
mean_loss, results = train_epoch(args, logger, writer,
model, optimizer, data_type, data_loader, device, epoch)
else:
mean_loss, results = eval_epoch(args, logger, writer,
model, data_type, data_loader, device, epoch)
save_results(results, os.path.join(args.save_model_dir, "%s_results%d.json" % (data_type, epoch)))
if mean_loss <= best_losses[data_type]:
best_losses[data_type] = mean_loss
best_loss_epochs[data_type] = epoch
logger.info("data_type: {:<5s}\tbest pdtb-loss: {:.4f} (epoch: {:0>3d})".format(
data_type, best_losses[data_type], best_loss_epochs[data_type]))
if args.save_best == "loss":
if args.gpu_ids and len(args.gpu_ids) > 1:
torch.save(model.module.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
else:
torch.save(model.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
if results["evaluation"]["accuracy"]["overall"] >= best_accs[data_type]:
best_accs[data_type] = results["evaluation"]["accuracy"]["overall"]
best_acc_epochs[data_type] = epoch
logger.info("data_type: {:<5s}\tbest pdtb-accuracy: {:.4f} (epoch: {:0>3d})".format(
data_type, best_accs[data_type], best_acc_epochs[data_type]))
if args.save_best == "acc":
if args.gpu_ids and len(args.gpu_ids) > 1:
torch.save(model.module.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
else:
torch.save(model.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
if results["evaluation"]["precision_recall_f1"]["overall"][-1] >= best_f1s[data_type]:
best_f1s[data_type] = results["evaluation"]["precision_recall_f1"]["overall"][-1]
best_f1_epochs[data_type] = epoch
logger.info("data_type: {:<5s}\tbest pdtb-f1: {:.4f} (epoch: {:0>3d})".format(
data_type, best_f1s[data_type], best_f1_epochs[data_type]))
if args.save_best == "f1":
if args.gpu_ids and len(args.gpu_ids) > 1:
torch.save(model.module.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
else:
torch.save(model.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
if results["evaluation"]["accuracy"]["overall"]+results["evaluation"]["precision_recall_f1"]["overall"][-1] >= best_accf1s[data_type]:
best_accf1s[data_type] = results["evaluation"]["accuracy"]["overall"]+results["evaluation"]["precision_recall_f1"]["overall"][-1]
best_accf1_epochs[data_type] = epoch
logger.info("data_type: {:<5s}\tbest pdtb-accf1: {:.4f} (epoch: {:0>3d})".format(
data_type, best_accf1s[data_type], best_accf1_epochs[data_type]))
if args.save_best == "accf1":
if args.gpu_ids and len(args.gpu_ids) > 1:
torch.save(model.module.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
else:
torch.save(model.state_dict(),
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
_use_new_zipfile_serialization=False)
for data_type in data_loaders:
logger.info("data_type: {:<5s}\tbest pdtb-loss: {:.4f} (epoch: {:0>3d})".format(
data_type, best_losses[data_type], best_loss_epochs[data_type]))
logger.info("data_type: {:<5s}\tbest pdtb-accuracy: {:.4f} (epoch: {:0>3d})".format(
data_type, best_accs[data_type], best_acc_epochs[data_type]))
logger.info("data_type: {:<5s}\tbest pdtb-f1: {:.4f} (epoch: {:0>3d})".format(
data_type, best_f1s[data_type], best_f1_epochs[data_type]))
logger.info("data_type: {:<5s}\tbest pdtb-accf1: {:.4f} (epoch: {:0>3d})".format(
data_type, best_accf1s[data_type], best_accf1_epochs[data_type]))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0,
help="random seed")
parser.add_argument("--n_workers", type=int, default=1,
help="numer of processors")
# data config
parser.add_argument("--explicit_dataset_path", type=str, default="",
help="explicit Dataset path")
parser.add_argument("--train_dataset_path", type=str,
help="training Dataset path")
parser.add_argument("--valid_dataset_path", type=str,
help="validation Dataset path")
parser.add_argument("--test_dataset_path", type=str,
help="test Dataset path")
parser.add_argument("--pretrained_model_path", type=str, default="",
help="model path of pretrained BMGFModel")
parser.add_argument("--save_model_dir", type=str,
help="model dir to save models")
parser.add_argument("--relations", type=str2list, default="",
help="which relations are computed")
parser.add_argument("--min_arg", type=int, default=3,
help="the minimum length of arguments")
parser.add_argument("--max_arg", type=int, default=512,
help="the maximum length of arguments")
# training config
parser.add_argument("--gpu_ids", type=str2list, default=None,
help="gpu ids")
parser.add_argument("--epochs", type=int, default=50,
help="epochs of training")
parser.add_argument("--batch_size", type=int, default=32,
help="batch size of training")
parser.add_argument("--print_every", type=int, default=100,
help="printing log every K batchs")
parser.add_argument("--lr", type=float, default=0.001,
help="learning rate for the optimizer")
parser.add_argument("--weight_decay", type=float, default=0.0005,
help="weight decay")
parser.add_argument("--max_grad_norm", type=float, default=2.0,
help="max grad norm for gradient clipping")
parser.add_argument("--save_best", type=str, default="f1",
choices=["loss", "acc", "f1", "accf1"],
help="the criteria to save best models")
parser.add_argument("--loss", type=str, default="ce",
choices=["ce", "mlce"],
help="loss function")
# BMGFModel config
parser.add_argument("--encoder", type=str, default="roberta",
choices=["lstm", "bert", "roberta"],
help="the encoder")
parser.add_argument("--finetune", type=str, default="type",
choices=["none", "type", "last", "full"],
help="how to finetune the encoder")
parser.add_argument("--hidden_dim", type=int, default=128,
help="hidden dimension")
parser.add_argument("--num_lstm_layers", type=int, default=1,
help="number of lstm layers")
parser.add_argument("--num_perspectives", type=int, default=16,
help="number of perspectives for bimpm")
parser.add_argument("--num_filters", type=int, default=64,
help="number of filters for convolutional layers")
parser.add_argument("--activation", type=str, default="leaky_relu",
choices=["relu", "tanh", "softmax", "sigmoid", "leaky_relu", "prelu", "gelu"],
help="activation function type")
parser.add_argument("--dropout", type=float, default=0.2,
help="dropout for neural networks")
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
assert len(args.relations) > 0
ts = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
args.save_model_dir = os.path.join(args.save_model_dir, ts)
os.makedirs(args.save_model_dir, exist_ok=True)
# save config
save_config(args, os.path.join(args.save_model_dir, "BMGFModel.config"))
# build logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%Y/%m/%d %H:%M:%S')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
logfile = logging.FileHandler(os.path.join(args.save_model_dir, "BMGFModel.log"), 'w')
logfile.setFormatter(fmt)
logger.addHandler(logfile)
# build writer
writer = SummaryWriter(args.save_model_dir)
# train
train(args, logger, writer)
|
[
"evaluate.evaluate_precision_recall_f1",
"evaluate.evaluate_accuracy"
] |
[((5760, 5772), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5770, 5772), False, 'import gc\n'), ((9457, 9496), 'numpy.array', 'np.array', (["results['prediction']['pred']"], {}), "(results['prediction']['pred'])\n", (9465, 9496), True, 'import numpy as np\n'), ((9616, 9662), 'numpy.array', 'np.array', (["results['data']['prefered_relation']"], {}), "(results['data']['prefered_relation'])\n", (9624, 9662), True, 'import numpy as np\n'), ((9796, 9846), 'evaluate.evaluate_accuracy', 'evaluate_accuracy', (['pred', 'target', 'prefered_relation'], {}), '(pred, target, prefered_relation)\n', (9813, 9846), False, 'from evaluate import evaluate_accuracy, evaluate_precision_recall_f1\n'), ((9899, 9970), 'evaluate.evaluate_precision_recall_f1', 'evaluate_precision_recall_f1', (['pred', 'target', 'prefered_relation', '"""binary"""'], {}), "(pred, target, prefered_relation, 'binary')\n", (9927, 9970), False, 'from evaluate import evaluate_accuracy, evaluate_precision_recall_f1\n'), ((10874, 10886), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10884, 10886), False, 'import gc\n'), ((13750, 13766), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (13761, 13766), False, 'from collections import OrderedDict, defaultdict\n'), ((14041, 14054), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14052, 14054), False, 'from collections import OrderedDict, defaultdict\n'), ((14071, 14177), 'functools.partial', 'partial', (['Dataset.batchify'], {'rel_map': 'rel_map', 'min_arg': 'args.min_arg', 'max_arg': 'args.max_arg', 'pad_id': 'pad_id'}), '(Dataset.batchify, rel_map=rel_map, min_arg=args.min_arg, max_arg=\n args.max_arg, pad_id=pad_id)\n', (14078, 14177), False, 'from functools import partial\n'), ((20525, 20550), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (20548, 20550), False, 'import argparse\n'), ((24432, 24460), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (24449, 24460), False, 'import torch\n'), ((24466, 24491), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (24480, 24491), True, 'import numpy as np\n'), ((24623, 24660), 'os.path.join', 'os.path.join', (['args.save_model_dir', 'ts'], {}), '(args.save_model_dir, ts)\n', (24635, 24660), False, 'import os\n'), ((24666, 24713), 'os.makedirs', 'os.makedirs', (['args.save_model_dir'], {'exist_ok': '(True)'}), '(args.save_model_dir, exist_ok=True)\n', (24677, 24713), False, 'import os\n'), ((24849, 24868), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (24866, 24868), False, 'import logging\n'), ((24915, 24985), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: [ %(message)s ]"""', '"""%Y/%m/%d %H:%M:%S"""'], {}), "('%(asctime)s: [ %(message)s ]', '%Y/%m/%d %H:%M:%S')\n", (24932, 24985), False, 'import logging\n'), ((25001, 25024), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (25022, 25024), False, 'import logging\n'), ((25279, 25313), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.save_model_dir'], {}), '(args.save_model_dir)\n', (25292, 25313), False, 'from tensorboardX import SummaryWriter\n'), ((1238, 1253), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1251, 1253), False, 'import torch\n'), ((4287, 4326), 'numpy.array', 'np.array', (["results['prediction']['pred']"], {}), "(results['prediction']['pred'])\n", (4295, 4326), True, 'import numpy as np\n'), ((4452, 4498), 'numpy.array', 'np.array', (["results['data']['prefered_relation']"], {}), 
"(results['data']['prefered_relation'])\n", (4460, 4498), True, 'import numpy as np\n'), ((4644, 4692), 'evaluate.evaluate_accuracy', 'evaluate_accuracy', (['pred', 'target', 'prefered_target'], {}), '(pred, target, prefered_target)\n', (4661, 4692), False, 'from evaluate import evaluate_accuracy, evaluate_precision_recall_f1\n'), ((4749, 4818), 'evaluate.evaluate_precision_recall_f1', 'evaluate_precision_recall_f1', (['pred', 'target', 'prefered_target', '"""binary"""'], {}), "(pred, target, prefered_target, 'binary')\n", (4777, 4818), False, 'from evaluate import evaluate_accuracy, evaluate_precision_recall_f1\n'), ((7255, 7283), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (7268, 7283), True, 'import torch.nn.functional as F\n'), ((7485, 7538), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['logp', 'prefered_relation'], {'reduction': '"""none"""'}), "(logp, prefered_relation, reduction='none')\n", (7495, 7538), True, 'import torch.nn.functional as F\n'), ((7555, 7620), 'torch.nn.functional.multilabel_soft_margin_loss', 'F.multilabel_soft_margin_loss', (['output', 'relation'], {'reduction': '"""none"""'}), "(output, relation, reduction='none')\n", (7584, 7620), True, 'import torch.nn.functional as F\n'), ((11020, 11039), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (11032, 11039), False, 'import torch\n'), ((11155, 11196), 'torch.device', 'torch.device', (["('cuda:%d' % args.gpu_ids[0])"], {}), "('cuda:%d' % args.gpu_ids[0])\n", (11167, 11196), False, 'import torch\n'), ((11206, 11235), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (11227, 11235), False, 'import torch\n'), ((12935, 12982), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {'device_ids': 'args.gpu_ids'}), '(model, device_ids=args.gpu_ids)\n', (12950, 12982), True, 'import torch.nn as nn\n'), ((13815, 13839), 'dataset.Dataset.rel_map_4.keys', 'Dataset.rel_map_4.keys', ([], {}), '()\n', (13837, 13839), False, 'from dataset import Dataset, Sampler\n'), ((14233, 14368), 'dataset.Sampler', 'Sampler', (['datasets[data_type]'], {'group_by': "['arg1', 'arg2']", 'batch_size': 'args.batch_size', 'shuffle': "(data_type == 'train')", 'drop_last': '(False)'}), "(datasets[data_type], group_by=['arg1', 'arg2'], batch_size=args.\n batch_size, shuffle=data_type == 'train', drop_last=False)\n", (14240, 14368), False, 'from dataset import Dataset, Sampler\n'), ((14425, 14543), 'torch.utils.data.DataLoader', 'data.DataLoader', (['datasets[data_type]'], {'batch_sampler': 'sampler', 'collate_fn': 'batchify', 'pin_memory': "(data_type == 'train')"}), "(datasets[data_type], batch_sampler=sampler, collate_fn=\n batchify, pin_memory=data_type == 'train')\n", (14440, 14543), True, 'import torch.utils.data as data\n'), ((24758, 24811), 'os.path.join', 'os.path.join', (['args.save_model_dir', '"""BMGFModel.config"""'], {}), "(args.save_model_dir, 'BMGFModel.config')\n", (24770, 24811), False, 'import os\n'), ((25123, 25173), 'os.path.join', 'os.path.join', (['args.save_model_dir', '"""BMGFModel.log"""'], {}), "(args.save_model_dir, 'BMGFModel.log')\n", (25135, 25173), False, 'import os\n'), ((2192, 2221), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (2205, 2221), True, 'import torch.nn.functional as F\n'), ((2443, 2496), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['logp', 'prefered_relation'], {'reduction': '"""none"""'}), "(logp, prefered_relation, 
reduction='none')\n", (2453, 2496), True, 'import torch.nn.functional as F\n'), ((2517, 2582), 'torch.nn.functional.multilabel_soft_margin_loss', 'F.multilabel_soft_margin_loss', (['output', 'relation'], {'reduction': '"""none"""'}), "(output, relation, reduction='none')\n", (2546, 2582), True, 'import torch.nn.functional as F\n'), ((11388, 11448), 'os.path.join', 'os.path.join', (['args.pretrained_model_path', '"""BMGFModel.config"""'], {}), "(args.pretrained_model_path, 'BMGFModel.config')\n", (11400, 11448), False, 'import os\n'), ((24542, 24565), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24563, 24565), False, 'import datetime\n'), ((11549, 11606), 'os.path.join', 'os.path.join', (['args.pretrained_model_path', '"""BMGFModel.log"""'], {}), "(args.pretrained_model_path, 'BMGFModel.log')\n", (11561, 11606), False, 'import os\n'), ((13473, 13482), 'dataset.Dataset', 'Dataset', ([], {}), '()\n', (13480, 13482), False, 'from dataset import Dataset, Sampler\n'), ((13232, 13241), 'dataset.Dataset', 'Dataset', ([], {}), '()\n', (13239, 13241), False, 'from dataset import Dataset, Sampler\n'), ((13295, 13304), 'dataset.Dataset', 'Dataset', ([], {}), '()\n', (13302, 13304), False, 'from dataset import Dataset, Sampler\n'), ((13357, 13366), 'dataset.Dataset', 'Dataset', ([], {}), '()\n', (13364, 13366), False, 'from dataset import Dataset, Sampler\n'), ((15683, 15758), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_results%d.json' % (data_type, epoch))"], {}), "(args.save_model_dir, '%s_results%d.json' % (data_type, epoch))\n", (15695, 15758), False, 'import os\n'), ((11957, 12033), 'os.path.join', 'os.path.join', (['args.pretrained_model_path', "('epoch%d.pt' % best_epochs['test'])"], {}), "(args.pretrained_model_path, 'epoch%d.pt' % best_epochs['test'])\n", (11969, 12033), False, 'import os\n'), ((9511, 9556), 'torch.cat', 'torch.cat', (["results['data']['relation']"], {'dim': '(0)'}), "(results['data']['relation'], dim=0)\n", (9520, 9556), False, 'import torch\n'), ((10764, 10824), 'pprint.pformat', 'pprint.pformat', (["results['evaluation']['precision_recall_f1']"], {}), "(results['evaluation']['precision_recall_f1'])\n", (10778, 10824), False, 'import pprint\n'), ((12194, 12271), 'os.path.join', 'os.path.join', (['args.pretrained_model_path', "('epoch%d.pt' % best_epochs['valid'])"], {}), "(args.pretrained_model_path, 'epoch%d.pt' % best_epochs['valid'])\n", (12206, 12271), False, 'import os\n'), ((12409, 12486), 'os.path.join', 'os.path.join', (['args.pretrained_model_path', "('epoch%d.pt' % best_epochs['train'])"], {}), "(args.pretrained_model_path, 'epoch%d.pt' % best_epochs['train'])\n", (12421, 12486), False, 'import os\n'), ((16307, 16366), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (16319, 16366), False, 'import os\n'), ((16550, 16609), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (16562, 16609), False, 'import os\n'), ((17291, 17350), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (17303, 17350), False, 'import os\n'), ((17534, 17593), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (17546, 17593), False, 'import os\n'), ((18292, 18351), 
'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (18304, 18351), False, 'import os\n'), ((18535, 18594), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (18547, 18594), False, 'import os\n'), ((19404, 19463), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (19416, 19463), False, 'import os\n'), ((19647, 19706), 'os.path.join', 'os.path.join', (['args.save_model_dir', "('%s_best.pt' % data_type)"], {}), "(args.save_model_dir, '%s_best.pt' % data_type)\n", (19659, 19706), False, 'import os\n'), ((4345, 4390), 'torch.cat', 'torch.cat', (["results['data']['relation']"], {'dim': '(0)'}), "(results['data']['relation'], dim=0)\n", (4354, 4390), False, 'import torch\n'), ((5668, 5728), 'pprint.pformat', 'pprint.pformat', (["results['evaluation']['precision_recall_f1']"], {}), "(results['evaluation']['precision_recall_f1'])\n", (5682, 5728), False, 'import pprint\n'), ((10609, 10658), 'pprint.pformat', 'pprint.pformat', (["results['evaluation']['accuracy']"], {}), "(results['evaluation']['accuracy'])\n", (10623, 10658), False, 'import pprint\n'), ((5505, 5554), 'pprint.pformat', 'pprint.pformat', (["results['evaluation']['accuracy']"], {}), "(results['evaluation']['accuracy'])\n", (5519, 5554), False, 'import pprint\n')]
|
#!/usr/bin/python
import sys,os
import argparse
import glob
from generate_windows import Generate_windows
from evaluate import Evaluate
from scanTranscriptome_forward import Scan_Forward
from scanTranscriptome_reverse import Scan_Backward
from postprocess import Postprocess
#
from multiprocessing import Pool
import datetime
#
def args():
parser = argparse.ArgumentParser()
parser.add_argument('--out_dir', default='out_dir', help='out dir')
parser.add_argument('--input_file', default=None, help='unstranded wig file')
parser.add_argument('--input_plus', default=None, help='plus strand wig file')
parser.add_argument('--input_minus', default=None, help='minus strand wig file')
parser.add_argument('--fa_file',default=None,help='path to one line fa file')
parser.add_argument('--keep_temp',default=None,help='set to "yes" to keep temporary files')
parser.add_argument('--window', default=201, type=int, help='input length')
parser.add_argument('--name', default='sample',help='sample name')
parser.add_argument("--model", help="the model weights file", required=True)
parser.add_argument("--RNASeqRCThreshold",default=0.05,type=float,help="RNA-Seq Coverage Threshold")
parser.add_argument('--threshold', default=0,type=int,help='peaks shorter than this threshold will be filtered out')
parser.add_argument('--penality', default=1,type=int,help='penalty applied to prediction scores lower than 0.5')
parser.add_argument('--DB_file', default=None, help='polyA database file')
parser.add_argument('--depth', default=1, type=float,help='total number of mapped reads (in millions)')
parser.add_argument('--t', default = 30, type = int, help='number of thread')
argv = parser.parse_args()
out_dir = argv.out_dir
input_file = argv.input_file
input_plus = argv.input_plus
input_minus = argv.input_minus
fa_file = argv.fa_file
keep_temp = argv.keep_temp
window = argv.window
name = argv.name
model = argv.model
rst = argv.RNASeqRCThreshold
threshold = argv.threshold
penality = argv.penality
DB_file = argv.DB_file
depth = argv.depth
thread = argv.t
return out_dir,input_file,input_plus,input_minus,fa_file,keep_temp,window,name,model,rst,threshold,penality,DB_file,depth,thread
def run_single_block(input_list):
data = input_list[0]
model = input_list[1]
out_dir = input_list[2]
rst = input_list[3]
window = input_list[4]
keep_temp = input_list[5]
threshold = input_list[6]
penality = input_list[7]
DB_file = input_list[8]
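# only process generated data blocks; skip any wig files picked up by the glob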
if 'wig' not in data:
baseName = data.split('/')[-1]
Evaluate(model,out_dir,rst,window,baseName,keep_temp)
Scan_Forward(baseName,threshold,penality,out_dir)
Scan_Backward(baseName,threshold,penality,out_dir)
if(keep_temp != 'yes'):
predict_file = out_dir+'/predict/'+baseName+'.txt'
os.system('rm %s'%predict_file)
Postprocess(DB_file,baseName,threshold,penality,out_dir)
if(keep_temp != 'yes'):
forward_file=out_dir+"/maxSum/%s.forward.%d.%d.txt"%(baseName,threshold,penality)
backward_file=out_dir+"/maxSum/%s.backward.%d.%d.txt"%(baseName,threshold,penality)
os.system('rm %s %s'%(forward_file,backward_file))
print('Finished postprocessing...\n')
def main(out_dir,input_file,input_plus,input_minus,fa_file,keep_temp,window,name,model,rst,threshold,penality,DB_file,depth,thread):
if(out_dir[-1] == '/'):
out_dir = out_dir[0:-1]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_dir = out_dir+'/'+name
####Generate sliding windlows
print("Generating blocks ...")
gw_start_time = datetime.datetime.now()
#Generate_windows(out_dir,input_file,input_plus,input_minus,fa_file,keep_temp,window,name,depth)
gw_end_time = datetime.datetime.now()
print("Gerate blocks used time: {}".format(gw_end_time - gw_start_time))
data_dir = out_dir+'/data'
data_files = glob.glob(data_dir+"/*")
block_input_list = []
for data in data_files:
block_input_list.append([data,model,out_dir,rst,window,keep_temp,threshold,penality,DB_file])
print("Predicting results ...")
pred_start_time = datetime.datetime.now()
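# evaluate, scan, and postprocess the blocks in parallel across the worker pool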
with Pool(thread) as p:
p.map(run_single_block,block_input_list)
pred_end_time = datetime.datetime.now()
print("Prediction used time: {}".format(pred_end_time - pred_start_time))
out_file = '%s/%s.predicted.txt' %(out_dir,name)
ww = open(out_file,'w')
if(DB_file is not None):
ww.write('predicted_pasid\tdb_pasid\tdb_diff\tscore\n')
else:
ww.write('predicted_pasid\tscore\n')
ww.close()
os.system('cat %s/maxSum/*bidirection* >>%s'%(out_dir,out_file))
if(keep_temp != 'yes'):
os.system('rm -rf %s/data %s/predict %s/maxSum'%(out_dir,out_dir,out_dir))
print("Job Done!")
if __name__ == '__main__':
main(*args())
|
[
"evaluate.Evaluate"
] |
[((354, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (377, 379), False, 'import argparse\n'), ((3810, 3833), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3831, 3833), False, 'import datetime\n'), ((3953, 3976), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3974, 3976), False, 'import datetime\n'), ((4102, 4128), 'glob.glob', 'glob.glob', (["(data_dir + '/*')"], {}), "(data_dir + '/*')\n", (4111, 4128), False, 'import glob\n'), ((4341, 4364), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4362, 4364), False, 'import datetime\n'), ((4462, 4485), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4483, 4485), False, 'import datetime\n'), ((4813, 4880), 'os.system', 'os.system', (["('cat %s/maxSum/*bidirection* >>%s' % (out_dir, out_file))"], {}), "('cat %s/maxSum/*bidirection* >>%s' % (out_dir, out_file))\n", (4822, 4880), False, 'import sys, os\n'), ((2689, 2747), 'evaluate.Evaluate', 'Evaluate', (['model', 'out_dir', 'rst', 'window', 'baseName', 'keep_temp'], {}), '(model, out_dir, rst, window, baseName, keep_temp)\n', (2697, 2747), False, 'from evaluate import Evaluate\n'), ((2751, 2803), 'scanTranscriptome_forward.Scan_Forward', 'Scan_Forward', (['baseName', 'threshold', 'penality', 'out_dir'], {}), '(baseName, threshold, penality, out_dir)\n', (2763, 2803), False, 'from scanTranscriptome_forward import Scan_Forward\n'), ((2809, 2862), 'scanTranscriptome_reverse.Scan_Backward', 'Scan_Backward', (['baseName', 'threshold', 'penality', 'out_dir'], {}), '(baseName, threshold, penality, out_dir)\n', (2822, 2862), False, 'from scanTranscriptome_reverse import Scan_Backward\n'), ((3007, 3067), 'postprocess.Postprocess', 'Postprocess', (['DB_file', 'baseName', 'threshold', 'penality', 'out_dir'], {}), '(DB_file, baseName, threshold, penality, out_dir)\n', (3018, 3067), False, 'from postprocess import Postprocess\n'), ((3639, 3662), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (3653, 3662), False, 'import sys, os\n'), ((3672, 3692), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (3683, 3692), False, 'import sys, os\n'), ((4374, 4386), 'multiprocessing.Pool', 'Pool', (['thread'], {}), '(thread)\n', (4378, 4386), False, 'from multiprocessing import Pool\n'), ((4914, 4992), 'os.system', 'os.system', (["('rm -rf %s/data %s/predict %s/maxSum' % (out_dir, out_dir, out_dir))"], {}), "('rm -rf %s/data %s/predict %s/maxSum' % (out_dir, out_dir, out_dir))\n", (4923, 4992), False, 'import sys, os\n'), ((2967, 3000), 'os.system', 'os.system', (["('rm %s' % predict_file)"], {}), "('rm %s' % predict_file)\n", (2976, 3000), False, 'import sys, os\n'), ((3298, 3351), 'os.system', 'os.system', (["('rm %s %s' % (forward_file, backward_file))"], {}), "('rm %s %s' % (forward_file, backward_file))\n", (3307, 3351), False, 'import sys, os\n')]
|
#!/usr/bin/env python2
from __future__ import print_function
import math
import os
import random
import sys
from joblib import Parallel, delayed
import numpy as np
from tqdm import tqdm
import evaluate
import bo_target as bo_target_module
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_string('prog_file', '', 'the prog file')
gflags.DEFINE_string('gold_prog_file', '', 'the file for list of gold progs as reference.')
gflags.DEFINE_integer('n_jobs', -1, 'number of jobs. -1 for all.')
def do_many(bo_target, prog_list):
result = [bo_target(prog) for prog in prog_list]
return result
def main():
FLAGS(sys.argv)
random.seed(19260817)
cfg_grammar_file = (
os.path.dirname(os.path.realpath(__file__)) + '/../../dropbox/context_free_grammars/prog_leftskew.grammar'
)
prog_file = FLAGS.prog_file
n_jobs = FLAGS.n_jobs
# 1. reading
print('reading progs...')
prog_list = [_.strip() for _ in open(prog_file).readlines()]
# 2. compute
gold_prog_list = [_.strip() for _ in open(FLAGS.gold_prog_file).readlines() if _.strip() != '']
for gold_prog in gold_prog_list:
print('producing bo_target for [%s]...' % gold_prog)
target_file = FLAGS.prog_file + '.target_for_[%s].txt' % gold_prog
# simple_target_file = FLAGS.prog_file + '.simple_target_for_[%s].txt' % gold_prog
parser = evaluate.get_parser(cfg_grammar_file)
bo_target = bo_target_module.BOTarget(parser, gold_prog)
block_size = 1000
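# score the programs in chunks of block_size, distributing the chunks across n_jobs workers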
block_result = Parallel(
n_jobs=n_jobs, verbose=50
)(
delayed(do_many)(bo_target, prog_list[start:start + block_size])
for start in range(0, len(prog_list), block_size)
)
result = [_2 for _1 in block_result for _2 in _1]
# 3. saving
print('saving...')
with open(target_file, 'w') as fout:
for v in result:
print(v, file=fout)
#with open(simple_target_file, 'w') as fout:
# for prog in prog_list:
# print(len(prog.split(';')), file=fout)
import pdb, traceback, sys, code
if __name__ == '__main__':
try:
main()
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
|
[
"evaluate.get_parser"
] |
[((280, 334), 'gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""prog_file"""', '""""""', '"""the prog file"""'], {}), "('prog_file', '', 'the prog file')\n", (300, 334), False, 'import gflags\n'), ((335, 430), 'gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""gold_prog_file"""', '""""""', '"""the file for list of gold progs as reference."""'], {}), "('gold_prog_file', '',\n 'the file for list of gold progs as reference.')\n", (355, 430), False, 'import gflags\n'), ((427, 493), 'gflags.DEFINE_integer', 'gflags.DEFINE_integer', (['"""n_jobs"""', '(-1)', '"""nubmer of jobs. -1 for all."""'], {}), "('n_jobs', -1, 'nubmer of jobs. -1 for all.')\n", (448, 493), False, 'import gflags\n'), ((641, 662), 'random.seed', 'random.seed', (['(19260817)'], {}), '(19260817)\n', (652, 662), False, 'import random\n'), ((1383, 1420), 'evaluate.get_parser', 'evaluate.get_parser', (['cfg_grammar_file'], {}), '(cfg_grammar_file)\n', (1402, 1420), False, 'import evaluate\n'), ((1441, 1485), 'bo_target.BOTarget', 'bo_target_module.BOTarget', (['parser', 'gold_prog'], {}), '(parser, gold_prog)\n', (1466, 1485), True, 'import bo_target as bo_target_module\n'), ((712, 738), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (728, 738), False, 'import os\n'), ((1536, 1571), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': '(50)'}), '(n_jobs=n_jobs, verbose=50)\n', (1544, 1571), False, 'from joblib import Parallel, delayed\n'), ((2233, 2247), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2245, 2247), False, 'import pdb, traceback, sys, code\n'), ((2256, 2277), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2275, 2277), False, 'import pdb, traceback, sys, code\n'), ((2286, 2305), 'pdb.post_mortem', 'pdb.post_mortem', (['tb'], {}), '(tb)\n', (2301, 2305), False, 'import pdb, traceback, sys, code\n'), ((1607, 1623), 'joblib.delayed', 'delayed', (['do_many'], {}), '(do_many)\n', (1614, 1623), False, 'from joblib import Parallel, delayed\n')]
|
# Project hiatus
# main script with a parser for the model
# 12/10/2020
# <NAME>
# loading required packages
import os
import argparse
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset
# for manual visualisation
from rasterio.plot import show
# putting the right work directory
os.chdir("/home/adminlocal/Bureau/GIT/hiatus_change_detection")
# importing our functions
import utils as fun
import train as train
import evaluate as eval_model
import warnings
warnings.filterwarnings('ignore')
def main():
# create the parser with all arguments
parser = argparse.ArgumentParser(description='Auto-encoder Time Adversarial Model')
# Optimization arguments
parser.add_argument('--lr', default=0.01, type=float, help='Initial learning rate')
parser.add_argument('--lr_decay', default=0.1, type=float, help='Multiplicative factor used on learning rate at `lr_steps`')
parser.add_argument('--lr_steps', default=[50, 100, 1000, 1500], help='List of epochs where the learning rate is decreased by `lr_decay`')
parser.add_argument('--epochs', default=1, type=int, help='Number of epochs to train. If <=0, only testing will be done.')
parser.add_argument('--batch_size', default=64, type=int, help='Batch size')
parser.add_argument('--optim', default='adam', help='Optimizer: sgd|adam')
parser.add_argument('--grad_clip', default=0, type=float, help='Element-wise clipping of gradient. If 0, does not clip')
# Learning process arguments
parser.add_argument('--cuda', default=1, type=int, help='Bool, use cuda')
parser.add_argument('--test_auc', default=1, type=int, help='Test each n-th epoch during training')
parser.add_argument('--load_best_model', default=1, type=int, help='Load the model with the best result')
# Dataset
parser.add_argument('--dataset', default='frejus_dataset', help='Dataset name: frejus_dataset')
# Model
parser.add_argument('--seed', default=1, type=int, help='Seed for random initialisation')
parser.add_argument('--save', default=0, type=int, help='Bool, save the model')
parser.add_argument('--data_fusion', default=1, help='Including data fusion')
parser.add_argument('--rad_input', default=1, help='In case of no data_fusion, we indicate which input we want')
parser.add_argument('--adversarial', default=0, help='Making the model adversarial')
parser.add_argument('--defiance', default=0, help='Including defiance')
parser.add_argument('--split', default=0, help='Making a split on the code')
parser.add_argument('--auto_encod', default=1, help='Activating the auto-encoder')
parser.add_argument('--name_model', default="AE_Mmodal", help='Name of the file to save the model')
parser.add_argument('--output_dir', default="evaluation_models/", help='Name of the dir to save the model')
# Encoder
parser.add_argument('--conv_width', default=[8,8,16,16,16], help='Layers size')
# Decoder
parser.add_argument('--dconv_width', default=[16,16,8,8,8], help='Layers size')
# Defiance
parser.add_argument('--def_width', default=[16,16,16,16,16], help='Layers size')
# Discriminator
parser.add_argument('--nb_channels_split', default=16, type=int, help='Number of channels for the input to the discriminator')
parser.add_argument('--disc_width', default=[16,16,16,16,16,16,16,16,16], help='Layers size')
parser.add_argument('--disc_loss_weight', default=0.1, type=float, help='Weight applied on the adversarial loss with full model')
parser.add_argument('--opti_adversarial_encoder', default=0, help='Trains the encoder weights')
args = parser.parse_args()
args.start_epoch = 0
# setting the seed
fun.set_seed(args.seed, args.cuda)
# Decide on the dataset
if args.dataset=='frejus_dataset':
# loading the dataset, getting a raster for later data visualisation
# after every epoch
import frejus_dataset
# loading the data
train_data, gt_change, numpy_rasters = frejus_dataset.get_datasets(["1954","1966","1970", "1978", "1989"])
## we take a test set of the gt_change for evaluation (20%)
# creating a new dict for gt test
gt_change_test = {}
# getting a single subset list throughout the years
train_idx, val_idx = train_test_split(list(range(len(gt_change["1970"]))), test_size=0.20, random_state=1)
# we load the train and test sets for GT
for year in gt_change:
gt_change_test[year] = Subset(gt_change[year], val_idx)
for year in gt_change:
gt_change[year] = Subset(gt_change[year], train_idx)
# training the model
trained_model = train.train_full(args, train_data, gt_change_test)
return args, gt_change, numpy_rasters, trained_model, train_data
###############################################################################
###############################################################################
###############################################################################
if __name__ == "__main__":
print(
"""
Training the model
""")
# running the model
args, gt_change, numpy_rasters, trained_model, datasets = main()
print(
"""
We now test the results for several models
""")
# boolean to allow evaluation or not
evaluation = False
# performing evaluation on the different models
if evaluation:
print("AE_rad")
eval_model.evaluate_model("AE_rad", gt_change)
print("AE_rad+DAN")
eval_model.evaluate_model("AE_rad+DAN", gt_change)
print("AE_Mmodal")
eval_model.evaluate_model("AE_Mmodal", gt_change)
print("AE_Mmodal+DAN")
eval_model.evaluate_model("AE_Mmodal+DAN", gt_change)
print("AE_Mmodal+DAN+split")
eval_model.evaluate_model("AE_Mmodal+DAN+split", gt_change)
print("AE_alt+DAN")
eval_model.evaluate_model("AE_alt+DAN", gt_change)
print("bayesian_model")
eval_model.evaluate_model("bayesian_model", gt_change)
|
[
"evaluate.evaluate_model"
] |
[((320, 383), 'os.chdir', 'os.chdir', (['"""/home/adminlocal/Bureau/GIT/hiatus_change_detection"""'], {}), "('/home/adminlocal/Bureau/GIT/hiatus_change_detection')\n", (328, 383), False, 'import os\n'), ((502, 535), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (525, 535), False, 'import warnings\n'), ((610, 684), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Auto-encoder Time Adversarial Model"""'}), "(description='Auto-encoder Time Adversarial Model')\n", (633, 684), False, 'import argparse\n'), ((3785, 3819), 'utils.set_seed', 'fun.set_seed', (['args.seed', 'args.cuda'], {}), '(args.seed, args.cuda)\n', (3797, 3819), True, 'import utils as fun\n'), ((4768, 4818), 'train.train_full', 'train.train_full', (['args', 'train_data', 'gt_change_test'], {}), '(args, train_data, gt_change_test)\n', (4784, 4818), True, 'import train as train\n'), ((4110, 4179), 'frejus_dataset.get_datasets', 'frejus_dataset.get_datasets', (["['1954', '1966', '1970', '1978', '1989']"], {}), "(['1954', '1966', '1970', '1978', '1989'])\n", (4137, 4179), False, 'import frejus_dataset\n'), ((4584, 4616), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'val_idx'], {}), '(gt_change[year], val_idx)\n', (4590, 4616), False, 'from torch.utils.data import Subset\n'), ((4679, 4713), 'torch.utils.data.Subset', 'Subset', (['gt_change[year]', 'train_idx'], {}), '(gt_change[year], train_idx)\n', (4685, 4713), False, 'from torch.utils.data import Subset\n'), ((5582, 5628), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_rad"""', 'gt_change'], {}), "('AE_rad', gt_change)\n", (5607, 5628), True, 'import evaluate as eval_model\n'), ((5665, 5715), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_rad+DAN"""', 'gt_change'], {}), "('AE_rad+DAN', gt_change)\n", (5690, 5715), True, 'import evaluate as eval_model\n'), ((5751, 5800), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal"""', 'gt_change'], {}), "('AE_Mmodal', gt_change)\n", (5776, 5800), True, 'import evaluate as eval_model\n'), ((5840, 5893), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal+DAN"""', 'gt_change'], {}), "('AE_Mmodal+DAN', gt_change)\n", (5865, 5893), True, 'import evaluate as eval_model\n'), ((5939, 5998), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_Mmodal+DAN+split"""', 'gt_change'], {}), "('AE_Mmodal+DAN+split', gt_change)\n", (5964, 5998), True, 'import evaluate as eval_model\n'), ((6035, 6085), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""AE_alt+DAN"""', 'gt_change'], {}), "('AE_alt+DAN', gt_change)\n", (6060, 6085), True, 'import evaluate as eval_model\n'), ((6126, 6180), 'evaluate.evaluate_model', 'eval_model.evaluate_model', (['"""bayesian_model"""', 'gt_change'], {}), "('bayesian_model', gt_change)\n", (6151, 6180), True, 'import evaluate as eval_model\n')]
|
from pathlib import Path
import random
from fire import Fire
from munch import Munch
import torch
import numpy as np
from config import config, debug_options
from dataloader.dataset_multichoice import get_iterator
from utils import wait_for_key, suppress_stdout
from train import train
from evaluate import evaluate, qa_similarity
from infer import prepare_model, infer
from blackbox_infer import blackbox_infer
class Cli:
def __init__(self):
self.defaults = config
self.debug = debug_options
def _default_args(self, **kwargs):
args = self.defaults
if 'debug' in kwargs:
args.update(self.debug)
args.update(kwargs)
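# resolve relative *_path entries, then pin the random seed and pick a device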
args.update(resolve_paths(config))
args.update(fix_seed(args))
args.update(get_device(args))
return Munch(args)
def check_dataloader(self, **kwargs):
from dataloader.dataset_multichoice import modes
from utils import prepare_batch
from tqdm import tqdm
args = self._default_args(**kwargs)
iters, vocab = get_iterator(args)
train_iter_test = next(iter(iters['train']))
for key, value in train_iter_test.items():
if isinstance(value, torch.Tensor):
print(key, value.shape)
else:
print(key, value)
for mode in modes:
print('Test loading %s data' % mode)
for batch in tqdm(iters[mode]):
# import ipdb; ipdb.set_trace() # XXX DEBUG
batch = prepare_batch(args, batch, vocab)
def train(self, **kwargs):
args = self._default_args(**kwargs)
args.update({'mode': 'train'})
train(args)
wait_for_key()
def evaluate(self, **kwargs):
args = self._default_args(**kwargs)
args.update({'mode': 'evaluate'})
evaluate(args)
wait_for_key()
def infer(self, **kwargs):
import time
start = time.time()
args = self._default_args(**kwargs)
args.update({'mode' : 'infer'})
if args.input is not None:
if type(args.input) is str:
import json
args.input = json.loads(args.input)
args, cache = prepare_model(args)
load_time = time.time()
print("load time ", load_time - start)
infer(args, cache)
else:
load_time = time.time()
infer(args)
print("infer time ",time.time() - load_time)
def blackbox_infer(self, **kwargs):
args = self._default_args(**kwargs)
blackbox_infer(args)
# added
def qa_similarity(self, **kwargs):
args = self._default_args(**kwargs)
qa_similarity(args)
def resolve_paths(config):
paths = [k for k in config.keys() if k.endswith('_path')]
res = {}
root = Path('../').resolve()
for path in paths:
res[path] = root / config[path]
return res
def fix_seed(args):
if 'random_seed' not in args:
args['random_seed'] = 0
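# seed every RNG in play (python, numpy, torch CPU and all CUDA devices) for reproducibility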
random.seed(args['random_seed'])
np.random.seed(args['random_seed'])
torch.manual_seed(args['random_seed'])
torch.cuda.manual_seed_all(args['random_seed'])
return args
def get_device(args):
if 'device' in args:
device = args['device']
else:
device = "cuda" if torch.cuda.is_available() else "cpu"
return {'device': device}
if __name__ == "__main__":
Fire(Cli)
|
[
"evaluate.qa_similarity",
"evaluate.evaluate"
] |
[((3035, 3067), 'random.seed', 'random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (3046, 3067), False, 'import random\n'), ((3072, 3107), 'numpy.random.seed', 'np.random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (3086, 3107), True, 'import numpy as np\n'), ((3112, 3150), 'torch.manual_seed', 'torch.manual_seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (3129, 3150), False, 'import torch\n'), ((3155, 3202), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["args['random_seed']"], {}), "(args['random_seed'])\n", (3181, 3202), False, 'import torch\n'), ((3438, 3447), 'fire.Fire', 'Fire', (['Cli'], {}), '(Cli)\n', (3442, 3447), False, 'from fire import Fire\n'), ((813, 824), 'munch.Munch', 'Munch', (['args'], {}), '(args)\n', (818, 824), False, 'from munch import Munch\n'), ((1071, 1089), 'dataloader.dataset_multichoice.get_iterator', 'get_iterator', (['args'], {}), '(args)\n', (1083, 1089), False, 'from dataloader.dataset_multichoice import get_iterator\n'), ((1698, 1709), 'train.train', 'train', (['args'], {}), '(args)\n', (1703, 1709), False, 'from train import train\n'), ((1719, 1733), 'utils.wait_for_key', 'wait_for_key', ([], {}), '()\n', (1731, 1733), False, 'from utils import wait_for_key, suppress_stdout\n'), ((1863, 1877), 'evaluate.evaluate', 'evaluate', (['args'], {}), '(args)\n', (1871, 1877), False, 'from evaluate import evaluate, qa_similarity\n'), ((1887, 1901), 'utils.wait_for_key', 'wait_for_key', ([], {}), '()\n', (1899, 1901), False, 'from utils import wait_for_key, suppress_stdout\n'), ((1970, 1981), 'time.time', 'time.time', ([], {}), '()\n', (1979, 1981), False, 'import time\n'), ((2572, 2592), 'blackbox_infer.blackbox_infer', 'blackbox_infer', (['args'], {}), '(args)\n', (2586, 2592), False, 'from blackbox_infer import blackbox_infer\n'), ((2707, 2726), 'evaluate.qa_similarity', 'qa_similarity', (['args'], {}), '(args)\n', (2720, 2726), False, 'from evaluate import evaluate, qa_similarity\n'), ((1437, 1454), 'tqdm.tqdm', 'tqdm', (['iters[mode]'], {}), '(iters[mode])\n', (1441, 1454), False, 'from tqdm import tqdm\n'), ((2249, 2268), 'infer.prepare_model', 'prepare_model', (['args'], {}), '(args)\n', (2262, 2268), False, 'from infer import prepare_model, infer\n'), ((2293, 2304), 'time.time', 'time.time', ([], {}), '()\n', (2302, 2304), False, 'import time\n'), ((2368, 2386), 'infer.infer', 'infer', (['args', 'cache'], {}), '(args, cache)\n', (2373, 2386), False, 'from infer import prepare_model, infer\n'), ((2413, 2424), 'infer.infer', 'infer', (['args'], {}), '(args)\n', (2418, 2424), False, 'from infer import prepare_model, infer\n'), ((2780, 2793), 'config.config.keys', 'config.keys', ([], {}), '()\n', (2791, 2793), False, 'from config import config, debug_options\n'), ((2842, 2853), 'pathlib.Path', 'Path', (['"""../"""'], {}), "('../')\n", (2846, 2853), False, 'from pathlib import Path\n'), ((3337, 3362), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3360, 3362), False, 'import torch\n'), ((1541, 1574), 'utils.prepare_batch', 'prepare_batch', (['args', 'batch', 'vocab'], {}), '(args, batch, vocab)\n', (1554, 1574), False, 'from utils import prepare_batch\n'), ((2200, 2222), 'json.loads', 'json.loads', (['args.input'], {}), '(args.input)\n', (2210, 2222), False, 'import json\n'), ((2454, 2465), 'time.time', 'time.time', ([], {}), '()\n', (2463, 2465), False, 'import time\n')]
|
import torch
import torch.nn as nn
from helper import *
from rnn import VanillaRNN
from gru import GruRNN
from evaluate import evaluate
def train(model, model_optimizer, inp, target):
hidden = model.init_hidden()
model.zero_grad()
loss = 0
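# feed the chunk one character at a time, summing the cross-entropy at each position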
for c in range(chunk_len):
output, hidden = model(inp[c], hidden)
loss += criterion(output, target[c].unsqueeze(0))
loss.backward()
model_optimizer.step()
return loss.data.item() / chunk_len
if __name__ == '__main__':
current_file, n_characters = import_and_sanitize("../../data/shakespeare.txt")
input_size = output_size = n_characters
n_epochs = 2000
print_every = 100
plot_every = 10
hidden_size = 100
n_layers = 1 # not used for VanillaRNN
lr = 0.005
chunk_len = 200
model_vanilla = VanillaRNN(input_size, hidden_size, output_size)
model_gru = GruRNN(input_size, hidden_size, output_size)
model = model_vanilla # choose a model
model_optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
start = time.time()
all_losses = []
loss_avg = 0
for epoch in range(1, n_epochs + 1):
(inp, target) = random_training_set(current_file, chunk_len)
loss = train(model, model_optimizer, inp, target)
loss_avg += loss
if epoch % print_every == 0:
print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / n_epochs * 100, loss))
print(evaluate(model, 'Wh', 100), '\n')
if epoch % plot_every == 0:
all_losses.append(loss_avg / plot_every)
loss_avg = 0
|
[
"evaluate.evaluate"
] |
[((821, 869), 'rnn.VanillaRNN', 'VanillaRNN', (['input_size', 'hidden_size', 'output_size'], {}), '(input_size, hidden_size, output_size)\n', (831, 869), False, 'from rnn import VanillaRNN\n'), ((886, 930), 'gru.GruRNN', 'GruRNN', (['input_size', 'hidden_size', 'output_size'], {}), '(input_size, hidden_size, output_size)\n', (892, 930), False, 'from gru import GruRNN\n'), ((1056, 1077), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1075, 1077), True, 'import torch.nn as nn\n'), ((1497, 1523), 'evaluate.evaluate', 'evaluate', (['model', '"""Wh"""', '(100)'], {}), "(model, 'Wh', 100)\n", (1505, 1523), False, 'from evaluate import evaluate\n')]
|
import os
import torch
import shutil
import numpy as np
from utils import get_yaml_value, get_id, get_model_list
from evaluate import evaluate
from Preprocessing import Create_Testing_Datasets
from torchvision import models
from NetVLAD.netvlad import NetVLAD, EmbedNet
encoder = models.resnet18(pretrained=True)
base_model = torch.nn.Sequential(
encoder.conv1,
encoder.bn1,
encoder.relu,
encoder.maxpool,
encoder.layer1,
encoder.layer2,
encoder.layer3,
encoder.layer4,
)
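# infer the descriptor dimension from the last parameter of the truncated backbone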
dim = list(base_model.parameters())[-1].shape[0]
netVLAD = NetVLAD(num_clusters=89, dim=dim, alpha=1.0)
model = EmbedNet(base_model, netVLAD).cuda()
name = get_yaml_value("name")
save_dirname = os.path.join("./save_model_weight", name)
last_model_name = os.path.basename(get_model_list(save_dirname, 'net', -1))
print(last_model_name)
model.load_state_dict(torch.load(os.path.join(save_dirname, last_model_name)))
query_name = 'query_drone'
gallery_name = 'gallery_satellite'
image_datasets, data_loader = Create_Testing_Datasets()
query_path = image_datasets[query_name].imgs
gallery_path = image_datasets[gallery_name].imgs
gallery_label, gallery_path = get_id(gallery_path)
query_label, query_path = get_id(query_path)
query_label = np.array(query_label)
gallery_label = np.array(gallery_label)
query_feature = torch.FloatTensor()
gallery_feature = torch.FloatTensor()
print("<<<<<<<<<Testing Start>>>>>>>>>>>>")
with torch.no_grad():
for img, label in data_loader[gallery_name]:
n, c, h, w = img.size()
output = model(img.cuda())
gallery_feature = torch.cat((gallery_feature, output.data.cpu()))
print(gallery_feature.size())
for img, label in data_loader[query_name]:
n, c, h, w = img.size()
output = model(img.cuda())
query_feature = torch.cat((query_feature, output.data.cpu()))
print(query_feature.size())
print("<<<<<<<<<Evaluating Start>>>>>>>>>>>>")
CMC = torch.IntTensor(len(gallery_label)).zero_()
# ap = average precision
ap = 0.0
query_feature = query_feature.cuda()
gallery_feature = gallery_feature.cuda()
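# rank the gallery for each query, accumulating average precision and the CMC hit curve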
for i in range(len(query_label)):
# print(query_label[])
ap_tmp, CMC_tmp = evaluate(query_feature[i], query_label[i], gallery_feature, gallery_label)
# print(CMC_tmp.shape)
if CMC_tmp[0] == -1:
continue
CMC += CMC_tmp
ap += ap_tmp
CMC = CMC.float()
CMC = CMC / len(query_label)
result = 'Recall@1:%.2f Recall@5:%.2f Recall@10:%.2f Recall@top10:%.2f AP:%.2f' % (
CMC[0] * 100, CMC[4] * 100, CMC[9] * 100, CMC[round(len(gallery_label) * 0.1)] * 100,
ap / len(query_label) * 100)
save_path = os.path.join('save_model_weight', get_yaml_value('name'))
save_txt_path = os.path.join(save_path,
'%s_to_%s_%s_%.2f_%.2f.txt' % (query_name[6:], gallery_name[8:], last_model_name[:7],
CMC[0] * 100, ap / len(query_label)*100))
with open(save_txt_path, "w") as f:
f.write(result)
f.close()
shutil.copy("settings.yaml", os.path.join(save_path, "settings_saved.yaml"))
print(result)
|
[
"evaluate.evaluate"
] |
[((282, 314), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (297, 314), False, 'from torchvision import models\n'), ((328, 475), 'torch.nn.Sequential', 'torch.nn.Sequential', (['encoder.conv1', 'encoder.bn1', 'encoder.relu', 'encoder.maxpool', 'encoder.layer1', 'encoder.layer2', 'encoder.layer3', 'encoder.layer4'], {}), '(encoder.conv1, encoder.bn1, encoder.relu, encoder.\n maxpool, encoder.layer1, encoder.layer2, encoder.layer3, encoder.layer4)\n', (347, 475), False, 'import torch\n'), ((566, 610), 'NetVLAD.netvlad.NetVLAD', 'NetVLAD', ([], {'num_clusters': '(89)', 'dim': 'dim', 'alpha': '(1.0)'}), '(num_clusters=89, dim=dim, alpha=1.0)\n', (573, 610), False, 'from NetVLAD.netvlad import NetVLAD, EmbedNet\n'), ((664, 686), 'utils.get_yaml_value', 'get_yaml_value', (['"""name"""'], {}), "('name')\n", (678, 686), False, 'from utils import get_yaml_value, get_id, get_model_list\n'), ((702, 743), 'os.path.join', 'os.path.join', (['"""./save_model_weight"""', 'name'], {}), "('./save_model_weight', name)\n", (714, 743), False, 'import os\n'), ((1016, 1041), 'Preprocessing.Create_Testing_Datasets', 'Create_Testing_Datasets', ([], {}), '()\n', (1039, 1041), False, 'from Preprocessing import Create_Testing_Datasets\n'), ((1167, 1187), 'utils.get_id', 'get_id', (['gallery_path'], {}), '(gallery_path)\n', (1173, 1187), False, 'from utils import get_yaml_value, get_id, get_model_list\n'), ((1214, 1232), 'utils.get_id', 'get_id', (['query_path'], {}), '(query_path)\n', (1220, 1232), False, 'from utils import get_yaml_value, get_id, get_model_list\n'), ((1248, 1269), 'numpy.array', 'np.array', (['query_label'], {}), '(query_label)\n', (1256, 1269), True, 'import numpy as np\n'), ((1286, 1309), 'numpy.array', 'np.array', (['gallery_label'], {}), '(gallery_label)\n', (1294, 1309), True, 'import numpy as np\n'), ((1327, 1346), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (1344, 1346), False, 'import torch\n'), ((1365, 1384), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (1382, 1384), False, 'import torch\n'), ((779, 818), 'utils.get_model_list', 'get_model_list', (['save_dirname', '"""net"""', '(-1)'], {}), "(save_dirname, 'net', -1)\n", (793, 818), False, 'from utils import get_yaml_value, get_id, get_model_list\n'), ((1436, 1451), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1449, 1451), False, 'import torch\n'), ((2189, 2263), 'evaluate.evaluate', 'evaluate', (['query_feature[i]', 'query_label[i]', 'gallery_feature', 'gallery_label'], {}), '(query_feature[i], query_label[i], gallery_feature, gallery_label)\n', (2197, 2263), False, 'from evaluate import evaluate\n'), ((2671, 2693), 'utils.get_yaml_value', 'get_yaml_value', (['"""name"""'], {}), "('name')\n", (2685, 2693), False, 'from utils import get_yaml_value, get_id, get_model_list\n'), ((3053, 3099), 'os.path.join', 'os.path.join', (['save_path', '"""settings_saved.yaml"""'], {}), "(save_path, 'settings_saved.yaml')\n", (3065, 3099), False, 'import os\n'), ((619, 648), 'NetVLAD.netvlad.EmbedNet', 'EmbedNet', (['base_model', 'netVLAD'], {}), '(base_model, netVLAD)\n', (627, 648), False, 'from NetVLAD.netvlad import NetVLAD, EmbedNet\n'), ((876, 919), 'os.path.join', 'os.path.join', (['save_dirname', 'last_model_name'], {}), '(save_dirname, last_model_name)\n', (888, 919), False, 'import os\n')]
|
import pickle
import random
import pyro
import torch
from pyro.contrib.examples.util import print_and_log
from pyro.infer import SVI, JitTrace_ELBO, JitTraceEnum_ELBO, Trace_ELBO, TraceEnum_ELBO, config_enumerate
from pyro.optim import ClippedAdam
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from data_prep import setup_data_loaders, load_vocab, Vocab, Utterance, WordConversionDataSet
from evaluate import evaluate
from models import Generator
from train import train_epoch
from util import Config, get_args
def main_train(args, vocab):
# random seed setup
if args.seed is not None:
pyro.set_rng_seed(args.seed)
# CUDA for PyTorch
cuda_available = torch.cuda.is_available()
if cuda_available and args.cuda:
device = torch.device("cuda")
torch.cuda.set_device(0)
print("using gpu acceleration")
# generate config for model initialization
print("Generating Config")
config = Config(
word_embeddings=torch.FloatTensor(vocab.embedding),
decoder_hidden_dim=args.decoder_hidden_dim,
num_relations=args.num_relations,
encoder_hidden_dim=args.encoder_hidden_dim,
aux_loss_multiplier=args.aux_loss_multiplier
)
# initialize the generator model
generator = Generator(config)
# setup the optimizer
adam_params = {"lr": args.learning_rate, "betas": (args.beta_1, 0.999)}
optimizer = ClippedAdam(adam_params)
# set up the loss(es) for inference. wrapping the guide in config_enumerate builds the loss as a sum
# by enumerating each class label for the sampled discrete categorical distribution in the model
if args.enumerate:
guide = config_enumerate(generator.guide, args.enum_discrete, expand=True)
else:
guide = generator.guide
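    # TraceEnum_ELBO sums out the enumerated discrete latent when computing the ELBO; the Jit variant compiles the trace for speed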
elbo = (JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO)(max_plate_nesting=1)
loss_basic = SVI(generator.model, guide, optimizer, loss=elbo)
# build a list of all losses considered
losses = [loss_basic]
# aux_loss: whether to use the auxiliary loss from NIPS 14 paper (Kingma et al)
if args.aux_loss:
elbo = JitTrace_ELBO() if args.jit else Trace_ELBO()
loss_aux = SVI(generator.model_identify, generator.guide_identify, optimizer, loss=elbo)
losses.append(loss_aux)
# setup data loaders
if args.experiment_type == 'clark':
novel_utterances = pickle.load(open("./data/clark/novel_utterances.p", "rb"))
established_utterances = pickle.load(open("./data/clark/established_utterances.p", "rb"))
# def setup_data_loaders(sup_train_set, unsup_train_set, eval_set, test_set, batch_size):
random.shuffle(novel_utterances)
random.shuffle(established_utterances)
num_train = len(established_utterances)
num_eval_test = len(novel_utterances)
num_sup = int(num_train * args.sup_train_ratio) # sup_train_ratio: num_sup_train / num_train
num_unsup = num_train - num_sup
num_eval = int(num_eval_test * args.eval_ratio) # eval_ratio: num_eval / (num_eval + num_test)
sup_train_set = established_utterances[:num_sup]
unsup_train_set = established_utterances[num_sup:]
eval_set = novel_utterances[:num_eval]
test_set = novel_utterances[num_eval:]
data_loaders = setup_data_loaders(sup_train_set,
unsup_train_set,
eval_set,
test_set,
batch_size=args.batch_size)
else:
raise NotImplementedError("Have not implemented this experiment yet.")
    # how often a supervised batch is encountered during inference:
    # e.g. with sup_train_ratio = 0.0625, every 16th = int(1 / 0.0625) batch is supervised,
    # until all supervised batches have been traversed
periodic_interval_batches = int(1.0 / (1.0 * args.sup_train_ratio))
# setup the logger if a filename is provided
log_fn = "./logs/" + args.experiment_type + '/' + args.experiment_name + '.log'
logger = open(log_fn, "w")
# run inference for a certain number of epochs
for i in tqdm(range(0, args.num_epochs)):
# get the losses for an epoch
epoch_losses_sup, epoch_losses_unsup = \
train_epoch(data_loaders=data_loaders,
models=losses,
periodic_interval_batches=periodic_interval_batches,
vocab=vocab)
# compute average epoch losses i.e. losses per example
avg_epoch_losses_sup = map(lambda v: v / len(sup_train_set), epoch_losses_sup)
avg_epoch_losses_unsup = map(lambda v: v / len(unsup_train_set), epoch_losses_unsup)
# store the loss in the logfile
str_loss_sup = " ".join(map(str, avg_epoch_losses_sup))
str_loss_unsup = " ".join(map(str, avg_epoch_losses_unsup))
str_print = "{} epoch: avg losses {}".format(i, "{} {}".format(str_loss_sup, str_loss_unsup))
print_and_log(logger, str_print)
# save trained models
# torch.save(generator.state_dict(), './models/test_generator_state_dict.pth')
# do evaluation if needed
if args.evaluate:
predict_df = evaluate(generator,
eval_data_loader=data_loaders['eval'],
vocab=vocab,
sample_size=args.eval_sample_size,
batch_size=args.batch_size)
# save the df with predictions (a dictionary, see evaluate.evaluate)
eval_df_fn = './data/' + args.experiment_type + '/eval_df_' + args.experiment_name + '.p'
pickle.dump(predict_df, open(eval_df_fn, 'wb'))
return generator
def main_evaluate(vocab, args):
# generate config for model initialization
print("Generating Config")
generator_config = Config(
word_embeddings=torch.FloatTensor(vocab.embedding),
decoder_hidden_dim=args.decoder_hidden_dim,
num_relations=args.num_relations,
encoder_hidden_dim=args.encoder_hidden_dim,
aux_loss_multiplier=args.aux_loss_multiplier
)
# initialize the generator model
generator = Generator(generator_config)
# load models
generator.load_state_dict(torch.load('./models/test_generator_state_dict.pth'))
# load evaluation data set
novel_utterances = pickle.load(open("./data/clark/novel_utterances.p", "rb"))
random.shuffle(novel_utterances)
num_eval_test = len(novel_utterances)
num_eval = int(num_eval_test * args.eval_ratio) # eval_ratio: num_eval / (num_eval + num_test)
eval_set = novel_utterances[:num_eval]
eval_data_loader = DataLoader(WordConversionDataSet(eval_set), batch_size=args.batch_size, shuffle=True)
# model evaluations
eval_stats = evaluate(generator=generator,
eval_data_loader=eval_data_loader,
vocab=vocab,
sample_size=args.eval_sample_size,
batch_size=args.batch_size)
if __name__ == '__main__':
args = get_args()
vocab = load_vocab(args.experiment_type, vocab_dim=args.vocab_dim)
# main_train(args, vocab)
main_evaluate(vocab, args)
|
[
"evaluate.evaluate"
] |
[((704, 729), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (727, 729), False, 'import torch\n'), ((1298, 1315), 'models.Generator', 'Generator', (['config'], {}), '(config)\n', (1307, 1315), False, 'from models import Generator\n'), ((1435, 1459), 'pyro.optim.ClippedAdam', 'ClippedAdam', (['adam_params'], {}), '(adam_params)\n', (1446, 1459), False, 'from pyro.optim import ClippedAdam\n'), ((1916, 1965), 'pyro.infer.SVI', 'SVI', (['generator.model', 'guide', 'optimizer'], {'loss': 'elbo'}), '(generator.model, guide, optimizer, loss=elbo)\n', (1919, 1965), False, 'from pyro.infer import SVI, JitTrace_ELBO, JitTraceEnum_ELBO, Trace_ELBO, TraceEnum_ELBO, config_enumerate\n'), ((6275, 6302), 'models.Generator', 'Generator', (['generator_config'], {}), '(generator_config)\n', (6284, 6302), False, 'from models import Generator\n'), ((6524, 6556), 'random.shuffle', 'random.shuffle', (['novel_utterances'], {}), '(novel_utterances)\n', (6538, 6556), False, 'import random\n'), ((6894, 7039), 'evaluate.evaluate', 'evaluate', ([], {'generator': 'generator', 'eval_data_loader': 'eval_data_loader', 'vocab': 'vocab', 'sample_size': 'args.eval_sample_size', 'batch_size': 'args.batch_size'}), '(generator=generator, eval_data_loader=eval_data_loader, vocab=\n vocab, sample_size=args.eval_sample_size, batch_size=args.batch_size)\n', (6902, 7039), False, 'from evaluate import evaluate\n'), ((7182, 7192), 'util.get_args', 'get_args', ([], {}), '()\n', (7190, 7192), False, 'from util import Config, get_args\n'), ((7205, 7263), 'data_prep.load_vocab', 'load_vocab', (['args.experiment_type'], {'vocab_dim': 'args.vocab_dim'}), '(args.experiment_type, vocab_dim=args.vocab_dim)\n', (7215, 7263), False, 'from data_prep import setup_data_loaders, load_vocab, Vocab, Utterance, WordConversionDataSet\n'), ((630, 658), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['args.seed'], {}), '(args.seed)\n', (647, 658), False, 'import pyro\n'), ((784, 804), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (796, 804), False, 'import torch\n'), ((813, 837), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (834, 837), False, 'import torch\n'), ((1706, 1772), 'pyro.infer.config_enumerate', 'config_enumerate', (['generator.guide', 'args.enum_discrete'], {'expand': '(True)'}), '(generator.guide, args.enum_discrete, expand=True)\n', (1722, 1772), False, 'from pyro.infer import SVI, JitTrace_ELBO, JitTraceEnum_ELBO, Trace_ELBO, TraceEnum_ELBO, config_enumerate\n'), ((2224, 2301), 'pyro.infer.SVI', 'SVI', (['generator.model_identify', 'generator.guide_identify', 'optimizer'], {'loss': 'elbo'}), '(generator.model_identify, generator.guide_identify, optimizer, loss=elbo)\n', (2227, 2301), False, 'from pyro.infer import SVI, JitTrace_ELBO, JitTraceEnum_ELBO, Trace_ELBO, TraceEnum_ELBO, config_enumerate\n'), ((2691, 2723), 'random.shuffle', 'random.shuffle', (['novel_utterances'], {}), '(novel_utterances)\n', (2705, 2723), False, 'import random\n'), ((2732, 2770), 'random.shuffle', 'random.shuffle', (['established_utterances'], {}), '(established_utterances)\n', (2746, 2770), False, 'import random\n'), ((3347, 3449), 'data_prep.setup_data_loaders', 'setup_data_loaders', (['sup_train_set', 'unsup_train_set', 'eval_set', 'test_set'], {'batch_size': 'args.batch_size'}), '(sup_train_set, unsup_train_set, eval_set, test_set,\n batch_size=args.batch_size)\n', (3365, 3449), False, 'from data_prep import setup_data_loaders, load_vocab, Vocab, Utterance, WordConversionDataSet\n'), ((4367, 4490), 'train.train_epoch', 'train_epoch', ([], {'data_loaders': 'data_loaders', 'models': 'losses', 'periodic_interval_batches': 'periodic_interval_batches', 'vocab': 'vocab'}), '(data_loaders=data_loaders, models=losses,\n periodic_interval_batches=periodic_interval_batches, vocab=vocab)\n', (4378, 4490), False, 'from train import train_epoch\n'), ((5086, 5118), 'pyro.contrib.examples.util.print_and_log', 'print_and_log', (['logger', 'str_print'], {}), '(logger, str_print)\n', (5099, 5118), False, 'from pyro.contrib.examples.util import print_and_log\n'), ((5303, 5441), 'evaluate.evaluate', 'evaluate', (['generator'], {'eval_data_loader': "data_loaders['eval']", 'vocab': 'vocab', 'sample_size': 'args.eval_sample_size', 'batch_size': 'args.batch_size'}), "(generator, eval_data_loader=data_loaders['eval'], vocab=vocab,\n sample_size=args.eval_sample_size, batch_size=args.batch_size)\n", (5311, 5441), False, 'from evaluate import evaluate\n'), ((6352, 6404), 'torch.load', 'torch.load', (['"""./models/test_generator_state_dict.pth"""'], {}), "('./models/test_generator_state_dict.pth')\n", (6362, 6404), False, 'import torch\n'), ((6776, 6807), 'data_prep.WordConversionDataSet', 'WordConversionDataSet', (['eval_set'], {}), '(eval_set)\n', (6797, 6807), False, 'from data_prep import setup_data_loaders, load_vocab, Vocab, Utterance, WordConversionDataSet\n'), ((1003, 1037), 'torch.FloatTensor', 'torch.FloatTensor', (['vocab.embedding'], {}), '(vocab.embedding)\n', (1020, 1037), False, 'import torch\n'), ((2159, 2174), 'pyro.infer.JitTrace_ELBO', 'JitTrace_ELBO', ([], {}), '()\n', (2172, 2174), False, 'from pyro.infer import SVI, JitTrace_ELBO, JitTraceEnum_ELBO, Trace_ELBO, TraceEnum_ELBO, config_enumerate\n'), ((2192, 2204), 'pyro.infer.Trace_ELBO', 'Trace_ELBO', ([], {}), '()\n', (2202, 2204), False, 'from pyro.infer import SVI, JitTrace_ELBO, JitTraceEnum_ELBO, Trace_ELBO, TraceEnum_ELBO, config_enumerate\n'), ((5980, 6014), 'torch.FloatTensor', 'torch.FloatTensor', (['vocab.embedding'], {}), '(vocab.embedding)\n', (5997, 6014), False, 'import torch\n')]
|
from intervaltree import IntervalTree, Interval
import math
from evaluate.classification import Classification
from typing import TextIO, Type, Optional
import pysam
from evaluate.filter import Filter
class Masker(Filter):
def __init__(self, tree: IntervalTree = None):
if tree is None:
tree = IntervalTree()
self.tree = tree
def __eq__(self, other: "Masker") -> bool:
return self.tree == other.tree
@classmethod
    def from_bed(cls: Type, bed: TextIO) -> "Masker":
tree = IntervalTree()
for region in bed:
chrom, start, end = region.strip().split("\t")
tree.addi(int(start), int(end), chrom)
return cls(tree=tree)
def record_should_be_filtered_out(self, record: pysam.AlignedSegment) -> bool:
return self.record_overlaps_mask(record)
def record_overlaps_mask(self, record: pysam.AlignedSegment) -> bool:
classification = Classification(record)
interval = self.get_interval_where_probe_aligns_to_truth(classification)
if interval is None:
return False
overlaps = self.tree.overlap(interval)
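        # an overlap only masks the record if it is on the same chromosome, which is stored in the interval's data field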
return any(interval.data == iv.data for iv in overlaps)
@staticmethod
def get_interval_where_probe_aligns_to_truth(
record: Classification
) -> Optional[Interval]:
raise NotImplementedError()
class PrecisionMasker(Masker):
@staticmethod
def get_interval_where_probe_aligns_to_truth(
record: Classification
) -> Optional[Interval]:
if record.is_unmapped:
return None
aligned_pairs = record.get_aligned_pairs(with_seq=True)
query_interval = aligned_pairs.get_index_of_query_interval(
Interval(*record.query_probe.get_interval_or_default_interval_if_none())
)
ref_positions = aligned_pairs.get_ref_positions(
transform_Nones_into_halfway_positions=True
)
ref_positions_query_aligns_to = ref_positions[slice(*query_interval)]
        if len(ref_positions_query_aligns_to) == 0:
return None
ref_start, ref_end = (
math.floor(ref_positions_query_aligns_to[0]),
math.ceil(ref_positions_query_aligns_to[-1]),
)
chromosome = record.ref_probe.chrom
return Interval(max(0, ref_start), ref_end + 1, chromosome)
class RecallMasker(Masker):
@staticmethod
def get_interval_where_probe_aligns_to_truth(record: Classification) -> Interval:
begin = record.query_probe.pos
end = begin + len(record.query_probe.get_interval(False))
chrom = record.query_probe.chrom
return Interval(begin, end, data=chrom)
|
[
"evaluate.classification.Classification"
] |
[((540, 554), 'intervaltree.IntervalTree', 'IntervalTree', ([], {}), '()\n', (552, 554), False, 'from intervaltree import IntervalTree, Interval\n'), ((955, 977), 'evaluate.classification.Classification', 'Classification', (['record'], {}), '(record)\n', (969, 977), False, 'from evaluate.classification import Classification\n'), ((2675, 2707), 'intervaltree.Interval', 'Interval', (['begin', 'end'], {'data': 'chrom'}), '(begin, end, data=chrom)\n', (2683, 2707), False, 'from intervaltree import IntervalTree, Interval\n'), ((320, 334), 'intervaltree.IntervalTree', 'IntervalTree', ([], {}), '()\n', (332, 334), False, 'from intervaltree import IntervalTree, Interval\n'), ((2153, 2197), 'math.floor', 'math.floor', (['ref_positions_query_aligns_to[0]'], {}), '(ref_positions_query_aligns_to[0])\n', (2163, 2197), False, 'import math\n'), ((2211, 2255), 'math.ceil', 'math.ceil', (['ref_positions_query_aligns_to[-1]'], {}), '(ref_positions_query_aligns_to[-1])\n', (2220, 2255), False, 'import math\n')]
|
import h5py
from keras.models import load_model
from plot_history import plot_history
from evaluate import evaluate
import click
@click.command()
@click.argument('name')
def main(name):
print('Plotting statistics for Architecture:', name)
print('Loading history...')
h = h5py.File('history_{}.h5'.format(name))
print('Loading weights and validation data...')
v = h5py.File('val_weights_{}.h5'.format(name))
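    # v holds the validation inputs (X_val), labels (Y_val) and sample weights (w_val)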
print('Loading model...')
m = load_model('model_{}.hdf5'.format(name))
    print('Plotting loss and accuracy history...')
plot_history(h['loss'], h['val_loss'], h['accuracy'], h['val_accuracy'], name)
    print('Plotting confusion matrix (ill or not)...')
evaluate(v['X_val'], v['Y_val'], m, v['w_val'], name)
if __name__ == '__main__':
main()
|
[
"evaluate.evaluate"
] |
[((132, 147), 'click.command', 'click.command', ([], {}), '()\n', (145, 147), False, 'import click\n'), ((149, 171), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (163, 171), False, 'import click\n'), ((557, 635), 'plot_history.plot_history', 'plot_history', (["h['loss']", "h['val_loss']", "h['accuracy']", "h['val_accuracy']", 'name'], {}), "(h['loss'], h['val_loss'], h['accuracy'], h['val_accuracy'], name)\n", (569, 635), False, 'from plot_history import plot_history\n'), ((693, 746), 'evaluate.evaluate', 'evaluate', (["v['X_val']", "v['Y_val']", 'm', "v['w_val']", 'name'], {}), "(v['X_val'], v['Y_val'], m, v['w_val'], name)\n", (701, 746), False, 'from evaluate import evaluate\n')]
|
import tensorflow as tf
from .util import get_next_batch, get_batches
import numpy as np
import sys
sys.path.append('../')
from evaluate import Metrics
class BiLSTM(object):
def __init__(self, vocab_size, tag_size, batch_size = 64, lr = 0.001, iteration = 20, hidden_size = 128, embedding_size = 128):
self.vocab_size = vocab_size
self.tag_size = tag_size
self.batch_size = batch_size
self.lr = lr
self.iteration = iteration
self.hidden_size = hidden_size
#self.seq_len = 100
self.embedding_size = embedding_size
self.word_embedding = tf.Variable(initial_value = tf.random_normal(shape=[vocab_size, embedding_size]), trainable = True)
def add_placeholder(self):
self.input_x = tf.placeholder(dtype = tf.int32, shape = [None, None], name = 'input_x')
self.input_y = tf.placeholder(dtype = tf.int32, shape = [None, None], name = 'input_y')
self.seq_lengths = tf.placeholder(dtype = tf.int32, shape = [None], name = 'seq_lengths')
self.dropout = tf.placeholder(dtype = tf.float32, shape = [], name = 'dropout')
def operation(self):
with tf.name_scope('embedding'):
chars_vector = tf.nn.embedding_lookup(self.word_embedding, ids = self.input_x, name = 'chars_vector')
chars_vector = tf.nn.dropout(chars_vector, self.dropout)
with tf.name_scope('Bi-LSTM'):
fw_lstm_cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size, name = 'fw_lstm')
bw_lstm_cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size, name = 'bw_lstm')
output, _ = tf.nn.bidirectional_dynamic_rnn(fw_lstm_cell, bw_lstm_cell, inputs = chars_vector, sequence_length = self.seq_lengths, dtype = tf.float32)
fw_output = output[0]
bw_output = output[1]
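            # concatenating forward and backward states gives 2 * hidden_size features per time step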
concat = tf.concat([fw_output, bw_output], -1, name = 'Bi-LSTM-concat')
concat = tf.nn.dropout(concat, self.dropout)
s = tf.shape(concat)
concat = tf.reshape(concat, shape = [-1, 2 * self.hidden_size])
with tf.name_scope('projection'):
W = tf.get_variable('W', dtype = tf.float32, shape = [2 * self.hidden_size, self.tag_size])
b = tf.get_variable('b', dtype = tf.float32, shape = [self.tag_size])
pred = tf.nn.dropout(tf.matmul(concat, W) + b, self.dropout)
self.logit = tf.reshape(pred, shape = [-1, s[1], self.tag_size])
def loss_op(self):
with tf.name_scope('loss'):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = self.logit, labels = self.input_y)
mask = tf.sequence_mask(self.seq_lengths)
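            # keep only the losses at valid (non-padded) time steps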
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
def optimize(self):
with tf.name_scope('optimize'):
self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
def pred_batch(self):
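        # greedy decoding: take the highest-scoring tag at each time step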
self.pred = tf.cast(tf.argmax(self.logit, axis = -1), dtype = tf.int32)
def train(self, train_x, train_y, dev_x, dev_y, word2id, tag2id, dropout):
self.add_placeholder()
self.operation()
self.loss_op()
self.pred_batch()
self.optimize()
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
x, y, seq = get_batches(train_x, train_y, word2id, tag2id, self.batch_size)
for i in range(self.iteration):
for j in range(len(x)):
_, loss, pred_labels = self.sess.run([self.optimizer, self.loss, self.pred], feed_dict = {self.input_x:x[j], self.input_y:y[j], self.seq_lengths:seq[j], self.dropout:dropout})
#self.dev_test(dev_x, dev_y, word2id, tag2id)
def dev_test(self, dev_x, dev_y, word2id, tag2id):
batches_x, batches_y, batches_seq_len = get_batches(dev_x, dev_y, word2id, tag2id, self.batch_size)
pred_lists = []
labels = []
id2tag = dict((id_, tag) for tag, id_ in tag2id.items())
for i in range(len(batches_x)):
pred_labels, loss = self.sess.run([self.pred, self.loss], feed_dict = {self.input_x:batches_x[i], self.input_y:batches_y[i], self.seq_lengths:batches_seq_len[i], self.dropout:1.0})
for j in range(len(pred_labels)):
for k in range(batches_seq_len[i][j]):
pred_lists.append(id2tag[pred_labels[j][k]])
labels.append(id2tag[batches_y[i][j][k]])
metrics = Metrics(labels, pred_lists)
metrics.report_scores()
def close_sess(self):
self.sess.close()
|
[
"evaluate.Metrics"
] |
[((104, 126), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (119, 126), False, 'import sys\n'), ((787, 853), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""input_x"""'}), "(dtype=tf.int32, shape=[None, None], name='input_x')\n", (801, 853), True, 'import tensorflow as tf\n'), ((884, 950), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""input_y"""'}), "(dtype=tf.int32, shape=[None, None], name='input_y')\n", (898, 950), True, 'import tensorflow as tf\n'), ((985, 1049), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""seq_lengths"""'}), "(dtype=tf.int32, shape=[None], name='seq_lengths')\n", (999, 1049), True, 'import tensorflow as tf\n'), ((1080, 1138), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[]', 'name': '"""dropout"""'}), "(dtype=tf.float32, shape=[], name='dropout')\n", (1094, 1138), True, 'import tensorflow as tf\n'), ((3321, 3333), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3331, 3333), True, 'import tensorflow as tf\n'), ((4574, 4601), 'evaluate.Metrics', 'Metrics', (['labels', 'pred_lists'], {}), '(labels, pred_lists)\n', (4581, 4601), False, 'from evaluate import Metrics\n'), ((1189, 1215), 'tensorflow.name_scope', 'tf.name_scope', (['"""embedding"""'], {}), "('embedding')\n", (1202, 1215), True, 'import tensorflow as tf\n'), ((1245, 1332), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.word_embedding'], {'ids': 'self.input_x', 'name': '"""chars_vector"""'}), "(self.word_embedding, ids=self.input_x, name=\n 'chars_vector')\n", (1267, 1332), True, 'import tensorflow as tf\n'), ((1360, 1401), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['chars_vector', 'self.dropout'], {}), '(chars_vector, self.dropout)\n', (1373, 1401), True, 'import tensorflow as tf\n'), ((1416, 1440), 'tensorflow.name_scope', 'tf.name_scope', (['"""Bi-LSTM"""'], {}), "('Bi-LSTM')\n", (1429, 1440), True, 'import tensorflow as tf\n'), ((1470, 1527), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['self.hidden_size'], {'name': '"""fw_lstm"""'}), "(self.hidden_size, name='fw_lstm')\n", (1493, 1527), True, 'import tensorflow as tf\n'), ((1558, 1615), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['self.hidden_size'], {'name': '"""bw_lstm"""'}), "(self.hidden_size, name='bw_lstm')\n", (1581, 1615), True, 'import tensorflow as tf\n'), ((1643, 1780), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['fw_lstm_cell', 'bw_lstm_cell'], {'inputs': 'chars_vector', 'sequence_length': 'self.seq_lengths', 'dtype': 'tf.float32'}), '(fw_lstm_cell, bw_lstm_cell, inputs=\n chars_vector, sequence_length=self.seq_lengths, dtype=tf.float32)\n', (1674, 1780), True, 'import tensorflow as tf\n'), ((1874, 1934), 'tensorflow.concat', 'tf.concat', (['[fw_output, bw_output]', '(-1)'], {'name': '"""Bi-LSTM-concat"""'}), "([fw_output, bw_output], -1, name='Bi-LSTM-concat')\n", (1883, 1934), True, 'import tensorflow as tf\n'), ((1959, 1994), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['concat', 'self.dropout'], {}), '(concat, self.dropout)\n', (1972, 1994), True, 'import tensorflow as tf\n'), ((2012, 2028), 'tensorflow.shape', 'tf.shape', (['concat'], {}), '(concat)\n', (2020, 2028), True, 'import tensorflow as tf\n'), ((2051, 2103), 'tensorflow.reshape', 'tf.reshape', (['concat'], {'shape': '[-1, 2 * self.hidden_size]'}), '(concat, shape=[-1, 2 * self.hidden_size])\n', (2061, 2103), True, 'import tensorflow as tf\n'), ((2122, 2149), 'tensorflow.name_scope', 'tf.name_scope', (['"""projection"""'], {}), "('projection')\n", (2135, 2149), True, 'import tensorflow as tf\n'), ((2168, 2256), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'dtype': 'tf.float32', 'shape': '[2 * self.hidden_size, self.tag_size]'}), "('W', dtype=tf.float32, shape=[2 * self.hidden_size, self.\n tag_size])\n", (2183, 2256), True, 'import tensorflow as tf\n'), ((2273, 2334), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {'dtype': 'tf.float32', 'shape': '[self.tag_size]'}), "('b', dtype=tf.float32, shape=[self.tag_size])\n", (2288, 2334), True, 'import tensorflow as tf\n'), ((2439, 2488), 'tensorflow.reshape', 'tf.reshape', (['pred'], {'shape': '[-1, s[1], self.tag_size]'}), '(pred, shape=[-1, s[1], self.tag_size])\n', (2449, 2488), True, 'import tensorflow as tf\n'), ((2531, 2552), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (2544, 2552), True, 'import tensorflow as tf\n'), ((2576, 2667), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'self.logit', 'labels': 'self.input_y'}), '(logits=self.logit, labels=\n self.input_y)\n', (2622, 2667), True, 'import tensorflow as tf\n'), ((2687, 2721), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['self.seq_lengths'], {}), '(self.seq_lengths)\n', (2703, 2721), True, 'import tensorflow as tf\n'), ((2744, 2773), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['losses', 'mask'], {}), '(losses, mask)\n', (2759, 2773), True, 'import tensorflow as tf\n'), ((2799, 2821), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (2813, 2821), True, 'import tensorflow as tf\n'), ((2863, 2888), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimize"""'], {}), "('optimize')\n", (2876, 2888), True, 'import tensorflow as tf\n'), ((3030, 3060), 'tensorflow.argmax', 'tf.argmax', (['self.logit'], {'axis': '(-1)'}), '(self.logit, axis=-1)\n', (3039, 3060), True, 'import tensorflow as tf\n'), ((3357, 3390), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3388, 3390), True, 'import tensorflow as tf\n'), ((657, 709), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[vocab_size, embedding_size]'}), '(shape=[vocab_size, embedding_size])\n', (673, 709), True, 'import tensorflow as tf\n'), ((2373, 2393), 'tensorflow.matmul', 'tf.matmul', (['concat', 'W'], {}), '(concat, W)\n', (2382, 2393), True, 'import tensorflow as tf\n'), ((2920, 2951), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (2942, 2951), True, 'import tensorflow as tf\n')]
|
import logging
import os
import sys
from datetime import datetime
import torch
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
from utils import save_config_file, accuracy, save_checkpoint
from evaluate import Evaluator
root = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
logging.getLogger('matplotlib.font_manager').disabled = True
NORMALIZE_B4_COMPACTNESS = False
class SimCLR(object):
def __init__(self, *args, **kwargs):
self.args = kwargs['args']
self.neptune_run = kwargs['neptune_run']
self.model = kwargs['model'].to(self.args.device)
self.optimizer = kwargs['optimizer']
self.scheduler = kwargs['scheduler']
self.positive_indices = torch.as_tensor(kwargs['positive_indices'])
self.params = kwargs['params']
self.criterion = torch.nn.CrossEntropyLoss().to(self.args.device)
self.compactness_criterion = torch.nn.MSELoss().to(self.args.device)
self.evaluator = Evaluator(self.neptune_run, self.args, self.model, logging, params=self.params)
self.train_labeled_mean = None
def info_nce_loss(self, features):
labels = torch.cat([torch.arange(self.params['batch_size']) for i in range(self.args.n_views)], dim=0)
labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
labels = labels.to(self.args.device)
features = F.normalize(features, dim=1)
similarity_matrix = torch.matmul(features, features.T)
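        # with L2-normalized features, this matrix holds pairwise cosine similarities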
# discard the main diagonal from both: labels and similarities matrix
mask = torch.eye(labels.shape[0], dtype=torch.bool).to(self.args.device)
labels = labels[~mask].view(labels.shape[0], -1)
similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)
# select and combine multiple positives
positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)
        # select only the negatives
negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)
logits = torch.cat([positives, negatives], dim=1)
labels = torch.zeros(logits.shape[0], dtype=torch.long).to(self.args.device)
logits = logits / self.params['temperature']
return logits, labels
def compactness_loss_info(self, features, indices):
if NORMALIZE_B4_COMPACTNESS:
features = F.normalize(features, dim=1)
rel_indices = (indices[:, None] == self.positive_indices[None,:]).any(-1)
rel_features: torch.Tensor = features[torch.cat([rel_indices, rel_indices])].to(self.args.device)
meaned_rel_features = rel_features - self.train_labeled_mean.to(self.args.device)
return meaned_rel_features
def train(self, train_loader, test_loader, train_labeled_loader):
scaler = GradScaler(enabled=self.args.fp16_precision)
n_iter = 0
self.train_labeled_mean = torch.tensor(self.evaluator.evaluate(test_loader, train_labeled_loader))
logging.info(f"Start SimCLR training for {self.params['epochs']} epochs.")
logging.info(f"Training with gpu: {not self.args.disable_cuda}.")
for epoch_counter in range(self.params['epochs']):
print(f"epoch {epoch_counter}")
self.neptune_run['cur_epoch'].log(epoch_counter)
for images, classes, indices in train_loader:
images = torch.cat(images, dim=0)
images = images.to(self.args.device)
with autocast(enabled=self.args.fp16_precision):
features = self.model(images)
logits, labels = self.info_nce_loss(features)
constructive_loss = self.criterion(logits, labels)
zero_mean_labeled = self.compactness_loss_info(features, indices)
compactness_loss = self.params['lambda'] * self.compactness_criterion(zero_mean_labeled,
torch.zeros_like(zero_mean_labeled))
                    # convert NaN loss to 0 when there are no samples to make compact
compactness_loss[compactness_loss != compactness_loss] = 0
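                    # (NaN != NaN, so the comparison above selects exactly the NaN entries)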
self.optimizer.zero_grad()
scaler.scale(constructive_loss + compactness_loss).backward()
scaler.step(self.optimizer)
scaler.update()
if n_iter % self.args.log_every_n_steps == 0:
top1, top5 = accuracy(logits, labels, topk=(1, 5))
self.neptune_run['losses/constructive_loss'].log(constructive_loss)
self.neptune_run['losses/compactness_loss'].log(compactness_loss)
self.neptune_run['losses/total_loss'].log(constructive_loss + compactness_loss)
self.neptune_run['acc/top1'].log(top1[0])
self.neptune_run['acc/top5'].log(top5[0])
self.neptune_run['losses/learning_rate'].log(self.scheduler.get_last_lr()[0])
n_iter += 1
logging.info(f"evaluating on epoch {epoch_counter + 1}")
self.train_labeled_mean = torch.tensor(self.evaluator.evaluate(test_loader, train_labeled_loader))
# warmup for the first 10 epochs
if epoch_counter >= 10:
self.scheduler.step()
if (epoch_counter + 1) % 20 == 0:
# todo: save checkpoint
logging.info("saving checkpoint - to be implemented")
# save_checkpoint({
# 'epoch': self.params['epochs'],
# 'arch': self.args.arch,
# 'state_dict': self.model.state_dict(),
# 'optimizer': self.optimizer.state_dict(),
# }, is_best=False, filename=os.path.join(self.neptune_run.log_dir, checkpoint_name))
logging.debug(f"Epoch: {epoch_counter}\tLoss: {constructive_loss}\tTop1 accuracy: {top1[0]}")
logging.info("Training has finished.")
# save model checkpoints
|
[
"evaluate.Evaluator"
] |
[((260, 279), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (277, 279), False, 'import logging\n'), ((290, 323), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (311, 323), False, 'import logging\n'), ((336, 409), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (353, 409), False, 'import logging\n'), ((467, 511), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib.font_manager"""'], {}), "('matplotlib.font_manager')\n", (484, 511), False, 'import logging\n'), ((890, 933), 'torch.as_tensor', 'torch.as_tensor', (["kwargs['positive_indices']"], {}), "(kwargs['positive_indices'])\n", (905, 933), False, 'import torch\n'), ((1149, 1228), 'evaluate.Evaluator', 'Evaluator', (['self.neptune_run', 'self.args', 'self.model', 'logging'], {'params': 'self.params'}), '(self.neptune_run, self.args, self.model, logging, params=self.params)\n', (1158, 1228), False, 'from evaluate import Evaluator\n'), ((1553, 1581), 'torch.nn.functional.normalize', 'F.normalize', (['features'], {'dim': '(1)'}), '(features, dim=1)\n', (1564, 1581), True, 'import torch.nn.functional as F\n'), ((1610, 1644), 'torch.matmul', 'torch.matmul', (['features', 'features.T'], {}), '(features, features.T)\n', (1622, 1644), False, 'import torch\n'), ((2236, 2276), 'torch.cat', 'torch.cat', (['[positives, negatives]'], {'dim': '(1)'}), '([positives, negatives], dim=1)\n', (2245, 2276), False, 'import torch\n'), ((2993, 3037), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {'enabled': 'self.args.fp16_precision'}), '(enabled=self.args.fp16_precision)\n', (3003, 3037), False, 'from torch.cuda.amp import GradScaler, autocast\n'), ((3172, 3246), 'logging.info', 'logging.info', (['f"""Start SimCLR training for {self.params[\'epochs\']} epochs."""'], {}), '(f"Start SimCLR training for {self.params[\'epochs\']} epochs.")\n', (3184, 3246), False, 'import logging\n'), ((3255, 3320), 'logging.info', 'logging.info', (['f"""Training with gpu: {not self.args.disable_cuda}."""'], {}), "(f'Training with gpu: {not self.args.disable_cuda}.')\n", (3267, 3320), False, 'import logging\n'), ((6168, 6206), 'logging.info', 'logging.info', (['"""Training has finished."""'], {}), "('Training has finished.')\n", (6180, 6206), False, 'import logging\n'), ((2562, 2590), 'torch.nn.functional.normalize', 'F.normalize', (['features'], {'dim': '(1)'}), '(features, dim=1)\n', (2573, 2590), True, 'import torch.nn.functional as F\n'), ((5246, 5302), 'logging.info', 'logging.info', (['f"""evaluating on epoch {epoch_counter + 1}"""'], {}), "(f'evaluating on epoch {epoch_counter + 1}')\n", (5258, 5302), False, 'import logging\n'), ((6065, 6168), 'logging.debug', 'logging.debug', (['f"""Epoch: {epoch_counter}\tLoss: {constructive_loss}\tTop1 accuracy: {top1[0]}"""'], {}), "(\n f'Epoch: {epoch_counter}\\tLoss: {constructive_loss}\\tTop1 accuracy: {top1[0]}'\n )\n", (6078, 6168), False, 'import logging\n'), ((998, 1025), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1023, 1025), False, 'import torch\n'), ((1084, 1102), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1100, 1102), False, 'import torch\n'), ((1336, 1375), 'torch.arange', 'torch.arange', (["self.params['batch_size']"], {}), "(self.params['batch_size'])\n", (1348, 1375), False, 'import torch\n'), ((1738, 1782), 'torch.eye', 'torch.eye', (['labels.shape[0]'], {'dtype': 'torch.bool'}), '(labels.shape[0], dtype=torch.bool)\n', (1747, 1782), False, 'import torch\n'), ((2294, 2340), 'torch.zeros', 'torch.zeros', (['logits.shape[0]'], {'dtype': 'torch.long'}), '(logits.shape[0], dtype=torch.long)\n', (2305, 2340), False, 'import torch\n'), ((3568, 3592), 'torch.cat', 'torch.cat', (['images'], {'dim': '(0)'}), '(images, dim=0)\n', (3577, 3592), False, 'import torch\n'), ((5636, 5689), 'logging.info', 'logging.info', (['"""saving checkpoint - to be implemented"""'], {}), "('saving checkpoint - to be implemented')\n", (5648, 5689), False, 'import logging\n'), ((2719, 2756), 'torch.cat', 'torch.cat', (['[rel_indices, rel_indices]'], {}), '([rel_indices, rel_indices])\n', (2728, 2756), False, 'import torch\n'), ((3667, 3709), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': 'self.args.fp16_precision'}), '(enabled=self.args.fp16_precision)\n', (3675, 3709), False, 'from torch.cuda.amp import GradScaler, autocast\n'), ((4672, 4709), 'utils.accuracy', 'accuracy', (['logits', 'labels'], {'topk': '(1, 5)'}), '(logits, labels, topk=(1, 5))\n', (4680, 4709), False, 'from utils import save_config_file, accuracy, save_checkpoint\n'), ((4176, 4211), 'torch.zeros_like', 'torch.zeros_like', (['zero_mean_labeled'], {}), '(zero_mean_labeled)\n', (4192, 4211), False, 'import torch\n')]
|
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from dataset import HANDataset
import torch
import torch.nn as nn
import time
import numpy as np
from config import Config
from tqdm import tqdm
import os
from pathlib import Path
from evaluate import evaluate
import datetime
from model.HAN import HAN
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class EarlyStopping:
def __init__(self, patience=4):
self.patience = patience
self.counter = 0
self.best_loss = np.Inf
def __call__(self, val_loss):
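        # returns (early_stop, get_better): whether to stop training and whether the loss improved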
if val_loss < self.best_loss:
early_stop = False
get_better = True
self.counter = 0
self.best_loss = val_loss
else:
get_better = False
self.counter += 1
if self.counter >= self.patience:
early_stop = True
else:
early_stop = False
return early_stop, get_better
def latest_checkpoint(directory):
if not os.path.exists(directory):
return None
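    # map the step number parsed from filenames like "ckpt-<step>.pth" to the filename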
all_checkpoints = {
int(x.split('.')[-2].split('-')[-1]): x
for x in os.listdir(directory)
}
if not all_checkpoints:
return None
return os.path.join(directory,
all_checkpoints[max(all_checkpoints.keys())])
def train():
writer = SummaryWriter(
log_dir=f"./runs/{datetime.datetime.now().replace(microsecond=0).isoformat()}{'-' + os.environ['REMARK'] if 'REMARK' in os.environ else ''}")
if not os.path.exists('checkpoint'):
os.makedirs('checkpoint')
try:
pretrained_word_embedding = torch.from_numpy(
np.load('./data/train/pretrained_word_embedding.npy')).float()
except FileNotFoundError:
pretrained_word_embedding = None
model = HAN(Config, pretrained_word_embedding).to(device)
print(model)
dataset = HANDataset('data/train/news_parsed.tsv')
validation_size = int(Config.validation_proportion * len(dataset))
train_size = len(dataset) - validation_size
train_dataset, val_dataset = random_split(dataset,
(train_size, validation_size))
print(
f"Load training dataset with train size {len(train_dataset)} and validation size {len(val_dataset)}."
)
train_dataloader = iter(
DataLoader(train_dataset,
batch_size=Config.batch_size,
shuffle=True,
num_workers=Config.num_workers,
drop_last=True))
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=Config.learning_rate)
start_time = time.time()
loss_full = []
exhaustion_count = 0
step = 0
Path('./checkpoint').mkdir(exist_ok=True)
if Config.load_checkpoint:
checkpoint_path = latest_checkpoint('./checkpoint')
if checkpoint_path is not None:
print(f"Load saved parameters in {checkpoint_path}")
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
step = checkpoint['step']
model.train()
early_stopping = EarlyStopping()
with tqdm(total=Config.num_batches, desc="Training") as pbar:
for i in range(1, Config.num_batches + 1):
try:
minibatch = next(train_dataloader)
except StopIteration:
exhaustion_count += 1
tqdm.write(
f"Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."
)
train_dataloader = iter(
DataLoader(train_dataset,
batch_size=Config.batch_size,
shuffle=True,
num_workers=Config.num_workers,
drop_last=True))
minibatch = next(train_dataloader)
step += 1
# batch_size, num_(sub)categories
y_pred = model(minibatch)
# batch_size
y = minibatch[Config.target]
loss = criterion(y_pred, y.to(device))
loss_full.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
writer.add_scalar('Train/Loss', loss.item(), step)
if i % Config.num_batches_show_loss == 0:
tqdm.write(
f"Time {time_since(start_time)}, batches {i}, current loss {loss.item():.4f}, average loss: {np.mean(loss_full):.4f}"
)
if i % Config.num_batches_validate == 0:
model.eval()
val_loss, val_report = evaluate(model, val_dataset)
model.train()
precision = val_report['weighted avg']['precision']
recall = val_report['weighted avg']['recall']
f1 = val_report['weighted avg']['f1-score']
writer.add_scalar('Validation/loss', val_loss, step)
writer.add_scalar('Validation/precision', precision, step)
writer.add_scalar('Validation/recall', recall, step)
writer.add_scalar('Validation/F1', f1, step)
tqdm.write(
f"Time {time_since(start_time)}, batches {i}, validation loss: {val_loss:.4f}, validation precision: {precision:.4f}, validation recall: {recall:.4f}, validation F1: {f1:.4f}"
)
early_stop, get_better = early_stopping(val_loss)
if early_stop:
tqdm.write('Early stop.')
break
elif get_better:
torch.save(
{
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'step': step
}, f"./checkpoint/ckpt-{step}.pth")
pbar.update(1)
def time_since(since):
"""
Format elapsed time string.
"""
now = time.time()
elapsed_time = now - since
return time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
if __name__ == '__main__':
print('Using device:', device)
# torch.manual_seed(0)
train()
|
[
"evaluate.evaluate"
] |
[((1965, 2005), 'dataset.HANDataset', 'HANDataset', (['"""data/train/news_parsed.tsv"""'], {}), "('data/train/news_parsed.tsv')\n", (1975, 2005), False, 'from dataset import HANDataset\n'), ((2159, 2211), 'torch.utils.data.random_split', 'random_split', (['dataset', '(train_size, validation_size)'], {}), '(dataset, (train_size, validation_size))\n', (2171, 2211), False, 'from torch.utils.data import DataLoader, random_split\n'), ((2635, 2656), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2654, 2656), True, 'import torch.nn as nn\n'), ((2752, 2763), 'time.time', 'time.time', ([], {}), '()\n', (2761, 2763), False, 'import time\n'), ((6296, 6307), 'time.time', 'time.time', ([], {}), '()\n', (6305, 6307), False, 'import time\n'), ((391, 416), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (414, 416), False, 'import torch\n'), ((1073, 1098), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1087, 1098), False, 'import os\n'), ((1595, 1623), 'os.path.exists', 'os.path.exists', (['"""checkpoint"""'], {}), "('checkpoint')\n", (1609, 1623), False, 'import os\n'), ((1633, 1658), 'os.makedirs', 'os.makedirs', (['"""checkpoint"""'], {}), "('checkpoint')\n", (1644, 1658), False, 'import os\n'), ((2423, 2544), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'Config.batch_size', 'shuffle': '(True)', 'num_workers': 'Config.num_workers', 'drop_last': '(True)'}), '(train_dataset, batch_size=Config.batch_size, shuffle=True,\n num_workers=Config.num_workers, drop_last=True)\n', (2433, 2544), False, 'from torch.utils.data import DataLoader, random_split\n'), ((3369, 3416), 'tqdm.tqdm', 'tqdm', ([], {'total': 'Config.num_batches', 'desc': '"""Training"""'}), "(total=Config.num_batches, desc='Training')\n", (3373, 3416), False, 'from tqdm import tqdm\n'), ((6376, 6401), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (6387, 6401), False, 'import time\n'), ((1209, 1230), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1219, 1230), False, 'import os\n'), ((1882, 1920), 'model.HAN.HAN', 'HAN', (['Config', 'pretrained_word_embedding'], {}), '(Config, pretrained_word_embedding)\n', (1885, 1920), False, 'from model.HAN import HAN\n'), ((2826, 2846), 'pathlib.Path', 'Path', (['"""./checkpoint"""'], {}), "('./checkpoint')\n", (2830, 2846), False, 'from pathlib import Path\n'), ((3089, 3116), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (3099, 3116), False, 'import torch\n'), ((4914, 4942), 'evaluate.evaluate', 'evaluate', (['model', 'val_dataset'], {}), '(model, val_dataset)\n', (4922, 4942), False, 'from evaluate import evaluate\n'), ((1735, 1788), 'numpy.load', 'np.load', (['"""./data/train/pretrained_word_embedding.npy"""'], {}), "('./data/train/pretrained_word_embedding.npy')\n", (1742, 1788), True, 'import numpy as np\n'), ((3633, 3748), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."""'], {}), "(\n f'Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset.'\n )\n", (3643, 3748), False, 'from tqdm import tqdm\n'), ((5797, 5822), 'tqdm.tqdm.write', 'tqdm.write', (['"""Early stop."""'], {}), "('Early stop.')\n", (5807, 5822), False, 'from tqdm import tqdm\n'), ((3838, 3959), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'Config.batch_size', 'shuffle': '(True)', 'num_workers': 'Config.num_workers', 'drop_last': '(True)'}), '(train_dataset, batch_size=Config.batch_size, shuffle=True,\n num_workers=Config.num_workers, drop_last=True)\n', (3848, 3959), False, 'from torch.utils.data import DataLoader, random_split\n'), ((4749, 4767), 'numpy.mean', 'np.mean', (['loss_full'], {}), '(loss_full)\n', (4756, 4767), True, 'import numpy as np\n'), ((1459, 1482), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1480, 1482), False, 'import datetime\n')]
|
import numpy as np
import tensorflow as tf
from keras import initializers
from keras.models import Model
from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras import backend as K
from evaluate import evaluate_model
from Dataset import Dataset
from time import time
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="Run DMF.")
parser.add_argument('--path', nargs='?', default='Data/',
help='Input data path.')
parser.add_argument('--dataset', nargs='?', default='ml-1m',
help='Choose a dataset.')
parser.add_argument('--epochs', type=int, default=20,
help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=256,
help='Batch size.')
parser.add_argument('--userlayers', nargs='?', default='[512, 64]',
help="Size of each user layer")
parser.add_argument('--itemlayers', nargs='?', default='[1024, 64]',
help="Size of each item layer")
parser.add_argument('--num_neg', type=int, default=4,
help='Number of negative instances to pair with a positive instance.')
parser.add_argument('--lr', type=float, default=0.0001,
help='Learning rate.')
parser.add_argument('--learner', nargs='?', default='adam',
help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
parser.add_argument('--verbose', type=int, default=1,
help='Show performance per X iterations')
parser.add_argument('--out', type=int, default=1,
help='Whether to save the trained model.')
return parser.parse_args()
def get_model(train, num_users, num_items, userlayers=[512, 64], itemlayers=[1024, 64]):
num_layer = len(userlayers) # Number of layers in the MLP
user_matrix = K.constant(getTrainMatrix(train))
item_matrix = K.constant(getTrainMatrix(train).T)
# Input variables
user = Input(shape=(1,), dtype='int32', name='user_input')
item = Input(shape=(1,), dtype='int32', name='item_input')
# Multi-hot User representation and Item representation
user_input = Lambda(lambda x: tf.gather(user_matrix, tf.to_int32(x)))(user)
item_input = Lambda(lambda x: tf.gather(item_matrix, tf.to_int32(x)))(item)
user_input = Reshape((num_items, ))(user_input)
item_input = Reshape((num_users, ))(item_input)
print(user_input.shape, item_input.shape)
# DMF part
userlayer = Dense(userlayers[0], activation="linear" , name='user_layer0')
itemlayer = Dense(itemlayers[0], activation="linear" , name='item_layer0')
user_latent_vector = userlayer(user_input)
item_latent_vector = itemlayer(item_input)
print(user_latent_vector.shape, item_latent_vector.shape)
for idx in range(1, num_layer):
userlayer = Dense(userlayers[idx], activation='relu', name='user_layer%d' % idx)
itemlayer = Dense(itemlayers[idx], activation='relu', name='item_layer%d' % idx)
user_latent_vector = userlayer(user_latent_vector)
item_latent_vector = itemlayer(item_latent_vector)
print(user_latent_vector.shape, item_latent_vector.shape)
predict_vector = multiply([user_latent_vector, item_latent_vector])
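    # element-wise interaction of the user and item latent vectors, scored by the sigmoid layer below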
prediction = Dense(1, activation='sigmoid',
kernel_initializer=initializers.lecun_normal(), name='prediction')(predict_vector)
print(prediction.shape)
model_ = Model(inputs=[user, item],
outputs=prediction)
return model_
def getTrainMatrix(train):
num_users, num_items = train.shape
train_matrix = np.zeros([num_users, num_items], dtype=np.int32)
for (u, i) in train.keys():
train_matrix[u][i] = 1
return train_matrix
def get_train_instances(train, num_negatives):
user_input, item_input, labels = [], [], []
    num_items = train.shape[1]  # number of candidate items for negative sampling
for (u, i) in train.keys():
# positive instance
user_input.append(u)
item_input.append(i)
labels.append(1)
# negative instances
for t in range(num_negatives):
j = np.random.randint(num_items)
while (u, j) in train.keys():
j = np.random.randint(num_items)
user_input.append(u)
item_input.append(j)
labels.append(0)
return user_input, item_input, labels
if __name__ == '__main__':
args = parse_args()
path = args.path
dataset = args.dataset
userlayers = eval(args.userlayers)
itemlayers = eval(args.itemlayers)
num_negatives = args.num_neg
learner = args.learner
learning_rate = args.lr
batch_size = args.batch_size
epochs = args.epochs
verbose = args.verbose
topK = 10
evaluation_threads = 1 # mp.cpu_count()
print("DMF arguments: %s " %(args))
model_out_file = 'Pretrain/%s_DMF_%d.h5' %(args.dataset, time())
# Loading data
t1 = time()
dataset = Dataset(args.path + args.dataset)
train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
num_users, num_items = train.shape
print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
% (time()-t1, num_users, num_items, train.nnz, len(testRatings)))
# Build model
model = get_model(train, num_users, num_items, userlayers, itemlayers)
if learner.lower() == "adagrad":
model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
elif learner.lower() == "rmsprop":
model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
elif learner.lower() == "adam":
model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
else:
model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
# Check Init performance
t1 = time()
(hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
print('Init: HR = %.4f, NDCG = %.4f [%.1f]' % (hr, ndcg, time()-t1))
best_hr, best_ndcg, best_iter = hr, ndcg, -1
# Train model
for epoch in range(epochs):
t1 = time()
# Generate training instances
user_input, item_input, labels = get_train_instances(train, num_negatives)
hist = model.fit([np.array(user_input), np.array(item_input)], # input
np.array(labels), # labels
batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
t2 = time()
# Evaluation
if epoch % verbose == 0:
(hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
hr, ndcg, loss = np.array(hits).mean(), np.array(ndcgs).mean(), hist.history['loss'][0]
print('Iteration %d [%.1f s]: HR = %.4f, NDCG = %.4f, loss = %.4f [%.1f s]'
% (epoch, t2-t1, hr, ndcg, loss, time()-t2))
if hr > best_hr:
best_hr, best_ndcg, best_iter = hr, ndcg, epoch
if args.out > 0:
model.save_weights(model_out_file, overwrite=True)
print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " %(best_iter, best_hr, best_ndcg))
if args.out > 0:
print("The best DMF model is saved to %s" % model_out_file)
|
[
"evaluate.evaluate_model"
] |
[((437, 484), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run DMF."""'}), "(description='Run DMF.')\n", (460, 484), False, 'import argparse\n'), ((2159, 2210), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""', 'name': '"""user_input"""'}), "(shape=(1,), dtype='int32', name='user_input')\n", (2164, 2210), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((2222, 2273), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""', 'name': '"""item_input"""'}), "(shape=(1,), dtype='int32', name='item_input')\n", (2227, 2273), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((2681, 2742), 'keras.layers.Dense', 'Dense', (['userlayers[0]'], {'activation': '"""linear"""', 'name': '"""user_layer0"""'}), "(userlayers[0], activation='linear', name='user_layer0')\n", (2686, 2742), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((2760, 2821), 'keras.layers.Dense', 'Dense', (['itemlayers[0]'], {'activation': '"""linear"""', 'name': '"""item_layer0"""'}), "(itemlayers[0], activation='linear', name='item_layer0')\n", (2765, 2821), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((3399, 3449), 'keras.layers.multiply', 'multiply', (['[user_latent_vector, item_latent_vector]'], {}), '([user_latent_vector, item_latent_vector])\n', (3407, 3449), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((3647, 3693), 'keras.models.Model', 'Model', ([], {'inputs': '[user, item]', 'outputs': 'prediction'}), '(inputs=[user, item], outputs=prediction)\n', (3652, 3693), False, 'from keras.models import Model\n'), ((3822, 3870), 'numpy.zeros', 'np.zeros', (['[num_users, num_items]'], {'dtype': 'np.int32'}), '([num_users, num_items], dtype=np.int32)\n', (3830, 3870), True, 'import numpy as np\n'), ((5127, 5133), 'time.time', 'time', ([], {}), '()\n', (5131, 5133), False, 'from time import time\n'), ((5148, 5181), 'Dataset.Dataset', 'Dataset', (['(args.path + args.dataset)'], {}), '(args.path + args.dataset)\n', (5155, 5181), False, 'from Dataset import Dataset\n'), ((6089, 6095), 'time.time', 'time', ([], {}), '()\n', (6093, 6095), False, 'from time import time\n'), ((6116, 6191), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'testRatings', 'testNegatives', 'topK', 'evaluation_threads'], {}), '(model, testRatings, testNegatives, topK, evaluation_threads)\n', (6130, 6191), False, 'from evaluate import evaluate_model\n'), ((2512, 2533), 'keras.layers.Reshape', 'Reshape', (['(num_items,)'], {}), '((num_items,))\n', (2519, 2533), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((2564, 2585), 'keras.layers.Reshape', 'Reshape', (['(num_users,)'], {}), '((num_users,))\n', (2571, 2585), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((3035, 3103), 'keras.layers.Dense', 'Dense', (['userlayers[idx]'], {'activation': '"""relu"""', 'name': "('user_layer%d' % idx)"}), "(userlayers[idx], activation='relu', name='user_layer%d' % idx)\n", (3040, 3103), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((3124, 3192), 'keras.layers.Dense', 'Dense', (['itemlayers[idx]'], {'activation': '"""relu"""', 'name': "('item_layer%d' % idx)"}), "(itemlayers[idx], activation='relu', name='item_layer%d' % idx)\n", (3129, 3192), False, 'from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge\n'), ((6443, 6449), 'time.time', 'time', ([], {}), '()\n', (6447, 6449), False, 'from time import time\n'), ((6801, 6807), 'time.time', 'time', ([], {}), '()\n', (6805, 6807), False, 'from time import time\n'), ((4312, 4340), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (4329, 4340), True, 'import numpy as np\n'), ((5086, 5092), 'time.time', 'time', ([], {}), '()\n', (5090, 5092), False, 'from time import time\n'), ((6677, 6693), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (6685, 6693), True, 'import numpy as np\n'), ((6891, 6966), 'evaluate.evaluate_model', 'evaluate_model', (['model', 'testRatings', 'testNegatives', 'topK', 'evaluation_threads'], {}), '(model, testRatings, testNegatives, topK, evaluation_threads)\n', (6905, 6966), False, 'from evaluate import evaluate_model\n'), ((3540, 3567), 'keras.initializers.lecun_normal', 'initializers.lecun_normal', ([], {}), '()\n', (3565, 3567), False, 'from keras import initializers\n'), ((4403, 4431), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (4420, 4431), True, 'import numpy as np\n'), ((5648, 5673), 'keras.optimizers.Adagrad', 'Adagrad', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (5655, 5673), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((6207, 6221), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (6215, 6221), True, 'import numpy as np\n'), ((6230, 6245), 'numpy.array', 'np.array', (['ndcgs'], {}), '(ndcgs)\n', (6238, 6245), True, 'import numpy as np\n'), ((6598, 6618), 'numpy.array', 'np.array', (['user_input'], {}), '(user_input)\n', (6606, 6618), True, 'import numpy as np\n'), ((6620, 6640), 'numpy.array', 'np.array', (['item_input'], {}), '(item_input)\n', (6628, 6640), True, 'import numpy as np\n'), ((2392, 2406), 'tensorflow.to_int32', 'tf.to_int32', (['x'], {}), '(x)\n', (2403, 2406), True, 'import tensorflow as tf\n'), ((2472, 2486), 'tensorflow.to_int32', 'tf.to_int32', (['x'], {}), '(x)\n', (2483, 2486), True, 'import tensorflow as tf\n'), ((5417, 5423), 'time.time', 'time', ([], {}), '()\n', (5421, 5423), False, 'from time import time\n'), ((5774, 5799), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (5781, 5799), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((6314, 6320), 'time.time', 'time', ([], {}), '()\n', (6318, 6320), False, 'from time import time\n'), ((5897, 5919), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (5901, 5919), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((5991, 6012), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (5994, 6012), False, 'from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n'), ((6996, 7010), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (7004, 7010), True, 'import numpy as np\n'), ((7019, 7034), 'numpy.array', 'np.array', (['ndcgs'], {}), '(ndcgs)\n', (7027, 7034), True, 'import numpy as np\n'), ((7208, 7214), 'time.time', 'time', ([], {}), '()\n', (7212, 7214), False, 'from time import time\n')]
|
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
# from imcut.pycut import ImageGraphCut
from imcut.pycut import ImageGraphCut
from tqdm import tqdm
import copy
def compute_predictions(image_path, mask_path, gt_path, save_path, version, nr_modalities, class_labels, resize=True):
image_filenames = utils.load_filenames(image_path)[::nr_modalities]
mask_filenames = utils.load_filenames(mask_path)
target_shape = (512, 512, 200) # (256, 256, 100)
# for i in tqdm(range(len(image_filenames))):
# multi_label_mask, _, _, _ = utils.load_nifty(mask_filenames[i])
# print("shape: ", multi_label_mask.shape)
segparams = {
'use_boundary_penalties': False,
'boundary_dilatation_distance': 1,
'boundary_penalties_weight': 1,
'block_size': 8, # 8
'tile_zoom_constant': 1
}
is_resized = False
for i in tqdm(range(len(image_filenames))):
image, affine, spacing, header = utils.load_nifty(image_filenames[i])
multi_label_mask, _, _, _ = utils.load_nifty(mask_filenames[i])
if resize and image.size > np.prod(target_shape):
print("Resized: ", os.path.basename(image_filenames[i]))
is_resized = True
original_shape = image.shape
image = utils.interpolate(image, (target_shape[0], target_shape[1], original_shape[2]))
multi_label_mask = utils.interpolate(multi_label_mask, (target_shape[0], target_shape[1], original_shape[2]), mask=True)
target_multi_label_mask = np.zeros_like(multi_label_mask)
labels = np.unique(multi_label_mask)
labels = labels[labels > 0].astype(int)
# print("labels: ", labels)
for label in labels:
# print("label: ", label)
mask = copy.deepcopy(multi_label_mask)
mask[mask == label] = -2 # Save foreground
mask[mask >= 0] = 2 # Background
mask[mask == -2] = 1 # Restore foreground
mask[mask == -1] = 0 # Unknown
# utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + "_tmp1.nii.gz"), mask, affine, spacing, header, is_mask=True)
mask = mask.astype(np.uint8)
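            # imcut seed convention after the remapping above: 1 = foreground,
            # 2 = background, 0 = unknown. The three versions differ only in the
            # solver passed to pycut ('graphcut' vs. the alternative multiscale
            # 'lo2hi' / 'hi2lo' methods).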
if version == "GraphCut1":
segparams.update({"method": "graphcut"})
gc = ImageGraphCut(image, segparams=segparams)
elif version == "GraphCut2":
segparams.update({"method": "lo2hi"})
gc = ImageGraphCut(image, segparams=segparams)
elif version == "GraphCut3":
segparams.update({"method": "hi2lo"})
gc = ImageGraphCut(image, segparams=segparams)
gc.set_seeds(mask)
gc.run()
mask = gc.segmentation.squeeze()
# mask[mask == 0] = -1 # Save foreground
# mask[mask == 1] = 0 # Background
# mask[mask == -1] = label # Restore foreground
# print(save_path + os.path.basename(mask_filenames[i][:-12] + "_tmp2.nii.gz"))
# utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + "_tmp2.nii.gz"), mask, affine, spacing, header, is_mask=True)
target_multi_label_mask[mask == 0] = label # 0 is foreground
if is_resized:
is_resized = False
target_multi_label_mask = utils.interpolate(target_multi_label_mask, original_shape, mask=True)
utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + ".nii.gz"), target_multi_label_mask, affine, spacing, header, is_mask=True)
results = evaluate(gt_path, save_path, class_labels)
return results
|
[
"evaluate.evaluate"
] |
[((420, 451), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['mask_path'], {}), '(mask_path)\n', (440, 451), False, 'from i3Deep import utils\n'), ((3690, 3732), 'evaluate.evaluate', 'evaluate', (['gt_path', 'save_path', 'class_labels'], {}), '(gt_path, save_path, class_labels)\n', (3698, 3732), False, 'from evaluate import evaluate\n'), ((348, 380), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['image_path'], {}), '(image_path)\n', (368, 380), False, 'from i3Deep import utils\n'), ((1023, 1059), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (1039, 1059), False, 'from i3Deep import utils\n'), ((1097, 1132), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['mask_filenames[i]'], {}), '(mask_filenames[i])\n', (1113, 1132), False, 'from i3Deep import utils\n'), ((1605, 1636), 'numpy.zeros_like', 'np.zeros_like', (['multi_label_mask'], {}), '(multi_label_mask)\n', (1618, 1636), True, 'import numpy as np\n'), ((1655, 1682), 'numpy.unique', 'np.unique', (['multi_label_mask'], {}), '(multi_label_mask)\n', (1664, 1682), True, 'import numpy as np\n'), ((1356, 1435), 'i3Deep.utils.interpolate', 'utils.interpolate', (['image', '(target_shape[0], target_shape[1], original_shape[2])'], {}), '(image, (target_shape[0], target_shape[1], original_shape[2]))\n', (1373, 1435), False, 'from i3Deep import utils\n'), ((1468, 1573), 'i3Deep.utils.interpolate', 'utils.interpolate', (['multi_label_mask', '(target_shape[0], target_shape[1], original_shape[2])'], {'mask': '(True)'}), '(multi_label_mask, (target_shape[0], target_shape[1],\n original_shape[2]), mask=True)\n', (1485, 1573), False, 'from i3Deep import utils\n'), ((1858, 1889), 'copy.deepcopy', 'copy.deepcopy', (['multi_label_mask'], {}), '(multi_label_mask)\n', (1871, 1889), False, 'import copy\n'), ((3448, 3517), 'i3Deep.utils.interpolate', 'utils.interpolate', (['target_multi_label_mask', 'original_shape'], {'mask': '(True)'}), '(target_multi_label_mask, original_shape, mask=True)\n', (3465, 3517), False, 'from i3Deep import utils\n'), ((1169, 1190), 'numpy.prod', 'np.prod', (['target_shape'], {}), '(target_shape)\n', (1176, 1190), True, 'import numpy as np\n'), ((1224, 1260), 'os.path.basename', 'os.path.basename', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (1240, 1260), False, 'import os\n'), ((2406, 2447), 'imcut.pycut.ImageGraphCut', 'ImageGraphCut', (['image'], {'segparams': 'segparams'}), '(image, segparams=segparams)\n', (2419, 2447), False, 'from imcut.pycut import ImageGraphCut\n'), ((3556, 3609), 'os.path.basename', 'os.path.basename', (["(mask_filenames[i][:-12] + '.nii.gz')"], {}), "(mask_filenames[i][:-12] + '.nii.gz')\n", (3572, 3609), False, 'import os\n'), ((2567, 2608), 'imcut.pycut.ImageGraphCut', 'ImageGraphCut', (['image'], {'segparams': 'segparams'}), '(image, segparams=segparams)\n', (2580, 2608), False, 'from imcut.pycut import ImageGraphCut\n'), ((2728, 2769), 'imcut.pycut.ImageGraphCut', 'ImageGraphCut', (['image'], {'segparams': 'segparams'}), '(image, segparams=segparams)\n', (2741, 2769), False, 'from imcut.pycut import ImageGraphCut\n')]
|
from .lstm import BiLSTM
import tensorflow as tf
from tensorflow.contrib import crf
from .util import get_batches
import sys
sys.path.append('../')
from evaluate import Metrics
class BiLSTM_CRF(object):
def __init__(self, vocab_size, tag_size, batch_size = 64, lr = 0.001, iteration = 30, hidden_size = 128, embedding_size = 128):
tf.reset_default_graph()
self.bilstm = BiLSTM(vocab_size, tag_size, batch_size, lr, iteration, hidden_size, embedding_size)
def CRF_layer(self):
self.logit = self.bilstm.logit
with tf.name_scope('crf'):
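            # crf_log_likelihood jointly learns the tag-transition matrix and
            # scores the BiLSTM logits; its negative mean serves as the loss.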
log_likelihood_, self.transition = crf.crf_log_likelihood(self.logit, self.bilstm.input_y, self.bilstm.seq_lengths)
self.cost = -tf.reduce_mean(log_likelihood_)
def optimize(self):
with tf.name_scope('crf_optimize'):
self.optimizer = tf.train.AdamOptimizer(self.bilstm.lr).minimize(self.cost)
def train(self, train_x, train_y, dev_x, dev_y, word2id, tag2id, dropout):
self.bilstm.add_placeholder()
self.bilstm.operation()
self.CRF_layer()
self.optimize()
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
x, y, seqs = get_batches(train_x, train_y, word2id, tag2id, self.bilstm.batch_size)
for i in range(self.bilstm.iteration):
for j in range(len(x)):
_, loss = self.sess.run([self.optimizer, self.cost], feed_dict = {self.bilstm.input_x:x[j], self.bilstm.input_y:y[j], self.bilstm.seq_lengths:seqs[j], self.bilstm.dropout:dropout})
#self.dev_test(dev_x, dev_y, word2id, tag2id)
def pred_labels(self, x, y, seqs):
scores, transition_matrix = self.sess.run([self.logit, self.transition], feed_dict = {self.bilstm.input_x:x, self.bilstm.input_y:y, self.bilstm.seq_lengths:seqs, self.bilstm.dropout:1.0})
labels = []
for i in range(scores.shape[0]):
label, _ = crf.viterbi_decode(scores[i], transition_params = transition_matrix)
labels.append(label)
return labels
def dev_test(self, dev_x, dev_y, word2id, tag2id):
batches_x, batches_y, batches_seq_len = get_batches(dev_x, dev_y, word2id, tag2id, self.bilstm.batch_size)
pred_lists = []
labels = []
id2tag = dict((id_, tag) for tag, id_ in tag2id.items())
for i in range(len(batches_x)):
pred_labels = self.pred_labels(batches_x[i], batches_y[i], batches_seq_len[i])
for j in range(len(pred_labels)):
for k in range(batches_seq_len[i][j]):
pred_lists.append(id2tag[pred_labels[j][k]])
labels.append(id2tag[batches_y[i][j][k]])
metrics = Metrics(labels, pred_lists)
metrics.report_scores()
def close_sess(self):
self.sess.close()
|
[
"evaluate.Metrics"
] |
[((130, 152), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (145, 152), False, 'import sys\n'), ((356, 380), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (378, 380), True, 'import tensorflow as tf\n'), ((1171, 1183), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1181, 1183), True, 'import tensorflow as tf\n'), ((2822, 2849), 'evaluate.Metrics', 'Metrics', (['labels', 'pred_lists'], {}), '(labels, pred_lists)\n', (2829, 2849), False, 'from evaluate import Metrics\n'), ((573, 593), 'tensorflow.name_scope', 'tf.name_scope', (['"""crf"""'], {}), "('crf')\n", (586, 593), True, 'import tensorflow as tf\n'), ((643, 728), 'tensorflow.contrib.crf.crf_log_likelihood', 'crf.crf_log_likelihood', (['self.logit', 'self.bilstm.input_y', 'self.bilstm.seq_lengths'], {}), '(self.logit, self.bilstm.input_y, self.bilstm.seq_lengths\n )\n', (665, 728), False, 'from tensorflow.contrib import crf\n'), ((823, 852), 'tensorflow.name_scope', 'tf.name_scope', (['"""crf_optimize"""'], {}), "('crf_optimize')\n", (836, 852), True, 'import tensorflow as tf\n'), ((1207, 1240), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1238, 1240), True, 'import tensorflow as tf\n'), ((2022, 2088), 'tensorflow.contrib.crf.viterbi_decode', 'crf.viterbi_decode', (['scores[i]'], {'transition_params': 'transition_matrix'}), '(scores[i], transition_params=transition_matrix)\n', (2040, 2088), False, 'from tensorflow.contrib import crf\n'), ((750, 781), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['log_likelihood_'], {}), '(log_likelihood_)\n', (764, 781), True, 'import tensorflow as tf\n'), ((884, 922), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.bilstm.lr'], {}), '(self.bilstm.lr)\n', (906, 922), True, 'import tensorflow as tf\n')]
|
import cv2
import torch
from network.rtpose_vgg import get_model, use_vgg
from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat
from openpose_utils import get_pose
from network.post import decode_pose
import argparse
import os
from pathlib import Path
parser = argparse.ArgumentParser(description='Generate Poses')
parser.add_argument('--train_path', help="path to train set frames")
parser.add_argument('--test_path', help="path to test set frames")
args = parser.parse_args()
train = Path(args.train_path)
test = Path(args.test_path)
if not os.path.exists(train.joinpath('train_label')):
os.makedirs(train.joinpath('train_label'))
if not os.path.exists(test.joinpath('test_label')):
os.makedirs(test.joinpath('test_label'))
if __name__ == '__main__':
train_pose_dir = train.joinpath('train_label')
test_pose_dir = test.joinpath('test_label')
model = get_model(trunk='vgg19')
model_path = 'pose_model_scratch.pth'
model = torch.nn.DataParallel(model).cuda()
model.load_state_dict(torch.load(model_path))
model.eval()
for idx in range(200, 210):
train_img_path = train.joinpath('train_set')
train_img_name = "image%0d.jpg" % idx
train_img_path = train_img_path.joinpath(train_img_name)
train_image = cv2.resize( cv2.imread(str(train_img_path)), (512, 512))
train_multiplier = get_multiplier(train_image)
test_img_path = test.joinpath('test_set')
test_img_name = "image%0d.jpg" % idx
test_img_path = test_img_path.joinpath(test_img_name)
test_image = cv2.resize( cv2.imread(str(test_img_path)), (512, 512))
test_multiplier = get_multiplier(test_image)
with torch.no_grad():
train_paf, train_heatmap = get_outputs(train_multiplier, train_image, model, 'rtpose')
test_paf, test_heatmap = get_outputs(test_multiplier, test_image, model, 'rtpose')
# use [::-1] to reverse!
train_swapped_img = train_image[:, ::-1, :]
test_swapped_img = test_image[:, ::-1, :]
train_flipped_paf, train_flipped_heat = get_outputs(train_multiplier, train_swapped_img, model, 'rtpose')
test_flipped_paf, test_flipped_heat = get_outputs(test_multiplier, test_swapped_img, model, 'rtpose')
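            # Fuse both estimates: handle_paf_and_heat maps the mirrored
            # prediction back and combines it with the original one.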
train_paf, train_heatmap = handle_paf_and_heat(train_heatmap, train_flipped_heat, train_paf, train_flipped_paf)
test_paf, test_heatmap = handle_paf_and_heat(test_heatmap, test_flipped_heat, test_paf, test_flipped_paf)
param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
train_pose = get_pose(param, train_heatmap, train_paf)
test_pose = get_pose(param, test_heatmap, test_paf)
pose_name = "pose%0d.jpg" % idx
cv2.imwrite(str(test_pose_dir.joinpath(pose_name)), test_pose)
cv2.imwrite(str(train_pose_dir.joinpath(pose_name)), train_pose)
|
[
"evaluate.coco_eval.handle_paf_and_heat",
"evaluate.coco_eval.get_outputs",
"evaluate.coco_eval.get_multiplier"
] |
[((288, 341), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Poses"""'}), "(description='Generate Poses')\n", (311, 341), False, 'import argparse\n'), ((514, 535), 'pathlib.Path', 'Path', (['args.train_path'], {}), '(args.train_path)\n', (518, 535), False, 'from pathlib import Path\n'), ((543, 563), 'pathlib.Path', 'Path', (['args.test_path'], {}), '(args.test_path)\n', (547, 563), False, 'from pathlib import Path\n'), ((904, 928), 'network.rtpose_vgg.get_model', 'get_model', ([], {'trunk': '"""vgg19"""'}), "(trunk='vgg19')\n", (913, 928), False, 'from network.rtpose_vgg import get_model, use_vgg\n'), ((1045, 1067), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (1055, 1067), False, 'import torch\n'), ((1389, 1416), 'evaluate.coco_eval.get_multiplier', 'get_multiplier', (['train_image'], {}), '(train_image)\n', (1403, 1416), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n'), ((1678, 1704), 'evaluate.coco_eval.get_multiplier', 'get_multiplier', (['test_image'], {}), '(test_image)\n', (1692, 1704), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n'), ((2638, 2679), 'openpose_utils.get_pose', 'get_pose', (['param', 'train_heatmap', 'train_paf'], {}), '(param, train_heatmap, train_paf)\n', (2646, 2679), False, 'from openpose_utils import get_pose\n'), ((2700, 2739), 'openpose_utils.get_pose', 'get_pose', (['param', 'test_heatmap', 'test_paf'], {}), '(param, test_heatmap, test_paf)\n', (2708, 2739), False, 'from openpose_utils import get_pose\n'), ((983, 1011), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1004, 1011), False, 'import torch\n'), ((1719, 1734), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1732, 1734), False, 'import torch\n'), ((1775, 1834), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['train_multiplier', 'train_image', 'model', '"""rtpose"""'], {}), "(train_multiplier, train_image, model, 'rtpose')\n", (1786, 1834), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n'), ((1872, 1929), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['test_multiplier', 'test_image', 'model', '"""rtpose"""'], {}), "(test_multiplier, test_image, model, 'rtpose')\n", (1883, 1929), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n'), ((2132, 2197), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['train_multiplier', 'train_swapped_img', 'model', '"""rtpose"""'], {}), "(train_multiplier, train_swapped_img, model, 'rtpose')\n", (2143, 2197), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n'), ((2248, 2311), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['test_multiplier', 'test_swapped_img', 'model', '"""rtpose"""'], {}), "(test_multiplier, test_swapped_img, model, 'rtpose')\n", (2259, 2311), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n'), ((2352, 2440), 'evaluate.coco_eval.handle_paf_and_heat', 'handle_paf_and_heat', (['train_heatmap', 'train_flipped_heat', 'train_paf', 'train_flipped_paf'], {}), '(train_heatmap, train_flipped_heat, train_paf,\n train_flipped_paf)\n', (2371, 2440), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n'), ((2474, 2559), 'evaluate.coco_eval.handle_paf_and_heat', 'handle_paf_and_heat', (['test_heatmap', 'test_flipped_heat', 'test_paf', 'test_flipped_paf'], {}), '(test_heatmap, test_flipped_heat, test_paf, test_flipped_paf\n )\n', (2493, 2559), False, 'from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat\n')]
|
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
from evaluate import evaluate
from utils.utils import *
def train(model, train_data_loader, dev_data_loader, saver, total_epoch, lr, log_path, start_epoch=0):
f_log = open(log_path, 'w')
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
max_dev_acc = 0
for epoch in range(start_epoch, start_epoch+total_epoch):
model.train()
total_loss = 0.
total_correct = 0
total_count = 0
for i, data in enumerate(train_data_loader):
context, question, option, _, answer, useful_feat = data
context, question, option, answer, useful_feat = put_to_cuda([context, question, option, answer, useful_feat])
optimizer.zero_grad()
output = model(context, question, option, useful_feat)
loss = criterion(output, answer)
loss.backward()
optimizer.step()
total_loss = total_loss + loss.detach().cpu().numpy()
_, predict = torch.max(output, 1)
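            # torch.max over dim 1 returns (max values, argmax); the argmax is
            # the predicted class index for each sample.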
total_correct += (predict == answer).sum().detach().cpu().numpy()
total_count += context.size()[0]
average_loss = total_loss/total_count
average_accuracy = total_correct/total_count
log = f'batch: {i}/{len(train_data_loader)}, train average loss: {average_loss}, train average accuracy: {average_accuracy}'
print_and_logging(f_log, log)
#evaluate on dev data
        print('start evaluating')
dev_acc = evaluate(model, dev_data_loader)
log = f'dev accuracy: {dev_acc}'
print_and_logging(f_log, log)
if max_dev_acc <= dev_acc:
max_dev_acc = dev_acc
log = f'save model'
print_and_logging(f_log, log)
#save model
state = {
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'train_average_loss': average_loss,
'train_average_accuracy': average_accuracy,
'dev_acc': dev_acc
}
name = f'epoch_{epoch}_dev_accuracy_{dev_acc}'
saver.save(state, name)
else:
log = f'higher loss!!!!!!'
print_and_logging(f_log, log)
log = 'training end, max dev acc: ' + str(max_dev_acc)
print_and_logging(f_log, log)
'''
#save model
if min_loss > average_loss:
min_loss = average_loss
log = f'average loss: {average_loss}, save model'
print_and_logging(f_log, log)
#save model
state = {
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'average_loss': average_loss,
'average_accuracy': average_accuracy
}
name = f'epoch_{epoch}_average_accuracy_{average_accuracy}'
saver.save(state, name)
else:
log = f'average loss: {average_loss}, higher loss!!!!!!'
print_and_logging(f_log, log)
'''
|
[
"evaluate.evaluate"
] |
[((292, 313), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (311, 313), True, 'import torch.nn as nn\n'), ((1605, 1637), 'evaluate.evaluate', 'evaluate', (['model', 'dev_data_loader'], {}), '(model, dev_data_loader)\n', (1613, 1637), False, 'from evaluate import evaluate\n'), ((1091, 1111), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (1100, 1111), False, 'import torch\n')]
|
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.data import DataLoader
from raft import (RAFT,
ThingsClassifier,
CentroidRegressor,
MotionClassifier,
BoundaryClassifier,
SpatialAffinityDecoder)
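# NOTE: BootRaft (used in train()) and MotionPropagator (used in load_model())
# are referenced below but never imported here; presumably they come from
# separate core modules in the full repository.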
from eisen import EISEN
import teachers
import core.utils.utils as utils
import evaluate
import datasets
from torch.utils.tensorboard import SummaryWriter
try:
from torch.cuda.amp import GradScaler
except:
# dummy GradScaler for PyTorch < 1.6
class GradScaler:
def __init__(self):
pass
def scale(self, loss):
return loss
def unscale_(self, optimizer):
pass
def step(self, optimizer):
optimizer.step()
def update(self):
pass
# exclude extremely large displacements
MAX_FLOW = 400
SUM_FREQ = 10
VAL_FREQ = 5000
# datasets without supervision
SELFSUP_DATASETS = ['robonet', 'dsr']
def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW, min_flow=0.5, pos_weight=1.0, pixel_thresh=None):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds)
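    # RAFT-style schedule: the per-iteration losses are summed with
    # exponentially increasing weights (gamma < 1), so later, more refined
    # predictions dominate the total loss.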
flow_loss = 0.0
    # exclude invalid pixels and extremely large displacements
mag = torch.sum(flow_gt**2, dim=1).sqrt()
if valid is None:
valid = (mag < max_flow)
else:
valid = (valid >= 0.5) & (mag < max_flow)
valid = valid.float()
num_px = valid.sum((-2,-1)).clamp(min=1)
if list(flow_gt.shape[-2:]) != list(flow_preds[-1].shape[-2:]):
_ds = lambda x: F.avg_pool2d(
x,
args.downsample_factor * args.teacher_downsample_factor,
stride=args.downsample_factor * args.teacher_downsample_factor)
else:
_ds = lambda x: x
if flow_preds[-1].shape[-3] == 1:
flow_gt = (mag[:,None] > min_flow).float()
pos_weight = torch.tensor([pos_weight], device=flow_gt.device)
loss_cls = nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='none')
loss_fn = lambda logits, labels: loss_cls(_ds(logits), labels)
else:
loss_fn = lambda logits, labels: (_ds(logits) - labels).abs()
assert flow_preds[-1].shape[-3] == 2, flow_preds[-1].shape
if pixel_thresh is not None:
print("pos px", flow_gt.sum((1,2,3)))
gt_weight = (flow_gt.sum((1,2,3), True) > pixel_thresh).float()
print("gt weight", gt_weight[:,0,0,0])
else:
gt_weight = 1.0
for i in range(n_predictions):
i_weight = gamma**(n_predictions - i - 1)
i_loss = loss_fn(flow_preds[i], flow_gt) * gt_weight
flow_loss += ((i_weight * valid[:,None] * i_loss).sum((-2,-1)) / num_px).mean()
metrics = {
'loss': flow_loss,
}
return flow_loss, metrics
def boundary_loss(boundary_preds,
boundary_target,
valid,
gamma=0.8,
boundary_scale=1.0,
orientation_scale=1.0,
pixel_thresh=None,
**kwargs):
n_predictions = len(boundary_preds)
b_loss = c_loss = loss = 0.0
# break up boundary_target
if boundary_target.shape[1] == 3:
b_target, c_target = boundary_target.split([1,2], 1)
else:
b_target, c_target, c_target_discrete = boundary_target.split([1,2,8], 1)
num_px = b_target.sum(dim=(-3,-2,-1)).clamp(min=1.)
if pixel_thresh is not None:
print("pos px", b_target.sum((1,2,3)))
gt_weight = (b_target.sum((1,2,3), True) > pixel_thresh).float()
print("gt weight", gt_weight[:,0,0,0])
else:
gt_weight = 1.0
def _split_preds(x):
dim = x.shape[-3]
if dim == 3:
return x.split([1,2], -3)
elif dim == 9:
c1, b, c2 = x.split([4,1,4], -3)
return b, torch.cat([c1, c2], -3)
b_loss_fn = nn.BCEWithLogitsLoss(reduction='none')
if boundary_preds[-1].shape[1] == 3:
c_loss_fn = lambda logits, labels: (logits - labels).abs().sum(1)
else:
c_loss_fn = nn.CrossEntropyLoss(reduction='none')
c_target = c_target_discrete.argmax(1)
ds = args.downsample_factor * args.teacher_downsample_factor
if list(b_target.shape[-2:]) != list(boundary_preds[-1].shape[-2:]):
_ds = lambda x: F.avg_pool2d(x, ds, stride=ds)
else:
_ds = lambda x:x
for i in range(n_predictions):
i_weight = gamma ** (n_predictions - i - 1)
b_pred, c_pred = _split_preds(_ds(boundary_preds[i]))
i_b_loss = (b_loss_fn(b_pred, b_target) * gt_weight).mean()
i_c_loss = (c_loss_fn(c_pred, c_target)[:,None] * b_target * gt_weight).sum((-3,-2,-1))
i_c_loss = (i_c_loss / num_px).mean()
b_loss += i_b_loss * i_weight
c_loss += i_c_loss * i_weight
i_loss = i_b_loss * boundary_scale +\
i_c_loss * orientation_scale
loss += i_loss * i_weight
metrics = {
'loss': loss,
'b_loss': b_loss,
'c_loss': c_loss
}
return loss, metrics
def motion_loss(motion_preds, errors, valid, gamma=0.8, loss_scale=1.0):
n_predictions = len(motion_preds)
loss = 0.0
errors_s, errors_m = errors.split([1,1], -3)
def loss_fn(preds):
p_motion = torch.sigmoid(preds)
return (p_motion*errors_m + (1-p_motion)*errors_s).mean()
for i in range(n_predictions):
i_weight = gamma**(n_predictions - i - 1)
i_loss = loss_fn(motion_preds[i])
loss += i_weight * i_loss * loss_scale
metrics = {'loss': loss.item()}
return loss, metrics
def centroids_loss(centroid_preds, target, valid, gamma=0.8):
"""
target is a [B,2,H,W] centroid offsets target, measured in pixels
valid is a [B,1,H,W] thingness mask that the model should also fit
"""
n_predictions = len(centroid_preds)
thing_loss = cent_loss = loss = 0.0
thingness = valid
num_px = thingness.sum((-2,-1)).clamp(min=1)
if list(target.shape[-2:]) != list(centroid_preds[-1].shape[-2:]):
_ds = lambda x: F.avg_pool2d(
x,
args.downsample_factor * args.teacher_downsample_factor,
stride=args.downsample_factor * args.teacher_downsample_factor)
else:
_ds = lambda x: x
thing_loss_cls = nn.BCEWithLogitsLoss(reduction='none')
thing_loss_fn = lambda logits, labels: thing_loss_cls(_ds(logits), labels)
cent_loss_fn = lambda logits, labels: (_ds(logits) - labels).abs()
for i in range(n_predictions):
i_weight = gamma**(n_predictions - i - 1)
cent_pred = centroid_preds[i]
if cent_pred.shape[1] == 3:
thing_pred, cent_pred = cent_pred.split([1,2], 1)
else:
thing_pred = None
i_cent_loss = (cent_loss_fn(cent_pred, target) * valid).sum((-2,-1)) / num_px
i_cent_loss = i_cent_loss.sum(1).mean()
cent_loss += i_cent_loss * i_weight
if thing_pred is None:
loss += i_cent_loss * i_weight
continue
i_thing_loss = thing_loss_fn(thing_pred, thingness).mean()
thing_loss += i_thing_loss * i_weight
loss += (i_cent_loss + i_thing_loss) * i_weight
metrics = {
'loss': loss,
'thing_loss': thing_loss,
'centroid_loss': cent_loss
}
return loss, metrics
def affinities_loss(affinity_preds, target, valid, gamma=0.8, loss_type='kl_div'):
B,K,H,W = affinity_preds[0].shape
assert target.shape[1] == K, target.shape
n_predictions = len(affinity_preds)
loss = 0.0
if list(target.shape[-2:]) != [H,W]:
_ds = lambda x: F.avg_pool2d(
x,
args.downsample_factor * args.teacher_downsample_factor,
stride=args.downsample_factor * args.teacher_downsample_factor)
else:
_ds = lambda x: x
if loss_type == 'binary_cross_entropy':
loss_cls = nn.BCEWithLogitsLoss(reduction='none')
loss_fn = lambda logits, labels: loss_cls(_ds(logits), labels)
radius = (int(np.sqrt(K)) - 1) // 2
loss_mask = utils.get_local_neighbors(
valid, radius=radius, invalid=0, to_image=True)[:,0]
loss_mask = torch.maximum(valid, loss_mask)
num_px = loss_mask.sum((-3,-2,-1)).clamp(min=1)
elif loss_type == 'kl_div':
raise NotImplementedError()
for i in range(n_predictions):
i_weight = gamma**(n_predictions - i - 1)
aff_pred = affinity_preds[i]
if loss_type == 'binary_cross_entropy':
i_loss = loss_fn(aff_pred, target) * loss_mask
i_loss = i_loss.sum((1,2,3)) / num_px
i_loss = i_loss.mean()
else:
raise NotImplementedError()
loss += i_loss * i_weight
metrics = {
'loss': loss
}
return loss, metrics
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
return optimizer, scheduler
class Logger:
def __init__(self, model, scheduler):
self.model = model
self.scheduler = scheduler
self.total_steps = args.restore_step
self.running_loss = {}
self.writer = None
def _print_training_status(self):
metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
training_str = "[{:6d}, {:6d}, {:6d}, {:10.7f}] ".format(
self.total_steps+1, self.epoch, self.step+1, self.scheduler.get_last_lr()[0])
metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
# print the training status
print(training_str + metrics_str)
if self.writer is None:
self.writer = SummaryWriter()
for k in self.running_loss:
self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
self.running_loss[k] = 0.0
def push(self, epoch, step, metrics):
self.epoch = epoch
self.step = step
self.total_steps += 1
for key in metrics:
if key not in self.running_loss:
self.running_loss[key] = 0.0
self.running_loss[key] += metrics[key]
if self.total_steps % SUM_FREQ == SUM_FREQ-1:
self._print_training_status()
self.running_loss = {}
def write_dict(self, results):
if self.writer is None:
self.writer = SummaryWriter()
for key in results:
self.writer.add_scalar(key, results[key], self.total_steps)
def close(self):
self.writer.close()
def train(args):
if args.model.lower() == 'bootraft':
model_cls = BootRaft
print("used BootRaft")
elif args.model.lower() == 'thingness' or args.model.lower() == 'occlusion':
model_cls = ThingsClassifier
print("used ThingnessClassifier")
elif args.model.lower() in ['centroids']:
model_cls = CentroidRegressor
print("used CentroidRegressor")
elif args.model.lower() == 'motion':
model_cls = MotionClassifier
print("used MotionClassifier")
elif args.model.lower() == 'boundary':
model_cls = BoundaryClassifier
print("used BoundaryClassifier")
elif args.model.lower() in ['flow', 'flow_centroids']:
model_cls = RAFT
print("used RAFT for %s" % args.model.lower())
elif args.model.lower() in ['affinities']:
model_cls = SpatialAffinityDecoder
print("used SpatialAffinityDecoder")
model = nn.DataParallel(model_cls(args), device_ids=args.gpus)
print("Parameter Count: %d" % count_parameters(model))
if args.restore_ckpt is not None:
did_load = model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
print(did_load, type(model.module).__name__, args.restore_ckpt)
model.cuda()
model.train()
## load a teacher model
stride = args.teacher_downsample_factor * args.downsample_factor
inp_size = {
'tdw': 512,
'movi_d': 256,
'movi_e': 256
}[args.stage]
target_net = nn.DataParallel(
teachers.BipartiteBootNet(
student_model_type=args.model.lower(),
static_path=args.static_ckpt,
static_params={
'stem_pool': (stride > 2),
'affinity_res': [inp_size // stride]*2
},
boot_paths={
'motion_path': args.motion_ckpt,
'boundary_path': args.boundary_ckpt,
'flow_path': args.flow_ckpt
},
downsample_factor=stride,
grouping_window=2,
static_resolution=args.static_resolution,
dynamic_resolution=args.dynamic_resolution
),
device_ids=args.gpus
).cuda().eval()
train_loader, epoch_size = datasets.fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = args.restore_step
scaler = GradScaler(enabled=args.mixed_precision)
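    # GradScaler degrades to a no-op when mixed precision is disabled (and to
    # the dummy shim defined above on PyTorch < 1.6).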
logger = Logger(model, scheduler)
VAL_FREQ = args.val_freq
add_noise = True
should_keep_training = True
epoch = 0
while should_keep_training:
epoch += 1
for i_batch in range(epoch_size // args.batch_size):
            t1 = time.time()  # time is already imported at module level
try:
                data_blob = next(iter(train_loader))
except StopIteration:
train_loader.dataset.reset_iterator()
                data_blob = next(iter(train_loader))
except Exception as e:
print("skipping step %d due to %s" % (total_steps, e))
total_steps += 1
continue
optimizer.zero_grad()
image1, image2 = [x.cuda() for x in data_blob[:2]]
valid = None
flow_predictions = model(image1, image2, iters=args.iters)
## get the self-supervision
teacher_inp = torch.stack([
image1, image2], 1)
targets = target_net(
video=teacher_inp,
boot_params={
'motion_iters': args.motion_iters,
'boundary_iters': args.boundary_iters,
'flow_iters': args.flow_iters,
'bootstrap': args.bootstrap
},
static_params={
'local_window_size': args.affinity_kernel_size,
'to_image': True
},
mask_with_motion=args.motion_mask_target
)
if args.model.lower() in ['flow', 'flow_centroids']:
target = targets
elif args.model.lower() in ['centroids']:
target, valid = targets
elif args.model.lower() in ['affinities', 'eisen']:
target, valid = targets
print("TARGET SHAPE", target.shape, args.model.lower())
print("VALID SHAPE", (valid.shape if valid is not None else None))
if len(target.shape) == 5:
target = target.squeeze(1)
## compute loss
if args.model.lower() in ['flow', 'flow_centroids']:
loss, metrics = sequence_loss(flow_predictions, target, valid, args.gamma, pos_weight=args.pos_weight, pixel_thresh=args.pixel_thresh)
elif args.model.lower() in ['thingness', 'centroids']:
loss, metrics = centroids_loss(flow_predictions, target, valid, args.gamma)
elif args.model.lower() in ['affinities', 'eisen']:
loss, metrics = affinities_loss(flow_predictions, target, valid, args.gamma, loss_type=args.affinity_loss_type)
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
scaler.step(optimizer)
scheduler.step()
scaler.update()
logger.push(epoch, i_batch + 1, metrics)
if total_steps % VAL_FREQ == VAL_FREQ - 1:
PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
torch.save(model.state_dict(), PATH)
results = {}
for val_dataset in args.validation:
if val_dataset == 'chairs':
results.update(evaluate.validate_chairs(model.module))
elif val_dataset == 'sintel':
results.update(evaluate.validate_sintel(model.module))
elif val_dataset == 'kitti':
results.update(evaluate.validate_kitti(model.module))
logger.write_dict(results)
model.train()
if args.stage in ['sintel']:
model.module.freeze_bn()
total_steps += 1
t2 = time.time()
print("step time", i_batch, t2-t1)
if total_steps > args.num_steps:
should_keep_training = False
break
logger.close()
PATH = 'checkpoints/%s.pth' % args.name
torch.save(model.state_dict(), PATH)
return PATH
def get_args(cmd=None):
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='raft', help="name your experiment")
parser.add_argument('--stage', default="chairs", help="determines which dataset to use for training")
parser.add_argument('--split', type=str, default='train')
parser.add_argument('--dataset_dir', type=str, default='/data2/honglinc/')
parser.add_argument('--dataset_names', type=str, nargs='+')
parser.add_argument('--train_split', type=str, default='all')
parser.add_argument('--flow_gap', type=int, default=1)
parser.add_argument('--filepattern', type=str, default="*", help="which files to train on tdw")
parser.add_argument('--test_filepattern', type=str, default="*9", help="which files to val on tdw")
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--validation', type=str, nargs='+')
parser.add_argument('--val_freq', type=int, default=5000, help='validation and checkpoint frequency')
parser.add_argument('--lr', type=float, default=0.00002)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--restore_step', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--gpus', type=int, nargs='+', default=[0])
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--iters', type=int, default=12)
parser.add_argument('--corr_levels', type=int, default=4)
parser.add_argument('--corr_radius', type=int, default=4)
parser.add_argument('--gate_stride', type=int, default=2)
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
parser.add_argument('--pos_weight', type=float, default=1.0, help='weight for positive bce samples')
parser.add_argument('--add_noise', action='store_true')
parser.add_argument('--no_aug', action='store_true')
parser.add_argument('--full_playroom', action='store_true')
parser.add_argument('--static_coords', action='store_true')
parser.add_argument('--max_frame', type=int, default=5)
## model class
parser.add_argument('--model', type=str, default='RAFT', help='Model class')
parser.add_argument('--predict_mask', action='store_true', help='Whether to predict a thingness mask')
parser.add_argument('--bootstrap', action='store_true', help='whether to bootstrap')
parser.add_argument('--teacher_ckpt', help='checkpoint for a pretrained RAFT. If None, use GT')
parser.add_argument('--teacher_iters', type=int, default=24)
parser.add_argument('--motion_iters', type=int, default=12)
parser.add_argument('--boundary_iters', type=int, default=12)
parser.add_argument('--flow_iters', type=int, default=12)
parser.add_argument('--motion_ckpt', help='checkpoint for a pretrained motion model')
parser.add_argument('--boundary_ckpt', help='checkpoint for a pretrained boundary model')
parser.add_argument('--flow_ckpt', help='checkpoint for a pretrained boundary model')
parser.add_argument('--static_ckpt', help='checkpoint for a pretrained eisen model')
# BBNet teacher params
parser.add_argument('--static_resolution', type=int, default=4)
parser.add_argument('--dynamic_resolution', type=int, default=3)
parser.add_argument('--affinity_kernel_size', type=int, default=None)
parser.add_argument('--motion_mask_target', action='store_true')
# motion propagation
parser.add_argument('--diffusion_target', action='store_true')
parser.add_argument('--orientation_type', default='classification')
parser.add_argument('--rgb_flow', action='store_true')
parser.add_argument('--boundary_flow', action='store_true')
parser.add_argument('--separate_boundary_models', action='store_true')
parser.add_argument('--zscore_target', action='store_true')
parser.add_argument('--downsample_factor', type=int, default=2)
parser.add_argument('--teacher_downsample_factor', type=int, default=1)
parser.add_argument('--patch_radius', type=int, default=0)
parser.add_argument('--motion_thresh', type=float, default=None)
parser.add_argument('--boundary_thresh', type=float, default=None)
parser.add_argument('--target_thresh', type=float, default=0.75)
parser.add_argument('--pixel_thresh', type=int, default=None)
parser.add_argument('--positive_thresh', type=float, default=0.4)
parser.add_argument('--negative_thresh', type=float, default=0.1)
parser.add_argument('--affinity_radius', type=int, default=12)
parser.add_argument('--affinity_loss_type', type=str, default='kl_div')
parser.add_argument('--static_input', action='store_true')
parser.add_argument('--affinity_nonlinearity', type=str, default='sigmoid')
parser.add_argument('--num_propagation_iters', type=int, default=200)
parser.add_argument('--num_samples', type=int, default=8)
parser.add_argument('--num_sample_points', type=int, default=2**14)
parser.add_argument('--predict_every', type=int, default=5)
parser.add_argument('--binarize_motion', action='store_true')
parser.add_argument('--use_motion_loss', action='store_true')
parser.add_argument('--loss_scale', type=float, default=1.0)
parser.add_argument('--scale_centroids', action='store_true')
parser.add_argument('--training_frames', help="a JSON file of frames to train from")
if cmd is None:
args = parser.parse_args()
print(args)
else:
args = parser.parse_args(cmd)
return args
def load_model(load_path,
model_class=None,
small=False,
cuda=False,
train=False,
freeze_bn=False,
**kwargs):
path = Path(load_path) if load_path else None
def _get_model_class(name):
cls = None
if 'bootraft' in name:
cls = BootRaft
elif 'raft' in name:
cls = RAFT
elif ('thing' in name) or ('occlusion' in name):
cls = ThingsClassifier
elif 'centroid' in name:
cls = CentroidRegressor
elif 'motion' in name:
cls = MotionClassifier
elif 'prop' in name:
cls = MotionPropagator
elif 'boundary' in name:
cls = BoundaryClassifier
else:
raise ValueError("Couldn't identify a model class associated with %s" % name)
return cls
if model_class is None:
cls = _get_model_class(path.name)
else:
cls = _get_model_class(model_class)
assert cls is not None, "Wasn't able to infer model class"
## get the args
args = get_args("")
if small:
args.small = True
for k,v in kwargs.items():
args.__setattr__(k,v)
# build model
model = nn.DataParallel(cls(args), device_ids=args.gpus)
if load_path is not None:
did_load = model.load_state_dict(torch.load(load_path), strict=False)
print(did_load, type(model.module).__name__)
if cuda:
model.cuda()
model.train(train)
if freeze_bn:
model.module.freeze_bn()
return model
if __name__ == '__main__':
args = get_args()
torch.manual_seed(1234)
np.random.seed(1234)
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
train(args)
|
[
"evaluate.validate_chairs",
"evaluate.validate_kitti",
"evaluate.validate_sintel"
] |
[((59, 82), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (74, 82), False, 'import sys\n'), ((4252, 4290), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (4272, 4290), True, 'import torch.nn as nn\n'), ((6681, 6719), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (6701, 6719), True, 'import torch.nn as nn\n'), ((9535, 9674), 'torch.optim.lr_scheduler.OneCycleLR', 'optim.lr_scheduler.OneCycleLR', (['optimizer', 'args.lr', '(args.num_steps + 100)'], {'pct_start': '(0.05)', 'cycle_momentum': '(False)', 'anneal_strategy': '"""linear"""'}), "(optimizer, args.lr, args.num_steps + 100,\n pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')\n", (9564, 9674), True, 'import torch.optim as optim\n'), ((13551, 13582), 'datasets.fetch_dataloader', 'datasets.fetch_dataloader', (['args'], {}), '(args)\n', (13576, 13582), False, 'import datasets\n'), ((13689, 13729), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {'enabled': 'args.mixed_precision'}), '(enabled=args.mixed_precision)\n', (13699, 13729), False, 'from torch.cuda.amp import GradScaler\n'), ((17896, 17921), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17919, 17921), False, 'import argparse\n'), ((25522, 25545), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (25539, 25545), False, 'import torch\n'), ((25550, 25570), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (25564, 25570), True, 'import numpy as np\n'), ((2254, 2303), 'torch.tensor', 'torch.tensor', (['[pos_weight]'], {'device': 'flow_gt.device'}), '([pos_weight], device=flow_gt.device)\n', (2266, 2303), False, 'import torch\n'), ((2323, 2384), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'pos_weight': 'pos_weight', 'reduction': '"""none"""'}), "(pos_weight=pos_weight, reduction='none')\n", (2343, 2384), True, 'import torch.nn as nn\n'), ((4436, 4473), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (4455, 4473), True, 'import torch.nn as nn\n'), ((5657, 5677), 'torch.sigmoid', 'torch.sigmoid', (['preds'], {}), '(preds)\n', (5670, 5677), False, 'import torch\n'), ((8286, 8324), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (8306, 8324), True, 'import torch.nn as nn\n'), ((8572, 8603), 'torch.maximum', 'torch.maximum', (['valid', 'loss_mask'], {}), '(valid, loss_mask)\n', (8585, 8603), False, 'import torch\n'), ((24081, 24096), 'pathlib.Path', 'Path', (['load_path'], {}), '(load_path)\n', (24085, 24096), False, 'from pathlib import Path\n'), ((25583, 25611), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (25596, 25611), False, 'import os\n'), ((25621, 25644), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (25629, 25644), False, 'import os\n'), ((1613, 1643), 'torch.sum', 'torch.sum', (['(flow_gt ** 2)'], {'dim': '(1)'}), '(flow_gt ** 2, dim=1)\n', (1622, 1643), False, 'import torch\n'), ((1932, 2072), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(args.downsample_factor * args.teacher_downsample_factor)'], {'stride': '(args.downsample_factor * args.teacher_downsample_factor)'}), '(x, args.downsample_factor * args.teacher_downsample_factor,\n stride=args.downsample_factor * args.teacher_downsample_factor)\n', (1944, 2072), True, 'import torch.nn.functional as F\n'), ((4684, 4714), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', 'ds'], {'stride': 'ds'}), '(x, ds, stride=ds)\n', (4696, 4714), True, 'import torch.nn.functional as F\n'), ((6449, 6589), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(args.downsample_factor * args.teacher_downsample_factor)'], {'stride': '(args.downsample_factor * args.teacher_downsample_factor)'}), '(x, args.downsample_factor * args.teacher_downsample_factor,\n stride=args.downsample_factor * args.teacher_downsample_factor)\n', (6461, 6589), True, 'import torch.nn.functional as F\n'), ((8012, 8152), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(args.downsample_factor * args.teacher_downsample_factor)'], {'stride': '(args.downsample_factor * args.teacher_downsample_factor)'}), '(x, args.downsample_factor * args.teacher_downsample_factor,\n stride=args.downsample_factor * args.teacher_downsample_factor)\n', (8024, 8152), True, 'import torch.nn.functional as F\n'), ((8460, 8533), 'core.utils.utils.get_local_neighbors', 'utils.get_local_neighbors', (['valid'], {'radius': 'radius', 'invalid': '(0)', 'to_image': '(True)'}), '(valid, radius=radius, invalid=0, to_image=True)\n', (8485, 8533), True, 'import core.utils.utils as utils\n'), ((10440, 10455), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (10453, 10455), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((11141, 11156), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (11154, 11156), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((12441, 12470), 'torch.load', 'torch.load', (['args.restore_ckpt'], {}), '(args.restore_ckpt)\n', (12451, 12470), False, 'import torch\n'), ((14019, 14030), 'time.time', 'time.time', ([], {}), '()\n', (14028, 14030), False, 'import time\n'), ((14670, 14702), 'torch.stack', 'torch.stack', (['[image1, image2]', '(1)'], {}), '([image1, image2], 1)\n', (14681, 14702), False, 'import torch\n'), ((17564, 17575), 'time.time', 'time.time', ([], {}), '()\n', (17573, 17575), False, 'import time\n'), ((25251, 25272), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (25261, 25272), False, 'import torch\n'), ((4211, 4234), 'torch.cat', 'torch.cat', (['[c1, c2]', '(-3)'], {}), '([c1, c2], -3)\n', (4220, 4234), False, 'import torch\n'), ((8418, 8428), 'numpy.sqrt', 'np.sqrt', (['K'], {}), '(K)\n', (8425, 8428), True, 'import numpy as np\n'), ((17056, 17094), 'evaluate.validate_chairs', 'evaluate.validate_chairs', (['model.module'], {}), '(model.module)\n', (17080, 17094), False, 'import evaluate\n'), ((17185, 17223), 'evaluate.validate_sintel', 'evaluate.validate_sintel', (['model.module'], {}), '(model.module)\n', (17209, 17223), False, 'import evaluate\n'), ((17313, 17350), 'evaluate.validate_kitti', 'evaluate.validate_kitti', (['model.module'], {}), '(model.module)\n', (17336, 17350), False, 'import evaluate\n')]
|
import torch
import os
import numpy as np
import tqdm
import matplotlib.pyplot as plt
from PIL import Image
from dataset.NAIC_dataset import get_loaders, get_baseline_loader
from config import get_config
from models.model import build_model, get_model
from models.sync_bn.batchnorm import convert_model
from evaluate import euclidean_dist, re_rank, cos_dist
from solver import Solver
class Demo(object):
def __init__(self, config, num_classes, pth_path, valid_dataloader, num_query):
"""
        :param config: configuration parameters
        :param num_classes: number of classes; type: int
        :param pth_path: path to the weight file; type: str
        :param valid_dataloader: DataLoader of the validation set
        :param num_query: number of query samples; type: int
"""
self.num_classes = num_classes
self.model_name = config.model_name
self.last_stride = config.last_stride
self.dist = config.dist
self.num_gpus = torch.cuda.device_count()
print('Using {} GPUS'.format(self.num_gpus))
        # Load the model; wrap it in DataParallel whenever a GPU is available,
        # and convert to synchronized BN when more than one GPU is present
self.model = get_model(self.model_name, self.num_classes, self.last_stride)
if torch.cuda.is_available():
self.model = torch.nn.DataParallel(self.model)
if self.num_gpus > 1:
self.model = convert_model(self.model)
self.model = self.model.cuda()
        # Instantiate the Solver class, which implements the various helper routines
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.solver = Solver(self.model, self.device)
        # Load the weight matrix
self.model = self.solver.load_checkpoint(pth_path)
self.model.eval()
        # For each query sample, retrieve the num_choose nearest samples from the gallery
self.num_choose = 10
self.num_query = num_query
self.valid_dataloader = valid_dataloader
self.demo_results_path = './results/valid'
if not os.path.exists(self.demo_results_path):
os.makedirs(self.demo_results_path)
def get_result(self, show):
"""
        :param show: whether to display the retrieval results
:return None
"""
tbar = tqdm.tqdm(self.valid_dataloader)
features_all, labels_all, paths_all = [], [], []
with torch.no_grad():
for i, (images, labels, paths) in enumerate(tbar):
                # Run the network's forward pass
# features = self.solver.forward(images)[-1]
features = self.solver.tta(images)
features_all.append(features.detach().cpu())
labels_all.extend(labels)
paths_all.extend(paths)
features_all = torch.cat(features_all, dim=0)
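        # The dataloader yields queries first: the first num_query rows are
        # query features, the remainder are gallery features.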
query_features = features_all[:self.num_query]
gallery_features = features_all[self.num_query:]
query_lables = np.array(labels_all[:self.num_query])
gallery_labels = np.array(labels_all[self.num_query:])
query_paths = np.array(paths_all[:self.num_query])
gallery_paths = np.array(paths_all[self.num_query:])
if self.dist == 're_rank':
distmat = re_rank(query_features, gallery_features)
elif self.dist == 'cos_dist':
distmat = cos_dist(query_features, gallery_features)
elif self.dist == 'euclidean_dist':
distmat = euclidean_dist(query_features, gallery_features)
else:
            raise NotImplementedError("Not implemented: {}".format(self.dist))
for query_index, query_dist in enumerate(distmat):
choose_index = np.argsort(query_dist)[:self.num_choose]
query_path = query_paths[query_index]
gallery_path = gallery_paths[choose_index]
query_label = query_lables[query_index]
gallery_label = gallery_labels[choose_index]
self.show_result(query_path, gallery_path, query_label, gallery_label, 5, show)
def show_result(self, query_path, gallery_paths, query_label, gallery_labels, top_rank, show):
"""
        :param query_path: path of the query sample; type: str
        :param gallery_paths: paths of the retrieved samples; type: list
        :param query_label: label of the query sample; type: int
        :param gallery_labels: labels of the retrieved samples; type: list
        :param top_rank: number of top retrieved images to display; type: int
        :param show: whether to display the result; type: bool
:return None
"""
        # Convert the index into the sample name
query_image = Image.open(query_path)
plt.figure(figsize=(14, 10))
plt.subplot(1, top_rank + 1, 1)
plt.imshow(query_image)
plt.text(30, -10.0, query_path.split('/')[-1])
plt.text(30, -20.0, query_label)
for i, (gallery_path, gallery_label) in enumerate(zip(gallery_paths, gallery_labels)):
if i == top_rank:
break
gallery_image = Image.open(gallery_path)
plt.subplot(1, top_rank + 1, i + 1 + 1)
plt.imshow(gallery_image)
plt.text(30, -20.0, gallery_label)
plt.text(30, -10.0, gallery_path.split('/')[-1])
plt.savefig(os.path.join(self.demo_results_path, query_path.split('/')[-1]))
if show:
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
plt.close()
if __name__ == "__main__":
demo_on_baseline = False
config = get_config()
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
train_dataset_root = os.path.join(config.dataset_root, '初赛训练集')
_, valid_dataloader_folds, num_query_folds, num_classes_folds = get_loaders(
train_dataset_root,
config.n_splits,
config.batch_size,
config.num_instances,
config.num_workers,
config.augmentation_flag,
config.erase_prob,
config.gray_prob,
mean, std
)
for fold_index, [valid_loader, num_query, num_classes] in enumerate(zip(valid_dataloader_folds, num_query_folds, num_classes_folds)):
if fold_index not in config.selected_fold:
continue
num_train_classes = num_classes[0]
pth_path = os.path.join(config.save_path, config.model_name, '{}_fold{}_best.pth'.format(config.model_name, fold_index))
        # Note: because the class counts differ between folds, the models differ as well, so TrainVal must be instantiated per fold
if demo_on_baseline:
_, num_train_classes = get_baseline_loader(train_dataset_root, config.batch_size, config.num_workers,
True, mean, std)
pth_path = os.path.join(config.save_path, config.model_name,
'{}.pth'.format(config.model_name))
create_submission = Demo(config, num_train_classes, pth_path, valid_loader, num_query)
create_submission.get_result(show=True)
|
[
"evaluate.euclidean_dist",
"evaluate.cos_dist",
"evaluate.re_rank"
] |
[((5154, 5166), 'config.get_config', 'get_config', ([], {}), '()\n', (5164, 5166), False, 'from config import get_config\n'), ((5257, 5299), 'os.path.join', 'os.path.join', (['config.dataset_root', '"""初赛训练集"""'], {}), "(config.dataset_root, '初赛训练集')\n", (5269, 5299), False, 'import os\n'), ((5369, 5562), 'dataset.NAIC_dataset.get_loaders', 'get_loaders', (['train_dataset_root', 'config.n_splits', 'config.batch_size', 'config.num_instances', 'config.num_workers', 'config.augmentation_flag', 'config.erase_prob', 'config.gray_prob', 'mean', 'std'], {}), '(train_dataset_root, config.n_splits, config.batch_size, config.\n num_instances, config.num_workers, config.augmentation_flag, config.\n erase_prob, config.gray_prob, mean, std)\n', (5380, 5562), False, 'from dataset.NAIC_dataset import get_loaders, get_baseline_loader\n'), ((896, 921), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (919, 921), False, 'import torch\n'), ((1061, 1123), 'models.model.get_model', 'get_model', (['self.model_name', 'self.num_classes', 'self.last_stride'], {}), '(self.model_name, self.num_classes, self.last_stride)\n', (1070, 1123), False, 'from models.model import build_model, get_model\n'), ((1135, 1160), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1158, 1160), False, 'import torch\n'), ((1490, 1521), 'solver.Solver', 'Solver', (['self.model', 'self.device'], {}), '(self.model, self.device)\n', (1496, 1521), False, 'from solver import Solver\n'), ((2054, 2086), 'tqdm.tqdm', 'tqdm.tqdm', (['self.valid_dataloader'], {}), '(self.valid_dataloader)\n', (2063, 2086), False, 'import tqdm\n'), ((2545, 2575), 'torch.cat', 'torch.cat', (['features_all'], {'dim': '(0)'}), '(features_all, dim=0)\n', (2554, 2575), False, 'import torch\n'), ((2712, 2749), 'numpy.array', 'np.array', (['labels_all[:self.num_query]'], {}), '(labels_all[:self.num_query])\n', (2720, 2749), True, 'import numpy as np\n'), ((2775, 2812), 'numpy.array', 'np.array', (['labels_all[self.num_query:]'], {}), '(labels_all[self.num_query:])\n', (2783, 2812), True, 'import numpy as np\n'), ((2836, 2872), 'numpy.array', 'np.array', (['paths_all[:self.num_query]'], {}), '(paths_all[:self.num_query])\n', (2844, 2872), True, 'import numpy as np\n'), ((2897, 2933), 'numpy.array', 'np.array', (['paths_all[self.num_query:]'], {}), '(paths_all[self.num_query:])\n', (2905, 2933), True, 'import numpy as np\n'), ((4211, 4233), 'PIL.Image.open', 'Image.open', (['query_path'], {}), '(query_path)\n', (4221, 4233), False, 'from PIL import Image\n'), ((4242, 4270), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 10)'}), '(figsize=(14, 10))\n', (4252, 4270), True, 'import matplotlib.pyplot as plt\n'), ((4279, 4310), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(top_rank + 1)', '(1)'], {}), '(1, top_rank + 1, 1)\n', (4290, 4310), True, 'import matplotlib.pyplot as plt\n'), ((4319, 4342), 'matplotlib.pyplot.imshow', 'plt.imshow', (['query_image'], {}), '(query_image)\n', (4329, 4342), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4438), 'matplotlib.pyplot.text', 'plt.text', (['(30)', '(-20.0)', 'query_label'], {}), '(30, -20.0, query_label)\n', (4414, 4438), True, 'import matplotlib.pyplot as plt\n'), ((5071, 5082), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5080, 5082), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1220), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.model'], {}), '(self.model)\n', (1208, 1220), False, 'import torch\n'), ((1840, 1878), 'os.path.exists', 'os.path.exists', (['self.demo_results_path'], {}), '(self.demo_results_path)\n', (1854, 1878), False, 'import os\n'), ((1892, 1927), 'os.makedirs', 'os.makedirs', (['self.demo_results_path'], {}), '(self.demo_results_path)\n', (1903, 1927), False, 'import os\n'), ((2157, 2172), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2170, 2172), False, 'import torch\n'), ((2992, 3033), 'evaluate.re_rank', 're_rank', (['query_features', 'gallery_features'], {}), '(query_features, gallery_features)\n', (2999, 3033), False, 'from evaluate import euclidean_dist, re_rank, cos_dist\n'), ((4614, 4638), 'PIL.Image.open', 'Image.open', (['gallery_path'], {}), '(gallery_path)\n', (4624, 4638), False, 'from PIL import Image\n'), ((4651, 4690), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(top_rank + 1)', '(i + 1 + 1)'], {}), '(1, top_rank + 1, i + 1 + 1)\n', (4662, 4690), True, 'import matplotlib.pyplot as plt\n'), ((4703, 4728), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gallery_image'], {}), '(gallery_image)\n', (4713, 4728), True, 'import matplotlib.pyplot as plt\n'), ((4741, 4775), 'matplotlib.pyplot.text', 'plt.text', (['(30)', '(-20.0)', 'gallery_label'], {}), '(30, -20.0, gallery_label)\n', (4749, 4775), True, 'import matplotlib.pyplot as plt\n'), ((4964, 4993), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (4991, 4993), True, 'import matplotlib.pyplot as plt\n'), ((5052, 5062), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5060, 5062), True, 'import matplotlib.pyplot as plt\n'), ((6128, 6228), 'dataset.NAIC_dataset.get_baseline_loader', 'get_baseline_loader', (['train_dataset_root', 'config.batch_size', 'config.num_workers', '(True)', 'mean', 'std'], {}), '(train_dataset_root, config.batch_size, config.\n num_workers, True, mean, std)\n', (6147, 6228), False, 'from dataset.NAIC_dataset import get_loaders, get_baseline_loader\n'), ((1284, 1309), 'models.sync_bn.batchnorm.convert_model', 'convert_model', (['self.model'], {}), '(self.model)\n', (1297, 1309), False, 'from models.sync_bn.batchnorm import convert_model\n'), ((1430, 1455), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1453, 1455), False, 'import torch\n'), ((3094, 3136), 'evaluate.cos_dist', 'cos_dist', (['query_features', 'gallery_features'], {}), '(query_features, gallery_features)\n', (3102, 3136), False, 'from evaluate import euclidean_dist, re_rank, cos_dist\n'), ((3412, 3434), 'numpy.argsort', 'np.argsort', (['query_dist'], {}), '(query_dist)\n', (3422, 3434), True, 'import numpy as np\n'), ((3203, 3251), 'evaluate.euclidean_dist', 'euclidean_dist', (['query_features', 'gallery_features'], {}), '(query_features, gallery_features)\n', (3217, 3251), False, 'from evaluate import euclidean_dist, re_rank, cos_dist\n')]
|
#pipenv run
import argparse
import os
import math
import json
from datetime import datetime
from models import models
from db import db, Result
from uuid import uuid4, UUID
from keras import backend as K
import numpy as np
import evaluate
from data_gen import data
from config import config
def test_model(model, train, validation, test, label_form):
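    """Evaluate `model` on the train, validation, and test generators and
    return a dict of losses, accuracies, probabilities, and labels."""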
loss, accuracy = model.evaluate_generator(validation, steps=math.ceil(len(validation)/config.BATCH_SIZE))
train_loss, train_accuracy = model.evaluate_generator(train, steps=math.ceil(len(train)/config.BATCH_SIZE))
test_loss, test_accuracy = model.evaluate_generator(test, steps=math.ceil(len(test)/config.BATCH_SIZE))
train.reset()
validation.reset()
test.reset()
results = evaluate.get_results(model, validation)
labels = list(evaluate.get_labels(validation))
test_results = evaluate.get_results(model, test)
test_labels = list(evaluate.get_labels(test))
if label_form == "outcome_3":
probabilities = list(results)
test_probabilities = list(test_results)
else:
probabilities = list(evaluate.transform_binary_probabilities(results))
test_probabilities = list(evaluate.transform_binary_probabilities(test_results))
train.reset()
validation.reset()
test.reset()
return {
"train_accuracy": float(train_accuracy),
"train_loss": float(train_loss),
"accuracy": float(accuracy),
"loss": float(loss),
"test_accuracy": float(test_accuracy),
"test_loss": float(test_loss),
"probabilities": probabilities,
"labels": labels,
"test_probabilities": test_probabilities,
"test_labels":test_labels,
}
def characterize_data(data):
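    """Return a mapping from class name to the number of examples with that class."""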
unique, counts = np.unique(data.classes, return_counts=True)
index_to_count = dict(zip(unique, counts))
characterization = { str(c): index_to_count[data.class_indices[c]] for c in data.class_indices }
return characterization
def run(model, description, input_form, label_form="outcome", split_id=None, loaded_data=None, hyperparameters=dict()):
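    """Train a single model instance, evaluate it on all three splits, and
    persist the results to the database."""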
run_id = uuid4()
if split_id is None:
split_id = run_id
history = model.run(run_id, mode='normal', input_form=input_form, loaded_data=loaded_data, label_form=label_form, hyperparameters=hyperparameters)
#K.clear_session()
model_instance = evaluate.load(os.path.join(
config.MODEL_DIR,
"{}-{}.h5".format(str(run_id), model.MODEL_NAME),
))
if loaded_data is None:
train, validation, test = data(split_id, input_form=input_form, label_form=label_form)
else:
train, validation, test = loaded_data
train.reset()
validation.reset()
test.reset()
train_data_stats = characterize_data(train)
validation_data_stats = characterize_data(validation)
test_data_stats = characterize_data(test)
results = test_model(model_instance, train, validation, test, label_form)
    train.reset()
validation.reset()
test.reset()
result = Result(
model.MODEL_NAME,
str(run_id),
str(split_id),
train_data_stats,
validation_data_stats,
test_data_stats,
description,
input_form,
label=label_form,
hyperparameters=hyperparameters,
history=history,
**results
)
db.session.add(result)
db.session.commit()
def explode_parameters(parameters):
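    """Recursively expand list-valued hyperparameters into one dict per
    combination (the cartesian product of all listed values)."""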
all_parameters = []
for p in parameters.keys():
if type(parameters[p]) is list:
for value in parameters[p]:
new_parameters = dict(parameters)
new_parameters[p] = value
all_parameters += explode_parameters(new_parameters)
break
if all_parameters:
return all_parameters
return [parameters]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
type=str,
required=True,
help='which model to run (see models.py)')
parser.add_argument(
'--description',
type=str,
help='brief description of the run and its differences')
parser.add_argument(
'--form',
type=str,
help='input form (see data.py for more information)',
default=config.INPUT_FORM,
)
parser.add_argument(
'--label',
type=str,
help='label form (see data.py for more information)',
default="outcome",
)
parser.add_argument(
'--split',
type=str,
help='UUID for split',
default=None,
)
parser.add_argument(
'--hyperparameters',
type=str,
help='hyperparameters file',
required=True,
)
parser.add_argument(
'--trials',
type=int,
default=config.TRIALS,
help='how many times to run')
FLAGS, unparsed = parser.parse_known_args()
with open(FLAGS.hyperparameters) as f:
parameters = json.load(f)
parameters = explode_parameters(parameters)
model = models[FLAGS.model]
split = FLAGS.split
if split is None:
split = uuid4()
else:
split = UUID(split)
training, validation, test = data(split, input_form=FLAGS.form, label_form=FLAGS.label)
for _ in range(FLAGS.trials):
for hyperparameters in parameters:
run(model, FLAGS.description, FLAGS.form, FLAGS.label, split, loaded_data=(training, validation, test), hyperparameters=hyperparameters)
K.clear_session()
print('The split id for this run ' + FLAGS.description + ' is ' + str(split))
|
[
"evaluate.get_labels",
"evaluate.get_results",
"evaluate.transform_binary_probabilities"
] |
[((765, 804), 'evaluate.get_results', 'evaluate.get_results', (['model', 'validation'], {}), '(model, validation)\n', (785, 804), False, 'import evaluate\n'), ((876, 909), 'evaluate.get_results', 'evaluate.get_results', (['model', 'test'], {}), '(model, test)\n', (896, 909), False, 'import evaluate\n'), ((1786, 1829), 'numpy.unique', 'np.unique', (['data.classes'], {'return_counts': '(True)'}), '(data.classes, return_counts=True)\n', (1795, 1829), True, 'import numpy as np\n'), ((2140, 2147), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2145, 2147), False, 'from uuid import uuid4, UUID\n'), ((3400, 3422), 'db.db.session.add', 'db.session.add', (['result'], {}), '(result)\n', (3414, 3422), False, 'from db import db, Result\n'), ((3427, 3446), 'db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3444, 3446), False, 'from db import db, Result\n'), ((3917, 3942), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3940, 3942), False, 'import argparse\n'), ((5290, 5348), 'data_gen.data', 'data', (['split'], {'input_form': 'FLAGS.form', 'label_form': 'FLAGS.label'}), '(split, input_form=FLAGS.form, label_form=FLAGS.label)\n', (5294, 5348), False, 'from data_gen import data\n'), ((823, 854), 'evaluate.get_labels', 'evaluate.get_labels', (['validation'], {}), '(validation)\n', (842, 854), False, 'import evaluate\n'), ((933, 958), 'evaluate.get_labels', 'evaluate.get_labels', (['test'], {}), '(test)\n', (952, 958), False, 'import evaluate\n'), ((2582, 2642), 'data_gen.data', 'data', (['split_id'], {'input_form': 'input_form', 'label_form': 'label_form'}), '(split_id, input_form=input_form, label_form=label_form)\n', (2586, 2642), False, 'from data_gen import data\n'), ((5052, 5064), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5061, 5064), False, 'import json\n'), ((5211, 5218), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (5216, 5218), False, 'from uuid import uuid4, UUID\n'), ((5245, 5256), 'uuid.UUID', 'UUID', (['split'], {}), '(split)\n', (5249, 5256), False, 'from uuid import uuid4, UUID\n'), ((1124, 1172), 'evaluate.transform_binary_probabilities', 'evaluate.transform_binary_probabilities', (['results'], {}), '(results)\n', (1163, 1172), False, 'import evaluate\n'), ((1208, 1261), 'evaluate.transform_binary_probabilities', 'evaluate.transform_binary_probabilities', (['test_results'], {}), '(test_results)\n', (1247, 1261), False, 'import evaluate\n'), ((5587, 5604), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5602, 5604), True, 'from keras import backend as K\n')]
|
"""Train the model"""
import argparse
import datetime
import os
import megengine as mge
# mge.core.set_option("async_level", 0)
from megengine.optimizer import Adam, MultiStepLR, LRScheduler
from megengine.autodiff import GradManager
import megengine.distributed as dist
from tqdm import tqdm
import dataset.data_loader as data_loader
import model.net as net
from common import utils
from common.manager import Manager
from evaluate import evaluate
from loss.losses import compute_losses
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", default="experiments/experiment_omnet", help="Directory containing params.json")
parser.add_argument("--restore_file",
default=None,
help="Optional, name of the file in model_dir containing weights to reload before training")
parser.add_argument("-ow", "--only_weights", action="store_true", help="Only load model weights or load all train status.")
def train(model, manager: Manager, gm):
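    """Train `model` for one epoch over the train dataloader."""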
rank = dist.get_rank()
    # initialize the loss status and val/test status
manager.reset_loss_status()
# set model to training mode
model.train()
# Use tqdm for progress bar
if rank == 0:
t = tqdm(total=len(manager.dataloaders["train"]))
for i, data_batch in enumerate(manager.dataloaders["train"]):
# move to GPU if available
data_batch = utils.tensor_mge(data_batch)
        # training info string to print
print_str = manager.print_train_info()
with gm:
# compute model output and loss
output_batch = model(data_batch)
loss = compute_losses(output_batch, manager.params)
# update loss status and print current loss and average loss
manager.update_loss_status(loss=loss, split="train")
gm.backward(loss["total"])
# performs updates using calculated gradients
manager.optimizer.step().clear_grad()
manager.update_step()
if rank == 0:
manager.writer.add_scalar("Loss/train", manager.loss_status["total"].val, manager.step)
t.set_description(desc=print_str)
t.update()
if rank == 0:
t.close()
manager.scheduler.step()
manager.update_epoch()
def train_and_evaluate(model, manager: Manager):
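    """Run training for `params.num_epochs` epochs, evaluating after each one."""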
rank = dist.get_rank()
# reload weights from restore_file if specified
if args.restore_file is not None:
manager.load_checkpoints()
world_size = dist.get_world_size()
if world_size > 1:
dist.bcast_list_(model.parameters())
dist.bcast_list_(model.buffers())
gm = GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
for epoch in range(manager.params.num_epochs):
# compute number of batches in one epoch (one full pass over the training set)
train(model, manager, gm)
# Evaluate for one epoch on validation set
evaluate(model, manager)
        # Save best model weights according to params.major_metric
if rank == 0:
manager.check_best_save_last_checkpoints(save_latest_freq=100, save_best_after=200)
def main(params):
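    """Set up the logger, dataloaders, model, and optimizer, then start training."""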
# DTR support
# mge.dtr.eviction_threshold = "5GB"
# mge.dtr.enable()
# Set the logger
logger = utils.set_logger(os.path.join(params.model_dir, "train.log"))
# Set the tensorboard writer
tb_dir = os.path.join(params.model_dir, "summary")
os.makedirs(tb_dir, exist_ok=True)
writter = SummaryWriter(log_dir=tb_dir)
# fetch dataloaders
dataloaders = data_loader.fetch_dataloader(params)
# Define the model and optimizer
model = net.fetch_net(params)
optimizer = Adam(model.parameters(), lr=params.learning_rate)
scheduler = MultiStepLR(optimizer, milestones=[])
# initial status for checkpoint manager
manager = Manager(model=model,
optimizer=optimizer,
scheduler=scheduler,
params=params,
dataloaders=dataloaders,
writer=writter,
logger=logger)
# Train the model
utils.master_logger(logger, "Starting training for {} epoch(s)".format(params.num_epochs))
train_and_evaluate(model, manager)
if __name__ == "__main__":
# Load the parameters from json file
args = parser.parse_args()
json_path = os.path.join(args.model_dir, "params.json")
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
params.update(vars(args))
train_proc = dist.launcher(main) if mge.device.get_device_count("gpu") > 1 else main
train_proc(params)
|
[
"evaluate.evaluate"
] |
[((542, 567), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (565, 567), False, 'import argparse\n'), ((1046, 1061), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1059, 1061), True, 'import megengine.distributed as dist\n'), ((2349, 2364), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2362, 2364), True, 'import megengine.distributed as dist\n'), ((2508, 2529), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2527, 2529), True, 'import megengine.distributed as dist\n'), ((3477, 3518), 'os.path.join', 'os.path.join', (['params.model_dir', '"""summary"""'], {}), "(params.model_dir, 'summary')\n", (3489, 3518), False, 'import os\n'), ((3523, 3557), 'os.makedirs', 'os.makedirs', (['tb_dir'], {'exist_ok': '(True)'}), '(tb_dir, exist_ok=True)\n', (3534, 3557), False, 'import os\n'), ((3572, 3601), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'tb_dir'}), '(log_dir=tb_dir)\n', (3585, 3601), False, 'from tensorboardX import SummaryWriter\n'), ((3645, 3681), 'dataset.data_loader.fetch_dataloader', 'data_loader.fetch_dataloader', (['params'], {}), '(params)\n', (3673, 3681), True, 'import dataset.data_loader as data_loader\n'), ((3732, 3753), 'model.net.fetch_net', 'net.fetch_net', (['params'], {}), '(params)\n', (3745, 3753), True, 'import model.net as net\n'), ((3837, 3874), 'megengine.optimizer.MultiStepLR', 'MultiStepLR', (['optimizer'], {'milestones': '[]'}), '(optimizer, milestones=[])\n', (3848, 3874), False, 'from megengine.optimizer import Adam, MultiStepLR, LRScheduler\n'), ((3934, 4072), 'common.manager.Manager', 'Manager', ([], {'model': 'model', 'optimizer': 'optimizer', 'scheduler': 'scheduler', 'params': 'params', 'dataloaders': 'dataloaders', 'writer': 'writter', 'logger': 'logger'}), '(model=model, optimizer=optimizer, scheduler=scheduler, params=\n params, dataloaders=dataloaders, writer=writter, logger=logger)\n', (3941, 4072), False, 'from common.manager import Manager\n'), ((4475, 4518), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (4487, 4518), False, 'import os\n'), ((4530, 4555), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (4544, 4555), False, 'import os\n'), ((4629, 4652), 'common.utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (4641, 4652), False, 'from common import utils\n'), ((1422, 1450), 'common.utils.tensor_mge', 'utils.tensor_mge', (['data_batch'], {}), '(data_batch)\n', (1438, 1450), False, 'from common import utils\n'), ((3016, 3040), 'evaluate.evaluate', 'evaluate', (['model', 'manager'], {}), '(model, manager)\n', (3024, 3040), False, 'from evaluate import evaluate\n'), ((3385, 3428), 'os.path.join', 'os.path.join', (['params.model_dir', '"""train.log"""'], {}), "(params.model_dir, 'train.log')\n", (3397, 3428), False, 'import os\n'), ((4701, 4720), 'megengine.distributed.launcher', 'dist.launcher', (['main'], {}), '(main)\n', (4714, 4720), True, 'import megengine.distributed as dist\n'), ((1647, 1691), 'loss.losses.compute_losses', 'compute_losses', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (1661, 1691), False, 'from loss.losses import compute_losses\n'), ((2650, 2663), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2661, 2663), False, 'from megengine.autodiff import GradManager\n'), ((4724, 4758), 'megengine.device.get_device_count', 'mge.device.get_device_count', 
(['"""gpu"""'], {}), "('gpu')\n", (4751, 4758), True, 'import megengine as mge\n'), ((2718, 2747), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""SUM"""'], {}), "('SUM')\n", (2740, 2747), True, 'import megengine.distributed as dist\n')]
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Copyright 2020 The HuggingFace Team. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Uses some code from
# https://github.com/huggingface/transformers/blob/master/examples/seq2seq/finetune_trainer.py
import argparse
import configparser
import itertools
import json
import logging
import os
from collections import defaultdict
import torch
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoTokenizer, HfArgumentParser, AutoModelForSeq2SeqLM, Trainer
from arguments import ModelArguments, DataTrainingArguments, TrainingArguments
from datasets import load_dataset
from evaluate import evaluate, get_avg_results, print_results
from utils import get_episode_indices
def main():
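    """Parse the configuration, then train and/or evaluate over all episodes."""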
assert torch.cuda.is_available(), 'CUDA not available'
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('job')
parser.add_argument('-c', '--config_file', type=str, default='config.ini', help='configuration file')
parser.add_argument('-e', '--eval', action='store_true', default=False, help='run evaluation only')
parser.add_argument('--evaluate_checkpoints', action='store_true', default=False,
help='evaluate intermediate checkpoints instead of the final model')
parser.add_argument('--evaluate_last_checkpoint', action='store_true', default=False,
help='evaluate the last intermediate checkpoint instead of the final model')
parser.add_argument('--evaluate_checkpoint_in_dir', type=str, default=None,
help='evaluate the checkpoint in the given directory')
parser.add_argument('-a', '--evaluate_all', action='store_true', default=False,
help='evaluate intermediate checkpoints together with the final model')
parser.add_argument('-g', '--gpu', type=int, default=0, help='which GPU to use for evaluation')
parser.add_argument('-v', '--verbose_results', action='store_true', default=False,
help='print results for each evaluation run')
args, remaining_args = parser.parse_known_args()
# read config file
config = configparser.ConfigParser(allow_no_value=False)
config.read(args.config_file)
job = args.job
assert job in config
# set defaults for other arguments
defaults = {
'overwrite_output_dir': True,
'overwrite_cache': True,
'per_device_eval_batch_size': 4,
'learning_rate': 5e-4,
'logging_steps': 0, # do not log by default
'save_steps': 0, # do not save checkpoints by default
}
# the config file gives default values for the command line arguments
defaults.update(dict(config.items(job)))
for key in defaults:
if defaults[key] in ['True', 'False']:
# interpret True/False as boolean
defaults[key] = config.getboolean(job, key)
if defaults[key] == 'None':
# interpret as None
defaults[key] = None
if args.eval:
# run evaluation only
defaults['do_train'] = False
# parse remaining arguments and divide them into three categories
second_parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
second_parser.set_defaults(**defaults)
model_args, data_args, training_args = second_parser.parse_args_into_dataclasses(remaining_args)
try:
os.mkdir(training_args.output_dir)
except FileExistsError:
pass
# process arguments related to max length
if data_args.max_output_seq_length_eval is None:
# defaults first to max_output_seq_length, then max_seq_length_eval, then max_seq_length
data_args.max_output_seq_length_eval = data_args.max_output_seq_length \
or data_args.max_seq_length_eval \
or data_args.max_seq_length
if data_args.max_output_seq_length is None:
# defaults to max_seq_length
data_args.max_output_seq_length = data_args.max_seq_length
if data_args.max_seq_length_eval is None:
# defaults to max_seq_length
data_args.max_seq_length_eval = data_args.max_seq_length
if data_args.chunk_size_eval is None:
# defaults to chunk_size
data_args.chunk_size_eval = data_args.chunk_size
if data_args.chunk_overlap_eval is None:
# defaults to chunk overlap
data_args.chunk_overlap_eval = data_args.chunk_overlap
# construct name for the output directory
# for example: conll04-t5-base-ep200-len256-ratio0-b4-train
output_dir = os.path.join(
training_args.output_dir,
f'{args.job}'
f'-{model_args.model_name_or_path.split("/")[-1]}'
f'-ep{round(training_args.num_train_epochs)}'
f'-len{data_args.max_seq_length}'
)
if data_args.max_output_seq_length != data_args.max_seq_length:
output_dir += f'-{data_args.max_output_seq_length}'
if training_args.learning_rate != 5e-4:
output_dir += f'-lr{training_args.learning_rate}'
output_dir += f'-b{training_args.per_device_train_batch_size}' \
f'-{data_args.train_split}'
if data_args.chunk_size != 128:
output_dir += f'-chunk{data_args.chunk_size}'
if data_args.chunk_overlap != 64:
output_dir += f'-overlap{data_args.chunk_overlap}'
if data_args.output_format is not None:
output_dir += f'-{data_args.output_format}'
if data_args.input_format is not None:
output_dir += f'-{data_args.input_format}'
if data_args.train_subset < 1:
output_dir += f'-size{data_args.train_subset:.2f}'
try:
os.mkdir(output_dir)
except FileExistsError:
pass
# setup logging
logging.basicConfig(
filename=os.path.join(output_dir, 'logs.log'),
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
)
logging.getLogger().addHandler(logging.StreamHandler())
# construct file name for the evaluation results
    evaluation_output_filename = 'results'
if data_args.num_beams is not None:
evaluation_output_filename += f'-{data_args.num_beams}beams'
if data_args.max_seq_length_eval is not None:
evaluation_output_filename += f'-len{data_args.max_seq_length_eval}'
# create model config
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
# create tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
)
# get list of dataset names
dataset_names = data_args.datasets.split(',')
# construct list of episode indices
episode_indices = get_episode_indices(data_args.episodes)
# episode loop
# (note that the episode index is used as the random seed, so that each episode is reproducible)
evaluation_results = defaultdict(list)
for ep_idx in episode_indices:
print()
logging.info(f'Episode {ep_idx} ({len(episode_indices)} episodes total)')
episode_output_dir = os.path.join(output_dir, f'episode{ep_idx}')
try:
os.mkdir(episode_output_dir)
except FileExistsError:
pass
logging.info(f'Output directory: {episode_output_dir}')
training_args.output_dir = episode_output_dir # checkpoints are saved in episode-specific directory
# load pretrained model
model = None
if training_args.zero_shot or training_args.do_train:
logging.info(f"Using model {model_args.model_name_or_path}")
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
)
# fine-tune the model
if training_args.do_train:
# load train dataset
datasets = []
for dataset_name in dataset_names:
logging.info(f'Process dataset {dataset_name} (train)')
dataset = load_dataset(
dataset_name, data_args, split=data_args.train_split,
max_input_length=data_args.max_seq_length, max_output_length=data_args.max_output_seq_length,
tokenizer=tokenizer, seed=ep_idx, train_subset=data_args.train_subset,
)
datasets.append(dataset)
train_dataset = torch.utils.data.ConcatDataset(datasets) if training_args.do_train else None
# construct trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
# start trainer
logging.info('Start training')
trainer.train(
model_path=model_args.model_name_or_path
)
# save model parameters
trainer.save_model(episode_output_dir)
# run evaluation
if training_args.local_rank in [-1, 0] and (training_args.do_eval or training_args.do_predict):
# should we evaluate on dev, test, or both?
evaluation_splits = []
if training_args.do_eval:
evaluation_splits.append('dev')
if training_args.do_predict:
evaluation_splits.append('test')
# should we evaluate on the final model and/or on all intermediate checkpoints?
evaluation_dirs = []
if args.evaluate_checkpoints or args.evaluate_last_checkpoint or \
args.evaluate_checkpoint_in_dir or args.evaluate_all:
# all intermediate checkpoints
evaluation_dirs = list(sorted([
checkpoint_dir
for checkpoint_dir in os.listdir(episode_output_dir)
if checkpoint_dir.startswith('checkpoint-')
], key=lambda x: int(x[len('checkpoint-'):])))
if args.evaluate_last_checkpoint:
# only evaluate on the last checkpoint
evaluation_dirs = [evaluation_dirs[-1]]
elif args.evaluate_checkpoint_in_dir:
assert args.evaluate_checkpoint_in_dir in evaluation_dirs, \
"checkpoint {} does not exist".format(args.evaluate_checkpoint_in_dir)
evaluation_dirs = [args.evaluate_checkpoint_in_dir]
if args.evaluate_all or (not args.evaluate_checkpoints and not args.evaluate_last_checkpoint):
# evaluate on the final model
evaluation_dirs += ['']
# datasets to evaluate on
if data_args.eval_datasets is None:
eval_dataset_names = dataset_names
else:
eval_dataset_names = data_args.eval_datasets.split(',')
# evaluate all possible combinations of dev/test, model, and datasets
for comb in itertools.product(evaluation_splits, evaluation_dirs, eval_dataset_names):
split, evaluation_dir, dataset_name = comb
model_dir = os.path.join(episode_output_dir, evaluation_dir)
if args.evaluate_checkpoints or args.evaluate_last_checkpoint or args.evaluate_all or model is None:
# we need to load the model
model = AutoModelForSeq2SeqLM.from_pretrained(
model_dir,
config=config,
)
if len(evaluation_dir) > 0:
logging.info(f'Evaluate {evaluation_dir} on {dataset_name} {split}')
else:
logging.info(f'Evaluate on {dataset_name} {split}')
res = evaluate(
model=model, dataset_name=dataset_name, data_args=data_args, tokenizer=tokenizer, split=split,
seed=ep_idx, batch_size=training_args.per_device_eval_batch_size, gpu=args.gpu
)
# store results
evaluation_results[comb].append(res)
# print results
if args.verbose_results:
print_results(res)
# save results to file
with open(
os.path.join(model_dir, evaluation_output_filename + f'-{dataset_name}-{split}.json'), 'w'
) as f:
json.dump(res, f, indent=0)
# print average results and save them to file
for comb, results in evaluation_results.items():
split, evaluation_dir, dataset_name = comb
print()
logging.info(
f'Average of {split} results over {len(results)} episodes ({dataset_name} {evaluation_dir}):'
)
res = get_avg_results(results)
# print average results
print_results(res)
# save average results to file
filename = evaluation_output_filename + f'-{dataset_name}-{split}'
if len(evaluation_dir) > 0:
filename += '-'
filename += f'{evaluation_dir}.json'
with open(os.path.join(output_dir, filename), 'w') as f:
json.dump(res, f, indent=0)
print()
logging.info(f'Model weights and intermediate checkpoints saved in {output_dir}')
if __name__ == "__main__":
main()
|
[
"evaluate.get_avg_results",
"evaluate.evaluate",
"evaluate.print_results"
] |
[((806, 831), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (829, 831), False, 'import torch\n'), ((890, 915), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (913, 915), False, 'import argparse\n'), ((2213, 2260), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'allow_no_value': '(False)'}), '(allow_no_value=False)\n', (2238, 2260), False, 'import configparser\n'), ((3242, 3318), 'transformers.HfArgumentParser', 'HfArgumentParser', (['(ModelArguments, DataTrainingArguments, TrainingArguments)'], {}), '((ModelArguments, DataTrainingArguments, TrainingArguments))\n', (3258, 3318), False, 'from transformers import AutoConfig, AutoTokenizer, HfArgumentParser, AutoModelForSeq2SeqLM, Trainer\n'), ((6503, 6651), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['(model_args.config_name if model_args.config_name else model_args.\n model_name_or_path)'], {'cache_dir': 'model_args.cache_dir'}), '(model_args.config_name if model_args.config_name\n else model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n', (6529, 6651), False, 'from transformers import AutoConfig, AutoTokenizer, HfArgumentParser, AutoModelForSeq2SeqLM, Trainer\n'), ((6710, 6835), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['(model_args.tokenizer_name if model_args.tokenizer_name else model_args.\n model_name_or_path)'], {}), '(model_args.tokenizer_name if model_args.\n tokenizer_name else model_args.model_name_or_path)\n', (6739, 6835), False, 'from transformers import AutoConfig, AutoTokenizer, HfArgumentParser, AutoModelForSeq2SeqLM, Trainer\n'), ((6992, 7031), 'utils.get_episode_indices', 'get_episode_indices', (['data_args.episodes'], {}), '(data_args.episodes)\n', (7011, 7031), False, 'from utils import get_episode_indices\n'), ((7178, 7195), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7189, 7195), False, 'from collections import defaultdict\n'), ((13501, 13587), 'logging.info', 'logging.info', (['f"""Model weights and intermediate checkpoints saved in {output_dir}"""'], {}), "(\n f'Model weights and intermediate checkpoints saved in {output_dir}')\n", (13513, 13587), False, 'import logging\n'), ((3481, 3515), 'os.mkdir', 'os.mkdir', (['training_args.output_dir'], {}), '(training_args.output_dir)\n', (3489, 3515), False, 'import os\n'), ((5770, 5790), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (5778, 5790), False, 'import os\n'), ((6104, 6127), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (6125, 6127), False, 'import logging\n'), ((7358, 7402), 'os.path.join', 'os.path.join', (['output_dir', 'f"""episode{ep_idx}"""'], {}), "(output_dir, f'episode{ep_idx}')\n", (7370, 7402), False, 'import os\n'), ((7516, 7571), 'logging.info', 'logging.info', (['f"""Output directory: {episode_output_dir}"""'], {}), "(f'Output directory: {episode_output_dir}')\n", (7528, 7571), False, 'import logging\n'), ((13069, 13093), 'evaluate.get_avg_results', 'get_avg_results', (['results'], {}), '(results)\n', (13084, 13093), False, 'from evaluate import evaluate, get_avg_results, print_results\n'), ((13135, 13153), 'evaluate.print_results', 'print_results', (['res'], {}), '(res)\n', (13148, 13153), False, 'from evaluate import evaluate, get_avg_results, print_results\n'), ((5893, 5929), 'os.path.join', 'os.path.join', (['output_dir', '"""logs.log"""'], {}), "(output_dir, 'logs.log')\n", (5905, 5929), False, 'import os\n'), ((6073, 6092), 
'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6090, 6092), False, 'import logging\n'), ((7429, 7457), 'os.mkdir', 'os.mkdir', (['episode_output_dir'], {}), '(episode_output_dir)\n', (7437, 7457), False, 'import os\n'), ((7811, 7871), 'logging.info', 'logging.info', (['f"""Using model {model_args.model_name_or_path}"""'], {}), "(f'Using model {model_args.model_name_or_path}')\n", (7823, 7871), False, 'import logging\n'), ((7892, 8012), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['model_args.model_name_or_path'], {'config': 'config', 'cache_dir': 'model_args.cache_dir'}), '(model_args.model_name_or_path, config\n =config, cache_dir=model_args.cache_dir)\n', (7929, 8012), False, 'from transformers import AutoConfig, AutoTokenizer, HfArgumentParser, AutoModelForSeq2SeqLM, Trainer\n'), ((8854, 8923), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'training_args', 'train_dataset': 'train_dataset'}), '(model=model, args=training_args, train_dataset=train_dataset)\n', (8861, 8923), False, 'from transformers import AutoConfig, AutoTokenizer, HfArgumentParser, AutoModelForSeq2SeqLM, Trainer\n'), ((9028, 9058), 'logging.info', 'logging.info', (['"""Start training"""'], {}), "('Start training')\n", (9040, 9058), False, 'import logging\n'), ((11260, 11333), 'itertools.product', 'itertools.product', (['evaluation_splits', 'evaluation_dirs', 'eval_dataset_names'], {}), '(evaluation_splits, evaluation_dirs, eval_dataset_names)\n', (11277, 11333), False, 'import itertools\n'), ((13456, 13483), 'json.dump', 'json.dump', (['res', 'f'], {'indent': '(0)'}), '(res, f, indent=0)\n', (13465, 13483), False, 'import json\n'), ((8259, 8314), 'logging.info', 'logging.info', (['f"""Process dataset {dataset_name} (train)"""'], {}), "(f'Process dataset {dataset_name} (train)')\n", (8271, 8314), False, 'import logging\n'), ((8341, 8586), 'datasets.load_dataset', 'load_dataset', (['dataset_name', 'data_args'], {'split': 'data_args.train_split', 'max_input_length': 'data_args.max_seq_length', 'max_output_length': 'data_args.max_output_seq_length', 'tokenizer': 'tokenizer', 'seed': 'ep_idx', 'train_subset': 'data_args.train_subset'}), '(dataset_name, data_args, split=data_args.train_split,\n max_input_length=data_args.max_seq_length, max_output_length=data_args.\n max_output_seq_length, tokenizer=tokenizer, seed=ep_idx, train_subset=\n data_args.train_subset)\n', (8353, 8586), False, 'from datasets import load_dataset\n'), ((8722, 8762), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['datasets'], {}), '(datasets)\n', (8752, 8762), False, 'import torch\n'), ((11422, 11470), 'os.path.join', 'os.path.join', (['episode_output_dir', 'evaluation_dir'], {}), '(episode_output_dir, evaluation_dir)\n', (11434, 11470), False, 'import os\n'), ((12051, 12243), 'evaluate.evaluate', 'evaluate', ([], {'model': 'model', 'dataset_name': 'dataset_name', 'data_args': 'data_args', 'tokenizer': 'tokenizer', 'split': 'split', 'seed': 'ep_idx', 'batch_size': 'training_args.per_device_eval_batch_size', 'gpu': 'args.gpu'}), '(model=model, dataset_name=dataset_name, data_args=data_args,\n tokenizer=tokenizer, split=split, seed=ep_idx, batch_size=training_args\n .per_device_eval_batch_size, gpu=args.gpu)\n', (12059, 12243), False, 'from evaluate import evaluate, get_avg_results, print_results\n'), ((13397, 13431), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (13409, 13431), False, 'import 
os\n'), ((11665, 11728), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['model_dir'], {'config': 'config'}), '(model_dir, config=config)\n', (11702, 11728), False, 'from transformers import AutoConfig, AutoTokenizer, HfArgumentParser, AutoModelForSeq2SeqLM, Trainer\n'), ((11865, 11933), 'logging.info', 'logging.info', (['f"""Evaluate {evaluation_dir} on {dataset_name} {split}"""'], {}), "(f'Evaluate {evaluation_dir} on {dataset_name} {split}')\n", (11877, 11933), False, 'import logging\n'), ((11976, 12027), 'logging.info', 'logging.info', (['f"""Evaluate on {dataset_name} {split}"""'], {}), "(f'Evaluate on {dataset_name} {split}')\n", (11988, 12027), False, 'import logging\n'), ((12472, 12490), 'evaluate.print_results', 'print_results', (['res'], {}), '(res)\n', (12485, 12490), False, 'from evaluate import evaluate, get_avg_results, print_results\n'), ((12717, 12744), 'json.dump', 'json.dump', (['res', 'f'], {'indent': '(0)'}), '(res, f, indent=0)\n', (12726, 12744), False, 'import json\n'), ((12582, 12671), 'os.path.join', 'os.path.join', (['model_dir', "(evaluation_output_filename + f'-{dataset_name}-{split}.json')"], {}), "(model_dir, evaluation_output_filename +\n f'-{dataset_name}-{split}.json')\n", (12594, 12671), False, 'import os\n'), ((10102, 10132), 'os.listdir', 'os.listdir', (['episode_output_dir'], {}), '(episode_output_dir)\n', (10112, 10132), False, 'import os\n')]
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter
from PIL import Image
import torch
from tqdm import tqdm
import os
from pathlib import Path
openpose_dir = Path('../pytorch_Realtime_Multi-Person_Pose_Estimation/')
import sys
sys.path.append(str(openpose_dir))
from save_img import *
# openpose
from network.rtpose_vgg import get_model
from evaluate.coco_eval import get_multiplier, get_outputs
# utils
from openpose_utils import remove_noise, get_pose
# Match fps with the fps of the source video.
def make_video_animation(num_frames , animation_function, output_name,fps = 30):
"""
    num_frames: int
        number of frames in the output video.
    animation_function:
        function that draws the frame with index j onto the global figure.
    output_name: str
        name of the output file (without the .mp4 extension).
    fps: int
        frames per second for the output video; match this to the source video.
"""
metadata = dict(title='Movie Test', artist='IA',
comment='<NAME>')
writer = FFMpegWriter(fps=fps, metadata=metadata)
with writer.saving(fig, output_name +'.mp4', dpi=100):
for j in tqdm(range(num_frames)):
animation_function(j)
writer.grab_frame()
torch.cuda.empty_cache()
def skeleton_frame(idx):
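    """Estimate the pose for frame `idx` and draw the skeleton overlay onto the global figure."""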
img_path = img_dir.joinpath('{:05d}.png'.format(idx))
img = cv2.imread(str(img_path))
shape_dst = np.min(img.shape[:2])
oh = (img.shape[0] - shape_dst) // 2
ow = (img.shape[1] - shape_dst) // 2
img = img[oh:oh+shape_dst, ow:ow+shape_dst]
img = cv2.resize(img, (512, 512))
multiplier = get_multiplier(img)
with torch.no_grad():
paf, heatmap = get_outputs(multiplier, img, model, 'rtpose')
r_heatmap = np.array([remove_noise(ht)
for ht in heatmap.transpose(2, 0, 1)[:-1]])\
.transpose(1, 2, 0)
heatmap[:, :, :-1] = r_heatmap
param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
label, cord = get_pose(param, heatmap, paf)
mask = label[:,:] > 0
intensity = .80
img[mask,:] = int(255*intensity)
fig.clear()
plt.axis('off')
plt.imshow(img)
def remove_transparency(im, bg_colour=(255, 255, 255)):
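    """Composite an image that has transparency onto a solid background colour; return other images unchanged."""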
# Only process if image has transparency
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
if __name__ == '__main__':
#NETWORK CREATION
weight_name = openpose_dir.joinpath('network/weight/pose_model.pth')
model = get_model('vgg19')
model.load_state_dict(torch.load(weight_name))
model = torch.nn.DataParallel(model).cuda()
model.float()
model.eval()
img_dir = Path('../../data/source/images')
NUM_FRAMES = len(os.listdir(str(img_dir)))
FPS = 30
plt.close()
plt.axis('off')
fig = plt.figure(figsize=(5.12, 5.12))
make_video_animation(num_frames = NUM_FRAMES,
animation_function = skeleton_frame,
output_name = "test_labels",
fps = FPS)
|
[
"evaluate.coco_eval.get_outputs",
"evaluate.coco_eval.get_multiplier"
] |
[((230, 287), 'pathlib.Path', 'Path', (['"""../pytorch_Realtime_Multi-Person_Pose_Estimation/"""'], {}), "('../pytorch_Realtime_Multi-Person_Pose_Estimation/')\n", (234, 287), False, 'from pathlib import Path\n'), ((897, 937), 'matplotlib.animation.FFMpegWriter', 'FFMpegWriter', ([], {'fps': 'fps', 'metadata': 'metadata'}), '(fps=fps, metadata=metadata)\n', (909, 937), False, 'from matplotlib.animation import FFMpegWriter\n'), ((1092, 1116), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1114, 1116), False, 'import torch\n'), ((1260, 1281), 'numpy.min', 'np.min', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (1266, 1281), True, 'import numpy as np\n'), ((1421, 1448), 'cv2.resize', 'cv2.resize', (['img', '(512, 512)'], {}), '(img, (512, 512))\n', (1431, 1448), False, 'import cv2\n'), ((1465, 1484), 'evaluate.coco_eval.get_multiplier', 'get_multiplier', (['img'], {}), '(img)\n', (1479, 1484), False, 'from evaluate.coco_eval import get_multiplier, get_outputs\n'), ((1805, 1834), 'openpose_utils.get_pose', 'get_pose', (['param', 'heatmap', 'paf'], {}), '(param, heatmap, paf)\n', (1813, 1834), False, 'from openpose_utils import remove_noise, get_pose\n'), ((1947, 1962), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1955, 1962), True, 'import matplotlib.pyplot as plt\n'), ((1969, 1984), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1979, 1984), True, 'import matplotlib.pyplot as plt\n'), ((2746, 2764), 'network.rtpose_vgg.get_model', 'get_model', (['"""vgg19"""'], {}), "('vgg19')\n", (2755, 2764), False, 'from network.rtpose_vgg import get_model\n'), ((2910, 2942), 'pathlib.Path', 'Path', (['"""../../data/source/images"""'], {}), "('../../data/source/images')\n", (2914, 2942), False, 'from pathlib import Path\n'), ((3003, 3014), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3012, 3014), True, 'import matplotlib.pyplot as plt\n'), ((3017, 3032), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3025, 3032), True, 'import matplotlib.pyplot as plt\n'), ((3041, 3073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.12, 5.12)'}), '(figsize=(5.12, 5.12))\n', (3051, 3073), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1508), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1506, 1508), False, 'import torch\n'), ((1529, 1574), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['multiplier', 'img', 'model', '"""rtpose"""'], {}), "(multiplier, img, model, 'rtpose')\n", (1540, 1574), False, 'from evaluate.coco_eval import get_multiplier, get_outputs\n'), ((2471, 2517), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'im.size', '(bg_colour + (255,))'], {}), "('RGBA', im.size, bg_colour + (255,))\n", (2480, 2517), False, 'from PIL import Image\n'), ((2794, 2817), 'torch.load', 'torch.load', (['weight_name'], {}), '(weight_name)\n', (2804, 2817), False, 'import torch\n'), ((2829, 2857), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (2850, 2857), False, 'import torch\n'), ((1600, 1616), 'openpose_utils.remove_noise', 'remove_noise', (['ht'], {}), '(ht)\n', (1612, 1616), False, 'from openpose_utils import remove_noise, get_pose\n')]
|
"""Trains off-policy algorithms, such as QMIX and IQL."""
import json
import os
import random
import sys
import time
sys.path.append('../env/')
import numpy as np
import tensorflow as tf
import alg_iql
import alg_qmix
import env_wrapper
import evaluate
import replay_buffer
def train_function(config):
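    """Run the full off-policy training loop (QMIX or IQL) described by `config`."""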
config_env = config['env']
config_main = config['main']
config_alg = config['alg']
seed = config_main['seed']
np.random.seed(seed)
random.seed(seed)
tf.set_random_seed(seed)
dir_name = config_main['dir_name']
model_name = config_main['model_name']
summarize = config_main['summarize']
save_period = config_main['save_period']
os.makedirs('../results/%s'%dir_name, exist_ok=True)
with open('../results/%s/%s'
% (dir_name, 'config.json'), 'w') as f:
json.dump(config, f, indent=4)
N_train = config_alg['N_train']
N_eval = config_alg['N_eval']
period = config_alg['period']
buffer_size = config_alg['buffer_size']
batch_size = config_alg['batch_size']
pretrain_episodes = config_alg['pretrain_episodes']
steps_per_train = config_alg['steps_per_train']
epsilon_start = config_alg['epsilon_start']
epsilon_end = config_alg['epsilon_end']
epsilon_div = config_alg['epsilon_div']
epsilon_step = (epsilon_start - epsilon_end)/float(epsilon_div)
epsilon = epsilon_start
env = env_wrapper.Env(config_env, config_main)
config_env_mod = config_env.copy()
config_env_mod['self_play'] = False # test against stock AI during evaluation episodes
config_env_mod['num_away_ai_players'] = config_env_mod['num_away_players'] # set number of stock AI
env_eval = env_wrapper.Env(config_env_mod, config_main)
self_play = config_env['self_play']
if self_play:
assert(config_env['num_away_ai_players'] == 0)
l_state = env.state_dim
l_action = env.action_dim
l_obs = env.obs_dim
N_home = config_env['num_home_players']
if config_main['alg_name'] == 'qmix':
alg = alg_qmix.Alg(config_alg, N_home, l_state, l_obs, l_action, config['nn_qmix'])
elif config_main['alg_name'] == 'iql':
alg = alg_iql.Alg(config_alg, N_home, l_state, l_obs, l_action, config['nn_iql'])
config_proto = tf.ConfigProto()
config_proto.gpu_options.allow_growth = True
sess = tf.Session(config=config_proto)
sess.run(tf.global_variables_initializer())
sess.run(alg.list_initialize_target_ops)
if summarize:
writer = tf.summary.FileWriter('../results/%s' % dir_name, sess.graph)
saver = tf.train.Saver(max_to_keep=config_main['max_to_keep'])
buf = replay_buffer.Replay_Buffer(size=buffer_size)
# Logging
header = "Episode,Step,Step_train,R_avg,R_eval,Steps_per_eps,Opp_win_rate,Win_rate,T_env,T_alg\n"
with open("../results/%s/log.csv" % dir_name, 'w') as f:
f.write(header)
t_start = time.time()
t_env = 0
t_alg = 0
reward_period = 0
step = 0
step_train = 0
for idx_episode in range(1, N_train+1):
state_home, state_away, list_obs_home, list_obs_away, done = env.reset()
reward_episode = 0
summarized = 0
while not done:
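            # During pretraining episodes, fill the replay buffer with random actions.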
if idx_episode < pretrain_episodes:
if self_play:
actions_int_h, actions_int_a = env.random_actions()
actions_int = (actions_int_h, actions_int_a)
else:
actions_int = env.random_actions()
else:
t_alg_start = time.time()
if self_play:
actions_int_h = alg.run_actor(list_obs_home, epsilon, sess)
actions_int_a = alg.run_actor(list_obs_away, epsilon, sess)
actions_int = (actions_int_h, actions_int_a)
else:
actions_int = alg.run_actor(list_obs_home, epsilon, sess)
t_alg += time.time() - t_alg_start
t_env_start = time.time()
state_home_next, state_away_next, list_obs_home_next, list_obs_away_next, reward, local_rewards, done, info = env.step(actions_int)
t_env += time.time() - t_env_start
step += 1
if self_play:
buf.add( np.array([ state_home, np.array(list_obs_home), actions_int_h, reward[0], state_home_next, np.array(list_obs_home_next), done] ) )
buf.add( np.array([ state_away, np.array(list_obs_away), actions_int_a, reward[1], state_away_next, np.array(list_obs_away_next), done] ) )
else:
buf.add( np.array([ state_home, np.array(list_obs_home), actions_int, reward, state_home_next, np.array(list_obs_home_next), done] ) )
if (idx_episode >= pretrain_episodes) and (step % steps_per_train == 0):
batch = buf.sample_batch(batch_size)
t_alg_start = time.time()
if summarize and idx_episode % period == 0 and not summarized:
alg.train_step(sess, batch, step_train, summarize=True, writer=writer)
summarized = True
else:
alg.train_step(sess, batch, step_train, summarize=False, writer=None)
step_train += 1
t_alg += time.time() - t_alg_start
state_home = state_home_next
list_obs_home = list_obs_home_next
state_away = state_away_next
list_obs_away = list_obs_away_next
if self_play:
reward_episode += reward[0]
else:
reward_episode += reward
if idx_episode >= pretrain_episodes and epsilon > epsilon_end:
epsilon -= epsilon_step
reward_period += reward_episode
if idx_episode == 1 or idx_episode % (5*period) == 0:
print('{:>10s}{:>10s}{:>12s}{:>8s}{:>8s}{:>15s}{:>15s}{:>10s}{:>12s}{:>12s}'.format(*(header.strip().split(','))))
if idx_episode % period == 0:
# Evaluation episodes
r_avg_eval, steps_per_episode, win_rate, win_rate_opponent = evaluate.test(N_eval, env_eval, sess, alg)
if win_rate >= config_main['save_threshold']:
saver.save(sess, '../results/%s/%s-%d' % (dir_name, "model_good.ckpt", idx_episode))
s = '%d,%d,%d,%.2f,%.2f,%d,%.2f,%.2f,%.5e,%.5e\n' % (idx_episode, step, step_train, reward_period/float(period), r_avg_eval, steps_per_episode, win_rate_opponent, win_rate, t_env, t_alg)
with open('../results/%s/log.csv' % dir_name, 'a') as f:
f.write(s)
print('{:10d}{:10d}{:12d}{:8.2f}{:8.2f}{:15d}{:15.2f}{:10.2f}{:12.5e}{:12.5e}\n'.format(idx_episode, step, step_train, reward_period/float(period), r_avg_eval, int(steps_per_episode), win_rate_opponent, win_rate, t_env, t_alg))
reward_period = 0
if idx_episode % save_period == 0:
saver.save(sess, '../results/%s/%s-%d' % (dir_name, "model.ckpt", idx_episode))
saver.save(sess, '../results/%s/%s' % (dir_name, model_name))
with open('../results/%s/time.txt' % dir_name, 'a') as f:
f.write('t_env_total,t_env_per_step,t_alg_total,t_alg_per_step\n')
f.write('%.5e,%.5e,%.5e,%.5e' % (t_env, t_env/step, t_alg, t_alg/step))
if __name__ == "__main__":
with open('config.json', 'r') as f:
config = json.load(f)
train_function(config)
|
[
"evaluate.test"
] |
[((119, 145), 'sys.path.append', 'sys.path.append', (['"""../env/"""'], {}), "('../env/')\n", (134, 145), False, 'import sys\n'), ((444, 464), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (458, 464), True, 'import numpy as np\n'), ((469, 486), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (480, 486), False, 'import random\n'), ((491, 515), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (509, 515), True, 'import tensorflow as tf\n'), ((698, 752), 'os.makedirs', 'os.makedirs', (["('../results/%s' % dir_name)"], {'exist_ok': '(True)'}), "('../results/%s' % dir_name, exist_ok=True)\n", (709, 752), False, 'import os\n'), ((1432, 1472), 'env_wrapper.Env', 'env_wrapper.Env', (['config_env', 'config_main'], {}), '(config_env, config_main)\n', (1447, 1472), False, 'import env_wrapper\n'), ((1722, 1766), 'env_wrapper.Env', 'env_wrapper.Env', (['config_env_mod', 'config_main'], {}), '(config_env_mod, config_main)\n', (1737, 1766), False, 'import env_wrapper\n'), ((2307, 2323), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2321, 2323), True, 'import tensorflow as tf\n'), ((2384, 2415), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config_proto'}), '(config=config_proto)\n', (2394, 2415), True, 'import tensorflow as tf\n'), ((2628, 2682), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': "config_main['max_to_keep']"}), "(max_to_keep=config_main['max_to_keep'])\n", (2642, 2682), True, 'import tensorflow as tf\n'), ((2698, 2743), 'replay_buffer.Replay_Buffer', 'replay_buffer.Replay_Buffer', ([], {'size': 'buffer_size'}), '(size=buffer_size)\n', (2725, 2743), False, 'import replay_buffer\n'), ((2969, 2980), 'time.time', 'time.time', ([], {}), '()\n', (2978, 2980), False, 'import time\n'), ((846, 876), 'json.dump', 'json.dump', (['config', 'f'], {'indent': '(4)'}), '(config, f, indent=4)\n', (855, 876), False, 'import json\n'), ((2072, 2149), 'alg_qmix.Alg', 'alg_qmix.Alg', (['config_alg', 'N_home', 'l_state', 'l_obs', 'l_action', "config['nn_qmix']"], {}), "(config_alg, N_home, l_state, l_obs, l_action, config['nn_qmix'])\n", (2084, 2149), False, 'import alg_qmix\n'), ((2429, 2462), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2460, 2462), True, 'import tensorflow as tf\n'), ((2554, 2615), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["('../results/%s' % dir_name)", 'sess.graph'], {}), "('../results/%s' % dir_name, sess.graph)\n", (2575, 2615), True, 'import tensorflow as tf\n'), ((7545, 7557), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7554, 7557), False, 'import json\n'), ((2207, 2282), 'alg_iql.Alg', 'alg_iql.Alg', (['config_alg', 'N_home', 'l_state', 'l_obs', 'l_action', "config['nn_iql']"], {}), "(config_alg, N_home, l_state, l_obs, l_action, config['nn_iql'])\n", (2218, 2282), False, 'import alg_iql\n'), ((4096, 4107), 'time.time', 'time.time', ([], {}), '()\n', (4105, 4107), False, 'import time\n'), ((6241, 6283), 'evaluate.test', 'evaluate.test', (['N_eval', 'env_eval', 'sess', 'alg'], {}), '(N_eval, env_eval, sess, alg)\n', (6254, 6283), False, 'import evaluate\n'), ((3635, 3646), 'time.time', 'time.time', ([], {}), '()\n', (3644, 3646), False, 'import time\n'), ((4273, 4284), 'time.time', 'time.time', ([], {}), '()\n', (4282, 4284), False, 'import time\n'), ((5015, 5026), 'time.time', 'time.time', ([], {}), '()\n', (5024, 5026), False, 'import time\n'), ((4027, 4038), 'time.time', 'time.time', ([], {}), '()\n', (4036, 
4038), False, 'import time\n'), ((5404, 5415), 'time.time', 'time.time', ([], {}), '()\n', (5413, 5415), False, 'import time\n'), ((4409, 4432), 'numpy.array', 'np.array', (['list_obs_home'], {}), '(list_obs_home)\n', (4417, 4432), True, 'import numpy as np\n'), ((4477, 4505), 'numpy.array', 'np.array', (['list_obs_home_next'], {}), '(list_obs_home_next)\n', (4485, 4505), True, 'import numpy as np\n'), ((4565, 4588), 'numpy.array', 'np.array', (['list_obs_away'], {}), '(list_obs_away)\n', (4573, 4588), True, 'import numpy as np\n'), ((4633, 4661), 'numpy.array', 'np.array', (['list_obs_away_next'], {}), '(list_obs_away_next)\n', (4641, 4661), True, 'import numpy as np\n'), ((4739, 4762), 'numpy.array', 'np.array', (['list_obs_home'], {}), '(list_obs_home)\n', (4747, 4762), True, 'import numpy as np\n'), ((4802, 4830), 'numpy.array', 'np.array', (['list_obs_home_next'], {}), '(list_obs_home_next)\n', (4810, 4830), True, 'import numpy as np\n')]
|
'''Module for boilerplate code around the training loop.
Defines an abstract base class `Model` for Bayesian word embedding models, a
function `train()` that runs the training loop, and a function `add_cli_args()`
that adds command line arguments to control the training loop (e.g., the number
of training steps and the log frequency).
'''
import pickle
from time import time
import abc
import traceback
import datetime
import socket
import subprocess
import os
import sys
import pprint
import argparse
import re
import numpy as np
import tensorflow as tf
from dataset import Dataset
import abstract_model
import optimizer
import distmult_model
import complex_model
import evaluate
def train(arg_parser):
'''Create an instance of model with the command line arguments and train it.
Arguments:
arg_parser -- An `argparse.ArgumentParser`.
'''
args = arg_parser.parse_args()
if not args.em:
args.num_samples = 1
if args.model == 'DistMult':
Model = distmult_model.DistMultModel
    elif args.model == 'ComplEx':
        Model = complex_model.ComplExModel
    else:
        raise ValueError('Unknown model: %s' % args.model)
# Get random seed from system if the user did not specify a random seed.
if args.rng_seed is None:
args.rng_seed = int.from_bytes(os.urandom(4), byteorder='little')
rng = np.random.RandomState(seed=args.rng_seed)
tf.set_random_seed(rng.randint(2**31))
# Create the output directory.
try:
os.mkdir(args.output)
except OSError:
if not args.force:
sys.stderr.write(
'ERROR: Cannot create output directory %s\n' % args.output)
            sys.stderr.write(
                'HINT: Does the directory already exist? To prevent accidental data loss this\n'
                '      script, by default, does not write to an existing output directory.\n'
                '      Specify a non-existing output directory or use the `--force` flag.\n')
exit(1)
else:
print('Writing output into directory `%s`.' % args.output)
try:
with open(os.path.join(args.output, 'log'), 'w') as log_file:
# We write log files in the form of python scripts. This way, log files are both human
# readable and very easy to parse by different python scripts. We begin log files with
# a shebang (`#!/usr/bin/python`) so that text editors turn on syntax highlighting.
log_file.write('#!/usr/bin/python\n')
log_file.write('\n')
# Log information about the executing environment to make experiments reproducible.
log_file.write('program = "%s"\n' % arg_parser.prog)
log_file.write(
'args = {\n %s\n}\n\n' % pprint.pformat(vars(args), indent=4)[1:-1])
log_file.write('git_revision = "%s"\n' % subprocess.check_output(
['git', 'rev-parse', 'HEAD']).decode('utf-8').strip())
log_file.write('host_name = "%s"\n' % socket.gethostname())
log_file.write('start_time = "%s"\n' %
str(datetime.datetime.now()))
log_file.write('\n')
dat = Dataset(args.input, log_file=log_file)
model = Model(args, dat, rng, log_file=log_file)
session = tf.Session()
session.run(tf.initializers.global_variables())
if args.initialize_from is not None:
load_checkpoint(model, session, args.initialize_from,
log_file=log_file)
evaluator = evaluate.Evaluator(model, dat, args, log_file=log_file)
training_loop(args, model, session, dat, rng, evaluator,
log_file=log_file)
log_file.write('\n')
log_file.write('end_time = "%s"\n' %
str(datetime.datetime.now()))
except:
with open(os.path.join(args.output, 'err'), 'w') as err_file:
traceback.print_exc(file=err_file)
exit(2)
def training_loop(args, model, session, dat, rng, evaluator, log_file=sys.stdout):
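    '''Run the training loop with periodic evaluation and checkpointing.

    A checkpoint is kept from the epoch with peak filtered MRR on the
    validation set (early stopping without actually stopping training).
    '''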
log_file.write('\n# Starting training loop.\n')
step, = session.run(
[var for var in tf.global_variables() if var.name == 'training_step:0'])
initial_summaries = args.initial_summaries + step
log_file.write('pretrained_steps = %d\n' % step)
log_file.write('\n')
log_file.write('progress = [\n')
log_file.flush()
summary_writer = tf.summary.FileWriter(args.output, session.graph)
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=2)
# Implement early stopping using the filtered MRR on the validation set. We don't actually stop
# training when the MRR starts going down, but we keep a checkpoint from peak MRR.
max_mrr_so_far = 0.0
opt_step = model.e_step
for epoch in range(args.epochs):
if args.em and epoch == args.initial_e_epochs:
log_file.write('# Starting hyperparameter optimization.\n')
log_file.flush()
opt_step = model.em_step
if epoch % args.epochs_per_eval == 0:
valid_mrr = evaluator.run(session, summary_writer, step, epoch,
log_file=log_file)
if valid_mrr is not None and valid_mrr > max_mrr_so_far:
max_mrr_so_far = valid_mrr
save_checkpoint(args.output, session, step, saver, log_file)
for minibatch in dat.iterate_in_minibatches('train', args.minibatch_size, rng):
step += 1
if step % args.steps_per_summary == 0 or step <= initial_summaries:
_, summary = session.run([opt_step, model.summary_op],
feed_dict={model.minibatch_htr: minibatch})
summary_writer.add_summary(summary, global_step=step)
else:
session.run(opt_step, feed_dict={
model.minibatch_htr: minibatch})
save_checkpoint(args.output, session, step, saver, log_file)
evaluator.run(session, summary_writer, step, args.epochs,
log_file=log_file)
log_file.write(']\n')
def save_checkpoint(directory, session, step, saver, log_file=sys.stdout):
'''Save the current state of the model to a tensorflow checkpoint.
Arguments:
directory -- Output directory. Must exist. Any files with clashing names
in the output directory will be overwritten.
session -- A `tf.Session` that contains the state.
step -- Integer number of concluded training steps.
saver -- A `tf.train.Saver`.
log_file -- File handle to the log file.
'''
start_time = time()
log_file.write('# Saving checkpoint after step %d... ' % step)
log_file.flush()
saver.save(session, os.path.join(directory, 'checkpoint'),
global_step=step)
log_file.write('done. (%.2g seconds)\n' % (time() - start_time))
log_file.flush()
def load_checkpoint(model, session, path, log_file=sys.stdout):
log_file.write('# Loading model from checkpoint %s\n' % path)
log_file.flush()
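# Restore only the variables present in both the checkpoint and the current graph; anything missing on either side is logged below rather than treated as an error.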
reader = tf.train.NewCheckpointReader(path)
checkpoint_variables = set(reader.get_variable_to_shape_map().keys())
# Map "scope/var_name:0" to "scope/var_name".
trimmer = re.compile(r'(.+):\d+$')
def trim(s):
return trimmer.match(s).group(1)
model_variables = set(trim(var.name) for var in tf.global_variables())
try:
model_variables.remove('training_step')
except:
pass
restored_variables = sorted(list(
model_variables.intersection(checkpoint_variables)))
ignored_in_checkpoint = sorted(list(
checkpoint_variables - model_variables))
not_in_checkpoint = sorted(list(
model_variables - checkpoint_variables))
log_file.write('restored_variables = [\n %s\n]\n\n'
% pprint.pformat(restored_variables, indent=4)[1:-1])
log_file.write('not_found_in_checkpoint = [\n %s\n]\n\n'
% pprint.pformat(not_in_checkpoint, indent=4)[1:-1])
log_file.write('found_in_checkpoint_but_not_restored = [\n %s\n]\n\n'
% pprint.pformat(ignored_in_checkpoint, indent=4)[1:-1])
log_file.flush()
loader = tf.train.Saver(
var_list=[var for var in tf.global_variables() if trim(var.name) in restored_variables])
loader.restore(session, path)
log_file.write('# Done loading model from checkpoint.\n')
log_file.flush()
def add_cli_args(parser):
'''Add generic command line arguments.
This function defines command line arguments that are required for all
models in this project.
Arguments:
parser -- An `argparse.ArgumentParser`.
Returns:
A tuple of two command line argument groups that were added to the parser.
'''
positional_args = parser.add_argument_group(
'Required positional arguments')
positional_args.add_argument('input', metavar='IN_PATH', help='''
Path to a directory containing the training, validation, and test data sets.''')
positional_args.add_argument('output', metavar='OUT_PATH', help='''
Path to the output directory. Must not already exist (unless --force is used).''')
train_args = parser.add_argument_group(
'Parameters of the training environment')
train_args.add_argument('-f', '--force', action='store_true', help='''
Allow writing into existing output directory, possibly overwriting existing files.''')
train_args.add_argument('-E', '--epochs', metavar='N', type=int, default=500, help='''
Set the number of training epochs.''')
train_args.add_argument('--initial_e_epochs', metavar='N', type=int, default=50, help='''
Set the number of initial epochs in which the hyperparameters are kept constant (i.e., only
the "E-step" but not the "M-step" is performed during these initial epochs). Only used if
`--em` is set.''')
train_args.add_argument('-B', '--minibatch_size', metavar='N', type=int, default=100, help='''
Set the minibatch size.''')
train_args.add_argument('--rng_seed', metavar='N', type=int, help='''
Set the seed of the pseudo random number generator. If not provided, a
seed will automatically be generated from a system random source. In order to
make experiments reproducible, the seed is always written to the output file,
along with the git commit hash and all command line arguments.''')
train_args.add_argument('--steps_per_summary', metavar='N', type=int, default=100, help='''
Set the number of training steps to run between generating a Tensorboard summary.''')
train_args.add_argument('--initial_summaries', metavar='N', type=int, default=100, help='''
Set the number of initial training steps for which a Tensorboard summary will be generated
after every step.''')
train_args.add_argument('--epochs_per_eval', metavar='N', type=int, default=1, help='''
Set the number of training epochs to run between model evaluations.''')
train_args.add_argument('--initialize_from', metavar='PATH', help='''
Provide a path to a tensorflow checkpoint file from which to load initial model parameters,
initial hyperparameters, and the number of already concluded training steps. The provided
PATH will likely have the form `directory/checkpoint-NNN` where `NNN` is the step number.
Note that any internal state of optimizers, such as momentum or other accumulators, is not
restored. Values stored in the checkpoint file take precedence over any initializations
provided by command line arguments. This operation can be used both to initialize EM with a
point estimated model, as well as to initialize point estimation with the means and
hyperparameters from EM. The operation restores the intersection between parameters in the
checkpoint and parameters of the new model, and reports the names of the restored parameters
to the log file.''')
return positional_args, train_args
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
Train a probabilistic knowledge graph embedding model.''')
add_cli_args(parser)
abstract_model.add_cli_args(parser)
optimizer.add_cli_args(parser)
evaluate.add_cli_args(parser)
train(parser)
|
[
"evaluate.Evaluator",
"evaluate.add_cli_args"
] |
[((1296, 1337), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'args.rng_seed'}), '(seed=args.rng_seed)\n', (1317, 1337), True, 'import numpy as np\n'), ((4431, 4480), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['args.output', 'session.graph'], {}), '(args.output, session.graph)\n', (4452, 4480), True, 'import tensorflow as tf\n'), ((6637, 6643), 'time.time', 'time', ([], {}), '()\n', (6641, 6643), False, 'from time import time\n'), ((7087, 7121), 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['path'], {}), '(path)\n', (7115, 7121), True, 'import tensorflow as tf\n'), ((7261, 7285), 're.compile', 're.compile', (['"""(.+):\\\\d+$"""'], {}), "('(.+):\\\\d+$')\n", (7271, 7285), False, 'import re\n'), ((12116, 12227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n Train a probabilistic knowledge graph embedding model."""'}), '(description=\n    """\n Train a probabilistic knowledge graph embedding model.""")\n', (12139, 12227), False, 'import argparse\n'), ((12252, 12287), 'abstract_model.add_cli_args', 'abstract_model.add_cli_args', (['parser'], {}), '(parser)\n', (12279, 12287), False, 'import abstract_model\n'), ((12292, 12322), 'optimizer.add_cli_args', 'optimizer.add_cli_args', (['parser'], {}), '(parser)\n', (12314, 12322), False, 'import optimizer\n'), ((12327, 12356), 'evaluate.add_cli_args', 'evaluate.add_cli_args', (['parser'], {}), '(parser)\n', (12348, 12356), False, 'import evaluate\n'), ((1434, 1455), 'os.mkdir', 'os.mkdir', (['args.output'], {}), '(args.output)\n', (1442, 1455), False, 'import os\n'), ((4508, 4532), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4530, 4532), True, 'import tensorflow as tf\n'), ((6757, 6794), 'os.path.join', 'os.path.join', (['directory', '"""checkpoint"""'], {}), "(directory, 'checkpoint')\n", (6769, 6794), False, 'import os\n'), ((1251, 1264), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (1261, 1264), False, 'import os\n'), ((3129, 3167), 'dataset.Dataset', 'Dataset', (['args.input'], {'log_file': 'log_file'}), '(args.input, log_file=log_file)\n', (3136, 3167), False, 'from dataset import Dataset\n'), ((3251, 3263), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3261, 3263), True, 'import tensorflow as tf\n'), ((3519, 3574), 'evaluate.Evaluator', 'evaluate.Evaluator', (['model', 'dat', 'args'], {'log_file': 'log_file'}), '(model, dat, args, log_file=log_file)\n', (3537, 3574), False, 'import evaluate\n'), ((1515, 1591), 'sys.stderr.write', 'sys.stderr.write', (["('ERROR: Cannot create output directory %s\\n' % args.output)"], {}), "('ERROR: Cannot create output directory %s\\n' % args.output)\n", (1531, 1591), False, 'import sys\n'), ((1621, 1874), 'sys.stderr.write', 'sys.stderr.write', (['"""HINT: Does the directory already exist? To prevent accidental data loss this\n script, by default, does not write to an existing output directory.\n Specify a non-existing output directory or use the `--force`.\n"""'], {}), '(\n    """HINT: Does the directory already exist? To prevent accidental data loss this\n script, by default, does not write to an existing output directory.\n Specify a non-existing output directory or use the `--force`.\n"""\n    )\n', (1637, 1874), False, 'import sys\n'), ((2044, 2076), 'os.path.join', 'os.path.join', (['args.output', '"""log"""'], {}), "(args.output, 'log')\n", (2056, 2076), False, 'import os\n'), ((3288, 3322), 'tensorflow.initializers.global_variables', 'tf.initializers.global_variables', ([], {}), '()\n', (3320, 3322), True, 'import tensorflow as tf\n'), ((3924, 3958), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'err_file'}), '(file=err_file)\n', (3943, 3958), False, 'import traceback\n'), ((4162, 4183), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4181, 4183), True, 'import tensorflow as tf\n'), ((6877, 6883), 'time.time', 'time', ([], {}), '()\n', (6881, 6883), False, 'from time import time\n'), ((7398, 7419), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (7417, 7419), True, 'import tensorflow as tf\n'), ((7857, 7901), 'pprint.pformat', 'pprint.pformat', (['restored_variables'], {'indent': '(4)'}), '(restored_variables, indent=4)\n', (7871, 7901), False, 'import pprint\n'), ((7991, 8034), 'pprint.pformat', 'pprint.pformat', (['not_in_checkpoint'], {'indent': '(4)'}), '(not_in_checkpoint, indent=4)\n', (8005, 8034), False, 'import pprint\n'), ((8137, 8184), 'pprint.pformat', 'pprint.pformat', (['ignored_in_checkpoint'], {'indent': '(4)'}), '(ignored_in_checkpoint, indent=4)\n', (8151, 8184), False, 'import pprint\n'), ((2947, 2967), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (2965, 2967), False, 'import socket\n'), ((3860, 3892), 'os.path.join', 'os.path.join', (['args.output', '"""err"""'], {}), "(args.output, 'err')\n", (3872, 3892), False, 'import os\n'), ((8276, 8297), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (8295, 8297), True, 'import tensorflow as tf\n'), ((3051, 3074), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3072, 3074), False, 'import datetime\n'), ((3804, 3827), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3825, 3827), False, 'import datetime\n'), ((2801, 2854), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', 'HEAD']"], {}), "(['git', 'rev-parse', 'HEAD'])\n", (2824, 2854), False, 'import subprocess\n')]
|
import argparse
import json
import numpy as np
import os
import torch
from datetime import datetime
from pathlib import Path
from sklearn import metrics
from evaluate import run_model
from loader import load_data
from model import TripleMRNet
def train(rundir, task, backbone, epochs, learning_rate, use_gpu,
abnormal_model_path=None):
train_loader, valid_loader = load_data(task, use_gpu)
model = TripleMRNet(backbone=backbone)
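# Resume support: scan the run directory for saved checkpoints and reload the
# newest one. The slice fname[27:] appears to assume filenames of the form
# 'val{auc}_train{auc}_epoch{n}', so the epoch digits start at character 27.
# Note: this reads args.rundir from module scope rather than the rundir argument.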
for dirpath, dirnames, files in os.walk(args.rundir):
if not files:
break
max_epoch = 0
model_path = None
for fname in files:
if fname.endswith(".json"):
continue
ep = int(fname[27:])
if ep >= max_epoch:
max_epoch = ep
model_path = os.path.join(dirpath, fname)
if model_path:
state_dict = torch.load(model_path, map_location=(None if use_gpu else 'cpu'))
model.load_state_dict(state_dict)
if use_gpu:
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), learning_rate, weight_decay=.01)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, factor=.3, threshold=1e-4)
best_val_loss = float('inf')
start_time = datetime.now()
epoch = 0
if max_epoch: epoch += max_epoch
while epoch < epochs:
change = datetime.now() - start_time
print('starting epoch {}. time passed: {}'.format(epoch+1, str(change)))
train_loss, train_auc, _, _ = run_model(
model, train_loader, train=True, optimizer=optimizer,
abnormal_model_path=abnormal_model_path)
print(f'train loss: {train_loss:0.4f}')
print(f'train AUC: {train_auc:0.4f}')
val_loss, val_auc, _, _ = run_model(model, valid_loader,
abnormal_model_path=abnormal_model_path)
print(f'valid loss: {val_loss:0.4f}')
print(f'valid AUC: {val_auc:0.4f}')
scheduler.step(val_loss)
# save every epoch
file_name = f'model_val{val_auc:0.4f}_train{train_auc:0.4f}_epoch{epoch+1}'
save_path = Path(rundir) / file_name
torch.save(model.state_dict(), save_path)
if val_loss < best_val_loss:
best_val_loss = val_loss
file_name = f'val{val_auc:0.4f}_train{train_auc:0.4f}_epoch{epoch+1}'
save_path = Path(rundir) / file_name
torch.save(model.state_dict(), save_path)
epoch += 1
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--rundir', type=str, required=True)
parser.add_argument('--task', type=str, required=True)
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--learning_rate', default=1e-05, type=float)
parser.add_argument('--weight_decay', default=0.01, type=float)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--max_patience', default=5, type=int)
parser.add_argument('--factor', default=0.3, type=float)
parser.add_argument('--backbone', default="alexnet", type=str)
parser.add_argument('--abnormal_model', default=None, type=str)
return parser
if __name__ == '__main__':
args = get_parser().parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gpu:
torch.cuda.manual_seed_all(args.seed)
# if args.task != "abnormal":
# if args.abnormal_model is None:
# raise ValueError("Enter abnormal model path for `acl` or `meniscus` task")
os.makedirs(args.rundir, exist_ok=True)
with open(Path(args.rundir) / 'args.json', 'w') as out:
json.dump(vars(args), out, indent=4)
train(args.rundir, args.task, args.backbone, args.epochs, args.learning_rate, args.gpu, abnormal_model_path=args.abnormal_model)
|
[
"evaluate.run_model"
] |
[((380, 404), 'loader.load_data', 'load_data', (['task', 'use_gpu'], {}), '(task, use_gpu)\n', (389, 404), False, 'from loader import load_data\n'), ((422, 452), 'model.TripleMRNet', 'TripleMRNet', ([], {'backbone': 'backbone'}), '(backbone=backbone)\n', (433, 452), False, 'from model import TripleMRNet\n'), ((489, 509), 'os.walk', 'os.walk', (['args.rundir'], {}), '(args.rundir)\n', (496, 509), False, 'import os\n'), ((1164, 1264), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'patience': '(5)', 'factor': '(0.3)', 'threshold': '(0.0001)'}), '(optimizer, patience=5, factor=\n 0.3, threshold=0.0001)\n', (1206, 1264), False, 'import torch\n'), ((1309, 1323), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1321, 1323), False, 'from datetime import datetime\n'), ((2584, 2609), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2607, 2609), False, 'import argparse\n'), ((3388, 3413), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3402, 3413), True, 'import numpy as np\n'), ((3418, 3446), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3435, 3446), False, 'import torch\n'), ((3681, 3720), 'os.makedirs', 'os.makedirs', (['args.rundir'], {'exist_ok': '(True)'}), '(args.rundir, exist_ok=True)\n', (3692, 3720), False, 'import os\n'), ((1575, 1683), 'evaluate.run_model', 'run_model', (['model', 'train_loader'], {'train': '(True)', 'optimizer': 'optimizer', 'abnormal_model_path': 'abnormal_model_path'}), '(model, train_loader, train=True, optimizer=optimizer,\n abnormal_model_path=abnormal_model_path)\n', (1584, 1683), False, 'from evaluate import run_model\n'), ((1843, 1914), 'evaluate.run_model', 'run_model', (['model', 'valid_loader'], {'abnormal_model_path': 'abnormal_model_path'}), '(model, valid_loader, abnormal_model_path=abnormal_model_path)\n', (1852, 1914), False, 'from evaluate import run_model\n'), ((3472, 3509), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (3498, 3509), False, 'import torch\n'), ((903, 966), 'torch.load', 'torch.load', (['model_path'], {'map_location': "(None if use_gpu else 'cpu')"}), "(model_path, map_location=None if use_gpu else 'cpu')\n", (913, 966), False, 'import torch\n'), ((1419, 1433), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1431, 1433), False, 'from datetime import datetime\n'), ((2196, 2208), 'pathlib.Path', 'Path', (['rundir'], {}), '(rundir)\n', (2200, 2208), False, 'from pathlib import Path\n'), ((817, 845), 'os.path.join', 'os.path.join', (['dirpath', 'fname'], {}), '(dirpath, fname)\n', (829, 845), False, 'import os\n'), ((2453, 2465), 'pathlib.Path', 'Path', (['rundir'], {}), '(rundir)\n', (2457, 2465), False, 'from pathlib import Path\n'), ((3740, 3757), 'pathlib.Path', 'Path', (['args.rundir'], {}), '(args.rundir)\n', (3744, 3757), False, 'from pathlib import Path\n')]
|
"""Runner for flow/utils/leaderboard/evaluate.py/evaluate_policy."""
from solution import BENCHMARK, get_actions, get_states
from evaluate import evaluate_policy
# Evaluate the solution
mean, stdev = evaluate_policy(
benchmark=BENCHMARK, _get_actions=get_actions, _get_states=get_states)
# Print results
print(mean, stdev)
|
[
"evaluate.evaluate_policy"
] |
[((202, 293), 'evaluate.evaluate_policy', 'evaluate_policy', ([], {'benchmark': 'BENCHMARK', '_get_actions': 'get_actions', '_get_states': 'get_states'}), '(benchmark=BENCHMARK, _get_actions=get_actions, _get_states=\n get_states)\n', (217, 293), False, 'from evaluate import evaluate_policy\n')]
|
from pathlib import Path
import numpy as np
import scipy
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from evaluate import evaluate
from lda import logger
from logistic_regression import LogisticRegressionVal
from utils import loadClean, writeResults, preprocessClfParser
INPUT_DIR = Path(r'../data/clean')
class TruncatedSVD(BaseEstimator, TransformerMixin):
def __init__(self, k):
self.k = k
self.U = None
self.Sigma = None
self.VT = None
def fit(self, X_train):
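# Compute the full SVD only once and cache the factors; transform() then
# projects onto the top-k right singular vectors (the first k rows of VT).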
if scipy.sparse.issparse(X_train):
X_train = X_train.toarray()
if self.VT is None:
self.U, self.Sigma, self.VT = np.linalg.svd(X_train)
return self
def transform(self, X_test):
if scipy.sparse.issparse(X_test):
X_test = X_test.toarray()
proj = self.VT[:self.k, :]
return (X_test @ proj.T)
def LSI(train_size, random_state):
subset = 'subset_%s' % train_size
input_dir = INPUT_DIR / subset
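# Candidate projection dimensions: ten values spaced between 100 and
# train_size - 1, truncated down to multiples of 100 (the set drops duplicates).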
K = set((np.linspace(100, train_size - 1, 10) / 100).astype(int) * 100)
X_train, X_test, y_train, y_test = loadClean(input_dir)
X_train_sub, X_val, y_train_sub, y_val = train_test_split(X_train, y_train,
test_size=0.2,
random_state=random_state)
tf_idf = TfidfTransformer()
X_train_sub = tf_idf.fit_transform(X_train_sub)
X_val = tf_idf.transform(X_val)
scaler = StandardScaler()
best_params = []
best_k, best_auc, best_acc = None, 0, 0
lsi = TruncatedSVD(k=0)
lsi.fit(X_train_sub)
for k in K:
lsi.k = k
print(k)
X_train_ = scaler.fit_transform(lsi.transform(X_train_sub))
X_val_ = scaler.transform(lsi.transform(X_val))
clf_val = LogisticRegressionVal(X_train_, y_train_sub, X_val_, y_val,
k, random_state=random_state)
best_k, best_auc, best_acc, best_params = clf_val.tune(best_k, best_auc,
best_acc, best_params)
clf, file_name, header = clf_val.bestClassifier(best_params)
lsi = TruncatedSVD(k=best_k) # Create a new one for the whole training set
preprocess = make_pipeline(tf_idf, lsi, scaler)
tr_time, tr_metrics, test_time, test_metrics = evaluate(preprocess, clf,
X_train, y_train,
X_test, y_test)
writeResults(file_name, header, 'lsi',
train_size, best_k, best_params,
tr_time, tr_metrics, test_time, test_metrics)
logger.info(("\tFor training size = %s, best column dimension k = %s "
"best parameter grid: %s (train AUC: {:.3f}, train acc: {:.3f};"
" test AUC: {:.3f}, test acc: {:.3f})").
format(tr_metrics[0], tr_metrics[1],
test_metrics[0], test_metrics[1])
% (train_size, best_k, best_params))
if __name__ == '__main__':
desc = ("Apply LSI as a preprocessing step, grid search for the best "
"sub-dimension and hyperparameters.")
parser = preprocessClfParser(desc)
args = parser.parse_args()
if args.all:
for train_size in np.linspace(1250, 25000, 20):
LSI(int(train_size), args.random_state)
else:
LSI(int(args.train_size), args.random_state)
|
[
"evaluate.evaluate"
] |
[((505, 526), 'pathlib.Path', 'Path', (['"""../data/clean"""'], {}), "('../data/clean')\n", (509, 526), False, 'from pathlib import Path\n'), ((1334, 1354), 'utils.loadClean', 'loadClean', (['input_dir'], {}), '(input_dir)\n', (1343, 1354), False, 'from utils import loadClean, writeResults, preprocessClfParser\n'), ((1400, 1476), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.2)', 'random_state': 'random_state'}), '(X_train, y_train, test_size=0.2, random_state=random_state)\n', (1416, 1476), False, 'from sklearn.model_selection import train_test_split\n'), ((1615, 1633), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (1631, 1633), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((1736, 1752), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1750, 1752), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2527, 2561), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['tf_idf', 'lsi', 'scaler'], {}), '(tf_idf, lsi, scaler)\n', (2540, 2561), False, 'from sklearn.pipeline import make_pipeline\n'), ((2613, 2672), 'evaluate.evaluate', 'evaluate', (['preprocess', 'clf', 'X_train', 'y_train', 'X_test', 'y_test'], {}), '(preprocess, clf, X_train, y_train, X_test, y_test)\n', (2621, 2672), False, 'from evaluate import evaluate\n'), ((2798, 2919), 'utils.writeResults', 'writeResults', (['file_name', 'header', '"""lsi"""', 'train_size', 'best_k', 'best_params', 'tr_time', 'tr_metrics', 'test_time', 'test_metrics'], {}), "(file_name, header, 'lsi', train_size, best_k, best_params,\n tr_time, tr_metrics, test_time, test_metrics)\n", (2810, 2919), False, 'from utils import loadClean, writeResults, preprocessClfParser\n'), ((3496, 3521), 'utils.preprocessClfParser', 'preprocessClfParser', (['desc'], {}), '(desc)\n', (3515, 3521), False, 'from utils import loadClean, writeResults, preprocessClfParser\n'), ((741, 771), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['X_train'], {}), '(X_train)\n', (762, 771), False, 'import scipy\n'), ((971, 1000), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['X_test'], {}), '(X_test)\n', (992, 1000), False, 'import scipy\n'), ((2067, 2161), 'logistic_regression.LogisticRegressionVal', 'LogisticRegressionVal', (['X_train_', 'y_train_sub', 'X_val_', 'y_val', 'k'], {'random_state': 'random_state'}), '(X_train_, y_train_sub, X_val_, y_val, k, random_state\n =random_state)\n', (2088, 2161), False, 'from logistic_regression import LogisticRegressionVal\n'), ((3597, 3625), 'numpy.linspace', 'np.linspace', (['(1250)', '(25000)', '(20)'], {}), '(1250, 25000, 20)\n', (3608, 3625), True, 'import numpy as np\n'), ((883, 905), 'numpy.linalg.svd', 'np.linalg.svd', (['X_train'], {}), '(X_train)\n', (896, 905), True, 'import numpy as np\n'), ((1231, 1267), 'numpy.linspace', 'np.linspace', (['(100)', '(train_size - 1)', '(10)'], {}), '(100, train_size - 1, 10)\n', (1242, 1267), True, 'import numpy as np\n')]
|
import os
import os.path as osp
import argparse
import numpy as np
import json
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
from coco_loader import coco_loader
from torchvision import models
from convcap import convcap
#from vggfeats import Vgg16Feats
#from resnet101 import Resnet101Feats
from resnet152 import Resnet152Feats
from evaluate import language_eval
def save_test_json(preds, resFile):
print('Writing %d predictions' % (len(preds)))
json.dump(preds, open(resFile, 'w'))
def test(args, split, modelfn=None, model_convcap=None, model_imgcnn=None):
"""Runs test on split=val/test with checkpoint file modelfn or loaded model_*"""
t_start = time.time()
data = coco_loader(args.coco_root, split=split, ncap_per_img=1)
print('[DEBUG] Loading %s data ... %f secs' % (split, time.time() - t_start))
data_loader = DataLoader(dataset=data, num_workers=args.nthreads,\
batch_size=args.batchsize, shuffle=False, drop_last=True)
batchsize = args.batchsize
max_tokens = data.max_tokens
num_batches = np.int_(np.floor((len(data.ids)*1.)/batchsize))
print('[DEBUG] Running inference on %s with %d batches' % (split, num_batches))
if(modelfn is not None):
#model_imgcnn = Vgg16Feats()
#model_imgcnn = Resnet101Feats()
model_imgcnn = Resnet152Feats()
model_imgcnn.cuda()
model_convcap = convcap(data.numwords, args.num_layers, is_attention=args.attention)
model_convcap.cuda()
print('[DEBUG] Loading checkpoint %s' % modelfn)
checkpoint = torch.load(modelfn)
model_convcap.load_state_dict(checkpoint['state_dict'])
model_imgcnn.load_state_dict(checkpoint['img_state_dict'])
else:
model_imgcnn = model_imgcnn
model_convcap = model_convcap
model_imgcnn.train(False)
model_convcap.train(False)
pred_captions = []
#Test epoch
for batch_idx, (imgs, _, _, _, img_ids) in \
tqdm(enumerate(data_loader), total=num_batches):
imgs = imgs.view(batchsize, 3, 224, 224)
imgs_v = Variable(imgs.cuda())
imgsfeats, imgsfc7 = model_imgcnn(imgs_v)
_, featdim, feat_h, feat_w = imgsfeats.size()
wordclass_feed = np.zeros((batchsize, max_tokens), dtype='int64')
wordclass_feed[:,0] = data.wordlist.index('<S>')
outcaps = np.empty((batchsize, 0)).tolist()
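# Greedy decoding: run the caption model once per time step, take the argmax
# word for each image, and feed it back as the input token for the next step.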
for j in range(max_tokens-1):
wordclass = Variable(torch.from_numpy(wordclass_feed)).cuda()
wordact, _ = model_convcap(imgsfeats, imgsfc7, wordclass)
wordact = wordact[:,:,:-1]
wordact_t = wordact.permute(0, 2, 1).contiguous().view(batchsize*(max_tokens-1), -1)
wordprobs = F.softmax(wordact_t).cpu().data.numpy()
wordids = np.argmax(wordprobs, axis=1)
for k in range(batchsize):
word = data.wordlist[wordids[j+k*(max_tokens-1)]]
outcaps[k].append(word)
if(j < max_tokens-1):
wordclass_feed[k, j+1] = wordids[j+k*(max_tokens-1)]
for j in range(batchsize):
num_words = len(outcaps[j])
if 'EOS' in outcaps[j]:
num_words = outcaps[j].index('EOS')
outcap = ' '.join(outcaps[j][:num_words])
pred_captions.append({'image_id': img_ids[j], 'caption': outcap})
scores = language_eval(pred_captions, args.model_dir, split)
model_imgcnn.train(True)
model_convcap.train(True)
return scores
|
[
"evaluate.language_eval"
] |
[((1018, 1029), 'time.time', 'time.time', ([], {}), '()\n', (1027, 1029), False, 'import time\n'), ((1039, 1095), 'coco_loader.coco_loader', 'coco_loader', (['args.coco_root'], {'split': 'split', 'ncap_per_img': '(1)'}), '(args.coco_root, split=split, ncap_per_img=1)\n', (1050, 1095), False, 'from coco_loader import coco_loader\n'), ((1193, 1307), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data', 'num_workers': 'args.nthreads', 'batch_size': 'args.batchsize', 'shuffle': '(False)', 'drop_last': '(True)'}), '(dataset=data, num_workers=args.nthreads, batch_size=args.\n batchsize, shuffle=False, drop_last=True)\n', (1203, 1307), False, 'from torch.utils.data import DataLoader\n'), ((3517, 3568), 'evaluate.language_eval', 'language_eval', (['pred_captions', 'args.model_dir', 'split'], {}), '(pred_captions, args.model_dir, split)\n', (3530, 3568), False, 'from evaluate import language_eval\n'), ((1632, 1648), 'resnet152.Resnet152Feats', 'Resnet152Feats', ([], {}), '()\n', (1646, 1648), False, 'from resnet152 import Resnet152Feats\n'), ((1695, 1763), 'convcap.convcap', 'convcap', (['data.numwords', 'args.num_layers'], {'is_attention': 'args.attention'}), '(data.numwords, args.num_layers, is_attention=args.attention)\n', (1702, 1763), False, 'from convcap import convcap\n'), ((1860, 1879), 'torch.load', 'torch.load', (['modelfn'], {}), '(modelfn)\n', (1870, 1879), False, 'import torch\n'), ((2478, 2526), 'numpy.zeros', 'np.zeros', (['(batchsize, max_tokens)'], {'dtype': '"""int64"""'}), "((batchsize, max_tokens), dtype='int64')\n", (2486, 2526), True, 'import numpy as np\n'), ((2998, 3026), 'numpy.argmax', 'np.argmax', (['wordprobs'], {'axis': '(1)'}), '(wordprobs, axis=1)\n', (3007, 3026), True, 'import numpy as np\n'), ((2596, 2620), 'numpy.empty', 'np.empty', (['(batchsize, 0)'], {}), '((batchsize, 0))\n', (2604, 2620), True, 'import numpy as np\n'), ((1152, 1163), 'time.time', 'time.time', ([], {}), '()\n', (1161, 1163), False, 'import time\n'), ((2692, 2724), 'torch.from_numpy', 'torch.from_numpy', (['wordclass_feed'], {}), '(wordclass_feed)\n', (2708, 2724), False, 'import torch\n'), ((2942, 2962), 'torch.nn.functional.softmax', 'F.softmax', (['wordact_t'], {}), '(wordact_t)\n', (2951, 2962), True, 'import torch.nn.functional as F\n')]
|
import tensorflow as tf
from train import train
from evaluate import evaluate
if __name__ == '__main__':
# Parameters
# Data loading parameters
tf.app.flags.DEFINE_float("dev_sample_rate", .05,
"Percentage of the training data to use for validation(default:0.05)")
tf.app.flags.DEFINE_string("train_data_path",
"../dataset/San_Francisco_Crime/train.csv.zip",
"Data source for the train data.")
# Model Hyperparameters
tf.app.flags.DEFINE_float("learning_rate", 5e-3,
"learning rate (default:0.001)")
tf.app.flags.DEFINE_integer("embedding_size", 128,
"Dimensionality of character embedding (default: 128)")
tf.app.flags.DEFINE_list("filters_size_list", [3, 4, 5],
"list type filter sizes (default: [3, 4, 5])")
tf.app.flags.DEFINE_integer("num_filters", 128,
"Number of filters per filter size (default: 128)")
tf.app.flags.DEFINE_float("dropout_keep_prob", .5,
"Dropout keep probability (default: 0.5)")
tf.app.flags.DEFINE_integer("num_classes", 39,
"number of classes (default: 39)")
# Training schemes
tf.app.flags.DEFINE_boolean("is_training", True,
"if True , the mode is training, False is eval(default:True")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200,
"Number of training epochs (default: 200)")
tf.app.flags.DEFINE_integer("max_to_keep", 5,
"tf.train.Saver(max_to_keep) (default:5)")
tf.app.flags.DEFINE_integer("evaluate_every", 100,
"Evaluate model on dev set after this many steps (default: 100)")
# Misc Parameters
tf.app.flags.DEFINE_boolean("allow_soft_placement", True,
"Allow device soft device placement")
tf.app.flags.DEFINE_boolean("log_device_placement", False,
"Log placement of ops on devices")
FLAGS = tf.app.flags.FLAGS
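# Dispatch on the is_training flag: train a new model or evaluate an existing one.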
if FLAGS.is_training:
train(FLAGS)
elif not FLAGS.is_training:
evaluate(FLAGS)
|
[
"evaluate.evaluate"
] |
[((157, 282), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""dev_sample_rate"""', '(0.05)', '"""Percentage of the training data to use for validation(default:0.05)"""'], {}), "('dev_sample_rate', 0.05,\n 'Percentage of the training data to use for validation(default:0.05)')\n", (182, 282), True, 'import tensorflow as tf\n'), ((312, 448), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_data_path"""', '"""../dataset/San_Francisco_Crime/train.csv.zip"""', '"""Data source for the train data."""'], {}), "('train_data_path',\n '../dataset/San_Francisco_Crime/train.csv.zip',\n 'Data source for the train data.')\n", (338, 448), True, 'import tensorflow as tf\n'), ((536, 622), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.005)', '"""learning rate (default:0.001)"""'], {}), "('learning_rate', 0.005,\n 'learning rate (default:0.001)')\n", (561, 622), True, 'import tensorflow as tf\n'), ((652, 762), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""embedding_size"""', '(128)', '"""Dimensionality of character embedding (default: 128)"""'], {}), "('embedding_size', 128,\n 'Dimensionality of character embedding (default: 128)')\n", (679, 762), True, 'import tensorflow as tf\n'), ((795, 902), 'tensorflow.app.flags.DEFINE_list', 'tf.app.flags.DEFINE_list', (['"""filters_size_list"""', '[3, 4, 5]', '"""list type filter sizes (default: [3, 4, 5])"""'], {}), "('filters_size_list', [3, 4, 5],\n 'list type filter sizes (default: [3, 4, 5])')\n", (819, 902), True, 'import tensorflow as tf\n'), ((932, 1035), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_filters"""', '(128)', '"""Number of filters per filter size (default: 128)"""'], {}), "('num_filters', 128,\n 'Number of filters per filter size (default: 128)')\n", (959, 1035), True, 'import tensorflow as tf\n'), ((1068, 1166), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""dropout_keep_prob"""', '(0.5)', '"""Dropout keep probability (default: 0.5)"""'], {}), "('dropout_keep_prob', 0.5,\n 'Dropout keep probability (default: 0.5)')\n", (1093, 1166), True, 'import tensorflow as tf\n'), ((1196, 1281), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_classes"""', '(39)', '"""number of classes (default: 39)"""'], {}), "('num_classes', 39,\n 'number of classes (default: 39)')\n", (1223, 1281), True, 'import tensorflow as tf\n'), ((1338, 1452), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""is_training"""', '(True)', '"""if True , the mode is training, False is eval(default:True"""'], {}), "('is_training', True,\n 'if True , the mode is training, False is eval(default:True')\n", (1365, 1452), True, 'import tensorflow as tf\n'), ((1486, 1559), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch Size (default: 64)"""'], {}), "('batch_size', 64, 'Batch Size (default: 64)')\n", (1513, 1559), True, 'import tensorflow as tf\n'), ((1597, 1687), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_epochs"""', '(200)', '"""Number of training epochs (default: 200)"""'], {}), "('num_epochs', 200,\n 'Number of training epochs (default: 200)')\n", (1620, 1687), True, 'import tensorflow as tf\n'), ((1717, 1809), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_to_keep"""', '(5)', '"""tf.train.Saver(max_to_keep) (default:5)"""'], {}), "('max_to_keep', 5,\n 'tf.train.Saver(max_to_keep) (default:5)')\n", (1744, 1809), True, 'import tensorflow as tf\n'), ((1842, 1962), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""evaluate_every"""', '(100)', '"""Evaluate model on dev set after this many steps (default: 100)"""'], {}), "('evaluate_every', 100,\n 'Evaluate model on dev set after this many steps (default: 100)')\n", (1869, 1962), True, 'import tensorflow as tf\n'), ((2018, 2117), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (2045, 2117), True, 'import tensorflow as tf\n'), ((2150, 2247), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (2177, 2247), True, 'import tensorflow as tf\n'), ((2343, 2355), 'train.train', 'train', (['FLAGS'], {}), '(FLAGS)\n', (2348, 2355), False, 'from train import train\n'), ((2396, 2411), 'evaluate.evaluate', 'evaluate', (['FLAGS'], {}), '(FLAGS)\n', (2404, 2411), False, 'from evaluate import evaluate\n')]
|
'''
@Author: dzy
@Date: 2021-09-13 11:07:48
@LastEditTime: 2021-09-26 20:25:17
@LastEditors: dzy
@Description: Helper functions or classes used for the model.
@FilePath: /JDProductSummaryGeneration/src/train.py
'''
import pickle
import os
import sys
import pathlib
import numpy as np
from torch import optim
from torch.utils.data import DataLoader
import torch
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
from tensorboardX import SummaryWriter
abs_path = pathlib.Path(__file__).parent.absolute()
sys.path.append(abs_path)
from dataset import PairDataset
from model import PGN
import config
from evaluate import evaluate
from dataset import collate_fn, SampleDataset
from utils import ScheduledSampler, config_info
def train(dataset, val_dataset, vocab, start_epoch=0):
"""Train the model, evaluate it and store it.
Args:
dataset (dataset.PairDataset): The training dataset.
val_dataset (dataset.PairDataset): The evaluation dataset.
vocab (vocab.Vocab): The vocabulary built from the training dataset.
start_epoch (int, optional): The starting epoch number. Defaults to 0.
"""
DEVICE = torch.device("cuda" if config.is_cuda else "cpu")
model = PGN(vocab)
model.load_model()
model.to(DEVICE)
if config.fine_tune:
# In fine-tuning mode, only attention.wc.weight keeps a gradient;
# every other model parameter is frozen.
print('Fine-tuning mode.')
for name, params in model.named_parameters():
if name != 'attention.wc.weight':
params.requires_grad=False
print("loading data")
train_data = SampleDataset(dataset.pairs, vocab)
val_data = SampleDataset(val_dataset.pairs, vocab)
print("initializing optimizer")
# Use the Adam optimizer
optimizer = optim.Adam(model.parameters(),
lr=config.learning_rate,
)
train_dataloader = DataLoader(dataset=train_data,
batch_size=config.batch_size,
shuffle=True,
collate_fn=collate_fn)
# Initialize the best validation loss to positive infinity
val_losses = np.inf
# Load the previously saved validation loss, if any
if (os.path.exists(config.losses_path)):
with open(config.losses_path, 'rb') as f:
val_losses = pickle.load(f)
# SummaryWriter saves logs for TensorboardX visualization
writer = SummaryWriter(config.log_path)
# scheduled_sampler: decides whether teacher forcing is used in each epoch
num_epochs = len(range(start_epoch, config.epochs))
scheduled_sampler = ScheduledSampler(num_epochs)
if config.scheduled_sampling:
print('scheduled_sampling mode.')
# Display training progress with tqdm
with tqdm(total=config.epochs) as epoch_progress:
for epoch in range(start_epoch, config.epochs):
# Switch the model to training mode
model.train()
# Losses of all batches in this epoch
batch_losses = []
# Number of batches in train_dataloader
num_batches = len(train_dataloader)
# set a teacher_forcing signal
if config.scheduled_sampling:
teacher_forcing = scheduled_sampler.teacher_forcing(epoch - start_epoch)
else:
teacher_forcing = True
print('teacher_forcing = {}'.format(teacher_forcing))
with tqdm(total = num_batches // config.update_loss_batch) as batch_progress:
for batch, data in enumerate(tqdm(train_dataloader)):
x, y, x_len, y_len, oov, len_oovs = data
assert not np.any(np.isnan(x.numpy()))
# Move the batch to the GPU
if config.is_cuda:
x = x.to(DEVICE)
y = y.to(DEVICE)
x_len = x_len.to(DEVICE)
len_oovs = len_oovs.to(DEVICE)
# Zero out the gradients
optimizer.zero_grad()
# Compute the model loss
loss = model(x, x_len, y, len_oovs, batch=batch, num_batches=num_batches,
teacher_forcing=teacher_forcing)
# Append to the list of batch losses
batch_losses.append(loss.item())
loss.backward()
# Clip gradients to prevent them from exploding
clip_grad_norm_(model.encoder.parameters(),
config.max_grad_norm)
clip_grad_norm_(model.decoder.parameters(),
config.max_grad_norm)
clip_grad_norm_(model.attention.parameters(),
config.max_grad_norm)
# Update the parameters
optimizer.step()
# Refresh the displayed loss every config.update_loss_batch batches
if (batch % config.update_loss_batch) == 0:
batch_progress.set_description(f'Epoch {epoch}')
batch_progress.set_postfix(Batch=batch,
Loss=loss.item())
batch_progress.update()
# Write loss for tensorboard.
writer.add_scalar(f'Average loss for epoch {epoch}',
np.mean(batch_losses),
global_step=batch)
# Average loss over all batches of the epoch
epoch_loss = np.mean(batch_losses)
epoch_progress.set_description(f'Epoch {epoch}')
epoch_progress.set_postfix(Loss=epoch_loss)
epoch_progress.update()
# Validate on the validation set and return the average validation loss
avg_val_loss = evaluate(model, val_data, epoch)
print('training loss:{}'.format(epoch_loss),
'validation loss:{}'.format(avg_val_loss))
# Track and persist the minimum validation loss
if (avg_val_loss < val_losses):
# Save the model with the lower validation loss
torch.save(model.encoder, config.encoder_save_name)
torch.save(model.decoder, config.decoder_save_name)
torch.save(model.attention, config.attention_save_name)
torch.save(model.reduce_state, config.reduce_state_save_name)
# Update the best validation loss
val_losses = avg_val_loss
# Persist the validation loss
with open(config.losses_path, 'wb') as f:
pickle.dump(val_losses, f)
writer.close()
if __name__ == "__main__":
DEVICE = torch.device('cuda') if config.is_cuda else torch.device('cpu')
dataset = PairDataset(config.data_path,
max_src_len=config.max_src_len,
max_tgt_len=config.max_tgt_len,
truncate_src=config.truncate_src,
truncate_tgt=config.truncate_tgt)
val_dataset = PairDataset(config.val_data_path,
max_src_len=config.max_src_len,
max_tgt_len=config.max_tgt_len,
truncate_src=config.truncate_src,
truncate_tgt=config.truncate_tgt)
vocab = dataset.build_vocab(embed_file=config.embed_file)
train(dataset, val_dataset, vocab, start_epoch=0)
|
[
"evaluate.evaluate"
] |
[((536, 561), 'sys.path.append', 'sys.path.append', (['abs_path'], {}), '(abs_path)\n', (551, 561), False, 'import sys\n'), ((1173, 1222), 'torch.device', 'torch.device', (["('cuda' if config.is_cuda else 'cpu')"], {}), "('cuda' if config.is_cuda else 'cpu')\n", (1185, 1222), False, 'import torch\n'), ((1236, 1246), 'model.PGN', 'PGN', (['vocab'], {}), '(vocab)\n', (1239, 1246), False, 'from model import PGN\n'), ((1612, 1647), 'dataset.SampleDataset', 'SampleDataset', (['dataset.pairs', 'vocab'], {}), '(dataset.pairs, vocab)\n', (1625, 1647), False, 'from dataset import collate_fn, SampleDataset\n'), ((1663, 1702), 'dataset.SampleDataset', 'SampleDataset', (['val_dataset.pairs', 'vocab'], {}), '(val_dataset.pairs, vocab)\n', (1676, 1702), False, 'from dataset import collate_fn, SampleDataset\n'), ((1914, 2015), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_data', 'batch_size': 'config.batch_size', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), '(dataset=train_data, batch_size=config.batch_size, shuffle=True,\n collate_fn=collate_fn)\n', (1924, 2015), False, 'from torch.utils.data import DataLoader\n'), ((2186, 2220), 'os.path.exists', 'os.path.exists', (['config.losses_path'], {}), '(config.losses_path)\n', (2200, 2220), False, 'import os\n'), ((2368, 2398), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['config.log_path'], {}), '(config.log_path)\n', (2381, 2398), False, 'from tensorboardX import SummaryWriter\n'), ((2550, 2578), 'utils.ScheduledSampler', 'ScheduledSampler', (['num_epochs'], {}), '(num_epochs)\n', (2566, 2578), False, 'from utils import ScheduledSampler, config_info\n'), ((6496, 6667), 'dataset.PairDataset', 'PairDataset', (['config.data_path'], {'max_src_len': 'config.max_src_len', 'max_tgt_len': 'config.max_tgt_len', 'truncate_src': 'config.truncate_src', 'truncate_tgt': 'config.truncate_tgt'}), '(config.data_path, max_src_len=config.max_src_len, max_tgt_len=\n config.max_tgt_len, truncate_src=config.truncate_src, truncate_tgt=\n config.truncate_tgt)\n', (6507, 6667), False, 'from dataset import PairDataset\n'), ((6781, 6954), 'dataset.PairDataset', 'PairDataset', (['config.val_data_path'], {'max_src_len': 'config.max_src_len', 'max_tgt_len': 'config.max_tgt_len', 'truncate_src': 'config.truncate_src', 'truncate_tgt': 'config.truncate_tgt'}), '(config.val_data_path, max_src_len=config.max_src_len,\n max_tgt_len=config.max_tgt_len, truncate_src=config.truncate_src,\n truncate_tgt=config.truncate_tgt)\n', (6792, 6954), False, 'from dataset import PairDataset\n'), ((2688, 2713), 'tqdm.tqdm', 'tqdm', ([], {'total': 'config.epochs'}), '(total=config.epochs)\n', (2692, 2713), False, 'from tqdm import tqdm\n'), ((6417, 6437), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6429, 6437), False, 'import torch\n'), ((6461, 6480), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6473, 6480), False, 'import torch\n'), ((479, 501), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (491, 501), False, 'import pathlib\n'), ((2298, 2312), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2309, 2312), False, 'import pickle\n'), ((5374, 5395), 'numpy.mean', 'np.mean', (['batch_losses'], {}), '(batch_losses)\n', (5381, 5395), True, 'import numpy as np\n'), ((5612, 5644), 'evaluate.evaluate', 'evaluate', (['model', 'val_data', 'epoch'], {}), '(model, val_data, epoch)\n', (5620, 5644), False, 'from evaluate import evaluate\n'), ((3320, 3371), 'tqdm.tqdm', 'tqdm', ([], {'total': '(num_batches // config.update_loss_batch)'}), '(total=num_batches // config.update_loss_batch)\n', (3324, 3371), False, 'from tqdm import tqdm\n'), ((5897, 5948), 'torch.save', 'torch.save', (['model.encoder', 'config.encoder_save_name'], {}), '(model.encoder, config.encoder_save_name)\n', (5907, 5948), False, 'import torch\n'), ((5965, 6016), 'torch.save', 'torch.save', (['model.decoder', 'config.decoder_save_name'], {}), '(model.decoder, config.decoder_save_name)\n', (5975, 6016), False, 'import torch\n'), ((6033, 6088), 'torch.save', 'torch.save', (['model.attention', 'config.attention_save_name'], {}), '(model.attention, config.attention_save_name)\n', (6043, 6088), False, 'import torch\n'), ((6105, 6166), 'torch.save', 'torch.save', (['model.reduce_state', 'config.reduce_state_save_name'], {}), '(model.reduce_state, config.reduce_state_save_name)\n', (6115, 6166), False, 'import torch\n'), ((6328, 6354), 'pickle.dump', 'pickle.dump', (['val_losses', 'f'], {}), '(val_losses, f)\n', (6339, 6354), False, 'import pickle\n'), ((3438, 3460), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {}), '(train_dataloader)\n', (3442, 3460), False, 'from tqdm import tqdm\n'), ((5227, 5248), 'numpy.mean', 'np.mean', (['batch_losses'], {}), '(batch_losses)\n', (5234, 5248), True, 'import numpy as np\n')]
|
import os
import re
import sys
sys.path.append('.')
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from scipy.ndimage.morphology import generate_binary_structure
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from lib.network.rtpose_vgg import get_model
from lib.network import im_transform
from lib.config import update_config, cfg
from evaluate.coco_eval import get_outputs, handle_paf_and_heat
from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans
from lib.utils.paf_to_pose import paf_to_pose_cpp
def compare(pose1,pose2):
diff = np.mean(abs(pose1-pose2))
return diff
def homography(P,Q,R,S,b):
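# Four-point homography via the direct linear transform: build the 8x8 system
# A h = b for the eight unknown entries of H (H[2,2] is fixed to 1), where
# P, Q, R, S are the source corners and b stacks the target x/y coordinates.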
A= np.zeros((8,8))
A[0,0:3]=P
A[1,3:6]=P
A[2,0:3]=Q
A[3,3:6]=Q
A[4,0:3]=R
A[5,3:6]=R
A[6,0:3]=S
A[7,3:6]=S
for j in range(0,4):
A[2*j,6:8]= -b[2*j] * A[2*j,0:2]
A[2*j+1,6:8]= -b[2*j+1] * A[2*j+1,3:5]
#print(A)
#Calculate the homography
h= np.dot(np.linalg.inv(A),np.transpose(b))
H= np.zeros((3,3))
H[0,:]= h[0:3]
H[1,:]= h[3:6]
H[2,0:2]= h[6:9]
H[2,2]=1
print(H)
return H
def map_figs(imgfill,img, paint, H):
#map the points
for col in range(0,imgfill.shape[1]):
for row in range(0,imgfill.shape[0]):
x= np.transpose(np.array([col,row,1]))
if (imgfill[row,col,1]>0):
Hinv = np.linalg.inv(H)
xproj = np.dot(Hinv, x)
xproj = xproj/xproj[2]
rowint =int(xproj[1])
colint =int(xproj[0])
img[row,col,:]= paint[rowint,colint,:]
return img
def map_keypoints(keypoints, H=None):
#map the points
if H is not None:
Hinv = np.linalg.inv(H)
mapped_keypoints= np.zeros((17,2))
cnt=0
for i in keypoints.keys():
col= keypoints[i].x #x
row= keypoints[i].y #y
x= np.transpose(np.array([col,row,1]))
if H is not None:
xproj = np.dot(Hinv, x)
xproj = xproj/xproj[2]
rowint =int(xproj[1])
colint =int(xproj[0])
else:
rowint = int(x[1])
colint = int(x[0])
if cnt<17:
mapped_keypoints[cnt,0]= colint
mapped_keypoints[cnt,1]= rowint
cnt+=1
return mapped_keypoints
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', help='experiment configure file name',
default='./experiments/vgg19_368x368_sgd.yaml', type=str)
parser.add_argument('--weight', type=str,
default='pose_model.pth')
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
# update config file
update_config(cfg, args)
model = get_model('vgg19')
model.load_state_dict(torch.load(args.weight))
model.float()
model.eval()
if __name__ == "__main__":
video_path = "/content/drive/MyDrive/pytorch_Realtime_Multi-Person_Pose_Estimation/student.mp4"
video_capture = cv2.VideoCapture(video_path)
frame_width = int(video_capture.get(3))
frame_height = int(video_capture.get(4))
out_video = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
video_test_path = "/content/drive/MyDrive/pytorch_Realtime_Multi-Person_Pose_Estimation/teacher.mp4"
video_capture2 = cv2.VideoCapture(video_test_path)
frame_width_2 = int(video_capture2.get(3))
frame_height_2 = int(video_capture2.get(4))
count = 0
# print(cv2.CAP_PROP_FRAME_HEIGHT)
while True:
# Capture frame-by-frame
# video_capture.set(cv2.CAP_PROP_POS_MSEC,(count * 10000))
count +=1
ret, oriImg = video_capture.read()
ret2, oriImg2 = video_capture2.read()
if ret == True and ret2 == True:
shape_dst = np.min(oriImg.shape[0:2])
shape_dst_2 = np.min(oriImg2.shape[0:2])
if count % 50 == 0:
with torch.no_grad():
paf, heatmap, imscale = get_outputs(
oriImg, model, 'rtpose')
paf2, heatmap2, imscale2 = get_outputs(
oriImg2, model, 'rtpose')
humans = paf_to_pose_cpp(heatmap, paf, cfg)
humans2 = paf_to_pose_cpp(heatmap2, paf2, cfg)
out = draw_humans(oriImg, humans)
image_h, image_w = oriImg.shape[:2]
bounding_boxes = []
bounding_boxes_2 = []
for human in humans:
bounding_box = human.get_upper_body_box(image_w, image_h) #
if bounding_box != None:
bounding_boxes.append(bounding_box)
for human in humans2:
bounding_box = human.get_upper_body_box(image_w, image_h) #
if bounding_box != None:
bounding_boxes_2.append(bounding_box)
# for i in human.body_parts.keys():
# print (i, " : " , "x: ", human.body_parts[i].x, "y: ", human.body_parts[i].y) 0-17
if bounding_boxes == None or len(bounding_boxes) == 0:
out_video.write(oriImg)
continue
pbox_x= bounding_boxes[0]["x"]
pbox_y= bounding_boxes[0]["y"]
pbox_w= bounding_boxes[0]["w"]
pbox_h= bounding_boxes[0]["h"]
P= np.array([max(0,pbox_x- pbox_w/2), max(0,pbox_y- pbox_h/2),1])
Q= np.array([min(image_w,pbox_x+ pbox_w/2), max(0,pbox_y- pbox_h/2),1])
R= np.array([max(0,pbox_x- pbox_w/2),min(image_h, pbox_y+pbox_h/2),1])
S= np.array([min(image_w,pbox_x+ pbox_w/2),min(image_h, pbox_y+pbox_h/2),1])
#Teacher's bbox location
b= np.zeros((8))
tbox_x= bounding_boxes_2[0]["x"]
tbox_y= bounding_boxes_2[0]["y"]
tbox_w= bounding_boxes_2[0]["w"]
tbox_h= bounding_boxes_2[0]["h"]
b= np.array([max(0,tbox_x- tbox_w/2), max(0,tbox_y- tbox_h/2),min(image_w,tbox_x+ tbox_w/2), max(0,tbox_y- tbox_h/2),max(0,tbox_x- tbox_w/2),min(image_h, tbox_y+tbox_h/2),min(image_w,tbox_x+ tbox_w/2),min(image_h, tbox_y+tbox_h/2)])
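# Fit a homography mapping the student's upper-body box corners onto the
# teacher's, then score the pose as the mean absolute difference between the
# raw keypoints and the same keypoints pushed through the inverse homography.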
H= homography(P,Q,R,S, b)
mapped_keypoints1 = map_keypoints(humans[0].body_parts)
mapped_keypoints2 = map_keypoints(humans[0].body_parts,H)
score= compare(mapped_keypoints1, mapped_keypoints2)
print('frame ', count, ', distance=',score)
if score > 80:
cv2.imwrite("student_l.png",oriImg)
cv2.imwrite("teacher_l.png",oriImg2)
if score < 10:
cv2.imwrite("student_s.png",oriImg)
cv2.imwrite("teacher_s.png",oriImg2)
out_video.write(out)
out_video.write(out)
else:
out_video.write(oriImg)
# Display the resulting frame
#cv2.imwrite('Video', out)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
[
"evaluate.coco_eval.get_outputs"
] |
[((31, 51), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (46, 51), False, 'import sys\n'), ((2565, 2590), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2588, 2590), False, 'import argparse\n'), ((3055, 3079), 'lib.config.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (3068, 3079), False, 'from lib.config import update_config, cfg\n'), ((3092, 3110), 'lib.network.rtpose_vgg.get_model', 'get_model', (['"""vgg19"""'], {}), "('vgg19')\n", (3101, 3110), False, 'from lib.network.rtpose_vgg import get_model\n'), ((893, 909), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (901, 909), True, 'import numpy as np\n'), ((1250, 1266), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1258, 1266), True, 'import numpy as np\n'), ((2003, 2020), 'numpy.zeros', 'np.zeros', (['(17, 2)'], {}), '((17, 2))\n', (2011, 2020), True, 'import numpy as np\n'), ((3138, 3161), 'torch.load', 'torch.load', (['args.weight'], {}), '(args.weight)\n', (3148, 3161), False, 'import torch\n'), ((3340, 3368), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (3356, 3368), False, 'import cv2\n'), ((3703, 3736), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_test_path'], {}), '(video_test_path)\n', (3719, 3736), False, 'import cv2\n'), ((7402, 7425), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7423, 7425), False, 'import cv2\n'), ((1208, 1224), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (1221, 1224), True, 'import numpy as np\n'), ((1225, 1240), 'numpy.transpose', 'np.transpose', (['b'], {}), '(b)\n', (1237, 1240), True, 'import numpy as np\n'), ((1964, 1980), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (1977, 1980), True, 'import numpy as np\n'), ((3502, 3544), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (3524, 3544), False, 'import cv2\n'), ((2147, 2170), 'numpy.array', 'np.array', (['[col, row, 1]'], {}), '([col, row, 1])\n', (2155, 2170), True, 'import numpy as np\n'), ((2214, 2229), 'numpy.dot', 'np.dot', (['Hinv', 'x'], {}), '(Hinv, x)\n', (2220, 2229), True, 'import numpy as np\n'), ((4172, 4197), 'numpy.min', 'np.min', (['oriImg.shape[0:2]'], {}), '(oriImg.shape[0:2])\n', (4178, 4197), True, 'import numpy as np\n'), ((4222, 4248), 'numpy.min', 'np.min', (['oriImg2.shape[0:2]'], {}), '(oriImg2.shape[0:2])\n', (4228, 4248), True, 'import numpy as np\n'), ((1542, 1565), 'numpy.array', 'np.array', (['[col, row, 1]'], {}), '([col, row, 1])\n', (1550, 1565), True, 'import numpy as np\n'), ((1627, 1643), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (1640, 1643), True, 'import numpy as np\n'), ((1668, 1683), 'numpy.dot', 'np.dot', (['Hinv', 'x'], {}), '(Hinv, x)\n', (1674, 1683), True, 'import numpy as np\n'), ((4537, 4571), 'lib.utils.paf_to_pose.paf_to_pose_cpp', 'paf_to_pose_cpp', (['heatmap', 'paf', 'cfg'], {}), '(heatmap, paf, cfg)\n', (4552, 4571), False, 'from lib.utils.paf_to_pose import paf_to_pose_cpp\n'), ((4594, 4630), 'lib.utils.paf_to_pose.paf_to_pose_cpp', 'paf_to_pose_cpp', (['heatmap2', 'paf2', 'cfg'], {}), '(heatmap2, paf2, cfg)\n', (4609, 4630), False, 'from lib.utils.paf_to_pose import paf_to_pose_cpp\n'), ((4649, 4676), 'lib.utils.common.draw_humans', 'draw_humans', (['oriImg', 'humans'], {}), '(oriImg, humans)\n', (4660, 4676), False, 'from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans\n'), ((6030, 6041), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (6038, 6041), True, 'import numpy as np\n'), ((4296, 4311), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4309, 4311), False, 'import torch\n'), ((4353, 4389), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['oriImg', 'model', '"""rtpose"""'], {}), "(oriImg, model, 'rtpose')\n", (4364, 4389), False, 'from evaluate.coco_eval import get_outputs, handle_paf_and_heat\n'), ((4454, 4491), 'evaluate.coco_eval.get_outputs', 'get_outputs', (['oriImg2', 'model', '"""rtpose"""'], {}), "(oriImg2, model, 'rtpose')\n", (4465, 4491), False, 'from evaluate.coco_eval import get_outputs, handle_paf_and_heat\n'), ((6809, 6845), 'cv2.imwrite', 'cv2.imwrite', (['"""student_l.png"""', 'oriImg'], {}), "('student_l.png', oriImg)\n", (6820, 6845), False, 'import cv2\n'), ((6859, 6896), 'cv2.imwrite', 'cv2.imwrite', (['"""teacher_l.png"""', 'oriImg2'], {}), "('teacher_l.png', oriImg2)\n", (6870, 6896), False, 'import cv2\n'), ((6937, 6973), 'cv2.imwrite', 'cv2.imwrite', (['"""student_s.png"""', 'oriImg'], {}), "('student_s.png', oriImg)\n", (6948, 6973), False, 'import cv2\n'), ((6987, 7024), 'cv2.imwrite', 'cv2.imwrite', (['"""teacher_s.png"""', 'oriImg2'], {}), "('teacher_s.png', oriImg2)\n", (6998, 7024), False, 'import cv2\n'), ((7233, 7247), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7244, 7247), False, 'import cv2\n')]
|
"""
Model training and evaluation.
The model is checkpointed when (1) the mean training loss drops below 0.1
or (2) the last epoch is reached. The best model is saved in the experiment folder.
"""
from evaluate import evaluate
from time import time
import utils as ut
import numpy as np
import csv
import os
#from tqdm import tqdm
def train(model, optimizer, loss_fn, data_iter_tr):
model.train()
encoded_list = []
encoded_avg_list = []
loss_list = []
mrn_list = []
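    # each batch pairs a list of MRNs with matched EHR and PRS tensors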
for idx, (list_mrn, list_batch_ehr, list_batch_prs) in enumerate(data_iter_tr):
loss_batch = []
        for batch, mrn in zip(zip(list_batch_ehr, list_batch_prs), list_mrn):
# print(batch[0])
batch_ehr, batch_prs = batch[0], batch[1]
batch_ehr = batch_ehr.cuda()
batch_prs = batch_prs.cuda()
#print('batch_ehr: ', batch_ehr)
#print('batch_prs: ', batch_prs)
optimizer.zero_grad()
out_ehr, out_prs, encoded_vect = model(batch_ehr, batch_prs)
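            # reconstruct both modalities and sum their losses below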
loss_1 = loss_fn(out_ehr, batch_ehr)
loss_2 = loss_fn(out_prs, batch_prs)
loss = loss_1 + loss_2
loss.backward()
optimizer.step()
loss_batch.append(loss.item())
encoded_avg_list.append(
np.mean(encoded_vect.tolist(), axis=0).tolist())
encoded_list.append(encoded_vect.tolist())
mrn_list.append(mrn)
loss_list.append(np.mean(loss_batch))
        # log training progress every 10 batches
if idx % 10 == 0:
print('Training in progress: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(idx * len(list_mrn), len(data_iter_tr.dataset),
100. * idx / len(data_iter_tr), loss))
loss_mean = np.mean(loss_list)
return mrn_list, encoded_list, encoded_avg_list, loss_mean
def train_and_evaluate(model, data_iter_tr, data_iter_ts,
loss_fn, optimizer, metrics, exp_dir):
loss_vect = []
n_epoch = ut.model_param['num_epochs']
for epoch in range(1, n_epoch + 1):
print('Epoch {0} of {1}'.format(epoch, n_epoch))
start = time()
mrn, encoded, encoded_avg, loss_mean = train(
model, optimizer, loss_fn, data_iter_tr)
print('-- time = ', round(time() - start, 3))
print('-- mean loss: {0}'.format(round(loss_mean, 3)))
loss_vect.append(loss_mean)
is_best_1 = loss_mean < 0.1
is_best_2 = epoch == n_epoch
if is_best_1 or is_best_2:
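            # dump the encodings, metrics and loss history, then checkpoint the model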
outfile = os.path.join(exp_dir, 'TRconvae-avg_vect.csv')
with open(outfile, 'w') as f:
wr = csv.writer(f)
for m, e in zip(mrn, encoded_avg):
wr.writerow([m] + list(e))
outfile = os.path.join(exp_dir, 'TRconvae_vect.csv')
with open(outfile, 'w') as f:
wr = csv.writer(f)
for m, evs in zip(mrn, encoded):
for e in evs:
wr.writerow([m] + e)
outfile = os.path.join(exp_dir, 'TRmetrics.txt')
with open(outfile, 'w') as f:
f.write('Mean Loss: %.3f\n' % loss_mean)
outfile = os.path.join(exp_dir, 'TRlosses.csv')
with open(outfile, 'w') as f:
wr = csv.writer(f)
wr.writerow(['Epoch', 'Loss'])
for idx, l in enumerate(loss_vect):
wr.writerow([idx, l])
print('\nFound new best model at epoch {0}'.format(epoch))
ut.save_best_model(epoch, model, optimizer, loss_mean, exp_dir)
print('\nEvaluating the model')
mrn, encoded, encoded_avg, test_metrics = evaluate(
model, loss_fn, data_iter_ts, metrics, best_eval=True)
return mrn, encoded, encoded_avg, test_metrics
|
[
"evaluate.evaluate"
] |
[((1764, 1782), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (1771, 1782), True, 'import numpy as np\n'), ((2145, 2151), 'time.time', 'time', ([], {}), '()\n', (2149, 2151), False, 'from time import time\n'), ((1464, 1483), 'numpy.mean', 'np.mean', (['loss_batch'], {}), '(loss_batch)\n', (1471, 1483), True, 'import numpy as np\n'), ((2544, 2590), 'os.path.join', 'os.path.join', (['exp_dir', '"""TRconvae-avg_vect.csv"""'], {}), "(exp_dir, 'TRconvae-avg_vect.csv')\n", (2556, 2590), False, 'import os\n'), ((2789, 2831), 'os.path.join', 'os.path.join', (['exp_dir', '"""TRconvae_vect.csv"""'], {}), "(exp_dir, 'TRconvae_vect.csv')\n", (2801, 2831), False, 'import os\n'), ((3060, 3098), 'os.path.join', 'os.path.join', (['exp_dir', '"""TRmetrics.txt"""'], {}), "(exp_dir, 'TRmetrics.txt')\n", (3072, 3098), False, 'import os\n'), ((3221, 3258), 'os.path.join', 'os.path.join', (['exp_dir', '"""TRlosses.csv"""'], {}), "(exp_dir, 'TRlosses.csv')\n", (3233, 3258), False, 'import os\n'), ((3561, 3624), 'utils.save_best_model', 'ut.save_best_model', (['epoch', 'model', 'optimizer', 'loss_mean', 'exp_dir'], {}), '(epoch, model, optimizer, loss_mean, exp_dir)\n', (3579, 3624), True, 'import utils as ut\n'), ((3724, 3787), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', 'data_iter_ts', 'metrics'], {'best_eval': '(True)'}), '(model, loss_fn, data_iter_ts, metrics, best_eval=True)\n', (3732, 3787), False, 'from evaluate import evaluate\n'), ((2654, 2667), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2664, 2667), False, 'import csv\n'), ((2895, 2908), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2905, 2908), False, 'import csv\n'), ((3322, 3335), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3332, 3335), False, 'import csv\n'), ((2293, 2299), 'time.time', 'time', ([], {}), '()\n', (2297, 2299), False, 'from time import time\n')]
|
"""Train Neural Network
Adapted from CS230 code examples for computer vision.
Source: https://github.com/cs230-stanford/cs230-code-examples/tree/master/pytorch
"""
import argparse
import logging
import os
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from tqdm import tqdm
import deep_net_utils
import model.data_loader as data_loader
from evaluate import evaluate
import loss_and_metrics
import model.cnn as cnn
import model.regular_neural_net as nn
import model.cnnv2 as cnnv2
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/example',
help="Directory containing the dataset")
parser.add_argument('--model_dir', default='experiments/example_trans_learning',
help="Directory containing params.json")
parser.add_argument('--net', default='resnet',
help="The name of the neural network")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training") # 'best' or 'train'
def get_desired_model(args, params):
if args.net == 'fcnn':
return nn.Net(params).cuda() if params.cuda else nn.Net(params)
return cnn.Net(args, params).cuda() if params.cuda else cnn.Net(args, params)
def train(model, optimizer, loss_fn, dataloader, metrics, params):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
num_steps: (int) number of batches to train on, each of size params.batch_size
"""
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = deep_net_utils.RunningAverage()
# Use tqdm for progress bar
with tqdm(total=len(dataloader)) as t:
for i, (train_batch, labels_batch) in enumerate(dataloader):
# move to GPU if available
if params.cuda:
train_batch, labels_batch = train_batch.cuda(
non_blocking=True), labels_batch.cuda(non_blocking=True)
# convert to torch Variables
train_batch, labels_batch = Variable(
train_batch), Variable(labels_batch)
# compute model output and loss
output_batch = model(train_batch)
loss = loss_fn(output_batch, labels_batch)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# extract data from torch Variable, move to cpu, convert to numpy arrays
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
# compute all metrics on this batch
summary_batch = {metric: metrics[metric](output_batch, labels_batch)
for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# update the average loss
loss_avg.update(loss.item())
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
t.update()
# compute mean of all metrics in summary
metrics_mean = {metric: np.mean([x[metric]
for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v)
for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer, loss_fn, metrics, params, model_dir,
restore_file=None):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
train_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
val_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches validation data
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
model_dir: (string) directory containing config, weights and log
restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)
"""
# reload weights from restore_file if specified
if restore_file is not None:
restore_path = os.path.join(
args.model_dir, args.restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
deep_net_utils.load_checkpoint(restore_path, model, optimizer)
best_val_macro_f1 = 0.0
for epoch in range(params.num_epochs):
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# compute number of batches in one epoch (one full pass over the training set)
train(model, optimizer, loss_fn, train_dataloader, metrics, params)
# Evaluate for one epoch on validation set
val_metrics = evaluate(model, loss_fn, val_dataloader, metrics, params)
val_macro_f1 = val_metrics['macro f1']
is_best = val_macro_f1 >= best_val_macro_f1
# Save weights
deep_net_utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict': optimizer.state_dict()},
is_best=is_best,
checkpoint=model_dir)
# If best_eval, best_save_path
if is_best:
logging.info("- Found new best macro f1")
best_val_macro_f1 = val_macro_f1
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(
model_dir, "metrics_val_best_weights.json")
deep_net_utils.save_dict_to_json(val_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(
model_dir, "metrics_val_last_weights.json")
deep_net_utils.save_dict_to_json(val_metrics, last_json_path)
if __name__ == '__main__':
# Load the parameters from json file
args = parser.parse_args()
json_path = os.path.join(args.model_dir, "params.json")
assert os.path.isfile(
json_path), "No json configuration file found at {}".format(json_path)
params = deep_net_utils.Params(json_path)
# use GPU if available
params.cuda = torch.cuda.is_available()
# Set the random seed for reproducible experiments
torch.manual_seed(229)
if params.cuda:
torch.cuda.manual_seed(229)
# Set the logger
deep_net_utils.set_logger(os.path.join(args.model_dir, 'train.log'))
# Create the input data pipeline
logging.info("Loading the datasets...")
# fetch dataloaders
dataloaders = data_loader.fetch_dataloader(
['train', 'val'], args.data_dir, params)
train_dl = dataloaders['train']
val_dl = dataloaders['val']
logging.info("- done.")
# model selected is based on args.net
#model = get_desired_model(args, params)
#print(model)
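    # cnnv2.initialize_model builds a pretrained backbone named by --net and fine-tunes all of it (feature_extract=False)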
model = cnnv2.initialize_model(model_name=args.net, num_classes=3, feature_extract=False, use_pretrained=True)
    if params.cuda: model = model.cuda()
print(model)
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
# fetch loss function and metrics
#loss_fn = loss_and_metrics.loss_fn
#metrics = loss_and_metrics.metrics
loss_fn = torch.nn.CrossEntropyLoss()
metrics = cnnv2.metrics
# Train the model
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
train_and_evaluate(model, train_dl, val_dl, optimizer, loss_fn, metrics, params, args.model_dir,
args.restore_file)
|
[
"evaluate.evaluate"
] |
[((544, 569), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (567, 569), False, 'import argparse\n'), ((2222, 2253), 'deep_net_utils.RunningAverage', 'deep_net_utils.RunningAverage', ([], {}), '()\n', (2251, 2253), False, 'import deep_net_utils\n'), ((4205, 4255), 'logging.info', 'logging.info', (["('- Train metrics: ' + metrics_string)"], {}), "('- Train metrics: ' + metrics_string)\n", (4217, 4255), False, 'import logging\n'), ((7219, 7262), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (7231, 7262), False, 'import os\n'), ((7274, 7299), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (7288, 7299), False, 'import os\n'), ((7382, 7414), 'deep_net_utils.Params', 'deep_net_utils.Params', (['json_path'], {}), '(json_path)\n', (7403, 7414), False, 'import deep_net_utils\n'), ((7461, 7486), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7484, 7486), False, 'import torch\n'), ((7547, 7569), 'torch.manual_seed', 'torch.manual_seed', (['(229)'], {}), '(229)\n', (7564, 7569), False, 'import torch\n'), ((7763, 7802), 'logging.info', 'logging.info', (['"""Loading the datasets..."""'], {}), "('Loading the datasets...')\n", (7775, 7802), False, 'import logging\n'), ((7846, 7915), 'model.data_loader.fetch_dataloader', 'data_loader.fetch_dataloader', (["['train', 'val']", 'args.data_dir', 'params'], {}), "(['train', 'val'], args.data_dir, params)\n", (7874, 7915), True, 'import model.data_loader as data_loader\n'), ((7998, 8021), 'logging.info', 'logging.info', (['"""- done."""'], {}), "('- done.')\n", (8010, 8021), False, 'import logging\n'), ((8141, 8248), 'model.cnnv2.initialize_model', 'cnnv2.initialize_model', ([], {'model_name': 'args.net', 'num_classes': '(3)', 'feature_extract': '(False)', 'use_pretrained': '(True)'}), '(model_name=args.net, num_classes=3, feature_extract=\n False, use_pretrained=True)\n', (8163, 8248), True, 'import model.cnnv2 as cnnv2\n'), ((8509, 8536), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (8534, 8536), False, 'import torch\n'), ((1344, 1365), 'model.cnn.Net', 'cnn.Net', (['args', 'params'], {}), '(args, params)\n', (1351, 1365), True, 'import model.cnn as cnn\n'), ((3980, 4014), 'numpy.mean', 'np.mean', (['[x[metric] for x in summ]'], {}), '([x[metric] for x in summ])\n', (3987, 4014), True, 'import numpy as np\n'), ((5350, 5410), 'os.path.join', 'os.path.join', (['args.model_dir', "(args.restore_file + '.pth.tar')"], {}), "(args.model_dir, args.restore_file + '.pth.tar')\n", (5362, 5410), False, 'import os\n'), ((5506, 5568), 'deep_net_utils.load_checkpoint', 'deep_net_utils.load_checkpoint', (['restore_path', 'model', 'optimizer'], {}), '(restore_path, model, optimizer)\n', (5536, 5568), False, 'import deep_net_utils\n'), ((5977, 6034), 'evaluate.evaluate', 'evaluate', (['model', 'loss_fn', 'val_dataloader', 'metrics', 'params'], {}), '(model, loss_fn, val_dataloader, metrics, params)\n', (5985, 6034), False, 'from evaluate import evaluate\n'), ((6962, 7018), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_last_weights.json"""'], {}), "(model_dir, 'metrics_val_last_weights.json')\n", (6974, 7018), False, 'import os\n'), ((7040, 7101), 'deep_net_utils.save_dict_to_json', 'deep_net_utils.save_dict_to_json', (['val_metrics', 'last_json_path'], {}), '(val_metrics, last_json_path)\n', (7072, 7101), False, 'import deep_net_utils\n'), ((7598, 7625), 'torch.cuda.manual_seed', 
'torch.cuda.manual_seed', (['(229)'], {}), '(229)\n', (7620, 7625), False, 'import torch\n'), ((7678, 7719), 'os.path.join', 'os.path.join', (['args.model_dir', '"""train.log"""'], {}), "(args.model_dir, 'train.log')\n", (7690, 7719), False, 'import os\n'), ((1269, 1283), 'model.regular_neural_net.Net', 'nn.Net', (['params'], {}), '(params)\n', (1275, 1283), True, 'import model.regular_neural_net as nn\n'), ((6525, 6566), 'logging.info', 'logging.info', (['"""- Found new best macro f1"""'], {}), "('- Found new best macro f1')\n", (6537, 6566), False, 'import logging\n'), ((6716, 6772), 'os.path.join', 'os.path.join', (['model_dir', '"""metrics_val_best_weights.json"""'], {}), "(model_dir, 'metrics_val_best_weights.json')\n", (6728, 6772), False, 'import os\n'), ((6802, 6863), 'deep_net_utils.save_dict_to_json', 'deep_net_utils.save_dict_to_json', (['val_metrics', 'best_json_path'], {}), '(val_metrics, best_json_path)\n', (6834, 6863), False, 'import deep_net_utils\n'), ((1295, 1316), 'model.cnn.Net', 'cnn.Net', (['args', 'params'], {}), '(args, params)\n', (1302, 1316), True, 'import model.cnn as cnn\n'), ((2686, 2707), 'torch.autograd.Variable', 'Variable', (['train_batch'], {}), '(train_batch)\n', (2694, 2707), False, 'from torch.autograd import Variable\n'), ((2726, 2748), 'torch.autograd.Variable', 'Variable', (['labels_batch'], {}), '(labels_batch)\n', (2734, 2748), False, 'from torch.autograd import Variable\n'), ((1227, 1241), 'model.regular_neural_net.Net', 'nn.Net', (['params'], {}), '(params)\n', (1233, 1241), True, 'import model.regular_neural_net as nn\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: penghuailiang
# @Date : 2019-09-20
import utils
import ops
import logging
import torch
import align
import cv2
import os
import util.logit as log
import numpy as np
from dataset import FaceDataset
from imitator import Imitator
from extractor import Extractor
from evaluate import Evaluate
from parse import parser
import torchvision.transforms as transforms
def init_device(arguments):
"""
    Check whether the configuration and hardware support the GPU.
    :param arguments: configuration
    :return: True (together with the CUDA device) if the GPU is supported
"""
support_gpu = torch.cuda.is_available()
log.info("neural face network use gpu: %s", support_gpu and arguments.use_gpu)
if support_gpu and arguments.use_gpu:
if not arguments.gpuid:
arguments.gpuid = 0
dev = torch.device("cuda:%d" % arguments.gpuid)
return True, dev
else:
dev = torch.device("cpu")
return False, dev
if __name__ == '__main__':
"""
    Program entry point.
"""
args = parser.parse_args()
log.init("FaceNeural", logging.INFO, log_path="./output/neural_log.txt")
cuda, device = init_device(args)
if args.phase == "train_imitator":
log.info('imitator train mode')
imitator = Imitator("neural imitator", args)
if cuda:
imitator.cuda()
imitator.batch_train(cuda)
elif args.phase == "train_extractor":
log.info('feature extractor train mode')
extractor = Extractor("neural extractor", args)
if cuda:
extractor.cuda()
extractor.batch_train(cuda)
elif args.phase == "inference_imitator":
log.info("inference imitator")
imitator = Imitator("neural imitator", args, clean=False)
if cuda:
imitator.cuda()
imitator.load_checkpoint(args.imitator_model, True, cuda=cuda)
elif args.phase == "prev_imitator":
log.info("preview imitator")
imitator = Imitator("neural imitator", args, clean=False)
imitator.load_checkpoint(args.imitator_model, False, cuda=False)
dataset = FaceDataset(args)
name, param, img = dataset.get_picture()
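        # render a face from one parameter vector and write it out as an image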
param = np.array(param, dtype=np.float32)
b_param = param[np.newaxis, :]
log.info(b_param.shape)
t_param = torch.from_numpy(b_param)
output = imitator(t_param)
output = output.cpu().detach().numpy()
output = np.squeeze(output, axis=0)
output = output.swapaxes(0, 2) * 255
cv2.imwrite('./output/{0}.jpg'.format(name), output)
elif args.phase == "inference_extractor":
log.info("inference extractor")
extractor = Extractor("neural extractor", args)
if cuda:
extractor.cuda()
extractor.load_checkpoint("model_extractor_845000.pth", True, cuda)
elif args.phase == "lightcnn":
log.info("light cnn test")
lightcnn_inst = utils.load_lightcnn(args.lightcnn, cuda)
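        # compute a LightCNN feature-space distance between two grayscale 128x128 faces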
transform = transforms.Compose([transforms.ToTensor()])
im1 = cv2.imread('../export/star/a-rb1.jpg', cv2.IMREAD_GRAYSCALE)
im2 = cv2.imread('../export/star/a-lyf.jpg', cv2.IMREAD_GRAYSCALE)
im1 = cv2.resize(im1, dsize=(128, 128), interpolation=cv2.INTER_LINEAR)
im2 = cv2.resize(im2, dsize=(128, 128), interpolation=cv2.INTER_LINEAR)
im1 = np.reshape(im1, (128, 128, 1))
im2 = np.reshape(im2, (128, 128, 1))
img = transform(im1).view(1, 1, 128, 128)
img2 = transform(im2).view(1, 1, 128, 128)
features = utils.discriminative_loss(img, img2, lightcnn_inst)
log.info("loss feature:{0}".format(features))
elif args.phase == "faceparsing":
log.info("faceparsing")
im = utils.evalute_face("./output/face/db_0000_3.jpg", args.parsing_checkpoint, cuda)
cv2.imwrite("./output/eval.jpg", im)
elif args.phase == "align":
path = '../export/star'
for file in os.listdir(path):
p = os.path.join(path, file)
log.info(p)
p2 = os.path.join(path, "a_" + file)
al = align.face_features(p, p2)
ev = utils.faceparsing_ndarray(al, args.parsing_checkpoint, cuda=cuda)
elif args.phase == "dataset":
dataset = FaceDataset(args, "test")
dataset.pre_process(cuda)
elif args.phase == "preview":
log.info("preview picture")
path = "../export/regular/model.jpg"
img = cv2.imread(path)
img2 = utils.faceparsing_ndarray(img, args.parsing_checkpoint, cuda)
img3 = utils.img_edge(img2)
img3_ = ops.fill_gray(img3)
img4 = align.face_features(path)
log.info("{0} {1} {2} {3}".format(img.shape, img2.shape, img3_.shape, img4.shape))
ops.merge_4image(img, img2, img3_, img4, show=True)
elif args.phase == "evaluate":
log.info("evaluation mode start")
evl = Evaluate(args, cuda=cuda)
img = cv2.imread(args.eval_image).astype(np.float32)
x_ = evl.itr_train(img)
evl.output(x_, img)
else:
log.error("not known phase %s", args.phase)
|
[
"evaluate.Evaluate"
] |
[((552, 577), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (575, 577), False, 'import torch\n'), ((582, 660), 'util.logit.info', 'log.info', (['"""neural face network use gpu: %s"""', '(support_gpu and arguments.use_gpu)'], {}), "('neural face network use gpu: %s', support_gpu and arguments.use_gpu)\n", (590, 660), True, 'import util.logit as log\n'), ((985, 1004), 'parse.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (1002, 1004), False, 'from parse import parser\n'), ((1009, 1081), 'util.logit.init', 'log.init', (['"""FaceNeural"""', 'logging.INFO'], {'log_path': '"""./output/neural_log.txt"""'}), "('FaceNeural', logging.INFO, log_path='./output/neural_log.txt')\n", (1017, 1081), True, 'import util.logit as log\n'), ((781, 822), 'torch.device', 'torch.device', (["('cuda:%d' % arguments.gpuid)"], {}), "('cuda:%d' % arguments.gpuid)\n", (793, 822), False, 'import torch\n'), ((872, 891), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (884, 891), False, 'import torch\n'), ((1167, 1198), 'util.logit.info', 'log.info', (['"""imitator train mode"""'], {}), "('imitator train mode')\n", (1175, 1198), True, 'import util.logit as log\n'), ((1218, 1251), 'imitator.Imitator', 'Imitator', (['"""neural imitator"""', 'args'], {}), "('neural imitator', args)\n", (1226, 1251), False, 'from imitator import Imitator\n'), ((1382, 1422), 'util.logit.info', 'log.info', (['"""feature extractor train mode"""'], {}), "('feature extractor train mode')\n", (1390, 1422), True, 'import util.logit as log\n'), ((1443, 1478), 'extractor.Extractor', 'Extractor', (['"""neural extractor"""', 'args'], {}), "('neural extractor', args)\n", (1452, 1478), False, 'from extractor import Extractor\n'), ((1614, 1644), 'util.logit.info', 'log.info', (['"""inference imitator"""'], {}), "('inference imitator')\n", (1622, 1644), True, 'import util.logit as log\n'), ((1664, 1710), 'imitator.Imitator', 'Imitator', (['"""neural imitator"""', 'args'], {'clean': '(False)'}), "('neural imitator', args, clean=False)\n", (1672, 1710), False, 'from imitator import Imitator\n'), ((1875, 1903), 'util.logit.info', 'log.info', (['"""preview imitator"""'], {}), "('preview imitator')\n", (1883, 1903), True, 'import util.logit as log\n'), ((1923, 1969), 'imitator.Imitator', 'Imitator', (['"""neural imitator"""', 'args'], {'clean': '(False)'}), "('neural imitator', args, clean=False)\n", (1931, 1969), False, 'from imitator import Imitator\n'), ((2061, 2078), 'dataset.FaceDataset', 'FaceDataset', (['args'], {}), '(args)\n', (2072, 2078), False, 'from dataset import FaceDataset\n'), ((2144, 2177), 'numpy.array', 'np.array', (['param'], {'dtype': 'np.float32'}), '(param, dtype=np.float32)\n', (2152, 2177), True, 'import numpy as np\n'), ((2225, 2248), 'util.logit.info', 'log.info', (['b_param.shape'], {}), '(b_param.shape)\n', (2233, 2248), True, 'import util.logit as log\n'), ((2267, 2292), 'torch.from_numpy', 'torch.from_numpy', (['b_param'], {}), '(b_param)\n', (2283, 2292), False, 'import torch\n'), ((2392, 2418), 'numpy.squeeze', 'np.squeeze', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (2402, 2418), True, 'import numpy as np\n'), ((2579, 2610), 'util.logit.info', 'log.info', (['"""inference extractor"""'], {}), "('inference extractor')\n", (2587, 2610), True, 'import util.logit as log\n'), ((2631, 2666), 'extractor.Extractor', 'Extractor', (['"""neural extractor"""', 'args'], {}), "('neural extractor', args)\n", (2640, 2666), False, 'from extractor import Extractor\n'), ((2832, 
2858), 'util.logit.info', 'log.info', (['"""light cnn test"""'], {}), "('light cnn test')\n", (2840, 2858), True, 'import util.logit as log\n'), ((2883, 2923), 'utils.load_lightcnn', 'utils.load_lightcnn', (['args.lightcnn', 'cuda'], {}), '(args.lightcnn, cuda)\n', (2902, 2923), False, 'import utils\n'), ((3002, 3062), 'cv2.imread', 'cv2.imread', (['"""../export/star/a-rb1.jpg"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('../export/star/a-rb1.jpg', cv2.IMREAD_GRAYSCALE)\n", (3012, 3062), False, 'import cv2\n'), ((3077, 3137), 'cv2.imread', 'cv2.imread', (['"""../export/star/a-lyf.jpg"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('../export/star/a-lyf.jpg', cv2.IMREAD_GRAYSCALE)\n", (3087, 3137), False, 'import cv2\n'), ((3152, 3217), 'cv2.resize', 'cv2.resize', (['im1'], {'dsize': '(128, 128)', 'interpolation': 'cv2.INTER_LINEAR'}), '(im1, dsize=(128, 128), interpolation=cv2.INTER_LINEAR)\n', (3162, 3217), False, 'import cv2\n'), ((3232, 3297), 'cv2.resize', 'cv2.resize', (['im2'], {'dsize': '(128, 128)', 'interpolation': 'cv2.INTER_LINEAR'}), '(im2, dsize=(128, 128), interpolation=cv2.INTER_LINEAR)\n', (3242, 3297), False, 'import cv2\n'), ((3312, 3342), 'numpy.reshape', 'np.reshape', (['im1', '(128, 128, 1)'], {}), '(im1, (128, 128, 1))\n', (3322, 3342), True, 'import numpy as np\n'), ((3357, 3387), 'numpy.reshape', 'np.reshape', (['im2', '(128, 128, 1)'], {}), '(im2, (128, 128, 1))\n', (3367, 3387), True, 'import numpy as np\n'), ((3508, 3559), 'utils.discriminative_loss', 'utils.discriminative_loss', (['img', 'img2', 'lightcnn_inst'], {}), '(img, img2, lightcnn_inst)\n', (3533, 3559), False, 'import utils\n'), ((3660, 3683), 'util.logit.info', 'log.info', (['"""faceparsing"""'], {}), "('faceparsing')\n", (3668, 3683), True, 'import util.logit as log\n'), ((3697, 3782), 'utils.evalute_face', 'utils.evalute_face', (['"""./output/face/db_0000_3.jpg"""', 'args.parsing_checkpoint', 'cuda'], {}), "('./output/face/db_0000_3.jpg', args.parsing_checkpoint, cuda\n )\n", (3715, 3782), False, 'import utils\n'), ((3786, 3822), 'cv2.imwrite', 'cv2.imwrite', (['"""./output/eval.jpg"""', 'im'], {}), "('./output/eval.jpg', im)\n", (3797, 3822), False, 'import cv2\n'), ((2964, 2985), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2983, 2985), True, 'import torchvision.transforms as transforms\n'), ((3907, 3923), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3917, 3923), False, 'import os\n'), ((3941, 3965), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (3953, 3965), False, 'import os\n'), ((3978, 3989), 'util.logit.info', 'log.info', (['p'], {}), '(p)\n', (3986, 3989), True, 'import util.logit as log\n'), ((4007, 4038), 'os.path.join', 'os.path.join', (['path', "('a_' + file)"], {}), "(path, 'a_' + file)\n", (4019, 4038), False, 'import os\n'), ((4056, 4082), 'align.face_features', 'align.face_features', (['p', 'p2'], {}), '(p, p2)\n', (4075, 4082), False, 'import align\n'), ((4100, 4165), 'utils.faceparsing_ndarray', 'utils.faceparsing_ndarray', (['al', 'args.parsing_checkpoint'], {'cuda': 'cuda'}), '(al, args.parsing_checkpoint, cuda=cuda)\n', (4125, 4165), False, 'import utils\n'), ((4218, 4243), 'dataset.FaceDataset', 'FaceDataset', (['args', '"""test"""'], {}), "(args, 'test')\n", (4229, 4243), False, 'from dataset import FaceDataset\n'), ((4320, 4347), 'util.logit.info', 'log.info', (['"""preview picture"""'], {}), "('preview picture')\n", (4328, 4347), True, 'import util.logit as log\n'), ((4407, 4423), 'cv2.imread', 'cv2.imread', (['path'], 
{}), '(path)\n', (4417, 4423), False, 'import cv2\n'), ((4439, 4500), 'utils.faceparsing_ndarray', 'utils.faceparsing_ndarray', (['img', 'args.parsing_checkpoint', 'cuda'], {}), '(img, args.parsing_checkpoint, cuda)\n', (4464, 4500), False, 'import utils\n'), ((4516, 4536), 'utils.img_edge', 'utils.img_edge', (['img2'], {}), '(img2)\n', (4530, 4536), False, 'import utils\n'), ((4553, 4572), 'ops.fill_gray', 'ops.fill_gray', (['img3'], {}), '(img3)\n', (4566, 4572), False, 'import ops\n'), ((4588, 4613), 'align.face_features', 'align.face_features', (['path'], {}), '(path)\n', (4607, 4613), False, 'import align\n'), ((4713, 4764), 'ops.merge_4image', 'ops.merge_4image', (['img', 'img2', 'img3_', 'img4'], {'show': '(True)'}), '(img, img2, img3_, img4, show=True)\n', (4729, 4764), False, 'import ops\n'), ((4808, 4841), 'util.logit.info', 'log.info', (['"""evaluation mode start"""'], {}), "('evaluation mode start')\n", (4816, 4841), True, 'import util.logit as log\n'), ((4856, 4881), 'evaluate.Evaluate', 'Evaluate', (['args'], {'cuda': 'cuda'}), '(args, cuda=cuda)\n', (4864, 4881), False, 'from evaluate import Evaluate\n'), ((5021, 5064), 'util.logit.error', 'log.error', (['"""not known phase %s"""', 'args.phase'], {}), "('not known phase %s', args.phase)\n", (5030, 5064), True, 'import util.logit as log\n'), ((4896, 4923), 'cv2.imread', 'cv2.imread', (['args.eval_image'], {}), '(args.eval_image)\n', (4906, 4923), False, 'import cv2\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import os
import argparse
import warnings
from functools import partial
import paddle
import paddle.nn.functional as F
from paddlenlp.metrics.glue import AccuracyAndF1
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Pad, Stack, Tuple
from paddlenlp.transformers import PPMiniLMForSequenceClassification, PPMiniLMTokenizer, LinearDecayWithWarmup
from evaluate import evaluate
from utils import set_seed
from data import read, load_dict, convert_example_to_feature
warnings.filterwarnings("ignore")
def train():
# set running envir
paddle.set_device(args.device)
set_seed(args.seed)
if not os.path.exists(args.checkpoints):
os.mkdir(args.checkpoints)
# load and process data
label2id, id2label = load_dict(args.label_path)
train_ds = load_dataset(read, data_path=args.train_path, lazy=False)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
tokenizer = PPMiniLMTokenizer.from_pretrained(args.base_model_name)
trans_func = partial(convert_example_to_feature,
tokenizer=tokenizer,
label2id=label2id,
max_seq_len=args.max_seq_len)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
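    # pad token ids and segment ids per batch; stack the remaining integer fields (the last one is the label)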
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype="int64"),
Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype="int64"),
Stack(dtype="int64"), Stack(dtype="int64")): fn(samples)
train_batch_sampler = paddle.io.BatchSampler(train_ds,
batch_size=args.batch_size,
shuffle=True)
dev_batch_sampler = paddle.io.BatchSampler(dev_ds,
batch_size=args.batch_size,
shuffle=False)
train_loader = paddle.io.DataLoader(train_ds,
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=batchify_fn)
# configure model training
model = PPMiniLMForSequenceClassification.from_pretrained(
args.base_model_name, num_classes=len(label2id))
num_training_steps = len(train_loader) * args.num_epochs
lr_scheduler = LinearDecayWithWarmup(learning_rate=args.learning_rate,
total_steps=num_training_steps,
warmup=args.warmup_proportion)
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
grad_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params,
grad_clip=grad_clip)
metric = AccuracyAndF1()
# start to train model
global_step, best_f1 = 1, 0.
model.train()
for epoch in range(1, args.num_epochs + 1):
for batch_data in train_loader():
input_ids, token_type_ids, _, labels = batch_data
            # logits: [batch_size, num_classes]
logits = model(input_ids, token_type_ids=token_type_ids)
loss = F.cross_entropy(logits, labels)
loss.backward()
lr_scheduler.step()
optimizer.step()
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_steps == 0:
print(
f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
)
if (global_step > 0 and global_step % args.eval_steps
== 0) or global_step == num_training_steps:
accuracy, precision, recall, f1 = evaluate(
model, dev_loader, metric)
model.train()
if f1 > best_f1:
print(
f"best F1 performence has been updated: {best_f1:.5f} --> {f1:.5f}"
)
best_f1 = f1
paddle.save(model.state_dict(),
f"{args.checkpoints}/best.pdparams")
print(
                    f'evaluation result: accuracy: {accuracy:.5f}, precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}'
)
global_step += 1
paddle.save(model.state_dict(), f"{args.checkpoints}/final.pdparams")
if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--base_model_name", type=str, default=None, help="The name of base model.")
parser.add_argument("--train_path", type=str, default=None, help="The path of train set.")
parser.add_argument("--dev_path", type=str, default=None, help="The path of dev set.")
parser.add_argument("--label_path", type=str, default=None, help="The path of label dict.")
parser.add_argument("--num_epochs", type=int, default=3, help="Number of epoches for fine-tuning.")
parser.add_argument("--batch_size", type=int, default=32, help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_len", type=int, default=512, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="The initial learning rate for optimizer.")
parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
parser.add_argument("--max_grad_norm", type=float, default=1.0, help="Max grad norm to clip gradient.")
parser.add_argument("--warmup_proportion", type=float, default=0.1, help="Warmup proportion params for warmup strategy")
parser.add_argument("--log_steps", type=int, default=50, help="Frequency of printing log.")
parser.add_argument("--eval_steps", type=int, default=500, help="Frequency of performing evaluation.")
parser.add_argument("--seed", type=int, default=1000, help="Random seed for initialization.")
parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
parser.add_argument("--checkpoints", type=str, default=None, help="Directory to save checkpoint.")
args = parser.parse_args()
# yapf: enable
train()
|
[
"evaluate.evaluate"
] |
[((623, 645), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (638, 645), False, 'import sys\n'), ((1134, 1167), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1157, 1167), False, 'import warnings\n'), ((1211, 1241), 'paddle.set_device', 'paddle.set_device', (['args.device'], {}), '(args.device)\n', (1228, 1241), False, 'import paddle\n'), ((1246, 1265), 'utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (1254, 1265), False, 'from utils import set_seed\n'), ((1401, 1427), 'data.load_dict', 'load_dict', (['args.label_path'], {}), '(args.label_path)\n', (1410, 1427), False, 'from data import read, load_dict, convert_example_to_feature\n'), ((1443, 1500), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['read'], {'data_path': 'args.train_path', 'lazy': '(False)'}), '(read, data_path=args.train_path, lazy=False)\n', (1455, 1500), False, 'from paddlenlp.datasets import load_dataset\n'), ((1514, 1569), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['read'], {'data_path': 'args.dev_path', 'lazy': '(False)'}), '(read, data_path=args.dev_path, lazy=False)\n', (1526, 1569), False, 'from paddlenlp.datasets import load_dataset\n'), ((1587, 1642), 'paddlenlp.transformers.PPMiniLMTokenizer.from_pretrained', 'PPMiniLMTokenizer.from_pretrained', (['args.base_model_name'], {}), '(args.base_model_name)\n', (1620, 1642), False, 'from paddlenlp.transformers import PPMiniLMForSequenceClassification, PPMiniLMTokenizer, LinearDecayWithWarmup\n'), ((1660, 1769), 'functools.partial', 'partial', (['convert_example_to_feature'], {'tokenizer': 'tokenizer', 'label2id': 'label2id', 'max_seq_len': 'args.max_seq_len'}), '(convert_example_to_feature, tokenizer=tokenizer, label2id=label2id,\n max_seq_len=args.max_seq_len)\n', (1667, 1769), False, 'from functools import partial\n'), ((2219, 2293), 'paddle.io.BatchSampler', 'paddle.io.BatchSampler', (['train_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_ds, batch_size=args.batch_size, shuffle=True)\n', (2241, 2293), False, 'import paddle\n'), ((2416, 2489), 'paddle.io.BatchSampler', 'paddle.io.BatchSampler', (['dev_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dev_ds, batch_size=args.batch_size, shuffle=False)\n', (2438, 2489), False, 'import paddle\n'), ((2603, 2696), 'paddle.io.DataLoader', 'paddle.io.DataLoader', (['train_ds'], {'batch_sampler': 'train_batch_sampler', 'collate_fn': 'batchify_fn'}), '(train_ds, batch_sampler=train_batch_sampler,\n collate_fn=batchify_fn)\n', (2623, 2696), False, 'import paddle\n'), ((2790, 2880), 'paddle.io.DataLoader', 'paddle.io.DataLoader', (['dev_ds'], {'batch_sampler': 'dev_batch_sampler', 'collate_fn': 'batchify_fn'}), '(dev_ds, batch_sampler=dev_batch_sampler, collate_fn=\n batchify_fn)\n', (2810, 2880), False, 'import paddle\n'), ((3185, 3308), 'paddlenlp.transformers.LinearDecayWithWarmup', 'LinearDecayWithWarmup', ([], {'learning_rate': 'args.learning_rate', 'total_steps': 'num_training_steps', 'warmup': 'args.warmup_proportion'}), '(learning_rate=args.learning_rate, total_steps=\n num_training_steps, warmup=args.warmup_proportion)\n', (3206, 3308), False, 'from paddlenlp.transformers import PPMiniLMForSequenceClassification, PPMiniLMTokenizer, LinearDecayWithWarmup\n'), ((3536, 3586), 'paddle.nn.ClipGradByGlobalNorm', 'paddle.nn.ClipGradByGlobalNorm', (['args.max_grad_norm'], {}), '(args.max_grad_norm)\n', (3566, 3586), False, 'import paddle\n'), ((3844, 3859), 
'paddlenlp.metrics.glue.AccuracyAndF1', 'AccuracyAndF1', ([], {}), '()\n', (3857, 3859), False, 'from paddlenlp.metrics.glue import AccuracyAndF1\n'), ((5554, 5586), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__doc__'], {}), '(__doc__)\n', (5577, 5586), False, 'import argparse\n'), ((1278, 1310), 'os.path.exists', 'os.path.exists', (['args.checkpoints'], {}), '(args.checkpoints)\n', (1292, 1310), False, 'import os\n'), ((1320, 1346), 'os.mkdir', 'os.mkdir', (['args.checkpoints'], {}), '(args.checkpoints)\n', (1328, 1346), False, 'import os\n'), ((4232, 4263), 'paddle.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (4247, 4263), True, 'import paddle.nn.functional as F\n'), ((1994, 2052), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id', 'dtype': '"""int64"""'}), "(axis=0, pad_val=tokenizer.pad_token_id, dtype='int64')\n", (1997, 2052), False, 'from paddlenlp.data import Pad, Stack, Tuple\n'), ((2062, 2125), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_type_id', 'dtype': '"""int64"""'}), "(axis=0, pad_val=tokenizer.pad_token_type_id, dtype='int64')\n", (2065, 2125), False, 'from paddlenlp.data import Pad, Stack, Tuple\n'), ((2135, 2155), 'paddlenlp.data.Stack', 'Stack', ([], {'dtype': '"""int64"""'}), "(dtype='int64')\n", (2140, 2155), False, 'from paddlenlp.data import Pad, Stack, Tuple\n'), ((2157, 2177), 'paddlenlp.data.Stack', 'Stack', ([], {'dtype': '"""int64"""'}), "(dtype='int64')\n", (2162, 2177), False, 'from paddlenlp.data import Pad, Stack, Tuple\n'), ((4802, 4837), 'evaluate.evaluate', 'evaluate', (['model', 'dev_loader', 'metric'], {}), '(model, dev_loader, metric)\n', (4810, 4837), False, 'from evaluate import evaluate\n')]
|
import numpy as np
from rebuild import rebuild
from render import render
from depth import depth
from evaluate import evaluate
from PIL import Image
def rendering(dir):
    # z has the same scale as x and y, matches the test image size, and is pixel-aligned with it
    # imgs holds the rendered results; same size as the test images, pixel-aligned
    train_lvectors = np.zeros([7, 3])  # the direction of light
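    # each "i,ang1,ang2" line holds two angles in degrees, converted below into a unit light direction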
for line in open(dir+'/train.txt'):
i,ang1,ang2 = line.strip().split(",")
i = int(i)
ang1 = int(ang1)
ang2 = int(ang2)
train_lvectors[i-1] = (np.sin(np.pi*ang1/180)*np.cos(np.pi*ang2/180),np.sin(np.pi*ang2/180),np.cos(np.pi*ang1/180)*np.cos(np.pi*ang2/180))
train_lvectors = -train_lvectors
    test_lvectors = np.zeros([10, 3])  # the direction of light
for line in open(dir+'/test.txt'):
i,ang1,ang2 = line.strip().split(",")
i = int(i)
ang1 = int(ang1)
ang2 = int(ang2)
test_lvectors[i-1] = (np.sin(np.pi*ang1/180)*np.cos(np.pi*ang2/180),np.sin(np.pi*ang2/180),np.cos(np.pi*ang1/180)*np.cos(np.pi*ang2/180))
test_lvectors = -test_lvectors
train_images = np.zeros([7, 168, 168])
for num in range(7):
image = Image.open(dir+'/train/'+str(num+1)+'.bmp')
train_images[num] = np.asarray(image)
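    # fit on the 7 training images, check the fit against the training lights, then render under the test lights and recover depth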
    n_s = 3
    alpha, beta, s, X, Y, Z, vector = rebuild(train_images, train_lvectors, n_s)
    evaluate(alpha, beta, s, X, Y, Z, n_s, train_lvectors, train_images)
    imgs = render(alpha, beta, s, X, Y, Z, n_s, test_lvectors)
    z = depth(vector)
return z, imgs
|
[
"evaluate.evaluate"
] |
[((277, 293), 'numpy.zeros', 'np.zeros', (['[7, 3]'], {}), '([7, 3])\n', (285, 293), True, 'import numpy as np\n'), ((677, 694), 'numpy.zeros', 'np.zeros', (['[10, 3]'], {}), '([10, 3])\n', (685, 694), True, 'import numpy as np\n'), ((1073, 1096), 'numpy.zeros', 'np.zeros', (['[7, 168, 168]'], {}), '([7, 168, 168])\n', (1081, 1096), True, 'import numpy as np\n'), ((1271, 1313), 'rebuild.rebuild', 'rebuild', (['train_images', 'train_lvectors', 'n_s'], {}), '(train_images, train_lvectors, n_s)\n', (1278, 1313), False, 'from rebuild import rebuild\n'), ((1316, 1384), 'evaluate.evaluate', 'evaluate', (['alpha', 'beta', 's', 'X', 'Y', 'Z', 'n_s', 'train_lvectors', 'train_images'], {}), '(alpha, beta, s, X, Y, Z, n_s, train_lvectors, train_images)\n', (1324, 1384), False, 'from evaluate import evaluate\n'), ((1389, 1440), 'render.render', 'render', (['alpha', 'beta', 's', 'X', 'Y', 'Z', 'n_s', 'test_lvectors'], {}), '(alpha, beta, s, X, Y, Z, n_s, test_lvectors)\n', (1395, 1440), False, 'from render import render\n'), ((1442, 1455), 'depth.depth', 'depth', (['vector'], {}), '(vector)\n', (1447, 1455), False, 'from depth import depth\n'), ((1210, 1227), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1220, 1227), True, 'import numpy as np\n'), ((549, 575), 'numpy.sin', 'np.sin', (['(np.pi * ang2 / 180)'], {}), '(np.pi * ang2 / 180)\n', (555, 575), True, 'import numpy as np\n'), ((948, 974), 'numpy.sin', 'np.sin', (['(np.pi * ang2 / 180)'], {}), '(np.pi * ang2 / 180)\n', (954, 974), True, 'import numpy as np\n'), ((503, 529), 'numpy.sin', 'np.sin', (['(np.pi * ang1 / 180)'], {}), '(np.pi * ang1 / 180)\n', (509, 529), True, 'import numpy as np\n'), ((526, 552), 'numpy.cos', 'np.cos', (['(np.pi * ang2 / 180)'], {}), '(np.pi * ang2 / 180)\n', (532, 552), True, 'import numpy as np\n'), ((572, 598), 'numpy.cos', 'np.cos', (['(np.pi * ang1 / 180)'], {}), '(np.pi * ang1 / 180)\n', (578, 598), True, 'import numpy as np\n'), ((595, 621), 'numpy.cos', 'np.cos', (['(np.pi * ang2 / 180)'], {}), '(np.pi * ang2 / 180)\n', (601, 621), True, 'import numpy as np\n'), ((902, 928), 'numpy.sin', 'np.sin', (['(np.pi * ang1 / 180)'], {}), '(np.pi * ang1 / 180)\n', (908, 928), True, 'import numpy as np\n'), ((925, 951), 'numpy.cos', 'np.cos', (['(np.pi * ang2 / 180)'], {}), '(np.pi * ang2 / 180)\n', (931, 951), True, 'import numpy as np\n'), ((971, 997), 'numpy.cos', 'np.cos', (['(np.pi * ang1 / 180)'], {}), '(np.pi * ang1 / 180)\n', (977, 997), True, 'import numpy as np\n'), ((994, 1020), 'numpy.cos', 'np.cos', (['(np.pi * ang2 / 180)'], {}), '(np.pi * ang2 / 180)\n', (1000, 1020), True, 'import numpy as np\n')]
|
import numpy as np
import os
import pickle
import copy
import json
import warnings
warnings.filterwarnings("ignore")
os.environ["CUDA_VISIBLE_DEVICES"]="6,7"
from load_img import Load_from_Folder, Load_Images
from evaluate import Time, MSE, PSNR
from MBMBVQ import MBMBVQ
from EntropyCoding import EntropyCoding
class GIC():
def __init__(self, par):
self.MBMBVQ = MBMBVQ(par)
self.EC = EntropyCoding(par)
def change_n_img(self, n_img):
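        # patch the image-count (first) entry of every hop's shape record in the entropy coder's parameters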
for i in range(1, self.EC.par['n_hop']+1):
for j in range(len(self.EC.par['shape']['hop'+str(i)])):
self.EC.par['shape']['hop'+str(i)][j][0] = n_img
@Time
def fit(self, Y):
self.change_n_img(Y.shape[0])
self.MBMBVQ.fit(copy.deepcopy(Y))
save = self.MBMBVQ.encode(copy.deepcopy(Y))
self.EC.fit(save)
return self
@Time
def refit(self, Y, par):
self.change_n_img(Y.shape[0])
self.MBMBVQ.refit(copy.deepcopy(Y), par)
save = self.MBMBVQ.encode(copy.deepcopy(Y))
self.EC.refit(save, par)
return self
@Time
def encode(self, Y):
self.change_n_img(Y.shape[0])
save = self.MBMBVQ.encode(Y)
stream = self.EC.encode(save, S=Y.shape[1])
return stream, save['DC'], save
@Time
def decode(self, stream, DC):
save = self.EC.decode(stream)
save['DC'] = DC
iY = self.MBMBVQ.decode(save)
return iY
# return pickleable obj
def save(self):
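        # drop the nested fitted KMeans objects (not picklable) so the instance can be serialized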
for k in self.MBMBVQ.km.keys():
km = self.MBMBVQ.km[k]
for i in km.KM:
i.KM.KM = None
i.KM.saveObj=False
return self
if __name__ == "__main__":
with open('./test_data/test_par1.json', 'r') as f:
par = json.load(f)
    gic = GIC(par)
Y_list = Load_from_Folder(folder='./test_data/', color='YUV', ct=-1)
Y = np.array(Y_list)[:,:,:,:1]
gic.fit(Y)
    stream, dc, _ = gic.encode(Y)  # encode returns (stream, DC, save)
iY = gic.decode(stream, dc)
print('MSE=%5.3f, PSNR=%3.5f'%(MSE(Y, iY), PSNR(Y, iY)))
print('------------------')
print(" * Ref result: "+'MSE=129.342, PSNR=27.01340')
|
[
"evaluate.PSNR",
"evaluate.MSE"
] |
[((84, 117), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (107, 117), False, 'import warnings\n'), ((1844, 1903), 'load_img.Load_from_Folder', 'Load_from_Folder', ([], {'folder': '"""./test_data/"""', 'color': '"""YUV"""', 'ct': '(-1)'}), "(folder='./test_data/', color='YUV', ct=-1)\n", (1860, 1903), False, 'from load_img import Load_from_Folder, Load_Images\n'), ((378, 389), 'MBMBVQ.MBMBVQ', 'MBMBVQ', (['par'], {}), '(par)\n', (384, 389), False, 'from MBMBVQ import MBMBVQ\n'), ((408, 426), 'EntropyCoding.EntropyCoding', 'EntropyCoding', (['par'], {}), '(par)\n', (421, 426), False, 'from EntropyCoding import EntropyCoding\n'), ((1796, 1808), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1805, 1808), False, 'import json\n'), ((1912, 1928), 'numpy.array', 'np.array', (['Y_list'], {}), '(Y_list)\n', (1920, 1928), True, 'import numpy as np\n'), ((741, 757), 'copy.deepcopy', 'copy.deepcopy', (['Y'], {}), '(Y)\n', (754, 757), False, 'import copy\n'), ((793, 809), 'copy.deepcopy', 'copy.deepcopy', (['Y'], {}), '(Y)\n', (806, 809), False, 'import copy\n'), ((960, 976), 'copy.deepcopy', 'copy.deepcopy', (['Y'], {}), '(Y)\n', (973, 976), False, 'import copy\n'), ((1017, 1033), 'copy.deepcopy', 'copy.deepcopy', (['Y'], {}), '(Y)\n', (1030, 1033), False, 'import copy\n'), ((2055, 2065), 'evaluate.MSE', 'MSE', (['Y', 'iY'], {}), '(Y, iY)\n', (2058, 2065), False, 'from evaluate import Time, MSE, PSNR\n'), ((2067, 2078), 'evaluate.PSNR', 'PSNR', (['Y', 'iY'], {}), '(Y, iY)\n', (2071, 2078), False, 'from evaluate import Time, MSE, PSNR\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.