Dataset Viewer

| code | apis | extract_api |
|---|---|---|
| string, lengths 42 to 43.2k | sequence | string, lengths 115 to 61.9k |

Each record below lists its three column values in that order, separated by `|` lines.
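The `extract_api` column is stored as a Python-literal string. Below is a minimal decoding sketch using only the standard library; the field layout (character span, qualified API name, source text, parsed arguments, argument text, inner span, flag, import statement) is inferred from the rows that follow, not documented anywhere, so treat it as an assumption:

```python
import ast

# One entry copied from the first record's extract_api column.
raw = "[((159, 177), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\\n', (170, 177), True, 'import pandas as pd\\n')]"

# The value is a Python literal (tuples, lists, dicts, strings, bools),
# so ast.literal_eval is enough to decode it safely.
for entry in ast.literal_eval(raw):
    span, qualified_name, source_text = entry[0], entry[1], entry[2]
    print(span, qualified_name, source_text)  # (159, 177) pandas.read_csv pd.read_csv
```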
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from network import NN
from evaluate import accuracy


def read_data(fpath):
    iris = pd.read_csv(fpath)
    iris.loc[iris['species'] == 'virginica', 'species'] = 0
    iris.loc[iris['species'] == 'versicolor', 'species'] = 1
    iris.loc[iris['species'] == 'setosa', 'species'] = 2
    iris = iris[iris['species'] != 2]
    return iris[['petal_length', 'petal_width']].values, iris[['species']].values.astype('uint8')


def plot_data(X, y):
    plt.scatter(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)
    plt.title("IRIS DATA | Blue - Versicolor, Red - Virginica")
    plt.xlabel('Petal Length')
    plt.ylabel('Petal Width')
    plt.show()


def train_test_split(X, y, ratio=0.8):
    indices = np.arange(X.shape[0])
    np.random.shuffle(indices)
    train_len = int(X.shape[0] * ratio)
    return X[indices[:train_len]], y[indices[:train_len]], X[indices[train_len:]], y[indices[train_len:]]


if __name__ == '__main__':
    X, y = read_data('iris.csv')
    # comment out the following line if you don't need the plot anymore
    plot_data(X, y)
    X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
    nn = NN(len(X[0]), 5, 1)
    output = nn.feedforward(X_train)
    print(output)
    print(f'w1 before backward propagation: \n{nn.w1} \nw2 before backward propagation:\n{nn.w2}')
    nn.backward(X_train, y_train, output)
    print(f'w1 after backward propagation: \n{nn.w1} \nw2 after backward propagation:\n{nn.w2}')
    nn.train(X_train, y_train)
    print("Accuracy:")
    print(accuracy(nn, X_test, y_test))
|
[
"evaluate.accuracy"
] |
[((159, 177), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\n', (170, 177), True, 'import pandas as pd\n'), ((519, 587), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y[:, 0]', 's': '(40)', 'cmap': 'plt.cm.Spectral'}), '(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)\n', (530, 587), True, 'import matplotlib.pyplot as plt\n'), ((592, 652), 'matplotlib.pyplot.title', 'plt.title', (['"""IRIS DATA | Blue - Versicolor, Red - Virginica """'], {}), "('IRIS DATA | Blue - Versicolor, Red - Virginica ')\n", (601, 652), True, 'import matplotlib.pyplot as plt\n'), ((657, 683), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal Length"""'], {}), "('Petal Length')\n", (667, 683), True, 'import matplotlib.pyplot as plt\n'), ((688, 713), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal Width"""'], {}), "('Petal Width')\n", (698, 713), True, 'import matplotlib.pyplot as plt\n'), ((718, 728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (726, 728), True, 'import matplotlib.pyplot as plt\n'), ((784, 805), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (793, 805), True, 'import numpy as np\n'), ((810, 836), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (827, 836), True, 'import numpy as np\n'), ((1586, 1614), 'evaluate.accuracy', 'accuracy', (['nn', 'X_test', 'y_test'], {}), '(nn, X_test, y_test)\n', (1594, 1614), False, 'from evaluate import accuracy\n')]
|
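The `NN` class imported from `network` is not part of this record. Below is a minimal stand-in sketch, assuming a sigmoid two-layer perceptron whose interface matches what the script touches (`w1`, `w2`, `feedforward`, `backward`, `train`); the learning rate and update rule are illustrative guesses, not the repository's actual code:

```python
import numpy as np

# Hypothetical stand-in for the external network.NN class; only the interface
# used by the record above is sketched.
class NN:
    def __init__(self, n_input, n_hidden, n_output, lr=0.1):
        self.lr = lr
        self.w1 = np.random.randn(n_input, n_hidden)
        self.w2 = np.random.randn(n_hidden, n_output)

    def _sigmoid(self, z):
        return 1.0 / (1.0 + np.exp(-z))

    def feedforward(self, X):
        self.a1 = self._sigmoid(X @ self.w1)      # hidden activations
        return self._sigmoid(self.a1 @ self.w2)   # output probabilities

    def backward(self, X, y, output):
        # one plain gradient-descent step on squared error
        delta2 = (output - y) * output * (1 - output)
        delta1 = (delta2 @ self.w2.T) * self.a1 * (1 - self.a1)
        self.w2 -= self.lr * self.a1.T @ delta2
        self.w1 -= self.lr * X.T @ delta1

    def train(self, X, y, epochs=1000):
        for _ in range(epochs):
            self.backward(X, y, self.feedforward(X))
```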
import datetime
import os
import copy
import json
import numpy as np
from pytz import timezone
from gamified_squad import GamifiedSquad
from agent import CustomAgent
import generic
import evaluate

SAVE_CHECKPOINT = 100000


def train():
    time_1 = datetime.datetime.now()
    config = generic.load_config()
    env = GamifiedSquad(config)
    env.split_reset("train")
    agent = CustomAgent(config, env.has_token_set)

    if config["general"]["visdom"]:
        # visdom
        import visdom
        viz = visdom.Visdom()
        plt_win = None
        eval_plt_win = None
        plt_q_value_win = None
        plt_steps_win = None
        eval_plt_steps_win = None
        viz_avg_ig_acc, viz_avg_qa_acc = [], []
        viz_avg_ig_q_value = []
        viz_eval_ig_acc, viz_eval_qa_acc, viz_eval_steps = [], [], []
        viz_avg_steps = []

    step_in_total = 0
    batch_no = 0
    episode_no = 0
    running_avg_qa_acc = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_acc = generic.HistoryScoreCache(capacity=50)
    running_avg_qa_loss = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_loss = generic.HistoryScoreCache(capacity=50)
    running_avg_ig_q_value = generic.HistoryScoreCache(capacity=50)
    running_avg_steps = generic.HistoryScoreCache(capacity=50)

    output_dir = "."
    data_dir = "."
    json_file_name = agent.experiment_tag.replace(" ", "_")
    best_qa_acc_so_far = 0.0
    prev_performance = 0.0
    i_am_patient = 0

    # load model from checkpoint
    if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
        print("checkpoint already exists.")
        exit(0)
    if os.path.exists(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt"):
        agent.load_pretrained_graph_generation_model(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt")
    if agent.load_pretrained:
        if os.path.exists(data_dir + "/" + agent.load_from_tag + ".pt"):
            agent.load_pretrained_model(data_dir + "/" + agent.load_from_tag + ".pt")  # load partial graph
            agent.update_target_net()

    while True:
        if episode_no > agent.max_episode:
            break
        np.random.seed(episode_no)
        env.seed(episode_no)
        obs, infos = env.reset()
        batch_size = len(obs)
        report = agent.report_frequency > 0 and (episode_no % agent.report_frequency <= max(episode_no - batch_size, 0) % agent.report_frequency)
        __save__ = episode_no % SAVE_CHECKPOINT <= max(episode_no - batch_size, 0) % SAVE_CHECKPOINT
        if report:
            print("====================================================================================", episode_no)
            print("-- Q: %s" % (agent.bert_tokenizer.decode(infos[0]["q"]).encode('utf-8')))
            print("-- A: %s" % (infos[0]["a_string"][0].encode('utf-8')))

        agent.train()
        agent.init(obs, infos)
        quest_list = agent.get_game_quest_info(infos)
        agent.kg.push_batch_question(quest_list, [item["q_srl"] for item in infos])
        previous_dynamics = None
        previous_belief = None
        input_quest, input_quest_mask, quest_id_list = agent.get_agent_inputs(quest_list)
        tmp_replay_buffer = []
        print_cmds = []
        prev_commands = ["restart" for _ in range(batch_size)]
        belief_buffer = []
        act_randomly = False if agent.noisy_net else episode_no < agent.learn_start_from_this_episode

        for _ in range(agent.max_nb_steps_per_episode):
            # generate commands
            if agent.noisy_net:
                agent.reset_noise()  # Draw a new set of noisy weights
            commands, replay_info, current_dynamics, current_belief = agent.act(obs, infos, input_quest, input_quest_mask, quest_id_list, prev_commands, previous_dynamics, previous_belief, random=act_randomly)
            tmp_replay_buffer.append(replay_info)
            obs, infos = env.step(commands)
            prev_commands = commands
            previous_dynamics = current_dynamics
            previous_belief = current_belief
            belief_buffer.append(current_belief)
            if agent.noisy_net and step_in_total % agent.update_per_k_game_steps == 0:
                agent.reset_noise()  # Draw a new set of noisy weights

            if episode_no >= agent.learn_start_from_this_episode and step_in_total % agent.update_per_k_game_steps == 0:
                interaction_loss, interaction_q_value = agent.update_interaction()
                if interaction_loss is not None:
                    running_avg_ig_loss.push(interaction_loss)
                    running_avg_ig_q_value.push(interaction_q_value)
                qa_loss = agent.update_qa()
                if qa_loss is not None:
                    running_avg_qa_loss.push(qa_loss)
            step_in_total += 1
            still_running = generic.to_np(replay_info[-1])
            print_cmds.append(commands[0] if still_running[0] else "--")
            if np.sum(still_running) == 0:
                break

        if report:
            print(" / ".join(print_cmds).encode('utf-8'))

        # The agent has exhausted all steps, now answer question.
        chosen_head_tails = agent.answer_question_act(agent.naozi.get(), quest_list, current_belief)  # batch
        chosen_head_tails_np = generic.to_np(chosen_head_tails)
        chosen_answer_strings = generic.get_answer_strings(agent.naozi.get(), chosen_head_tails_np, agent.bert_tokenizer, agent.special_token_ids)
        answer_strings = [item["a_string"] for item in infos]
        answer_token_ids = [item["a"] for item in infos]
        qa_reward_np = generic.get_qa_reward(chosen_answer_strings, answer_strings)
        obs_strings = [agent.bert_tokenizer.decode(agent.naozi.get(i)) for i in range(batch_size)]
        ig_reward_np = generic.get_sufficient_info_reward(agent.naozi.get(), answer_token_ids)
        ig_reward = generic.to_pt(ig_reward_np, enable_cuda=False, type='float')  # batch

        # push qa experience into qa replay buffer
        replay_node_vocab = agent.kg.get_node_vocabulary()
        replay_relation_vocab = agent.kg.get_relation_vocabulary()
        replay_triplets = agent.kg.get_triplets()
        for b in range(batch_size):  # data points in batch
            is_prior = qa_reward_np[b] > agent.qa_reward_prior_threshold * agent.qa_replay_memory.avg_rewards()
            # if the agent is not in the correct state, do not push it into replay buffer
            if np.mean(ig_reward_np[b]) == 0.0:
                continue
            agent.qa_replay_memory.push(is_prior, qa_reward_np[b], agent.naozi.get_sentence_lists(b), quest_list[b], replay_node_vocab[b], replay_relation_vocab[b], replay_triplets[b], answer_token_ids[b], belief_buffer[-1][b].cpu() if belief_buffer[-1][b] is not None else None)

        # small positive reward whenever it answers question correctly
        masks_np = [generic.to_np(item[-1]) for item in tmp_replay_buffer]
        command_rewards_np = []
        for i in range(len(tmp_replay_buffer)):
            if i == len(tmp_replay_buffer) - 1:
                r = ig_reward * tmp_replay_buffer[i][-1]
                r_np = ig_reward_np * masks_np[i]
            else:
                # give reward only at that one game step, not all
                r = ig_reward * (tmp_replay_buffer[i][-1] - tmp_replay_buffer[i + 1][-1])
                r_np = ig_reward_np * (masks_np[i] - masks_np[i + 1])
            tmp_replay_buffer[i].append(r)
            command_rewards_np.append(r_np)
        command_rewards_np = np.array(command_rewards_np)
        if report:
            print(command_rewards_np[:, 0])

        # push experience into replay buffer
        for b in range(len(ig_reward_np)):
            is_prior = np.sum(command_rewards_np, 0)[b] > 0.0
            mem = []
            for i in range(len(tmp_replay_buffer)):
                batch_description_list, batch_chosen_indices, batch_chosen_ctrlf_indices, batch_graph_node_vocabulary, batch_graph_relation_vocabulary, batch_graph_triplets, _, batch_rewards = tmp_replay_buffer[i]
                mem.append([copy.deepcopy(batch_description_list[b]),
                            copy.deepcopy(quest_list[b]),
                            batch_chosen_indices[b],
                            batch_chosen_ctrlf_indices[b],
                            copy.deepcopy(batch_graph_node_vocabulary[b]),
                            copy.deepcopy(batch_graph_relation_vocabulary[b]),
                            copy.deepcopy(batch_graph_triplets[b]),
                            copy.deepcopy(belief_buffer[i][b].cpu()) if belief_buffer[i][b] is not None else None,
                            batch_rewards[b]])
                if masks_np[i][b] == 0.0:
                    break
            agent.replay_memory.push(is_prior, mem)

        qa_acc = np.mean(qa_reward_np)
        ig_acc = np.mean(ig_reward_np)
        step_masks_np = np.sum(np.array(masks_np), 0)  # batch
        for i in range(len(qa_reward_np)):
            # if the answer is totally wrong, we assume it used all steps
            if qa_reward_np[i] == 0.0:
                step_masks_np[i] = agent.max_nb_steps_per_episode
        used_steps = np.mean(step_masks_np)

        running_avg_qa_acc.push(qa_acc)
        running_avg_ig_acc.push(ig_acc)
        running_avg_steps.push(used_steps)
        print_rewards = np.sum(np.mean(command_rewards_np, -1))
        if report:
            print("-- OBS: %s" % (obs_strings[0].encode('utf-8')))
            print("-- PRED: %s" % (chosen_answer_strings[0].encode('utf-8')))

        # finish game
        agent.finish_of_episode(episode_no, batch_no, batch_size)
        time_2 = datetime.datetime.now()
        eastern_time = datetime.datetime.now(timezone('US/Eastern')).strftime("%b %d %Y %H:%M:%S")
        if report:
            print("Episode: {:3d} | {:s} | time spent: {:s} | interaction loss: {:2.3f} | interaction qvalue: {:2.3f} | qa loss: {:2.3f} | rewards: {:2.3f} | qa acc: {:2.3f}/{:2.3f} | sufficient info: {:2.3f}/{:2.3f} | used steps: {:2.3f}".format(episode_no, eastern_time, str(time_2 - time_1).rsplit(".")[0], running_avg_ig_loss.get_avg(), running_avg_ig_q_value.get_avg(), running_avg_qa_loss.get_avg(), print_rewards, qa_acc, running_avg_qa_acc.get_avg(), ig_acc, running_avg_ig_acc.get_avg(), running_avg_steps.get_avg()))
        if __save__:
            agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_ep" + str(episode_no) + "_model.pt")

        if not report or episode_no < agent.learn_start_from_this_episode:
            episode_no += batch_size
            batch_no += 1
            continue

        eval_qa_acc, eval_ig_acc, eval_used_steps = 0.0, 0.0, 0.0
        # evaluate
        if agent.run_eval:
            eval_qa_acc, eval_ig_acc, eval_used_steps = evaluate.evaluate(env, agent, "valid")
            env.split_reset("train")
            # if run eval, then save model by eval accuracy
            if eval_qa_acc >= best_qa_acc_so_far:
                best_qa_acc_so_far = eval_qa_acc
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
            curr_performance = eval_qa_acc
        else:
            if running_avg_qa_acc.get_avg() >= best_qa_acc_so_far:
                best_qa_acc_so_far = running_avg_qa_acc.get_avg()
                agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
            curr_performance = running_avg_qa_acc.get_avg()

        if prev_performance <= curr_performance:
            i_am_patient = 0
        else:
            i_am_patient += 1
        prev_performance = curr_performance

        # if patient >= patience, resume from checkpoint
        if agent.patience > 0 and i_am_patient >= agent.patience:
            if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
                print('reload from a good checkpoint...')
                agent.load_pretrained_model(output_dir + "/" + agent.experiment_tag + "_model.pt", load_partial_graph=False)
                agent.update_target_net()
            i_am_patient = 0

        # plot using visdom
        if config["general"]["visdom"] and not agent.debug_mode:
            viz_avg_ig_acc.append(running_avg_ig_acc.get_avg())
            viz_avg_qa_acc.append(running_avg_qa_acc.get_avg())
            viz_avg_ig_q_value.append(running_avg_ig_q_value.get_avg())
            viz_eval_ig_acc.append(eval_ig_acc)
            viz_eval_qa_acc.append(eval_qa_acc)
            viz_eval_steps.append(eval_used_steps)
            viz_avg_steps.append(running_avg_steps.get_avg())
            viz_x = np.arange(len(viz_avg_ig_acc)).tolist()

            if plt_win is None:
                plt_win = viz.line(X=viz_x, Y=viz_avg_ig_acc,
                                   opts=dict(title=agent.experiment_tag + "_train"),
                                   name="sufficient info")
                viz.line(X=viz_x, Y=viz_avg_qa_acc,
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win, update='append', name="qa")
            else:
                viz.line(X=[len(viz_avg_ig_acc) - 1], Y=[viz_avg_ig_acc[-1]],
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win,
                         update='append', name="sufficient info")
                viz.line(X=[len(viz_avg_qa_acc) - 1], Y=[viz_avg_qa_acc[-1]],
                         opts=dict(title=agent.experiment_tag + "_train"),
                         win=plt_win,
                         update='append', name="qa")

            if plt_q_value_win is None:
                plt_q_value_win = viz.line(X=viz_x, Y=viz_avg_ig_q_value,
                                           opts=dict(title=agent.experiment_tag + "_train_q_value"),
                                           name="sufficient info")
            else:
                viz.line(X=[len(viz_avg_ig_q_value) - 1], Y=[viz_avg_ig_q_value[-1]],
                         opts=dict(title=agent.experiment_tag + "_train_q_value"),
                         win=plt_q_value_win,
                         update='append', name="sufficient info")

            if plt_steps_win is None:
                plt_steps_win = viz.line(X=viz_x, Y=viz_avg_steps,
                                         opts=dict(title=agent.experiment_tag + "_train_step"),
                                         name="used steps")
            else:
                viz.line(X=[len(viz_avg_steps) - 1], Y=[viz_avg_steps[-1]],
                         opts=dict(title=agent.experiment_tag + "_train_step"),
                         win=plt_steps_win,
                         update='append', name="used steps")

            if agent.run_eval:
                if eval_plt_win is None:
                    eval_plt_win = viz.line(X=viz_x, Y=viz_eval_ig_acc,
                                            opts=dict(title=agent.experiment_tag + "_eval"),
                                            name="sufficient info")
                    viz.line(X=viz_x, Y=viz_eval_qa_acc,
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win, update='append', name="qa")
                else:
                    viz.line(X=[len(viz_eval_ig_acc) - 1], Y=[viz_eval_ig_acc[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win,
                             update='append', name="sufficient info")
                    viz.line(X=[len(viz_eval_qa_acc) - 1], Y=[viz_eval_qa_acc[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval"),
                             win=eval_plt_win,
                             update='append', name="qa")

                if eval_plt_steps_win is None:
                    eval_plt_steps_win = viz.line(X=viz_x, Y=viz_eval_steps,
                                                  opts=dict(title=agent.experiment_tag + "_eval_step"),
                                                  name="used steps")
                else:
                    viz.line(X=[len(viz_eval_steps) - 1], Y=[viz_eval_steps[-1]],
                             opts=dict(title=agent.experiment_tag + "_eval_step"),
                             win=eval_plt_steps_win,
                             update='append', name="used steps")

        # write accuracies down into file
        _s = json.dumps({"time spent": str(time_2 - time_1).rsplit(".")[0],
                        "sufficient info": str(running_avg_ig_acc.get_avg()),
                        "qa": str(running_avg_qa_acc.get_avg()),
                        "sufficient qvalue": str(running_avg_ig_q_value.get_avg()),
                        "eval sufficient info": str(eval_ig_acc),
                        "eval qa": str(eval_qa_acc),
                        "eval steps": str(eval_used_steps),
                        "used steps": str(running_avg_steps.get_avg())})
        with open(output_dir + "/" + json_file_name + '.json', 'a+') as outfile:
            outfile.write(_s + '\n')
            outfile.flush()

        episode_no += batch_size
        batch_no += 1


if __name__ == '__main__':
    train()
|
[
"evaluate.evaluate"
] |
[((252, 275), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (273, 275), False, 'import datetime\n'), ((289, 310), 'generic.load_config', 'generic.load_config', ([], {}), '()\n', (308, 310), False, 'import generic\n'), ((321, 342), 'gamified_squad.GamifiedSquad', 'GamifiedSquad', (['config'], {}), '(config)\n', (334, 342), False, 'from gamified_squad import GamifiedSquad\n'), ((384, 422), 'agent.CustomAgent', 'CustomAgent', (['config', 'env.has_token_set'], {}), '(config, env.has_token_set)\n', (395, 422), False, 'from agent import CustomAgent\n'), ((936, 974), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (961, 974), False, 'import generic\n'), ((1000, 1038), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1025, 1038), False, 'import generic\n'), ((1065, 1103), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1090, 1103), False, 'import generic\n'), ((1130, 1168), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1155, 1168), False, 'import generic\n'), ((1198, 1236), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1223, 1236), False, 'import generic\n'), ((1261, 1299), 'generic.HistoryScoreCache', 'generic.HistoryScoreCache', ([], {'capacity': '(50)'}), '(capacity=50)\n', (1286, 1299), False, 'import generic\n'), ((1523, 1592), 'os.path.exists', 'os.path.exists', (["(output_dir + '/' + agent.experiment_tag + '_model.pt')"], {}), "(output_dir + '/' + agent.experiment_tag + '_model.pt')\n", (1537, 1592), False, 'import os\n'), ((1661, 1748), 'os.path.exists', 'os.path.exists', (["(data_dir + '/' + agent.load_graph_generation_model_from_tag + '.pt')"], {}), "(data_dir + '/' + agent.load_graph_generation_model_from_tag +\n '.pt')\n", (1675, 1748), False, 'import os\n'), ((514, 529), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (527, 529), False, 'import visdom\n'), ((1909, 1969), 'os.path.exists', 'os.path.exists', (["(data_dir + '/' + agent.load_from_tag + '.pt')"], {}), "(data_dir + '/' + agent.load_from_tag + '.pt')\n", (1923, 1969), False, 'import os\n'), ((2204, 2230), 'numpy.random.seed', 'np.random.seed', (['episode_no'], {}), '(episode_no)\n', (2218, 2230), True, 'import numpy as np\n'), ((5347, 5379), 'generic.to_np', 'generic.to_np', (['chosen_head_tails'], {}), '(chosen_head_tails)\n', (5360, 5379), False, 'import generic\n'), ((5670, 5730), 'generic.get_qa_reward', 'generic.get_qa_reward', (['chosen_answer_strings', 'answer_strings'], {}), '(chosen_answer_strings, answer_strings)\n', (5691, 5730), False, 'import generic\n'), ((5945, 6005), 'generic.to_pt', 'generic.to_pt', (['ig_reward_np'], {'enable_cuda': '(False)', 'type': '"""float"""'}), "(ig_reward_np, enable_cuda=False, type='float')\n", (5958, 6005), False, 'import generic\n'), ((7600, 7628), 'numpy.array', 'np.array', (['command_rewards_np'], {}), '(command_rewards_np)\n', (7608, 7628), True, 'import numpy as np\n'), ((8900, 8921), 'numpy.mean', 'np.mean', (['qa_reward_np'], {}), '(qa_reward_np)\n', (8907, 8921), True, 'import numpy as np\n'), ((8939, 8960), 'numpy.mean', 'np.mean', (['ig_reward_np'], {}), '(ig_reward_np)\n', (8946, 8960), True, 'import numpy as np\n'), ((9267, 9289), 'numpy.mean', 'np.mean', (['step_masks_np'], {}), '(step_masks_np)\n', (9274, 9289), True, 'import numpy as np\n'), ((9750, 
9773), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9771, 9773), False, 'import datetime\n'), ((4892, 4922), 'generic.to_np', 'generic.to_np', (['replay_info[-1]'], {}), '(replay_info[-1])\n', (4905, 4922), False, 'import generic\n'), ((6950, 6973), 'generic.to_np', 'generic.to_np', (['item[-1]'], {}), '(item[-1])\n', (6963, 6973), False, 'import generic\n'), ((8992, 9010), 'numpy.array', 'np.array', (['masks_np'], {}), '(masks_np)\n', (9000, 9010), True, 'import numpy as np\n'), ((9445, 9476), 'numpy.mean', 'np.mean', (['command_rewards_np', '(-1)'], {}), '(command_rewards_np, -1)\n', (9452, 9476), True, 'import numpy as np\n'), ((10888, 10926), 'evaluate.evaluate', 'evaluate.evaluate', (['env', 'agent', '"""valid"""'], {}), "(env, agent, 'valid')\n", (10905, 10926), False, 'import evaluate\n'), ((11871, 11940), 'os.path.exists', 'os.path.exists', (["(output_dir + '/' + agent.experiment_tag + '_model.pt')"], {}), "(output_dir + '/' + agent.experiment_tag + '_model.pt')\n", (11885, 11940), False, 'import os\n'), ((5011, 5032), 'numpy.sum', 'np.sum', (['still_running'], {}), '(still_running)\n', (5017, 5032), True, 'import numpy as np\n'), ((6520, 6544), 'numpy.mean', 'np.mean', (['ig_reward_np[b]'], {}), '(ig_reward_np[b])\n', (6527, 6544), True, 'import numpy as np\n'), ((7805, 7834), 'numpy.sum', 'np.sum', (['command_rewards_np', '(0)'], {}), '(command_rewards_np, 0)\n', (7811, 7834), True, 'import numpy as np\n'), ((9819, 9841), 'pytz.timezone', 'timezone', (['"""US/Eastern"""'], {}), "('US/Eastern')\n", (9827, 9841), False, 'from pytz import timezone\n'), ((8159, 8199), 'copy.deepcopy', 'copy.deepcopy', (['batch_description_list[b]'], {}), '(batch_description_list[b])\n', (8172, 8199), False, 'import copy\n'), ((8230, 8258), 'copy.deepcopy', 'copy.deepcopy', (['quest_list[b]'], {}), '(quest_list[b])\n', (8243, 8258), False, 'import copy\n'), ((8403, 8448), 'copy.deepcopy', 'copy.deepcopy', (['batch_graph_node_vocabulary[b]'], {}), '(batch_graph_node_vocabulary[b])\n', (8416, 8448), False, 'import copy\n'), ((8479, 8528), 'copy.deepcopy', 'copy.deepcopy', (['batch_graph_relation_vocabulary[b]'], {}), '(batch_graph_relation_vocabulary[b])\n', (8492, 8528), False, 'import copy\n'), ((8559, 8597), 'copy.deepcopy', 'copy.deepcopy', (['batch_graph_triplets[b]'], {}), '(batch_graph_triplets[b])\n', (8572, 8597), False, 'import copy\n')]
|
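One detail in this record that is easy to misread: per-step rewards are built from differences of the still-running masks, so the episodic reward lands only on the step where each game ends. A standalone illustration with made-up numbers:

```python
import numpy as np

# masks are 1 while a game is still running and 0 afterwards, so
# masks[i] - masks[i + 1] is 1 exactly at the step where an episode ends.
masks = np.array([[1, 1], [1, 1], [1, 0], [0, 0]], dtype=float)  # steps x batch
ig_reward = np.array([0.5, 1.0])  # per-game episodic reward

step_rewards = []
for i in range(len(masks)):
    if i == len(masks) - 1:
        step_rewards.append(ig_reward * masks[i])  # games still alive at the last step
    else:
        step_rewards.append(ig_reward * (masks[i] - masks[i + 1]))
print(np.array(step_rewards))  # each column sums to at most its episodic reward
```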
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function, unicode_literals

import argparse
import json
import os
import shutil
import time

import torch

from utils import util
from evaluate import MultiWozEvaluator
from model.model import Model

parser = argparse.ArgumentParser(description='S2S')
parser.add_argument('--no_cuda', type=util.str2bool, nargs='?', const=True, default=True, help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--no_models', type=int, default=20, help='how many models to evaluate')
parser.add_argument('--original', type=str, default='model/model/', help='Original path.')
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--use_emb', type=str, default='False')
parser.add_argument('--beam_width', type=int, default=10, help='Beam width used in beamsearch')
parser.add_argument('--write_n_best', type=util.str2bool, nargs='?', const=True, default=False, help='Write n-best list (n=beam_width)')
parser.add_argument('--model_path', type=str, default='model/model/translate.ckpt', help='Path to a specific model checkpoint.')
parser.add_argument('--model_dir', type=str, default='model/')
parser.add_argument('--model_name', type=str, default='translate.ckpt')
parser.add_argument('--valid_output', type=str, default='model/data/val_dials/', help='Validation Decoding output dir path')
parser.add_argument('--decode_output', type=str, default='model/data/test_dials/', help='Decoding output dir path')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")


def load_config(args):
    config = util.unicode_to_utf8(
        json.load(open('%s.json' % args.model_path, 'rb')))
    for key, value in args.__dict__.items():
        try:
            config[key] = value.value
        except:
            config[key] = value
    return config


def loadModelAndData(num):
    # Load dictionaries
    with open('data/input_lang.index2word.json') as f:
        input_lang_index2word = json.load(f)
    with open('data/input_lang.word2index.json') as f:
        input_lang_word2index = json.load(f)
    with open('data/output_lang.index2word.json') as f:
        output_lang_index2word = json.load(f)
    with open('data/output_lang.word2index.json') as f:
        output_lang_word2index = json.load(f)

    # Reload existing checkpoint
    model = Model(args, input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index)
    if args.load_param:
        model.loadModel(iter=num)

    # Load data
    if os.path.exists(args.decode_output):
        shutil.rmtree(args.decode_output)
        os.makedirs(args.decode_output)
    else:
        os.makedirs(args.decode_output)
    if os.path.exists(args.valid_output):
        shutil.rmtree(args.valid_output)
        os.makedirs(args.valid_output)
    else:
        os.makedirs(args.valid_output)

    # Load validation file list:
    with open('data/val_dials.json') as outfile:
        val_dials = json.load(outfile)
    # Load test file list:
    with open('data/test_dials.json') as outfile:
        test_dials = json.load(outfile)
    return model, val_dials, test_dials


def decode(num=1):
    model, val_dials, test_dials = loadModelAndData(num)
    evaluator_valid = MultiWozEvaluator("valid")
    evaluator_test = MultiWozEvaluator("test")
    start_time = time.time()
    for ii in range(2):
        if ii == 0:
            print(50 * '-' + 'GREEDY')
            model.beam_search = False
        else:
            print(50 * '-' + 'BEAM')
            model.beam_search = True

        # VALIDATION
        val_dials_gen = {}
        valid_loss = 0
        for name, val_file in val_dials.items():
            input_tensor = []; target_tensor = []; bs_tensor = []; db_tensor = []
            input_tensor, target_tensor, bs_tensor, db_tensor = util.loadDialogue(model, val_file, input_tensor, target_tensor, bs_tensor, db_tensor)
            # create an empty matrix with padding tokens
            input_tensor, input_lengths = util.padSequence(input_tensor)
            target_tensor, target_lengths = util.padSequence(target_tensor)
            bs_tensor = torch.tensor(bs_tensor, dtype=torch.float, device=device)
            db_tensor = torch.tensor(db_tensor, dtype=torch.float, device=device)
            output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                        db_tensor, bs_tensor)
            valid_loss += 0
            val_dials_gen[name] = output_words
        print('Current VALID LOSS:', valid_loss)
        with open(args.valid_output + 'val_dials_gen.json', 'w') as outfile:
            json.dump(val_dials_gen, outfile)
        evaluator_valid.evaluateModel(val_dials_gen, val_dials, mode='valid')

        # TESTING
        test_dials_gen = {}
        test_loss = 0
        for name, test_file in test_dials.items():
            input_tensor = []; target_tensor = []; bs_tensor = []; db_tensor = []
            input_tensor, target_tensor, bs_tensor, db_tensor = util.loadDialogue(model, test_file, input_tensor, target_tensor, bs_tensor, db_tensor)
            # create an empty matrix with padding tokens
            input_tensor, input_lengths = util.padSequence(input_tensor)
            target_tensor, target_lengths = util.padSequence(target_tensor)
            bs_tensor = torch.tensor(bs_tensor, dtype=torch.float, device=device)
            db_tensor = torch.tensor(db_tensor, dtype=torch.float, device=device)
            output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,
                                                        db_tensor, bs_tensor)
            test_loss += 0
            test_dials_gen[name] = output_words
        test_loss /= len(test_dials)
        print('Current TEST LOSS:', test_loss)
        with open(args.decode_output + 'test_dials_gen.json', 'w') as outfile:
            json.dump(test_dials_gen, outfile)
        evaluator_test.evaluateModel(test_dials_gen, test_dials, mode='test')
    print('TIME:', time.time() - start_time)


def decodeWrapper():
    # Load config file
    with open(args.model_path + '.config') as f:
        add_args = json.load(f)
    for k, v in add_args.items():
        setattr(args, k, v)
    args.mode = 'test'
    args.load_param = True
    args.dropout = 0.0
    assert args.dropout == 0.0

    # Start going through models
    args.original = args.model_path
    for ii in range(1, args.no_models + 1):
        print(70 * '-' + 'EVALUATING EPOCH %s' % ii)
        args.model_path = args.model_path + '-' + str(ii)
        try:
            decode(ii)
        except:
            print('cannot decode')
        args.model_path = args.original


if __name__ == '__main__':
    decodeWrapper()
|
[
"evaluate.MultiWozEvaluator"
] |
[((286, 328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""S2S"""'}), "(description='S2S')\n", (309, 328), False, 'import argparse\n'), ((1679, 1707), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1696, 1707), False, 'import torch\n'), ((1718, 1762), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (1730, 1762), False, 'import torch\n'), ((1652, 1677), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1675, 1677), False, 'import torch\n'), ((2547, 2656), 'model.model.Model', 'Model', (['args', 'input_lang_index2word', 'output_lang_index2word', 'input_lang_word2index', 'output_lang_word2index'], {}), '(args, input_lang_index2word, output_lang_index2word,\n input_lang_word2index, output_lang_word2index)\n', (2552, 2656), False, 'from model.model import Model\n'), ((2735, 2769), 'os.path.exists', 'os.path.exists', (['args.decode_output'], {}), '(args.decode_output)\n', (2749, 2769), False, 'import os\n'), ((2911, 2944), 'os.path.exists', 'os.path.exists', (['args.valid_output'], {}), '(args.valid_output)\n', (2925, 2944), False, 'import os\n'), ((3455, 3481), 'evaluate.MultiWozEvaluator', 'MultiWozEvaluator', (['"""valid"""'], {}), "('valid')\n", (3472, 3481), False, 'from evaluate import MultiWozEvaluator\n'), ((3503, 3528), 'evaluate.MultiWozEvaluator', 'MultiWozEvaluator', (['"""test"""'], {}), "('test')\n", (3520, 3528), False, 'from evaluate import MultiWozEvaluator\n'), ((3547, 3558), 'time.time', 'time.time', ([], {}), '()\n', (3556, 3558), False, 'import time\n'), ((2184, 2196), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2193, 2196), False, 'import json\n'), ((2284, 2296), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2293, 2296), False, 'import json\n'), ((2386, 2398), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2395, 2398), False, 'import json\n'), ((2488, 2500), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2497, 2500), False, 'import json\n'), ((2779, 2812), 'shutil.rmtree', 'shutil.rmtree', (['args.decode_output'], {}), '(args.decode_output)\n', (2792, 2812), False, 'import shutil\n'), ((2821, 2852), 'os.makedirs', 'os.makedirs', (['args.decode_output'], {}), '(args.decode_output)\n', (2832, 2852), False, 'import os\n'), ((2871, 2902), 'os.makedirs', 'os.makedirs', (['args.decode_output'], {}), '(args.decode_output)\n', (2882, 2902), False, 'import os\n'), ((2954, 2986), 'shutil.rmtree', 'shutil.rmtree', (['args.valid_output'], {}), '(args.valid_output)\n', (2967, 2986), False, 'import shutil\n'), ((2995, 3025), 'os.makedirs', 'os.makedirs', (['args.valid_output'], {}), '(args.valid_output)\n', (3006, 3025), False, 'import os\n'), ((3044, 3074), 'os.makedirs', 'os.makedirs', (['args.valid_output'], {}), '(args.valid_output)\n', (3055, 3074), False, 'import os\n'), ((3178, 3196), 'json.load', 'json.load', (['outfile'], {}), '(outfile)\n', (3187, 3196), False, 'import json\n'), ((3296, 3314), 'json.load', 'json.load', (['outfile'], {}), '(outfile)\n', (3305, 3314), False, 'import json\n'), ((6453, 6465), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6462, 6465), False, 'import json\n'), ((4034, 4123), 'utils.util.loadDialogue', 'util.loadDialogue', (['model', 'val_file', 'input_tensor', 'target_tensor', 'bs_tensor', 'db_tensor'], {}), '(model, val_file, input_tensor, target_tensor, bs_tensor,\n db_tensor)\n', (4051, 4123), False, 'from utils import util\n'), ((4219, 4249), 'utils.util.padSequence', 
'util.padSequence', (['input_tensor'], {}), '(input_tensor)\n', (4235, 4249), False, 'from utils import util\n'), ((4294, 4325), 'utils.util.padSequence', 'util.padSequence', (['target_tensor'], {}), '(target_tensor)\n', (4310, 4325), False, 'from utils import util\n'), ((4350, 4407), 'torch.tensor', 'torch.tensor', (['bs_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(bs_tensor, dtype=torch.float, device=device)\n', (4362, 4407), False, 'import torch\n'), ((4432, 4489), 'torch.tensor', 'torch.tensor', (['db_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(db_tensor, dtype=torch.float, device=device)\n', (4444, 4489), False, 'import torch\n'), ((4900, 4933), 'json.dump', 'json.dump', (['val_dials_gen', 'outfile'], {}), '(val_dials_gen, outfile)\n', (4909, 4933), False, 'import json\n'), ((5277, 5367), 'utils.util.loadDialogue', 'util.loadDialogue', (['model', 'test_file', 'input_tensor', 'target_tensor', 'bs_tensor', 'db_tensor'], {}), '(model, test_file, input_tensor, target_tensor, bs_tensor,\n db_tensor)\n', (5294, 5367), False, 'from utils import util\n'), ((5463, 5493), 'utils.util.padSequence', 'util.padSequence', (['input_tensor'], {}), '(input_tensor)\n', (5479, 5493), False, 'from utils import util\n'), ((5538, 5569), 'utils.util.padSequence', 'util.padSequence', (['target_tensor'], {}), '(target_tensor)\n', (5554, 5569), False, 'from utils import util\n'), ((5594, 5651), 'torch.tensor', 'torch.tensor', (['bs_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(bs_tensor, dtype=torch.float, device=device)\n', (5606, 5651), False, 'import torch\n'), ((5676, 5733), 'torch.tensor', 'torch.tensor', (['db_tensor'], {'dtype': 'torch.float', 'device': 'device'}), '(db_tensor, dtype=torch.float, device=device)\n', (5688, 5733), False, 'import torch\n'), ((6180, 6214), 'json.dump', 'json.dump', (['test_dials_gen', 'outfile'], {}), '(test_dials_gen, outfile)\n', (6189, 6214), False, 'import json\n'), ((6313, 6324), 'time.time', 'time.time', ([], {}), '()\n', (6322, 6324), False, 'import time\n')]
|
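The script leans on `utils.util.padSequence`, which is not included in the record. Below is a hypothetical stand-in that matches how the call sites use it (a right-padded index tensor plus the original lengths); the real helper may differ:

```python
import torch

# Hypothetical stand-in for utils.util.padSequence as used above.
def pad_sequence(seqs, pad_id=0):
    lengths = [len(s) for s in seqs]
    batch = torch.full((len(seqs), max(lengths)), pad_id, dtype=torch.long)
    for row, seq in enumerate(seqs):
        batch[row, :len(seq)] = torch.tensor(seq, dtype=torch.long)
    return batch, lengths

batch, lengths = pad_sequence([[5, 3, 9], [7, 2]])
print(batch)    # tensor([[5, 3, 9], [7, 2, 0]])
print(lengths)  # [3, 2]
```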
import argparse
import os

import torch
import yaml
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from utils.model import get_vocoder, get_param_num
from utils.tools import to_device, log, synth_one_sample
from model import FastSpeech2Loss, FastSpeech2
from dataset import Dataset
from evaluate import evaluate
import wandb
import pandas as pd
from pytorch_lamb import Lamb, log_lamb_rs

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def main(args, configs):
    print("Prepare training ...")

    preprocess_config, model_config, train_config = configs

    # Get dataset
    dataset = Dataset(
        "train.txt", preprocess_config, train_config, sort=True, drop_last=True
    )
    batch_size = args.batch_size if args.batch_size else train_config["optimizer"]["batch_size"]
    group_size = 1  # Set this larger than 1 to enable sorting in Dataset
    print(f"Number of rows in training dataset: {len(dataset)}")
    assert batch_size * group_size < len(dataset)
    loader = DataLoader(
        dataset,
        batch_size=batch_size * group_size,
        shuffle=True,
        collate_fn=dataset.collate_fn,
    )

    # Prepare model
    model = FastSpeech2(preprocess_config, model_config).to(device)
    model.train()  # sets the model into training mode
    step_size = train_config["optimizer"]["step_size"]
    weight_decay = args.wd if args.wd else train_config["optimizer"]["weight_decay"]
    betas = train_config["optimizer"]["betas"]
    eps = train_config["optimizer"]["eps"]
    # Reading the paper you'd think 'lr' (learning rate) is not needed for Lamb, but the lr param
    # is basically an Adam learning rate, which is then multiplied by the trust ratio for each
    # layer to get the actual learning rate used at each step for that layer.
    optimizer = Lamb(model.parameters(), lr=step_size, weight_decay=weight_decay, betas=betas, eps=eps)
    model = nn.DataParallel(model)
    num_param = get_param_num(model)
    Loss = FastSpeech2Loss(preprocess_config, model_config).to(device)
    print("Number of FastSpeech2 Parameters:", num_param)

    if args.wandb:
        from flatten_json import flatten
        log_config = {}
        for key, val in pd.json_normalize(model_config["transformer"]).iloc[0].items():
            log_config[f"transformer.{key}"] = str(val)
        log_config["multi_speaker"] = model_config["multi_speaker"]
        log_config["vocoder"] = model_config["vocoder"]
        log_config["sample_rate"] = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
        log_config["train.batch_size"] = batch_size
        log_config["train.optimizer"] = "lamb"
        log_config["train.step_size"] = step_size
        log_config["train.weight_decay"] = weight_decay
        log_config["train.betas"] = str(betas)
        log_config["train.eps"] = eps
        log_config["num_params"] = num_param
        log_config["len(dataset)"] = len(dataset)
        print(log_config)
        if args.wandb:
            wandb.init(project="synthesis-fastspeech2", entity="papa-reo", config=log_config)

    # Load vocoder
    vocoder = get_vocoder(model_config, device)

    # Init logger
    for p in train_config["path"].values():
        os.makedirs(p, exist_ok=True)
    train_log_path = os.path.join(train_config["path"]["log_path"], "train")
    val_log_path = os.path.join(train_config["path"]["log_path"], "val")
    os.makedirs(train_log_path, exist_ok=True)
    os.makedirs(val_log_path, exist_ok=True)
    train_logger = SummaryWriter(train_log_path)
    val_logger = SummaryWriter(val_log_path)

    # Training
    step = args.restore_step + 1
    epoch = 1
    grad_acc_step = train_config["optimizer"]["grad_acc_step"]
    grad_clip_thresh = train_config["optimizer"]["grad_clip_thresh"]
    total_step = train_config["step"]["total_step"]
    log_step = train_config["step"]["log_step"]
    save_step = train_config["step"]["save_step"]
    synth_step = train_config["step"]["synth_step"]
    val_step = train_config["step"]["val_step"]

    outer_bar = tqdm(total=total_step, desc="Training", position=0)
    outer_bar.n = args.restore_step
    outer_bar.update()

    while True:
        inner_bar = tqdm(total=len(loader), desc="Epoch {}".format(epoch), position=1)
        for batchs in loader:
            for batch in batchs:
                batch = to_device(batch, device)

                # Forward
                output = model(*(batch[2:]))

                # Cal Loss
                losses = Loss(batch, output)
                total_loss = losses[0]

                # Backward
                total_loss = total_loss / grad_acc_step
                total_loss.backward()
                if step % grad_acc_step == 0:
                    # Clipping gradients to avoid gradient explosion
                    nn.utils.clip_grad_norm_(model.parameters(), grad_clip_thresh)
                    # Update weights
                    optimizer.step()
                    optimizer.zero_grad()

                if step % log_step == 0:
                    losses = [l.item() for l in losses]
                    message1 = "Step {}/{}, ".format(step, total_step)
                    message2 = "Total Loss: {:.4f}, Mel Loss: {:.4f}, Mel PostNet Loss: {:.4f}, Pitch Loss: {:.4f}, Energy Loss: {:.4f}, Duration Loss: {:.4f}".format(
                        *losses
                    )
                    with open(os.path.join(train_log_path, "log.txt"), "a") as f:
                        f.write(message1 + message2 + "\n")
                    outer_bar.write(message1 + message2)
                    log(train_logger, step, losses=losses)
                    log_lamb_rs(optimizer, train_logger, step)
                    if args.wandb:
                        wandb.log({"train_loss": total_loss})
                        wandb.watch(model)

                if step % synth_step == 0:
                    fig, wav_reconstruction, wav_prediction, tag = synth_one_sample(
                        batch,
                        output,
                        vocoder,
                        model_config,
                        preprocess_config,
                    )
                    log(
                        train_logger,
                        fig=fig,
                        tag="Training/step_{}_{}".format(step, tag),
                    )
                    sampling_rate = preprocess_config["preprocessing"]["audio"][
                        "sampling_rate"
                    ]
                    log(
                        train_logger,
                        audio=wav_reconstruction,
                        sampling_rate=sampling_rate,
                        tag="Training/step_{}_{}_reconstructed".format(step, tag),
                    )
                    log(
                        train_logger,
                        audio=wav_prediction,
                        sampling_rate=sampling_rate,
                        tag="Training/step_{}_{}_synthesized".format(step, tag),
                    )

                if step % val_step == 0:
                    model.eval()
                    message = evaluate(model, step, configs, val_logger, vocoder, log_to_wandb=args.wandb)
                    with open(os.path.join(val_log_path, "log.txt"), "a") as f:
                        f.write(message + "\n")
                    outer_bar.write(message)
                    model.train()

                if step % save_step == 0:
                    torch.save(
                        {
                            "model": model.module.state_dict(),
                            "optimizer": optimizer.state_dict(),
                        },
                        os.path.join(
                            train_config["path"]["ckpt_path"],
                            "{}.pth.tar".format(step),
                        ),
                    )

                if step == total_step:
                    quit()
                step += 1
                outer_bar.update(1)

            inner_bar.update(1)
        epoch += 1


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--restore_step", type=int, default=0)
    parser.add_argument(
        "-p",
        "--preprocess_config",
        type=str,
        required=True,
        help="path to preprocess.yaml",
    )
    parser.add_argument(
        "-m", "--model_config", type=str, required=True, help="path to model.yaml"
    )
    parser.add_argument(
        "-t", "--train_config", type=str, required=True, help="path to train.yaml"
    )
    parser.add_argument(
        '--batch-size', type=int, default=None, metavar='N', help='input batch size for training (for default val see config/../train.yaml)'
    )
    parser.add_argument(
        '--wd', type=float, default=None, metavar='WD', help='weight decay (for default val see config/../train.yaml)'
    )
    parser.add_argument(
        '--wandb', type=bool, default=False, help='log to wandb'
    )
    args = parser.parse_args()

    # Read Config
    preprocess_config = yaml.load(
        open(args.preprocess_config, "r"), Loader=yaml.FullLoader
    )
    model_config = yaml.load(open(args.model_config, "r"), Loader=yaml.FullLoader)
    train_config = yaml.load(open(args.train_config, "r"), Loader=yaml.FullLoader)
    configs = (preprocess_config, model_config, train_config)

    main(args, configs)
|
[
"evaluate.evaluate"
] |
[((740, 825), 'dataset.Dataset', 'Dataset', (['"""train.txt"""', 'preprocess_config', 'train_config'], {'sort': '(True)', 'drop_last': '(True)'}), "('train.txt', preprocess_config, train_config, sort=True, drop_last=True\n )\n", (747, 825), False, 'from dataset import Dataset\n'), ((1141, 1245), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(batch_size * group_size)', 'shuffle': '(True)', 'collate_fn': 'dataset.collate_fn'}), '(dataset, batch_size=batch_size * group_size, shuffle=True,\n collate_fn=dataset.collate_fn)\n', (1151, 1245), False, 'from torch.utils.data import DataLoader\n'), ((2044, 2066), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (2059, 2066), True, 'import torch.nn as nn\n'), ((2084, 2104), 'utils.model.get_param_num', 'get_param_num', (['model'], {}), '(model)\n', (2097, 2104), False, 'from utils.model import get_vocoder, get_param_num\n'), ((3275, 3308), 'utils.model.get_vocoder', 'get_vocoder', (['model_config', 'device'], {}), '(model_config, device)\n', (3286, 3308), False, 'from utils.model import get_vocoder, get_param_num\n'), ((3436, 3491), 'os.path.join', 'os.path.join', (["train_config['path']['log_path']", '"""train"""'], {}), "(train_config['path']['log_path'], 'train')\n", (3448, 3491), False, 'import os\n'), ((3512, 3565), 'os.path.join', 'os.path.join', (["train_config['path']['log_path']", '"""val"""'], {}), "(train_config['path']['log_path'], 'val')\n", (3524, 3565), False, 'import os\n'), ((3571, 3613), 'os.makedirs', 'os.makedirs', (['train_log_path'], {'exist_ok': '(True)'}), '(train_log_path, exist_ok=True)\n', (3582, 3613), False, 'import os\n'), ((3619, 3659), 'os.makedirs', 'os.makedirs', (['val_log_path'], {'exist_ok': '(True)'}), '(val_log_path, exist_ok=True)\n', (3630, 3659), False, 'import os\n'), ((3680, 3709), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['train_log_path'], {}), '(train_log_path)\n', (3693, 3709), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3728, 3755), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['val_log_path'], {}), '(val_log_path)\n', (3741, 3755), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((4231, 4282), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_step', 'desc': '"""Training"""', 'position': '(0)'}), "(total=total_step, desc='Training', position=0)\n", (4235, 4282), False, 'from tqdm import tqdm\n'), ((8412, 8437), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8435, 8437), False, 'import argparse\n'), ((534, 559), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (557, 559), False, 'import torch\n'), ((3384, 3413), 'os.makedirs', 'os.makedirs', (['p'], {'exist_ok': '(True)'}), '(p, exist_ok=True)\n', (3395, 3413), False, 'import os\n'), ((1322, 1366), 'model.FastSpeech2', 'FastSpeech2', (['preprocess_config', 'model_config'], {}), '(preprocess_config, model_config)\n', (1333, 1366), False, 'from model import FastSpeech2Loss, FastSpeech2\n'), ((2117, 2165), 'model.FastSpeech2Loss', 'FastSpeech2Loss', (['preprocess_config', 'model_config'], {}), '(preprocess_config, model_config)\n', (2132, 2165), False, 'from model import FastSpeech2Loss, FastSpeech2\n'), ((3156, 3242), 'wandb.init', 'wandb.init', ([], {'project': '"""synthesis-fastspeech2"""', 'entity': '"""papa-reo"""', 'config': 'log_config'}), "(project='synthesis-fastspeech2', entity='papa-reo', config=\n log_config)\n", (3166, 3242), False, 'import wandb\n'), ((4541, 4565), 
'utils.tools.to_device', 'to_device', (['batch', 'device'], {}), '(batch, device)\n', (4550, 4565), False, 'from utils.tools import to_device, log, synth_one_sample\n'), ((5832, 5870), 'utils.tools.log', 'log', (['train_logger', 'step'], {'losses': 'losses'}), '(train_logger, step, losses=losses)\n', (5835, 5870), False, 'from utils.tools import to_device, log, synth_one_sample\n'), ((5892, 5934), 'pytorch_lamb.log_lamb_rs', 'log_lamb_rs', (['optimizer', 'train_logger', 'step'], {}), '(optimizer, train_logger, step)\n', (5903, 5934), False, 'from pytorch_lamb import Lamb, log_lamb_rs\n'), ((6194, 6267), 'utils.tools.synth_one_sample', 'synth_one_sample', (['batch', 'output', 'vocoder', 'model_config', 'preprocess_config'], {}), '(batch, output, vocoder, model_config, preprocess_config)\n', (6210, 6267), False, 'from utils.tools import to_device, log, synth_one_sample\n'), ((7412, 7488), 'evaluate.evaluate', 'evaluate', (['model', 'step', 'configs', 'val_logger', 'vocoder'], {'log_to_wandb': 'args.wandb'}), '(model, step, configs, val_logger, vocoder, log_to_wandb=args.wandb)\n', (7420, 7488), False, 'from evaluate import evaluate\n'), ((2350, 2396), 'pandas.json_normalize', 'pd.json_normalize', (["model_config['transformer']"], {}), "(model_config['transformer'])\n", (2367, 2396), True, 'import pandas as pd\n'), ((5998, 6035), 'wandb.log', 'wandb.log', (["{'train_loss': total_loss}"], {}), "({'train_loss': total_loss})\n", (6007, 6035), False, 'import wandb\n'), ((6061, 6079), 'wandb.watch', 'wandb.watch', (['model'], {}), '(model)\n', (6072, 6079), False, 'import wandb\n'), ((5636, 5675), 'os.path.join', 'os.path.join', (['train_log_path', '"""log.txt"""'], {}), "(train_log_path, 'log.txt')\n", (5648, 5675), False, 'import os\n'), ((7520, 7557), 'os.path.join', 'os.path.join', (['val_log_path', '"""log.txt"""'], {}), "(val_log_path, 'log.txt')\n", (7532, 7557), False, 'import os\n')]
|
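The inner loop above divides each loss by `grad_acc_step` and only steps the optimizer every `grad_acc_step` micro-batches. A self-contained sketch of that gradient-accumulation pattern, with a toy model and random data rather than the record's:

```python
import torch

# Losses are scaled by 1/grad_acc_step and gradients applied only every
# grad_acc_step micro-batches, emulating a larger effective batch size.
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
grad_acc_step, grad_clip_thresh = 4, 1.0

for step in range(1, 17):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / grad_acc_step
    loss.backward()  # gradients accumulate across micro-batches
    if step % grad_acc_step == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_thresh)
        optimizer.step()
        optimizer.zero_grad()
```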
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
from skimage.segmentation.random_walker_segmentation import random_walker
from tqdm import tqdm
import torchio
import torch


def compute_predictions(image_path, mask_path, gt_path, save_path, nr_modalities, class_labels, resize=True, beta=10):
    image_filenames = utils.load_filenames(image_path)[::nr_modalities]
    mask_filenames = utils.load_filenames(mask_path)
    target_shape = (256, 256, 200)  # (256, 256, 100)
    is_resized = False

    for i in tqdm(range(len(image_filenames))):
        image, affine, spacing, header = utils.load_nifty(image_filenames[i])
        mask, _, _, _ = utils.load_nifty(mask_filenames[i])
        if resize and image.size > np.prod(target_shape):
            is_resized = True
            print("Resized: ", os.path.basename(image_filenames[i]))
            original_shape = image.shape
            image = utils.interpolate(image, (target_shape[0], target_shape[1], original_shape[2]))
            mask = utils.interpolate(mask, (target_shape[0], target_shape[1], original_shape[2]), mask=True)
        image = utils.normalize(image)
        labels = np.unique(mask)
        # labels = labels[labels > 0]
        for label in np.flip(labels):
            mask[mask == label] = label + 1
        mask = mask.astype(np.uint8)
        mask = random_walker(data=image, labels=mask, beta=beta, mode='cg_mg')
        for label in labels:
            mask[mask == label + 1] = label
        if is_resized:
            mask = utils.interpolate(mask, original_shape, mask=True)
        utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + ".nii.gz"), mask, affine, spacing, header, is_mask=True)
    results = evaluate(gt_path, save_path, class_labels)
    return results


# def compute_predictions(image_path, mask_path, gt_path, save_path):
#     image_filenames = utils.load_filenames(image_path)
#     mask_filenames = utils.load_filenames(mask_path)
#
#     for i in tqdm(range(len(image_filenames))):
#         _, affine, spacing, header = utils.load_nifty(mask_filenames[i])
#         subject = torchio.Subject(image=torchio.ScalarImage(image_filenames[i]), mask=torchio.LabelMap(mask_filenames[i]))
#         sampler = torchio.inference.GridSampler(subject, patch_size=(20, 20, 10), padding_mode='edge')
#         aggregator = torchio.inference.GridAggregator(sampler)
#         for patch in sampler:
#             image = patch["image"][torchio.DATA].numpy()[0]
#             image = utils.normalize(image)
#             mask = patch["mask"][torchio.DATA].numpy()[0]
#             location = torch.tensor(patch[torchio.LOCATION]).unsqueeze(0)
#             if not (image.max() <= 0 or mask.max() == 0):
#                 # image[image < 0] = 0
#                 mask = mask.astype(np.int32)
#                 mask = random_walker(data=image, labels=mask, mode='cg_j')
#                 mask = torch.tensor(mask).unsqueeze(0).unsqueeze(0)
#                 aggregator.add_batch(mask, location)
#         mask = aggregator.get_output_tensor()
#         utils.save_nifty(save_path + os.path.basename(mask_filenames[i]), mask, affine, spacing, header, is_mask=True)
#     mean_dice_score, median_dice_score = evaluate(gt_path, save_path)
#     return mean_dice_score, median_dice_score
|
[
"evaluate.evaluate"
] |
[((432, 463), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['mask_path'], {}), '(mask_path)\n', (452, 463), False, 'from i3Deep import utils\n'), ((1785, 1827), 'evaluate.evaluate', 'evaluate', (['gt_path', 'save_path', 'class_labels'], {}), '(gt_path, save_path, class_labels)\n', (1793, 1827), False, 'from evaluate import evaluate\n'), ((360, 392), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['image_path'], {}), '(image_path)\n', (380, 392), False, 'from i3Deep import utils\n'), ((636, 672), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (652, 672), False, 'from i3Deep import utils\n'), ((698, 733), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['mask_filenames[i]'], {}), '(mask_filenames[i])\n', (714, 733), False, 'from i3Deep import utils\n'), ((1164, 1186), 'i3Deep.utils.normalize', 'utils.normalize', (['image'], {}), '(image)\n', (1179, 1186), False, 'from i3Deep import utils\n'), ((1205, 1220), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (1214, 1220), True, 'import numpy as np\n'), ((1282, 1297), 'numpy.flip', 'np.flip', (['labels'], {}), '(labels)\n', (1289, 1297), True, 'import numpy as np\n'), ((1398, 1461), 'skimage.segmentation.random_walker_segmentation.random_walker', 'random_walker', ([], {'data': 'image', 'labels': 'mask', 'beta': 'beta', 'mode': '"""cg_mg"""'}), "(data=image, labels=mask, beta=beta, mode='cg_mg')\n", (1411, 1461), False, 'from skimage.segmentation.random_walker_segmentation import random_walker\n'), ((957, 1036), 'i3Deep.utils.interpolate', 'utils.interpolate', (['image', '(target_shape[0], target_shape[1], original_shape[2])'], {}), '(image, (target_shape[0], target_shape[1], original_shape[2]))\n', (974, 1036), False, 'from i3Deep import utils\n'), ((1057, 1151), 'i3Deep.utils.interpolate', 'utils.interpolate', (['mask', '(target_shape[0], target_shape[1], original_shape[2])'], {'mask': '(True)'}), '(mask, (target_shape[0], target_shape[1], original_shape[2\n ]), mask=True)\n', (1074, 1151), False, 'from i3Deep import utils\n'), ((1581, 1631), 'i3Deep.utils.interpolate', 'utils.interpolate', (['mask', 'original_shape'], {'mask': '(True)'}), '(mask, original_shape, mask=True)\n', (1598, 1631), False, 'from i3Deep import utils\n'), ((770, 791), 'numpy.prod', 'np.prod', (['target_shape'], {}), '(target_shape)\n', (777, 791), True, 'import numpy as np\n'), ((856, 892), 'os.path.basename', 'os.path.basename', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (872, 892), False, 'import os\n'), ((1670, 1723), 'os.path.basename', 'os.path.basename', (["(mask_filenames[i][:-12] + '.nii.gz')"], {}), "(mask_filenames[i][:-12] + '.nii.gz')\n", (1686, 1723), False, 'import os\n')]
|
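The label shifting around `random_walker` in this record exists because scikit-image treats label 0 as "unlabeled": shifting every class id up by one keeps class 0 from being treated as unseeded, and the second loop shifts the ids back after the walk. A toy repeat of just that bookkeeping (the `random_walker` call itself is elided):

```python
import numpy as np

mask = np.array([[0, 0, 1], [0, 2, 1]], dtype=np.uint8)
labels = np.unique(mask)
for label in np.flip(labels):   # descending order avoids collisions between old and new ids
    mask[mask == label] = label + 1
# ... run random_walker(data=image, labels=mask, ...) here ...
for label in labels:
    mask[mask == label + 1] = label
print(mask)  # back to the original class ids
```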
import random
import os
import sys
from models.bert import BERT_Model
from models.bilstm_crf_ import BiLSTM_CRF_Model
from data import build_corpus
from config import ModelPathConfig, ResultPathConfig
from datetime import datetime
from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf
from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd
from tabulate import tabulate
import pandas as pd


def bert_test():
    model_is_existed = os.path.exists(ModelPathConfig.bert)
    print("upload data!")
    word_lists, tag_lists, word2id, tag2id = build_corpus("train")
    test_word_lists, test_tag_lists, _, _ = build_corpus("test")
    labels = list(tag2id.keys())
    dev_indices = random.sample(range(len(word_lists)), len(word_lists) // 5)
    train_indices = [i for i in range(len(word_lists)) if i not in dev_indices]
    dev_word_lists = [word_lists[ind] for ind in dev_indices]
    dev_tag_lists = [tag_lists[ind] for ind in dev_indices]
    train_word_lists = [word_lists[ind] for ind in train_indices]
    train_tag_lists = [tag_lists[ind] for ind in train_indices]
    bert_tag2id = extend_map_bert(tag2id)

    if not model_is_existed:
        print('start to training')
        start = datetime.now()
        vocab_size = len(word2id)
        out_size = len(bert_tag2id)
        bert_model = BERT_Model(vocab_size, out_size)
        bert_model.train(train_word_lists, train_tag_lists,
                         word2id, bert_tag2id, dev_word_lists, dev_tag_lists)
        deltatime = datetime.now() - start
        print("Training is finished, {} second".format(deltatime.seconds))
        try:
            print("Save the model")
            save_model(bert_model, ModelPathConfig.bert)
        except:
            print("fail to save model")
    else:
        try:
            print("load model")
            bert_model = load_model(ModelPathConfig.bert)
        except:
            print("fail to load model")
            sys.exit(0)

    print("test the model")
    pred_tag_lists = bert_model.test(test_word_lists, test_tag_lists, word2id, bert_tag2id)
    label_tag_lists = test_tag_lists
    units = evaluate_entity_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig.bert_entity)
    print(tabulate(df, headers='keys', tablefmt='psql'))
    units = evaluate_single_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig.bert_model)
    print(tabulate(df, headers='keys', tablefmt='psql'))


def _bilstm_crf_test(if_train=True):
    model_is_existed = os.path.exists(ModelPathConfig._bilstm_crf)
    print("upload data!")
    word_lists, tag_lists, word2id, tag2id = build_corpus("train")
    test_word_lists, test_tag_lists, _, _ = build_corpus("test")
    labels = list(tag2id.keys())
    dev_indices = random.sample(range(len(word_lists)), len(word_lists) // 5)
    train_indices = [i for i in range(len(word_lists)) if i not in dev_indices]
    dev_word_lists = [word_lists[ind] for ind in dev_indices]
    dev_tag_lists = [tag_lists[ind] for ind in dev_indices]
    train_word_lists = [word_lists[ind] for ind in train_indices]
    train_tag_lists = [tag_lists[ind] for ind in train_indices]
    # bilstm_crf_word2id, bilstm_crf_tag2id = extend_map(word2id, tag2id, crf=True)
    bilstm_crf_word2id, bilstm_crf_tag2id = extend_map(word2id, tag2id, crf=False)

    if if_train or not model_is_existed:
        print('start to training')
        # sample_print_test(train_word_lists, train_tag_lists)
        start = datetime.now()
        vocab_size = len(bilstm_crf_word2id)
        out_size = len(tag2id)
        bilstm_model = BiLSTM_CRF_Model(vocab_size, out_size)
        bilstm_model.train(train_word_lists, train_tag_lists,
                           word2id, bilstm_crf_tag2id, dev_word_lists, dev_tag_lists)
        deltatime = datetime.now() - start
        print("Training is finished, {} second".format(deltatime.seconds))
        save_model(bilstm_model, ModelPathConfig._bilstm_crf)
        print("Save the model")
    else:
        print("load model")
        bilstm_model = load_model(ModelPathConfig._bilstm_crf)

    print("test the model")
    pred_tag_lists, label_tag_lists = bilstm_model.test(test_word_lists, test_tag_lists, word2id, tag2id)
    units = evaluate_entity_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig._bilstm_crf_entity)
    print(tabulate(df, headers='keys', tablefmt='psql'))
    units = evaluate_single_label(pred_tag_lists, label_tag_lists, labels)
    df = unitstopd(units)
    df.to_csv(ResultPathConfig._bilstm_crf_model)
    print(tabulate(df, headers='keys', tablefmt='psql'))


def mrc_bert_test(if_train=True):
    pass


if __name__ == '__main__':
    pass
[
"evaluate.unitstopd",
"evaluate.evaluate_single_label",
"evaluate.evaluate_entity_label"
] |
[((504, 540), 'os.path.exists', 'os.path.exists', (['ModelPathConfig.bert'], {}), '(ModelPathConfig.bert)\n', (518, 540), False, 'import os\n'), ((609, 630), 'data.build_corpus', 'build_corpus', (['"""train"""'], {}), "('train')\n", (621, 630), False, 'from data import build_corpus\n'), ((670, 690), 'data.build_corpus', 'build_corpus', (['"""test"""'], {}), "('test')\n", (682, 690), False, 'from data import build_corpus\n'), ((1138, 1161), 'utils.extend_map_bert', 'extend_map_bert', (['tag2id'], {}), '(tag2id)\n', (1153, 1161), False, 'from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf\n'), ((2138, 2200), 'evaluate.evaluate_entity_label', 'evaluate_entity_label', (['pred_tag_lists', 'label_tag_lists', 'labels'], {}), '(pred_tag_lists, label_tag_lists, labels)\n', (2159, 2200), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((2206, 2222), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (2215, 2222), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((2333, 2395), 'evaluate.evaluate_single_label', 'evaluate_single_label', (['pred_tag_lists', 'label_tag_lists', 'labels'], {}), '(pred_tag_lists, label_tag_lists, labels)\n', (2354, 2395), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((2401, 2417), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (2410, 2417), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((2577, 2620), 'os.path.exists', 'os.path.exists', (['ModelPathConfig._bilstm_crf'], {}), '(ModelPathConfig._bilstm_crf)\n', (2591, 2620), False, 'import os\n'), ((2688, 2709), 'data.build_corpus', 'build_corpus', (['"""train"""'], {}), "('train')\n", (2700, 2709), False, 'from data import build_corpus\n'), ((2749, 2769), 'data.build_corpus', 'build_corpus', (['"""test"""'], {}), "('test')\n", (2761, 2769), False, 'from data import build_corpus\n'), ((3318, 3356), 'utils.extend_map', 'extend_map', (['word2id', 'tag2id'], {'crf': '(False)'}), '(word2id, tag2id, crf=False)\n', (3328, 3356), False, 'from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf\n'), ((4235, 4297), 'evaluate.evaluate_entity_label', 'evaluate_entity_label', (['pred_tag_lists', 'label_tag_lists', 'labels'], {}), '(pred_tag_lists, label_tag_lists, labels)\n', (4256, 4297), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((4303, 4319), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (4312, 4319), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((4437, 4499), 'evaluate.evaluate_single_label', 'evaluate_single_label', (['pred_tag_lists', 'label_tag_lists', 'labels'], {}), '(pred_tag_lists, label_tag_lists, labels)\n', (4458, 4499), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((4505, 4521), 'evaluate.unitstopd', 'unitstopd', (['units'], {}), '(units)\n', (4514, 4521), False, 'from evaluate import evaluate_entity_label, evaluate_multiclass, evaluate_single_label, unitstopd\n'), ((1245, 1259), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1257, 1259), False, 'from datetime import datetime\n'), ((1345, 1377), 'models.bert.BERT_Model', 'BERT_Model', (['vocab_size', 'out_size'], {}), '(vocab_size, out_size)\n', (1355, 1377), False, 'from models.bert import BERT_Model\n'), ((2277, 2322), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (2285, 2322), False, 'from tabulate import tabulate\n'), ((2471, 2516), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (2479, 2516), False, 'from tabulate import tabulate\n'), ((3508, 3522), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3520, 3522), False, 'from datetime import datetime\n'), ((3617, 3655), 'models.bilstm_crf_.BiLSTM_CRF_Model', 'BiLSTM_CRF_Model', (['vocab_size', 'out_size'], {}), '(vocab_size, out_size)\n', (3633, 3655), False, 'from models.bilstm_crf_ import BiLSTM_CRF_Model\n'), ((3908, 3961), 'utils.save_model', 'save_model', (['bilstm_model', 'ModelPathConfig._bilstm_crf'], {}), '(bilstm_model, ModelPathConfig._bilstm_crf)\n', (3918, 3961), False, 'from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf\n'), ((4052, 4091), 'utils.load_model', 'load_model', (['ModelPathConfig._bilstm_crf'], {}), '(ModelPathConfig._bilstm_crf)\n', (4062, 4091), False, 'from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf\n'), ((4381, 4426), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (4389, 4426), False, 'from tabulate import tabulate\n'), ((4582, 4627), 'tabulate.tabulate', 'tabulate', (['df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df, headers='keys', tablefmt='psql')\n", (4590, 4627), False, 'from tabulate import tabulate\n'), ((1518, 1532), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1530, 1532), False, 'from datetime import datetime\n'), ((1675, 1719), 'utils.save_model', 'save_model', (['bert_model', 'ModelPathConfig.bert'], {}), '(bert_model, ModelPathConfig.bert)\n', (1685, 1719), False, 'from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf\n'), ((1862, 1894), 'utils.load_model', 'load_model', (['ModelPathConfig.bert'], {}), '(ModelPathConfig.bert)\n', (1872, 1894), False, 'from utils import extend_map_bert, save_model, load_model, extend_map, add_label_for_lstmcrf\n'), ((3803, 3817), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3815, 3817), False, 'from datetime import datetime\n'), ((1963, 1974), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1971, 1974), False, 'import sys\n')]
|
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
# from raft import RAFT
from core.ours import RAFT
import evaluate
import datasets
import flow_vis
import random
from torch.utils.tensorboard import SummaryWriter
from utils.scheduler import CosineAnnealingWarmupRestarts
try:
from torch.cuda.amp import GradScaler
except:
# dummy GradScaler for PyTorch < 1.6
class GradScaler:
def __init__(self):
pass
def scale(self, loss):
return loss
def unscale_(self, optimizer):
pass
def step(self, optimizer):
optimizer.step()
def update(self):
pass
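# Hedged sketch (not in the active training path): with mixed precision
# enabled, the dummy class above would be replaced by the real GradScaler
# and exercised like the commented-out scaler calls in train() below:
#   scaler = GradScaler(enabled=args.mixed_precision)
#   scaler.scale(loss).backward()
#   scaler.unscale_(optimizer)   # so clip_grad_norm_ sees unscaled gradients
#   scaler.step(optimizer)
#   scaler.update()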
# exclude extremely large displacements
MAX_FLOW = 400
SUM_FREQ = 100
VAL_FREQ = 5000
def sequence_loss(flow_preds, flow_gt, valid, sparse_lambda=1.0, gamma=0.8, max_flow=MAX_FLOW):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds[0])
flow_loss = 0.0
sparse_loss = 0.0
    # exclude invalid pixels and extremely large displacements
mag = torch.sum(flow_gt ** 2, dim=1).sqrt()
dense_valid = (valid >= 0.5) & (mag < max_flow)
bs, _, I_H, I_W = flow_gt.shape
for i in range(n_predictions):
# i_weight = gamma ** (n_predictions - i - 1)
i_weight = 1.0
i_loss = (flow_preds[0][i] - flow_gt).abs()
# i_loss = (flow_preds[0][i] - flow_gt).square()
flow_loss += i_weight * (dense_valid[:, None] * i_loss).mean()
if sparse_lambda > 0.0:
ref, sparse_flow, _, _ = flow_preds[1][i]
scale = torch.tensor((I_W - 1, I_H - 1), dtype=torch.float32).view(1, 1, 2).to(sparse_flow.device)
flatten_gt = flow_gt.flatten(2).permute(0, 2, 1)
flatten_valid = valid.flatten(1)
coords = torch.round(ref * scale).long()
            # flatten (x, y) pixel coordinates into indices of the H*W axis
            # (row-major: y * width + x), clamped to stay in range after rounding
            coords = torch.clamp_max(coords[..., 1] * I_W + coords[..., 0], I_H * I_W - 1)
sparse_gt = torch.gather(flatten_gt, 1, coords.unsqueeze(-1).repeat(1, 1, 2))
sparse_valid = torch.gather(flatten_valid, 1, coords)
sparse_valid = (sparse_valid >= 0.5) & (torch.sum(sparse_gt ** 2, dim=-1).sqrt() < max_flow)
sparse_i_loss = (sparse_flow * scale - sparse_gt).abs()
# sparse_i_loss = (sparse_flow * scale - sparse_gt).square()
sparse_loss += i_weight * (sparse_valid[..., None] * sparse_i_loss).mean()
loss = flow_loss + sparse_loss * sparse_lambda
epe = torch.sum((flow_preds[0][-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[dense_valid.view(-1)]
metrics = {
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
'loss': loss,
'flow_loss': flow_loss,
'sparse_loss': sparse_loss
}
return loss, metrics
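# Hedged sketch (not called anywhere above): the commented-out exponential
# weighting in sequence_loss corresponds to the schedule from the RAFT paper,
# which down-weights early, coarser refinement iterations; gamma is assumed
# to be in (0, 1].
def raft_iteration_weights(n_predictions, gamma=0.8):
    # e.g. n_predictions=3, gamma=0.8 -> [0.64, 0.8, 1.0]
    return [gamma ** (n_predictions - i - 1) for i in range(n_predictions)]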
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.wdecay)
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, round(args.num_steps * 0.8))
# scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
# pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
# scheduler = torch.optim.lr_scheduler.OneCycleLR(
# optimizer, args.lr,
# args.num_steps + 10,
# pct_start=0.05,
# cycle_momentum=False,
# anneal_strategy='cos')
# scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
# optimizer, 1000, T_mult=1, eta_min=0, last_epoch=- 1, verbose=False)
return optimizer, scheduler
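# Hedged sketch of the schedule picked above: PyTorch's StepLR multiplies the
# learning rate by its gamma (0.1 by default) every step_size scheduler steps,
# so with step_size = round(0.8 * num_steps) the rate drops once, 80% of the
# way through training.
def steplr_lr(base_lr, step, step_size, gamma=0.1):
    # closed form of the decayed learning rate after `step` scheduler steps
    return base_lr * gamma ** (step // step_size)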
class Logger:
def __init__(self, model, scheduler):
self.model = model
self.scheduler = scheduler
self.total_steps = 0
self.running_loss = {}
self.writer = None
def _print_training_status(self):
metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
# print the training status
print(training_str + metrics_str)
if self.writer is None:
self.writer = SummaryWriter()
for k in self.running_loss:
self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
self.running_loss[k] = 0.0
def push(self, metrics):
self.total_steps += 1
for key in metrics:
if key not in self.running_loss:
self.running_loss[key] = 0.0
self.running_loss[key] += metrics[key]
if self.total_steps % SUM_FREQ == SUM_FREQ-1:
self._print_training_status()
self.running_loss = {}
def write_dict(self, results):
if self.writer is None:
self.writer = SummaryWriter()
for key in results:
self.writer.add_scalar(key, results[key], self.total_steps)
def write_image(self, image1, image2, target, pred, phase="T", idx=0):
if self.writer is None:
self.writer = SummaryWriter()
_, I_H, I_W = image1.shape
scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 2).to(image1.device)
image1 = image1.detach().cpu().numpy()
image1 = np.transpose(image1, (1, 2, 0))
image2 = image2.detach().cpu().numpy()
image2 = np.transpose(image2, (1, 2, 0))
target = target.detach().cpu().numpy()
target = np.transpose(target, (1, 2, 0))
target_img = flow_vis.flow_to_color(target, convert_to_bgr=False)
pred_img = list()
for p_i in range(len(pred[0])):
ref, sparse_flow, masks, scores = pred[1][p_i]
coords = torch.round(ref.squeeze(0) * scale).long()
coords = coords.cpu().numpy()
confidence = np.squeeze(scores.squeeze(0).cpu().numpy())
ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
for k_i in range(len(coords)):
coord = coords[k_i]
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
pred_img.append(ref_img)
this_pred = pred[0][p_i].squeeze(0).detach().cpu().numpy()
this_pred = np.transpose(this_pred, (1, 2, 0))
this_pred = flow_vis.flow_to_color(this_pred, convert_to_bgr=False)
pred_img.append(this_pred)
mask_img = list()
top_k = len(pred[0])
# top_k_indices = np.argsort(-confidence)[:top_k]
masks = masks.squeeze(0).cpu()
# masks = masks.reshape(self.num_keypoints, 1, H, W)
masks = F.interpolate(masks, size=(I_H, I_W), mode="bilinear", align_corners=False).numpy()
masks = masks.squeeze(1)
top_k_indices = np.argsort(-np.sum(masks, axis=(1, 2)))[:top_k]
for m_i in top_k_indices:
coord = coords[m_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[m_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
mask_img.append(ref_img)
masked_flow = np.expand_dims(masks[m_i], axis=-1) * this_pred
mask_img.append(masked_flow)
pred_img = np.concatenate(pred_img, axis=1)
mask_img = np.concatenate(mask_img, axis=1)
image = np.concatenate((np.concatenate((image1, image2, target_img, pred_img), axis=1),
np.concatenate((image1, image2, target_img, mask_img), axis=1)), axis=0)
image = image.astype(np.uint8)
self.writer.add_image("{}_Image_{:02d}".format(phase, idx + 1), image, self.total_steps, dataformats='HWC')
def write_images(self, image1, image2, targets, preds, phase="T"):
if self.writer is None:
self.writer = SummaryWriter()
_, _, I_H, I_W = image1.shape
scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 1, 2).to(image1.device)
image1 = image1.detach().cpu().numpy()
image1 = np.transpose(image1, (0, 2, 3, 1))
image2 = image2.detach().cpu().numpy()
image2 = np.transpose(image2, (0, 2, 3, 1))
targets = targets.detach().cpu().numpy()
targets = np.transpose(targets, (0, 2, 3, 1))
sampled_indices = random.sample(range(len(targets)), min(10, len(targets)))
for i_i, n_i in enumerate(sampled_indices):
this_image1 = image1[n_i]
this_image2 = image2[n_i]
target_img = flow_vis.flow_to_color(targets[n_i], convert_to_bgr=False)
pred_img = list()
for p_i in range(len(preds[0])):
ref, sparse_flow, masks, scores = preds[1][p_i]
coords = torch.round(ref * scale).long()
coords = coords.cpu().numpy()[n_i]
confidence = np.squeeze(scores.cpu().numpy()[n_i])
ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
for k_i in range(len(coords)):
coord = coords[k_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
pred_img.append(ref_img)
this_pred = preds[0][p_i].detach().cpu().numpy()[n_i]
this_pred = np.transpose(this_pred, (1, 2, 0))
this_pred = flow_vis.flow_to_color(this_pred, convert_to_bgr=False)
pred_img.append(this_pred)
mask_img = list()
top_k = len(preds[0])
# top_k_indices = np.argsort(-confidence)[:top_k]
masks = masks[n_i].cpu()
masks = F.interpolate(masks, size=(I_H, I_W), mode="bilinear", align_corners=False).numpy()
masks = masks.squeeze(1)
top_k_indices = np.argsort(-np.sum(masks, axis=(1, 2)))[:top_k]
for m_i in top_k_indices:
coord = coords[m_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[m_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
mask_img.append(ref_img)
masked_flow = np.expand_dims(masks[m_i], axis=-1) * this_pred
mask_img.append(masked_flow)
pred_img = np.concatenate(pred_img, axis=1)
mask_img = np.concatenate(mask_img, axis=1)
image = np.concatenate((np.concatenate((this_image1, this_image2, target_img, pred_img), axis=1),
np.concatenate((this_image1, this_image2, target_img, mask_img), axis=1)), axis=0)
image = image.astype(np.uint8)
self.writer.add_image("{}_Image_{:02d}".format(phase, i_i + 1), image, self.total_steps, dataformats='HWC')
def write_seg_images(self, image1, image2, targets, preds, phase="T"):
if self.writer is None:
self.writer = SummaryWriter()
_, _, I_H, I_W = image1.shape
scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 1, 2).to(image1.device)
image1 = image1.detach().cpu().numpy()
image1 = np.transpose(image1, (0, 2, 3, 1))
image2 = image2.detach().cpu().numpy()
image2 = np.transpose(image2, (0, 2, 3, 1))
targets = targets.detach().cpu().numpy()
targets = np.transpose(targets, (0, 2, 3, 1))
for n_i in range(len(targets)):
this_image1 = image1[n_i]
this_image2 = image2[n_i]
target_img = flow_vis.flow_to_color(targets[n_i], convert_to_bgr=False)
pred_img = list()
for p_i in range(len(preds[0])):
ref, sparse_flow, masks, scores = preds[1][p_i]
coords = torch.round(ref * scale).long()
coords = coords.detach().cpu().numpy()[n_i]
confidence = np.squeeze(scores.detach().cpu().numpy()[n_i])
ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
for k_i in range(len(coords)):
coord = coords[k_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
pred_img.append(ref_img)
this_pred = preds[0][p_i].detach().cpu().numpy()[n_i]
this_pred = np.transpose(this_pred, (1, 2, 0))
pred_img.append(flow_vis.flow_to_color(this_pred, convert_to_bgr=False))
pred_img = np.concatenate(pred_img, axis=1)
image = np.concatenate((this_image1, this_image2, target_img, pred_img), axis=1)
image = image.astype(np.uint8)
self.writer.add_image("{}_Image_{:02d}".format(phase, n_i + 1), image, self.total_steps, dataformats='HWC')
def close(self):
self.writer.close()
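# Minimal usage sketch for the Logger above (hypothetical metric values):
#   logger = Logger(model, scheduler)
#   logger.push({'epe': 1.5, 'loss': 0.3})  # averaged/printed every SUM_FREQ steps
#   logger.write_dict({'chairs': 2.1})      # scalars straight to TensorBoard
#   logger.close()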
def train(args):
model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
print("Parameter Count: %d" % count_parameters(model))
if args.restore_ckpt is not None:
model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
model.cuda()
model.train()
# if args.stage != 'chairs':
# model.module.freeze_bn()
train_loader = datasets.fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = 0
# scaler = GradScaler(enabled=args.mixed_precision)
logger = Logger(model, scheduler)
VAL_FREQ = 5000
# VAL_FREQ = 10
IMAGE_FREQ = 5000
add_noise = True
should_keep_training = True
while should_keep_training:
for i_batch, data_blob in enumerate(train_loader):
optimizer.zero_grad()
image1, image2, flow, valid = [x.cuda() for x in data_blob]
if args.add_noise:
stdv = np.random.uniform(0.0, 5.0)
image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
flow_predictions = model(image1, image2, iters=args.iters)
sparse_lambda = 1.0 if total_steps < 20000 else 0.0
# sparse_lambda = 1.0
loss, metrics = sequence_loss(flow_predictions, flow, valid, sparse_lambda, args.gamma)
# scaler.scale(loss).backward()
# scaler.unscale_(optimizer)
            loss.backward()
            # clip only after backward() so the freshly computed gradients
            # are the ones being limited to args.clip
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
# scaler.step(optimizer)
scheduler.step()
# scaler.update()
logger.push(metrics)
if total_steps % IMAGE_FREQ == IMAGE_FREQ - 1:
logger.write_images(image1, image2, flow, flow_predictions, phase="T")
if total_steps % VAL_FREQ == VAL_FREQ - 1:
PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
torch.save(model.state_dict(), PATH)
results = {}
for val_dataset in args.validation:
if val_dataset == 'chairs':
results.update(evaluate.validate_chairs(model.module, logger=logger, iters=args.iters))
elif val_dataset == 'sintel':
results.update(evaluate.validate_sintel(model.module, iters=args.iters))
elif val_dataset == 'kitti':
results.update(evaluate.validate_kitti(model.module, iters=args.iters))
logger.write_dict(results)
model.train()
if args.stage != 'chairs':
model.module.freeze_bn()
total_steps += 1
if total_steps > args.num_steps:
should_keep_training = False
break
logger.close()
PATH = 'checkpoints/%s.pth' % args.name
torch.save(model.state_dict(), PATH)
return PATH
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='raft', help="name your experiment")
parser.add_argument('--stage', help="determines which dataset to use for training")
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--validation', type=str, nargs='+')
parser.add_argument('--lr', type=float, default=0.0002)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--iters', type=int, default=3)
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
parser.add_argument('--add_noise', action='store_true')
args = parser.parse_args()
torch.manual_seed(2022)
np.random.seed(2022)
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
train(args)
|
[
"evaluate.validate_chairs",
"evaluate.validate_kitti",
"evaluate.validate_sintel"
] |
[((59, 82), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (74, 82), False, 'import sys\n'), ((14695, 14726), 'datasets.fetch_dataloader', 'datasets.fetch_dataloader', (['args'], {}), '(args)\n', (14720, 14726), False, 'import datasets\n'), ((17465, 17490), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17488, 17490), False, 'import argparse\n'), ((18792, 18815), 'torch.manual_seed', 'torch.manual_seed', (['(2022)'], {}), '(2022)\n', (18809, 18815), False, 'import torch\n'), ((18820, 18840), 'numpy.random.seed', 'np.random.seed', (['(2022)'], {}), '(2022)\n', (18834, 18840), True, 'import numpy as np\n'), ((5981, 6012), 'numpy.transpose', 'np.transpose', (['image1', '(1, 2, 0)'], {}), '(image1, (1, 2, 0))\n', (5993, 6012), True, 'import numpy as np\n'), ((6077, 6108), 'numpy.transpose', 'np.transpose', (['image2', '(1, 2, 0)'], {}), '(image2, (1, 2, 0))\n', (6089, 6108), True, 'import numpy as np\n'), ((6173, 6204), 'numpy.transpose', 'np.transpose', (['target', '(1, 2, 0)'], {}), '(target, (1, 2, 0))\n', (6185, 6204), True, 'import numpy as np\n'), ((6227, 6279), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['target'], {'convert_to_bgr': '(False)'}), '(target, convert_to_bgr=False)\n', (6249, 6279), False, 'import flow_vis\n'), ((8223, 8255), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (8237, 8255), True, 'import numpy as np\n'), ((8275, 8307), 'numpy.concatenate', 'np.concatenate', (['mask_img'], {'axis': '(1)'}), '(mask_img, axis=1)\n', (8289, 8307), True, 'import numpy as np\n'), ((9010, 9044), 'numpy.transpose', 'np.transpose', (['image1', '(0, 2, 3, 1)'], {}), '(image1, (0, 2, 3, 1))\n', (9022, 9044), True, 'import numpy as np\n'), ((9109, 9143), 'numpy.transpose', 'np.transpose', (['image2', '(0, 2, 3, 1)'], {}), '(image2, (0, 2, 3, 1))\n', (9121, 9143), True, 'import numpy as np\n'), ((9211, 9246), 'numpy.transpose', 'np.transpose', (['targets', '(0, 2, 3, 1)'], {}), '(targets, (0, 2, 3, 1))\n', (9223, 9246), True, 'import numpy as np\n'), ((12457, 12491), 'numpy.transpose', 'np.transpose', (['image1', '(0, 2, 3, 1)'], {}), '(image1, (0, 2, 3, 1))\n', (12469, 12491), True, 'import numpy as np\n'), ((12556, 12590), 'numpy.transpose', 'np.transpose', (['image2', '(0, 2, 3, 1)'], {}), '(image2, (0, 2, 3, 1))\n', (12568, 12590), True, 'import numpy as np\n'), ((12658, 12693), 'numpy.transpose', 'np.transpose', (['targets', '(0, 2, 3, 1)'], {}), '(targets, (0, 2, 3, 1))\n', (12670, 12693), True, 'import numpy as np\n'), ((14363, 14373), 'core.ours.RAFT', 'RAFT', (['args'], {}), '(args)\n', (14367, 14373), False, 'from core.ours import RAFT\n'), ((18853, 18881), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (18866, 18881), False, 'import os\n'), ((18891, 18914), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (18899, 18914), False, 'import os\n'), ((1332, 1362), 'torch.sum', 'torch.sum', (['(flow_gt ** 2)'], {'dim': '(1)'}), '(flow_gt ** 2, dim=1)\n', (1341, 1362), False, 'import torch\n'), ((2130, 2193), 'torch.clamp_max', 'torch.clamp_max', (['(coords[..., 1] * coords[..., 0])', '(I_H * I_W - 1)'], {}), '(coords[..., 1] * coords[..., 0], I_H * I_W - 1)\n', (2145, 2193), False, 'import torch\n'), ((2311, 2349), 'torch.gather', 'torch.gather', (['flatten_valid', '(1)', 'coords'], {}), '(flatten_valid, 1, coords)\n', (2323, 2349), False, 'import torch\n'), ((2746, 2798), 'torch.sum', (['((flow_preds[0][-1] - flow_gt) ** 2)'], {'dim': '(1)'}), '((flow_preds[0][-1] - flow_gt) ** 2, dim=1)\n', (2755, 2798), False, 'import torch\n'), ((4886, 4901), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (4899, 4901), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5522, 5537), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5535, 5537), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5773, 5788), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5786, 5788), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((7068, 7102), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (7080, 7102), True, 'import numpy as np\n'), ((7127, 7182), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (7149, 7182), False, 'import flow_vis\n'), ((8796, 8811), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (8809, 8811), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((9484, 9542), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['targets[n_i]'], {'convert_to_bgr': '(False)'}), '(targets[n_i], convert_to_bgr=False)\n', (9506, 9542), False, 'import flow_vis\n'), ((11627, 11659), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (11641, 11659), True, 'import numpy as np\n'), ((11683, 11715), 'numpy.concatenate', 'np.concatenate', (['mask_img'], {'axis': '(1)'}), '(mask_img, axis=1)\n', (11697, 11715), True, 'import numpy as np\n'), ((12243, 12258), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (12256, 12258), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((12835, 12893), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['targets[n_i]'], {'convert_to_bgr': '(False)'}), '(targets[n_i], convert_to_bgr=False)\n', (12857, 12893), False, 'import flow_vis\n'), ((13974, 14006), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (13988, 14006), True, 'import numpy as np\n'), ((14027, 14099), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, pred_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, pred_img), axis=1)\n', (14041, 14099), True, 'import numpy as np\n'), ((14525, 14554), 'torch.load', 'torch.load', (['args.restore_ckpt'], {}), '(args.restore_ckpt)\n', (14535, 14554), False, 'import torch\n'), ((6615, 6647), 'numpy.array', 'np.array', (['image1'], {'dtype': 'np.uint8'}), '(image1, dtype=np.uint8)\n', (6623, 6647), True, 'import numpy as np\n'), ((6881, 6914), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (6889, 6914), True, 'import numpy as np\n'), ((7452, 7527), 'torch.nn.functional.interpolate', 'F.interpolate', (['masks'], {'size': '(I_H, I_W)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(masks, size=(I_H, I_W), mode='bilinear', align_corners=False)\n", (7465, 7527), True, 'import torch.nn.functional as F\n'), ((7605, 7631), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (7611, 7631), True, 'import numpy as np\n'), ((7814, 7846), 'numpy.array', 'np.array', (['image1'], {'dtype': 'np.uint8'}), '(image1, dtype=np.uint8)\n', (7822, 7846), True, 'import numpy as np\n'), ((7997, 8030), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (8005, 8030), True, 'import numpy as np\n'), ((8114, 8149), 'numpy.expand_dims', 'np.expand_dims', (['masks[m_i]'], {'axis': '(-1)'}), '(masks[m_i], axis=-1)\n', (8128, 8149), True, 'import numpy as np\n'), ((8340, 8402), 'numpy.concatenate', 'np.concatenate', (['(image1, image2, target_img, pred_img)'], {'axis': '(1)'}), '((image1, image2, target_img, pred_img), axis=1)\n', (8354, 8402), True, 'import numpy as np\n'), ((8436, 8498), 'numpy.concatenate', 'np.concatenate', (['(image1, image2, target_img, mask_img)'], {'axis': '(1)'}), '((image1, image2, target_img, mask_img), axis=1)\n', (8450, 8498), True, 'import numpy as np\n'), ((10457, 10491), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (10469, 10491), True, 'import numpy as np\n'), ((10520, 10575), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (10542, 10575), False, 'import flow_vis\n'), ((13826, 13860), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (13838, 13860), True, 'import numpy as np\n'), ((15267, 15294), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(5.0)'], {}), '(0.0, 5.0)\n', (15284, 15294), True, 'import numpy as np\n'), ((2077, 2101), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (2088, 2101), False, 'import torch\n'), ((7605, 7631), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (7611, 7631), True, 'import numpy as np\n'), ((9896, 9933), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (9904, 9933), True, 'import numpy as np\n'), ((10263, 10296), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (10271, 10296), True, 'import numpy as np\n'), ((10803, 10878), 'torch.nn.functional.interpolate', 'F.interpolate', (['masks'], {'size': '(I_H, I_W)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(masks, size=(I_H, I_W), mode='bilinear', align_corners=False)\n", (10816, 10878), True, 'import torch.nn.functional as F\n'), ((11189, 11226), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (11197, 11226), True, 'import numpy as np\n'), ((11385, 11418), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (11393, 11418), True, 'import numpy as np\n'), ((11510, 11545), 'numpy.expand_dims', 'np.expand_dims', (['masks[m_i]'], {'axis': '(-1)'}), '(masks[m_i], axis=-1)\n', (11524, 11545), True, 'import numpy as np\n'), ((11752, 11824), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, pred_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, pred_img), axis=1)\n', (11766, 11824), True, 'import numpy as np\n'), ((11862, 11934), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, mask_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, mask_img), axis=1)\n', (11876, 11934), True, 'import numpy as np\n'), ((13265, 13302), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (13273, 13302), True, 'import numpy as np\n'), ((13632, 13665), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (13640, 13665), True, 'import numpy as np\n'), ((13893, 13948), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (13915, 13948), False, 'import flow_vis\n'), ((5841, 5886), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (5853, 5886), False, 'import torch\n'), ((8867, 8912), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (8879, 8912), False, 'import torch\n'), ((9707, 9731), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (9718, 9731), False, 'import torch\n'), ((10964, 10990), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (10970, 10990), True, 'import numpy as np\n'), ((12314, 12359), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (12326, 12359), False, 'import torch\n'), ((13058, 13082), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (13069, 13082), False, 'import torch\n'), ((1859, 1912), 'torch.tensor', 'torch.tensor', (['(I_W - 1, I_H - 1)'], {'dtype': 'torch.float32'}), '((I_W - 1, I_H - 1), dtype=torch.float32)\n', (1871, 1912), False, 'import torch\n'), ((2402, 2435), 'torch.sum', 'torch.sum', (['(sparse_gt ** 2)'], {'dim': '(-1)'}), '(sparse_gt ** 2, dim=-1)\n', (2411, 2435), False, 'import torch\n'), ((16602, 16673), 'evaluate.validate_chairs', 'evaluate.validate_chairs', (['model.module'], {'logger': 'logger', 'iters': 'args.iters'}), '(model.module, logger=logger, iters=args.iters)\n', (16626, 16673), False, 'import evaluate\n'), ((16764, 16820), 'evaluate.validate_sintel', 'evaluate.validate_sintel', (['model.module'], {'iters': 'args.iters'}), '(model.module, iters=args.iters)\n', (16788, 16820), False, 'import evaluate\n'), ((16910, 16965), 'evaluate.validate_kitti', 'evaluate.validate_kitti', (['model.module'], {'iters': 'args.iters'}), '(model.module, iters=args.iters)\n', (16933, 16965), False, 'import evaluate\n'), ((15337, 15363), 'torch.randn', 'torch.randn', (['*image1.shape'], {}), '(*image1.shape)\n', (15348, 15363), False, 'import torch\n'), ((15432, 15458), 'torch.randn', 'torch.randn', (['*image2.shape'], {}), '(*image2.shape)\n', (15443, 15458), False, 'import torch\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from evaluate import evaluate
from utils import get_data, tf_melspectogram
from shallow_nn import shallow_nn
from deep_nn import deep_nn
from shallow_nn_improve import shallow_nn as shallow_nn_improve
from deep_nn_improve import deep_nn as deep_nn_improve
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('epochs', 100,
'Number of epochs to train for. (default: %(default)d)')
tf.app.flags.DEFINE_integer('network', 0,
'Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)')
tf.app.flags.DEFINE_integer('improve', 0,
'Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)')
tf.app.flags.DEFINE_float('decay', 0,
'Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d)')
tf.app.flags.DEFINE_integer('log_frequency', 100,
'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('augment', 0,
'Use augmentation, 0 for off, 1 for on (default: %(default)d)')
tf.app.flags.DEFINE_integer('num_parallel_calls', 1,
'Number of cpu cores to use to preprocess data')
tf.app.flags.DEFINE_integer('save_model', 1000,
'Number of steps between model saves (default: %(default)d)')
tf.app.flags.DEFINE_integer('save_images', 0,
'Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)')
# Optimisation hyperparameters
tf.app.flags.DEFINE_integer(
'batch_size', 16, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float(
'learning_rate', 5e-5, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer(
"input_width", 80, "Input width (default: %(default)d)")
tf.app.flags.DEFINE_integer(
"input_height", 80, "Input height (default: %(default)d)")
tf.app.flags.DEFINE_integer(
"input_channels", 1, "Input channels (default: %(default)d)"
)
tf.app.flags.DEFINE_integer(
"num_classes", 10, "Number of classes (default: %(default)d)"
)
tf.app.flags.DEFINE_string(
"log_dir",
"{cwd}/logs/".format(cwd=os.getcwd()),
"Directory where to write event logs and checkpoint. (default: %(default)s)",
)
run_log_dir = os.path.join(FLAGS.log_dir, 'exp_lr_{learning_rate}_decay_{decay}_bs_{batch_size}_e_{epochs}_{network}_improve_{improve}_augment_{augment}'.format(
learning_rate=FLAGS.learning_rate, decay=FLAGS.decay, batch_size=FLAGS.batch_size, epochs=FLAGS.epochs, network='shallow' if (FLAGS.network == 0) else 'deep', improve=FLAGS.improve, augment=FLAGS.augment))
def model(iterator, is_training, nn):
next_x, next_y = iterator.get_next()
with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
y_out, img_summary = nn(next_x, is_training)
# Compute categorical loss
with tf.variable_scope("cross_entropy"):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=next_y, logits=y_out)
)
# L1 regularise
regularization_penalty = tf.losses.get_regularization_loss(
name="total_regularization_loss"
)
regularized_loss = cross_entropy + regularization_penalty
accuracy, accuracy_op = tf.metrics.accuracy(
tf.argmax(next_y, axis=1), tf.argmax(y_out, axis=1), name="accuracy_train")
return regularized_loss, img_summary, accuracy, accuracy_op
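# Hedged note: tf.losses.get_regularization_loss() sums whatever terms the
# network registered in the REGULARIZATION_LOSSES collection (e.g. through
# kernel_regularizer arguments inside `nn`); if nothing is registered it
# returns zero and regularized_loss equals the plain cross-entropy.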
def calc_accuracy(iterator, is_training, nn):
next_x, next_y = iterator.get_next()
with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
y_out, _ = nn(next_x, is_training)
accuracy, accuracy_op = tf.metrics.accuracy(
tf.argmax(next_y, axis=1), tf.argmax(y_out, axis=1), name="accuracy_test")
return accuracy, accuracy_op
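# Hedged reminder: tf.metrics.accuracy is a streaming metric backed by local
# variables -- `accuracy` reads the running value and `accuracy_op` updates
# it -- which is why main() re-runs the initializers for the
# "accuracy_train"/"accuracy_test" scopes at the start of every epoch:
#   sess.run(tf.variables_initializer(
#       tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy_test")))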
def accumulate_results(iterator, is_training, nn):
x, y, i = iterator.get_next()
with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
y_out, _ = nn(x, is_training)
return (x, y, y_out, i)
def _preprocess(features, label):
label = tf.one_hot(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)
return features, label
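# Hedged example of the mapping above: with num_classes = 10, a scalar label
# of 3 becomes a one-hot row vector,
#   tf.one_hot(indices=3, depth=10, dtype=tf.uint8) -> [0 0 0 1 0 0 0 0 0 0]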
def main(_):
(
train_set_data,
train_set_labels,
_,
test_set_data,
test_set_labels,
test_set_track_ids,
) = get_data()
print("Making TF graph")
start = time.time()
is_training_placeholder = tf.placeholder_with_default(False, shape=())
features_placeholder = tf.placeholder(
tf.float32, (None, np.shape(train_set_data)[1])
)
labels_placeholder = tf.placeholder(tf.uint8, (None))
track_ids_placeholder = tf.placeholder(tf.uint8, (None))
shuffle_buffer_size = len(train_set_data)
dataset = tf.data.Dataset.from_tensor_slices(
(features_placeholder, labels_placeholder)
)
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
_preprocess, FLAGS.batch_size, num_parallel_calls=FLAGS.num_parallel_calls)
)
dataset = dataset.prefetch(1)
train_iterator = dataset.make_initializable_iterator()
test_iterator = dataset.make_initializable_iterator()
eval_dataset = tf.data.Dataset.from_tensor_slices(
(features_placeholder, labels_placeholder, track_ids_placeholder)
)
eval_dataset = eval_dataset.map(
lambda features, label, track_id: (
features,
tf.one_hot(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8),
track_id,
)
)
eval_dataset = eval_dataset.batch(1)
eval_iterator = eval_dataset.make_initializable_iterator()
if (FLAGS.network == 0):
if (FLAGS.improve == 0):
print("Using Shallow network")
nn = shallow_nn
else:
print("Using Shallow Improved network")
nn = shallow_nn_improve
else:
if (FLAGS.improve == 0):
print("Using Deep Network")
nn = deep_nn
else:
print("Using Deep Improved Network")
nn = deep_nn_improve
loss, _, train_acc, train_acc_op = model(
train_iterator, is_training_placeholder, nn)
global_step = tf.Variable(0, trainable=False)
if (FLAGS.decay > 0):
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate, global_step, 15000, FLAGS.decay)
else:
learning_rate = FLAGS.learning_rate
# Adam Optimiser
# default values match that in paper
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimiser = tf.train.AdamOptimizer(
learning_rate, name="AdamOpt").minimize(loss, global_step=global_step)
validation_accuracy, acc_op = calc_accuracy(
test_iterator, is_training_placeholder, nn)
evaluator = accumulate_results(
eval_iterator, is_training_placeholder, nn)
loss_summary = tf.summary.scalar("Loss", loss)
acc_summary = tf.summary.scalar("Accuracy", validation_accuracy)
train_acc_summary = tf.summary.scalar("Accuracy", train_acc)
training_summary = tf.summary.merge([loss_summary, train_acc_summary])
validation_summary = tf.summary.merge([acc_summary])
# Isolate the variables stored behind the scenes by the metric operation
running_vars = tf.get_collection(
tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy_test")
train_running_vars = tf.get_collection(
tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy_train")
# Define initializer to initialize/reset running variables
running_vars_initializer = tf.variables_initializer(
var_list=running_vars)
train_running_vars_initializer = tf.variables_initializer(
var_list=train_running_vars)
end = time.time()
print("Time to prep TF ops: {:.2f}s".format(end - start))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.graph.finalize()
summary_writer = tf.summary.FileWriter(
run_log_dir + "_train", sess.graph)
summary_writer_validation = tf.summary.FileWriter(
run_log_dir + "_validate", sess.graph
)
for epoch in range(FLAGS.epochs):
sess.run(running_vars_initializer)
sess.run(train_running_vars_initializer)
sess.run(train_iterator.initializer, feed_dict={
features_placeholder: train_set_data, labels_placeholder: train_set_labels})
# Run until all samples done
while True:
try:
_, acc_train, summary_str = sess.run([optimiser, train_acc_op, training_summary], feed_dict={
is_training_placeholder: True})
except tf.errors.OutOfRangeError:
break
summary_writer.add_summary(summary_str, epoch)
sess.run(test_iterator.initializer, feed_dict={
features_placeholder: test_set_data, labels_placeholder: test_set_labels})
while True:
try:
acc, acc_summary_str = sess.run(
[acc_op, validation_summary])
except tf.errors.OutOfRangeError:
break
summary_writer_validation.add_summary(acc_summary_str, epoch)
print("Accuracy after epoch {} - Training: {:.2f}% Validation: {:.2f}%".format(
str(epoch), acc_train * 100.0, acc * 100.0))
sess.run(eval_iterator.initializer, feed_dict={
features_placeholder: test_set_data, labels_placeholder: test_set_labels, track_ids_placeholder: test_set_track_ids})
results = [None] * np.shape(test_set_data)[0]
count = 0
while True:
try:
evaluated = sess.run(evaluator)
results[count] = evaluated
count += 1
except tf.errors.OutOfRangeError:
break
raw_probability, maximum_probability, majority_vote = evaluate(results)
print("-----===== Summary =====-----")
print("Raw Probability: {:.2f}%".format(raw_probability * 100.0))
print("Maximum Probability: {:.2f}%".format(
maximum_probability * 100.0))
print("Majority Vote: {:.2f}%".format(majority_vote * 100))
if __name__ == "__main__":
tf.app.run(main=main)
|
[
"evaluate.evaluate"
] |
[((461, 569), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epochs"""', '(100)', '"""Number of mini-batches to train on. (default: %(default)d)"""'], {}), "('epochs', 100,\n 'Number of mini-batches to train on. (default: %(default)d)')\n", (488, 569), True, 'import tensorflow as tf\n'), ((594, 721), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""network"""', '(0)', '"""Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)"""'], {}), "('network', 0,\n 'Type of network to use, 0 for shallow, 1 for deep. (default: %(default)d)'\n )\n", (621, 721), True, 'import tensorflow as tf\n'), ((741, 880), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""improve"""', '(0)', '"""Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)"""'], {}), "('improve', 0,\n 'Turn improvements on or off, 0 for off, 1 for improvements on. (default: %(default)d)'\n )\n", (768, 880), True, 'import tensorflow as tf\n'), ((900, 1034), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""decay"""', '(0)', '"""Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d"""'], {}), "('decay', 0,\n 'Amount to decay learning rate. Greater than 0 enables decaying (default: %(default)d'\n )\n", (925, 1034), True, 'import tensorflow as tf\n'), ((1052, 1212), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""log_frequency"""', '(100)', '"""Number of steps between logging results to the console and saving summaries (default: %(default)d)"""'], {}), "('log_frequency', 100,\n 'Number of steps between logging results to the console and saving summaries (default: %(default)d)'\n )\n", (1079, 1212), True, 'import tensorflow as tf\n'), ((1232, 1341), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""augment"""', '(0)', '"""Use augmentation, 0 for off, 1 for on (default: %(default)d)"""'], {}), "('augment', 0,\n 'Use augmentation, 0 for off, 1 for on (default: %(default)d)')\n", (1259, 1341), True, 'import tensorflow as tf\n'), ((1366, 1471), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_parallel_calls"""', '(1)', '"""Number of cpu cores to use to preprocess data"""'], {}), "('num_parallel_calls', 1,\n 'Number of cpu cores to use to preprocess data')\n", (1393, 1471), True, 'import tensorflow as tf\n'), ((1496, 1609), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_model"""', '(1000)', '"""Number of steps between model saves (default: %(default)d)"""'], {}), "('save_model', 1000,\n 'Number of steps between model saves (default: %(default)d)')\n", (1523, 1609), True, 'import tensorflow as tf\n'), ((1634, 1776), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_images"""', '(0)', '"""Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)"""'], {}), "('save_images', 0,\n 'Whether to save spectrogram images, 0 to not save, 1 to save. (default: %(default)d)'\n )\n", (1661, 1776), True, 'import tensorflow as tf\n'), ((1828, 1937), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(16)', '"""Number of examples per mini-batch (default: %(default)d)"""'], {}), "('batch_size', 16,\n 'Number of examples per mini-batch (default: %(default)d)')\n", (1855, 1937), True, 'import tensorflow as tf\n'), ((1939, 2032), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(5e-05)', '"""Learning rate (default: %(default)d)"""'], {}), "('learning_rate', 5e-05,\n 'Learning rate (default: %(default)d)')\n", (1964, 2032), True, 'import tensorflow as tf\n'), ((2033, 2121), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_width"""', '(80)', '"""Input width (default: %(default)d)"""'], {}), "('input_width', 80,\n 'Input width (default: %(default)d)')\n", (2060, 2121), True, 'import tensorflow as tf\n'), ((2123, 2213), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_height"""', '(80)', '"""Input height (default: %(default)d)"""'], {}), "('input_height', 80,\n 'Input height (default: %(default)d)')\n", (2150, 2213), True, 'import tensorflow as tf\n'), ((2215, 2308), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""input_channels"""', '(1)', '"""Input channels (default: %(default)d)"""'], {}), "('input_channels', 1,\n 'Input channels (default: %(default)d)')\n", (2242, 2308), True, 'import tensorflow as tf\n'), ((2311, 2405), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_classes"""', '(10)', '"""Number of classes (default: %(default)d)"""'], {}), "('num_classes', 10,\n 'Number of classes (default: %(default)d)')\n", (2338, 2405), True, 'import tensorflow as tf\n'), ((3423, 3490), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {'name': '"""total_regularization_loss"""'}), "(name='total_regularization_loss')\n", (3456, 3490), True, 'import tensorflow as tf\n'), ((4385, 4451), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'label', 'depth': 'FLAGS.num_classes', 'dtype': 'tf.uint8'}), '(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)\n', (4395, 4451), True, 'import tensorflow as tf\n'), ((4646, 4656), 'utils.get_data', 'get_data', ([], {}), '()\n', (4654, 4656), False, 'from utils import get_data, tf_melspectogram\n'), ((4699, 4710), 'time.time', 'time.time', ([], {}), '()\n', (4708, 4710), False, 'import time\n'), ((4742, 4786), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()'}), '(False, shape=())\n', (4769, 4786), True, 'import tensorflow as tf\n'), ((4918, 4948), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'None'], {}), '(tf.uint8, None)\n', (4932, 4948), True, 'import tensorflow as tf\n'), ((4979, 5009), 'tensorflow.placeholder', 'tf.placeholder', (['tf.uint8', 'None'], {}), '(tf.uint8, None)\n', (4993, 5009), True, 'import tensorflow as tf\n'), ((5073, 5151), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(features_placeholder, labels_placeholder)'], {}), '((features_placeholder, labels_placeholder))\n', (5107, 5151), True, 'import tensorflow as tf\n'), ((5568, 5673), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(features_placeholder, labels_placeholder, track_ids_placeholder)'], {}), '((features_placeholder,\n labels_placeholder, track_ids_placeholder))\n', (5602, 5673), True, 'import tensorflow as tf\n'), ((6568, 6599), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (6579, 6599), True, 'import tensorflow as tf\n'), ((6877, 6919), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (6894, 6919), True, 'import tensorflow as tf\n'), ((7304, 7335), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'loss'], {}), "('Loss', loss)\n", (7321, 7335), True, 'import tensorflow as tf\n'), ((7354, 7404), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'validation_accuracy'], {}), "('Accuracy', validation_accuracy)\n", (7371, 7404), True, 'import tensorflow as tf\n'), ((7429, 7469), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'train_acc'], {}), "('Accuracy', train_acc)\n", (7446, 7469), True, 'import tensorflow as tf\n'), ((7494, 7545), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, train_acc_summary]'], {}), '([loss_summary, train_acc_summary])\n', (7510, 7545), True, 'import tensorflow as tf\n'), ((7571, 7602), 'tensorflow.summary.merge', 'tf.summary.merge', (['[acc_summary]'], {}), '([acc_summary])\n', (7587, 7602), True, 'import tensorflow as tf\n'), ((7700, 7770), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.LOCAL_VARIABLES'], {'scope': '"""accuracy_test"""'}), "(tf.GraphKeys.LOCAL_VARIABLES, scope='accuracy_test')\n", (7717, 7770), True, 'import tensorflow as tf\n'), ((7805, 7876), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.LOCAL_VARIABLES'], {'scope': '"""accuracy_train"""'}), "(tf.GraphKeys.LOCAL_VARIABLES, scope='accuracy_train')\n", (7822, 7876), True, 'import tensorflow as tf\n'), ((7981, 8028), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'running_vars'}), '(var_list=running_vars)\n', (8005, 8028), True, 'import tensorflow as tf\n'), ((8075, 8128), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'train_running_vars'}), '(var_list=train_running_vars)\n', (8099, 8128), True, 'import tensorflow as tf\n'), ((8149, 8160), 'time.time', 'time.time', ([], {}), '()\n', (8158, 8160), False, 'import time\n'), ((10739, 10760), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main'}), '(main=main)\n', (10749, 10760), True, 'import tensorflow as tf\n'), ((3043, 3090), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (3060, 3090), True, 'import tensorflow as tf\n'), ((3186, 3220), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cross_entropy"""'], {}), "('cross_entropy')\n", (3203, 3220), True, 'import tensorflow as tf\n'), ((3625, 3650), 'tensorflow.argmax', 'tf.argmax', (['next_y'], {'axis': '(1)'}), '(next_y, axis=1)\n', (3634, 3650), True, 'import tensorflow as tf\n'), ((3652, 3676), 'tensorflow.argmax', 'tf.argmax', (['y_out'], {'axis': '(1)'}), '(y_out, axis=1)\n', (3661, 3676), True, 'import tensorflow as tf\n'), ((3865, 3912), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (3882, 3912), True, 'import tensorflow as tf\n'), ((4015, 4040), 'tensorflow.argmax', 'tf.argmax', (['next_y'], {'axis': '(1)'}), '(next_y, axis=1)\n', (4024, 4040), True, 'import tensorflow as tf\n'), ((4042, 4066), 'tensorflow.argmax', 'tf.argmax', (['y_out'], {'axis': '(1)'}), '(y_out, axis=1)\n', (4051, 4066), True, 'import tensorflow as tf\n'), ((4221, 4268), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Model', reuse=tf.AUTO_REUSE)\n", (4238, 4268), True, 'import tensorflow as tf\n'), ((5266, 5380), 'tensorflow.data.experimental.map_and_batch', 'tf.data.experimental.map_and_batch', (['_preprocess', 'FLAGS.batch_size'], {'num_parallel_calls': 'FLAGS.num_parallel_calls'}), '(_preprocess, FLAGS.batch_size,\n num_parallel_calls=FLAGS.num_parallel_calls)\n', (5300, 5380), True, 'import tensorflow as tf\n'), ((6650, 6735), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['FLAGS.learning_rate', 'global_step', '(15000)', 'FLAGS.decay'], {}), '(FLAGS.learning_rate, global_step, 15000, FLAGS.decay\n )\n', (6676, 6735), True, 'import tensorflow as tf\n'), ((6929, 6964), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (6952, 6964), True, 'import tensorflow as tf\n'), ((8233, 8245), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8243, 8245), True, 'import tensorflow as tf\n'), ((8363, 8420), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_train')", 'sess.graph'], {}), "(run_log_dir + '_train', sess.graph)\n", (8384, 8420), True, 'import tensorflow as tf\n'), ((8470, 8530), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_validate')", 'sess.graph'], {}), "(run_log_dir + '_validate', sess.graph)\n", (8491, 8530), True, 'import tensorflow as tf\n'), ((10403, 10420), 'evaluate.evaluate', 'evaluate', (['results'], {}), '(results)\n', (10411, 10420), False, 'from evaluate import evaluate\n'), ((2480, 2491), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2489, 2491), False, 'import os\n'), ((3274, 3345), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'next_y', 'logits': 'y_out'}), '(labels=next_y, logits=y_out)\n', (3316, 3345), True, 'import tensorflow as tf\n'), ((8272, 8305), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8303, 8305), True, 'import tensorflow as tf\n'), ((4858, 4882), 'numpy.shape', 'np.shape', (['train_set_data'], {}), '(train_set_data)\n', (4866, 4882), True, 'import numpy as np\n'), ((5799, 5865), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'label', 'depth': 'FLAGS.num_classes', 'dtype': 'tf.uint8'}), '(indices=label, depth=FLAGS.num_classes, dtype=tf.uint8)\n', (5809, 5865), True, 'import tensorflow as tf\n'), ((6986, 7039), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""AdamOpt"""'}), "(learning_rate, name='AdamOpt')\n", (7008, 7039), True, 'import tensorflow as tf\n'), ((10071, 10094), 'numpy.shape', 'np.shape', (['test_set_data'], {}), '(test_set_data)\n', (10079, 10094), True, 'import numpy as np\n')]
|
from implicit_neural_networks import IMLP
import torch
import torch.optim as optim
import numpy as np
from evaluate import evaluate_model
from datetime import datetime
from loss_utils import get_gradient_loss, get_rigidity_loss, \
get_optical_flow_loss, get_optical_flow_alpha_loss
from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow
import sys
from torch.utils.tensorboard import SummaryWriter
import logging
import json
from pathlib import Path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(config):
maximum_number_of_frames = config["maximum_number_of_frames"]
resx = np.int64(config["resx"])
resy = np.int64(config["resy"])
iters_num = config["iters_num"]
    # batch size:
    samples = config["samples_batch"]
    # evaluation frequency (in iterations)
evaluate_every = np.int64(config["evaluate_every"])
# optionally it is possible to load a checkpoint
load_checkpoint = config["load_checkpoint"] # set to true to continue from a checkpoint
checkpoint_path = config["checkpoint_path"]
# a data folder that contains folders named "[video_name]","[video_name]_flow","[video_name]_maskrcnn"
data_folder = Path(config["data_folder"])
    results_folder_name = config["results_folder_name"] # the folder (under the code's folder) where the experiments will be saved.
add_to_experiment_folder_name = config["add_to_experiment_folder_name"] # for each experiment folder (saved inside "results_folder_name") add this string
# boolean variables for determining if a pretraining is used:
pretrain_mapping1 = config["pretrain_mapping1"]
pretrain_mapping2 = config["pretrain_mapping2"]
pretrain_iter_number = config["pretrain_iter_number"]
# the scale of the atlas uv coordinates relative to frame's xy coordinates
uv_mapping_scale = config["uv_mapping_scale"]
# M_\alpha's hyper parameters:
positional_encoding_num_alpha = config["positional_encoding_num_alpha"]
number_of_channels_alpha = config["number_of_channels_alpha"]
number_of_layers_alpha = config["number_of_layers_alpha"]
# M_f's hyper parameters
use_positional_encoding_mapping1 = config["use_positional_encoding_mapping1"]
number_of_positional_encoding_mapping1 = config["number_of_positional_encoding_mapping1"]
number_of_layers_mapping1 = config["number_of_layers_mapping1"]
number_of_channels_mapping1 = config["number_of_channels_mapping1"]
# M_b's hyper parameters
use_positional_encoding_mapping2 = config["use_positional_encoding_mapping2"]
number_of_positional_encoding_mapping2 = config["number_of_positional_encoding_mapping2"]
number_of_layers_mapping2 = config["number_of_layers_mapping2"]
number_of_channels_mapping2 = config["number_of_channels_mapping2"]
# Atlas MLP's hyper parameters
number_of_channels_atlas = config["number_of_channels_atlas"]
number_of_layers_atlas = config["number_of_layers_atlas"]
positional_encoding_num_atlas = config[
"positional_encoding_num_atlas"]
# bootstrapping configuration:
alpha_bootstrapping_factor = config["alpha_bootstrapping_factor"]
stop_bootstrapping_iteration = config["stop_bootstrapping_iteration"]
# coefficients for the different loss terms
rgb_coeff = config["rgb_coeff"] # coefficient for rgb loss term:
alpha_flow_factor = config["alpha_flow_factor"]
sparsity_coeff = config["sparsity_coeff"]
# optical flow loss term coefficient (beta_f in the paper):
optical_flow_coeff = config["optical_flow_coeff"]
use_gradient_loss = config["use_gradient_loss"]
gradient_loss_coeff = config["gradient_loss_coeff"]
rigidity_coeff = config["rigidity_coeff"] # coefficient for the rigidity loss term
derivative_amount = config["derivative_amount"] # For finite differences gradient computation:
# for using global (in addition to the current local) rigidity loss:
include_global_rigidity_loss = config["include_global_rigidity_loss"]
# Finite differences parameters for the global rigidity terms:
global_rigidity_derivative_amount_fg = config["global_rigidity_derivative_amount_fg"]
global_rigidity_derivative_amount_bg = config["global_rigidity_derivative_amount_bg"]
global_rigidity_coeff_fg = config["global_rigidity_coeff_fg"]
global_rigidity_coeff_bg = config["global_rigidity_coeff_bg"]
stop_global_rigidity = config["stop_global_rigidity"]
use_optical_flow = True
vid_name = data_folder.name
vid_root = data_folder.parent
results_folder = Path(
f'./{results_folder_name}/{vid_name}_{datetime.utcnow().strftime("%m_%d_%Y__%H_%M_%S_%f")}{add_to_experiment_folder_name}')
results_folder.mkdir(parents=True, exist_ok=True)
with open('%s/config.json' % results_folder, 'w') as json_file:
json.dump(config, json_file, indent=4)
logging.basicConfig(
filename='%s/log.log' % results_folder,
level=logging.INFO,
format='%(asctime)s %(message)s')
logging.info('Started')
writer = SummaryWriter(log_dir=str(results_folder))
optical_flows_mask, video_frames, optical_flows_reverse_mask, mask_frames, video_frames_dx, video_frames_dy, optical_flows_reverse, optical_flows = load_input_data(
resy, resx, maximum_number_of_frames, data_folder, True, True, vid_root, vid_name)
    number_of_frames = video_frames.shape[3]
    # save a video showing the masked part of the forward optical flow:
save_mask_flow(optical_flows_mask, video_frames, results_folder)
model_F_mapping1 = IMLP(
input_dim=3,
output_dim=2,
hidden_dim=number_of_channels_mapping1,
use_positional=use_positional_encoding_mapping1,
positional_dim=number_of_positional_encoding_mapping1,
num_layers=number_of_layers_mapping1,
skip_layers=[]).to(device)
model_F_mapping2 = IMLP(
input_dim=3,
output_dim=2,
hidden_dim=number_of_channels_mapping2,
use_positional=use_positional_encoding_mapping2,
positional_dim=number_of_positional_encoding_mapping2,
num_layers=number_of_layers_mapping2,
skip_layers=[]).to(device)
model_F_atlas = IMLP(
input_dim=2,
output_dim=3,
hidden_dim=number_of_channels_atlas,
use_positional=True,
positional_dim=positional_encoding_num_atlas,
num_layers=number_of_layers_atlas,
skip_layers=[4, 7]).to(device)
model_alpha = IMLP(
input_dim=3,
output_dim=1,
hidden_dim=number_of_channels_alpha,
use_positional=True,
positional_dim=positional_encoding_num_alpha,
num_layers=number_of_layers_alpha,
skip_layers=[]).to(device)
start_iteration = 0
optimizer_all = optim.Adam(
[{'params': list(model_F_mapping1.parameters())},
{'params': list(model_F_mapping2.parameters())},
{'params': list(model_alpha.parameters())},
{'params': list(model_F_atlas.parameters())}], lr=0.0001)
larger_dim = np.maximum(resx, resy)
if not load_checkpoint:
if pretrain_mapping1:
model_F_mapping1 = pre_train_mapping(model_F_mapping1, number_of_frames, uv_mapping_scale, resx=resx, resy=resy,
larger_dim=larger_dim,device=device, pretrain_iters=pretrain_iter_number)
if pretrain_mapping2:
model_F_mapping2 = pre_train_mapping(model_F_mapping2, number_of_frames, uv_mapping_scale, resx=resx, resy=resy,
larger_dim=larger_dim, device=device,pretrain_iters=pretrain_iter_number)
else:
init_file = torch.load(checkpoint_path)
model_F_atlas.load_state_dict(init_file["F_atlas_state_dict"])
model_F_mapping1.load_state_dict(init_file["model_F_mapping1_state_dict"])
model_F_mapping2.load_state_dict(init_file["model_F_mapping2_state_dict"])
model_alpha.load_state_dict(init_file["model_F_alpha_state_dict"])
optimizer_all.load_state_dict(init_file["optimizer_all_state_dict"])
start_iteration = init_file["iteration"]
jif_all = get_tuples(number_of_frames, video_frames)
# Start training!
for i in range(start_iteration, iters_num):
if i > stop_bootstrapping_iteration:
alpha_bootstrapping_factor = 0
if i > stop_global_rigidity:
global_rigidity_coeff_fg = 0
global_rigidity_coeff_bg = 0
print(i)
logging.info('Iteration %d' % i)
# randomly choose indices for the current batch
inds_foreground = torch.randint(jif_all.shape[1],
(np.int64(samples * 1.0), 1))
jif_current = jif_all[:, inds_foreground] # size (3, batch, 1)
rgb_current = video_frames[jif_current[1, :], jif_current[0, :], :,
jif_current[2, :]].squeeze(1).to(device)
# the correct alpha according to the precomputed maskrcnn
alpha_maskrcnn = mask_frames[jif_current[1, :], jif_current[0, :],
jif_current[2, :]].squeeze(1).to(device).unsqueeze(-1)
# normalize coordinates to be in [-1,1]
xyt_current = torch.cat(
(jif_current[0, :] / (larger_dim / 2) - 1, jif_current[1, :] / (larger_dim / 2) - 1,
jif_current[2, :] / (number_of_frames / 2.0) - 1),
dim=1).to(device) # size (batch, 3)
# get the atlas UV coordinates from the two mapping networks;
uv_foreground1 = model_F_mapping1(xyt_current)
uv_foreground2 = model_F_mapping2(xyt_current)
# map tanh output of the alpha network to the range (0,1) :
alpha = 0.5 * (model_alpha(xyt_current) + 1.0)
# prevent a situation of alpha=0, or alpha=1 (for the BCE loss that uses log(alpha),log(1-alpha) below)
alpha = alpha * 0.99
alpha = alpha + 0.001
        # Sample atlas values. Foreground colors are sampled from [0,1]x[0,1] and background colors are sampled from [-1,0]x[-1,0]
        # Note that the original [u,v] coordinates are in [-1,1]x[-1,1] for both networks
rgb_output1 = (model_F_atlas(uv_foreground1 * 0.5 + 0.5) + 1.0) * 0.5
rgb_output2 = (model_F_atlas(
uv_foreground2 * 0.5 - 0.5) + 1.0) * 0.5
# Reconstruct final colors from the two layers (using alpha)
rgb_output_foreground = rgb_output1 * alpha + rgb_output2 * (1.0 - alpha)
if use_gradient_loss:
gradient_loss = get_gradient_loss(video_frames_dx, video_frames_dy, jif_current,
model_F_mapping1, model_F_mapping2, model_F_atlas,
rgb_output_foreground,device,resx,number_of_frames,model_alpha)
else:
gradient_loss = 0.0
print("gradient_loss:")
print(gradient_loss)
rgb_output_foreground_not = rgb_output1 * (1.0 - alpha)
rgb_loss = (torch.norm(rgb_output_foreground - rgb_current, dim=1) ** 2).mean()
rgb_loss_sparsity = (torch.norm(rgb_output_foreground_not, dim=1) ** 2).mean()
rigidity_loss1 = get_rigidity_loss(
jif_current,
derivative_amount,
larger_dim,
number_of_frames,
model_F_mapping1,
uv_foreground1,device,
uv_mapping_scale=uv_mapping_scale)
rigidity_loss2 = get_rigidity_loss(
jif_current,
derivative_amount,
larger_dim,
number_of_frames,
model_F_mapping2,
uv_foreground2,device,
uv_mapping_scale=uv_mapping_scale)
if include_global_rigidity_loss and i <= stop_global_rigidity:
global_rigidity_loss1 = get_rigidity_loss(
jif_current,
global_rigidity_derivative_amount_fg,
larger_dim,
number_of_frames,
model_F_mapping1,
uv_foreground1,device,
uv_mapping_scale=uv_mapping_scale)
global_rigidity_loss2 = get_rigidity_loss(
jif_current,
global_rigidity_derivative_amount_bg,
larger_dim,
number_of_frames,
model_F_mapping2,
uv_foreground2,device,
uv_mapping_scale=uv_mapping_scale)
flow_loss1 = get_optical_flow_loss(
jif_current, uv_foreground1, optical_flows_reverse, optical_flows_reverse_mask, larger_dim,
number_of_frames, model_F_mapping1, optical_flows, optical_flows_mask, uv_mapping_scale,device, use_alpha=True,
alpha=alpha)
flow_loss2 = get_optical_flow_loss(
jif_current, uv_foreground2, optical_flows_reverse, optical_flows_reverse_mask, larger_dim,
number_of_frames, model_F_mapping2, optical_flows, optical_flows_mask, uv_mapping_scale,device, use_alpha=True,
alpha=1 - alpha)
flow_alpha_loss = get_optical_flow_alpha_loss(model_alpha,
jif_current, alpha, optical_flows_reverse,
optical_flows_reverse_mask, larger_dim,
number_of_frames, optical_flows,
optical_flows_mask, device)
print("flow alpha loss:")
print(flow_alpha_loss)
alpha_bootstrapping_loss = torch.mean(
-alpha_maskrcnn * torch.log(alpha) - (1 - alpha_maskrcnn) * torch.log(1 - alpha))
print("alpha_balancing_loss")
print(alpha_bootstrapping_loss)
if include_global_rigidity_loss and i <= stop_global_rigidity:
loss = rigidity_coeff * (
rigidity_loss1 + rigidity_loss2) + global_rigidity_coeff_fg * global_rigidity_loss1 + global_rigidity_coeff_bg * global_rigidity_loss2 + \
rgb_loss * rgb_coeff + optical_flow_coeff * (
flow_loss1 + flow_loss2) + alpha_bootstrapping_loss * alpha_bootstrapping_factor + flow_alpha_loss * alpha_flow_factor + rgb_loss_sparsity * sparsity_coeff + gradient_loss * gradient_loss_coeff
else:
loss = rigidity_coeff * (rigidity_loss1 + rigidity_loss2) + rgb_loss * rgb_coeff + optical_flow_coeff * (
flow_loss1 + flow_loss2) + alpha_bootstrapping_loss * alpha_bootstrapping_factor + flow_alpha_loss * alpha_flow_factor + rgb_loss_sparsity * sparsity_coeff + gradient_loss * gradient_loss_coeff
optimizer_all.zero_grad()
loss.backward()
optimizer_all.step()
try:
if use_optical_flow:
print("of_loss1:%f" % flow_loss1.detach())
print("of_loss2:%f" % flow_loss2.detach())
logging.info("of_loss1:%f" % flow_loss1.detach())
writer.add_scalar('Loss/train_of1', flow_loss1.detach(), i)
logging.info("of_loss2:%f" % flow_loss2.detach())
writer.add_scalar('Loss/train_of2', flow_loss2.detach(), i)
        except Exception:
pass
logging.info("flow_alpha_loss: %f", flow_alpha_loss.detach())
logging.info("rgb_loss:%f" % rgb_loss.detach())
logging.info("total_loss:%f" % loss.detach())
logging.info("rigidity_loss1:%f" % rigidity_loss1.detach())
logging.info("rigidity_loss2:%f" % rigidity_loss2.detach())
logging.info('rgb_loss_negative %f' % rgb_loss_sparsity.detach())
logging.info('-------------------------------')
print("rgb_loss:%f" % rgb_loss.detach())
print('rgb_loss_negative %f' % rgb_loss_sparsity.detach())
print("total_loss:%f" % loss.detach())
print("rigidity_loss1:%f" % rigidity_loss1.detach())
print("rigidity_loss2:%f" % rigidity_loss2.detach())
print("alpha_mean:%f" % alpha.mean().detach())
logging.info("alpha_mean:%f" % alpha.mean().detach())
print("alpha_mean_1:%f" % alpha[alpha > 0.5].mean().detach())
logging.info("alpha_mean_1:%f" % alpha[alpha > 0.5].mean().detach())
print("alpha_mean_0:%f" % alpha[alpha < 0.5].mean().detach())
logging.info("alpha_mean_0:%f" % alpha[alpha < 0.5].mean().detach())
print(f'------------{results_folder.name}------------------')
writer.add_scalar('Loss/alpha_mean', alpha.mean().detach(), i)
writer.add_scalar('Loss/rgb_loss', rgb_loss.detach(), i)
writer.add_scalar('Loss/rigidity_loss1', rigidity_loss1.detach(), i)
writer.add_scalar('Loss/rigidity_loss2', rigidity_loss2.detach(), i)
try:
# render and evaluate videos every N iterations
if i % evaluate_every == 0 and i > start_iteration:
evaluate_model(model_F_atlas, resx, resy, number_of_frames, model_F_mapping1,
model_F_mapping2, model_alpha,
video_frames, results_folder, i, mask_frames, optimizer_all,
writer, vid_name, derivative_amount, uv_mapping_scale,
optical_flows,
optical_flows_mask,device)
rgb_img = video_frames[:, :, :, 0].numpy()
writer.add_image('Input/rgb_0', rgb_img, i, dataformats='HWC')
model_F_atlas.train()
model_F_mapping1.train()
model_F_mapping2.train()
model_alpha.train()
except Exception:
pass
if __name__ == "__main__":
with open(sys.argv[1]) as f:
main(json.load(f))
|
[
"evaluate.evaluate_model"
] |
[((659, 683), 'numpy.int64', 'np.int64', (["config['resx']"], {}), "(config['resx'])\n", (667, 683), True, 'import numpy as np\n'), ((695, 719), 'numpy.int64', 'np.int64', (["config['resy']"], {}), "(config['resy'])\n", (703, 719), True, 'import numpy as np\n'), ((893, 927), 'numpy.int64', 'np.int64', (["config['evaluate_every']"], {}), "(config['evaluate_every'])\n", (901, 927), True, 'import numpy as np\n'), ((1248, 1275), 'pathlib.Path', 'Path', (["config['data_folder']"], {}), "(config['data_folder'])\n", (1252, 1275), False, 'from pathlib import Path\n'), ((4931, 5049), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "('%s/log.log' % results_folder)", 'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(filename='%s/log.log' % results_folder, level=logging.\n INFO, format='%(asctime)s %(message)s')\n", (4950, 5049), False, 'import logging\n'), ((5074, 5097), 'logging.info', 'logging.info', (['"""Started"""'], {}), "('Started')\n", (5086, 5097), False, 'import logging\n'), ((5306, 5409), 'unwrap_utils.load_input_data', 'load_input_data', (['resy', 'resx', 'maximum_number_of_frames', 'data_folder', '(True)', '(True)', 'vid_root', 'vid_name'], {}), '(resy, resx, maximum_number_of_frames, data_folder, True, \n True, vid_root, vid_name)\n', (5321, 5409), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((5535, 5599), 'unwrap_utils.save_mask_flow', 'save_mask_flow', (['optical_flows_mask', 'video_frames', 'results_folder'], {}), '(optical_flows_mask, video_frames, results_folder)\n', (5549, 5599), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((7110, 7132), 'numpy.maximum', 'np.maximum', (['resx', 'resy'], {}), '(resx, resy)\n', (7120, 7132), True, 'import numpy as np\n'), ((8228, 8270), 'unwrap_utils.get_tuples', 'get_tuples', (['number_of_frames', 'video_frames'], {}), '(number_of_frames, video_frames)\n', (8238, 8270), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((524, 549), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (547, 549), False, 'import torch\n'), ((4888, 4926), 'json.dump', 'json.dump', (['config', 'json_file'], {'indent': '(4)'}), '(config, json_file, indent=4)\n', (4897, 4926), False, 'import json\n'), ((7747, 7774), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7757, 7774), False, 'import torch\n'), ((8575, 8607), 'logging.info', 'logging.info', (["('Iteration %d' % i)"], {}), "('Iteration %d' % i)\n", (8587, 8607), False, 'import logging\n'), ((11256, 11420), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'derivative_amount', 'larger_dim', 'number_of_frames', 'model_F_mapping1', 'uv_foreground1', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, derivative_amount, larger_dim,\n number_of_frames, model_F_mapping1, uv_foreground1, device,\n uv_mapping_scale=uv_mapping_scale)\n', (11273, 11420), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((11522, 11686), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'derivative_amount', 'larger_dim', 'number_of_frames', 'model_F_mapping2', 'uv_foreground2', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, derivative_amount, larger_dim,\n number_of_frames, model_F_mapping2, uv_foreground2, device,\n uv_mapping_scale=uv_mapping_scale)\n', (11539, 11686), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((12505, 12756), 'loss_utils.get_optical_flow_loss', 'get_optical_flow_loss', (['jif_current', 'uv_foreground1', 'optical_flows_reverse', 'optical_flows_reverse_mask', 'larger_dim', 'number_of_frames', 'model_F_mapping1', 'optical_flows', 'optical_flows_mask', 'uv_mapping_scale', 'device'], {'use_alpha': '(True)', 'alpha': 'alpha'}), '(jif_current, uv_foreground1, optical_flows_reverse,\n optical_flows_reverse_mask, larger_dim, number_of_frames,\n model_F_mapping1, optical_flows, optical_flows_mask, uv_mapping_scale,\n device, use_alpha=True, alpha=alpha)\n', (12526, 12756), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((12803, 13058), 'loss_utils.get_optical_flow_loss', 'get_optical_flow_loss', (['jif_current', 'uv_foreground2', 'optical_flows_reverse', 'optical_flows_reverse_mask', 'larger_dim', 'number_of_frames', 'model_F_mapping2', 'optical_flows', 'optical_flows_mask', 'uv_mapping_scale', 'device'], {'use_alpha': '(True)', 'alpha': '(1 - alpha)'}), '(jif_current, uv_foreground2, optical_flows_reverse,\n optical_flows_reverse_mask, larger_dim, number_of_frames,\n model_F_mapping2, optical_flows, optical_flows_mask, uv_mapping_scale,\n device, use_alpha=True, alpha=1 - alpha)\n', (12824, 13058), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((13110, 13302), 'loss_utils.get_optical_flow_alpha_loss', 'get_optical_flow_alpha_loss', (['model_alpha', 'jif_current', 'alpha', 'optical_flows_reverse', 'optical_flows_reverse_mask', 'larger_dim', 'number_of_frames', 'optical_flows', 'optical_flows_mask', 'device'], {}), '(model_alpha, jif_current, alpha,\n optical_flows_reverse, optical_flows_reverse_mask, larger_dim,\n number_of_frames, optical_flows, optical_flows_mask, device)\n', (13137, 13302), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((15681, 15728), 'logging.info', 'logging.info', (['"""-------------------------------"""'], {}), "('-------------------------------')\n", (15693, 15728), False, 'import logging\n'), ((5624, 5867), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(3)', 'output_dim': '(2)', 'hidden_dim': 'number_of_channels_mapping1', 'use_positional': 'use_positional_encoding_mapping1', 'positional_dim': 'number_of_positional_encoding_mapping1', 'num_layers': 'number_of_layers_mapping1', 'skip_layers': '[]'}), '(input_dim=3, output_dim=2, hidden_dim=number_of_channels_mapping1,\n use_positional=use_positional_encoding_mapping1, positional_dim=\n number_of_positional_encoding_mapping1, num_layers=\n number_of_layers_mapping1, skip_layers=[])\n', (5628, 5867), False, 'from implicit_neural_networks import IMLP\n'), ((5946, 6189), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(3)', 'output_dim': '(2)', 'hidden_dim': 'number_of_channels_mapping2', 'use_positional': 'use_positional_encoding_mapping2', 'positional_dim': 'number_of_positional_encoding_mapping2', 'num_layers': 'number_of_layers_mapping2', 'skip_layers': '[]'}), '(input_dim=3, output_dim=2, hidden_dim=number_of_channels_mapping2,\n use_positional=use_positional_encoding_mapping2, positional_dim=\n number_of_positional_encoding_mapping2, num_layers=\n number_of_layers_mapping2, skip_layers=[])\n', (5950, 6189), False, 'from implicit_neural_networks import IMLP\n'), ((6265, 6463), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(2)', 'output_dim': '(3)', 'hidden_dim': 'number_of_channels_atlas', 'use_positional': '(True)', 'positional_dim': 'positional_encoding_num_atlas', 'num_layers': 'number_of_layers_atlas', 'skip_layers': '[4, 7]'}), '(input_dim=2, output_dim=3, hidden_dim=number_of_channels_atlas,\n use_positional=True, positional_dim=positional_encoding_num_atlas,\n num_layers=number_of_layers_atlas, skip_layers=[4, 7])\n', (6269, 6463), False, 'from implicit_neural_networks import IMLP\n'), ((6543, 6737), 'implicit_neural_networks.IMLP', 'IMLP', ([], {'input_dim': '(3)', 'output_dim': '(1)', 'hidden_dim': 'number_of_channels_alpha', 'use_positional': '(True)', 'positional_dim': 'positional_encoding_num_alpha', 'num_layers': 'number_of_layers_alpha', 'skip_layers': '[]'}), '(input_dim=3, output_dim=1, hidden_dim=number_of_channels_alpha,\n use_positional=True, positional_dim=positional_encoding_num_alpha,\n num_layers=number_of_layers_alpha, skip_layers=[])\n', (6547, 6737), False, 'from implicit_neural_networks import IMLP\n'), ((7222, 7398), 'unwrap_utils.pre_train_mapping', 'pre_train_mapping', (['model_F_mapping1', 'number_of_frames', 'uv_mapping_scale'], {'resx': 'resx', 'resy': 'resy', 'larger_dim': 'larger_dim', 'device': 'device', 'pretrain_iters': 'pretrain_iter_number'}), '(model_F_mapping1, number_of_frames, uv_mapping_scale,\n resx=resx, resy=resy, larger_dim=larger_dim, device=device,\n pretrain_iters=pretrain_iter_number)\n', (7239, 7398), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((7500, 7676), 'unwrap_utils.pre_train_mapping', 'pre_train_mapping', (['model_F_mapping2', 'number_of_frames', 'uv_mapping_scale'], {'resx': 'resx', 'resy': 'resy', 'larger_dim': 'larger_dim', 'device': 'device', 'pretrain_iters': 'pretrain_iter_number'}), '(model_F_mapping2, number_of_frames, uv_mapping_scale,\n resx=resx, resy=resy, larger_dim=larger_dim, device=device,\n pretrain_iters=pretrain_iter_number)\n', (7517, 7676), False, 'from unwrap_utils import get_tuples, pre_train_mapping, load_input_data, save_mask_flow\n'), ((10608, 10799), 'loss_utils.get_gradient_loss', 'get_gradient_loss', (['video_frames_dx', 'video_frames_dy', 'jif_current', 'model_F_mapping1', 'model_F_mapping2', 'model_F_atlas', 'rgb_output_foreground', 'device', 'resx', 'number_of_frames', 'model_alpha'], {}), '(video_frames_dx, video_frames_dy, jif_current,\n model_F_mapping1, model_F_mapping2, model_F_atlas,\n rgb_output_foreground, device, resx, number_of_frames, model_alpha)\n', (10625, 10799), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((11871, 12054), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'global_rigidity_derivative_amount_fg', 'larger_dim', 'number_of_frames', 'model_F_mapping1', 'uv_foreground1', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, global_rigidity_derivative_amount_fg,\n larger_dim, number_of_frames, model_F_mapping1, uv_foreground1, device,\n uv_mapping_scale=uv_mapping_scale)\n', (11888, 12054), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((12195, 12378), 'loss_utils.get_rigidity_loss', 'get_rigidity_loss', (['jif_current', 'global_rigidity_derivative_amount_bg', 'larger_dim', 'number_of_frames', 'model_F_mapping2', 'uv_foreground2', 'device'], {'uv_mapping_scale': 'uv_mapping_scale'}), '(jif_current, global_rigidity_derivative_amount_bg,\n larger_dim, number_of_frames, model_F_mapping2, uv_foreground2, device,\n uv_mapping_scale=uv_mapping_scale)\n', (12212, 12378), False, 'from loss_utils import get_gradient_loss, get_rigidity_loss, get_optical_flow_loss, get_optical_flow_alpha_loss\n'), ((17837, 17849), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17846, 17849), False, 'import json\n'), ((8764, 8787), 'numpy.int64', 'np.int64', (['(samples * 1.0)'], {}), '(samples * 1.0)\n', (8772, 8787), True, 'import numpy as np\n'), ((9311, 9472), 'torch.cat', 'torch.cat', (['(jif_current[0, :] / (larger_dim / 2) - 1, jif_current[1, :] / (larger_dim /\n 2) - 1, jif_current[2, :] / (number_of_frames / 2.0) - 1)'], {'dim': '(1)'}), '((jif_current[0, :] / (larger_dim / 2) - 1, jif_current[1, :] / (\n larger_dim / 2) - 1, jif_current[2, :] / (number_of_frames / 2.0) - 1),\n dim=1)\n', (9320, 9472), False, 'import torch\n'), ((16941, 17224), 'evaluate.evaluate_model', 'evaluate_model', (['model_F_atlas', 'resx', 'resy', 'number_of_frames', 'model_F_mapping1', 'model_F_mapping2', 'model_alpha', 'video_frames', 'results_folder', 'i', 'mask_frames', 'optimizer_all', 'writer', 'vid_name', 'derivative_amount', 'uv_mapping_scale', 'optical_flows', 'optical_flows_mask', 'device'], {}), '(model_F_atlas, resx, resy, number_of_frames,\n model_F_mapping1, model_F_mapping2, model_alpha, video_frames,\n results_folder, i, mask_frames, optimizer_all, writer, vid_name,\n derivative_amount, uv_mapping_scale, optical_flows, optical_flows_mask,\n device)\n', (16955, 17224), False, 'from evaluate import evaluate_model\n'), ((11074, 11128), 'torch.norm', 'torch.norm', (['(rgb_output_foreground - rgb_current)'], {'dim': '(1)'}), '(rgb_output_foreground - rgb_current, dim=1)\n', (11084, 11128), False, 'import torch\n'), ((11172, 11216), 'torch.norm', 'torch.norm', (['rgb_output_foreground_not'], {'dim': '(1)'}), '(rgb_output_foreground_not, dim=1)\n', (11182, 11216), False, 'import torch\n'), ((13655, 13671), 'torch.log', 'torch.log', (['alpha'], {}), '(alpha)\n', (13664, 13671), False, 'import torch\n'), ((13697, 13717), 'torch.log', 'torch.log', (['(1 - alpha)'], {}), '(1 - alpha)\n', (13706, 13717), False, 'import torch\n'), ((4671, 4688), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4686, 4688), False, 'from datetime import datetime\n')]
|
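Each row of this preview pairs a Python snippet (code) with the evaluate-related calls it makes (apis) and a per-call record (extract_api) holding character offsets, the resolved dotted name, the argument reprs, and the originating import statement. The extraction script itself is not part of the preview, so the sketch below is only an illustration of how such records could be recovered with Python's standard ast module; the helper name extract_api_calls and the exact record layout are assumptions, not the pipeline that built this dataset.

# Hypothetical sketch (not the dataset's actual pipeline): collect calls made
# through imported names and pair them with their source text and import line.
import ast

def extract_api_calls(source: str):
    tree = ast.parse(source)
    # Map local aliases to fully qualified names, remembering the import line.
    alias_to_name, import_lines = {}, {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                alias_to_name[a.asname or a.name] = a.name
                import_lines[a.asname or a.name] = ast.get_source_segment(source, node)
        elif isinstance(node, ast.ImportFrom) and node.module:
            for a in node.names:
                alias_to_name[a.asname or a.name] = f"{node.module}.{a.name}"
                import_lines[a.asname or a.name] = ast.get_source_segment(source, node)
    records = []
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        # Walk dotted call targets such as data_helper.DataLoader(...) down to a Name.
        parts, func = [], node.func
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name) and func.id in alias_to_name:
            parts.append(alias_to_name[func.id])
            records.append((".".join(reversed(parts)),
                            ast.get_source_segment(source, node),
                            import_lines[func.id]))
    return records

print(extract_api_calls("import numpy as np\nx = np.maximum(1, 2)\n"))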
'''Train CIFAR10/100 with PyTorch using standard Contrastive Learning. This script tunes the L2 reg weight of the
final classifier.'''
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import math
import os
import argparse
from models import *
from configs import get_datasets
from evaluate import train_clf, encode_feature_averaging
parser = argparse.ArgumentParser(description='Final evaluation with feature averaging.')
parser.add_argument("--num-workers", type=int, default=2, help='Number of threads for data loaders')
parser.add_argument("--load-from", type=str, default='ckpt.pth', help='File to load from')
parser.add_argument("--num-passes", type=int, default=10, help='Number of passes to average')
parser.add_argument("--reg-lower", type=float, default=-7, help='Minimum log regularization parameter (base 10)')
parser.add_argument("--reg-upper", type=float, default=-3, help='Maximum log regularization parameter (base 10)')
parser.add_argument("--num-steps", type=int, default=10, help='Number of log-linearly spaced reg parameters to try')
args = parser.parse_args()
# Load checkpoint.
print('==> Loading settings from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
resume_from = os.path.join('./checkpoint', args.load_from)
checkpoint = torch.load(resume_from)
args.dataset = checkpoint['args']['dataset']
args.arch = checkpoint['args']['arch']
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Data
print('==> Preparing data..')
_, testset, clftrainset, num_classes, stem = get_datasets(args.dataset, test_as_train=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=1000, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
clftrainloader = torch.utils.data.DataLoader(clftrainset, batch_size=1000, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
# Model
print('==> Building model..')
##############################################################
# Encoder
##############################################################
if args.arch == 'resnet18':
net = ResNet18(stem=stem)
elif args.arch == 'resnet34':
net = ResNet34(stem=stem)
elif args.arch == 'resnet50':
net = ResNet50(stem=stem)
else:
raise ValueError("Bad architecture specification")
net = net.to(device)
if device == 'cuda':
repr_dim = net.representation_dim
net = torch.nn.DataParallel(net)
net.representation_dim = repr_dim
cudnn.benchmark = True
print('==> Loading encoder from checkpoint..')
net.load_state_dict(checkpoint['net'])
best_acc = 0
X, y = encode_feature_averaging(clftrainloader, device, net, num_passes=args.num_passes)
X_test, y_test = encode_feature_averaging(testloader, device, net, num_passes=args.num_passes)
for reg_weight in torch.exp(math.log(10) * torch.linspace(args.reg_lower, args.reg_upper, args.num_steps,
dtype=torch.float, device=device)):
clf = train_clf(X, y, net.representation_dim, num_classes, device, reg_weight=reg_weight)
raw_scores = clf(X_test)
_, predicted = raw_scores.max(1)
correct = predicted.eq(y_test).sum().item()
acc = 100 * correct / predicted.shape[0]
print('Test accuracy', acc, '%')
if acc > best_acc:
best_acc = acc
print("Best test accuracy", best_acc, "%")
|
[
"evaluate.encode_feature_averaging",
"evaluate.train_clf"
] |
[((368, 447), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Final evaluation with feature averaging."""'}), "(description='Final evaluation with feature averaging.')\n", (391, 447), False, 'import argparse\n'), ((1181, 1208), 'os.path.isdir', 'os.path.isdir', (['"""checkpoint"""'], {}), "('checkpoint')\n", (1194, 1208), False, 'import os\n'), ((1264, 1308), 'os.path.join', 'os.path.join', (['"""./checkpoint"""', 'args.load_from'], {}), "('./checkpoint', args.load_from)\n", (1276, 1308), False, 'import os\n'), ((1322, 1345), 'torch.load', 'torch.load', (['resume_from'], {}), '(resume_from)\n', (1332, 1345), False, 'import torch\n'), ((1570, 1616), 'configs.get_datasets', 'get_datasets', (['args.dataset'], {'test_as_train': '(True)'}), '(args.dataset, test_as_train=True)\n', (1582, 1616), False, 'from configs import get_datasets\n'), ((1631, 1750), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(testset, batch_size=1000, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n', (1658, 1750), False, 'import torch\n'), ((1805, 1928), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['clftrainset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(clftrainset, batch_size=1000, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n', (1832, 1928), False, 'import torch\n'), ((2676, 2762), 'evaluate.encode_feature_averaging', 'encode_feature_averaging', (['clftrainloader', 'device', 'net'], {'num_passes': 'args.num_passes'}), '(clftrainloader, device, net, num_passes=args.\n num_passes)\n', (2700, 2762), False, 'from evaluate import train_clf, encode_feature_averaging\n'), ((2775, 2852), 'evaluate.encode_feature_averaging', 'encode_feature_averaging', (['testloader', 'device', 'net'], {'num_passes': 'args.num_passes'}), '(testloader, device, net, num_passes=args.num_passes)\n', (2799, 2852), False, 'from evaluate import train_clf, encode_feature_averaging\n'), ((1450, 1475), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1473, 1475), False, 'import torch\n'), ((2475, 2501), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (2496, 2501), False, 'import torch\n'), ((3063, 3151), 'evaluate.train_clf', 'train_clf', (['X', 'y', 'net.representation_dim', 'num_classes', 'device'], {'reg_weight': 'reg_weight'}), '(X, y, net.representation_dim, num_classes, device, reg_weight=\n reg_weight)\n', (3072, 3151), False, 'from evaluate import train_clf, encode_feature_averaging\n'), ((2881, 2893), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (2889, 2893), False, 'import math\n'), ((2896, 2997), 'torch.linspace', 'torch.linspace', (['args.reg_lower', 'args.reg_upper', 'args.num_steps'], {'dtype': 'torch.float', 'device': 'device'}), '(args.reg_lower, args.reg_upper, args.num_steps, dtype=torch.\n float, device=device)\n', (2910, 2997), False, 'import torch\n')]
|
import os, sys
root_path = os.path.realpath(__file__).split('/evaluate/multipose_coco_eval.py')[0]
os.chdir(root_path)
sys.path.append(root_path)
from network.posenet import poseNet
from evaluate.tester import Tester
backbone = 'resnet101'
# Set testing parameters
params = Tester.TestParams()
params.subnet_name = 'both'
params.inp_size = 480 # input picture size = (inp_size, inp_size)
params.coeff = 2
params.in_thres = 0.21
params.coco_root = '/mnt/hdd10tb/Datasets/COCO2017/'
params.testresult_write_json = True # Whether to write json result
params.coco_result_filename = './demo/multipose_coco2017_results.json'
params.ckpt = '/home/vietnguyen/MultiPoseNet/extra/models/res50_detection_subnet/ckpt_39_0.59604.h5.best'
# model
model = poseNet(backbone)
for name, module in model.named_children():
for para in module.parameters():
para.requires_grad = False
tester = Tester(model, params)
tester.coco_eval() # pic_test
|
[
"evaluate.tester.Tester",
"evaluate.tester.Tester.TestParams"
] |
[((99, 118), 'os.chdir', 'os.chdir', (['root_path'], {}), '(root_path)\n', (107, 118), False, 'import os, sys\n'), ((119, 145), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (134, 145), False, 'import os, sys\n'), ((278, 297), 'evaluate.tester.Tester.TestParams', 'Tester.TestParams', ([], {}), '()\n', (295, 297), False, 'from evaluate.tester import Tester\n'), ((748, 765), 'network.posenet.poseNet', 'poseNet', (['backbone'], {}), '(backbone)\n', (755, 765), False, 'from network.posenet import poseNet\n'), ((893, 914), 'evaluate.tester.Tester', 'Tester', (['model', 'params'], {}), '(model, params)\n', (899, 914), False, 'from evaluate.tester import Tester\n'), ((27, 53), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (43, 53), False, 'import os, sys\n')]
|
import torch
from torch import nn
from Fashion_Mnist import load_data_fashion_mnist
from evaluate import Accumulator, accurate_num, evaluate_accuracy
net = nn.Sequential(nn.Flatten(),
nn.Linear(784,512), nn.BatchNorm1d(512),
nn.ReLU(),
nn.Linear(512, 256), nn.BatchNorm1d(256),
nn.ReLU(),
nn.Linear(256, 128), nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 64), nn.BatchNorm1d(64),
nn.ReLU(),
nn.Linear(64, 32), nn.BatchNorm1d(32),
nn.ReLU(),
nn.Linear(32,10))
def init_weights(m):
if type(m) == nn.Linear:
nn.init.kaiming_uniform_(m.weight)
net.apply(init_weights)
batch_size, lr, num_epochs = 256, 0.2, 10
loss = nn.CrossEntropyLoss()
trainer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.75)
train_iter, test_iter = load_data_fashion_mnist(batch_size)
def train_epoch(net, train_iter, loss, updater):
if isinstance(net, torch.nn.Module):
net.train()
metric = Accumulator(3)
for X, y in train_iter:
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, torch.optim.Optimizer):
updater.zero_grad()
l.sum().backward()
updater.step()
else:
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accurate_num(y_hat, y), y.numel())
return metric[0] / metric[2], metric[1] / metric[2]
def train(net, train_iter, test_iter, loss, num_epochs, updater):
for epoch in range(num_epochs):
train_loss, train_acc = train_epoch(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
print(f'epoch{epoch+1}: train loss: {train_loss:.5f} train acc: {train_acc:.2%} test acc: {test_acc:.2%}')
train(net, train_iter, test_iter, loss, num_epochs, trainer)
|
[
"evaluate.Accumulator",
"evaluate.evaluate_accuracy",
"evaluate.accurate_num"
] |
[((874, 895), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (893, 895), False, 'from torch import nn\n'), ((988, 1023), 'Fashion_Mnist.load_data_fashion_mnist', 'load_data_fashion_mnist', (['batch_size'], {}), '(batch_size)\n', (1011, 1023), False, 'from Fashion_Mnist import load_data_fashion_mnist\n'), ((176, 188), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (186, 188), False, 'from torch import nn\n'), ((211, 230), 'torch.nn.Linear', 'nn.Linear', (['(784)', '(512)'], {}), '(784, 512)\n', (220, 230), False, 'from torch import nn\n'), ((231, 250), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (245, 250), False, 'from torch import nn\n'), ((273, 282), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (280, 282), False, 'from torch import nn\n'), ((305, 324), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (314, 324), False, 'from torch import nn\n'), ((326, 345), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (340, 345), False, 'from torch import nn\n'), ((368, 377), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (375, 377), False, 'from torch import nn\n'), ((400, 419), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (409, 419), False, 'from torch import nn\n'), ((421, 440), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (435, 440), False, 'from torch import nn\n'), ((463, 472), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (470, 472), False, 'from torch import nn\n'), ((495, 513), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (504, 513), False, 'from torch import nn\n'), ((515, 533), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (529, 533), False, 'from torch import nn\n'), ((556, 565), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (563, 565), False, 'from torch import nn\n'), ((588, 605), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (597, 605), False, 'from torch import nn\n'), ((607, 625), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (621, 625), False, 'from torch import nn\n'), ((648, 657), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (655, 657), False, 'from torch import nn\n'), ((680, 697), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(10)'], {}), '(32, 10)\n', (689, 697), False, 'from torch import nn\n'), ((1153, 1167), 'evaluate.Accumulator', 'Accumulator', (['(3)'], {}), '(3)\n', (1164, 1167), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n'), ((759, 793), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {}), '(m.weight)\n', (783, 793), False, 'from torch import nn\n'), ((1809, 1842), 'evaluate.evaluate_accuracy', 'evaluate_accuracy', (['net', 'test_iter'], {}), '(net, test_iter)\n', (1826, 1842), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n'), ((1514, 1536), 'evaluate.accurate_num', 'accurate_num', (['y_hat', 'y'], {}), '(y_hat, y)\n', (1526, 1536), False, 'from evaluate import Accumulator, accurate_num, evaluate_accuracy\n')]
|
import argparse, os
import matplotlib
matplotlib.use('Agg')
import torch
from evaluate import evaluate_synthesis, evaluate_projection
import numpy as np
from synth.synthesize import create_synth
from utils.data import get_external_sounds
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default="/fast-1/philippe/flow_results_final/32par/models/vae_flow_mel_mse_cnn_mlp_iaf_1.model", help='')
parser.add_argument('--real_output', type=str, default="/fast-1/naotake", help='')
parser.add_argument('--batch_evals', type=int, default=16, help='')
parser.add_argument('--epochs', type=int, default=200, help='')
parser.add_argument('--device', type=str, default='cpu', help='Device for CUDA')
parser.add_argument('--project', action="store_true", help='')
parser.add_argument('--dataset', type=str, default='32par', help='')
parser.add_argument('--n_classes', type=int, default=32, help='')
parser.add_argument('--batch_out', type=int, default=3, help='')
parser.add_argument('--test_sounds', type=str, default='/fast-2/datasets/flow_synth_test', help='')
parser.add_argument('--nbworkers', type=int, default=0, help='')
args = parser.parse_args()
args.output = os.path.split(args.model_path)[0] #.../models
args.output = os.path.split(args.output)[0]
args.synthesize=True
model_name = os.path.splitext(os.path.basename(args.model_path))[0]
args.base_model = args.real_output + '/models/' + model_name
model_pars = model_name.split("_")
if model_pars[0]+model_pars[1] in ["vaeflow", "gatedcnn", "gatedmlp", "rescnn"]:
args.model = model_pars[0] + "_" + model_pars[1]
idx = 2
else:
args.model = model_pars[0]
idx = 1
if model_pars[idx+1] == "mfcc": #mel_mfcc
args.data="mel_mfcc"
idx += 1
else:
args.data = model_pars[idx]
args.loss = model_pars[idx+1]
base_img = '{0}/images/{1}_re'.format(args.real_output, model_name)
base_audio = '{0}/audio/{1}_re'.format(args.real_output, model_name)
args.cuda = not args.device == 'cpu' and torch.cuda.is_available()
args.device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
args.model_name, args.base_img, args.base_audio = model_name, base_img, base_audio
ref_split = '/fast-2/datasets/diva_dataset' + '/reference_split_' + args.dataset+ "_" +args.data + '.npz'
data = np.load(ref_split)['arr_0']
train_loader, valid_loader, test_loader = data[0], data[1], data[2]
args.batch_size = test_loader.batch_size
args.output_size = train_loader.dataset.output_size
args.input_size = train_loader.dataset.input_size
model = torch.load(args.model_path, map_location=args.device)
model.to(args.device)
args.engine, args.generator, args.param_defaults, args.rev_idx = create_synth(args.dataset)
if not args.project:
evaluate_synthesis(model, test_loader, args, train=False)
else:
test_sounds = get_external_sounds(args.test_sounds, test_loader.dataset, args)
evaluate_projection(model, test_sounds, args, train=False)
|
[
"evaluate.evaluate_projection",
"evaluate.evaluate_synthesis"
] |
[((38, 59), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (52, 59), False, 'import matplotlib\n'), ((247, 272), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (270, 272), False, 'import argparse, os\n'), ((2635, 2688), 'torch.load', 'torch.load', (['args.model_path'], {'map_location': 'args.device'}), '(args.model_path, map_location=args.device)\n', (2645, 2688), False, 'import torch\n'), ((2776, 2802), 'synth.synthesize.create_synth', 'create_synth', (['args.dataset'], {}), '(args.dataset)\n', (2788, 2802), False, 'from synth.synthesize import create_synth\n'), ((1279, 1309), 'os.path.split', 'os.path.split', (['args.model_path'], {}), '(args.model_path)\n', (1292, 1309), False, 'import argparse, os\n'), ((1339, 1365), 'os.path.split', 'os.path.split', (['args.output'], {}), '(args.output)\n', (1352, 1365), False, 'import argparse, os\n'), ((2084, 2109), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2107, 2109), False, 'import torch\n'), ((2388, 2406), 'numpy.load', 'np.load', (['ref_split'], {}), '(ref_split)\n', (2395, 2406), True, 'import numpy as np\n'), ((2828, 2885), 'evaluate.evaluate_synthesis', 'evaluate_synthesis', (['model', 'test_loader', 'args'], {'train': '(False)'}), '(model, test_loader, args, train=False)\n', (2846, 2885), False, 'from evaluate import evaluate_synthesis, evaluate_projection\n'), ((2910, 2974), 'utils.data.get_external_sounds', 'get_external_sounds', (['args.test_sounds', 'test_loader.dataset', 'args'], {}), '(args.test_sounds, test_loader.dataset, args)\n', (2929, 2974), False, 'from utils.data import get_external_sounds\n'), ((2979, 3037), 'evaluate.evaluate_projection', 'evaluate_projection', (['model', 'test_sounds', 'args'], {'train': '(False)'}), '(model, test_sounds, args, train=False)\n', (2998, 3037), False, 'from evaluate import evaluate_synthesis, evaluate_projection\n'), ((1421, 1454), 'os.path.basename', 'os.path.basename', (['args.model_path'], {}), '(args.model_path)\n', (1437, 1454), False, 'import argparse, os\n'), ((2152, 2177), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2175, 2177), False, 'import torch\n')]
|
import evaluate
import pandas as pd
import sys
import glob
sys.path.append('../gopher')
import utils
import numpy as np
import json
def get_runs(glob_pattern):
bin_run = {}
for run_dir in glob.glob(glob_pattern):
config = utils.get_config(run_dir)
if config['loss_fn']['value'] == 'poisson':
bin_run[config['bin_size']['value']] = run_dir
return bin_run
result_base_dir = utils.make_dir('inter_results')
# get datasets
testset, targets = utils.collect_whole_testset('../datasets/quantitative_data/testset/')
model_run_pattern = {'binloss_basenji_v2': '../trained_models/basenji_v2/binloss_basenji_v2/run*',
'bpnet_bin_loss_40': '../trained_models/bpnet/bin_loss_40/run*'}
for model_label, run_pattern in model_run_pattern.items():
bin_run = get_runs(run_pattern)
# get performance metrics for various evaluation bin sizes
result_path = result_base_dir + '/{}_triangle_plot.txt'.format(model_label)
bin_sizes = sorted(list(bin_run.keys()))
performance_per_resolution = []
for raw_bin_size in bin_sizes:
model, _ = utils.read_model(bin_run[raw_bin_size])
all_true, all_pred = utils.get_true_pred(model, raw_bin_size, testset)
for eval_bin_size in bin_sizes:
if eval_bin_size >= raw_bin_size:
print(raw_bin_size, '--->', eval_bin_size)
true_for_eval = evaluate.change_resolution(all_true, raw_bin_size, eval_bin_size)
pred_for_eval = evaluate.change_resolution(all_pred, raw_bin_size, eval_bin_size)
performance = evaluate.get_performance(true_for_eval, pred_for_eval, targets, 'whole')
performance_per_resolution.append([raw_bin_size, eval_bin_size] + list(performance.mean().values))
metric = 'pr_corr'
label = '<NAME>'
sorted_personr = pd.DataFrame(performance_per_resolution,
columns=['train', 'eval'] + list(performance.columns[:-1].values)).sort_values(
['train', 'eval'])[['train', 'eval', metric]]
padded_values = []
for train_bin, df in sorted_personr.groupby('train'):
pr_values = list(df[metric].values)
add_N = len(bin_sizes) - len(pr_values)
if add_N > 0:
pr_values = [np.nan for n in range(add_N)] + pr_values
padded_values.append(pr_values)
with open(result_path, 'w') as f:
f.write(json.dumps(padded_values))
|
[
"evaluate.change_resolution",
"evaluate.get_performance"
] |
[((59, 87), 'sys.path.append', 'sys.path.append', (['"""../gopher"""'], {}), "('../gopher')\n", (74, 87), False, 'import sys\n'), ((415, 446), 'utils.make_dir', 'utils.make_dir', (['"""inter_results"""'], {}), "('inter_results')\n", (429, 446), False, 'import utils\n'), ((481, 550), 'utils.collect_whole_testset', 'utils.collect_whole_testset', (['"""../datasets/quantitative_data/testset/"""'], {}), "('../datasets/quantitative_data/testset/')\n", (508, 550), False, 'import utils\n'), ((198, 221), 'glob.glob', 'glob.glob', (['glob_pattern'], {}), '(glob_pattern)\n', (207, 221), False, 'import glob\n'), ((240, 265), 'utils.get_config', 'utils.get_config', (['run_dir'], {}), '(run_dir)\n', (256, 265), False, 'import utils\n'), ((1110, 1149), 'utils.read_model', 'utils.read_model', (['bin_run[raw_bin_size]'], {}), '(bin_run[raw_bin_size])\n', (1126, 1149), False, 'import utils\n'), ((1179, 1228), 'utils.get_true_pred', 'utils.get_true_pred', (['model', 'raw_bin_size', 'testset'], {}), '(model, raw_bin_size, testset)\n', (1198, 1228), False, 'import utils\n'), ((2419, 2444), 'json.dumps', 'json.dumps', (['padded_values'], {}), '(padded_values)\n', (2429, 2444), False, 'import json\n'), ((1406, 1471), 'evaluate.change_resolution', 'evaluate.change_resolution', (['all_true', 'raw_bin_size', 'eval_bin_size'], {}), '(all_true, raw_bin_size, eval_bin_size)\n', (1432, 1471), False, 'import evaluate\n'), ((1504, 1569), 'evaluate.change_resolution', 'evaluate.change_resolution', (['all_pred', 'raw_bin_size', 'eval_bin_size'], {}), '(all_pred, raw_bin_size, eval_bin_size)\n', (1530, 1569), False, 'import evaluate\n'), ((1600, 1672), 'evaluate.get_performance', 'evaluate.get_performance', (['true_for_eval', 'pred_for_eval', 'targets', '"""whole"""'], {}), "(true_for_eval, pred_for_eval, targets, 'whole')\n", (1624, 1672), False, 'import evaluate\n')]
|
import logging
import numpy as np
import torch
from torch import nn
from anchor_based import anchor_helper
from anchor_based.dsnet import DSNet
from anchor_based.losses import calc_cls_loss, calc_loc_loss
from evaluate import evaluate
from helpers import data_helper, vsumm_helper, bbox_helper
logger = logging.getLogger()
def xavier_init(module):
cls_name = module.__class__.__name__
if 'Linear' in cls_name or 'Conv' in cls_name:
nn.init.xavier_uniform_(module.weight, gain=np.sqrt(2.0))
if module.bias is not None:
nn.init.constant_(module.bias, 0.1)
def train(args, split, save_path):
model = DSNet(base_model=args.base_model, num_feature=args.num_feature,
num_hidden=args.num_hidden, anchor_scales=args.anchor_scales,
num_head=args.num_head)
model = model.to(args.device)
model.apply(xavier_init)
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(parameters, lr=args.lr,
weight_decay=args.weight_decay)
max_val_fscore = -1
train_set = data_helper.VideoDataset(split['train_keys'])
train_loader = data_helper.DataLoader(train_set, shuffle=True)
val_set = data_helper.VideoDataset(split['test_keys'])
val_loader = data_helper.DataLoader(val_set, shuffle=False)
for epoch in range(args.max_epoch):
model.train()
stats = data_helper.AverageMeter('loss', 'cls_loss', 'loc_loss')
for _, seq, gtscore, cps, n_frames, nfps, picks, _ in train_loader:
keyshot_summ = vsumm_helper.get_keyshot_summ(
gtscore, cps, n_frames, nfps, picks)
target = vsumm_helper.downsample_summ(keyshot_summ)
if not target.any():
continue
target_bboxes = bbox_helper.seq2bbox(target)
target_bboxes = bbox_helper.lr2cw(target_bboxes)
anchors = anchor_helper.get_anchors(target.size, args.anchor_scales)
# Get class and location label for positive samples
cls_label, loc_label = anchor_helper.get_pos_label(
anchors, target_bboxes, args.pos_iou_thresh)
# Get negative samples
num_pos = cls_label.sum()
cls_label_neg, _ = anchor_helper.get_pos_label(
anchors, target_bboxes, args.neg_iou_thresh)
cls_label_neg = anchor_helper.get_neg_label(
cls_label_neg, int(args.neg_sample_ratio * num_pos))
# Get incomplete samples
cls_label_incomplete, _ = anchor_helper.get_pos_label(
anchors, target_bboxes, args.incomplete_iou_thresh)
cls_label_incomplete[cls_label_neg != 1] = 1
cls_label_incomplete = anchor_helper.get_neg_label(
cls_label_incomplete,
int(args.incomplete_sample_ratio * num_pos))
cls_label[cls_label_neg == -1] = -1
cls_label[cls_label_incomplete == -1] = -1
cls_label = torch.tensor(cls_label, dtype=torch.float32).to(args.device)
loc_label = torch.tensor(loc_label, dtype=torch.float32).to(args.device)
seq = torch.tensor(seq, dtype=torch.float32).unsqueeze(0).to(args.device)
pred_cls, pred_loc = model(seq)
loc_loss = calc_loc_loss(pred_loc, loc_label, cls_label)
cls_loss = calc_cls_loss(pred_cls, cls_label)
loss = cls_loss + args.lambda_reg * loc_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
stats.update(loss=loss.item(), cls_loss=cls_loss.item(),
loc_loss=loc_loss.item())
val_fscore, _ = evaluate(model, val_loader, args.nms_thresh, args.device)
if max_val_fscore < val_fscore:
max_val_fscore = val_fscore
torch.save(model.state_dict(), str(save_path))
logger.info(f'Epoch: {epoch}/{args.max_epoch} '
f'Loss: {stats.cls_loss:.4f}/{stats.loc_loss:.4f}/{stats.loss:.4f} '
f'F-score cur/max: {val_fscore:.4f}/{max_val_fscore:.4f}')
return max_val_fscore
|
[
"evaluate.evaluate"
] |
[((306, 325), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (323, 325), False, 'import logging\n'), ((644, 798), 'anchor_based.dsnet.DSNet', 'DSNet', ([], {'base_model': 'args.base_model', 'num_feature': 'args.num_feature', 'num_hidden': 'args.num_hidden', 'anchor_scales': 'args.anchor_scales', 'num_head': 'args.num_head'}), '(base_model=args.base_model, num_feature=args.num_feature, num_hidden=\n args.num_hidden, anchor_scales=args.anchor_scales, num_head=args.num_head)\n', (649, 798), False, 'from anchor_based.dsnet import DSNet\n'), ((979, 1051), 'torch.optim.Adam', 'torch.optim.Adam', (['parameters'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay'}), '(parameters, lr=args.lr, weight_decay=args.weight_decay)\n', (995, 1051), False, 'import torch\n'), ((1127, 1172), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['train_keys']"], {}), "(split['train_keys'])\n", (1151, 1172), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1192, 1239), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True)\n', (1214, 1239), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1255, 1299), 'helpers.data_helper.VideoDataset', 'data_helper.VideoDataset', (["split['test_keys']"], {}), "(split['test_keys'])\n", (1279, 1299), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1317, 1363), 'helpers.data_helper.DataLoader', 'data_helper.DataLoader', (['val_set'], {'shuffle': '(False)'}), '(val_set, shuffle=False)\n', (1339, 1363), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1443, 1499), 'helpers.data_helper.AverageMeter', 'data_helper.AverageMeter', (['"""loss"""', '"""cls_loss"""', '"""loc_loss"""'], {}), "('loss', 'cls_loss', 'loc_loss')\n", (1467, 1499), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((3745, 3802), 'evaluate.evaluate', 'evaluate', (['model', 'val_loader', 'args.nms_thresh', 'args.device'], {}), '(model, val_loader, args.nms_thresh, args.device)\n', (3753, 3802), False, 'from evaluate import evaluate\n'), ((559, 594), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.bias', '(0.1)'], {}), '(module.bias, 0.1)\n', (576, 594), False, 'from torch import nn\n'), ((1604, 1670), 'helpers.vsumm_helper.get_keyshot_summ', 'vsumm_helper.get_keyshot_summ', (['gtscore', 'cps', 'n_frames', 'nfps', 'picks'], {}), '(gtscore, cps, n_frames, nfps, picks)\n', (1633, 1670), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1709, 1751), 'helpers.vsumm_helper.downsample_summ', 'vsumm_helper.downsample_summ', (['keyshot_summ'], {}), '(keyshot_summ)\n', (1737, 1751), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1840, 1868), 'helpers.bbox_helper.seq2bbox', 'bbox_helper.seq2bbox', (['target'], {}), '(target)\n', (1860, 1868), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1897, 1929), 'helpers.bbox_helper.lr2cw', 'bbox_helper.lr2cw', (['target_bboxes'], {}), '(target_bboxes)\n', (1914, 1929), False, 'from helpers import data_helper, vsumm_helper, bbox_helper\n'), ((1952, 2010), 'anchor_based.anchor_helper.get_anchors', 'anchor_helper.get_anchors', (['target.size', 'args.anchor_scales'], {}), '(target.size, args.anchor_scales)\n', (1977, 2010), False, 'from anchor_based import anchor_helper\n'), ((2110, 2182), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.pos_iou_thresh'], {}), '(anchors, target_bboxes, args.pos_iou_thresh)\n', (2137, 2182), False, 'from anchor_based import anchor_helper\n'), ((2305, 2377), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.neg_iou_thresh'], {}), '(anchors, target_bboxes, args.neg_iou_thresh)\n', (2332, 2377), False, 'from anchor_based import anchor_helper\n'), ((2597, 2676), 'anchor_based.anchor_helper.get_pos_label', 'anchor_helper.get_pos_label', (['anchors', 'target_bboxes', 'args.incomplete_iou_thresh'], {}), '(anchors, target_bboxes, args.incomplete_iou_thresh)\n', (2624, 2676), False, 'from anchor_based import anchor_helper\n'), ((3345, 3390), 'anchor_based.losses.calc_loc_loss', 'calc_loc_loss', (['pred_loc', 'loc_label', 'cls_label'], {}), '(pred_loc, loc_label, cls_label)\n', (3358, 3390), False, 'from anchor_based.losses import calc_cls_loss, calc_loc_loss\n'), ((3414, 3448), 'anchor_based.losses.calc_cls_loss', 'calc_cls_loss', (['pred_cls', 'cls_label'], {}), '(pred_cls, cls_label)\n', (3427, 3448), False, 'from anchor_based.losses import calc_cls_loss, calc_loc_loss\n'), ((497, 509), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (504, 509), True, 'import numpy as np\n'), ((3043, 3087), 'torch.tensor', 'torch.tensor', (['cls_label'], {'dtype': 'torch.float32'}), '(cls_label, dtype=torch.float32)\n', (3055, 3087), False, 'import torch\n'), ((3128, 3172), 'torch.tensor', 'torch.tensor', (['loc_label'], {'dtype': 'torch.float32'}), '(loc_label, dtype=torch.float32)\n', (3140, 3172), False, 'import torch\n'), ((3208, 3246), 'torch.tensor', 'torch.tensor', (['seq'], {'dtype': 'torch.float32'}), '(seq, dtype=torch.float32)\n', (3220, 3246), False, 'import torch\n')]
|
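To consume rows like the ones above, the dataset can be loaded with the datasets library. The hub id is not visible in this preview, so the name below is a placeholder.

# Hypothetical usage sketch: "user/code-api-extraction" is a placeholder id,
# since the preview does not show the dataset's actual hub name.
from datasets import load_dataset

ds = load_dataset("user/code-api-extraction", split="train")
row = ds[0]
print(row["code"][:200])          # raw Python source of one snippet
print(row["apis"])               # e.g. ["evaluate.evaluate_model"]
print(row["extract_api"][:200])  # stringified per-call extraction records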