code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
def get_wave_form(self):
''' getter '''
if isinstance(self.__wave_form, WaveFormInterface) is False:
raise TypeError()
return self.__wave_form | getter | null | null | null |
|
def set_wave_form(self, value):
''' setter '''
if isinstance(value, WaveFormInterface) is False:
raise TypeError()
self.__wave_form = value | setter | null | null | null |
|
def play_beat(
self,
frequencys,
play_time,
sample_rate=44100,
volume=0.01
):
'''
Play a beat under the conditions given by the arguments.
Args:
frequencys: Tuple of (left frequency (Hz), right frequency (Hz)).
play_time: Playback time in seconds.
sample_rate: Sample rate.
volume: Volume.
Returns:
void
'''
# Base object of the underlying audio library.
audio = pyaudio.PyAudio()
# Audio stream.
stream = audio.open(
format=pyaudio.paFloat32,
channels=2,
rate=sample_rate,
output=1
)
left_frequency, right_frequency = frequencys
left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
self.write_stream(stream, left_chunk, right_chunk, volume)
stream.stop_stream()
stream.close()
audio.terminate() | Play a beat under the conditions given by the arguments.
Args:
frequencys: Tuple of (left frequency (Hz), right frequency (Hz)).
play_time: Playback time in seconds.
sample_rate: Sample rate.
volume: Volume.
Returns:
void | null | null | null |
|
def save_beat(
self,
output_file_name,
frequencys,
play_time,
sample_rate=44100,
volume=0.01
):
'''
Save a beat generated under the conditions given by the arguments to a WAV file.
Args:
output_file_name: Path to the output WAV file.
frequencys: Tuple of (left frequency (Hz), right frequency (Hz)).
play_time: Playback time in seconds.
sample_rate: Sample rate.
volume: Volume.
Returns:
void
'''
left_frequency, right_frequency = frequencys
left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
frame_list = self.read_stream(left_chunk, right_chunk, volume)
wf = wave.open(output_file_name, 'wb')
wf.setparams((2, 2, sample_rate, 0, 'NONE', 'not compressed'))
wf.writeframes(b''.join(frame_list))
wf.close() | Save a beat generated under the conditions given by the arguments to a WAV file.
Args:
output_file_name: Path to the output WAV file.
frequencys: Tuple of (left frequency (Hz), right frequency (Hz)).
play_time: Playback time in seconds.
sample_rate: Sample rate.
volume: Volume.
Returns:
void | null | null | null |
|
def __create_chunk(self, frequency, play_time, sample_rate):
'''
Generate a chunk.
Args:
frequency: Frequency (Hz).
play_time: Playback time in seconds.
sample_rate: Sample rate.
Returns:
numpy array of the chunk.
'''
chunks = []
wave_form = self.wave_form.create(frequency, play_time, sample_rate)
chunks.append(wave_form)
chunk = numpy.concatenate(chunks)
return chunk | Generate a chunk.
Args:
frequency: Frequency (Hz).
play_time: Playback time in seconds.
sample_rate: Sample rate.
Returns:
numpy array of the chunk. | null | null | null |
|
def draw(self):
'''
Draws samples from the `true` distribution.
Returns:
`np.ndarray` of samples.
'''
observed_arr = self.__image_true_sampler.draw()
observed_arr = self.add_condition(observed_arr)
return observed_arr | Draws samples from the `true` distribution.
Returns:
`np.ndarray` of samples. | null | null | null |
|
def add_condition(self, observed_arr):
'''
Add condition.
Args:
observed_arr: `np.ndarray` of samples.
Returns:
`np.ndarray` of samples.
'''
condition_arr = self.__image_true_sampler.draw()
return np.concatenate((observed_arr, condition_arr), axis=1) | Add condition.
Args:
observed_arr: `np.ndarray` of samples.
Returns:
`np.ndarray` of samples. | null | null | null |
|
def create(self, frequency, play_time, sample_rate):
'''
Generate a sound waveform.
Args:
frequency: Frequency (Hz).
play_time: Playback time in seconds.
sample_rate: Sample rate.
Returns:
Array containing the waveform samples.
'''
length = int(play_time * sample_rate)
factor = float(frequency) * (math.pi * 2) / sample_rate
return numpy.sin(numpy.arange(length) * factor) | Generate a sound waveform.
Args:
frequency: Frequency (Hz).
play_time: Playback time in seconds.
sample_rate: Sample rate.
Returns:
Array containing the waveform samples. | null | null | null |
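The `create` method above is a plain sine generator: for frequency f and sample rate sr it returns sin(2 * pi * f * n / sr) for n = 0 .. length - 1. Below is a minimal standalone sketch of the same computation plus a two-channel pairing, assuming only `numpy`; the function and variable names here are illustrative and not part of the classes above.

import numpy as np

def sine_wave(frequency, play_time, sample_rate=44100):
    # Number of samples for the requested duration.
    length = int(play_time * sample_rate)
    # Angular step per sample: 2 * pi * frequency / sample_rate.
    factor = float(frequency) * (np.pi * 2) / sample_rate
    # Sample the sine at each step; values lie in [-1.0, 1.0].
    return np.sin(np.arange(length) * factor)

# Two slightly detuned tones, e.g. 400 Hz on the left and 410 Hz on the right,
# produce a 10 Hz binaural beat when played on separate channels.
left_chunk = sine_wave(400, play_time=1.0)
right_chunk = sine_wave(410, play_time=1.0)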
|
'''
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The maximum number of inference steps.
Returns:
`list` of `np.ndarray` of an optimal route.
'''
agent_x, agent_y = np.where(state_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_list = [(agent_x, agent_y, 0.0)]
self.t = 1
while self.t <= limit:
next_action_arr = self.extract_possible_actions(state_arr)
next_q_arr = self.function_approximator.inference_q(next_action_arr)
action_arr, q = self.select_action(next_action_arr, next_q_arr)
agent_x, agent_y = np.where(action_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_list.append((agent_x, agent_y, q[0]))
# Update State.
state_arr = self.update_state(state_arr, action_arr)
# Episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
return result_list | def inference(self, state_arr, limit=1000) | Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The maximum number of inference steps.
Returns:
`list` of `np.ndarray` of an optimal route. | 2.885467 | 2.305627 | 1.251489 |
'''
Compute the reward value.
Args:
state_arr: `np.ndarray` of state.
action_arr: `np.ndarray` of action.
Returns:
Reward value.
'''
if self.__check_goal_flag(action_arr) is True:
return 1.0
else:
x, y = np.where(action_arr[-1] == 1)
x, y = x[0], y[0]
goal_x, goal_y = self.__goal_pos
if x == goal_x and y == goal_y:
distance = 0.0
else:
distance = np.sqrt(((x - goal_x) ** 2) + (y - goal_y) ** 2)
if (x, y) in self.__route_long_memory_list:
repeating_penalty = self.__repeating_penalty
else:
repeating_penalty = 0.0
return 1.0 - distance - repeating_penalty | def observe_reward_value(self, state_arr, action_arr) | Compute the reward value.
Args:
state_arr: `np.ndarray` of state.
action_arr: `np.ndarray` of action.
Returns:
Reward value. | 2.977922 | 2.567071 | 1.160047 |
'''
Extract the current map state.
Returns:
`np.ndarray` of state.
'''
x, y = self.__agent_pos
state_arr = np.zeros(self.__map_arr.shape)
state_arr[x, y] = 1
return np.expand_dims(state_arr, axis=0) | def extract_now_state(self) | Extract the current map state.
Returns:
`np.ndarray` of state. | 4.906692 | 2.91364 | 1.684042 |
'''
Update state.
Override.
Args:
state_arr: `np.ndarray` of state in `self.t`.
action_arr: `np.ndarray` of action in `self.t`.
Returns:
`np.ndarray` of state in `self.t+1`.
'''
x, y = np.where(action_arr[-1] == 1)
self.__agent_pos = (x[0], y[0])
self.__route_memory_list.append((x[0], y[0]))
self.__route_long_memory_list.append((x[0], y[0]))
self.__route_long_memory_list = list(set(self.__route_long_memory_list))
while len(self.__route_memory_list) > self.__memory_num:
self.__route_memory_list = self.__route_memory_list[1:]
return self.extract_now_state() | def update_state(self, state_arr, action_arr) | Update state.
Override.
Args:
state_arr: `np.ndarray` of state in `self.t`.
action_arr: `np.ndarray` of action in `self.t`.
Returns:
`np.ndarray` of state in `self.t+1`. | 3.149595 | 2.289489 | 1.375676 |
'''
Create map.
References:
- https://qiita.com/kusano_t/items/487eec15d42aace7d685
'''
import random
import numpy as np
from itertools import product
news = ['n', 'e', 'w', 's']
m, n = map_size
SPACE = self.SPACE
WALL = self.WALL
START = self.START
GOAL = self.GOAL
memo = np.array([i for i in range(n * m)])
memo = memo.reshape(m, n)
# Initialize the maze.
maze = [[SPACE for _ in range(2 * n + 1)] for _ in range(2 * m + 1)]
maze[self.START_POS[0]][self.START_POS[1]] = START
self.__goal_pos = (2 * m - 1, 2 * n - 1)
maze[2 * m - 1][2 * n - 1] = GOAL
for i, j in product(range(2 * m + 1), range(2 * n + 1)):
if i % 2 == 0 or j % 2 == 0:
maze[i][j] = WALL
while (memo != 0).any():
x1 = random.choice(range(m))
y1 = random.choice(range(n))
direction = random.choice(news)
if direction == 'e':
x2, y2 = x1, y1 + 1
elif direction == 'w':
x2, y2 = x1, y1 - 1
elif direction == 'n':
x2, y2 = x1 - 1, y1
elif direction == 's':
x2, y2 = x1 + 1, y1
# Skip if the cell is out of range.
if (x2 < 0) or (x2 >= m) or (y2 < 0) or (y2 >= n):
continue
if memo[x1, y1] != memo[x2, y2]:
tmp_min = min(memo[x1, y1], memo[x2, y2])
tmp_max = max(memo[x1, y1], memo[x2, y2])
# Update the memo.
memo[memo == tmp_max] = tmp_min
# Break the wall.
maze[x1 + x2 + 1][y1 + y2 + 1] = SPACE
maze_arr = np.array(maze)
return maze_arr | def __create_map(self, map_size) | Create map.
References:
- https://qiita.com/kusano_t/items/487eec15d42aace7d685 | 2.410153 | 2.051404 | 1.17488 |
'''
Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
[vector of token, vector of token, vector of token]
'''
test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list, self.__seq_len)
inferenced_arr = self.__rbm.inference(
test_observed_arr,
training_count=1,
r_batch_size=-1
)
return inferenced_arr | def vectorize(self, sentence_list) | Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
[vector of token, vector of token, vector of token] | 6.458496 | 3.516561 | 1.836594 |
'''
Init.
Args:
sentence_list: The `list` of sentences.
token_master_list: Unique `list` of tokens.
hidden_neuron_count: The number of units in hidden layer.
training_count: The number of training iterations.
batch_size: Batch size of Mini-batch.
learning_rate: Learning rate.
seq_len: The length of one sequence.
'''
observed_arr = self.__setup_dataset(sentence_list, token_master_list, seq_len)
visible_num = observed_arr.shape[-1]
# `Builder` in `Builder Pattern` for LSTM-RTRBM.
rnnrbm_builder = LSTMRTRBMSimpleBuilder()
# Learning rate.
rnnrbm_builder.learning_rate = learning_rate
# Set units in visible layer.
rnnrbm_builder.visible_neuron_part(LogisticFunction(), visible_num)
# Set units in hidden layer.
rnnrbm_builder.hidden_neuron_part(LogisticFunction(), hidden_neuron_count)
# Set units in RNN layer.
rnnrbm_builder.rnn_neuron_part(TanhFunction())
# Set graph and approximation function, delegating `SGD` which is-a `OptParams`.
rnnrbm_builder.graph_part(LSTMRTRBMCD(opt_params=SGD()))
# Building.
rbm = rnnrbm_builder.get_result()
# Learning.
rbm.learn(
# The `np.ndarray` of observed data points.
observed_arr,
# Training count.
training_count=training_count,
# Batch size.
batch_size=batch_size
)
self.__rbm = rbm
self.__token_master_list = token_master_list
self.__seq_len = seq_len | def learn(
self,
sentence_list,
token_master_list,
hidden_neuron_count=1000,
training_count=1,
batch_size=100,
learning_rate=1e-03,
seq_len=5
) | Init.
Args:
sentence_list: The `list` of sentences.
token_master_list: Unique `list` of tokens.
hidden_neuron_count: The number of units in hidden layer.
training_count: The number of training iterations.
batch_size: Batch size of Mini-batch.
learning_rate: Learning rate.
seq_len: The length of one sequence. | 4.41785 | 3.591484 | 1.230091 |
def inference(self, observed_arr):
'''
Draws samples from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
'''
if observed_arr.ndim < 4:
# Add rank for channel.
observed_arr = np.expand_dims(observed_arr, axis=1)
self.__add_channel_flag = True
else:
self.__add_channel_flag = False
return super().inference(observed_arr) | Draws samples from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced. | null | null | null |
|
def learn(self, grad_arr, fix_opt_flag=False):
'''
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
'''
delta_arr = super().learn(grad_arr, fix_opt_flag)
if self.__add_channel_flag is True:
return delta_arr[:, 0]
else:
return delta_arr | Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients. | null | null | null |
|
'''
Calculate similarity with the so-called Cosine similarity of Tf-Idf vectors.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
'''
if len(token_list_x) == 0 or len(token_list_y) == 0:
return 0.0
document_list = token_list_x.copy()
[document_list.append(v) for v in token_list_y]
document_list = list(set(document_list))
tfidf_vectorizer = TfidfVectorizer(document_list)
vector_list_x = tfidf_vectorizer.vectorize(token_list_x)
vector_list_y = tfidf_vectorizer.vectorize(token_list_y)
if len(vector_list_x) > len(vector_list_y):
[vector_list_y.append(0.0) for _ in range(len(vector_list_x) - len(vector_list_y))]
elif len(vector_list_y) > len(vector_list_x):
[vector_list_x.append(0.0) for _ in range(len(vector_list_y) - len(vector_list_x))]
dot_prod = np.dot(vector_list_x, vector_list_y)
norm_x = np.linalg.norm(vector_list_x)
norm_y = np.linalg.norm(vector_list_y)
try:
result = dot_prod / (norm_x * norm_y)
if np.isnan(result):
return 0.0
else:
return result
except ZeroDivisionError:
return 0.0 | def calculate(self, token_list_x, token_list_y) | Calculate similarity with the so-called Cosine similarity of Tf-Idf vectors.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity. | 1.928501 | 1.553854 | 1.241109 |
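Once both token lists are mapped to TF-IDF vectors of equal length, the similarity above reduces to dot(x, y) / (||x|| * ||y||). A minimal sketch of that final step with plain `numpy` vectors; it skips the `TfidfVectorizer` step used above, assumes the vectors are already built, and the names here are illustrative only.

import numpy as np

def cosine_similarity(x_arr, y_arr):
    # Dot product of the two vectors.
    dot_prod = np.dot(x_arr, y_arr)
    # Product of the L2 norms; a zero norm means the similarity is undefined, so return 0.0.
    norm = np.linalg.norm(x_arr) * np.linalg.norm(y_arr)
    if norm == 0.0:
        return 0.0
    return float(dot_prod / norm)

print(cosine_similarity(np.array([1.0, 0.0, 1.0]), np.array([1.0, 1.0, 0.0])))  # 0.5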
'''
Compute distance.
Args:
x: Data point.
y: Data point.
Returns:
Distance.
'''
if x in self.__memo_dict:
x_v = self.__memo_dict[x]
else:
x_v = self.__cost_functionable.compute(self.__params_arr[x, :])
self.__memo_dict.setdefault(x, x_v)
if y in self.__memo_dict:
y_v = self.__memo_dict[y]
else:
y_v = self.__cost_functionable.compute(self.__params_arr[y, :])
self.__memo_dict.setdefault(y, y_v)
return abs(x_v - y_v) | def compute(self, x, y) | Compute distance.
Args:
x: Data point.
y: Data point.
Returns:
Distance. | 2.600338 | 2.284029 | 1.138487 |
'''
Initialize map of maze and setup reward value.
Args:
map_arr: Map. the 2d- `np.ndarray`.
start_point_label: Label of start point.
end_point_label: Label of end point.
wall_label: Label of wall.
agent_label: Label of agent.
'''
np.set_printoptions(threshold=np.inf)
self.__agent_label = agent_label
self.__map_arr = map_arr
self.__start_point_label = start_point_label
start_arr_tuple = np.where(self.__map_arr == self.__start_point_label)
x_arr, y_arr = start_arr_tuple
self.__start_point_tuple = (x_arr[0], y_arr[0])
self.__end_point_label = end_point_label
end_arr_tuple = np.where(self.__map_arr == self.__end_point_label)
x_arr, y_arr = end_arr_tuple
self.__end_point_tuple = (x_arr[0], y_arr[0])
self.__wall_label = wall_label
for x in range(self.__map_arr.shape[1]):
for y in range(self.__map_arr.shape[0]):
if (x, y) == self.__start_point_tuple or (x, y) == self.__end_point_tuple:
continue
arr_value = self.__map_arr[y][x]
if arr_value == self.__wall_label:
continue
self.save_r_df((x, y), float(arr_value)) | def initialize(self, map_arr, start_point_label="S", end_point_label="G", wall_label="#", agent_label="@") | Initialize map of maze and setup reward value.
Args:
map_arr: Map. the 2d- `np.ndarray`.
start_point_label: Label of start point.
end_point_label: Label of end point.
wall_label: Label of wall.
agent_label: Label of agent. | 2.204092 | 1.722513 | 1.279579 |
'''
Concrete method.
Args:
state_key: The key of state. This value is a point in the map.
Returns:
[(x, y)]
'''
x, y = state_key
if self.__map_arr[y][x] == self.__wall_label:
raise ValueError("It is the wall. (x, y)=(%d, %d)" % (x, y))
around_map = [(x, y-1), (x, y+1), (x-1, y), (x+1, y)]
possible_actions_list = [(_x, _y) for _x, _y in around_map if self.__map_arr[_y][_x] != self.__wall_label and self.__map_arr[_y][_x] != self.__start_point_label]
return possible_actions_list | def extract_possible_actions(self, state_key) | Concrete method.
Args:
state_key: The key of state. This value is a point in the map.
Returns:
[(x, y)] | 3.552593 | 2.309149 | 1.538486 |
'''
Compute the reward value.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Reward value.
'''
x, y = state_key
if self.__map_arr[y][x] == self.__end_point_label:
return 100.0
elif self.__map_arr[y][x] == self.__start_point_label:
return 0.0
elif self.__map_arr[y][x] == self.__wall_label:
raise ValueError("It is the wall. (x, y)=(%d, %d)" % (x, y))
else:
reward_value = float(self.__map_arr[y][x])
self.save_r_df(state_key, reward_value)
return reward_value | def observe_reward_value(self, state_key, action_key) | Compute the reward value.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Reward value. | 2.910432 | 2.569662 | 1.132613 |
'''
Visualize learning result.
'''
x, y = state_key
map_arr = copy.deepcopy(self.__map_arr)
goal_point_tuple = np.where(map_arr == self.__end_point_label)
goal_x, goal_y = goal_point_tuple
map_arr[y][x] = "@"
self.__map_arr_list.append(map_arr)
if goal_x[0] == x and goal_y[0] == y:
for i in range(10):
key = len(self.__map_arr_list) - (10 - i)
print("Number of searches: " + str(key))
print(self.__map_arr_list[key])
print("Total number of searches: " + str(self.t))
print(self.__map_arr_list[-1])
print("Goal !!") | def visualize_learning_result(self, state_key) | Visualize learning result. | 3.526369 | 3.391422 | 1.039791 |
'''
Check the end flag.
If this return value is `True`, the learning is end.
Args:
state_key: The key of state in `self.t`.
Returns:
bool
'''
# As a rule, the learning can not be stopped.
x, y = state_key
end_point_tuple = np.where(self.__map_arr == self.__end_point_label)
end_point_x_arr, end_point_y_arr = end_point_tuple
if x == end_point_x_arr[0] and y == end_point_y_arr[0]:
return True
else:
return False | def check_the_end_flag(self, state_key) | Check the end flag.
If this return value is `True`, the learning is end.
Args:
state_key: The key of state in `self.t`.
Returns:
bool | 4.925272 | 2.859365 | 1.722505 |
'''
Normalize q-value.
Override.
This method is called in each learning steps.
For example:
self.q_df.q_value = self.q_df.q_value / self.q_df.q_value.sum()
'''
if self.q_df is not None and self.q_df.shape[0]:
# min-max normalization
self.q_df.q_value = (self.q_df.q_value - self.q_df.q_value.min()) / (self.q_df.q_value.max() - self.q_df.q_value.min()) | def normalize_q_value(self) | Normalize q-value.
Override.
This method is called in each learning steps.
For example:
self.q_df.q_value = self.q_df.q_value / self.q_df.q_value.sum() | 2.686361 | 1.565006 | 1.716518 |
'''
Normalize r-value.
Override.
This method is called in each learning steps.
For example:
self.r_df = self.r_df.r_value / self.r_df.r_value.sum()
'''
if self.r_df is not None and self.r_df.shape[0]:
# z-score normalization.
self.r_df.r_value = (self.r_df.r_value - self.r_df.r_value.mean()) / self.r_df.r_value.std() | def normalize_r_value(self) | Normalize r-value.
Override.
This method is called in each learning steps.
For example:
self.r_df = self.r_df.r_value / self.r_df.r_value.sum() | 3.523053 | 1.694832 | 2.078703 |
'''
Inference route.
Args:
limit: the number of inferencing.
Returns:
[(x_1, y_1, q_value_1), (x_2, y_2, q_value_2), ...]
'''
route_list = []
memory_list = []
state_key = self.__start_point_tuple
x, y = state_key
end_x, end_y = self.__end_point_tuple
for i in range(limit):
q_df = self.q_df[self.q_df.state_key == state_key]
if len(memory_list):
q_df = q_df[~q_df.action_key.isin(memory_list)]
if q_df.shape[0] > 1:
q_df = q_df.sort_values(by=["q_value"], ascending=False)
action_key = q_df.iloc[0, :]["action_key"]
q_value = q_df.iloc[0, :]["q_value"]
elif q_df.shape[0] == 1:
action_key = q_df.action_key.values[0]
q_value = q_df.q_value.values[0]
else:
action_key_list = self.extract_possible_actions(state_key)
action_key_list = [v for v in action_key_list if v not in memory_list]
q_value = 0.0
if len(action_key_list):
action_key = random.choice(action_key_list)
_q_df = q_df[q_df.action_key == action_key]
if _q_df.shape[0]:
q_value = _q_df.q_value.values[0]
state_key = self.update_state(
state_key=state_key,
action_key=action_key
)
x, y = state_key
route_list.append((x, y, q_value))
memory_list.append(state_key)
if self.check_the_end_flag(state_key) is True:
break
return route_list | def inference(self, limit=1000) | Inference route.
Args:
limit: the number of inferencing.
Returns:
[(x_1, y_1, q_value_1), (x_2, y_2, q_value_2), ...] | 2.282689 | 2.043472 | 1.117064 |
def draw(self):
'''
Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples.
'''
observed_arr = self.noise_sampler.generate()
_ = self.__encoder_decoder_controller.encoder.inference(observed_arr)
arr = self.__encoder_decoder_controller.encoder.get_feature_points()
return arr | Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples. | null | null | null |
|
def learn(self, grad_arr):
'''
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients.
'''
encoder_delta_arr, _, encoder_grads_list = self.__encoder_decoder_controller.encoder.hidden_back_propagate(
grad_arr[:, -1]
)
encoder_grads_list.insert(0, None)
encoder_grads_list.insert(0, None)
self.__encoder_decoder_controller.encoder.optimize(
encoder_grads_list,
self.__learning_rate,
1
)
return encoder_delta_arr | Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients. | null | null | null |
|
def update(self):
'''
Update the encoder and the decoder
to minimize the reconstruction error of the inputs.
Returns:
`np.ndarray` of the reconstruction errors.
'''
observed_arr = self.noise_sampler.generate()
inferenced_arr = self.inference(observed_arr)
error_arr = self.__encoder_decoder_controller.computable_loss.compute_loss(
observed_arr,
inferenced_arr
)
delta_arr = self.__encoder_decoder_controller.computable_loss.compute_delta(
observed_arr,
inferenced_arr
)
decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.__encoder_decoder_controller.back_propagation(
delta_arr
)
self.__encoder_decoder_controller.optimize(
decoder_grads_list,
encoder_grads_list,
self.__learning_rate,
1
)
return error_arr | Update the encoder and the decoder
to minimize the reconstruction error of the inputs.
Returns:
`np.ndarray` of the reconstruction errors. | null | null | null |
|
'''
Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
'''
while self.t <= limit:
# Draw samples of next possible actions from any distribution.
next_action_arr = self.extract_possible_actions(state_arr)
# Inference Q-Values.
predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
# Set `np.ndarray` of rewards and next Q-Values.
reward_value_arr = np.empty((next_action_arr.shape[0], 1))
next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
for i in range(reward_value_arr.shape[0]):
# Observe reward values.
reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
# Inference the Max-Q-Value in next action time.
next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()
# Select action.
action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
# Update real Q-Values.
real_q_arr = self.update_q(
predicted_q_arr,
reward_value_arr,
next_max_q_arr
)
# Maximum of predicted and real Q-Values.
real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
if self.__q_logs_arr.shape[0] > 0:
self.__q_logs_arr = np.r_[
self.__q_logs_arr,
np.array([predicted_q, real_q]).reshape(1, 2)
]
else:
self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)
# Learn Q-Values.
self.learn_q(predicted_q_arr, real_q_arr)
# Update State.
state_arr = self.update_state(state_arr, action_arr)
# Epsode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break | def learn(self, state_arr, limit=1000) | Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms. | 3.03488 | 2.594694 | 1.169649 |
'''
Update Q.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
reward_value_arr: `np.ndarray` of reward values.
next_max_q_arr: `np.ndarray` of maximum Q-Values in next time step.
Returns:
`np.ndarray` of real Q-Values.
'''
# Update Q-Value.
return predicted_q_arr + (self.alpha_value * (reward_value_arr + (self.gamma_value * next_max_q_arr) - predicted_q_arr)) | def update_q(self, predicted_q_arr, reward_value_arr, next_max_q_arr) | Update Q.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
reward_value_arr: `np.ndarray` of reward values.
next_max_q_arr: `np.ndarray` of maximum Q-Values in next time step.
Returns:
`np.ndarray` of real Q-Values. | 2.500705 | 1.572955 | 1.589813 |
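The return value above is the standard Q-learning target, Q + alpha * (reward + gamma * max(Q') - Q). A worked example with hypothetical values (alpha = 0.1, gamma = 0.9), kept as arrays to mirror the shapes used above:

import numpy as np

alpha_value = 0.1   # learning rate (hypothetical value)
gamma_value = 0.9   # discount factor (hypothetical value)

predicted_q_arr = np.array([[0.5]])   # current Q-Value estimate
reward_value_arr = np.array([[1.0]])  # observed reward
next_max_q_arr = np.array([[0.8]])    # maximum Q-Value in the next time step

real_q_arr = predicted_q_arr + (alpha_value * (reward_value_arr + (gamma_value * next_max_q_arr) - predicted_q_arr))
print(real_q_arr)  # [[0.622]] == 0.5 + 0.1 * (1.0 + 0.72 - 0.5)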
'''
getter
Learning rate.
'''
if isinstance(self.__alpha_value, float) is False:
raise TypeError("The type of __alpha_value must be float.")
return self.__alpha_value | def get_alpha_value(self) | getter
Learning rate. | 6.542418 | 4.765373 | 1.372908 |
'''
setter
Learning rate.
'''
if isinstance(value, float) is False:
raise TypeError("The type of __alpha_value must be float.")
self.__alpha_value = value | def set_alpha_value(self, value) | setter
Learning rate. | 6.900826 | 4.846196 | 1.423968 |
'''
getter
Gamma value.
'''
if isinstance(self.__gamma_value, float) is False:
raise TypeError("The type of __gamma_value must be float.")
return self.__gamma_value | def get_gamma_value(self) | getter
Gamma value. | 6.025335 | 4.685174 | 1.286043 |
'''
setter
Gamma value.
'''
if isinstance(value, float) is False:
raise TypeError("The type of __gamma_value must be float.")
self.__gamma_value = value | def set_gamma_value(self, value) | setter
Gamma value. | 6.066525 | 4.859278 | 1.248442 |
def compute(self, x_arr, y_arr):
'''
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances.
'''
x_arr = x_arr / np.linalg.norm(x_arr, ord=1)
y_arr = y_arr / np.linalg.norm(y_arr, ord=1)
mixture_arr = 0.5 * (x_arr + y_arr)
return 0.5 * (super().compute(x_arr, mixture_arr) + super().compute(y_arr, mixture_arr)) | Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances. | null | null | null |
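The method above is the Jensen-Shannon divergence: both inputs are normalized, a mixture M = 0.5 * (P + Q) is formed, and the result is 0.5 * (KL(P||M) + KL(Q||M)), where the KL term is what the superclass computes. A standalone sketch of the same quantity, assuming discrete distributions with strictly positive entries; the function names here are illustrative.

import numpy as np

def kl_divergence(p_arr, q_arr):
    # KL(P || Q) for discrete distributions; assumes strictly positive entries.
    return np.sum(p_arr * np.log(p_arr / q_arr), axis=-1)

def js_divergence(x_arr, y_arr):
    # Normalize each input so it sums to 1.
    x_arr = x_arr / x_arr.sum(axis=-1, keepdims=True)
    y_arr = y_arr / y_arr.sum(axis=-1, keepdims=True)
    # Mixture distribution.
    mixture_arr = 0.5 * (x_arr + y_arr)
    return 0.5 * (kl_divergence(x_arr, mixture_arr) + kl_divergence(y_arr, mixture_arr))

print(js_divergence(np.array([0.2, 0.8]), np.array([0.8, 0.2])))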
|
''' getter '''
if isinstance(self.__top_n, int) is False:
raise TypeError("The type of __top_n must be int.")
return self.__top_n | def get_top_n(self) | getter | 4.948498 | 4.39662 | 1.125523 |
''' setter '''
if isinstance(value, int) is False:
raise TypeError("The type of __top_n must be int.")
self.__top_n = value | def set_top_n(self, value) | setter | 5.0008 | 4.919115 | 1.016606 |
'''
Filtering with top-n ranking.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered result.
'''
top_n_key = -1 * self.top_n
top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]
result_list = sorted(top_n_list, key=lambda x: x[0])
return result_list | def filter(self, scored_list) | Filtering with top-n ranking.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered result. | 3.859987 | 2.254371 | 1.712223 |
''' setter '''
if isinstance(value, Ngram):
self.__n_gram = value
else:
raise TypeError("The type of n_gram must be Ngram.") | def set_n_gram(self, value) | setter | 4.257233 | 4.226194 | 1.007344 |
''' setter '''
if isinstance(value, int):
self.__n = value
else:
raise TypeError("The type of n must be int.") | def set_n(self, value) | setter | 4.703194 | 4.663164 | 1.008584 |
'''
Tokenize sentence.
Args:
[n-gram, n-gram, n-gram, ...]
'''
super().tokenize(data)
token_tuple_zip = self.n_gram.generate_tuple_zip(self.token, self.n)
token_list = []
self.token = ["".join(list(token_tuple)) for token_tuple in token_tuple_zip] | def tokenize(self, data) | Tokenize sentence.
Args:
[n-gram, n-gram, n-gram, ...] | 6.007915 | 4.058803 | 1.480218 |
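For example, with n = 2 the tokenizer above joins each pair of adjacent tokens into one bigram token. A minimal sketch of that windowing on a plain list; the library's `generate_tuple_zip` helper is replaced here by simple slicing, so the names are illustrative.

def to_ngram_tokens(token_list, n=2):
    # Slide a window of size n over the token list and join each window into one token.
    return ["".join(token_list[i:i + n]) for i in range(len(token_list) - n + 1)]

print(to_ngram_tokens(["natural", "language", "processing"], n=2))
# ['naturallanguage', 'languageprocessing']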
'''
getter
'''
if isinstance(self.__q_df, pd.DataFrame) is False and self.__q_df is not None:
raise TypeError("The type of `__q_df` must be `pd.DataFrame`.")
return self.__q_df | def get_q_df(self) | getter | 4.092517 | 3.711332 | 1.102709 |
'''
setter
'''
if isinstance(value, pd.DataFrame) is False and value is not None:
raise TypeError("The type of `__q_df` must be `pd.DataFrame`.")
self.__q_df = value | def set_q_df(self, value) | setter | 4.274619 | 4.079512 | 1.047826 |
'''
Extract Q-Value from `self.q_df`.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Q-Value.
'''
q = 0.0
if self.q_df is None:
self.save_q_df(state_key, action_key, q)
return q
q_df = self.q_df[self.q_df.state_key == state_key]
q_df = q_df[q_df.action_key == action_key]
if q_df.shape[0]:
q = float(q_df["q_value"])
else:
self.save_q_df(state_key, action_key, q)
return q | def extract_q_df(self, state_key, action_key) | Extract Q-Value from `self.q_df`.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Q-Value. | 2.238453 | 1.854255 | 1.207198 |
'''
Insert or update Q-Value in `self.q_df`.
Args:
state_key: State.
action_key: Action.
q_value: Q-Value.
Exceptions:
TypeError: If the type of `q_value` is not float.
'''
if isinstance(q_value, float) is False:
raise TypeError("The type of q_value must be float.")
new_q_df = pd.DataFrame([(state_key, action_key, q_value)], columns=["state_key", "action_key", "q_value"])
if self.q_df is not None:
self.q_df = pd.concat([new_q_df, self.q_df])
self.q_df = self.q_df.drop_duplicates(["state_key", "action_key"])
else:
self.q_df = new_q_df | def save_q_df(self, state_key, action_key, q_value) | Insert or update Q-Value in `self.q_df`.
Args:
state_key: State.
action_key: Action.
q_value: Q-Value.
Exceptions:
TypeError: If the type of `q_value` is not float. | 2.132175 | 1.540309 | 1.384252 |
''' getter '''
if isinstance(self.__r_df, pd.DataFrame) is False and self.__r_df is not None:
raise TypeError("The type of `__r_df` must be `pd.DataFrame`.")
return self.__r_df | def get_r_df(self) | getter | 3.992925 | 3.666411 | 1.089056 |
''' setter '''
if isinstance(value, pd.DataFrame) is False and value is not None:
raise TypeError("The type of `__r_df` must be `pd.DataFrame`.")
self.__r_df = value | def set_r_df(self, value) | setter | 4.034322 | 4.000531 | 1.008447 |
'''
Extract R-Value from `self.r_df`.
Args:
state_key: The key of state.
r_value: R-Value(Reward).
action_key: The key of action if it is necessary for the parameter of value function.
Exceptions:
TypeError: If the type of `r_value` is not float.
'''
if isinstance(r_value, float) is False:
raise TypeError("The type of r_value must be float.")
r = 0.0
if self.r_df is None:
self.save_r_df(state_key, r, action_key)
return r
r_df = self.r_df[self.r_df.state_key == state_key]
if action_key is not None:
r_df = r_df[r_df.action_key == action_key]
if r_df.shape[0]:
r = float(r_df["r_value"])
else:
self.save_r_df(state_key, r, action_key)
return r | def extract_r_df(self, state_key, r_value, action_key=None) | Extract R-Value from `self.r_df`.
Args:
state_key: The key of state.
r_value: R-Value(Reward).
action_key: The key of action if it is necessary for the parameter of value function.
Exceptions:
TypeError: If the type of `r_value` is not float. | 2.867949 | 1.615207 | 1.775592 |
'''
Insert or update R-Value in `self.r_df`.
Args:
state_key: The key of state.
r_value: R-Value(Reward).
action_key: The key of action if it is necessary for the parameter of value function.
Exceptions:
TypeError: If the type of `r_value` is not float.
'''
if action_key is not None:
add_r_df = pd.DataFrame([(state_key, action_key, r_value)], columns=["state_key", "action_key", "r_value"])
else:
add_r_df = pd.DataFrame([(state_key, r_value)], columns=["state_key", "r_value"])
if self.r_df is not None:
self.r_df = pd.concat([add_r_df, self.r_df])
if action_key is not None:
self.r_df = self.r_df.drop_duplicates(["state_key", "action_key"])
else:
self.r_df = self.r_df.drop_duplicates(["state_key"])
else:
self.r_df = add_r_df | def save_r_df(self, state_key, r_value, action_key=None) | Insert or update R-Value in `self.r_df`.
Args:
state_key: The key of state.
r_value: R-Value(Reward).
action_key: The key of action if it is necessary for the parameter of value function.
Exceptions:
TypeError: If the type of `r_value` is not float. | 2.344772 | 1.404932 | 1.668957 |
'''
getter
Time.
'''
if isinstance(self.__t, int) is False:
raise TypeError("The type of __t must be int.")
return self.__t | def get_t(self) | getter
Time. | 8.619627 | 6.698241 | 1.286849 |
'''
setter
Time.
'''
if isinstance(value, int) is False:
raise TypeError("The type of __t must be int.")
self.__t = value | def set_t(self, value) | setter
Time. | 8.11944 | 6.126404 | 1.325319 |
'''
Learning and searching the optimal solution.
Args:
state_key: Initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
'''
self.t = 1
while self.t <= limit:
next_action_list = self.extract_possible_actions(state_key)
if len(next_action_list):
action_key = self.select_action(
state_key=state_key,
next_action_list=next_action_list
)
reward_value = self.observe_reward_value(state_key, action_key)
if len(next_action_list):
# Max-Q-Value in next action time.
next_state_key = self.update_state(
state_key=state_key,
action_key=action_key
)
next_next_action_list = self.extract_possible_actions(next_state_key)
next_action_key = self.predict_next_action(next_state_key, next_next_action_list)
next_max_q = self.extract_q_df(next_state_key, next_action_key)
# Update Q-Value.
self.update_q(
state_key=state_key,
action_key=action_key,
reward_value=reward_value,
next_max_q=next_max_q
)
# Update State.
state_key = next_state_key
# Normalize.
self.normalize_q_value()
self.normalize_r_value()
# Vis.
self.visualize_learning_result(state_key)
# Check.
if self.check_the_end_flag(state_key) is True:
break
# Episode.
self.t += 1 | def learn(self, state_key, limit=1000) | Learning and searching the optimal solution.
Args:
state_key: Initial state.
limit: The maximum number of iterative updates based on value iteration algorithms. | 3.107916 | 2.532455 | 1.227234 |
'''
Update Q-Value.
Args:
state_key: The key of state.
action_key: The key of action.
reward_value: R-Value(Reward).
next_max_q: Maximum Q-Value.
'''
# Now Q-Value.
q = self.extract_q_df(state_key, action_key)
# Update Q-Value.
new_q = q + self.alpha_value * (reward_value + (self.gamma_value * next_max_q) - q)
# Save updated Q-Value.
self.save_q_df(state_key, action_key, new_q) | def update_q(self, state_key, action_key, reward_value, next_max_q) | Update Q-Value.
Args:
state_key: The key of state.
action_key: The key of action.
reward_value: R-Value(Reward).
next_max_q: Maximum Q-Value. | 2.815524 | 2.145821 | 1.312097 |
'''
Predict next action by Q-Learning.
Args:
state_key: The key of state in `self.t+1`.
next_action_list: The possible action in `self.t+1`.
Returns:
The key of action.
'''
if self.q_df is not None:
next_action_q_df = self.q_df[self.q_df.state_key == state_key]
next_action_q_df = next_action_q_df[next_action_q_df.action_key.isin(next_action_list)]
if next_action_q_df.shape[0] == 0:
return random.choice(next_action_list)
else:
if next_action_q_df.shape[0] == 1:
max_q_action = next_action_q_df["action_key"].values[0]
else:
next_action_q_df = next_action_q_df.sort_values(by=["q_value"], ascending=False)
max_q_action = next_action_q_df.iloc[0, :]["action_key"]
return max_q_action
else:
return random.choice(next_action_list) | def predict_next_action(self, state_key, next_action_list) | Predict next action by Q-Learning.
Args:
state_key: The key of state in `self.t+1`.
next_action_list: The possible action in `self.t+1`.
Returns:
The key of action. | 2.000741 | 1.56735 | 1.276512 |
'''
Pull arms.
Args:
arm_id: Arms master id.
success: The number of success.
failure: The number of failure.
'''
self.__beta_dist_dict[arm_id].observe(success, failure) | def pull(self, arm_id, success, failure) | Pull arms.
Args:
arm_id: Arms master id.
success: The number of success.
failure: The number of failure. | 8.233379 | 3.839128 | 2.144596 |
'''
List up arms and their expected values.
Args:
limit: Length of the list.
Returns:
[Tuple(`Arms master id`, `expected value`)]
'''
expected_list = [(arm_id, beta_dist.expected_value()) for arm_id, beta_dist in self.__beta_dist_dict.items()]
expected_list = sorted(expected_list, key=lambda x: x[1], reverse=True)
return expected_list[:limit] | def recommend(self, limit=10) | List up arms and their expected values.
Args:
limit: Length of the list.
Returns:
[Tuple(`Arms master id`, `expected value`)] | 5.137337 | 2.201284 | 2.333791 |
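The two methods above follow the usual Beta-Bernoulli bandit bookkeeping: each arm accumulates success/failure counts, and `recommend` ranks arms by the expected value of the arm's Beta distribution. A standalone sketch with hypothetical counts; the uniform Beta(1, 1) prior used here is an assumption, not necessarily what the library's `beta_dist` objects use.

# Hypothetical per-arm (success, failure) counts.
arm_dict = {"arm_a": (12, 3), "arm_b": (40, 30), "arm_c": (5, 1)}

def expected_value(success, failure):
    # Posterior mean of Beta(success + 1, failure + 1) under a uniform prior (assumption).
    return (success + 1) / (success + failure + 2)

expected_list = [(arm_id, expected_value(s, f)) for arm_id, (s, f) in arm_dict.items()]
expected_list = sorted(expected_list, key=lambda x: x[1], reverse=True)
print(expected_list[:2])  # the two arms with the highest expected value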
def calculate(self, token_list_x, token_list_y):
'''
Calculate similarity with the Tanimoto coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
'''
match_list = [tanimoto_value for tanimoto_value in token_list_x if tanimoto_value in token_list_y]
return float(len(match_list) / (len(token_list_x) + len(token_list_y) - len(match_list))) | Calculate similarity with the Tanimoto coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity. | null | null | null |
|
'''
Args:
x: Data point.
y: Data point.
Returns:
Distance.
'''
return np.sqrt(np.sum((x-y)**2)) | def compute(self, x, y) | Args:
x: Data point.
y: Data point.
Returns:
Distance. | 4.386286 | 2.318918 | 1.891522 |
'''
getter
Time rate.
'''
if isinstance(self.__time_rate, float) is False:
raise TypeError("The type of __time_rate must be float.")
if self.__time_rate <= 0.0:
raise ValueError("The value of __time_rate must be greater than 0.0")
return self.__time_rate | def get_time_rate(self) | getter
Time rate. | 3.376202 | 3.016621 | 1.1192 |
'''
setter
Time rate.
'''
if isinstance(value, float) is False:
raise TypeError("The type of __time_rate must be float.")
if value <= 0.0:
raise ValueError("The value of __time_rate must be greater than 0.0")
self.__time_rate = value | def set_time_rate(self, value) | setter
Time rate. | 3.405307 | 3.073967 | 1.107789 |
'''
Select action by Q(state, action).
Concrete method for Boltzmann distribution.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
The key of action.
'''
if self.q_df is None or self.q_df.shape[0] == 0:
return random.choice(next_action_list)
next_action_b_df = self.__calculate_boltzmann_factor(state_key, next_action_list)
if next_action_b_df.shape[0] == 1:
return next_action_b_df["action_key"].values[0]
prob = np.random.random()
next_action_b_df = next_action_b_df.sort_values(by=["boltzmann_factor"])
i = 0
while prob > next_action_b_df.iloc[i, :]["boltzmann_factor"] + next_action_b_df.iloc[i + 1, :]["boltzmann_factor"]:
i += 1
if i + 1 >= next_action_b_df.shape[0]:
break
max_b_action_key = next_action_b_df.iloc[i, :]["action_key"]
return max_b_action_key | def select_action(self, state_key, next_action_list) | Select action by Q(state, action).
Concrete method for Boltzmann distribution.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
The key of action. | 2.824438 | 1.823801 | 1.548655 |
'''
Function of temperature.
Returns:
Sigmoid.
'''
sigmoid = 1 / np.log(self.t * self.time_rate + 1.1)
return sigmoid | def __calculate_sigmoid(self) | Function of temperature.
Returns:
Sigmoid. | 14.696718 | 6.994225 | 2.101265 |
'''
Calculate boltzmann factor.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
[(`The key of action`, `boltzmann probability`)]
'''
sigmoid = self.__calculate_sigmoid()
q_df = self.q_df[self.q_df.state_key == state_key]
q_df = q_df[q_df.action_key.isin(next_action_list)]
q_df["boltzmann_factor"] = q_df["q_value"] / sigmoid
q_df["boltzmann_factor"] = q_df["boltzmann_factor"].apply(np.exp)
q_df["boltzmann_factor"] = q_df["boltzmann_factor"] / q_df["boltzmann_factor"].sum()
return q_df | def __calculate_boltzmann_factor(self, state_key, next_action_list) | Calculate boltzmann factor.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
[(`The key of action`, `boltzmann probability`)] | 3.267707 | 1.828348 | 1.787245 |
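The factor above is a softmax over Q-values with a temperature given by `__calculate_sigmoid`, i.e. 1 / log(t * time_rate + 1.1), so the distribution sharpens (less exploration) as `t` grows. A standalone `numpy` sketch of the same weighting; the `time_rate` value and function name are hypothetical.

import numpy as np

def boltzmann_probabilities(q_value_arr, t, time_rate=0.001):
    # Temperature term: shrinks as the number of steps t grows.
    sigmoid = 1 / np.log(t * time_rate + 1.1)
    # Softmax of the Q-values scaled by the temperature.
    factor_arr = np.exp(q_value_arr / sigmoid)
    return factor_arr / factor_arr.sum()

print(boltzmann_probabilities(np.array([0.1, 0.5, 0.2]), t=100))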
''' getter '''
if isinstance(self.__epsilon_greedy_rate, float) is True:
return self.__epsilon_greedy_rate
else:
raise TypeError("The type of __epsilon_greedy_rate must be float.") | def get_epsilon_greedy_rate(self) | getter | 3.433091 | 3.169922 | 1.083021 |
''' setter '''
if isinstance(value, float) is True:
self.__epsilon_greedy_rate = value
else:
raise TypeError("The type of __epsilon_greedy_rate must be float.") | def set_epsilon_greedy_rate(self, value) | setter | 3.559043 | 3.480772 | 1.022487 |
'''
Select action by Q(state, action).
Concrete method.
ε-greedy.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
The key of action.
'''
epsilon_greedy_flag = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))
if epsilon_greedy_flag is False:
action_key = random.choice(next_action_list)
else:
action_key = self.predict_next_action(state_key, next_action_list)
return action_key | def select_action(self, state_key, next_action_list) | Select action by Q(state, action).
Concreat method.
ε-greedy.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
The key of action. | 4.242683 | 1.771719 | 2.394671 |
def draw(self):
'''
Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples.
'''
observed_arr = self.noise_sampler.generate()
arr = self.inference(observed_arr)
return arr | Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples. | null | null | null |
|
def inference(self, observed_arr):
'''
Draws samples from the `fake` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
'''
_ = self.__lstm_model.inference(observed_arr)
return self.__lstm_model.get_feature_points() | Draws samples from the `fake` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced. | null | null | null |
|
def learn(self, grad_arr):
'''
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients.
'''
if grad_arr.ndim > 3:
grad_arr = grad_arr.reshape((
grad_arr.shape[0],
grad_arr.shape[1],
-1
))
grad_arr = grad_arr[:, -1]
elif grad_arr.ndim == 3:
grad_arr = grad_arr[:, -1]
delta_arr, _, grads_list = self.__lstm_model.hidden_back_propagate(grad_arr)
grads_list.insert(0, None)
grads_list.insert(0, None)
self.__lstm_model.optimize(
grads_list,
self.__learning_rate,
1
)
return delta_arr | Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients. | null | null | null |
|
'''
Inference Q-Value.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
real_q_arr: `np.ndarray` of real Q-Values.
'''
self.__predicted_q_arr_list.append(predicted_q_arr)
while len(self.__predicted_q_arr_list) > self.__seq_len:
self.__predicted_q_arr_list = self.__predicted_q_arr_list[1:]
while len(self.__predicted_q_arr_list) < self.__seq_len:
self.__predicted_q_arr_list.append(self.__predicted_q_arr_list[-1])
predicted_q_arr = np.array(self.__predicted_q_arr_list)
predicted_q_arr = predicted_q_arr.transpose((1, 0, 2))
self.__real_q_arr_list.append(real_q_arr)
while len(self.__real_q_arr_list) > self.__seq_len:
self.__real_q_arr_list = self.__real_q_arr_list[1:]
while len(self.__real_q_arr_list) < self.__seq_len:
self.__real_q_arr_list.append(self.__real_q_arr_list[-1])
real_q_arr = np.array(self.__real_q_arr_list)
real_q_arr = real_q_arr.transpose((1, 0, 2))
loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr)
delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr)
delta_arr, lstm_output_grads_list = self.__lstm_model.output_back_propagate(
predicted_q_arr,
delta_arr
)
delta_arr, _, lstm_hidden_grads_list = self.__lstm_model.hidden_back_propagate(
delta_arr[:, -1]
)
lstm_grads_list = lstm_output_grads_list
lstm_grads_list.extend(lstm_hidden_grads_list)
self.__lstm_model.optimize(lstm_grads_list, self.__learning_rate, 1)
self.__loss_list.append(loss) | def learn_q(self, predicted_q_arr, real_q_arr) | Inference Q-Value.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
real_q_arr: `np.ndarray` of real Q-Values. | 1.79365 | 1.659383 | 1.080914 |
'''
Inference Q-Value.
Args:
next_action_arr: `np.ndarray` of action.
Returns:
`np.ndarray` of Q-Values.
'''
q_arr = next_action_arr.reshape((next_action_arr.shape[0], -1))
self.__q_arr_list.append(q_arr)
while len(self.__q_arr_list) > self.__seq_len:
self.__q_arr_list = self.__q_arr_list[1:]
while len(self.__q_arr_list) < self.__seq_len:
self.__q_arr_list.append(self.__q_arr_list[-1])
q_arr = np.array(self.__q_arr_list)
q_arr = q_arr.transpose((1, 0, 2))
q_arr = self.__lstm_model.inference(q_arr)
return q_arr[:, -1].reshape((q_arr.shape[0], 1)) | def inference_q(self, next_action_arr) | Inference Q-Value.
Args:
next_action_arr: `np.ndarray` of action.
Returns:
`np.ndarray` of Q-Values. | 2.300344 | 1.896264 | 1.213092 |
'''
`object` of model as a function approximator,
which has `lstm_model` whose type is `pydbm.rnn.lstm_model.LSTMModel`.
'''
class Model(object):
def __init__(self, lstm_model):
self.lstm_model = lstm_model
return Model(self.__lstm_model) | def get_model(self) | `object` of model as a function approximator,
which has `lstm_model` whose type is `pydbm.rnn.lstm_model.LSTMModel`. | 8.913895 | 2.428822 | 3.670049 |
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
generated_arr = np.random.uniform(
low=0.1,
high=0.9,
size=((self.__batch_size, self.__seq_len, self.__dim))
)
if self.noise_sampler is not None:
self.noise_sampler.output_shape = generated_arr.shape
generated_arr += self.noise_sampler.generate()
return generated_arr | Generate noise samples.
Returns:
`np.ndarray` of samples. | null | null | null |
|
def compute(self, x_arr, y_arr):
'''
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances.
'''
return np.linalg.norm(x_arr - y_arr, axis=-1) | Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances. | null | null | null |
|
def vectorize(self, token_list):
'''
Tokenize token list.
Args:
token_list: The list of tokens.
Returns:
[vector of token, vector of token, vector of token, ...]
'''
sentence_list = [token_list]
test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list)
pred_arr = self.__controller.inference(test_observed_arr)
return self.__controller.get_feature_points() | Tokenize token list.
Args:
token_list: The list of tokens.
Returns:
[vector of token, vector of token, vector of token, ...] | null | null | null |
|
def learn(self, iter_n=500, k_step=10):
'''
Learning.
Args:
iter_n: The number of training iterations.
k_step: The number of learning of the `discriminator`.
'''
generative_model, discriminative_model = self.__GAN.train(
self.__true_sampler,
self.__generative_model,
self.__discriminative_model,
iter_n=iter_n,
k_step=k_step
)
self.__generative_model = generative_model
self.__discriminative_model = discriminative_model | Learning.
Args:
iter_n: The number of training iterations.
k_step: The number of learning of the `discriminator`. | null | null | null |
|
def compose(self, file_path, velocity_mean=None, velocity_std=None):
'''
Compose by learned model.
Args:
file_path: Path to generated MIDI file.
velocity_mean: Mean of velocity.
This class samples the velocity from a Gaussian distribution of
`velocity_mean` and `velocity_std`.
If `None`, the average velocity in MIDI files set to this parameter.
velocity_std: Standard deviation(SD) of velocity.
This class samples the velocity from a Gaussian distribution of
`velocity_mean` and `velocity_std`.
If `None`, the SD of velocity in MIDI files set to this parameter.
'''
generated_arr = self.__generative_model.draw()
channel = generated_arr.shape[1] // 2
generated_arr = generated_arr[:, :channel]
# @TODO(chimera0(RUM)): Fix the redundant processings.
if velocity_mean is None:
velocity_mean = np.array(
[self.__midi_df_list[i].velocity.mean() for i in range(len(self.__midi_df_list))]
).mean()
if velocity_std is None:
velocity_std = np.array(
[self.__midi_df_list[i].velocity.std() for i in range(len(self.__midi_df_list))]
).std()
generated_list = []
start = 0
end = self.__time_fraction
for batch in range(generated_arr.shape[0]):
for seq in range(generated_arr.shape[2]):
add_flag = False
for program_key in range(generated_arr.shape[1]):
pitch_key = np.argmax(generated_arr[batch, program_key, seq])
pitch_tuple = self.__bar_gram.pitch_tuple_list[pitch_key]
for pitch in pitch_tuple:
velocity = np.random.normal(
loc=velocity_mean,
scale=velocity_std
)
velocity = int(velocity)
program = self.__noise_sampler.program_list[program_key]
generated_list.append((program, start, end, pitch, velocity))
add_flag = True
if add_flag is True:
start += self.__time_fraction
end += self.__time_fraction
generated_midi_df = pd.DataFrame(
generated_list,
columns=[
"program",
"start",
"end",
"pitch",
"velocity"
]
)
pitch_arr = generated_midi_df.pitch.drop_duplicates()
df_list = []
for pitch in pitch_arr:
df = generated_midi_df[generated_midi_df.pitch == pitch]
df = df.sort_values(by=["start", "end"])
df["next_start"] = df.start.shift(-1)
df["next_end"] = df.end.shift(-1)
df.loc[df.end == df.next_start, "end"] = df.loc[df.end == df.next_start, "next_end"]
df = df.drop_duplicates(["end"])
df_list.append(df)
generated_midi_df = pd.concat(df_list)
generated_midi_df = generated_midi_df.sort_values(by=["start", "end"])
self.__midi_controller.save(
file_path=file_path,
note_df=generated_midi_df
) | Compose by learned model.
Args:
file_path: Path to generated MIDI file.
velocity_mean: Mean of velocity.
This class samples the velocity from a Gaussian distribution of
`velocity_mean` and `velocity_std`.
If `None`, the average velocity in MIDI files set to this parameter.
velocity_std: Standard deviation(SD) of velocity.
This class samples the velocity from a Gaussian distribution of
`velocity_mean` and `velocity_std`.
If `None`, the SD of velocity in MIDI files set to this parameter. | null | null | null |
|
'''
Entry Point.
Args:
url: target url.
'''
# The object of Web-Scraping.
web_scrape = WebScraping()
# Execute Web-Scraping.
document = web_scrape.scrape(url)
# The object of NLP.
nlp_base = NlpBase()
# Set tokenizer. This is a Japanese tokenizer with MeCab.
nlp_base.tokenizable_doc = MeCabTokenizer()
sentence_list = nlp_base.listup_sentence(document)
batch_size = 10
if len(sentence_list) < batch_size:
raise ValueError("The number of extracted sentences is insufficient.")
all_token_list = []
for i in range(len(sentence_list)):
nlp_base.tokenize(sentence_list[i])
all_token_list.extend(nlp_base.token)
sentence_list[i] = nlp_base.token
vectorlizable_sentence = LSTMRTRBM()
vectorlizable_sentence.learn(
sentence_list=sentence_list,
token_master_list=list(set(all_token_list)),
hidden_neuron_count=1000,
batch_size=batch_size,
learning_rate=1e-03,
seq_len=5
)
test_list = sentence_list[:batch_size]
feature_points_arr = vectorlizable_sentence.vectorize(test_list)
print("Feature points (Top 5 sentences):")
print(feature_points_arr) | def Main(url) | Entry Point.
Args:
url: target url. | 4.902741 | 4.654568 | 1.053318 |
def train_discriminator(
self,
k_step,
true_sampler,
generative_model,
discriminative_model,
d_logs_list
):
'''
Train the discriminator.
Args:
k_step: The number of learning of the discriminative_model.
true_sampler: Sampler which draws samples from the `true` distribution.
generative_model: Generator which draws samples from the `fake` distribution.
discriminative_model: Discriminator which discriminates `true` from `fake`.
d_logs_list: `list` of probabilities inferenced by the `discriminator` (mean) in the `discriminator`'s update turn.
Returns:
Tuple data. The shape is...
- Discriminator which discriminates `true` from `fake`.
- `list` of probabilities inferenced by the `discriminator` (mean) in the `discriminator`'s update turn.
'''
for k in range(k_step):
true_arr = true_sampler.draw()
generated_arr = generative_model.draw()
true_posterior_arr = discriminative_model.inference(true_arr)
generated_posterior_arr = discriminative_model.inference(generated_arr)
grad_arr = self.__gans_value_function.compute_discriminator_reward(
true_posterior_arr,
generated_posterior_arr
)
discriminative_model.learn(grad_arr)
self.__logger.debug(
"Probability inferenced by the `discriminator` (mean): " + str(generated_posterior_arr.mean())
)
self.__logger.debug(
"And update the `discriminator` by descending its stochastic gradient(means): " + str(grad_arr.mean())
)
d_logs_list.append(generated_posterior_arr.mean())
return discriminative_model, d_logs_list | Train the discriminator.
Args:
k_step: The number of learning of the discriminative_model.
true_sampler: Sampler which draws samples from the `true` distribution.
generative_model: Generator which draws samples from the `fake` distribution.
discriminative_model: Discriminator which discriminates `true` from `fake`.
d_logs_list: `list` of probabilities inferenced by the `discriminator` (mean) in the `discriminator`'s update turn.
Returns:
Tuple data. The shape is...
- Discriminator which discriminates `true` from `fake`.
- `list` of probabilities inferenced by the `discriminator` (mean) in the `discriminator`'s update turn. | null | null | null |
|
def train_generator(
self,
generative_model,
discriminative_model,
g_logs_list
):
'''
Train the generator.
Args:
generative_model: Generator which draws samples from the `fake` distribution.
discriminative_model: Discriminator which discriminates `true` from `fake`.
g_logs_list: `list` of Probabilities inferenced by the `discriminator` (mean) in the `generator`'s update turn.
Returns:
Tuple data. The shape is...
- Generator which draws samples from the `fake` distribution.
- `list` of probabilities inferenced by the `discriminator` (mean) in the `generator`'s update turn.
'''
generated_arr = generative_model.draw()
generated_posterior_arr = discriminative_model.inference(generated_arr)
grad_arr = self.__gans_value_function.compute_generator_reward(
generated_posterior_arr
)
grad_arr = discriminative_model.learn(grad_arr, fix_opt_flag=True)
grad_arr = grad_arr.reshape(generated_arr.shape)
generative_model.learn(grad_arr)
self.__logger.debug(
"Probability inferenced by the `discriminator` (mean): " + str(generated_posterior_arr.mean())
)
self.__logger.debug(
"And update the `generator` by descending its stochastic gradient(means): " + str(grad_arr.mean())
)
g_logs_list.append(generated_posterior_arr.mean())
return generative_model, g_logs_list | Train the generator.
Args:
generative_model: Generator which draws samples from the `fake` distribution.
discriminative_model: Discriminator which discriminates `true` from `fake`.
g_logs_list: `list` of Probabilities inferenced by the `discriminator` (mean) in the `generator`'s update turn.
Returns:
Tuple data. The shape is...
- Generator which draws samples from the `fake` distribution.
- `list` of probabilities inferenced by the `discriminator` (mean) in the `generator`'s update turn. | null | null | null |
|
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
generated_arr = np.random.uniform(low=self.__low, high=self.__high, size=self.__output_shape)
if self.noise_sampler is not None:
self.noise_sampler.output_shape = generated_arr.shape
generated_arr += self.noise_sampler.generate()
return generated_arr | Generate noise samples.
Returns:
`np.ndarray` of samples. | null | null | null |
|
'''
Filtering with std.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered result.
'''
if len(scored_list) > 0:
avg = np.mean([s[1] for s in scored_list])
std = np.std([s[1] for s in scored_list])
else:
avg = 0
std = 0
limiter = avg + 0.5 * std
mean_scored = [(sent_idx, score) for (sent_idx, score) in scored_list if score > limiter]
return mean_scored | def filter(self, scored_list) | Filtering with std.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered result. | 3.353141 | 2.274919 | 1.473961 |
'''
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The maximum number of inference steps.
Returns:
`list` of `np.ndarray` of an optimal route.
'''
self.__inferencing_flag = True
agent_x, agent_y = np.where(state_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
self.__create_enemy(self.__map_arr)
result_list = [(agent_x, agent_y, 0.0)]
result_val_list = [agent_x, agent_y]
for e in range(self.__enemy_num):
result_val_list.append(self.__enemy_pos_list[e][0])
result_val_list.append(self.__enemy_pos_list[e][1])
result_val_list.append(0.0)
result_list.append(tuple(result_val_list))
self.t = 0
while self.t < limit:
next_action_arr = self.extract_possible_actions(state_arr)
next_q_arr = self.function_approximator.inference_q(next_action_arr)
action_arr, q = self.select_action(next_action_arr, next_q_arr)
self.__move_enemy(action_arr)
agent_x, agent_y = np.where(action_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_val_list = [agent_x, agent_y]
for e in range(self.__enemy_num):
result_val_list.append(self.__enemy_pos_list[e][0])
result_val_list.append(self.__enemy_pos_list[e][1])
try:
result_val_list.append(q[0])
except IndexError:
result_val_list.append(q)
result_list.append(tuple(result_val_list))
# Update State.
state_arr = self.update_state(state_arr, action_arr)
# Episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
return result_list | def inference(self, state_arr, limit=1000) | Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The maximum number of inference steps.
Returns:
`list` of `np.ndarray` of an optimal route. | 2.540078 | 2.226425 | 1.140877 |
'''
Extract possible actions.
Args:
state_arr: `np.ndarray` of state.
Returns:
`np.ndarray` of actions.
The shape is: (
`batch size corresponding to each action key`,
`channel` (that is, 3 + the number of enemies),
`feature points 1`,
`feature points 2`
)
'''
agent_x, agent_y = np.where(state_arr[-1] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
possible_action_arr = None
for x, y in [
(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)
]:
next_x = agent_x + x
if next_x < 0 or next_x >= state_arr[-1].shape[1]:
continue
next_y = agent_y + y
if next_y < 0 or next_y >= state_arr[-1].shape[0]:
continue
wall_flag = False
if x > 0:
for add_x in range(1, x):
if self.__map_arr[agent_x + add_x, next_y] == self.WALL:
wall_flag = True
elif x < 0:
for add_x in range(x, 0):
if self.__map_arr[agent_x + add_x, next_y] == self.WALL:
wall_flag = True
if wall_flag is True:
continue
if y > 0:
for add_y in range(1, y):
if self.__map_arr[next_x, agent_y + add_y] == self.WALL:
wall_flag = True
elif y < 0:
for add_y in range(y, 0):
if self.__map_arr[next_x, agent_y + add_y] == self.WALL:
wall_flag = True
if wall_flag is True:
continue
if self.__map_arr[next_x, next_y] == self.WALL:
continue
if (next_x, next_y) in self.__route_memory_list:
continue
next_action_arr = np.zeros((
3 + self.__enemy_num,
state_arr[-1].shape[0],
state_arr[-1].shape[1]
))
next_action_arr[0][agent_x, agent_y] = 1
next_action_arr[1] = self.__map_arr
next_action_arr[-1][next_x, next_y] = 1
for e in range(self.__enemy_num):
enemy_state_arr = np.zeros(state_arr[0].shape)
enemy_state_arr[self.__enemy_pos_list[e][0], self.__enemy_pos_list[e][1]] = 1
next_action_arr[2 + e] = enemy_state_arr
next_action_arr = np.expand_dims(next_action_arr, axis=0)
if possible_action_arr is None:
possible_action_arr = next_action_arr
else:
possible_action_arr = np.r_[possible_action_arr, next_action_arr]
if possible_action_arr is not None:
while possible_action_arr.shape[0] < self.__batch_size:
key = np.random.randint(low=0, high=possible_action_arr.shape[0])
possible_action_arr = np.r_[
possible_action_arr,
np.expand_dims(possible_action_arr[key], axis=0)
]
else:
# Forget the oldest memory and retry recursively.
self.__route_memory_list = self.__route_memory_list[1:]
possible_action_arr = self.extract_possible_actions(state_arr)
return possible_action_arr | def extract_possible_actions(self, state_arr) | Extract possible actions.
Args:
state_arr: `np.ndarray` of state.
Returns:
`np.ndarray` of actions.
The shape is: (
`batch size corresponding to each action key`,
`channel` (that is, 3 + the number of enemies),
`feature points 1`,
`feature points 2`
) | 2.064979 | 1.75089 | 1.179388 |
'''
Compute the reward value.
Args:
state_arr: `np.ndarray` of state.
action_arr: `np.ndarray` of action.
Returns:
Reward value.
'''
if self.__check_goal_flag(action_arr) is True:
return 1.0
else:
self.__move_enemy(action_arr)
x, y = np.where(action_arr[-1] == 1)
x, y = x[0], y[0]
e_dist_sum = 0.0
for e in range(self.__enemy_num):
e_dist = np.sqrt(
((x - self.__enemy_pos_list[e][0]) ** 2) + ((y - self.__enemy_pos_list[e][1]) ** 2)
)
e_dist_sum += e_dist
e_dist_penalty = e_dist_sum / self.__enemy_num
goal_x, goal_y = self.__goal_pos
if x == goal_x and y == goal_y:
distance = 0.0
else:
distance = np.sqrt(((x - goal_x) ** 2) + ((y - goal_y) ** 2))
if (x, y) in self.__route_long_memory_list:
repeating_penalty = self.__repeating_penalty
else:
repeating_penalty = 0.0
return 1.0 - distance - repeating_penalty + e_dist_penalty | def observe_reward_value(self, state_arr, action_arr) | Compute the reward value.
Args:
state_arr: `np.ndarray` of state.
action_arr: `np.ndarray` of action.
Returns:
Reward value. | 2.668907 | 2.4209 | 1.102444 |
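A worked example of the reward above, under assumed positions: the agent at (2, 3), the goal at (5, 7), a single enemy at (2, 6), and no repetition penalty. Note that despite its name, `e_dist_penalty` acts as a bonus: the farther the agent is from the enemies, the larger the reward.

import numpy as np

x, y = 2, 3
goal_x, goal_y = 5, 7
enemy_pos_list = [(2, 6)]

distance = np.sqrt((x - goal_x) ** 2 + (y - goal_y) ** 2)        # 5.0
e_dist_penalty = np.mean(
    [np.sqrt((x - ex) ** 2 + (y - ey) ** 2) for ex, ey in enemy_pos_list]
)                                                                # 3.0
repeating_penalty = 0.0
reward = 1.0 - distance - repeating_penalty + e_dist_penalty     # -1.0
print(reward)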
'''
Check the end flag.
If this return value is `True`, the learning ends.
As a rule, the learning cannot be stopped.
This method should be overridden for concrete use cases.
Args:
state_arr: `np.ndarray` of state in `self.t`.
Returns:
bool
'''
if self.__check_goal_flag(state_arr) is True or self.__check_crash_flag(state_arr):
return True
else:
return False | def check_the_end_flag(self, state_arr) | Check the end flag.
If this return value is `True`, the learning ends.
As a rule, the learning cannot be stopped.
This method should be overridden for concrete use cases.
Args:
state_arr: `np.ndarray` of state in `self.t`.
Returns:
bool | 7.224872 | 1.792733 | 4.030088 |
def calculate(self, token_list_x, token_list_y):
'''
Calculate similarity with the Dice coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
'''
x, y = self.unique(token_list_x, token_list_y)
try:
result = 2 * len(x & y) / float(sum(map(len, (x, y))))
except ZeroDivisionError:
result = 0.0
return result | Calculate similarity with the Dice coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity. | null | null | null |
|
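For reference, the Dice coefficient computed above is 2|X ∩ Y| / (|X| + |Y|) over the two sets of unique tokens (here it is assumed that `unique` returns those sets). A short worked example with hypothetical token lists:

token_list_x = ["deep", "learning", "model", "model"]
token_list_y = ["deep", "generative", "model"]

x, y = set(token_list_x), set(token_list_y)       # unique tokens
dice = 2 * len(x & y) / float(len(x) + len(y))    # 2 * 2 / (3 + 3) ~ 0.667
print(dice)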
def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):
'''
Summarize input document.
Args:
test_arr: `np.ndarray` of observed data points.
vectorizable_token: is-a `VectorizableToken`.
sentence_list: `list` of all sentences.
limit: The number of selected abstract sentences.
Returns:
`list` of `str` of abstract sentences.
'''
if isinstance(vectorizable_token, VectorizableToken) is False:
raise TypeError()
_ = self.inference(test_arr)
score_arr = self.__encoder_decoder_controller.get_reconstruction_error()
score_arr = score_arr.reshape((
score_arr.shape[0],
-1
)).mean(axis=1)
abstract_list = []
for i in range(limit):
if self.__normal_prior_flag is True:
key = score_arr.argmin()
# Mask the selected score so it cannot be chosen again,
# keeping indices aligned with `test_arr`.
score_arr[key] = np.inf
else:
key = score_arr.argmax()
score_arr[key] = -np.inf
seq_arr = test_arr[key]
token_arr = vectorizable_token.tokenize(seq_arr.tolist())
s = " ".join(token_arr.tolist())
_s = "".join(token_arr.tolist())
for sentence in sentence_list:
if s in sentence or _s in sentence:
abstract_list.append(sentence)
abstract_list = list(set(abstract_list))
if len(abstract_list) >= limit:
break
return abstract_list | Summarize input document.
Args:
test_arr: `np.ndarray` of observed data points.
vectorizable_token: is-a `VectorizableToken`.
sentence_list: `list` of all sentences.
limit: The number of selected abstract sentences.
Returns:
`list` of `str` of abstract sentences. | null | null | null |
|
def inference(self, observed_arr):
'''
Infer the probability that observed data points are drawn from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced results.
'''
if observed_arr.ndim != 2:
observed_arr = observed_arr.reshape((observed_arr.shape[0], -1))
pred_arr = self.__nn.inference(observed_arr)
return pred_arr | Infer the probability that observed data points are drawn from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced results. | null | null | null |
|
def learn(self, grad_arr, fix_opt_flag=False):
'''
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `True`, the optimization of this model will be skipped.
Returns:
`np.ndarray` of delta or gradients.
'''
if grad_arr.ndim != 2:
grad_arr = grad_arr.reshape((grad_arr.shape[0], -1))
delta_arr = self.__nn.back_propagation(grad_arr)
if fix_opt_flag is False:
self.__nn.optimize(self.__learning_rate, 1)
return delta_arr | Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `True`, the optimization of this model will be skipped.
Returns:
`np.ndarray` of delta or gradients. | null | null | null |
|
def draw(self):
'''
Draws samples from the `true` distribution.
Returns:
`np.ndarray` of samples.
'''
if self.__conditional_flag is True:
return np.concatenate((self.__create_samples(), self.__create_samples()), axis=1)
else:
return self.__create_samples() | Draws samples from the `true` distribution.
Returns:
`np.ndarray` of samples. | null | null | null |
|
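The conditional branch above simply stacks the drawn samples and the condition along the channel axis (`axis=1`). A minimal shape sketch, assuming image-like samples of shape (batch, channel, height, width):

import numpy as np

samples_arr = np.random.normal(size=(20, 1, 28, 28))      # drawn samples
condition_arr = np.random.normal(size=(20, 1, 28, 28))    # conditioning samples
observed_arr = np.concatenate((samples_arr, condition_arr), axis=1)
print(observed_arr.shape)    # (20, 2, 28, 28)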
'''
Multi-Agent Learning.
Override.
Args:
initial_state_key: Initial state.
limit: The maximum number of learning steps per game.
game_n: The number of games.
'''
end_flag_list = [False] * len(self.q_learning_list)
for game in range(game_n):
state_key = copy.copy(initial_state_key)
self.t = 1
while self.t <= limit:
for i in range(len(self.q_learning_list)):
if game + 1 == game_n:
self.state_key_list.append((i, copy.copy(state_key)))
self.q_learning_list[i].t = self.t
next_action_list = self.q_learning_list[i].extract_possible_actions(state_key)
if len(next_action_list):
action_key = self.q_learning_list[i].select_action(
state_key=state_key,
next_action_list=next_action_list
)
reward_value = self.q_learning_list[i].observe_reward_value(state_key, action_key)
# Check.
if self.q_learning_list[i].check_the_end_flag(state_key) is True:
end_flag_list[i] = True
# Max-Q-Value in next action time.
next_state_key = self.q_learning_list[i].update_state(
state_key=state_key,
action_key=action_key
)
next_next_action_list = self.q_learning_list[i].extract_possible_actions(next_state_key)
if len(next_next_action_list):
next_action_key = self.q_learning_list[i].predict_next_action(
next_state_key,
next_next_action_list
)
next_max_q = self.q_learning_list[i].extract_q_df(next_state_key, next_action_key)
# Update Q-Value.
self.q_learning_list[i].update_q(
state_key=state_key,
action_key=action_key,
reward_value=reward_value,
next_max_q=next_max_q
)
# Update State.
state_key = next_state_key
# Episode.
self.t += 1
self.q_learning_list[i].t = self.t
if False not in end_flag_list:
break | def learn(self, initial_state_key, limit=1000, game_n=1) | Multi-Agent Learning.
Override.
Args:
initial_state_key: Initial state.
limit: The maximum number of learning steps per game.
game_n: The number of games. | 2.335116 | 2.125395 | 1.098674 |
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
generated_arr = np.random.normal(loc=self.__mu, scale=self.__sigma, size=self.__output_shape)
if self.noise_sampler is not None:
self.noise_sampler.output_shape = generated_arr.shape
generated_arr += self.noise_sampler.generate()
return generated_arr | Generate noise samples.
Returns:
`np.ndarray` of samples. | null | null | null |
|
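Both `generate` methods above follow the same pattern: draw a base array, then optionally add the output of a nested `noise_sampler` resized to the same shape. A self-contained sketch of that composition in plain NumPy (the class name here is illustrative, not one from this library):

import numpy as np

class SimpleGaussSampler:
    def __init__(self, mu=0.0, sigma=1.0, output_shape=(10, 5), noise_sampler=None):
        self.mu, self.sigma = mu, sigma
        self.output_shape = output_shape
        self.noise_sampler = noise_sampler

    def generate(self):
        arr = np.random.normal(loc=self.mu, scale=self.sigma, size=self.output_shape)
        if self.noise_sampler is not None:
            # The nested sampler perturbs the base noise with the same shape.
            self.noise_sampler.output_shape = arr.shape
            arr += self.noise_sampler.generate()
        return arr

base_sampler = SimpleGaussSampler(sigma=0.1)
sampler = SimpleGaussSampler(sigma=1.0, noise_sampler=base_sampler)
print(sampler.generate().shape)    # (10, 5)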
'''
Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
[vector of token, vector of token, vector of token]
'''
test_observed_arr, _ = self.__setup_dataset(sentence_list, self.__token_master_list, self.__sentence_mean_len)
pred_arr = self.__controller.inference(test_observed_arr)
return self.__controller.get_feature_points() | def vectorize(self, sentence_list) | Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
[vector of token, vector of token, vector of token] | 6.756166 | 3.512297 | 1.923575 |
'''
Init for Adaptive Simulated Annealing.
Args:
reannealing_per: How often this model reanneals, measured in cycles.
thermostat: Multiplicative cooling factor applied at each reannealing.
t_min: The minimum temperature.
t_default: The default temperature.
'''
self.__reannealing_per = reannealing_per
self.__thermostat = thermostat
self.__t_min = t_min
self.__t_default = t_default | def adaptive_set(
self,
reannealing_per=50,
thermostat=0.9,
t_min=0.001,
t_default=1.0
) | Init for Adaptive Simulated Annealing.
Args:
reannealing_per: How often this model reanneals, measured in cycles.
thermostat: Multiplicative cooling factor applied at each reannealing.
t_min: The minimum temperature.
t_default: The default temperature. | 3.431638 | 1.362799 | 2.518081 |
'''
Change temperature.
Override.
Args:
t: Current temperature.
Returns:
Next temperature.
'''
t = super().change_t(t)
self.__now_cycles += 1
if self.__now_cycles % self.__reannealing_per == 0:
t = t * self.__thermostat
if t < self.__t_min:
t = self.__t_default
return t | def change_t(self, t) | Change temperature.
Override.
Args:
t: Current temperature.
Returns:
Next temperature. | 7.018809 | 4.53438 | 1.547909 |
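Under the defaults from `adaptive_set`, the temperature is multiplied by `thermostat` every `reannealing_per` cycles and reset to `t_default` whenever it falls below `t_min`. A small sketch of that schedule in isolation (the base-class cooling applied by `super().change_t(t)` is omitted here):

def adaptive_schedule(cycles, t=1.0, reannealing_per=50, thermostat=0.9, t_min=0.001, t_default=1.0):
    # Trace only the reannealing part of the temperature schedule.
    history = []
    for now_cycles in range(1, cycles + 1):
        if now_cycles % reannealing_per == 0:
            t = t * thermostat
            if t < t_min:
                t = t_default
        history.append(t)
    return history

print(adaptive_schedule(200)[::50])    # temperatures sampled every 50 cycles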
def inference(self, observed_arr):
'''
Inference by the model.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced feature points.
'''
decoded_arr = self.__encoder_decoder_controller.inference(observed_arr)
encoded_arr = self.__encoder_decoder_controller.get_feature_points()
_ = self.__retrospective_encoder.inference(decoded_arr)
re_encoded_arr = self.__retrospective_encoder.get_feature_points()
self.__inferenced_tuple = (observed_arr, encoded_arr, decoded_arr, re_encoded_arr)
return re_encoded_arr | Inference by the model.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced feature points. | null | null | null |
|
def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):
'''
Summarize input document.
Args:
test_arr: `np.ndarray` of observed data points.
vectorizable_token: is-a `VectorizableToken`.
sentence_list: `list` of all sentences.
limit: The number of selected abstract sentences.
Returns:
`list` of `str` of abstract sentences.
'''
if isinstance(vectorizable_token, VectorizableToken) is False:
raise TypeError()
_ = self.inference(test_arr)
_, loss_arr, _ = self.compute_retrospective_loss()
abstract_list = []
for i in range(limit):
key = loss_arr.argmin()
# Mask the selected loss so it cannot be chosen again,
# keeping indices aligned with `test_arr`.
loss_arr[key] = np.inf
seq_arr = test_arr[key]
token_arr = vectorizable_token.tokenize(seq_arr.tolist())
s = " ".join(token_arr.tolist())
_s = "".join(token_arr.tolist())
for sentence in sentence_list:
if s in sentence or _s in sentence:
abstract_list.append(sentence)
abstract_list = list(set(abstract_list))
if len(abstract_list) >= limit:
break
return abstract_list | Summarize input document.
Args:
test_arr: `np.ndarray` of observed data points.
vectorizable_token: is-a `VectorizableToken`.
sentence_list: `list` of all sentences.
limit: The number of selected abstract sentences.
Returns:
`list` of `str` of abstract sentences. | null | null | null |