code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---|
def back_propagation(self, delta_arr):
'''
Back propagation.
Args:
delta_arr: Delta.
Returns:
Tuple data.
- re-encoder's `list` of gradients,
- decoder's `list` of gradients,
- encoder's `np.ndarray` of Delta,
- encoder's `list` of gradients.
'''
re_encoder_delta_arr, delta_hidden_arr, re_encoder_grads_list = self.__retrospective_encoder.hidden_back_propagate(
delta_arr[:, -1]
)
re_encoder_grads_list.insert(0, None)
re_encoder_grads_list.insert(0, None)
observed_arr, encoded_arr, decoded_arr, re_encoded_arr = self.__inferenced_tuple
delta_arr = self.__encoder_decoder_controller.computable_loss.compute_delta(
decoded_arr,
observed_arr
)
delta_arr[:, -1] += re_encoder_delta_arr[:, -1]
decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.__encoder_decoder_controller.back_propagation(
delta_arr
)
return re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list | Back propagation.
Args:
delta_arr: Delta.
Returns:
Tuple data.
- re-encoder's `list` of gradients,
- decoder's `list` of gradients,
- encoder's `np.ndarray` of Delta,
- encoder's `list` of gradients. | null | null | null |
|
def optimize(
self,
re_encoder_grads_list,
decoder_grads_list,
encoder_grads_list,
learning_rate,
epoch
):
'''
Optimize.
Args:
re_encoder_grads_list: re-encoder's `list` of gradients.
decoder_grads_list: decoder's `list` of gradients.
encoder_grads_list: encoder's `list` of gradients.
learning_rate: Learning rate.
epoch: Current epoch.
'''
self.__retrospective_encoder.optimize(re_encoder_grads_list, learning_rate, epoch)
self.__encoder_decoder_controller.optimize(
decoder_grads_list,
encoder_grads_list,
learning_rate,
epoch
) | Optimize.
Args:
re_encoder_grads_list: re-encoder's `list` of gradients.
decoder_grads_list: decoder's `list` of gradients.
encoder_grads_list: encoder's `list` of gradients.
learning_rate: Learning rate.
epoch: Current epoch. | null | null | null |
|
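The two methods above are meant to be chained inside a single training step: `back_propagation` produces the gradient lists that `optimize` then consumes. A minimal sketch of that chaining follows; `model`, `delta_arr`, `learning_rate` and `epoch` are hypothetical placeholders, since the surrounding training loop is not part of these rows.

```python
# Minimal sketch of chaining the two methods above in one training step.
# `model`, `delta_arr`, `learning_rate` and `epoch` are hypothetical placeholders.
re_encoder_grads, decoder_grads, encoder_delta, encoder_grads = model.back_propagation(delta_arr)
model.optimize(re_encoder_grads, decoder_grads, encoder_grads, learning_rate, epoch)
```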
def __change_inferencing_mode(self, inferencing_mode):
'''
Change the inferencing mode of the Encoder/Decoder.
Args:
inferencing_mode: Inferencing mode flag (`True` disables learning-time behaviour such as dropout).
'''
self.__encoder_decoder_controller.decoder.opt_params.inferencing_mode = inferencing_mode
self.__encoder_decoder_controller.encoder.opt_params.inferencing_mode = inferencing_mode
self.__retrospective_encoder.opt_params.inferencing_mode = inferencing_mode | Change the inferencing mode of the Encoder/Decoder.
Args:
inferencing_mode: Inferencing mode flag (`True` disables learning-time behaviour such as dropout). | null | null | null |
|
def __remember_best_params(self, encoder_best_params_list, decoder_best_params_list, re_encoder_best_params_list):
'''
Remember best parameters.
Args:
encoder_best_params_list: `list` of encoder's parameters.
decoder_best_params_list: `list` of decoder's parameters.
re_encoder_best_params_list: `list` of re-encoder's parameters.
'''
if len(encoder_best_params_list) > 0 and len(decoder_best_params_list) > 0:
self.__encoder_decoder_controller.encoder.graph.weights_lstm_hidden_arr = encoder_best_params_list[0]
self.__encoder_decoder_controller.encoder.graph.weights_lstm_observed_arr = encoder_best_params_list[1]
self.__encoder_decoder_controller.encoder.graph.lstm_bias_arr = encoder_best_params_list[2]
self.__encoder_decoder_controller.decoder.graph.weights_lstm_hidden_arr = decoder_best_params_list[0]
self.__encoder_decoder_controller.decoder.graph.weights_lstm_observed_arr = decoder_best_params_list[1]
self.__encoder_decoder_controller.decoder.graph.lstm_bias_arr = decoder_best_params_list[2]
self.__retrospective_encoder.graph.weights_lstm_hidden_arr = re_encoder_best_params_list[0]
self.__retrospective_encoder.graph.weights_lstm_observed_arr = re_encoder_best_params_list[1]
self.__retrospective_encoder.graph.lstm_bias_arr = re_encoder_best_params_list[2]
self.__logger.debug("Best params are saved.") | Remember best parameters.
Args:
encoder_best_params_list: `list` of encoder's parameters.
decoder_best_params_list: `list` of decoder's parameters.
re_encoder_best_params_list: `list` of re-encoder's parameters. | null | null | null |
|
'''
Entry Point.
Args:
url: PDF url.
'''
# The object of Web-scraping.
web_scrape = WebScraping()
# Set the object of reading PDF files.
web_scrape.readable_web_pdf = WebPDFReading()
# Execute Web-scraping.
document = web_scrape.scrape(url)
if similarity_mode == "TfIdfCosine":
# The object of `Similarity Filter`.
# The similarity observed by this object is so-called cosine similarity of Tf-Idf vectors.
similarity_filter = TfIdfCosine()
elif similarity_mode == "Dice":
# The object of `Similarity Filter`.
# The similarity observed by this object is the Dice coefficient.
similarity_filter = Dice()
elif similarity_mode == "Jaccard":
# The object of `Similarity Filter`.
# The similarity observed by this object is the Jaccard coefficient.
similarity_filter = Jaccard()
elif similarity_mode == "Simpson":
# The object of `Similarity Filter`.
# The similarity observed by this object is the Simpson coefficient.
similarity_filter = Simpson()
else:
raise ValueError()
# The object of the NLP.
nlp_base = NlpBase()
# Set tokenizer. This is a Japanese tokenizer using MeCab.
nlp_base.tokenizable_doc = MeCabTokenizer()
# Set the object of NLP.
similarity_filter.nlp_base = nlp_base
# If the similarity exceeds this value, the sentence will be cut off.
similarity_filter.similarity_limit = similarity_limit
# The object of automatic summarization.
auto_abstractor = AutoAbstractor()
# Set tokenizer. This is a Japanese tokenizer using MeCab.
auto_abstractor.tokenizable_doc = MeCabTokenizer()
# Object of abstracting and filtering document.
abstractable_doc = TopNRankAbstractor()
# Execute summarization.
result_dict = auto_abstractor.summarize(document, abstractable_doc, similarity_filter)
# Output summarized sentence.
[print(result_dict["summarize_result"][i]) for i in range(len(result_dict["summarize_result"])) if i < 3] | def Main(url, similarity_mode="TfIdfCosine", similarity_limit=0.75) | Entry Point.
Args:
url: PDF url. | 3.909132 | 3.688243 | 1.05989 |
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
observed_arr = None
for row in range(self.__batch_size):
arr = None
for d in range(self.__dim):
_arr = self.__generate_sin(
amp=self.__amp,
sampling_freq=self.__sampling_freq,
freq=self.__freq,
sec=self.__sec,
seq_len=self.__seq_len
)
_arr = np.expand_dims(_arr, axis=0)
if arr is None:
arr = _arr
else:
arr = np.r_[arr, _arr]
arr = np.expand_dims(arr, axis=0)
if observed_arr is None:
observed_arr = arr
else:
observed_arr = np.r_[observed_arr, arr]
observed_arr = observed_arr.transpose((0, 2, 1))
gauss_noise = np.random.normal(loc=self.__mu, scale=self.__sigma, size=observed_arr.shape)
observed_arr = observed_arr + gauss_noise
if self.noise_sampler is not None:
self.noise_sampler.output_shape = observed_arr.shape
observed_arr += self.noise_sampler.generate()
if self.__norm_mode == "z_score":
if observed_arr.std() != 0:
observed_arr = (observed_arr - observed_arr.mean()) / observed_arr.std()
elif self.__norm_mode == "min_max":
if (observed_arr.max() - observed_arr.min()) != 0:
observed_arr = (observed_arr - observed_arr.min()) / (observed_arr.max() - observed_arr.min())
elif self.__norm_mode == "tanh":
observed_arr = np.tanh(observed_arr)
return observed_arr | Generate noise samples.
Returns:
`np.ndarray` of samples. | null | null | null |
|
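The `generate` method above relies on a private `__generate_sin` helper whose body is not shown in this row. As a hedged illustration only, a minimal sine sampler taking the same parameters might look like the sketch below; the real helper may differ.

```python
import numpy as np

# Hypothetical stand-in for `__generate_sin` (its real body is not shown above):
# sample a sine wave and keep `seq_len` evenly spaced points from it.
def generate_sin(amp=0.5, sampling_freq=8000, freq=440, sec=1.0, seq_len=100):
    t = np.arange(0, sec, 1.0 / sampling_freq)
    wave = amp * np.sin(2 * np.pi * freq * t)
    idx = np.linspace(0, wave.shape[0] - 1, seq_len).astype(int)
    return wave[idx]
```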
'''
Entry Point.
Args:
url: target url.
'''
# The object of Web-Scraping.
web_scrape = WebScraping()
# Execute Web-Scraping.
document = web_scrape.scrape(url)
# The object of NLP.
nlp_base = NlpBase()
# Set tokenizer. This is a Japanese tokenizer using MeCab.
nlp_base.tokenizable_doc = MeCabTokenizer()
sentence_list = nlp_base.listup_sentence(document)
all_token_list = []
for i in range(len(sentence_list)):
nlp_base.tokenize(sentence_list[i])
all_token_list.extend(nlp_base.token)
sentence_list[i] = nlp_base.token
vectorlizable_sentence = EncoderDecoder()
vectorlizable_sentence.learn(
sentence_list=sentence_list,
token_master_list=list(set(all_token_list)),
epochs=60
)
test_list = sentence_list[:5]
feature_points_arr = vectorlizable_sentence.vectorize(test_list)
reconstruction_error_arr = vectorlizable_sentence.controller.get_reconstruction_error().mean()
print("Feature points (Top 5 sentences):")
print(feature_points_arr)
print("Reconstruction error(MSE):")
print(reconstruction_error_arr) | def Main(url) | Entry Point.
Args:
url: target url. | 5.043251 | 4.753647 | 1.060922 |
''' getter '''
if isinstance(self.__var_arr, np.ndarray):
return self.__var_arr
else:
raise TypeError() | def get_var_arr(self) | getter | 5.231487 | 4.770946 | 1.09653 |
''' setter '''
if isinstance(value, np.ndarray):
self.__var_arr = value
else:
raise TypeError() | def set_var_arr(self, value) | setter | 5.06493 | 5.136888 | 0.985992 |
''' getter '''
if isinstance(self.__predicted_log_arr, np.ndarray):
return self.__predicted_log_arr
else:
raise TypeError() | def get_predicted_log_arr(self) | getter | 4.519621 | 4.028389 | 1.121943 |
''' setter '''
if isinstance(value, np.ndarray):
self.__predicted_log_arr = value
else:
raise TypeError() | def set_predicted_log_arr(self, value) | setter | 4.343719 | 4.297626 | 1.010725 |
''' getter '''
if isinstance(self.__var_log_arr, np.ndarray):
return self.__var_log_arr
else:
raise TypeError() | def get_var_log_arr(self) | getter | 4.355944 | 3.946895 | 1.103638 |
''' setter '''
if isinstance(value, np.ndarray):
self.__var_log_arr = value
else:
raise TypeError() | def set_var_log_arr(self, value) | setter | 4.537316 | 4.577607 | 0.991198 |
''' getter '''
if isinstance(self.__computed_cost_arr, np.ndarray):
return self.__computed_cost_arr
else:
raise TypeError() | def get_computed_cost_arr(self) | getter | 4.524843 | 4.158702 | 1.088042 |
''' setter '''
if isinstance(value, np.ndarray):
self.__computed_cost_arr = value
else:
raise TypeError() | def set_computed_cost_arr(self, value) | setter | 4.48319 | 4.518554 | 0.992174 |
def tokenize(self, vector_list):
'''
Tokenize vector.
Args:
vector_list: The `list` of token vectors.
Returns:
token
'''
if self.computable_distance is None:
self.computable_distance = EuclidDistance()
vector_arr = np.array(vector_list)
distance_arr = np.empty_like(vector_arr)
feature_arr = self.__dbm.get_feature_point(layer_number=0)
key_arr = np.empty(vector_arr.shape[0], dtype=int)
for i in range(vector_arr.shape[0]):
distance_arr = self.computable_distance.compute(
np.expand_dims(vector_arr[i], axis=0).repeat(feature_arr.shape[0], axis=0),
feature_arr
)
key_arr[i] = distance_arr.argmin(axis=0)
return self.token_arr[key_arr] | Tokenize vector.
Args:
vector_list: The `list` of token vectors.
Returns:
token | null | null | null |
|
def set_computable_distance(self, value):
''' setter '''
if isinstance(value, ComputableDistance) is False:
raise TypeError()
self.__computable_distance = value | setter | null | null | null |
|
def write_stream(self, stream, left_chunk, right_chunk, volume):
'''
Concrete method.
Generates a monaural beat.
Args:
stream: PyAudio stream.
left_chunk: Chunk corresponding to the left sound source.
right_chunk: Chunk corresponding to the right sound source.
volume: Volume.
Returns:
void
'''
if len(left_chunk) != len(right_chunk):
raise ValueError()
for i in range(len(left_chunk)):
chunk = (left_chunk[i] + right_chunk[i]) * volume
data = struct.pack("2f", chunk, chunk)
stream.write(data) | Concrete method.
Generates a monaural beat.
Args:
stream: PyAudio stream.
left_chunk: Chunk corresponding to the left sound source.
right_chunk: Chunk corresponding to the right sound source.
volume: Volume.
Returns:
void | null | null | null |
|
def read_stream(self, left_chunk, right_chunk, volume, bit16=32767.0):
'''
Concrete method.
Reads the monaural beat to be saved to a wav file.
Args:
left_chunk: Chunk corresponding to the left sound source.
right_chunk: Chunk corresponding to the right sound source.
volume: Volume.
bit16: Condition (scale) for integer conversion.
Returns:
`list` of frames.
'''
if len(left_chunk) != len(right_chunk):
raise ValueError()
frame_list = []
for i in range(len(left_chunk)):
chunk = int((left_chunk[i] + right_chunk[i]) * bit16 * volume)
data = struct.pack("2h", chunk, chunk)
frame_list.append(data)
return frame_list | Concrete method.
Reads the monaural beat to be saved to a wav file.
Args:
left_chunk: Chunk corresponding to the left sound source.
right_chunk: Chunk corresponding to the right sound source.
volume: Volume.
bit16: Condition (scale) for integer conversion.
Returns:
`list` of frames. | null | null | null |
|
'''
Tokenize str.
Args:
sentence_str: String to be tokenized.
Returns:
[token, token, token, ...]
'''
mt = MeCab.Tagger("-Owakati")
wordlist = mt.parse(sentence_str)
token_list = wordlist.rstrip(" \n").split(" ")
return token_list | def tokenize(self, sentence_str) | Tokenize str.
Args:
sentence_str: String to be tokenized.
Returns:
[token, token, token, ...] | 4.810791 | 2.825212 | 1.702807 |
def train(
self,
true_sampler,
generative_model,
discriminative_model,
iter_n=100,
k_step=10
):
'''
Train.
Args:
true_sampler: Sampler which draws samples from the `true` distribution.
generative_model: Generator which draws samples from the `fake` distribution.
discriminative_model: Discriminator which discriminates `true` from `fake`.
iter_n: The number of training iterations.
k_step: The number of learning steps of the discriminative_model per iteration.
Returns:
Tuple data.
- trained Generator which is-a `GenerativeModel`.
- trained Discriminator which is-a `DiscriminativeModel`.
'''
if isinstance(true_sampler, TrueSampler) is False:
raise TypeError("The type of `true_sampler` must be `TrueSampler`.")
if isinstance(generative_model, AutoEncoderModel) is False:
raise TypeError("The type of `generative_model` must be `AutoEncoderModel`.")
if isinstance(discriminative_model, DiscriminativeModel) is False:
raise TypeError("The type of `discriminative_model` must be `DiscriminativeModel`.")
a_logs_list = []
d_logs_list = []
g_logs_list = []
try:
for n in range(iter_n):
self.__logger.debug("-" * 100)
self.__logger.debug("Iterations: (" + str(n+1) + "/" + str(iter_n) + ")")
self.__logger.debug("-" * 100)
self.__logger.debug(
"The `auto_encoder`'s turn."
)
self.__logger.debug("-" * 100)
generative_model, a_logs_list = self.train_auto_encoder(
generative_model,
a_logs_list
)
self.__logger.debug("-" * 100)
self.__logger.debug(
"The `discriminator`'s turn."
)
self.__logger.debug("-" * 100)
discriminative_model, d_logs_list = self.train_discriminator(
k_step,
true_sampler,
generative_model,
discriminative_model,
d_logs_list
)
self.__logger.debug("-" * 100)
self.__logger.debug(
"The `generator`'s turn."
)
self.__logger.debug("-" * 100)
generative_model, g_logs_list = self.train_generator(
generative_model,
discriminative_model,
g_logs_list
)
except KeyboardInterrupt:
print("Keyboard Interrupt.")
self.__logs_tuple = (a_logs_list, d_logs_list, g_logs_list)
return generative_model, discriminative_model | Train.
Args:
true_sampler: Sampler which draws samples from the `true` distribution.
generative_model: Generator which draws samples from the `fake` distribution.
discriminative_model: Discriminator which discriminates `true` from `fake`.
iter_n: The number of training iterations.
k_step: The number of learning steps of the discriminative_model per iteration.
Returns:
Tuple data.
- trained Generator which is-a `GenerativeModel`.
- trained Discriminator which is-a `DiscriminativeModel`. | null | null | null |
|
def train_auto_encoder(self, generative_model, a_logs_list):
'''
Train the generative model as the Auto-Encoder.
Args:
generative_model: Generator which draws samples from the `fake` distribution.
a_logs_list: `list` of the reconstruction errors.
Returns:
The tuple data. The shape is...
- Generator which draws samples from the `fake` distribution.
- `list` of the reconstruction errors.
'''
error_arr = generative_model.update()
if error_arr.ndim > 1:
error_arr = error_arr.mean()
a_logs_list.append(error_arr)
self.__logger.debug("The reconstruction error (mean): " + str(error_arr))
return generative_model, a_logs_list | Train the generative model as the Auto-Encoder.
Args:
generative_model: Generator which draws samples from the `fake` distribution.
a_logs_list: `list` of the reconstruction errors.
Returns:
The tuple data. The shape is...
- Generator which draws samples from the `fake` distribution.
- `list` of the reconstruction errors. | null | null | null |
|
def compute_discriminator_reward(
self,
true_posterior_arr,
generated_posterior_arr
):
'''
Compute discriminator's reward.
Args:
true_posterior_arr: `np.ndarray` of `true` posterior inferred by the discriminator.
generated_posterior_arr: `np.ndarray` of `fake` posterior inferred by the discriminator.
Returns:
`np.ndarray` of Gradients.
'''
grad_arr = np.log(true_posterior_arr + 1e-08) + np.log(1 - generated_posterior_arr + 1e-08)
return grad_arr | Compute discriminator's reward.
Args:
true_posterior_arr: `np.ndarray` of `true` posterior inferred by the discriminator.
generated_posterior_arr: `np.ndarray` of `fake` posterior inferred by the discriminator.
Returns:
`np.ndarray` of Gradients. | null | null | null |
|
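The reward above is the standard GAN discriminator objective, `log D(x) + log(1 - D(G(z)))`, with a small epsilon added for numerical stability. A tiny worked example:

```python
import numpy as np

# A confident discriminator (true ~0.9, fake ~0.1) gets a reward near 0,
# while an unsure one (0.6 / 0.4) is pushed further into negative values.
true_posterior_arr = np.array([0.9, 0.6])
generated_posterior_arr = np.array([0.1, 0.4])
grad_arr = np.log(true_posterior_arr + 1e-08) + np.log(1 - generated_posterior_arr + 1e-08)
print(grad_arr)  # approximately [-0.21, -1.02]
```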
'''
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
Returns:
Tuple(`np.ndarray` of action, Q-Value).
'''
key_arr = self.select_action_key(next_action_arr, next_q_arr)
return next_action_arr[key_arr], next_q_arr[key_arr] | def select_action(self, next_action_arr, next_q_arr) | Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
Returns:
Tuple(`np.ndarray` of action, Q-Value) | 3.592827 | 1.655932 | 2.169671 |
'''
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
Returns:
Key of the selected action.
'''
epsilon_greedy_flag = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))
if epsilon_greedy_flag is False:
key = np.random.randint(low=0, high=next_action_arr.shape[0])
else:
key = next_q_arr.argmax()
return key | def select_action_key(self, next_action_arr, next_q_arr) | Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
Returns:
Key of the selected action. | 2.829847 | 1.870827 | 1.512619 |
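In the selection above, `epsilon_greedy_rate` is the probability of acting greedily: a binomial draw of 1 picks the `argmax` Q-value, while a draw of 0 picks a uniformly random action index. A hedged usage sketch follows; `agent` is a hypothetical instance of the class that defines these methods.

```python
import numpy as np

next_action_arr = np.array([[0], [1], [2]])   # three candidate actions
next_q_arr = np.array([0.1, 0.7, 0.2])        # their Q-values

# With probability `epsilon_greedy_rate` this returns index 1 (the argmax),
# otherwise a random index.
key = agent.select_action_key(next_action_arr, next_q_arr)
action, q_value = agent.select_action(next_action_arr, next_q_arr)
```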
if fields is None:
fields = []
page = int(page)
pages = float('inf')
data = {
"query": query,
"page": page,
"fields": fields,
"flatten": flatten
}
count = 0
while page <= pages:
payload = self._post(self.search_path, data=data)
pages = payload['metadata']['pages']
page += 1
data["page"] = page
for result in payload["results"]:
yield result
count += 1
if max_records and count >= max_records:
return | def search(self, query, fields=None, page=1, max_records=None, flatten=True) | returns iterator over all records that match the given query | 2.613912 | 2.659616 | 0.982815 |
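A hedged usage sketch of the paginated `search` generator above; `client` is a hypothetical instance of the API class this method belongs to, and the query syntax depends entirely on the backing service.

```python
# Iterate lazily over matching records, stopping after at most 50.
for record in client.search("example query", fields=["ip"], max_records=50):
    print(record)
```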
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color) | def adjustColors(self, mode='dark') | Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoids using white & black colors. The dark
mode uses white and avoids dark blue, while the light mode uses black and
avoids yellow, to give a few examples. | 6.590861 | 6.832213 | 0.964674 |
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True | def loadBestScore(self) | load local best score from the default file | 3.142979 | 2.946549 | 1.066664 |
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True | def saveBestScore(self) | save current best score in the default file | 2.278303 | 2.234459 | 1.019622 |
self.score += pts
if self.score > self.best_score:
self.best_score = self.score | def incScore(self, pts) | update the current score by adding the specified number of points to it | 2.454606 | 2.372835 | 1.034461 |
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True | def store(self) | save the current game session's score and data for further use | 3.251953 | 2.891113 | 1.12481 |
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True | def restore(self) | restore the saved game score and data | 3.03745 | 2.680273 | 1.133261 |
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score | def loop(self) | main game loop. returns the final score. | 5.450699 | 5.054237 | 1.078441 |
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL | def getCellStr(self, x, y): # TODO: refactor regarding issue #11
c = self.board.getCell(x, y)
if c == 0 | return a string representation of the cell located at x,y. | 4.457158 | 4.650572 | 0.958411 |
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s | def boardToString(self, margins=None) | return a string representation of the current board. | 4.351248 | 4.201705 | 1.035591 |
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False | def canMove(self) | test if a move is possible | 3.056662 | 2.892266 | 1.05684 |
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v) | def addTile(self, value=None, choices=None) | add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``. | 3.884902 | 3.195222 | 1.215847 |
self.cells[y][x] = v | def setCell(self, x, y, v) | set the cell value at x,y | 5.320478 | 4.958828 | 1.072931 |
return [self.getCell(x, i) for i in self.__size_range] | def getCol(self, x) | return the x-th column, starting at 0 | 13.353764 | 12.779783 | 1.044913 |
for i in xrange(0, self.__size):
self.setCell(x, i, l[i]) | def setCol(self, x, l) | set the x-th column, starting at 0 | 4.672573 | 4.908163 | 0.952 |
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0] | def getEmptyCells(self) | return a (x, y) pair for each empty cell | 5.057689 | 3.774974 | 1.339795 |
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts) | def __collapseLineOrCol(self, line, d) | Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line | 3.450751 | 3.096504 | 1.114402 |
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl | def __moveLineOrCol(self, line, d) | Move a line or column to a given direction (d) | 3.800576 | 3.574684 | 1.063192 |
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score | def move(self, d, add_tile=True) | move and return the move score | 4.757124 | 4.604259 | 1.033201 |
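A worked example of how the three steps above combine for a LEFT move on a single line, traced by hand from the helpers shown earlier:

```python
# origin line:                        [2, 2, 0, 4]
# __moveLineOrCol(origin, LEFT)    -> [2, 2, 4, 0]   zeros pushed to the end
# __collapseLineOrCol(line, LEFT)  -> [4, 0, 4, 0]   merge 2+2, score += 4
# __moveLineOrCol(collapsed, LEFT) -> [4, 4, 0, 0]   re-pack after the merge
```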
parser = argparse.ArgumentParser(description='2048 in your terminal')
parser.add_argument('--mode', dest='mode', type=str,
default=None, help='colors mode (dark or light)')
parser.add_argument('--az', dest='azmode', action='store_true',
help='Use the letters a-z instead of numbers')
parser.add_argument('--resume', dest='resume', action='store_true',
help='restart the game from where you left')
parser.add_argument('-v', '--version', action='store_true')
parser.add_argument('-r', '--rules', action='store_true')
return vars(parser.parse_args()) | def parse_cli_args() | parse args from the CLI and return a dict | 3.196184 | 3.125101 | 1.022746 |
args = parse_cli_args()
if args['version']:
print_version_and_exit()
if args['rules']:
print_rules_and_exit()
game = Game(**args)
if args['resume']:
game.restore()
if debug:
return game
return game.loop() | def start_game(debug=False) | Start a new game. If ``debug`` is set to ``True``, the game object is
returned and the game loop isn't fired. | 4.530225 | 4.182133 | 1.083233 |
message = ObjectDict(escape.json_decode(message))
if message.command == 'hello':
handshake = {
'command': 'hello',
'protocols': [
'http://livereload.com/protocols/official-7',
],
'serverName': 'livereload-tornado',
}
self.send_message(handshake)
if message.command == 'info' and 'url' in message:
logger.info('Browser Connected: %s' % message.url)
LiveReloadHandler.waiters.add(self) | def on_message(self, message) | Handshake with livereload.js
1. client send 'hello'
2. server reply 'hello'
3. client send 'info' | 4.729206 | 4.252817 | 1.112017 |
stat_result = os.stat(abspath)
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified | def get_content_modified_time(cls, abspath) | Returns the time that ``abspath`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None. | 3.225852 | 3.226468 | 0.999809 |
data = cls.get_content(abspath)
hasher = hashlib.md5()
mtime_data = format(cls.get_content_modified_time(abspath), "%Y-%m-%d %H:%M:%S")
hasher.update(mtime_data.encode())
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest() | def get_content_version(cls, abspath) | Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1 | 2.841367 | 2.926512 | 0.970906 |
_, ext = os.path.splitext(filename)
return ext in ['.pyc', '.pyo', '.o', '.swp'] | def ignore(self, filename) | Ignore a given filename or not. | 3.858968 | 3.861065 | 0.999457 |
self._tasks[path] = {
'func': func,
'delay': delay,
'ignore': ignore,
} | def watch(self, path, func=None, delay=0, ignore=None) | Add a task to watcher.
:param path: a filepath or directory path or glob pattern
:param func: the function to be executed when file changed
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
:param ignore: A function return True to ignore a certain pattern of
filepath. | 3.060741 | 3.388331 | 0.903318 |
if self._changes:
return self._changes.pop()
# clean filepath
self.filepath = None
delays = set()
for path in self._tasks:
item = self._tasks[path]
if self.is_changed(path, item['ignore']):
func = item['func']
delay = item['delay']
if delay and isinstance(delay, float):
delays.add(delay)
if func:
name = getattr(func, 'name', None)
if not name:
name = getattr(func, '__name__', 'anonymous')
logger.info(
"Running task: {} (delay: {})".format(name, delay))
func()
if delays:
delay = max(delays)
else:
delay = None
return self.filepath, delay | def examine(self) | Check if there are changes, if true, run the given task. | 4.126311 | 3.724573 | 1.107861 |
if not output:
output = os.devnull
else:
folder = os.path.dirname(output)
if folder and not os.path.isdir(folder):
os.makedirs(folder)
if not isinstance(cmd, (list, tuple)) and not shell:
cmd = shlex.split(cmd)
def run_shell():
try:
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd,
shell=shell)
except OSError as e:
logger.error(e)
if e.errno == os.errno.ENOENT: # file (command) not found
logger.error("maybe you haven't installed %s", cmd[0])
return e
stdout, stderr = p.communicate()
if stderr:
logger.error(stderr)
return stderr
#: stdout is bytes, decode for python3
if PY3:
stdout = stdout.decode()
with open(output, mode) as f:
f.write(stdout)
return run_shell | def shell(cmd, output=None, mode='w', cwd=None, shell=False) | Execute a shell command.
You can add a shell command::
server.watch(
'style.less', shell('lessc style.less', output='style.css')
)
:param cmd: a shell command, string or list
:param output: output stdout to the given file
:param mode: only works with output, mode ``w`` means write,
mode ``a`` means append
:param cwd: set working directory before command is executed.
:param shell: if true, on Unix the executable argument specifies a
replacement shell for the default ``/bin/sh``. | 2.692058 | 2.966066 | 0.907619 |
if isinstance(func, string_types):
cmd = func
func = shell(func)
func.name = "shell: {}".format(cmd)
self.watcher.watch(filepath, func, delay, ignore=ignore) | def watch(self, filepath, func=None, delay=None, ignore=None) | Add the given filepath to the watcher list.
Once you have initialized a server, watch file changes before
serving the server::
server.watch('static/*.stylus', 'make static')
def alert():
print('foo')
server.watch('foo.txt', alert)
server.serve()
:param filepath: files to be watched, it can be a filepath,
a directory, or a glob pattern
:param func: the function to be called, it can be a string of
shell command, or any callable object without
parameters
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
:param ignore: A function return True to ignore a certain pattern of
filepath. | 5.80302 | 6.212649 | 0.934065 |
host = host or '127.0.0.1'
if root is not None:
self.root = root
self._setup_logging()
logger.info('Serving on http://%s:%s' % (host, port))
self.application(
port, host, liveport=liveport, debug=debug, live_css=live_css)
# Async open web browser after 5 sec timeout
if open_url or open_url_delay:
if open_url:
logger.warn('Use `open_url_delay` instead of `open_url`')
sleep = open_url_delay or 5
def opener():
time.sleep(sleep)
webbrowser.open('http://%s:%s' % (host, port))
threading.Thread(target=opener).start()
try:
self.watcher._changes.append(('__livereload__', restart_delay))
LiveReloadHandler.start_tasks()
add_reload_hook(lambda: IOLoop.instance().close(all_fds=True))
IOLoop.instance().start()
except KeyboardInterrupt:
logger.info('Shutting down...') | def serve(self, port=5500, liveport=None, host=None, root=None, debug=None,
open_url=False, restart_delay=2, open_url_delay=None,
live_css=True) | Start serve the server with the given port.
:param port: serve on this port, default is 5500
:param liveport: live reload on this port
:param host: serve on this hostname, default is 127.0.0.1
:param root: serve static on this root directory
:param debug: set debug mode, which autoreloads the app on code changes
via Tornado (and causes polling). Defaults to True when
``self.app`` is set, otherwise False.
:param open_url_delay: open webbrowser after the delay seconds
:param live_css: whether to use live css or force reload on css.
Defaults to True | 3.48 | 3.651794 | 0.952956 |
m = re.search(self.COORD_MATCH, address)
return (m != None) | def already_coords(self, address) | test used to see if we have coordinates or address | 8.761026 | 7.372385 | 1.188357 |
lat, lon = coords.split(',')
return {"lat": lat.strip(), "lon": lon.strip(), "bounds": {}} | def coords_string_parser(self, coords) | Pareses the address string into coordinates to match address_to_coords return object | 6.728547 | 5.677551 | 1.185114 |
base_coords = self.BASE_COORDS[self.region]
get_cord = self.COORD_SERVERS[self.region]
url_options = {
"q": address,
"lang": "eng",
"origin": "livemap",
"lat": base_coords["lat"],
"lon": base_coords["lon"]
}
response = requests.get(self.WAZE_URL + get_cord, params=url_options, headers=self.HEADERS)
for response_json in response.json():
if response_json.get('city'):
lat = response_json['location']['lat']
lon = response_json['location']['lon']
bounds = response_json['bounds'] # sometimes the coords don't match up
if bounds is not None:
bounds['top'], bounds['bottom'] = max(bounds['top'], bounds['bottom']), min(bounds['top'], bounds['bottom'])
bounds['left'], bounds['right'] = min(bounds['left'], bounds['right']), max(bounds['left'], bounds['right'])
else:
bounds = {}
return {"lat": lat, "lon": lon, "bounds": bounds}
raise WRCError("Cannot get coords for %s" % address) | def address_to_coords(self, address) | Convert address to coordinates | 3.520194 | 3.451403 | 1.019931 |
routing_server = self.ROUTING_SERVERS[self.region]
url_options = {
"from": "x:%s y:%s" % (self.start_coords["lon"], self.start_coords["lat"]),
"to": "x:%s y:%s" % (self.end_coords["lon"], self.end_coords["lat"]),
"at": time_delta,
"returnJSON": "true",
"returnGeometries": "true",
"returnInstructions": "true",
"timeout": 60000,
"nPaths": npaths,
"options": "AVOID_TRAILS:t",
}
if self.vehicle_type:
url_options["vehicleType"] = self.vehicle_type
response = requests.get(self.WAZE_URL + routing_server, params=url_options, headers=self.HEADERS)
response.encoding = 'utf-8'
response_json = self._check_response(response)
if response_json:
if 'error' in response_json:
raise WRCError(response_json.get("error"))
else:
if response_json.get("alternatives"):
return [alt['response'] for alt in response_json['alternatives']]
if npaths > 1:
return [response_json['response']]
return response_json['response']
else:
raise WRCError("empty response") | def get_route(self, npaths=1, time_delta=0) | Get route data from waze | 3.22644 | 3.228841 | 0.999256 |
start_bounds = self.start_coords['bounds']
end_bounds = self.end_coords['bounds']
def between(target, min, max):
return target > min and target < max
time = 0
distance = 0
for segment in results:
if stop_at_bounds and segment.get('path'):
x = segment['path']['x']
y = segment['path']['y']
if (
between(x, start_bounds.get('left', 0), start_bounds.get('right', 0)) or
between(x, end_bounds.get('left', 0), end_bounds.get('right', 0))
) and (
between(y, start_bounds.get('bottom', 0), start_bounds.get('top', 0)) or
between(y, end_bounds.get('bottom', 0), end_bounds.get('top', 0))
):
continue
time += segment['crossTime' if real_time else 'crossTimeWithoutRealTime']
distance += segment['length']
route_time = time / 60.0
route_distance = distance / 1000.0
return route_time, route_distance | def _add_up_route(self, results, real_time=True, stop_at_bounds=False) | Calculate route time and distance. | 2.257795 | 2.139793 | 1.055146 |
route = self.get_route(1, time_delta)
results = route['results']
route_time, route_distance = self._add_up_route(results, real_time=real_time, stop_at_bounds=stop_at_bounds)
self.log.info('Time %.2f minutes, distance %.2f km.', route_time, route_distance)
return route_time, route_distance | def calc_route_info(self, real_time=True, stop_at_bounds=False, time_delta=0) | Calculate best route info. | 3.471801 | 3.434102 | 1.010978 |
routes = self.get_route(npaths, time_delta)
results = {route['routeName']: self._add_up_route(route['results'], real_time=real_time, stop_at_bounds=stop_at_bounds) for route in routes}
route_time = [route[0] for route in results.values()]
route_distance = [route[1] for route in results.values()]
self.log.info('Time %.2f - %.2f minutes, distance %.2f - %.2f km.', min(route_time), max(route_time), min(route_distance), max(route_distance))
return results | def calc_all_routes_info(self, npaths=3, real_time=True, stop_at_bounds=False, time_delta=0) | Calculate all route infos. | 3.019667 | 3.006786 | 1.004284 |
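A hedged usage sketch of the two calculation methods above. The class name `WazeRouteCalculator` and its constructor arguments are assumptions (they are not shown in these rows); the methods themselves are used exactly as defined above.

```python
calculator = WazeRouteCalculator("Budapest, Hungary", "Gyor, Hungary", region="EU")  # hypothetical constructor
route_time, route_distance = calculator.calc_route_info()   # minutes, kilometres
all_routes = calculator.calc_all_routes_info(npaths=3)      # {route_name: (time, distance)}
```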
self._key_prefix = self._config.get('redis', 'key_prefix')
self._job_expire_interval = int(
self._config.get('sharq', 'job_expire_interval')
)
self._default_job_requeue_limit = int(
self._config.get('sharq', 'default_job_requeue_limit')
)
# initialize redis
redis_connection_type = self._config.get('redis', 'conn_type')
db = self._config.get('redis', 'db')
if redis_connection_type == 'unix_sock':
self._r = redis.StrictRedis(
db=db,
unix_socket_path=self._config.get('redis', 'unix_socket_path')
)
elif redis_connection_type == 'tcp_sock':
self._r = redis.StrictRedis(
db=db,
host=self._config.get('redis', 'host'),
port=self._config.get('redis', 'port')
)
self._load_lua_scripts() | def _initialize(self) | Read the SharQ configuration and set appropriate
variables. Open a redis connection pool and load all
the Lua scripts. | 2.326581 | 2.060877 | 1.128927 |
self._config = ConfigParser.SafeConfigParser()
self._config.read(self.config_path) | def _load_config(self) | Read the configuration file and load it into memory. | 3.978996 | 2.746223 | 1.448898 |
# load lua scripts
lua_script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'scripts/lua'
)
with open(os.path.join(
lua_script_path,
'enqueue.lua'), 'r') as enqueue_file:
self._lua_enqueue_script = enqueue_file.read()
self._lua_enqueue = self._r.register_script(
self._lua_enqueue_script)
with open(os.path.join(
lua_script_path,
'dequeue.lua'), 'r') as dequeue_file:
self._lua_dequeue_script = dequeue_file.read()
self._lua_dequeue = self._r.register_script(
self._lua_dequeue_script)
with open(os.path.join(
lua_script_path,
'finish.lua'), 'r') as finish_file:
self._lua_finish_script = finish_file.read()
self._lua_finish = self._r.register_script(self._lua_finish_script)
with open(os.path.join(
lua_script_path,
'interval.lua'), 'r') as interval_file:
self._lua_interval_script = interval_file.read()
self._lua_interval = self._r.register_script(
self._lua_interval_script)
with open(os.path.join(
lua_script_path,
'requeue.lua'), 'r') as requeue_file:
self._lua_requeue_script = requeue_file.read()
self._lua_requeue = self._r.register_script(
self._lua_requeue_script)
with open(os.path.join(
lua_script_path,
'metrics.lua'), 'r') as metrics_file:
self._lua_metrics_script = metrics_file.read()
self._lua_metrics = self._r.register_script(
self._lua_metrics_script) | def _load_lua_scripts(self) | Loads all lua scripts required by SharQ. | 1.365237 | 1.357738 | 1.005523 |
# validate all the input
if not is_valid_interval(interval):
raise BadArgumentException('`interval` has an invalid value.')
if not is_valid_identifier(job_id):
raise BadArgumentException('`job_id` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
if requeue_limit is None:
requeue_limit = self._default_job_requeue_limit
if not is_valid_requeue_limit(requeue_limit):
raise BadArgumentException('`requeue_limit` has an invalid value.')
try:
serialized_payload = serialize_payload(payload)
except TypeError as e:
raise BadArgumentException(e.message)
timestamp = str(generate_epoch())
keys = [
self._key_prefix,
queue_type
]
args = [
timestamp,
queue_id,
job_id,
'"%s"' % serialized_payload,
interval,
requeue_limit
]
self._lua_enqueue(keys=keys, args=args)
response = {
'status': 'queued'
}
return response | def enqueue(self, payload, interval, job_id,
queue_id, queue_type='default', requeue_limit=None) | Enqueues the job into the specified queue_id
of a particular queue_type | 2.284425 | 2.311676 | 0.988212 |
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
timestamp = str(generate_epoch())
keys = [
self._key_prefix,
queue_type
]
args = [
timestamp,
self._job_expire_interval
]
dequeue_response = self._lua_dequeue(keys=keys, args=args)
if len(dequeue_response) < 4:
response = {
'status': 'failure'
}
return response
queue_id, job_id, payload, requeues_remaining = dequeue_response
payload = deserialize_payload(payload[1:-1])
response = {
'status': 'success',
'queue_id': queue_id,
'job_id': job_id,
'payload': payload,
'requeues_remaining': int(requeues_remaining)
}
return response | def dequeue(self, queue_type='default') | Dequeues a job from any of the ready queues
based on the queue_type. If no job is ready,
returns a failure status. | 3.411391 | 3.341038 | 1.021057 |
if not is_valid_identifier(job_id):
raise BadArgumentException('`job_id` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
keys = [
self._key_prefix,
queue_type
]
args = [
queue_id,
job_id
]
response = {
'status': 'success'
}
finish_response = self._lua_finish(keys=keys, args=args)
if finish_response == 0:
# the finish failed.
response.update({
'status': 'failure'
})
return response | def finish(self, job_id, queue_id, queue_type='default') | Marks any dequeued job as *completed successfully*.
Any job which gets a finish will be treated as complete
and will be removed from the SharQ. | 2.964988 | 2.905088 | 1.020619 |
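A hedged sketch of the enqueue/dequeue/finish cycle built from the methods above. `SharQ` is assumed to be the class exposing them and to be constructed from a config path; the constructor and the units of `interval` are assumptions rather than something shown in these rows.

```python
queue = SharQ("/etc/sharq.conf")  # hypothetical construction from a config file

queue.enqueue(
    payload={"message": "hello"},
    interval=1000,                # per-queue rate-limit interval (units assumed)
    job_id="cea84623-be35-4368-90fa-7736570dabc4",
    queue_id="user_1001",
    queue_type="sms",
)

response = queue.dequeue(queue_type="sms")
if response["status"] == "success":
    queue.finish(
        job_id=response["job_id"],
        queue_id=response["queue_id"],
        queue_type="sms",
    )
```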
# validate all the input
if not is_valid_interval(interval):
raise BadArgumentException('`interval` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
# generate the interval key
interval_hmap_key = '%s:interval' % self._key_prefix
interval_queue_key = '%s:%s' % (queue_type, queue_id)
keys = [
interval_hmap_key,
interval_queue_key
]
args = [
interval
]
interval_response = self._lua_interval(keys=keys, args=args)
if interval_response == 0:
# the queue with the id and type does not exist.
response = {
'status': 'failure'
}
else:
response = {
'status': 'success'
}
return response | def interval(self, interval, queue_id, queue_type='default') | Updates the interval for a specific queue_id
of a particular queue type. | 3.18264 | 3.229463 | 0.985501 |
timestamp = str(generate_epoch())
# get all queue_types and requeue one by one.
# not recommended to do this entire process
# in lua as it might take long and block other
# enqueues and dequeues.
active_queue_type_list = self._r.smembers(
'%s:active:queue_type' % self._key_prefix)
for queue_type in active_queue_type_list:
# requeue all expired jobs in all queue types.
keys = [
self._key_prefix,
queue_type
]
args = [
timestamp
]
job_discard_list = self._lua_requeue(keys=keys, args=args)
# discard the jobs if any
for job in job_discard_list:
queue_id, job_id = job.split(':')
# explicitly finishing a job
# is nothing but discard.
self.finish(
job_id=job_id,
queue_id=queue_id,
queue_type=queue_type
) | def requeue(self) | Re-queues any expired job (one which does not get an expire
before the job_expiry_interval) back into their respective queue.
This function has to be run at specified intervals to ensure the
expired jobs are re-queued back. | 6.258825 | 6.170113 | 1.014378 |
if queue_id is None or not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if queue_type is None or not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
response = {
'status': 'Failure',
'message': 'No queued calls found'
}
# remove from the primary sorted set
primary_set = '{}:{}'.format(self._key_prefix, queue_type)
queued_status = self._r.zrem(primary_set, queue_id)
if queued_status:
response.update({'status': 'Success',
'message': 'Successfully removed all queued calls'})
# do a full cleanup of resources
# although this is not necessary as we don't remove resources
# while dequeue operation
job_queue_list = '{}:{}:{}'.format(self._key_prefix, queue_type, queue_id)
if queued_status and purge_all:
job_list = self._r.lrange(job_queue_list, 0, -1)
pipe = self._r.pipeline()
# clear the payload data for job_uuid
for job_uuid in job_list:
if job_uuid is None:
continue
payload_set = '{}:payload'.format(self._key_prefix)
job_payload_key = '{}:{}:{}'.format(queue_type, queue_id, job_uuid)
pipe.hdel(payload_set, job_payload_key)
# clear jobrequest interval
interval_set = '{}:interval'.format(self._key_prefix)
job_interval_key = '{}:{}'.format(queue_type, queue_id)
pipe.hdel(interval_set, job_interval_key)
# clear job_queue_list
pipe.delete(job_queue_list)
pipe.execute()
response.update({'status': 'Success',
'message': 'Successfully removed all queued calls and purged related resources'})
else:
# always delete the job queue list
self._r.delete(job_queue_list)
return response | def clear_queue(self, queue_type=None, queue_id=None, purge_all=False) | clear all entries in the queue with a particular queue_id
and queue_type. It takes an optional argument,
purge_all : if True, then it will remove the related resources
from the redis. | 3.183539 | 3.067306 | 1.037894 |
if not isinstance(identifier, basestring):
return False
if len(identifier) > 100 or len(identifier) < 1:
return False
condensed_form = set(list(identifier.lower()))
return condensed_form.issubset(VALID_IDENTIFIER_SET) | def is_valid_identifier(identifier) | Checks if the given identifier is valid or not. A valid
identifier may consist of the following characters with a
maximum length of 100 characters, minimum of 1 character.
Valid characters for an identifier,
- A to Z
- a to z
- 0 to 9
- _ (underscore)
- - (hyphen) | 3.50995 | 3.750773 | 0.935794 |
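A short illustration of the identifier rules above (assuming `VALID_IDENTIFIER_SET`, defined elsewhere, contains exactly the characters listed):

```python
print(is_valid_identifier("email_queue-1"))  # True: allowed characters, valid length
print(is_valid_identifier("email queue"))    # False: space is not an allowed character
print(is_valid_identifier("a" * 101))        # False: longer than 100 characters
```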
if not isinstance(interval, (int, long)):
return False
if interval <= 0:
return False
return True | def is_valid_interval(interval) | Checks if the given interval is valid. A valid interval
is always a positive, non-zero integer value. | 3.396055 | 2.710551 | 1.252902 |
if not isinstance(requeue_limit, (int, long)):
return False
if requeue_limit <= -2:
return False
return True | def is_valid_requeue_limit(requeue_limit) | Checks if the given requeue limit is valid.
A valid requeue limit is always greater than
or equal to -1. | 3.354657 | 3.180445 | 1.054776 |
parts = re.split('[-_.]', name)
if len(parts) == 1:
return parts
result = set()
for i in range(len(parts) - 1, 0, -1):
for s1 in '-_.':
prefix = s1.join(parts[:i])
for s2 in '-_.':
suffix = s2.join(parts[i:])
for s3 in '-_.':
result.add(s3.join([prefix, suffix]))
return list(result) | def get_search_names(name) | Return a list of values to search on when we are looking for a package
with the given name.
This is required to search on both pyramid_debugtoolbar and
pyramid-debugtoolbar. | 2.536337 | 2.585189 | 0.981103 |
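A worked example of the name expansion above; the output order may vary because the values are collected in a set.

```python
get_search_names("pyramid_debugtoolbar")
# -> ['pyramid_debugtoolbar', 'pyramid-debugtoolbar', 'pyramid.debugtoolbar'] (in some order)
```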
# We first need to retrieve the body before accessing POST or FILES since
# it can only be read once.
body = request.body
if request.POST or request.FILES:
return
new_body = BytesIO()
# Split the response in the various parts based on the boundary string
content_type, opts = parse_header(request.META['CONTENT_TYPE'].encode('ascii'))
parts = body.split(b'\n--' + opts['boundary'] + b'\n')
for part in parts:
if b'\n\n' not in part:
continue
headers, content = part.split(b'\n\n', 1)
if not headers:
continue
new_body.write(b'--' + opts['boundary'] + b'\r\n')
new_body.write(headers.replace(b'\n', b'\r\n'))
new_body.write(b'\r\n\r\n')
new_body.write(content)
new_body.write(b'\r\n')
new_body.write(b'--' + opts['boundary'] + b'--\r\n')
request._body = new_body.getvalue()
request.META['CONTENT_LENGTH'] = len(request._body)
# Clear out _files and _post so that the request object re-parses the body
if hasattr(request, '_files'):
delattr(request, '_files')
if hasattr(request, '_post'):
delattr(request, '_post') | def alter_old_distutils_request(request: WSGIRequest) | Alter the request body for compatibility with older distutils clients
Due to a bug in the Python distutils library, the request post is sent
using \n as a separator instead of the \r\n that the HTTP spec demands.
This breaks the Django form parser and therefore we have to write a
custom parser.
This bug was fixed in the Python 2.7.4 and 3.4:
http://bugs.python.org/issue10510 | 2.677828 | 2.598912 | 1.030365 |
instance = kwargs['instance']
if not hasattr(instance.distribution, 'path'):
return
if not os.path.exists(instance.distribution.path):
return
# Check if there are other instances which reference this file
is_referenced = (
instance.__class__.objects
.filter(distribution=instance.distribution)
.exclude(pk=instance._get_pk_val())
.exists())
if is_referenced:
return
try:
instance.distribution.storage.delete(instance.distribution.path)
except Exception:
logger.exception(
'Error when trying to delete file %s of package %s:' % (
instance.pk, instance.distribution.path)) | def delete_files(sender, **kwargs) | Signal callback for deleting old files when database item is deleted | 3.437159 | 3.373999 | 1.018719 |
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest() | def md5_hash_file(fh) | Return the md5 hash of the given file-object | 1.533907 | 1.543516 | 0.993774 |
module_path = '.'.join(full_class_path.split('.')[0:-1])
class_name = full_class_path.split('.')[-1]
try:
module = importlib.import_module(module_path)
except ImportError:
raise RuntimeError('Invalid specified Versio schema {}'.format(full_class_path))
try:
return getattr(module, class_name)
except AttributeError:
raise RuntimeError(
'Could not find Versio schema class {!r} inside {!r} module.'.format(
class_name, module_path)) | def get_versio_versioning_scheme(full_class_path) | Return a class based on its full path | 2.369908 | 2.298516 | 1.03106 |
field_map = {
'name': 'name__icontains',
'summary': 'releases__summary__icontains',
}
query_filter = None
for field, values in spec.items():
for value in values:
if field not in field_map:
continue
field_filter = Q(**{field_map[field]: value})
if not query_filter:
query_filter = field_filter
continue
if operator == 'and':
query_filter &= field_filter
else:
query_filter |= field_filter
result = []
packages = models.Package.objects.filter(query_filter).all()[:20]
for package in packages:
release = package.releases.all()[0]
result.append({
'name': package.name,
'summary': release.summary,
'version': release.version,
'_pypi_ordering': 0,
})
return result | def search(spec, operator='and') | Implement xmlrpc search command.
This only searches through the mirrored and private packages | 2.358917 | 2.441665 | 0.96611 |
@wraps(view_func, assigned=available_attrs(view_func))
def decorator(request, *args, **kwargs):
if settings.LOCALSHOP_USE_PROXIED_IP:
try:
ip_addr = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return HttpResponseForbidden('No permission')
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# The client's IP will be the first one.
ip_addr = ip_addr.split(",")[0].strip()
else:
ip_addr = request.META['REMOTE_ADDR']
if CIDR.objects.has_access(ip_addr, with_credentials=False):
return view_func(request, *args, **kwargs)
if not CIDR.objects.has_access(ip_addr, with_credentials=True):
return HttpResponseForbidden('No permission')
# Just return the original view because already logged in
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
user = authenticate_user(request)
if user is not None:
login(request, user)
return view_func(request, *args, **kwargs)
return HttpResponseUnauthorized(content='Authorization Required')
return decorator | def credentials_required(view_func) | This decorator should be used with views that need simple authentication
against Django's authentication framework. | 2.583453 | 2.579412 | 1.001567 |
@wraps(function)
def wrapper(self, *args, **kwargs):
key = generate_key(function, *args, **kwargs)
try:
function(self, *args, **kwargs)
finally:
logging.info('Removing key %s', key)
cache.delete(key)
return wrapper | def no_duplicates(function, *args, **kwargs) | Makes sure that no duplicated tasks are enqueued. | 2.911433 | 2.78827 | 1.044172 |
release_file = models.ReleaseFile.objects.get(pk=pk)
logging.info("Downloading %s", release_file.url)
proxies = None
if settings.LOCALSHOP_HTTP_PROXY:
proxies = settings.LOCALSHOP_HTTP_PROXY
response = requests.get(release_file.url, stream=True, proxies=proxies)
# Write the file to the django file field
filename = os.path.basename(release_file.url)
# Setting the size manually since Django can't figure it out from
# the raw HTTPResponse
if 'content-length' in response.headers:
size = int(response.headers['content-length'])
else:
size = len(response.content)
# Setting the content type by first looking at the response header
# and falling back to guessing it from the filename
default_content_type = 'application/octet-stream'
content_type = response.headers.get('content-type')
if content_type is None or content_type == default_content_type:
content_type = mimetypes.guess_type(filename)[0] or default_content_type
# Using Django's temporary file upload system to not risk memory
# overflows
with TemporaryUploadedFile(name=filename, size=size, charset='utf-8',
content_type=content_type) as temp_file:
temp_file.write(response.content)
temp_file.seek(0)
# Validate the md5 hash of the downloaded file
md5_hash = md5_hash_file(temp_file)
if md5_hash != release_file.md5_digest:
logging.error("MD5 hash mismatch: %s (expected: %s)" % (
md5_hash, release_file.md5_digest))
return
release_file.distribution.save(filename, temp_file)
release_file.save()
logging.info("Complete") | def download_file(pk) | Download the file reference in `models.ReleaseFile` with the given pk. | 2.712385 | 2.658842 | 1.020138 |
name = post_data.get('name')
version = post_data.get('version')
if settings.LOCALSHOP_VERSIONING_TYPE:
scheme = get_versio_versioning_scheme(settings.LOCALSHOP_VERSIONING_TYPE)
try:
Version(version, scheme=scheme)
except AttributeError:
response = HttpResponseBadRequest(
reason="Invalid version supplied '{!s}' for '{!s}' scheme.".format(
version, settings.LOCALSHOP_VERSIONING_TYPE))
return response
if not name or not version:
logger.info("Missing name or version for package")
return HttpResponseBadRequest('No name or version given')
try:
condition = Q()
for search_name in get_search_names(name):
condition |= Q(name__iexact=search_name)
package = repository.packages.get(condition)
# Error out when we try to override a mirrored package; for now
# not sure what the best thing to do is
if not package.is_local:
return HttpResponseBadRequest(
'%s is a pypi package!' % package.name)
try:
release = package.releases.get(version=version)
except ObjectDoesNotExist:
release = None
except ObjectDoesNotExist:
package = None
release = None
# Validate the data
form = forms.ReleaseForm(post_data, instance=release)
if not form.is_valid():
return HttpResponseBadRequest(reason=form.errors.values()[0][0])
if not package:
pkg_form = forms.PackageForm(post_data, repository=repository)
if not pkg_form.is_valid():
return HttpResponseBadRequest(
reason=six.next(six.itervalues(pkg_form.errors))[0])
package = pkg_form.save()
release = form.save(commit=False)
release.package = package
release.save()
# If this is an upload action then process the uploaded file
if files:
files = {
'distribution': files['content']
}
filename = files['distribution']._name
try:
release_file = release.files.get(filename=filename)
if settings.LOCALSHOP_RELEASE_OVERWRITE is False:
message = 'That version is already released, please bump the version.'
return HttpResponseBadRequest(message)
except ObjectDoesNotExist:
release_file = models.ReleaseFile(
release=release, filename=filename)
form_file = forms.ReleaseFileForm(
post_data, files, instance=release_file)
if not form_file.is_valid():
return HttpResponseBadRequest('ERRORS %s' % form_file.errors)
release_file = form_file.save(commit=False)
release_file.save()
return HttpResponse() | def handle_register_or_upload(post_data, files, user, repository) | Process a `register` or `upload` command issued via distutils.
This method is called with the authenticated user. | 3.36594 | 3.412494 | 0.986358 |
from .tasks import download_file
if not settings.LOCALSHOP_ISOLATED:
download_file.delay(pk=self.pk)
else:
download_file(pk=self.pk) | def download(self) | Start a celery task to download the release file from pypi.
If `settings.LOCALSHOP_ISOLATED` is True then download the file
in-process. | 5.902364 | 2.952648 | 1.999007 |
# type: (DataLoader) -> None
# Take the current loader queue, replacing it with an empty queue.
queue = loader._queue
loader._queue = []
# If a maxBatchSize was provided and the queue is longer, then segment the
# queue into multiple batches, otherwise treat the queue as a single batch.
max_batch_size = loader.max_batch_size
if max_batch_size and max_batch_size < len(queue):
chunks = get_chunks(queue, max_batch_size)
for chunk in chunks:
dispatch_queue_batch(loader, chunk)
else:
dispatch_queue_batch(loader, queue) | def dispatch_queue(loader) | Given the current state of a Loader instance, perform a batch load
from its current queue. | 3.869473 | 3.617272 | 1.069721 |
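dispatch_queue relies on a get_chunks() helper that is not shown in this row. One plausible implementation, assuming the queue is an indexable list of queued loads (a sketch, not necessarily the library's exact code):

def get_chunks(iterable_obj, chunk_size=1):
    # Split the queued loads into consecutive slices of at most
    # chunk_size items, preserving their original order.
    chunk_size = max(int(chunk_size), 1)
    return [
        iterable_obj[i:i + chunk_size]
        for i in range(0, len(iterable_obj), chunk_size)
    ]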
# type: (DataLoader, Iterable[Loader], Exception) -> None
for l in queue:
loader.clear(l.key)
l.reject(error) | def failed_dispatch(loader, queue, error) | Do not cache individual loads if the entire batch dispatch fails,
but still reject each request so they do not hang. | 7.665972 | 8.738011 | 0.877313 |
# type: (Hashable) -> Promise
if key is None:
raise TypeError(
(
"The loader.load() function must be called with a value,"
+ "but got: {}."
).format(key)
)
cache_key = self.get_cache_key(key)
# If caching and there is a cache-hit, return cached Promise.
if self.cache:
cached_promise = self._promise_cache.get(cache_key)
if cached_promise:
return cached_promise
# Otherwise, produce a new Promise for this value.
promise = Promise(partial(self.do_resolve_reject, key)) # type: ignore
# If caching, cache this promise.
if self.cache:
self._promise_cache[cache_key] = promise
return promise | def load(self, key=None) | Loads a key, returning a `Promise` for the value represented by that key. | 3.98219 | 3.953544 | 1.007246 |
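A minimal usage sketch of load(), assuming the promise library's DataLoader with a batch_load_fn that resolves keys in order, and assuming the blocking .get() accessor drains the pending batch (class name and data are illustrative):

from promise import Promise
from promise.dataloader import DataLoader

class UserLoader(DataLoader):
    # Called with every key queued via load(); must return a Promise
    # for a list of values in the same order as the keys.
    def batch_load_fn(self, keys):
        return Promise.resolve([{"id": key} for key in keys])

user_loader = UserLoader()
promise_a = user_loader.load(1)
promise_b = user_loader.load(2)
# .get() blocks until the shared batch resolves both keys.
print(promise_a.get(), promise_b.get())   # {'id': 1} {'id': 2}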
# type: (Iterable[Hashable]) -> Promise
if not isinstance(keys, Iterable):
raise TypeError(
(
"The loader.loadMany() function must be called with Array<key> "
+ "but got: {}."
).format(keys)
)
return Promise.all([self.load(key) for key in keys]) | def load_many(self, keys) | Loads multiple keys, promising an array of values
>>> a, b = await my_loader.load_many([ 'a', 'b' ])
This is equivalent to the more verbose:
>>> a, b = await Promise.all([
>>> my_loader.load('a'),
>>> my_loader.load('b')
>>> ]) | 5.39181 | 7.234755 | 0.745265 |
# type: (Hashable) -> DataLoader
cache_key = self.get_cache_key(key)
self._promise_cache.pop(cache_key, None)
return self | def clear(self, key) | Clears the value at `key` from the cache, if it exists. Returns itself for
method chaining. | 6.334344 | 4.909282 | 1.290279 |
# type: (Hashable, Any) -> DataLoader
cache_key = self.get_cache_key(key)
# Only add the key if it does not already exist.
if cache_key not in self._promise_cache:
# Cache a rejected promise if the value is an Error, in order to match
# the behavior of load(key).
if isinstance(value, Exception):
promise = Promise.reject(value)
else:
promise = Promise.resolve(value)
self._promise_cache[cache_key] = promise
return self | def prime(self, key, value) | Adds the provided key and value to the cache. If the key already exists, no
change is made. Returns itself for method chaining. | 3.930503 | 3.679384 | 1.06825 |
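A short sketch of how prime() and clear() interact with the promise cache, again assuming the blocking .get() accessor (loader and values are illustrative):

from promise import Promise
from promise.dataloader import DataLoader

class EchoLoader(DataLoader):
    # Batch function that simply echoes the requested keys back.
    def batch_load_fn(self, keys):
        return Promise.resolve(list(keys))

loader = EchoLoader()
loader.prime('a', 'seeded')             # cached; load('a') skips the batch
loader.prime('a', 'ignored')            # no effect: the key already exists
assert loader.load('a').get() == 'seeded'
loader.clear('a').prime('a', 'fresh')   # clear() returns self, so calls chain
assert loader.load('a').get() == 'fresh'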
if version is None:
from promise import VERSION
return VERSION
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version | def get_complete_version(version=None) | Returns a tuple of the promise version. If a version argument is provided,
it is checked for correctness and returned instead. | 5.382838 | 4.596626 | 1.171041 |
# type: (Promise, Optional[List[Union[Dict[str, Optional[Callable]], Tuple[Callable, Callable], Callable]]]) -> None
if not handlers:
return
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
self.done(s, f)
elif isinstance(handler, dict):
s = handler.get("success") # type: ignore
f = handler.get("failure") # type: ignore
self.done(s, f)
else:
self.done(handler) | def done_all(self, handlers=None) | :type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)] | 2.736839 | 2.800417 | 0.977297 |
# type: (Promise, List[Callable]) -> List[Promise]
if not handlers:
return []
promises = [] # type: List[Promise]
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
promises.append(self.then(s, f))
elif isinstance(handler, dict):
s = handler.get("success")
f = handler.get("failure")
promises.append(self.then(s, f))
else:
promises.append(self.then(handler))
return promises | def then_all(self, handlers=None) | Utility function which calls 'then' for each handler provided. Handler can either
be a function in which case it is used as success handler, or a tuple containing
the success and the failure handler, where each of them could be None.
:type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)]
:param handlers
:rtype : list[Promise] | 2.399293 | 2.47701 | 0.968624 |
# type: (Dict[Hashable, Promise[S]]) -> Promise[Dict[Hashable, S]]
dict_type = type(m) # type: Type[Dict]
if not m:
return cls.resolve(dict_type())
def handle_success(resolved_values):
# type: (List[S]) -> Dict[Hashable, S]
return dict_type(zip(m.keys(), resolved_values))
return cls.all(m.values()).then(handle_success) | def for_dict(cls, m) | A special function that takes a dictionary of promises
and turns them into a promise for a dictionary of values.
In other words, this turns a dictionary of promises for values
into a promise for a dictionary of values. | 3.371915 | 3.239206 | 1.04097 |
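A compact illustration of the dictionary form described above, assuming the blocking .get() accessor for brevity:

from promise import Promise

pending = {'a': Promise.resolve(1), 'b': Promise.resolve(2)}
# A dict of promises becomes a single promise for a dict of values.
assert Promise.for_dict(pending).get() == {'a': 1, 'b': 2}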
# type: (Any) -> bool
_type = obj.__class__
if obj is None or _type in BASE_TYPES:
return False
return (
issubclass(_type, Promise)
or iscoroutine(obj) # type: ignore
or is_future_like(_type)
) | def is_thenable(cls, obj) | A utility function to determine if the specified
object is a promise using "duck typing". | 4.977935 | 5.092927 | 0.977421 |
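A few spot-checks of the duck-typing rule above, assuming is_thenable is exposed as a Promise classmethod as the cls parameter suggests:

from promise import Promise

assert Promise.is_thenable(Promise.resolve(1)) is True   # real promise
assert Promise.is_thenable(42) is False                  # base type, fast path
assert Promise.is_thenable(None) is False                # explicitly excluded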
if ccc.shape[0] == 1:
cc = ccc[0]
else:
cc = ccc
# Code borrowed from obspy.signal.cross_correlation.xcorr_pick_correction
cc_curvature = np.concatenate((np.zeros(1), np.diff(cc, 2), np.zeros(1)))
cc_t = np.arange(0, len(cc) * dt, dt)
peak_index = cc.argmax()
first_sample = peak_index
# XXX this could be improved..
while first_sample > 0 and cc_curvature[first_sample - 1] <= 0:
first_sample -= 1
last_sample = peak_index
while last_sample < len(cc) - 1 and cc_curvature[last_sample + 1] <= 0:
last_sample += 1
num_samples = last_sample - first_sample + 1
if num_samples < 3:
msg = "Less than 3 samples selected for fit to cross " + \
"correlation: %s" % num_samples
raise IndexError(msg)
if num_samples < 5:
msg = "Less than 5 samples selected for fit to cross " + \
"correlation: %s" % num_samples
warnings.warn(msg)
coeffs, residual = np.polyfit(
cc_t[first_sample:last_sample + 1],
cc[first_sample:last_sample + 1], deg=2, full=True)[:2]
# check results of fit
if coeffs[0] >= 0:
msg = "Fitted parabola opens upwards!"
warnings.warn(msg)
if residual > 0.1:
msg = "Residual in quadratic fit to cross correlation maximum " + \
"larger than 0.1: %s" % residual
warnings.warn(msg)
# X coordinate of vertex of parabola gives time shift to correct
# differential pick time. Y coordinate gives maximum correlation
# coefficient.
shift = -coeffs[1] / 2.0 / coeffs[0]
coeff = (4 * coeffs[0] * coeffs[2] - coeffs[1] ** 2) / (4 * coeffs[0])
return shift, coeff | def _xcorr_interp(ccc, dt) | Interpolate around the maximum correlation value for sub-sample precision.
:param ccc: Cross-correlation array
:type ccc: numpy.ndarray
:param dt: sample interval
:type dt: float
:return: Tuple of the position of the interpolated maximum in seconds from
the start of ccc and the interpolated maximum correlation value.
:rtype: tuple | 3.004249 | 3.117203 | 0.963764 |
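The sub-sample shift and peak value come from the vertex of the fitted parabola y = a*t**2 + b*t + c, i.e. t_vertex = -b / (2a) and y_vertex = (4ac - b**2) / (4a). A tiny worked check with illustrative coefficients:

# Illustrative coefficients for a downward-opening parabola (a < 0).
a, b, c = -2.0, 0.8, 0.9
shift = -b / (2.0 * a)                   # vertex time: sub-sample shift
coeff = (4 * a * c - b ** 2) / (4 * a)   # vertex height: peak correlation
print(shift, coeff)                      # 0.2 0.98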
if len(detection_streams) == 0:
return Catalog()
if not cores:
num_cores = cpu_count()
else:
num_cores = cores
if num_cores > len(detection_streams):
num_cores = len(detection_streams)
if parallel:
pool = Pool(processes=num_cores)
debug_print('Made pool of %i workers' % num_cores, 4, debug)
# Parallel generation of events for each detection:
# results will be a list of (i, event class)
results = [pool.apply_async(
_channel_loop, (detection_streams[i], ),
{'template': template, 'min_cc': min_cc,
'detection_id': detections[i].id, 'interpolate': interpolate,
'i': i, 'pre_lag_ccsum': detections[i].detect_val,
'detect_chans': detections[i].no_chans,
'horizontal_chans': horizontal_chans,
'vertical_chans': vertical_chans})
for i in range(len(detection_streams))]
pool.close()
try:
events_list = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
events_list.sort(key=lambda tup: tup[0]) # Sort based on index.
else:
events_list = []
for i in range(len(detection_streams)):
events_list.append(_channel_loop(
detection=detection_streams[i], template=template,
min_cc=min_cc, detection_id=detections[i].id,
interpolate=interpolate, i=i,
pre_lag_ccsum=detections[i].detect_val,
detect_chans=detections[i].no_chans,
horizontal_chans=horizontal_chans,
vertical_chans=vertical_chans, debug=debug))
temp_catalog = Catalog()
temp_catalog.events = [event_tup[1] for event_tup in events_list]
return temp_catalog | def _day_loop(detection_streams, template, min_cc, detections,
horizontal_chans, vertical_chans, interpolate, cores, parallel,
debug=0) | Function to loop through multiple detections for one template.
Designed to run for the same day of data for I/O simplicity, but as you
are passing stream objects it could run for all the detections ever, as
long as you have the RAM!
:type detection_streams: list
:param detection_streams:
List of all the detections for this template that you want to compute
the optimum pick for. Individual things in list should be of
:class:`obspy.core.stream.Stream` type.
:type template: obspy.core.stream.Stream
:param template: The original template used to detect the detections passed
:type min_cc: float
:param min_cc: Minimum cross-correlation value to be allowed for a pick.
:type detections: list
:param detections:
List of detections, one per detection stream, used to attach detection
IDs and statistics to the computed events.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type debug: int
:param debug: debug output level 0-5.
:returns:
Catalog object containing Event objects for each detection created by
this template.
:rtype: :class:`obspy.core.event.Catalog` | 2.432038 | 2.386458 | 1.019099 |
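The parallel branch above tags each result with its index and sorts afterwards so the output order matches the input detections. A generic, self-contained sketch of that pattern (standard library only, not eqcorrscan code):

from multiprocessing import Pool

def work(i, x):
    # Return the index with the result so the caller can restore order.
    return i, x * x

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        results = [pool.apply_async(work, (i, x))
                   for i, x in enumerate([3, 1, 2])]
        out = [r.get() for r in results]
    out.sort(key=lambda tup: tup[0])   # sort on the index, as _day_loop does
    print([val for _, val in out])     # [9, 1, 4]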
detect_streams = []
for detection in detections:
if detection.template_name != template[0]:
continue
# Stream to be saved for new detection
detect_stream = []
max_delay = 0
for tr in detect_data:
template_tr = template[1].select(
station=tr.stats.station, channel=tr.stats.channel)
if len(template_tr) >= 1:
# Save template trace length in seconds
template_len = (
len(template_tr[0]) / template_tr[0].stats.sampling_rate)
else:
continue
# If there is no template-data match then skip the rest
# of the trace loop.
# Grab the delays for the desired template: [(sta, chan, delay)]
# Now grab the delay for the desired trace for this template
delay = delays[tr.stats.station + '.' + tr.stats.channel]
if delay > max_delay:
max_delay = delay
detect_stream.append(tr.slice(
starttime=detection.detect_time - shift_len + delay,
endtime=detection.detect_time + delay + shift_len +
template_len).copy())
for tr in detect_stream:
if len(tr.data) == 0:
msg = ('No data in %s.%s for detection at time %s' %
(tr.stats.station, tr.stats.channel,
detection.detect_time))
warnings.warn(msg)
detect_stream.remove(tr)
elif tr.stats.endtime - tr.stats.starttime < (
2 * shift_len) + template_len:
msg = ("Insufficient data for %s.%s will not use."
% (tr.stats.station, tr.stats.channel))
warnings.warn(msg)
detect_stream.remove(tr)
elif np.ma.is_masked(tr.data):
msg = ("Masked data found for %s.%s, will not use."
% (tr.stats.station, tr.stats.channel))
warnings.warn(msg)
detect_stream.remove(tr)
# Check for duplicate traces
stachans = [(tr.stats.station, tr.stats.channel)
for tr in detect_stream]
c_stachans = Counter(stachans)
for key in c_stachans.keys():
if c_stachans[key] > 1:
msg = ('Multiple channels for %s.%s, likely a data issue'
% (key[0], key[1]))
raise LagCalcError(msg)
if plot:
background = detect_data.slice(
starttime=detection.detect_time - (shift_len + 5),
endtime=detection.detect_time +
shift_len + max_delay + 7).copy()
for tr in background:
if len(tr.data) == 0:
background.remove(tr)
detection_multiplot(
stream=background, template=Stream(detect_stream),
times=[detection.detect_time - shift_len],
title='Detection Extracted')
if not len(detect_stream) == 0:
detect_stream = Stream(detect_stream).split()
# Make sure there are no masks left over.
# Create tuple of (template name, data stream)
detect_streams.append((detection.template_name,
Stream(detect_stream)))
return detect_streams | def _prepare_data(detect_data, detections, template, delays,
shift_len, plot) | Prepare data for lag_calc - reduce memory here.
:type detect_data: obspy.core.stream.Stream
:param detect_data: Stream to extract detection streams from.
:type detections: list
:param detections:
List of :class:`eqcorrscan.core.match_filter.Detection` to get
data for.
:type template: tuple
:param template: tuple of (template_name, template)
:type delays: dict
:param delays:
Dictionary of delay times in seconds keyed by sta.channel.
:type shift_len: float
:param shift_len: Shift length in seconds allowed for picking.
:type plot: bool
:param plot:
Whether to plot the data extracted or not, used for debugging.
:returns: List of detect_streams to be worked on
:rtype: list | 3.20382 | 2.978208 | 1.075754 |
parameters = []
f = open(filename, 'r')
print('Reading parameters with the following header:')
for line in f:
if line[0] == '#':
print(line.rstrip('\n').lstrip('\n'))
else:
parameter_dict = ast.literal_eval(line)
# convert the dictionary to the class
trig_par = TriggerParameters(parameter_dict)
parameters.append(trig_par)
f.close()
return parameters | def read_trigger_parameters(filename) | Read the trigger parameters into trigger_parameter classes.
:type filename: str
:param filename: Parameter file
:returns: List of :class:`eqcorrscan.utils.trigger.TriggerParameters`
:rtype: list
.. rubric:: Example
>>> from eqcorrscan.utils.trigger import read_trigger_parameters
>>> parameters = read_trigger_parameters('parameters') # doctest: +SKIP | 3.10687 | 3.831851 | 0.810802 |
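The reader treats '#' lines as header comments and parses every other line as a Python dict literal via ast.literal_eval. A round-trip sketch follows; the parameter keys are assumptions for illustration, not a documented schema:

from eqcorrscan.utils.trigger import read_trigger_parameters

# Write a minimal parameter file: one header comment plus one dict literal.
with open('parameters', 'w') as f:
    f.write("# Trigger parameters written for a quick test\n")
    f.write("{'station': 'FOZ', 'channel': 'HHZ', 'sta_len': 0.5, "
            "'lta_len': 10.0, 'thr_on': 10.0, 'thr_off': 3.0}\n")

parameters = read_trigger_parameters('parameters')
print(parameters[0])   # a TriggerParameters built from the dict literal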