code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def _write_files(s3, app, static_url_loc, static_folder, files, bucket,
                 ex_keys=None, hashes=None):
    """
    Writes all the files inside a static folder to S3.

    :param s3: boto3 S3 client
    :param app: Flask application (read for FLASKS3_* settings)
    :param static_url_loc: URL prefix under which the assets are served
    :param static_folder: local folder containing the files
    :param files: iterable of absolute file paths to upload
    :param bucket: destination S3 bucket name
    :param ex_keys: optional collection of keys to exclude from upload
    :param hashes: optional mapping of key -> previously-uploaded file hash,
                   used to skip unmodified files
    :return: list of (key, hash) tuples for the files considered this run
    """
    should_gzip = app.config.get('FLASKS3_GZIP')
    add_mime = app.config.get('FLASKS3_FORCE_MIMETYPE')
    gzip_include_only = app.config.get('FLASKS3_GZIP_ONLY_EXTS')
    new_hashes = []
    static_folder_rel = _path_to_relative_url(static_folder)
    for file_path in files:
        per_file_should_gzip = should_gzip
        asset_loc = _path_to_relative_url(file_path)
        full_key_name = _static_folder_path(static_url_loc, static_folder_rel,
                                            asset_loc)
        key_name = full_key_name.lstrip("/")
        logger.debug("Uploading {} to {} as {}".format(file_path, bucket, key_name))

        exclude = False
        if app.config.get('FLASKS3_ONLY_MODIFIED', False):
            file_hash = hash_file(file_path)
            new_hashes.append((full_key_name, file_hash))

            # skip files whose content hash matches the previous upload
            if hashes and hashes.get(full_key_name, None) == file_hash:
                exclude = True

        if ex_keys and full_key_name in ex_keys or exclude:
            logger.debug("%s excluded from upload" % key_name)
        else:
            h = {}
            # Set more custom headers if the filepath matches certain
            # configured regular expressions.
            filepath_headers = app.config.get('FLASKS3_FILEPATH_HEADERS')
            if filepath_headers:
                for filepath_regex, headers in six.iteritems(filepath_headers):
                    if re.search(filepath_regex, file_path):
                        for header, value in six.iteritems(headers):
                            h[header] = value

            # check for extension, only if there are extensions provided
            if per_file_should_gzip and gzip_include_only:
                if os.path.splitext(file_path)[1] not in gzip_include_only:
                    per_file_should_gzip = False

            if per_file_should_gzip:
                h["content-encoding"] = "gzip"

            if (add_mime or per_file_should_gzip) and "content-type" not in h:
                # When we use GZIP we have to explicitly set the content type
                # or if the mime flag is True
                (mimetype, encoding) = mimetypes.guess_type(file_path, False)
                if mimetype:
                    h["content-type"] = mimetype
                else:
                    # FIX: Logger.warn is a deprecated alias of Logger.warning
                    logger.warning("Unable to detect mimetype for %s" % file_path)

            file_mode = 'rb' if six.PY3 else 'r'
            with open(file_path, file_mode) as fp:
                merged_dicts = merge_two_dicts(get_setting('FLASKS3_HEADERS', app), h)
                metadata, params = split_metadata_params(merged_dicts)
                if per_file_should_gzip:
                    # gzip the payload into an in-memory buffer before upload
                    compressed = six.BytesIO()
                    z = gzip.GzipFile(os.path.basename(file_path), 'wb', 9, compressed)
                    z.write(fp.read())
                    z.close()
                    data = compressed.getvalue()
                else:
                    data = fp.read()

                s3.put_object(Bucket=bucket,
                              Key=key_name,
                              Body=data,
                              ACL="public-read",
                              Metadata=metadata,
                              **params)
    return new_hashes
2.866282
2.857275
1.003152
def get_setting(name, app=None):
    """
    Returns the value for `name` settings (looks into `app` config, and
    into DEFAULT_SETTINGS). Returns None if not set.

    :param name: (str) name of a setting (e.g. FLASKS3_URL_STYLE)
    :param app: Flask app instance
    :return: setting value or None
    """
    fallback = DEFAULT_SETTINGS.get(name, None)
    if not app:
        return fallback
    return app.config.get(name, fallback)
2.986238
3.093801
0.965233
def init_app(self, app):
    """
    An alternative way to pass your :class:`flask.Flask` application
    object to Flask-S3. :meth:`init_app` also takes care of some
    default `settings`_.

    :param app: the :class:`flask.Flask` application object.
    """
    # Fill in defaults for any FLASKS3_* settings the user did not set.
    for k, v in DEFAULT_SETTINGS.items():
        app.config.setdefault(k, v)

    # Disable Flask-S3 in debug mode unless FLASKS3_DEBUG explicitly opts in.
    if app.debug and not get_setting('FLASKS3_DEBUG', app):
        app.config['FLASKS3_ACTIVE'] = False

    # Only take over url_for when Flask-S3 is active.
    if get_setting('FLASKS3_ACTIVE', app):
        app.jinja_env.globals['url_for'] = url_for
    # Copy the configured Cache-Control value into the upload headers.
    if get_setting('FLASKS3_USE_CACHE_CONTROL', app) and app.config.get('FLASKS3_CACHE_CONTROL'):
        cache_control_header = get_setting('FLASKS3_CACHE_CONTROL', app)
        app.config['FLASKS3_HEADERS']['Cache-Control'] = cache_control_header
2.543677
2.65891
0.956662
def find_near_matches_no_deletions_ngrams(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * no deletions are allowed
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    max_substitutions, max_insertions, max_deletions, max_l_dist = search_params.unpacked
    # neither kind of edit may individually exceed the total distance budget
    max_substitutions = min(max_substitutions, max_l_dist)
    max_insertions = min(max_insertions, max_l_dist)

    subseq_len = len(subsequence)
    seq_len = len(sequence)

    # With at most (max_subs + max_ins) edits, at least one of
    # (max_subs + max_ins + 1) equal slices of the subsequence must
    # appear in the sequence unmodified.
    ngram_len = subseq_len // (max_substitutions + max_insertions + 1)
    if ngram_len == 0:
        raise ValueError(
            "The subsequence's length must be greater than max_subs + max_ins!"
        )

    matches = []
    matched_indexes = set()

    for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len):
        ngram_end = ngram_start + ngram_len
        subseq_before = subsequence[:ngram_start]
        subseq_before_reversed = subseq_before[::-1]
        subseq_after = subsequence[ngram_end:]
        start_index = max(0, ngram_start - max_insertions)
        end_index = min(seq_len, seq_len - (subseq_len - ngram_end) + max_insertions)
        # find exact occurrences of the current n-gram, then try to expand
        # each occurrence to a full near-match of the whole subsequence
        for index in search_exact(
                subsequence[ngram_start:ngram_end], sequence,
                start_index, end_index,
        ):
            if index - ngram_start in matched_indexes:
                continue

            seq_after = sequence[index + ngram_len:index + subseq_len - ngram_start + max_insertions]
            if seq_after.startswith(subseq_after):
                matches_after = [(0, 0)]
            else:
                matches_after = _expand(subseq_after, seq_after,
                                        max_substitutions, max_insertions,
                                        max_l_dist)
            if not matches_after:
                continue

            # tighten the remaining budgets using the cheapest "after" match
            _max_substitutions = max_substitutions - min(m[0] for m in matches_after)
            _max_insertions = max_insertions - min(m[1] for m in matches_after)
            _max_l_dist = max_l_dist - min(m[0] + m[1] for m in matches_after)

            seq_before = sequence[index - ngram_start - _max_insertions:index]
            if seq_before.endswith(subseq_before):
                matches_before = [(0, 0)]
            else:
                # expand backwards by matching the reversed prefix
                matches_before = _expand(
                    subseq_before_reversed, seq_before[::-1],
                    _max_substitutions, _max_insertions, _max_l_dist,
                )

            # combine every before/after expansion pair that fits the budgets
            for (subs_before, ins_before) in matches_before:
                for (subs_after, ins_after) in matches_after:
                    if (
                            subs_before + subs_after <= max_substitutions and
                            ins_before + ins_after <= max_insertions and
                            subs_before + subs_after + ins_before + ins_after <= max_l_dist
                    ):
                        matches.append(Match(
                            start=index - ngram_start - ins_before,
                            end=index - ngram_start + subseq_len + ins_after,
                            dist=subs_before + subs_after + ins_before + ins_after,
                        ))
                        matched_indexes |= set(range(
                            index - ngram_start - ins_before,
                            index - ngram_start - ins_before + max_insertions + 1,
                        ))

    return sorted(matches, key=lambda match: match.start)
2.031502
2.053609
0.989235
def find_near_matches(subsequence, sequence, max_substitutions=None,
                      max_insertions=None, max_deletions=None, max_l_dist=None):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
      (a.k.a. the Levenshtein distance)
    """
    # Bundle the limits and delegate to the implementation best suited
    # to this particular combination of limits.
    params = LevenshteinSearchParams(max_substitutions, max_insertions,
                                     max_deletions, max_l_dist)
    chosen = choose_search_func(params)
    return chosen(subsequence, sequence, params)
2.831794
2.940629
0.962989
def find_near_matches_substitutions(subsequence, sequence, max_substitutions):
    """Find near-matches of the subsequence in the sequence.

    This chooses a suitable fuzzy search implementation according to the
    given parameters.

    Returns a list of fuzzysearch.Match objects describing the matching parts
    of the sequence.
    """
    _check_arguments(subsequence, sequence, max_substitutions)

    # no substitutions allowed: plain exact search is enough
    if max_substitutions == 0:
        return [
            Match(start, start + len(subsequence), 0)
            for start in search_exact(subsequence, sequence)
        ]

    # long enough subsequence relative to the limit: n-gram method pays off
    if len(subsequence) // (max_substitutions + 1) >= 3:
        return find_near_matches_substitutions_ngrams(
            subsequence, sequence, max_substitutions,
        )

    return find_near_matches_substitutions_lp(
        subsequence, sequence, max_substitutions,
    )
2.518086
2.639465
0.954014
def find_near_matches_substitutions_lp(subsequence, sequence, max_substitutions):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the number of character substitutions must be less than max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)
    # materialize the lazy results of the underlying implementation
    found = _find_near_matches_substitutions_lp(subsequence, sequence,
                                                max_substitutions)
    return list(found)
2.571219
3.704321
0.694114
def find_near_matches_substitutions_ngrams(subsequence, sequence, max_substitutions):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the number of character substitutions must be less than max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)

    # keep only the first match reported for each distinct start index
    first_by_start = {}
    for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
                                                         max_substitutions):
        first_by_start.setdefault(match.start, match)

    return sorted(first_by_start.values(), key=lambda m: m.start)
2.177779
2.62714
0.828954
def has_near_match_substitutions_ngrams(subsequence, sequence, max_substitutions):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the number of character substitutions must be less than max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)
    # any() short-circuits on the first match found
    return any(True for _ in _find_near_matches_substitutions_ngrams(
        subsequence, sequence, max_substitutions))
3.011635
4.061581
0.741493
def find_near_matches_generic(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    # if the limitations are so strict that only exact matches are allowed,
    # use search_exact()
    if search_params.max_l_dist == 0:
        return [
            Match(start, start + len(subsequence), 0)
            for start in search_exact(subsequence, sequence)
        ]

    # if the n-gram length would be at least 3, use the n-gram search method
    if len(subsequence) // (search_params.max_l_dist + 1) >= 3:
        return find_near_matches_generic_ngrams(subsequence, sequence, search_params)

    # use the linear programming search method, de-duplicating overlapping
    # matches by keeping only the best match of each group
    found = find_near_matches_generic_linear_programming(subsequence, sequence,
                                                         search_params)
    groups = group_matches(found)
    return sorted(get_best_match_in_group(group) for group in groups)
3.418004
3.769496
0.906754
def find_near_matches_generic_ngrams(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')

    found = list(_find_near_matches_generic_ngrams(subsequence, sequence,
                                                   search_params))

    # don't return overlapping matches; instead, group overlapping matches
    # together and return the best match from each group
    return sorted(get_best_match_in_group(group)
                  for group in group_matches(found))
3.621292
4.361744
0.830239
def has_near_match_generic_ngrams(subsequence, sequence, search_params):
    """search for near-matches of subsequence in sequence

    This searches for near-matches, where the nearly-matching parts of the
    sequence must meet the following limitations (relative to the
    subsequence):

    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * and the maximum allowed number of character deletions
    * the total number of substitutions, insertions and deletions
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')
    # any() short-circuits on the first match found
    return any(True for _ in _find_near_matches_generic_ngrams(
        subsequence, sequence, search_params))
3.68688
5.321532
0.692823
def _expand(subsequence, sequence, max_l_dist):
    """Expand a partial match of a Levenshtein search.

    An expansion must begin at the beginning of the sequence, which makes
    this much simpler than a full search, and allows for greater
    optimization.
    """
    # A long sub-sequence with a relatively small max. distance is handled
    # by an implementation specifically optimized for that case.
    is_long = len(subsequence) > max(max_l_dist * 2, 10)
    expander = _expand_long if is_long else _expand_short
    return expander(subsequence, sequence, max_l_dist)
3.933938
4.004438
0.982395
def _py_expand_short(subsequence, sequence, max_l_dist):
    """Straightforward implementation of partial match expansion.

    Returns a ``(score, length)`` tuple for the best expansion found, or
    ``(None, None)`` when no expansion fits within ``max_l_dist``.
    """
    # The following diagram shows the score calculation step.
    #
    # Each new score is the minimum of:
    #  * a OR a + 1 (substitution, if needed)
    #  * b + 1 (deletion, i.e. skipping a sequence character)
    #  * c + 1 (insertion, i.e. skipping a sub-sequence character)
    #
    # a -- +1 -> c
    #
    # |  \       |
    # |   \      |
    # +1  +1?    +1
    # |     \    |
    # v      ⌟   v
    #
    # b -- +1 -> scores[subseq_index]
    subseq_len = len(subsequence)
    if subseq_len == 0:
        # empty subsequence matches trivially with zero edits
        return (0, 0)

    # Initialize the scores array with values for just skipping sub-sequence
    # chars.
    scores = list(range(1, subseq_len + 1))

    min_score = subseq_len
    min_score_idx = -1

    for seq_index, char in enumerate(sequence):
        # calculate scores, one for each character in the sub-sequence
        a = seq_index
        c = a + 1
        for subseq_index in range(subseq_len):
            b = scores[subseq_index]
            c = scores[subseq_index] = min(
                a + (char != subsequence[subseq_index]),
                b + 1,
                c + 1,
            )
            a = b

        # keep the minimum score found for matches of the entire sub-sequence
        if c <= min_score:
            min_score = c
            min_score_idx = seq_index
        # bail early when it is impossible to find a better expansion
        elif min(scores) >= min_score:
            break

    return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None)
4.423315
4.348483
1.017209
def _py_expand_long(subsequence, sequence, max_l_dist):
    """Partial match expansion, optimized for long sub-sequences.

    Returns a ``(score, length)`` tuple for the best expansion found, or
    ``(None, None)`` when no expansion fits within ``max_l_dist``.
    """
    # The additional optimization in this version is to limit the part of
    # the sub-sequence inspected for each sequence character.  The start and
    # end of the iteration are limited to the range where the scores are
    # smaller than the maximum allowed distance.  Additionally, once a good
    # expansion has been found, the range is further reduced to where the
    # scores are smaller than the score of the best expansion found so far.
    subseq_len = len(subsequence)
    if subseq_len == 0:
        # empty subsequence matches trivially with zero edits
        return (0, 0)

    # Initialize the scores array with values for just skipping sub-sequence
    # chars.
    scores = list(range(1, subseq_len + 1))

    min_score = subseq_len
    min_score_idx = -1
    max_good_score = max_l_dist
    new_needle_idx_range_start = 0
    new_needle_idx_range_end = subseq_len - 1

    for seq_index, char in enumerate(sequence):
        # calculate scores, one for each character in the sub-sequence
        needle_idx_range_start = new_needle_idx_range_start
        needle_idx_range_end = min(subseq_len, new_needle_idx_range_end + 1)

        a = seq_index
        c = a + 1

        if c <= max_good_score:
            new_needle_idx_range_start = 0
            new_needle_idx_range_end = 0
        else:
            # None marks "no promising sub-sequence index seen yet this row"
            new_needle_idx_range_start = None
            new_needle_idx_range_end = -1

        for subseq_index in range(needle_idx_range_start, needle_idx_range_end):
            b = scores[subseq_index]
            c = scores[subseq_index] = min(
                a + (char != subsequence[subseq_index]),
                b + 1,
                c + 1,
            )
            a = b

            if c <= max_good_score:
                if new_needle_idx_range_start is None:
                    new_needle_idx_range_start = subseq_index
                new_needle_idx_range_end = max(
                    new_needle_idx_range_end,
                    subseq_index + 1 + (max_good_score - c),
                )

        # bail early when it is impossible to find a better expansion
        if new_needle_idx_range_start is None:
            break

        # keep the minimum score found for matches of the entire sub-sequence
        if needle_idx_range_end == subseq_len and c <= min_score:
            min_score = c
            min_score_idx = seq_index
            if min_score < max_good_score:
                max_good_score = min_score

    return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None)
3.063248
2.99278
1.023546
def find_near_matches_levenshtein(subsequence, sequence, max_l_dist):
    """Find near-matches of the subsequence in the sequence.

    This chooses a suitable fuzzy search implementation according to the
    given parameters.

    Returns a list of fuzzysearch.Match objects describing the matching parts
    of the sequence.
    """
    if not subsequence:
        raise ValueError('Given subsequence is empty!')
    if max_l_dist < 0:
        raise ValueError('Maximum Levenshtein distance must be >= 0!')

    # distance of zero means only exact occurrences qualify
    if max_l_dist == 0:
        return [
            Match(start, start + len(subsequence), 0)
            for start in search_exact(subsequence, sequence)
        ]

    # prefer the n-gram method when the n-grams would be long enough
    if len(subsequence) // (max_l_dist + 1) >= 3:
        return find_near_matches_levenshtein_ngrams(subsequence, sequence,
                                                    max_l_dist)

    # fall back to the linear programming method, keeping only the best
    # match from each group of overlapping matches
    candidates = find_near_matches_levenshtein_linear_programming(
        subsequence, sequence, max_l_dist)
    groups = group_matches(candidates)
    return sorted(get_best_match_in_group(group) for group in groups)
2.603953
2.606822
0.9989
def _randomized_hands():
    '''
    :return: 4 hands, obtained by shuffling the 28 dominoes used in this
             variation of the game, and distributing them evenly
    '''
    # build the full double-six set: one domino per unordered pair (i, j)
    deck = [dominoes.Domino(i, j) for i in range(7) for j in range(i, 7)]
    random.shuffle(deck)
    # deal 7 dominoes to each of the 4 players
    return [dominoes.Hand(deck[start:start + 7]) for start in range(0, 28, 7)]
2.90048
1.693126
1.713092
''' Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3. :param int player: player to be validated :return: None :raises NoSuchPlayerException: if the player is invalid ''' valid_players = range(4) if player not in valid_players: valid_players = ', '.join(str(p) for p in valid_players) raise dominoes.NoSuchPlayerException('{} is not a valid player. Valid players' ' are: {}'.format(player, valid_players))
def _validate_player(player)
Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3. :param int player: player to be validated :return: None :raises NoSuchPlayerException: if the player is invalid
3.263401
2.168051
1.505223
''' :param Domino d: domino to find within the hands :param list hands: hands to find domino in :return: index of the hand that contains the specified domino :raises NoSuchDominoException: if no hand contains the specified domino ''' for i, hand in enumerate(hands): if d in hand: return i raise dominoes.NoSuchDominoException('{} is not in any hand!'.format(d))
def _domino_hand(d, hands)
:param Domino d: domino to find within the hands :param list hands: hands to find domino in :return: index of the hand that contains the specified domino :raises NoSuchDominoException: if no hand contains the specified domino
3.617105
1.96033
1.845151
''' :param list hands: hands for which to compute the remaining points :return: a list indicating the amount of points remaining in each of the input hands ''' points = [] for hand in hands: points.append(sum(d.first + d.second for d in hand)) return points
def _remaining_points(hands)
:param list hands: hands for which to compute the remaining points :return: a list indicating the amount of points remaining in each of the input hands
6.294684
2.611466
2.410402
''' Validates hands, based on values that are supposed to be missing from them. :param list hands: list of Hand objects to validate :param list missing: list of sets that indicate the values that are supposed to be missing from the respective Hand objects :return: True if no Hand objects contain values that they are supposed to be missing; False otherwise ''' for h, m in zip(hands, missing): for value in m: if dominoes.hand.contains_value(h, value): return False return True
def _validate_hands(hands, missing)
Validates hands, based on values that are supposed to be missing from them. :param list hands: list of Hand objects to validate :param list missing: list of sets that indicate the values that are supposed to be missing from the respective Hand objects :return: True if no Hand objects contain values that they are supposed to be missing; False otherwise
5.691922
2.069452
2.750449
''' Helper function for Game.all_possible_hands(). Given a set of elements and the sizes of partitions, yields all possible partitionings of the elements into partitions of the provided sizes. :param set elements: a set of elements to partition. :param list sizes: a list of sizes for the partitions. The sum of the sizes should equal the length of the set of elements. :yields: a tuple of tuples, each inner tuple corresponding to a partition. ''' try: # get the size of the current partition size = sizes[0] except IndexError: # base case: no more sizes left yield () return # don't include the current size in the recursive calls sizes = sizes[1:] # iterate over all possible partitions of the current size for partition in itertools.combinations(elements, size): # recursive case: pass down the remaining elements and the remaining sizes for other_partitions in _all_possible_partitionings(elements.difference(partition), sizes): # put results together and yield up yield (partition,) + other_partitions
def _all_possible_partitionings(elements, sizes)
Helper function for Game.all_possible_hands(). Given a set of elements and the sizes of partitions, yields all possible partitionings of the elements into partitions of the provided sizes. :param set elements: a set of elements to partition. :param list sizes: a list of sizes for the partitions. The sum of the sizes should equal the length of the set of elements. :yields: a tuple of tuples, each inner tuple corresponding to a partition.
4.080972
2.232587
1.827912
def new(cls, starting_domino=None, starting_player=0):
    '''
    :param Domino starting_domino: the domino that should be played to
                                   start the game. The player with this
                                   domino in their hand will play first.
    :param int starting_player: the player that should play first. This
                                value is ignored if a starting domino is
                                provided. Players are referred to by their
                                indexes: 0, 1, 2, and 3. 0 and 2 are on
                                one team, and 1 and 3 are on another team.
    :return: a new game, initialized according to starting_domino and
             starting_player
    :raises NoSuchDominoException: if starting_domino is invalid
    :raises NoSuchPlayerException: if starting_player is invalid
    '''
    board = dominoes.Board()
    hands = _randomized_hands()
    moves = []
    result = None
    if starting_domino is None:
        # no starting domino: the given player simply moves first
        _validate_player(starting_player)
        valid_moves = tuple((d, True) for d in hands[starting_player])
        game = cls(board, hands, moves, starting_player,
                   valid_moves, starting_player, result)
    else:
        # starting domino given: whoever holds it plays it immediately
        starting_player = _domino_hand(starting_domino, hands)
        valid_moves = ((starting_domino, True),)
        game = cls(board, hands, moves, starting_player,
                   valid_moves, starting_player, result)
        game.make_move(*valid_moves[0])
    return game
3.534615
1.995844
1.770988
def _update_valid_moves(self):
    '''
    Updates self.valid_moves according to the latest game state. Assumes
    that the board and all hands are non-empty.
    '''
    left_end = self.board.left_end()
    right_end = self.board.right_end()

    # a move is a (domino, left) pair: True plays on the board's left end,
    # False on its right end
    moves = []
    for d in self.hands[self.turn]:
        if left_end in d:
            moves.append((d, True))

        # do not double count moves if both of the board's ends have
        # the same value, and a domino can be placed on both of them
        if right_end in d and left_end != right_end:
            moves.append((d, False))

    self.valid_moves = tuple(moves)
5.072947
3.627358
1.398524
def make_move(self, d, left):
    '''
    Plays a domino from the hand of the player whose turn it is onto one
    end of the game board. If the game does not end, the turn is advanced
    to the next player who has a valid move.

    Making a move is transactional - if the operation fails at any point,
    the game will return to its state before the operation began.

    :param Domino d: domino to be played
    :param bool left: end of the board on which to play the domino (True
                      for left, False for right)
    :return: a Result object if the game ends; None otherwise
    :raises GameOverException: if the game has already ended
    :raises NoSuchDominoException: if the domino to be played is not in
                                   the hand of the player whose turn it is
    :raises EndsMismatchException: if the domino cannot be placed on the
                                   specified position in the board
    '''
    if self.result is not None:
        raise dominoes.GameOverException('Cannot make a move - the game is over!')

    # remove the domino from the hand; remember its position so it can be
    # restored if placing it on the board fails
    i = self.hands[self.turn].play(d)

    try:
        self.board.add(d, left)
    except dominoes.EndsMismatchException as error:
        # return the domino to the hand if it cannot be placed on the board
        self.hands[self.turn].draw(d, i)

        raise error

    # record the move
    self.moves.append((d, left))

    # check if the game ended due to a player running out of dominoes
    if not self.hands[self.turn]:
        self.valid_moves = ()
        # pow(-1, turn) gives positive points to team 0/2, negative to 1/3
        self.result = dominoes.Result(
            self.turn, True,
            pow(-1, self.turn) * sum(_remaining_points(self.hands))
        )
        return self.result

    # advance the turn to the next player with a valid move.
    # if no player has a valid move, the game is stuck. also,
    # record all the passes.
    passes = []
    stuck = True
    for _ in self.hands:
        self.turn = next_player(self.turn)
        self._update_valid_moves()
        if self.valid_moves:
            self.moves.extend(passes)
            stuck = False
            break
        else:
            passes.append(None)

    if stuck:
        # the game is blocked: the team with fewer remaining points wins;
        # equal points is a tie (0)
        player_points = _remaining_points(self.hands)
        team_points = [player_points[0] + player_points[2],
                       player_points[1] + player_points[3]]

        if team_points[0] < team_points[1]:
            self.result = dominoes.Result(self.turn, False, sum(team_points))
        elif team_points[0] == team_points[1]:
            self.result = dominoes.Result(self.turn, False, 0)
        else:
            self.result = dominoes.Result(self.turn, False, -sum(team_points))

        return self.result
3.511288
2.180461
1.610342
def missing_values(self):
    '''
    Computes the values that must be missing from each player's hand,
    based on when they have passed.

    :return: a list of sets, each one containing the values that must be
             missing from the corresponding player's hand
    '''
    missing = [set() for _ in self.hands]

    # replay the game from the beginning
    board = dominoes.SkinnyBoard()
    current = self.starting_player
    for move in self.moves:
        if move is None:
            # a pass means the player could match neither end of the board
            missing[current].add(board.left_end())
            missing[current].add(board.right_end())
        else:
            # not a pass - update the board
            board.add(*move)

        # move on to the next player
        current = next_player(current)

    return missing
6.446658
3.713077
1.736203
def random_possible_hands(self):
    '''
    Returns random possible hands for all players, given the information
    known by the player whose turn it is. This information includes the
    current player's hand, the sizes of the other players' hands, and the
    moves played by every player, including the passes.

    :return: a list of possible Hand objects, corresponding to each player
    '''
    # compute values that must be missing from
    # each hand, to rule out impossible hands
    missing = self.missing_values()

    # get the dominoes that are in all of the other hands. note that, even
    # though we are 'looking' at the other hands to get these dominoes, we
    # are not 'cheating' because these dominoes could also be computed by
    # subtracting the dominoes that have been played (which are public
    # knowledge) and the dominoes in the current player's hand from the
    # initial set of dominoes
    other_dominoes = [d for p, h in enumerate(self.hands)
                      for d in h if p != self.turn]

    # rejection sampling: draw random hands until a consistent set is found
    while True:
        # generator for a shuffled shallow copy of other_dominoes
        shuffled_dominoes = (d for d in random.sample(other_dominoes,
                                                      len(other_dominoes)))

        # generate random hands by partitioning the shuffled dominoes according
        # to how many dominoes need to be in each of the other hands. since we
        # know the current player's hand, we just use a shallow copy of it
        hands = []
        for player, hand in enumerate(self.hands):
            if player != self.turn:
                # draw len(hand) dominoes from the shuffled pool
                hand = [next(shuffled_dominoes) for _ in hand]
            hands.append(dominoes.Hand(hand))

        # only return the hands if they are possible, according to the values we
        # know to be missing from each hand. if the hands are not possible, try
        # generating random hands again
        if _validate_hands(hands, missing):
            return hands
5.630251
4.112571
1.369034
def all_possible_hands(self):
    '''
    Yields all possible hands for all players, given the information known
    by the player whose turn it is. This information includes the current
    player's hand, the sizes of the other players' hands, and the moves
    played by every player, including the passes.

    :yields: a list of possible Hand objects, corresponding to each player
    '''
    # compute values that must be missing from
    # each hand, to rule out impossible hands
    missing = self.missing_values()

    # get the dominoes that are in all of the other hands. note that, even
    # though we are 'looking' at the other hands to get these dominoes, we
    # are not 'cheating' because these dominoes could also be computed by
    # subtracting the dominoes that have been played (which are public
    # knowledge) and the dominoes in the current player's hand from the
    # initial set of dominoes
    other_dominoes = {d for p, h in enumerate(self.hands)
                      for d in h if p != self.turn}

    # get the lengths of all the other hands, so
    # that we know how many dominoes to place in each
    other_hand_lengths = [len(h) for p, h in enumerate(self.hands)
                          if p != self.turn]

    # iterate over all possible hands that the other players might have
    for possible_hands in _all_possible_partitionings(other_dominoes,
                                                      other_hand_lengths):
        # given possible hands for all players, this is a generator for
        # tuples containing the dominoes that are in the other players' hands
        possible_hands = (h for h in possible_hands)

        # build a list containing possible hands for all players. since we
        # know the current player's hand, we just use a shallow copy of it
        hands = []
        for player, hand in enumerate(self.hands):
            if player != self.turn:
                hand = next(possible_hands)
            hands.append(dominoes.Hand(hand))

        # only yield the hands if they are possible, according
        # to the values we know to be missing from each hand
        if _validate_hands(hands, missing):
            yield hands
5.643676
4.140264
1.36312
def random(game):
    '''
    Prefers moves randomly.

    :param Game game: game to play
    :return: None
    '''
    # sorting by a random key produces a uniformly shuffled ordering
    shuffled = sorted(game.valid_moves, key=lambda _: rand.random())
    game.valid_moves = tuple(shuffled)
def random(game)
Prefers moves randomly. :param Game game: game to play :return: None
9.096744
4.388214
2.072994
def bota_gorda(game):
    '''
    Prefers to play dominoes with higher point values.

    :param Game game: game to play
    :return: None
    '''
    def heaviness(move):
        # a move is a (domino, left) pair; negate so heavier sorts first
        return -(move[0].first + move[0].second)

    game.valid_moves = tuple(sorted(game.valid_moves, key=heaviness))
def bota_gorda(game)
Prefers to play dominoes with higher point values. :param Game game: game to play :return: None
7.004725
3.206808
2.184329
def make_moves(game, player=dominoes.players.identity):
    '''
    For each of a Game object's valid moves, yields a tuple containing
    the move and the Game object obtained by playing the move on the
    original Game object. The original Game object will be modified.

    :param Game game: the game to make moves on
    :param callable player: a player to call on the game before making
                            any moves, to determine the order in which
                            they get made. The identity player is the
                            default.
    '''
    # a finished game has no moves to make
    if game.result is not None:
        return

    # let the player reorder game.valid_moves
    player(game)

    # every move except the last is played on a deep copy, so that
    # the original game remains usable for the remaining moves
    for move in game.valid_moves[:-1]:
        copied_game = copy.deepcopy(game)
        copied_game.make_move(*move)
        yield move, copied_game

    # the final move may consume the original game itself
    final_move = game.valid_moves[-1]
    game.make_move(*final_move)
    yield final_move, game
def make_moves(game, player=dominoes.players.identity)
For each of a Game object's valid moves, yields a tuple containing the move and the Game object obtained by playing the move on the original Game object. The original Game object will be modified. :param Game game: the game to make moves on :param callable player: a player to call on the game before making any moves, to determine the order in which they get made. The identity player is the default.
4.750806
2.25172
2.109856
''' Runs minimax search with alpha-beta pruning on the provided game. :param Game game: game to search :param tuple alpha_beta: a tuple of two floats that indicate the initial values of alpha and beta, respectively. The default is (-inf, inf). :param callable player: player used to sort moves to be explored. Ordering better moves first may significantly reduce the amount of moves that need to be explored. The identity player is the default. ''' # base case - game is over if game.result is not None: return [], game.result.points if game.turn % 2: # minimizing player best_value = float('inf') op = operator.lt update = lambda ab, v: (ab[0], min(ab[1], v)) else: # maximizing player best_value = -float('inf') op = operator.gt update = lambda ab, v: (max(ab[0], v), ab[1]) # recursive case - game is not over for move, new_game in make_moves(game, player): moves, value = alphabeta(new_game, alpha_beta, player) if op(value, best_value): best_value = value best_moves = moves best_moves.insert(0, move) alpha_beta = update(alpha_beta, best_value) if alpha_beta[1] <= alpha_beta[0]: # alpha-beta cutoff break return best_moves, best_value
def alphabeta(game, alpha_beta=(-float('inf'), float('inf')), player=dominoes.players.identity)
Runs minimax search with alpha-beta pruning on the provided game. :param Game game: game to search :param tuple alpha_beta: a tuple of two floats that indicate the initial values of alpha and beta, respectively. The default is (-inf, inf). :param callable player: player used to sort moves to be explored. Ordering better moves first may significantly reduce the amount of moves that need to be explored. The identity player is the default.
3.115687
1.942918
1.603612
def next_game(self):
    '''
    Advances the series to the next game, if possible. Also updates
    each team's score with points from the most recently completed
    game.

    :return: the next game, if the previous game did not end the
             series; None otherwise
    :raises SeriesOverException: if the series has already ended
    :raises GameInProgressException: if the last game has not yet
                                     finished
    '''
    if self.is_over():
        raise dominoes.SeriesOverException(
            'Cannot start a new game - series ended with a score of {} to {}'.format(*self.scores)
        )

    result = self.games[-1].result
    if result is None:
        raise dominoes.GameInProgressException(
            'Cannot start a new game - the latest one has not finished!'
        )

    # update each team's score with the points from the previous game:
    # non-negative points credit team 0, negative points credit team 1
    if result.points >= 0:
        self.scores[0] += result.points
    else:
        self.scores[1] -= result.points

    # return None if the series is now over
    if self.is_over():
        return

    # determine the starting player for the next game.
    # NOTE(review): pow(-1, result.player) * result.points appears to
    # encode which team the points favor relative to the ending player
    # -- confirm against the Result conventions defined elsewhere.
    if result.won or pow(-1, result.player) * result.points > 0:
        # the winner (or the player whose team won points) starts
        starting_player = result.player
    elif not result.points:
        # a tied game keeps the same starting player
        starting_player = self.games[-1].starting_player
    else:
        # pow(-1, result.player) * result.points < 0
        starting_player = dominoes.game.next_player(result.player)

    # start the next game
    self.games.append(dominoes.Game.new(starting_player=starting_player))
    return self.games[-1]
def next_game(self)
Advances the series to the next game, if possible. Also updates each team's score with points from the most recently completed game. :return: the next game, if the previous game did not end the series; None otherwise :raises SeriesOverException: if the series has already ended :raises GameInProgressException: if the last game has not yet finished
3.966881
2.640654
1.502234
def from_board(cls, board):
    '''
    Build a SkinnyBoard that represents the given Board.

    :param Board board: board to represent
    :return: SkinnyBoard to represent the given Board
    '''
    size = len(board)
    if size:
        left, right = board.left_end(), board.right_end()
    else:
        # an empty board has no ends
        left = right = None

    return cls(left, right, size)
def from_board(cls, board)
:param Board board: board to represent :return: SkinnyBoard to represent the given Board
5.937696
3.074159
1.931486
def _add_left(self, d):
    '''
    Adds the provided domino to the left end of the board.

    :param Domino d: domino to add
    :return: None
    :raises EndsMismatchException: if the values do not match
    '''
    if not self:
        # the first domino placed sets both ends
        self._left, self._right = d.first, d.second
    elif self.left_end() == d.second:
        self._left = d.first
    elif self.left_end() == d.first:
        self._left = d.second
    else:
        raise dominoes.EndsMismatchException(
            '{} cannot be added to the left of'
            ' the board - values do not match!'.format(d)
        )

    self._length += 1
def _add_left(self, d)
Adds the provided domino to the left end of the board. :param Domino d: domino to add :return: None :raises EndsMismatchException: if the values do not match
4.077258
2.603912
1.56582
def _add_right(self, d):
    '''
    Adds the provided domino to the right end of the board.

    :param Domino d: domino to add
    :return: None
    :raises EndsMismatchException: if the values do not match
    '''
    if not self:
        # the first domino placed sets both ends
        self._left, self._right = d.first, d.second
    elif self.right_end() == d.first:
        self._right = d.second
    elif self.right_end() == d.second:
        self._right = d.first
    else:
        raise dominoes.EndsMismatchException(
            '{} cannot be added to the right of'
            ' the board - values do not match!'.format(d)
        )

    self._length += 1
def _add_right(self, d)
Adds the provided domino to the right end of the board. :param Domino d: domino to add :return: None :raises EndsMismatchException: if the values do not match
3.937112
2.519742
1.562506
def add(self, d, left):
    '''
    Adds the provided domino to the specified end of the board.

    :param Domino d: domino to add
    :param bool left: end of the board to which to add the
                      domino (True for left, False for right)
    :return: None
    :raises EndsMismatchException: if the values do not match
    '''
    # dispatch to the appropriate private helper
    target = self._add_left if left else self._add_right
    target(d)
def add(self, d, left)
Adds the provided domino to the specified end of the board. :param Domino d: domino to add :param bool left: end of the board to which to add the domino (True for left, False for right) :return: None :raises EndsMismatchException: if the values do not match
4.765769
1.574555
3.026741
def _add_left(self, d):
    '''
    Adds the provided domino to the left end of the board.

    :param Domino d: domino to add
    :return: None
    :raises EndsMismatchException: if the values do not match
    '''
    if not self:
        # an empty board accepts any domino as-is
        self.board.append(d)
        return

    end = self.left_end()
    if end == d.first:
        # flip the domino so the matching value faces the board
        self.board.appendleft(d.inverted())
    elif end == d.second:
        self.board.appendleft(d)
    else:
        raise dominoes.EndsMismatchException(
            '{} cannot be added to the left of'
            ' the board - values do not match!'.format(d)
        )
def _add_left(self, d)
Adds the provided domino to the left end of the board. :param Domino d: domino to add :return: None :raises EndsMismatchException: if the values do not match
4.318119
2.650497
1.629173
def _add_right(self, d):
    '''
    Adds the provided domino to the right end of the board.

    :param Domino d: domino to add
    :return: None
    :raises EndsMismatchException: if the values do not match
    '''
    if not self:
        # an empty board accepts any domino as-is
        self.board.append(d)
        return

    end = self.right_end()
    if end == d.first:
        self.board.append(d)
    elif end == d.second:
        # flip the domino so the matching value faces the board
        self.board.append(d.inverted())
    else:
        raise dominoes.EndsMismatchException(
            '{} cannot be added to the right of'
            ' the board - values do not match!'.format(d)
        )
def _add_right(self, d)
Adds the provided domino to the right end of the board. :param Domino d: domino to add :return: None :raises EndsMismatchException: if the values do not match
4.18626
2.589842
1.616415
def play(self, d):
    '''
    Removes a domino from the hand.

    :param Domino d: domino to remove from the hand
    :return: the index within the hand of the played domino
    :raises NoSuchDominoException: if the domino is not in the hand
    '''
    try:
        position = self._dominoes.index(d)
    except ValueError:
        raise dominoes.NoSuchDominoException('Cannot make move -'
                                             ' {} is not in hand!'.format(d))

    del self._dominoes[position]
    return position
def play(self, d)
Removes a domino from the hand. :param Domino d: domino to remove from the hand :return: the index within the hand of the played domino :raises NoSuchDominoException: if the domino is not in the hand
3.923547
2.214634
1.771645
def draw(self, d, i=None):
    '''
    Adds a domino to the hand.

    :param Domino d: domino to add to the hand
    :param int i: index at which to add the domino;
                  by default adds to the end of the hand
    :return: None
    '''
    if i is not None:
        self._dominoes.insert(i, d)
    else:
        self._dominoes.append(d)
def draw(self, d, i=None)
Adds a domino to the hand. :param Domino d: domino to add to the hand :param int i: index at which to add the domino; by default adds to the end of the hand :return: None
2.939864
1.606302
1.830206
def find_exec_in_path(exec_name):
    '''
    Search the directories listed in the PATH environment variable for
    files named ``exec_name``.

    :param str exec_name: name of the executable to look for
    :return: yields the full path of every match
    :rtype: Iterator[str]
    '''
    if "PATH" not in os.environ:
        return
    # use os.pathsep (':' on POSIX, ';' on Windows) instead of a
    # hard-coded ':', and os.path.join instead of '%s/%s', so the
    # lookup also works on Windows
    for p in os.environ["PATH"].split(os.pathsep):
        pp = os.path.join(p, exec_name)
        if os.path.exists(pp):
            yield pp
def find_exec_in_path(exec_name)
:param str exec_name: :return: yields full paths :rtype: list[str]
2.579399
2.267019
1.137793
def get_pkg_config(pkg_config_args, *packages):
    '''
    Query pkg-config for the given packages.

    :param str|list[str] pkg_config_args: e.g. "--cflags"
    :param str packages: e.g. "python3"
    :return: the whitespace-split pkg-config output, or None if no
        pkg-config invocation succeeded
    :rtype: list[str]|None
    '''
    if not isinstance(pkg_config_args, (tuple, list)):
        pkg_config_args = [pkg_config_args]
    # There may be multiple pkg-config binaries on the PATH; return the
    # output of the first one that succeeds.
    for pp in find_exec_in_path("pkg-config"):
        cmd = [pp] + list(pkg_config_args) + list(packages)
        try:
            # use a context manager for the devnull handle -- the
            # original open() leaked a file descriptor per attempt
            with open(os.devnull, "wb") as devnull:
                out = check_output(cmd, stderr=devnull)
            return out.strip().decode("utf8").split()
        except CalledProcessError:
            pass
    return None
def get_pkg_config(pkg_config_args, *packages)
:param str|list[str] pkg_config_args: e.g. "--cflags" :param str packages: e.g. "python3" :rtype: list[str]|None
3.355584
3.309635
1.013883
def pkgconfig(*packages, **kw):
    '''
    Build :class:`Extension` keyword arguments from pkg-config output.

    :param str packages: list like 'libavutil', 'libavformat', ...
    :rtype: dict[str]
    '''
    kw = kw.copy()
    # map pkg-config flag prefixes to the matching Extension kwargs
    flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
    cmd = ["pkg-config", "--libs", "--cflags"] + list(packages)
    for token in check_output(cmd).split():
        dest = flag_map.get(token[:2])
        if dest is None:
            # anything unrecognized goes to extra_link_args
            kw.setdefault('extra_link_args', []).append(token)
        else:
            kw.setdefault(dest, []).append(token[2:])
    return kw
def pkgconfig(*packages, **kw)
:param str packages: list like 'libavutil', 'libavformat', ... :rtype: dict[str]
3.356579
3.261928
1.029017
def create_centerlines(src, dst, density=0.5):
    '''
    Create centerlines and save them to an ESRI Shapefile.

    Reads polygons from the `src` ESRI Shapefile, creates Centerline
    objects with the specified `density` parameter and writes them to
    the `dst` ESRI Shapefile.

    Only Polygon features are converted to centerlines. Features of
    different types are skipped.

    Args:
        src (str): source ESRI Shapefile
        dst (str): destination ESRI Shapefile
        density (:obj:`float`, optional): the Centerline's density.
            Defaults to 0.5 (meters)

    Returns:
        None
    '''
    # NOTE(review): this except/raise is a no-op re-raise; it only
    # documents that a ValueError may propagate from get_ogr_driver
    try:
        DST_DRIVER = get_ogr_driver(filepath=dst)
    except ValueError:
        raise

    with fiona.Env():
        with fiona.open(src, mode='r') as source:
            # output schema mirrors the input, but geometries become
            # MultiLineStrings
            SCHEMA = source.schema.copy()
            SCHEMA.update({'geometry': 'MultiLineString'})
            with fiona.open(
                    dst,
                    mode='w',
                    driver=DST_DRIVER.GetName(),
                    schema=SCHEMA,
                    crs=source.crs,
                    encoding=source.encoding) as destination:
                for record in source:
                    geom = record.get('geometry')
                    input_geom = shape(geom)

                    # skip non-polygonal features
                    if not is_valid_geometry(geometry=input_geom):
                        continue

                    attributes = record.get('properties')
                    try:
                        centerline_obj = Centerline(
                            input_geom=input_geom,
                            interpolation_dist=density,
                            **attributes
                        )
                    except RuntimeError as err:
                        # too few ridges etc. -- best-effort: skip record
                        logging.warning(
                            "ignoring record that could not be processed: %s",
                            err
                        )
                        continue

                    # keep only attributes that existed on the input
                    # record (Centerline copies them onto itself)
                    centerline_dict = {
                        'geometry': mapping(centerline_obj),
                        'properties': {
                            k: v
                            for k, v in centerline_obj.__dict__.items()
                            if k in attributes.keys()
                        }
                    }

                    destination.write(centerline_dict)
    return None
def create_centerlines(src, dst, density=0.5)
Create centerlines and save the to an ESRI Shapefile. Reads polygons from the `src` ESRI Shapefile, creates Centerline objects with the specified `density` parameter and writes them to the `dst` ESRI Shapefile. Only Polygon features are converted to centerlines. Features of different types are skipped. Args: src (str): source ESRI Shapefile dst (str): destination ESRI Shapefile density (:obj:`float`, optional): the Centerline's density. Defaults to 0.5 (meters) Returns: None
2.901786
2.845424
1.019808
def _create_centerline(self):
    '''
    Calculate the centerline of a polygon.

    Densifies the border of a polygon which is then represented by a
    Numpy array of points necessary for creating the Voronoi diagram.
    Once the diagram is created, the ridges located within the polygon
    are joined and returned.

    Returns:
        a union of lines that are located within the polygon.

    Raises:
        RuntimeError: if fewer than two ridges were produced
    '''
    border = array(self.__densify_border())

    vor = Voronoi(border)
    vertex = vor.vertices

    lst_lines = []
    for j, ridge in enumerate(vor.ridge_vertices):
        # a -1 vertex index marks a ridge extending to infinity; skip it
        if -1 not in ridge:
            # coordinates were reduced by (minx, miny) before the
            # Voronoi computation, so shift them back here
            line = LineString([
                (vertex[ridge[0]][0] + self._minx,
                 vertex[ridge[0]][1] + self._miny),
                (vertex[ridge[1]][0] + self._minx,
                 vertex[ridge[1]][1] + self._miny)])

            # keep only ridges lying fully inside the input polygon.
            # NOTE(review): len(line.coords[0]) > 1 tests the arity of
            # the first coordinate tuple (always 2 here) -- presumably
            # a leftover guard; confirm before removing
            if line.within(self._input_geom) and len(line.coords[0]) > 1:
                lst_lines.append(line)

    nr_lines = len(lst_lines)
    if nr_lines < 2:
        raise RuntimeError((
            "Number of produced ridges is too small: {}"
            ", this might be caused by too large interpolation distance."
        ).format(nr_lines))

    return unary_union(lst_lines)
def _create_centerline(self)
Calculate the centerline of a polygon. Densifies the border of a polygon which is then represented by a Numpy array of points necessary for creating the Voronoi diagram. Once the diagram is created, the ridges located within the polygon are joined and returned. Returns: a union of lines that are located within the polygon.
3.836055
3.571763
1.073995
def __densify_border(self):
    '''
    Densify the border of a polygon.

    The border is densified by a given factor (by default: 0.5). The
    complexity of the polygon's geometry is evaluated in order to
    densify the borders of its interior rings as well.

    Returns:
        list: a list of points where each point is represented by
            a list of its reduced coordinates

    Example:
        [[X1, Y1], [X2, Y2], ..., [Xn, Yn]]
    '''
    if isinstance(self._input_geom, MultiPolygon):
        polygons = [polygon for polygon in self._input_geom]
    else:
        polygons = [self._input_geom]

    points = []
    for polygon in polygons:
        # the exterior ring is always densified. the original code
        # duplicated this call in both branches of an if/else on the
        # number of interior rings; the branch was redundant because
        # iterating zero interiors is a no-op.
        points += self.__fixed_interpolation(LineString(polygon.exterior))
        # interior rings (holes), if any, are densified as well
        for interior in polygon.interiors:
            points += self.__fixed_interpolation(LineString(interior))

    return points
def __densify_border(self)
Densify the border of a polygon. The border is densified by a given factor (by default: 0.5). The complexity of the polygon's geometry is evaluated in order to densify the borders of its interior rings as well. Returns: list: a list of points where each point is represented by a list of its reduced coordinates Example: [[X1, Y1], [X2, Y2], ..., [Xn, Yn]
2.351668
2.395786
0.981585
def __fixed_interpolation(self, line):
    '''
    Place additional points on the border at the specified distance.

    By default the distance is 0.5 (meters): the first point is placed
    0.5 m from the starting point, the next one 0.5 m further, and so
    on. The loop stops once the accumulated distance exceeds the length
    of the line; the line's endpoint is always appended last.

    Args:
        line (shapely.geometry.LineString): object

    Returns:
        list: a list of points where each point is represented by
            a list of its reduced coordinates

    Example:
        [[X1, Y1], [X2, Y2], ..., [Xn, Yn]]
    '''
    offset_x, offset_y = self._minx, self._miny
    start = [line.xy[0][0] - offset_x, line.xy[1][0] - offset_y]
    end = [line.xy[0][-1] - offset_x, line.xy[1][-1] - offset_y]

    points = [start]
    distance = self._interpolation_dist
    while distance < line.length:
        interpolated = line.interpolate(distance)
        points.append([interpolated.x - offset_x,
                       interpolated.y - offset_y])
        distance += self._interpolation_dist
    points.append(end)

    return points
def __fixed_interpolation(self, line)
Place additional points on the border at the specified distance. By default the distance is 0.5 (meters) which means that the first point will be placed 0.5 m from the starting point, the second point will be placed at the distance of 1.0 m from the first point, etc. The loop breaks when the summarized distance exceeds the length of the line. Args: line (shapely.geometry.LineString): object Returns: list: a list of points where each point is represented by a list of its reduced coordinates Example: [[X1, Y1], [X2, Y2], ..., [Xn, Yn]
2.570895
2.585959
0.994175
def is_valid_geometry(geometry):
    '''
    Confirm that the geometry type is Polygon or MultiPolygon.

    Args:
        geometry (BaseGeometry): BaseGeometry instance (e.g. Polygon)

    Returns:
        bool
    '''
    # isinstance accepts a tuple of types; the original
    # `if ...: return True else: return False` was redundant
    return isinstance(geometry, (Polygon, MultiPolygon))
def is_valid_geometry(geometry)
Confirm that the geometry type is of type Polygon or MultiPolygon. Args: geometry (BaseGeometry): BaseGeometry instance (e.g. Polygon) Returns: bool
2.820014
3.396608
0.830244
def get_ogr_driver(filepath):
    '''
    Get the OGR driver from the provided file extension.

    Args:
        filepath (str): path whose extension selects the driver

    Returns:
        osgeo.ogr.Driver

    Raises:
        ValueError: no driver is found
    '''
    extension = os.path.splitext(filepath)[1][1:]

    for idx in range(ogr.GetDriverCount()):
        driver = ogr.GetDriver(idx)
        driver_extension = driver.GetMetadataItem(str('DMD_EXTENSION')) or ''
        driver_extensions = driver.GetMetadataItem(str('DMD_EXTENSIONS')) or ''
        # DMD_EXTENSIONS is a space-separated list; match whole entries
        # instead of substrings so e.g. 'sh' cannot match 'shp'
        if extension == driver_extension or \
                extension in driver_extensions.split():
            return driver

    msg = 'No driver found for the following file extension: {}'.format(
        extension)
    raise ValueError(msg)
def get_ogr_driver(filepath)
Get the OGR driver from the provided file extension. Args: file_extension (str): file extension Returns: osgeo.ogr.Driver Raises: ValueError: no driver is found
2.47258
2.606951
0.948457
def get_numwords():
    '''Build the lookup table and regex alternation for number words.

    :return: (all_numbers, numwords) where ``all_numbers`` is a regex
        alternation matching every known number word, and ``numwords``
        maps each word to a (scale, increment) pair.
    '''
    # 'and' is a no-op connector; articles count as one
    numwords = {'and': (1, 0), 'a': (1, 1), 'an': (1, 1)}
    for idx, word in enumerate(UNITS):
        numwords[word] = (1, idx)
    for idx, word in enumerate(TENS):
        numwords[word] = (1, idx * 10)
    # scale words multiply: (idx * 3 or 2) maps the first scale word to
    # 10^2 (hundred) and the rest to 10^3, 10^6, ...
    for idx, word in enumerate(SCALES):
        numwords[word] = (10 ** (idx * 3 or 2), 0)

    # NOTE: ur'' literals -- this module targets Python 2
    all_numbers = ur'|'.join(ur'\b%s\b' % i for i in numwords.keys() if i)

    return all_numbers, numwords
def get_numwords()
Convert number words to integers in a given text.
2.678173
2.630788
1.018012
def get_units_regex():
    '''Build a compiled regex object matching quantities.

    The verbose pattern captures an optional currency prefix, a numeric
    value, and up to four operator/unit pairs with optional exponents.

    :return: compiled, verbose, case-insensitive regex
    '''
    # longest-first so multi-character operators/units are preferred
    # over their prefixes in the alternations below
    op_keys = sorted(OPERATORS.keys(), key=len, reverse=True)
    unit_keys = sorted(l.UNITS.keys(), key=len, reverse=True)
    symbol_keys = sorted(l.SYMBOLS.keys(), key=len, reverse=True)

    # exponent: optional '^', sign, digits or unicode superscripts, or
    # the words " cubed"/" squared"; must not be followed by a letter.
    # NOTE: ur'' literals -- this module targets Python 2
    exponent = ur'(?:(?:\^?\-?[0-9%s]*)(?:\ cubed|\ squared)?)(?![a-zA-Z])' % \
        SUPERSCRIPTS

    all_ops = '|'.join([r'%s' % re.escape(i) for i in op_keys])
    all_units = '|'.join([ur'%s' % re.escape(i) for i in unit_keys])
    all_symbols = '|'.join([ur'%s' % re.escape(i) for i in symbol_keys])

    pattern = ur'''
        (?P<prefix>(?:%s)(?![a-zA-Z]))?    # Currencies, mainly
        (?P<value>%s)-?                    # Number
        (?:(?P<operator1>%s)?(?P<unit1>(?:%s)%s)?)    # Operator + Unit (1)
        (?:(?P<operator2>%s)?(?P<unit2>(?:%s)%s)?)    # Operator + Unit (2)
        (?:(?P<operator3>%s)?(?P<unit3>(?:%s)%s)?)    # Operator + Unit (3)
        (?:(?P<operator4>%s)?(?P<unit4>(?:%s)%s)?)    # Operator + Unit (4)
    ''' % tuple([all_symbols, RAN_PATTERN] + 4 * [all_ops, all_units,
                                                  exponent])

    regex = re.compile(pattern, re.VERBOSE | re.IGNORECASE)

    return regex
def get_units_regex()
Build a compiled regex object.
2.940127
2.894373
1.015808
def get_dimension_permutations(entities, dimensions):
    '''Get all possible dimensional definitions for an entity.

    :param entities: mapping from entity name to Entity object
    :param dimensions: list of {'base': ..., 'power': ...} dicts
    :return: list of unique dimension lists, each sorted by base name
    '''
    expanded = defaultdict(int)
    for dim in dimensions:
        base_dims = entities[dim['base']].dimensions
        if base_dims:
            # expand derived entities into their own base dimensions,
            # multiplying the powers through
            for base_dim in base_dims:
                expanded[base_dim['base']] += base_dim['power'] * \
                    dim['power']
        else:
            expanded[dim['base']] += dim['power']

    # two candidate definitions: the fully expanded one and the
    # original, each normalized by sorting on the base name
    variants = [
        [{'base': base, 'power': power} for base, power in expanded.items()],
        dimensions,
    ]
    variants = [sorted(variant, key=lambda d: d['base'])
                for variant in variants]

    # deduplicate while preserving order
    unique = []
    for variant in variants:
        if variant not in unique:
            unique.append(variant)

    return unique
def get_dimension_permutations(entities, dimensions)
Get all possible dimensional definitions for an entity.
3.05775
2.946623
1.037713
def load_entities():
    '''Load entities from the ``entities.json`` file.

    :return: (entities, dimensions_ent) where ``entities`` maps entity
        names to Entity objects and ``dimensions_ent`` maps dimension
        keys to the entities sharing that dimensionality.
    :raises Exception: if two entities share the same name
    '''
    path = os.path.join(TOPDIR, 'entities.json')
    # context manager so the file handle is closed promptly (the
    # original json.load(open(path)) leaked the handle)
    with open(path) as json_file:
        entities = json.load(json_file)

    names = [i['name'] for i in entities]
    # explicit check instead of `assert`, which is stripped under -O
    if len(set(names)) != len(entities):
        raise Exception('Entities with same name: %s' %
                        [i for i in names if names.count(i) > 1])

    entities = {
        k['name']: c.Entity(name=k['name'], dimensions=k['dimensions'],
                            uri=k['URI'])
        for k in entities
    }

    # group derived entities by the key of each possible dimensional
    # definition
    dimensions_ent = defaultdict(list)
    for ent in entities:
        if not entities[ent].dimensions:
            continue
        perms = get_dimension_permutations(entities, entities[ent].dimensions)
        for perm in perms:
            key = get_key_from_dimensions(perm)
            dimensions_ent[key].append(entities[ent])

    return entities, dimensions_ent
def load_entities()
Load entities from JSON file.
3.481699
3.362856
1.03534
def get_dimensions_units(names):
    '''Create a dictionary of unit dimensions.

    Also normalizes each unit's ``dimensions`` attribute in place:
    units without declared dimensions get a trivial self-referential
    dimension, and base names are rewritten to canonical unit names.

    :param names: mapping from unit name to Unit object (mutated!)
    :return: dict mapping dimension keys to Unit objects
    '''
    dimensions_uni = {}

    for name in names:
        # key the unit by its declared dimensions...
        key = get_key_from_dimensions(names[name].dimensions)
        dimensions_uni[key] = names[name]

        # ...and also by the trivial dimension {name^1}
        plain_dimensions = [{'base': name, 'power': 1}]
        key = get_key_from_dimensions(plain_dimensions)
        dimensions_uni[key] = names[name]

        # a unit without declared dimensions is its own base dimension
        if not names[name].dimensions:
            names[name].dimensions = plain_dimensions

        # rewrite each base to the canonical name of the unit it refers
        # to. NOTE: order matters -- this must run after the trivial
        # dimension fallback above, which it may consume.
        names[name].dimensions = [{'base': names[i['base']].name,
                                   'power': i['power']}
                                  for i in names[name].dimensions]

    return dimensions_uni
def get_dimensions_units(names)
Create dictionary of unit dimensions.
3.637669
3.466664
1.049329
def load_units():
    '''Load units from the ``units.json`` file.

    :return: (names, surfaces, lowers, symbols, dimensions_uni) --
        lookup tables keyed by unit name, surface form, lowercased
        surface form and currency symbol, plus the dimension-key table
        built by get_dimensions_units.
    :raises Exception: if two units share the same name
    '''
    names = {}
    lowers = defaultdict(list)
    symbols = defaultdict(list)
    surfaces = defaultdict(list)

    # context manager so the file handle is closed promptly (the
    # original json.load(open(...)) leaked the handle)
    with open(os.path.join(TOPDIR, 'units.json')) as json_file:
        units = json.load(json_file)

    for unit in units:
        # explicit check instead of `assert`, which is stripped
        # under python -O
        if unit['name'] in names:
            msg = 'Two units with same name in units.json: %s' % unit['name']
            raise Exception(msg)

        obj = c.Unit(name=unit['name'], surfaces=unit['surfaces'],
                     entity=ENTITIES[unit['entity']], uri=unit['URI'],
                     symbols=unit['symbols'], dimensions=unit['dimensions'])

        names[unit['name']] = obj

        # index by symbol; currency symbols additionally go in `symbols`
        for symbol in unit['symbols']:
            surfaces[symbol].append(obj)
            lowers[symbol.lower()].append(obj)
            if unit['entity'] == 'currency':
                symbols[symbol].append(obj)

        # index by surface form and by its plural
        for surface in unit['surfaces']:
            surfaces[surface].append(obj)
            lowers[surface.lower()].append(obj)

            # pluralize the correct word: the one before "per"
            # ("metres per second"), the one after "degree", or the
            # whole surface otherwise
            split = surface.split()
            index = None
            if ' per ' in surface:
                index = split.index('per') - 1
            elif 'degree ' in surface:
                index = split.index('degree')
            if index is not None:
                plural = ' '.join([i if num != index else
                                   PLURALS.plural(split[index])
                                   for num, i in enumerate(split)])
            else:
                plural = PLURALS.plural(surface)
            if plural != surface:
                surfaces[plural].append(obj)
                lowers[plural.lower()].append(obj)

    dimensions_uni = get_dimensions_units(names)

    return names, surfaces, lowers, symbols, dimensions_uni
def load_units()
Load units from JSON file.
3.210601
3.182991
1.008674
def download_wiki():
    '''Download WikiPedia pages of ambiguous units.

    Collects every unit/derived entity that shares a surface or a
    dimensionality with another, fetches the corresponding Wikipedia
    article text, and rewrites ``wiki.json`` with the results.
    '''
    # units/entities are ambiguous when several objects share one key
    ambiguous = [i for i in l.UNITS.items() if len(i[1]) > 1]
    ambiguous += [i for i in l.DERIVED_ENT.items() if len(i[1]) > 1]
    pages = set([(j.name, j.uri) for i in ambiguous for j in i[1]])

    # NOTE: bare `print` statements -- this module targets Python 2
    print
    objs = []
    for num, page in enumerate(pages):
        obj = {'url': page[1]}
        obj['_id'] = obj['url'].replace('https://en.wikipedia.org/wiki/', '')
        obj['clean'] = obj['_id'].replace('_', ' ')

        print '---> Downloading %s (%d of %d)' % \
            (obj['clean'], num + 1, len(pages))

        obj['text'] = wikipedia.page(obj['clean']).content
        obj['unit'] = page[0]
        objs.append(obj)

    path = os.path.join(l.TOPDIR, 'wiki.json')
    # NOTE(review): os.remove raises OSError when wiki.json does not
    # exist yet -- confirm a prior file is always present
    os.remove(path)
    json.dump(objs, open(path, 'w'), indent=4, sort_keys=True)

    print '\n---> All done.\n'
def download_wiki()
Download WikiPedia pages of ambiguous units.
3.50089
3.287407
1.06494
def clean_text(text):
    '''Clean text for TFIDF.

    Strips punctuation, drops tokens containing digits, lowercases and
    stems the remaining words.

    :param text: input string
    :return: cleaned, stemmed string
    '''
    # \p{P} (any punctuation) is not supported by the stdlib `re`
    # module -- presumably `re` here is the third-party `regex`
    # package; TODO confirm. (ur'' literal: Python 2 module.)
    new_text = re.sub(ur'\p{P}+', ' ', text)

    # drop any token containing a digit, stem the rest
    new_text = [stem(i) for i in new_text.lower().split()
                if not re.findall(r'[0-9]', i)]

    new_text = ' '.join(new_text)

    return new_text
def clean_text(text)
Clean text for TFIDF.
3.467264
3.216001
1.078129
def train_classifier(download=True, parameters=None, ngram_range=(1, 1)):
    '''Train the intent classifier.

    :param bool download: re-download the Wikipedia pages first
    :param dict parameters: SGDClassifier keyword arguments; a default
        log-loss configuration is used when None
    :param tuple ngram_range: TF-IDF n-gram range
    :return: None; the fitted model is pickled to ``clf.pickle``
    '''
    if download:
        download_wiki()

    # load the labeled examples; context managers close the handles
    # (the original json.load(open(...)) leaked them)
    with open(os.path.join(l.TOPDIR, 'train.json')) as train_file:
        training_set = json.load(train_file)
    with open(os.path.join(l.TOPDIR, 'wiki.json')) as wiki_file:
        wiki_set = json.load(wiki_file)

    target_names = list(set([i['unit'] for i in training_set + wiki_set]))
    train_data, train_target = [], []
    for example in training_set + wiki_set:
        train_data.append(clean_text(example['text']))
        train_target.append(target_names.index(example['unit']))

    tfidf_model = TfidfVectorizer(sublinear_tf=True,
                                  ngram_range=ngram_range,
                                  stop_words='english')

    matrix = tfidf_model.fit_transform(train_data)

    if parameters is None:
        parameters = {'loss': 'log', 'penalty': 'l2', 'n_iter': 50,
                      'alpha': 0.00001, 'fit_intercept': True}

    clf = SGDClassifier(**parameters).fit(matrix, train_target)
    obj = {'tfidf_model': tfidf_model,
           'clf': clf,
           'target_names': target_names}
    path = os.path.join(l.TOPDIR, 'clf.pickle')
    # pickle requires a binary-mode handle; 'w' breaks under Python 3
    with open(path, 'wb') as clf_file:
        pickle.dump(obj, clf_file)
def train_classifier(download=True, parameters=None, ngram_range=(1, 1))
Train the intent classifier.
2.193275
2.199625
0.997113
def load_classifier():
    '''Load the pickled intent classifier from ``clf.pickle``.

    :return: (tfidf_model, clf, target_names)
    '''
    path = os.path.join(l.TOPDIR, 'clf.pickle')
    # pickle files must be opened in binary mode ('rb', not 'r'), and
    # the context manager closes the handle promptly
    with open(path, 'rb') as clf_file:
        obj = pickle.load(clf_file)
    return obj['tfidf_model'], obj['clf'], obj['target_names']
def load_classifier()
Load the pickled intent classifier.
6.378644
6.354943
1.003729
def disambiguate_entity(key, text):
    '''Resolve ambiguity between entities with the same dimensionality.

    :param key: dimension key shared by the candidate entities
    :param text: surrounding text, used as classifier input
    :return: the most likely Entity for the given text
    '''
    candidates = l.DERIVED_ENT[key]
    new_ent = candidates[0]

    if len(candidates) > 1:
        # score the text against every known target, keeping only the
        # scores that correspond to candidate entities
        transformed = TFIDF_MODEL.transform([text])
        scores = CLF.predict_proba(transformed).tolist()[0]
        scores = sorted(zip(scores, TARGET_NAMES),
                        key=lambda x: x[0],
                        reverse=True)
        names = [i.name for i in candidates]
        scores = [i for i in scores if i[1] in names]
        try:
            new_ent = l.ENTITIES[scores[0][1]]
        except IndexError:
            # no candidate scored at all: keep the first one
            logging.debug('\tAmbiguity not resolved for "%s"', str(key))

    return new_ent
def disambiguate_entity(key, text)
Resolve ambiguity between entities with same dimensionality.
4.301049
4.045563
1.063152
def disambiguate_unit(unit, text):
    '''Resolve ambiguity.

    Distinguish between units that have same names, symbols or
    abbreviations.

    :param unit: surface form of the unit
    :param text: surrounding text, used as classifier input
    :return: the most likely Unit object
    :raises KeyError: if the surface is entirely unknown
    '''
    # NOTE(review): l.UNITS / l.LOWER_UNITS appear to be defaultdicts
    # of lists (see load_units), so a missing surface yields an empty
    # list instead of raising; the explicit KeyError below covers that
    new_unit = l.UNITS[unit]
    if not new_unit:
        new_unit = l.LOWER_UNITS[unit.lower()]
        if not new_unit:
            raise KeyError('Could not find unit "%s"' % unit)

    if len(new_unit) > 1:
        # several units share this surface: let the text classifier
        # pick among the candidates
        transformed = TFIDF_MODEL.transform([clean_text(text)])
        scores = CLF.predict_proba(transformed).tolist()[0]
        scores = sorted(zip(scores, TARGET_NAMES),
                        key=lambda x: x[0],
                        reverse=True)
        # keep only scores that correspond to the candidate units
        names = [i.name for i in new_unit]
        scores = [i for i in scores if i[1] in names]
        try:
            # NOTE(review): indexes l.UNITS by the winning *name* --
            # confirm unit names are also registered as surfaces
            final = l.UNITS[scores[0][1]][0]
            logging.debug('\tAmbiguity resolved for "%s" (%s)', unit, scores)
        except IndexError:
            logging.debug('\tAmbiguity not resolved for "%s"', unit)
            final = new_unit[0]
    else:
        final = new_unit[0]

    return final
def disambiguate_unit(unit, text)
Resolve ambiguity. Distinguish between units that have same names, symbols or abbreviations.
3.442145
3.404178
1.011153
def clean_surface(surface, span):
    '''Remove spurious characters from a quantity's surface.

    :param surface: matched surface text
    :param span: (start, end) span of the surface in the source text
    :return: (cleaned surface, adjusted span), or (None, None) if
        nothing remains after cleaning
    '''
    surface = surface.replace('-', ' ')
    leading = ['and', ' ']
    trailing = [' and', ' ']

    # repeatedly trim filler words and spaces from both ends, shifting
    # the span so it stays aligned with the original text
    trimmed = True
    while trimmed:
        trimmed = False
        for word in leading:
            if surface.lower().startswith(word):
                surface = surface[len(word):]
                span = (span[0] + len(word), span[1])
                trimmed = True
        for word in trailing:
            if surface.lower().endswith(word):
                surface = surface[:-len(word)]
                span = (span[0], span[1] - len(word))
                trimmed = True

    if not surface:
        return None, None

    # drop a leading article when it precedes a spelled-out number
    words = surface.lower().split()
    if words[0] in ['one', 'a', 'an'] and len(words) > 1 and \
            words[1] in r.UNITS + r.TENS:
        span = (span[0] + len(surface.split()[0]) + 1, span[1])
        surface = ' '.join(surface.split()[1:])

    return surface, span
def clean_surface(surface, span)
Remove spurious characters from a quantity's surface.
2.433838
2.366422
1.028489
def extract_spellout_values(text):
    '''Convert spelled-out numbers in a given text to digits.

    :param text: input text
    :return: list of substitution dicts with keys 'old_surface',
        'old_span' and 'new_surface', sorted by span start
    '''
    values = []
    for item in r.REG_TXT.finditer(text):
        surface, span = clean_surface(item.group(0), item.span())
        # skip empty matches and bare scale words ("million")
        if not surface or surface.lower() in r.SCALES:
            continue
        # accumulate e.g. "two hundred five": digit tokens and unit
        # words add, scale words multiply; a scale > 100 (thousand,
        # million, ...) closes the current group
        curr = result = 0.0
        for word in surface.split():
            try:
                scale, increment = 1, float(word.lower())
            except ValueError:
                scale, increment = r.NUMWORDS[word.lower()]
            curr = curr * scale + increment
            if scale > 100:
                result += curr
                curr = 0.0
        # `unicode` builtin -- this module targets Python 2
        values.append({'old_surface': surface,
                       'old_span': span,
                       'new_surface': unicode(result + curr)})

    # also normalize digit groups like "1,000,000" by dropping commas
    for item in re.finditer(r'\d+(,\d{3})+', text):
        values.append({'old_surface': item.group(0),
                       'old_span': item.span(),
                       'new_surface': unicode(item.group(0).replace(',', ''))})

    return sorted(values, key=lambda x: x['old_span'][0])
def extract_spellout_values(text)
Convert spelled out numbers in a given text to digits.
3.712861
3.508462
1.058259
def substitute_values(text, values):
    '''Replace spelled-out numbers in *text* with their digit forms.

    :param text: original text
    :param values: substitutions (from extract_spellout_values),
        ordered by original span
    :return: (new_text, shifts) where ``shifts`` maps character
        positions in the new text to their cumulative offset relative
        to the original text
    '''
    shift = 0
    final_text = text
    shifts = defaultdict(int)

    for value in values:
        start = value['old_span'][0] + shift
        end = value['old_span'][1] + shift
        replacement = value['new_surface']
        final_text = final_text[0:start] + replacement + final_text[end:]
        shift += len(replacement) - len(value['old_surface'])
        # every character after this substitution carries the new shift
        for char in range(start + 1, len(final_text)):
            shifts[char] = shift

    logging.debug(u'Text after numeric conversion: "%s"', final_text)

    return final_text, shifts
def substitute_values(text, values)
Convert spelled out numbers in a given text to digits.
3.785017
3.729869
1.014786
def get_values(item):
    '''Extract value(s) and uncertainty from a regex hit.

    Handles scientific-notation multipliers, unicode fractions, ranges
    ("3 to 5"), uncertainties ("3 +/- 0.1") and plain fractions.

    :param item: regex match for a quantity
    :return: (uncertainty or None, list of float values)
    '''
    fracs = r'|'.join(r.UNI_FRAC)

    value = item.group(2)
    # rewrite "3x10^5"-style multipliers into exponent notation
    # (ur'' literal -- this module targets Python 2)
    value = re.sub(ur'(?<=\d)(%s)10' % r.MULTIPLIERS, 'e', value)
    # replace unicode fraction characters via the callback.
    # NOTE(review): re.sub's 4th positional argument is `count`, so
    # re.IGNORECASE is passed as a count here, not as flags -- confirm
    # whether this is intended
    value = re.sub(fracs, callback, value, re.IGNORECASE)
    value = re.sub(' +', ' ', value)

    range_separator = re.findall(ur'\d+ ?(-|and|(?:- ?)?to) ?\d', value)
    uncer_separator = re.findall(ur'\d+ ?(\+/-|±) ?\d', value)
    fract_separator = re.findall(ur'\d+/\d+', value)

    uncertainty = None
    if range_separator:
        # a range like "3 - 5" yields both endpoint values
        values = value.split(range_separator[0])
        values = [float(re.sub(r'-$', '', i)) for i in values]
    elif uncer_separator:
        # "3 +/- 0.5": first number is the value, second the uncertainty
        values = [float(i) for i in value.split(uncer_separator[0])]
        uncertainty = values[1]
        values = [values[0]]
    elif fract_separator:
        # "1 2/3" (mixed) or "2/3" (plain fraction)
        values = value.split()
        if len(values) > 1:
            values = [float(values[0]) + float(Fraction(values[1]))]
        else:
            values = [float(Fraction(values[0]))]
    else:
        values = [float(re.sub(r'-$', '', value))]

    logging.debug(u'\tUncertainty: %s', uncertainty)
    logging.debug(u'\tValues: %s', values)

    return uncertainty, values
def get_values(item)
Extract value from regex hit.
3.397702
3.322753
1.022556
def build_unit_name(dimensions):
    '''Build the name of a unit from its dimensions.

    :param dimensions: list of {'base': ..., 'power': ...} dicts
    :return: human-readable name, e.g. "metre per square second"
    '''
    parts = []
    for dim in dimensions:
        if dim['power'] < 0:
            # negative powers read as division
            parts.append('per ')
        magnitude = abs(dim['power'])
        if magnitude == 1:
            parts.append(dim['base'])
        elif magnitude == 2:
            parts.append('square ' + dim['base'])
        elif magnitude == 3:
            parts.append('cubic ' + dim['base'])
        elif magnitude > 3:
            parts.append(dim['base'] + ' to the %g' % magnitude)
        parts.append(' ')

    name = ''.join(parts).strip()

    logging.debug(u'\tUnit inferred name: %s', name)

    return name
def build_unit_name(dimensions)
Build the name of the unit from its dimensions.
3.184953
3.177967
1.002198
def get_unit_from_dimensions(dimensions, text):
    '''Reconcile a unit based on its dimensionality.

    :param dimensions: list of {'base': ..., 'power': ...} dicts
    :param text: surrounding text, used when the unit must be inferred
    :return: a known derived Unit, or a freshly built one
    '''
    key = l.get_key_from_dimensions(dimensions)
    try:
        return l.DERIVED_UNI[key]
    except KeyError:
        # unknown combination: synthesize a unit on the fly
        logging.debug(u'\tCould not find unit for: %s', key)
        return c.Unit(name=build_unit_name(dimensions),
                      dimensions=dimensions,
                      entity=get_entity_from_dimensions(dimensions, text))
def get_unit_from_dimensions(dimensions, text)
Reconcile a unit based on its dimensionality.
5.622267
5.505328
1.021241
def get_entity_from_dimensions(dimensions, text):
    '''Infer the underlying entity of a unit (e.g. "volume" for "m^3").

    Uses the classifier when enabled, otherwise the unit's
    dimensionality alone.

    :param dimensions: list of {'base': ..., 'power': ...} dicts
    :param text: surrounding text, used as classifier input
    :return: the inferred Entity, or an "unknown" Entity
    '''
    # translate unit bases into the names of their entities
    new_dimensions = [{'base': l.NAMES[i['base']].entity.name,
                       'power': i['power']} for i in dimensions]

    key = l.get_key_from_dimensions(sorted(new_dimensions,
                                           key=lambda x: x['base']))

    try:
        if clf.USE_CLF:
            ent = clf.disambiguate_entity(key, text)
        else:
            ent = l.DERIVED_ENT[key][0]
    except IndexError:
        logging.debug(u'\tCould not find entity for: %s', key)
        ent = c.Entity(name='unknown', dimensions=new_dimensions)

    return ent
def get_entity_from_dimensions(dimensions, text)
Infer the underlying entity of a unit (e.g. "volume" for "m^3"). Just based on the unit's dimensionality if the classifier is disabled.
6.145039
5.665737
1.084596
def parse_unit(item, group, slash):
    '''Parse surface and power from unit text.

    :param item: regex match object for the whole quantity
    :param group: index of the unit capture group to parse
    :param slash: True if a preceding '/' or 'per' negates the power
    :return: (cleaned surface, signed integer power)
    '''
    surface = item.group(group).replace('.', '')
    sign = -1 if slash else 1

    power = re.findall(r'\-?[0-9%s]+' % r.SUPERSCRIPTS, surface)
    if power:
        # translate unicode superscripts to plain digits and strip the
        # exponent from the surface
        digits = ''.join(r.UNI_SUPER.get(i, i) for i in power)
        new_power = sign * int(digits)
        surface = re.sub(r'\^?\-?[0-9%s]+' % r.SUPERSCRIPTS, '', surface)
    elif re.findall(r'\bcubed\b', surface):
        new_power = sign * 3
        surface = re.sub(r'\bcubed\b', '', surface).strip()
    elif re.findall(r'\bsquared\b', surface):
        new_power = sign * 2
        surface = re.sub(r'\bsquared\b', '', surface).strip()
    else:
        new_power = sign

    return surface, new_power
def parse_unit(item, group, slash)
Parse surface and power from unit text.
2.745666
2.600385
1.055869
def get_unit(item, text):
    '''Extract the unit from a regex hit.

    Walks the operator/unit capture groups in their order of
    appearance, tracking whether a division ('/' or 'per') has been
    seen so that later powers are negated.

    :param item: regex match for a quantity
    :param text: surrounding text, used for disambiguation
    :return: a Unit object (dimensionless when no unit group matched)
    '''
    # capture-group indices fixed by the pattern in get_units_regex
    group_units = [1, 4, 6, 8, 10]
    group_operators = [3, 5, 7, 9]

    item_units = [item.group(i) for i in group_units if item.group(i)]

    if len(item_units) == 0:
        unit = l.NAMES['dimensionless']
    else:
        # once a division operator is seen, `slash` stays True and all
        # subsequent unit powers come out negative via parse_unit
        dimensions, slash = [], False
        for group in sorted(group_units + group_operators):
            if not item.group(group):
                continue
            if group in group_units:
                surface, power = parse_unit(item, group, slash)
                if clf.USE_CLF:
                    base = clf.disambiguate_unit(surface, text).name
                else:
                    base = l.UNITS[surface][0].name
                dimensions += [{'base': base, 'power': power}]
            elif not slash:
                slash = any(i in item.group(group) for i in [u'/', u' per '])

        unit = get_unit_from_dimensions(dimensions, text)

    logging.debug(u'\tUnit: %s', unit)
    logging.debug(u'\tEntity: %s', unit.entity)

    return unit
def get_unit(item, text)
Extract unit from regex hit.
4.509005
4.301651
1.048203
span = item.span() logging.debug(u'\tInitial span: %s ("%s")', span, text[span[0]:span[1]]) real_span = (span[0] - shifts[span[0]], span[1] - shifts[span[1] - 1]) surface = orig_text[real_span[0]:real_span[1]] logging.debug(u'\tShifted span: %s ("%s")', real_span, surface) while any(surface.endswith(i) for i in [' ', '-']): surface = surface[:-1] real_span = (real_span[0], real_span[1] - 1) while surface.startswith(' '): surface = surface[1:] real_span = (real_span[0] + 1, real_span[1]) logging.debug(u'\tFinal span: %s ("%s")', real_span, surface) return surface, real_span
def get_surface(shifts, orig_text, item, text)
Extract surface from regex hit.
2.176828
2.151632
1.011711
res = False cursor = re.finditer(r'("|\')[^ .,:;?!()*+-].*?("|\')', orig_text) for item in cursor: if item.span()[1] == span[1]: res = True return res
def is_quote_artifact(orig_text, span)
Distinguish between quotes and units.
6.323827
5.787122
1.092741
# Discard irrelevant txt2float extractions, cardinal numbers, codes etc. if surface.lower() in ['a', 'an', 'one'] or \ re.search(r'1st|2nd|3rd|[04-9]th', surface) or \ re.search(r'\d+[A-Z]+\d+', surface) or \ re.search(r'\ba second\b', surface, re.IGNORECASE): logging.debug(u'\tMeaningless quantity ("%s"), discard', surface) return # Usually "$3T" does not stand for "dollar tesla" elif unit.entity.dimensions and \ unit.entity.dimensions[0]['base'] == 'currency': if len(unit.dimensions) > 1: try: suffix = re.findall(r'\d(K|M|B|T)\b(.*?)$', surface)[0] values = [i * r.SUFFIXES[suffix[0]] for i in values] unit = l.UNITS[unit.dimensions[0]['base']][0] if suffix[1]: surface = surface[:surface.find(suffix[1])] span = (span[0], span[1] - len(suffix[1])) logging.debug(u'\tCorrect for "$3T" pattern') except IndexError: pass else: try: suffix = re.findall(r'%s(K|M|B|T)\b' % re.escape(surface), orig_text)[0] surface += suffix span = (span[0], span[1] + 1) values = [i * r.SUFFIXES[suffix] for i in values] logging.debug(u'\tCorrect for "$3T" pattern') except IndexError: pass # Usually "1990s" stands for the decade, not the amount of seconds elif re.match(r'[1-2]\d\d0s', surface): unit = l.NAMES['dimensionless'] surface = surface[:-1] span = (span[0], span[1] - 1) logging.debug(u'\tCorrect for decade pattern') # Usually "in" stands for the preposition, not inches elif unit.dimensions[-1]['base'] == 'inch' and \ re.search(r' in$', surface) and '/' not in surface: if len(unit.dimensions) > 1: unit = get_unit_from_dimensions(unit.dimensions[:-1], orig_text) else: unit = l.NAMES['dimensionless'] surface = surface[:-3] span = (span[0], span[1] - 3) logging.debug(u'\tCorrect for "in" pattern') elif is_quote_artifact(text, item.span()): if len(unit.dimensions) > 1: unit = get_unit_from_dimensions(unit.dimensions[:-1], orig_text) else: unit = l.NAMES['dimensionless'] surface = surface[:-1] span = (span[0], span[1] - 1) logging.debug(u'\tCorrect for quotes') elif re.search(r' time$', 
surface) and len(unit.dimensions) > 1 and \ unit.dimensions[-1]['base'] == 'count': unit = get_unit_from_dimensions(unit.dimensions[:-1], orig_text) surface = surface[:-5] span = (span[0], span[1] - 5) logging.debug(u'\tCorrect for "time"') objs = [] for value in values: obj = c.Quantity(value=value, unit=unit, surface=surface, span=span, uncertainty=uncert) objs.append(obj) return objs
def build_quantity(orig_text, text, item, values, unit, surface, span, uncert)
Build a Quantity object out of extracted information.
3.087377
3.102715
0.995057
# Replace a few nasty unicode characters with their ASCII equivalent maps = {u'×': u'x', u'–': u'-', u'−': '-'} for element in maps: text = text.replace(element, maps[element]) # Replace genitives text = re.sub(r'(?<=\w)\'s\b|(?<=\w)s\'(?!\w)', ' ', text) logging.debug(u'Clean text: "%s"', text) return text
def clean_text(text)
Clean text before parsing.
4.910069
4.964498
0.989037
log_format = ('%(asctime)s --- %(message)s') logging.basicConfig(format=log_format) root = logging.getLogger() if verbose: level = root.level root.setLevel(logging.DEBUG) logging.debug(u'Verbose mode') if isinstance(text, str): text = text.decode('utf-8') logging.debug(u'Converted string to unicode (assume utf-8 encoding)') orig_text = text logging.debug(u'Original text: "%s"', orig_text) text = clean_text(text) values = extract_spellout_values(text) text, shifts = substitute_values(text, values) quantities = [] for item in r.REG_DIM.finditer(text): groups = dict([i for i in item.groupdict().items() if i[1] and i[1].strip()]) logging.debug(u'Quantity found: %s', groups) try: uncert, values = get_values(item) except ValueError as err: logging.debug(u'Could not parse quantity: %s', err) unit = get_unit(item, text) surface, span = get_surface(shifts, orig_text, item, text) objs = build_quantity(orig_text, text, item, values, unit, surface, span, uncert) if objs is not None: quantities += objs if verbose: root.level = level return quantities
def parse(text, verbose=False)
Extract all quantities from unstructured text.
4.042057
3.834782
1.054051
if isinstance(text, str): text = text.decode('utf-8') parsed = parse(text, verbose=verbose) shift = 0 for quantity in parsed: index = quantity.span[1] + shift to_add = u' {' + unicode(quantity) + u'}' text = text[0:index] + to_add + text[index:] shift += len(to_add) return text
def inline_parse(text, verbose=False)
Extract all quantities from unstructured text.
3.509031
3.238611
1.083499
if self._login_data or self._login_token: def reconnect_login_callback(error, result): if error: if self._login_token: self._login_token = None self._login(self._login_data, callback=reconnect_login_callback) return else: raise MeteorClientException( 'Failed to re-authenticate during reconnect') self.connected = True self._resubscribe() if self._login_token: self._resume(self._login_token, callback=reconnect_login_callback) else: self._login(self._login_data, callback=reconnect_login_callback) else: self._resubscribe()
def _reconnected(self)
Reconnect Currently we get a new session every time so we have to clear all the data an resubscribe
3.201055
2.99642
1.068293
# TODO: keep the tokenExpires around so we know the next time # we need to authenticate # hash the password hashed = hashlib.sha256(password).hexdigest() # handle username or email address if '@' in user: user_object = { 'email': user } else: user_object = { 'username': user } password_object = { 'algorithm': 'sha-256', 'digest': hashed } self._login_token = token self._login_data = {'user': user_object, 'password': password_object} if token: self._resume(token, callback=callback) else: self._login(self._login_data, callback=callback)
def login(self, user, password, token=None, callback=None)
Login with a username and password Arguments: user - username or email address password - the password for the account Keyword Arguments: token - meteor resume token callback - callback function containing error as first argument and login data
3.77423
3.90571
0.966336
self.ddp_client.call('logout', [], callback=callback) self.emit('logged_out')
def logout(self, callback=None)
Logout a user Keyword Arguments: callback - callback function called when the user has been logged out
7.698749
12.033082
0.639799
self._wait_for_connect() self.ddp_client.call(method, params, callback=callback)
def call(self, method, params, callback=None)
Call a remote method Arguments: method - remote method name params - remote method parameters Keyword Arguments: callback - callback function containing return data
6.533659
8.653324
0.755046
self._wait_for_connect() def subscribed(error, sub_id): if error: self._remove_sub_by_id(sub_id) if callback: callback(error.get('reason')) return if callback: callback(None) self.emit('subscribed', name) if name in self.subscriptions: raise MeteorClientException('Already subcribed to {}'.format(name)) sub_id = self.ddp_client.subscribe(name, params, subscribed) self.subscriptions[name] = { 'id': sub_id, 'params': params }
def subscribe(self, name, params=[], callback=None)
Subscribe to a collection Arguments: name - the name of the publication params - the subscription parameters Keyword Arguments: callback - a function callback that returns an error (if exists)
3.216708
3.673407
0.875674
self._wait_for_connect() if name not in self.subscriptions: raise MeteorClientException('No subscription for {}'.format(name)) self.ddp_client.unsubscribe(self.subscriptions[name]['id']) del self.subscriptions[name] self.emit('unsubscribed', name)
def unsubscribe(self, name)
Unsubscribe from a collection Arguments: name - the name of the publication
3.954366
4.529475
0.87303
results = [] for _id, doc in self.collection_data.data.get(collection, {}).items(): doc.update({'_id': _id}) if selector == {}: results.append(doc) for key, value in selector.items(): if key in doc and doc[key] == value: results.append(doc) return results
def find(self, collection, selector={})
Find data in a collection Arguments: collection - collection to search Keyword Arguments: selector - the query (default returns all items in a collection)
2.640951
3.175209
0.831741
for _id, doc in self.collection_data.data.get(collection, {}).items(): doc.update({'_id': _id}) if selector == {}: return doc for key, value in selector.items(): if key in doc and doc[key] == value: return doc return None
def find_one(self, collection, selector={})
Return one item from a collection Arguments: collection - collection to search Keyword Arguments: selector - the query (default returns first item found)
2.820845
3.512375
0.803116
self.call("/" + collection + "/insert", [doc], callback=callback)
def insert(self, collection, doc, callback=None)
Insert an item into a collection Arguments: collection - the collection to be modified doc - The document to insert. May not yet have an _id attribute, in which case Meteor will generate one for you. Keyword Arguments: callback - Optional. If present, called with an error object as the first argument and, if no error, the _id as the second.
8.895993
13.517534
0.658108
self.call("/" + collection + "/update", [selector, modifier], callback=callback)
def update(self, collection, selector, modifier, callback=None)
Insert an item into a collection Arguments: collection - the collection to be modified selector - specifies which documents to modify modifier - Specifies how to modify the documents Keyword Arguments: callback - Optional. If present, called with an error object as the first argument and, if no error, the number of affected documents as the second.
7.399491
11.341109
0.652449
self.call("/" + collection + "/remove", [selector], callback=callback)
def remove(self, collection, selector, callback=None)
Remove an item from a collection Arguments: collection - the collection to be modified selector - Specifies which documents to remove Keyword Arguments: callback - Optional. If present, called with an error object as its argument.
9.173355
13.565125
0.676246
try: if self.closed is False: close_msg = bytearray() close_msg.extend(struct.pack("!H", status)) if _check_unicode(reason): close_msg.extend(reason.encode('utf-8')) else: close_msg.extend(reason) self._send_message(False, CLOSE, close_msg) finally: self.closed = True
def close(self, status=1000, reason=u'')
Send Close frame to the client. The underlying socket is only closed when the client acknowledges the Close frame. status is the closing identifier. reason is the reason for the close.
2.880816
2.876728
1.001421
opcode = BINARY if _check_unicode(data): opcode = TEXT self._send_message(True, opcode, data)
def send_fragment_start(self, data)
Send the start of a data fragment stream to a websocket client. Subsequent data should be sent using sendFragment(). A fragment stream is completed when sendFragmentEnd() is called. If data is a unicode object then the frame is sent as Text. If the data is a bytearray object then the frame is sent as Binary.
12.088314
15.169222
0.796897
opcode = BINARY if _check_unicode(data): opcode = TEXT self._send_message(False, opcode, data)
def send_message(self, data)
Send websocket data frame to the client. If data is a unicode object then the frame is sent as Text. If the data is a bytearray object then the frame is sent as Binary.
12.304915
14.316739
0.859478
zlibbed_str = zlib.compress(plantuml_text.encode('utf-8')) compressed_string = zlibbed_str[2:-4] return encode(compressed_string.decode('latin-1'))
def deflate_and_encode(plantuml_text)
zlib compress the plantuml text and encode it for the plantuml server.
3.527121
3.657549
0.96434
res = "" for i in range(0,len(data), 3): if (i+2==len(data)): res += _encode3bytes(ord(data[i]), ord(data[i+1]), 0) elif (i+1==len(data)): res += _encode3bytes(ord(data[i]), 0, 0) else: res += _encode3bytes(ord(data[i]), ord(data[i+1]), ord(data[i+2])) return res
def encode(data)
encode the plantuml data which may be compresses in the proper encoding for the plantuml server
1.763821
1.819503
0.969397
url = self.get_url(plantuml_text) try: response, content = self.http.request(url, **self.request_opts) except self.HttpLib2Error as e: raise PlantUMLConnectionError(e) if response.status != 200: raise PlantUMLHTTPError(response, content) return content
def processes(self, plantuml_text)
Processes the plantuml text into the raw PNG image data. :param str plantuml_text: The plantuml markup to render :returns: the raw image data
3.03575
3.515068
0.863639
if outfile is None: outfile = os.path.splitext(filename)[0] + '.png' if errorfile is None: errorfile = os.path.splitext(filename)[0] + '_error.html' data = open(filename, 'U').read() try: content = self.processes(data) except PlantUMLHTTPError as e: err = open(errorfile, 'w') err.write(e.content) err.close() return False out = open(outfile, 'wb') out.write(content) out.close() return True
def processes_file(self, filename, outfile=None, errorfile=None)
Take a filename of a file containing plantuml text and processes it into a .png image. :param str filename: Text file containing plantuml markup :param str outfile: Filename to write the output image to. If not supplied, then it will be the input filename with the file extension replaced with '.png'. :param str errorfile: Filename to write server html error page to. If this is not supplined, then it will be the input ``filename`` with the extension replaced with '_error.html'. :returns: ``True`` if the image write succedded, ``False`` if there was an error written to ``errorfile``.
2.319298
1.892662
1.225416
''' For Jira next-gen projects, issue types can be scoped to projects. For issue types that are scoped to projects, only extract the ones in the extracted projects. ''' print('downloading jira issue types... ', end='', flush=True) result = [] for it in jira_connection.issue_types(): if 'scope' in it.raw and it.raw['scope']['type'] == 'PROJECT': if it.raw['scope']['project']['id'] in project_ids: result.append(it.raw) else: result.append(it.raw) print('✓') return result
def download_issuetypes(jira_connection, project_ids)
For Jira next-gen projects, issue types can be scoped to projects. For issue types that are scoped to projects, only extract the ones in the extracted projects.
4.529395
2.356365
1.922196
# type: (List[AbstractPEMObject], **Any) -> ssl.CerticateOptions keys = [key for key in pemObjects if isinstance(key, Key)] if not len(keys): raise ValueError("Supplied PEM file(s) does *not* contain a key.") if len(keys) > 1: raise ValueError("Supplied PEM file(s) contains *more* than one key.") privateKey = ssl.KeyPair.load(str(keys[0]), FILETYPE_PEM) certs = [cert for cert in pemObjects if isinstance(cert, Certificate)] if not len(certs): raise ValueError("*At least one* certificate is required.") certificates = [ssl.Certificate.loadPEM(str(certPEM)) for certPEM in certs] certificatesByFingerprint = dict( [ (certificate.getPublicKey().keyHash(), certificate) for certificate in certificates ] ) if privateKey.keyHash() not in certificatesByFingerprint: raise ValueError( "No certificate matching {fingerprint} found.".format( fingerprint=privateKey.keyHash() ) ) primaryCertificate = certificatesByFingerprint.pop(privateKey.keyHash()) if "dhParameters" in kw: raise TypeError( "Passing DH parameters as a keyword argument instead of a " "PEM object is not supported anymore." ) dhparams = [o for o in pemObjects if isinstance(o, DHParameters)] if len(dhparams) > 1: raise ValueError( "Supplied PEM file(s) contain(s) *more* than one set of DH " "parameters." ) elif len(dhparams) == 1: kw["dhParameters"] = ssl.DiffieHellmanParameters(str(dhparams[0])) ctxFactory = ssl.CertificateOptions( privateKey=privateKey.original, certificate=primaryCertificate.original, extraCertChain=[ chain.original for chain in certificatesByFingerprint.values() ], **kw ) return ctxFactory
def certificateOptionsFromPEMs(pemObjects, **kw)
Load a CertificateOptions from the given collection of PEM objects (already-loaded private keys and certificates). In those PEM objects, identify one private key and its corresponding certificate to use as the primary certificate. Then use the rest of the certificates found as chain certificates. Raise a ValueError if no certificate matching a private key is found. :return: A TLS context factory using *pemObjects* :rtype: `twisted.internet.ssl.CertificateOptions`_ .. _`twisted.internet.ssl.CertificateOptions`: \ https://twistedmatrix.com/documents/current/api/\ twisted.internet.ssl.CertificateOptions.html
3.168234
3.314287
0.955932
# type: (*str, **Any) -> ssl.CertificateOptions pems = [] # type: List[AbstractPEMObject] for pemFile in pemFiles: pems += parse_file(pemFile) return certificateOptionsFromPEMs(pems, **kw)
def certificateOptionsFromFiles(*pemFiles, **kw)
Read all files named by *pemFiles*, and parse them using :func:`certificateOptionsFromPEMs`.
4.021949
4.87273
0.8254
# type: (bytes) -> List[AbstractPEMObject] return [ _PEM_TO_CLASS[match.group(1)](match.group(0)) for match in _PEM_RE.finditer(pem_str) ]
def parse(pem_str)
Extract PEM objects from *pem_str*. :param pem_str: String to parse. :type pem_str: bytes :return: list of :ref:`pem-objects`
4.390557
6.586495
0.6666
# type: () -> str if self._sha1_hexdigest is None: self._sha1_hexdigest = hashlib.sha1(self._pem_bytes).hexdigest() return self._sha1_hexdigest
def sha1_hexdigest(self)
A SHA-1 digest of the whole object for easy differentiation. .. versionadded:: 18.1.0
2.802901
3.740046
0.74943