Columns:
content: string (length 39 to 9.28k)
sha1: string (length 40)
id: int64 (8 to 710k)
def is_subsequence(needle, haystack):
    """Are all the elements of needle contained in haystack, and in the same order?
    There may be other elements interspersed throughout"""
    it = iter(haystack)
    for element in needle:
        if element not in it:
            return False
    return True
1b3c1d66258bc3ae66e52c9eacfe1dbfed345c87
44,464
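A minimal usage sketch (not part of the dataset row above; values are illustrative) showing why the iterator-based `in` checks in is_subsequence enforce ordering: each membership test consumes the haystack iterator up to the match, so elements must appear in sequence.

assert is_subsequence([1, 3, 5], [1, 2, 3, 4, 5])
assert not is_subsequence([3, 1], [1, 2, 3])  # 1 is only reachable before 3 is consumed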
def is_safe(value: str) -> bool:
    """Evaluate if the given string is a fractional number safe for eval()"""
    return len(value) <= 10 and all(c in "0123456789./ " for c in set(value))
115cfe5c2c429154ba4af454e9769bf0ec33c059
502,713
import mimetypes

def get_extension(mime_type: str) -> str:
    """From a ``mime_type`` return a corresponding file extension, or empty."""
    extensions = sorted(mimetypes.guess_all_extensions(mime_type, strict=False))
    return extensions[0] if extensions else ''
351c8bafcd2ba7f2428445750344d1463e989a1e
568,503
def split_seq(seq, n_chunks):
    """Split the given sequence into `n_chunks`.
    Suitable for distributing an array of jobs over a fixed number of workers.

    >>> split_seq([1,2,3,4,5,6], 3)
    [[1, 2], [3, 4], [5, 6]]
    >>> split_seq([1,2,3,4,5,6], 2)
    [[1, 2, 3], [4, 5, 6]]
    >>> split_seq([1,2,3,4,5,6,7], 3)
    [[1, 2], [3, 4, 5], [6, 7]]
    """
    newseq = []
    splitsize = 1.0 / n_chunks * len(seq)
    for i in range(n_chunks):
        newseq.append(seq[int(round(i * splitsize)):int(round((i + 1) * splitsize))])
    return newseq
e97c07506f8b477d3c6a43ff57948b5661ed8d78
508,297
def crop_keypoint_by_coords(keypoint, crop_coords, crop_height, crop_width, rows, cols):
    """Crop a keypoint using the provided coordinates of bottom-left and top-right corners
    in pixels and the required height and width of the crop."""
    x, y, a, s = keypoint
    x1, y1, x2, y2 = crop_coords
    cropped_keypoint = [x - x1, y - y1, a, s]
    return cropped_keypoint
5a2365a611275fea4d0f5d031127426c88c43905
3,625
from typing import Callable

def firstDerivative(f: Callable, x: float, h: float = 1e-7) -> float:
    """Function to numerically approximate the first derivative about a point `x`,
    given a function `f(x)` which takes a single float as its argument. This function
    uses the central finite difference method, computing the slope of a nearby secant
    line passing through the points `(x - h)` and `(x + h)`.

    Arguments:
        f {Callable} -- Objective function whose derivative is to be computed.
        x {float} -- Point about which the derivative is computed.

    Keyword Arguments:
        h {float} -- Step size (default: {1e-7}).

    Returns:
        float -- Approximation of the first derivative of `f` about point `x`.
    """
    return (f(x + h) - f(x - h)) / (2 * h)
b6ef9862ea62b0a31fd67c438742340b3f9942e1
64,555
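A small illustrative check (an assumption for this write-up, not part of the dataset row): with the default step size, the central difference of sin at 0 should match cos(0) = 1 to well within 1e-6.

import math
approx = firstDerivative(math.sin, 0.0)  # exact derivative is cos(0) = 1
assert abs(approx - 1.0) < 1e-6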
import json

def merge_json_data(old_data: str, new_data: str) -> str:
    """Merge two dictionaries encoded as JSON."""
    if not old_data:
        return new_data
    data = json.loads(old_data)
    data.update(json.loads(new_data))
    return json.dumps(data)
0b89b21528c5b848734e076004ba0db5480602f0
167,854
from typing import Optional

def read_text_file(file_name: str) -> Optional[str]:
    """
    Reads file and returns its text

    :param file_name: name of a file to read
    :return: text of the file
    """
    if not file_name:
        return None
    # encoding='utf-8-sig' is used for omitting \ufeff symbol
    # in the beginning of the string after reading from file
    with open(file_name, 'r', encoding='utf-8-sig') as text_file:
        text = text_file.read()
    return text
982a1322c8fee216f3bc2364bbae3b5f6bfa348f
227,557
def test_command(source):
    """
    Creates a command to be run via subprocess

    :param source: str|None
    :return: list
    """
    command = ['pytest', '--cov', source] if source is not None else ['pytest', '--cov']
    return command
a9ba6b11b500f0ce7816a5471048209854f7cb65
268,840
def classifyCharacter(c):
    """ return 0 for delimiter, 1 for digit and 2 for alphabetic character """
    if c in [".", "-", "_", " "]:
        return 0
    if c.isdigit():
        return 1
    else:
        return 2
6f319904860921d4b520706ec98db7de1f44ba24
477,999
from typing import List
import csv

def read_result_csv(file_path: str) -> List:
    """
    Method to read in the first line of a csv to an array.

    :param file_path: Path to csv file.
    :return: Returns array containing the first line of the csv
    """
    with open(file_path, mode='r') as file:
        result = csv.reader(file)
        t = [l for l in result]
    return t[0]
021604a5f89664fedb5271556c9435e5c0a1e48a
526,432
def getKeyPath(parent, keyPath):
    """
    Allows the getting of arbitrary nested dictionary keys via a single
    dot-separated string. For example, getKeyPath(parent, "foo.bar.baz")
    would fetch parent["foo"]["bar"]["baz"]. If any of the keys don't
    exist, None is returned instead.

    @param parent: the object to traverse
    @type parent: any dict-like object
    @param keyPath: a dot-delimited string specifying the path of keys to traverse
    @type keyPath: C{str}
    @return: the value at keyPath
    """
    parts = keyPath.split(".")
    for part in parts[:-1]:
        child = parent.get(part, None)
        if child is None:
            return None
        parent = child
    return parent.get(parts[-1], None)
66765c1ddaf820fe5950c394e5cb41f6b6c88e77
456,190
import re

def parse_td_links(ctx, raw_links, doc_id):
    """
    Parsing links contained in the last <td> elements.

    returns: list of tuples i.e. [(url, type), (url, type)]
    """
    links = []
    # https://regex101.com/r/7iAqds/1
    links_n_types_patt = re.compile(r"href='(.*?)'.*?title='(.*?)'")
    for url_n_type in links_n_types_patt.findall(raw_links):
        link = url_n_type[0]
        l_type = None  # link type
        if 'PDF' in url_n_type[1].upper():
            l_type = 'PDF'
        elif 'ZIP' in url_n_type[1].upper():
            l_type = 'ZIP'
        elif 'HTML' in url_n_type[1].upper():
            l_type = 'HTML'
        else:
            continue
        links.append((link, l_type))
    return links
7a9e78bcbae06fcb6e6fd1f1773f375b0a0e1f6b
171,621
def single_function_to_run(batch, function_to_run):
    """ apply a list of functions on a batch of data """
    for fn in function_to_run:
        batch = fn(batch)
    return batch
6bf769541346177b45156c8d1c9c93318f3fddf1
671,083
def isnamedtuple(x):
    """
    Utility to check whether something is a named tuple

    Since a namedtuple is basically a tuple with some additional metadata,
    we can't just do an `isinstance` check

    Based on https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple/2166841#2166841
    """
    t = type(x)
    b = t.__bases__
    # Named tuples are tuple subclasses
    if len(b) != 1 or b[0] != tuple:
        return False
    # They have a fields tuple
    f = getattr(t, "_fields", None)
    if not isinstance(f, tuple):
        return False
    # All the items in the fields tuple are strings
    return all(type(n) == str for n in f)
7ca9bdcd264cf1d4fdae01f9a8cc0f2ff4d9dd32
86,483
from math import sqrt

def distance(p1, p2):
    """Return the Euclidean distance between `p1` and `p2`"""
    return sqrt((p1.x - p2.x)**2 + (p1.y - p2.y)**2)
d1c28fb056c83e5d3cde7d1cbeda178670ffe6b0
311,183
def parse_boolean(value):
    """Parses a boolean from text, i.e. from "0", "yes", "false" etc."""
    return value.lower() in ("1", "yes", "true")
094af647683fe5aaefe2d2ba6ce17409c6cd6fb4
417,399
from datetime import datetime

def get_iso_from_local_time(user_datetime: datetime):
    """
    Formats the provided datetime as an ISO date string

    :param user_datetime: the datetime in the user's TZ
    :type user_datetime: datetime
    :return: provided datetime formatted into an ISO string
    :rtype: string
    """
    return user_datetime.isoformat()
cbe0d5fcd3a5ffadcee3c8e2609712610a327ed2
233,026
from pathlib import Path

def existing_git_repo(path: Path) -> bool:
    """Check if a git repository exists

    :param Path path: Repo path
    :return: True, if .git directory exists inside path
    """
    return path.is_dir() and Path(path / '.git').is_dir()
60d00eab72c23fbe7fdd4a7ad0868e081c712247
90,036
import torch

def angles_to_rotation_matrix(yaw, pitch, roll, degrees=True):
    """
    Convert pitch, yaw, roll angles to a right-handed rotation matrix.
    It can be used to post-multiply row vectors: rotated = vector * R.
    Based on RotationMatrix.m from the AFLW2000 and 300W LP datasets.

    :param pitch pitch (x) angle
    :param yaw yaw (y) angle
    :param roll roll (z) angle
    :param degrees if True, the angles are in degrees, otherwise in radians.
    """
    # Negate to make the matrix consistent with the head position.
    yaw = -yaw
    roll = -roll
    if degrees:
        pitch = torch.deg2rad(pitch)
        yaw = torch.deg2rad(yaw)
        roll = torch.deg2rad(roll)
    rx = torch.tensor([[1, 0, 0],
                       [0, torch.cos(pitch), torch.sin(pitch)],
                       [0, -torch.sin(pitch), torch.cos(pitch)]])
    ry = torch.tensor([[torch.cos(yaw), 0, -torch.sin(yaw)],
                       [0, 1, 0],
                       [torch.sin(yaw), 0, torch.cos(yaw)]])
    rz = torch.tensor([[torch.cos(roll), torch.sin(roll), 0],
                       [-torch.sin(roll), torch.cos(roll), 0],
                       [0, 0, 1]])
    r = torch.matmul(torch.matmul(rx, ry), rz)
    r = torch.transpose(r, 0, 1)
    return r
e11bf54b1be4744cdde30466631fc7a547d5a0db
412,813
import inspect

def _accepts_args(fn, *args, **kwargs):
    """Returns True if `fn` can accept `*args` and `**kwargs`."""
    try:
        inspect.signature(fn).bind_partial(*args, **kwargs)
        return True
    except TypeError:
        return False
0b912c5398999347b1aaf36931988689c36d2dce
583,397
import time

def check_cli_output_with_mocker(dut, mocker_object, command, max_wait_time, key_index=0):
    """
    Check the command line output matches the mocked data.

    :param dut: DUT object representing a SONiC switch under test.
    :param mocker_object: A mocker instance.
    :param command: The command to be executed. E.g, 'show platform fan'
    :param max_wait_time: Max wait time.
    :return: True if the actual data matches the mocked data.
    """
    time.sleep(max_wait_time)
    result = dut.show_and_parse(command)
    assert len(result) > 0, "Run and parse output of command '{}' failed".format(command)
    return mocker_object.check_result(result)
f16c1d691dcd614d27bcc79a5e57a8eeeab82eaf
600,990
from typing import Callable
from typing import Dict

def loop(setup: Callable, cond: Callable, func: Callable) -> Dict:
    """A for loop like in C, C++, Rust, etc.
    Every function gets a dictionary for talking between them.

    Args:
        setup (Callable): The function that gets called before the loop
        cond (Callable): The condition function returns True or False
        func (Callable): The function to call for each iteration

    Returns:
        Dict: The dictionary
    """
    gls = {}
    setup(gls)
    while cond(gls):
        func(gls)
    return gls
335a3b60be6f26f0e25fd2464c3f31a3302f5e0d
132,205
def getColor(value):
    """Returns a color name dependent on value.

    :param value: The integer that determines the color
    :type value: int
    :return: The color appropriate to the integer
    :rtype: str
    """
    if 95 <= value <= 100:
        return "darkred"
    if 90 <= value <= 94:
        return "red"
    if 70 <= value <= 89:
        return "green"
    if 60 <= value <= 69:
        return "blue"
    if 0 <= value <= 59:
        return "darkblue"
c6424852283ece3bff1909190fb1eef8c44cbda9
182,009
def mb_to_hgt(Psta, mslp=1013.25):  # METERS
    """Convert millibars to expected altitude, in meters."""
    return (1 - (Psta / mslp)**0.190284) * 44307.69396
dbe2df667e54f80031f162f6d3e9aeb9fcee15f4
22,392
def create_entities_dict(name, face_entity_list, solid_entity_list, main_object=None):
    """
    Helper method for creating an entities dictionary.

    :param name: name of the collection of entities
    :param face_entity_list: [face_entity_1, ..., face_entity_n]
    :param solid_entity_list: [solid_entity_1, ..., solid_entity_n]
    :param main_object: main object (usually a solid when the entity only has one)
    :return: entities_dict
    """
    entities_dict = {
        'name': name,
        'faces': face_entity_list,
        'solids': solid_entity_list,
        'main object': main_object
    }
    return entities_dict
9902f4142c8651f709a72c495f3a88f7dc7791b3
247,055
def format_sc(n):
    """Return a LaTeX scientific format for a float n.

    Parameters
    ----------
    n: float.

    Returns
    -------
    str. scientific writing of n in LaTeX.
    """
    a = "%.2E" % n
    b = a.split('E')[0] + ' '
    p = str(int(a.split('E')[-1]))
    if p == '0':
        return b
    return b + r'$10^{' + p + r'}$'
081147c97b9fdad46651b862a7c9bfc2d7f388d7
210,845
import torch

def get_attention_mask(input_ids):
    """Get the default attention mask.

    Args:
        input_ids: Ids representing the input tokens, used for the mask length.

    Returns:
        extended_attention_mask: The attention mask to be used with the model.
    """
    attention_mask = torch.ones_like(input_ids)
    # We create a 3D attention mask from a 2D tensor mask.
    # Sizes are [batch_size, 1, 1, to_seq_length]
    # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
    extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
    extended_attention_mask = extended_attention_mask.to(dtype=torch.float)
    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
    return extended_attention_mask
343fd58fd65cfcd00892d2b88be5ef87060a6c7c
430,860
def to_iso639_part1(language_code):
    """ Convert codes like "en-us" to "en" """
    return language_code.split('-', 1)[0]
53192b1a7b0263cca5bb9c714fc01be4b4b3b1ae
48,116
def get_gjson(gdf):
    """ Generates geojson from geopandas.GeoDataFrame """
    gjson = gdf.to_json()
    return gjson
a4af1516d3dd2a8ce38f1769b699c2d23627b941
273,711
def read_clan_ranking(rankings, clan_tag, clan_stats, attr_name):
    """
    Sets global or local clan ranking info on a clan history object

    :param list rankings:
    :param str clan_tag:
    :param ClanHistory clan_stats:
    :param str attr_name: must be in (local, global, local_war, global_war)
        - see backend.models.clashroyale.ClanHistory
    :return:
    """
    try:
        ranking = next(x for x in rankings if x.tag[1:] == clan_tag)
    except StopIteration:
        return None
    setattr(clan_stats, '{}_rank'.format(attr_name), ranking.rank)
    setattr(clan_stats, 'prev_{}_rank'.format(attr_name), ranking.previous_rank)
    return ranking
38f1a7ec05407f1e4bc241dd45685cc5cf54d085
429,319
def repository_url(datastore_client, name):
    """Returns the URL of a repository, given its short name.

    If a repository moved locations or has multiple locations, a repository
    can have multiple URLs. The returned URL should be the current canonical one.

    Args:
        datastore_client: The client to use for the datastore query.
        name: The short name of the repository.

    Returns:
        A URL string, not including '.git'.
    """
    repositories = datastore_client.query(kind='Repository',
                                          order=('-time_added', ),
                                          filters=(('name', '=', name), )).fetch()
    for repo in repositories:
        return repo['url']
    raise KeyError(f'Unknown repository name: {name}')
0546335ddf8c539a017190d7269615385983a1ce
199,841
def isMyNumber(guess):
    """
    Procedure that hides a secret number.

    :param guess: integer number
    :return: -1 if guess is less than the secret number
              0 if guess is equal to the secret number
              1 if guess is greater than the secret number
    """
    secretnum = 5
    if guess == secretnum:
        return 0
    elif guess < secretnum:
        return -1
    else:
        return 1
df7527c0da298450d9f3f7b33b7f3b1067c3a275
200,044
import time

def readable_time(timestamp=None):
    """
    Return a human-friendly string of a timestamp

    e.g. timestamp: 1582029001.6709404
         readable time: Tue Feb 18 20:30:01 2020

    Args:
        timestamp: a UNIX timestamp

    Returns:
        str, a human-friendly readable string of the argument timestamp
    """
    time_readable_str = str(time.asctime(time.localtime(timestamp if timestamp else time.time())))
    return time_readable_str
f4ae90175a321aacec5fff8b1445b480fd1894bd
141,001
def collect_all_accessible(auth_api,
                           object_types=["Role", "Organization", "EntitySet", "PropertyTypeInEntitySet"],
                           permissions=["OWNER", "READ", "WRITE"]):
    """
    Creates a dict with outputs of get_all_accessible_at_permission_level
    over various object_types and permission levels

    Output format is dict({
        tuple(principal_1): ["OWNER"],
        tuple(principal_2): ["READ"],
        tuple(principal_3): ["READ", "WRITE"]
    })
    """
    out = dict()
    for perm in permissions:
        for object_type in object_types:
            api_output = auth_api.get_accessible_objects(object_type=object_type, permission=perm)
            accessibles = [tuple(a) for a in api_output.authorized_objects]
            for a in accessibles:
                if a in out.keys():
                    out[a].append(perm)
                else:
                    out[a] = [perm]
    return out
3ed9d39dad5ae383186dcdeb2f7d376b472800c1
485,109
def append_results(results, base, c, conf):
    """Append results before calculating metrics."""
    results.append({'truth name': base['name'],
                    'model name': c['name'],
                    'path': c['path'],
                    'location': conf['location'],
                    'var': c['var']})
    return results
c9911e970217c021486e69dd2f031316b4e45ea6
56,579
def add_intercept(X):
    """Add all 1's column to predictor matrix"""
    X['intercept'] = [1] * X.shape[0]
    return X
3ddc9baae14d87eeac29060c34428ed53a922477
70,658
from typing import Union
from datetime import datetime

def to_dt(s: Union[datetime, str]):
    """Convert an ISO 8601 string to a datetime."""
    if isinstance(s, str):
        try:
            return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%f")
        except ValueError:
            return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
    elif isinstance(s, datetime):
        return s
8d5e175aac4e3d717e3cd0d67aaa54377dd07d1e
454,178
from typing import Any

def _infer_flat_input_shape(n_args: int) -> tuple[Any, ...]:
    """Compute the shape of inputs for a function whose inputs are not tuples.

    Args:
        n_args: a number of inputs.

    Returns:
        a shape of inputs.
    """
    return tuple([()] * n_args)
817f65cece9db7c5c5428abba791c31ea641532a
372,113
import fractions

def _frame_num_to_drop_frame_num(
    frame_number: int,
    timebase: fractions.Fraction,
) -> int:
    """
    _frame_num_to_drop_frame_num converts a frame-number to an adjusted frame number
    for creating drop-frame tc.

    Algorithm adapted from:
    https://www.davidheidelberger.com/2010/06/10/drop-frame-timecode/

    :param frame_number: the frame number to convert to a drop-frame number.
    :param timebase: the timebase (not playback) to use for the conversion.
    :returns: The frame number adjusted to produce the correct drop-frame timecode
        when used in the normal timecode calculation.
    """
    # Get the number frames-per-minute at the whole-frame rate
    frames_per_minute_whole = timebase * 60
    # Get the number of frames we need to drop each time we drop frames (ex: 2 or 29.97)
    drop_frames = round(timebase * 0.066666)
    # Get the number of frames are in a minute where we have dropped frames at the
    # beginning
    frames_per_minute_drop = (timebase * 60) - drop_frames
    # Get the number of actual frames in a 10-minute span for drop frame timecode. Since
    # we drop 9 times a minute, it will be 9 drop-minute frame counts + 1 whole-minute
    # frame count.
    frames_per_10minutes_drop = frames_per_minute_drop * 9 + frames_per_minute_whole
    # Get the number of 10s of minutes in this count, and the remaining frames.
    tens_of_minutes, frames = divmod(frame_number, frames_per_10minutes_drop)
    # Create an adjustment for the number of 10s of minutes. It will be 9 times the
    # drop value (we drop for the first 9 minutes, then leave the 10th alone).
    adjustment = 9 * drop_frames * tens_of_minutes
    # If our remaining frames are less than a whole minute, we aren't going to drop
    # again. Add the adjustment and return.
    if frames < frames_per_minute_whole:
        return frame_number + adjustment
    # Remove the first full minute (we don't drop until the next minute) and add the
    # drop-rate to the adjustment.
    frames -= timebase
    adjustment += drop_frames
    # Get the number of remaining drop-minutes present, and add a drop adjustment for
    # each.
    minutes_drop = frames // frames_per_minute_drop
    adjustment += minutes_drop * drop_frames
    # Return our original frame number adjusted by our calculated adjustment.
    return frame_number + adjustment
0b4239cec8ed675a3a15c75588e4ccb93cad2462
479,564
import re

def select_structs(innerclass_list):
    """Select structures from innerclass list.

    Args:
        innerclass_list: raw list with unions and structures extracted
            from Doxygen's xml file.

    Returns:
        Doxygen directives with structures selected from the list.

    Note: some structures are excluded as described on code below.
    """
    rst_output = ""
    for line in innerclass_list.splitlines():
        # structure is denoted by "struct" at the beginning of line
        if line.find("struct") == 0:
            # skip structures that are part of union
            # they are documented by 'doxygenunion' directive
            if line.find("::") > 0:
                continue
            struct_id, struct_name = re.split(r"\t+", line)
            rst_output += ".. doxygenstruct:: "
            rst_output += struct_name
            rst_output += "\n"
            rst_output += "    :members:\n"
            rst_output += "\n"
    return rst_output
844156fb1b3eddfcb81affb59e8e6d9911c93483
620,782
def shake_shake_eval(xa, xb):
    """Shake-shake regularization in testing mode.

    Args:
        xa: Input, branch A.
        xb: Input, branch B.

    Returns:
        Mix of input branches.
    """
    # Blend between inputs A and B 50%-50%.
    return (xa + xb) * 0.5
68eca94d4bf8ea75427297b0324dac9cf7eb10c4
610,177
import random

def generate_uuid_readable(length=9):
    """
    Create a new random uuid suitable for acting as a unique key in the db

    Use this when it's an ID a user will see as it's a bit shorter.
    Duplicates are still unlikely, but don't use this in situations where a
    duplicate might cause problems (check for them!)

    :param length: The number of characters we want in the UUID
    """
    valid = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    # 57^n possibilities - about 6 million billion options for n=9.
    # Hopefully pretty good.
    return "".join([random.choice(valid) for _ in range(length)])
857478e749ebcc01dc4d78777404813bddc19863
273,936
import string

def _clean_up_string_entry(entry):
    """Remove leading and trailing whitespace and quote marks."""
    return entry.strip(string.whitespace + '"\'')
d296974953f0b59f899ae40b3d32629da97cf98b
438,373
def count_receive(link_counters):
    """Find the receive count in the counters returned by a CommunicationLink."""
    for processor in link_counters.values():
        if 'receive' in processor:
            return processor['receive']
e869ae163a2d06afd36c7005dc5b47fef7e67abe
476,216
def myreplace(old, new, s):
    """ Replace all occurrences of old with new in s. """
    s = " ".join(s.split())
    return new.join(s.split(old))
837ffc3bfa4fa26e3886e168106f12b087a42d36
289,312
def batch_tokenize(tokenizer, attacked_text_list):
    """ Tokenizes a list of inputs and returns their tokenized forms in a list. """
    inputs = [at.tokenizer_input for at in attacked_text_list]
    if hasattr(tokenizer, "batch_encode"):
        return tokenizer.batch_encode(inputs)
    else:
        return [tokenizer.encode(x) for x in inputs]
317fb8b113ff2c83dcaa2d50dd933d187a6f0609
502,553
def source_dx_dy(source_pos_x, source_pos_y, cog_x, cog_y):
    """
    Compute the coordinates of the vector (dx, dy) from the center of gravity
    to the source position

    Parameters
    ----------
    source_pos_x: X coordinate of the source in the camera
    source_pos_y: Y coordinate of the source in the camera
    cog_x: X coordinate of the center of gravity in the camera
    cog_y: Y coordinate of the center of gravity in the camera

    Returns
    -------
    (dx, dy)
    """
    return source_pos_x - cog_x, source_pos_y - cog_y
4dc959fb557268121a2288789a227a8a08a28c83
178,068
def get_field_on_block(block, field_name, default_value=None):
    """
    Get the field value that is directly set on the xblock.
    Do not get the inherited value since field inheritance
    returns value from only a single parent chain
    (e.g., doesn't take a union in DAGs).
    """
    try:
        if block.fields[field_name].is_set_on(block):
            return getattr(block, field_name)
    except KeyError:
        pass
    return default_value
6f48a89a4684869b2b5ceec0a276b5f8117f70f4
71,486
def clean(line):
    """
    Extract elements from line, add fixed format

    :param line: checked line(string)
    :return: String with elements
    """
    result = ''
    for element in line.split(';'):
        result += element.strip(' ') + ';'
    if result[0] == ';':
        result = result[1:]
    return result[:-3] + '\n'
8abe287b8ad39861d8f228b26c708fe9049c905d
266,370
import math

def get_line_ori(x_0, y_0, x_1, y_1):
    """Returns the orientation of a vector defined by two points in 2D space.

    Parameters
    ----------
    x_0 : float
        X coordinate of first point
    y_0 : float
        Y coordinate of first point
    x_1 : float
        X coordinate of second point
    y_1 : float
        Y coordinate of second point

    Returns
    -------
    float
        Orientation of the vector (radians)
    """
    return math.atan2(y_1 - y_0, x_1 - x_0)
d884ad1068d7abc926ce1e3fd327e277257f1ec5
345,288
def get_repo_name(url: str) -> str:
    """
    Get the name of the repo.

    :param url: The URL on which the name of the repo will be retrieved.
    :return: The name of the repo.
    """
    return url.split('https://github.com/')[1].split('/')[1]
d2f7c14e424b0233b6867ae8311b4177c379b61a
639,951
def extractFolders(folders):
    """
    convert a string of folders to a list of tuples (db, schema, node)

    :param folders: a string of folders, each given as db,schema,node and separated by '-'
    :return: a list of tuples (db, schema, node)
    """
    output = []
    folderList = folders.split('-')
    for folder in folderList:
        folderComponents = folder.split(',')
        output.append((folderComponents[0], folderComponents[1], folderComponents[2]))
    return output
b5ba5e2ce3fcbc33e51fea06d2019c36b65aaece
639,240
def convert_to_str(value):
    """Convert True/False to yes/no and all other values to strings."""
    if isinstance(value, bool):
        if value:
            return "yes"
        return "no"
    return str(value)
dfa9dbaa31e18ec8c1433dd81e80b578fd8fe541
107,191
import pickle

def restore_object(filename):
    """
    Read a `pickle` file and return the saved objects.
    """
    try:
        with open(filename, 'rb') as bkp:
            reading = pickle.load(bkp)
    except (FileNotFoundError, PermissionError):
        message = 'There was a problem reading the pickle.\n'
        message += 'Please, check the path and permissions.'
        raise UserWarning(message)
    return reading
93d1de6e21bfbf393a29db72113c6e1d954f791f
124,480
def is_verb(tok):
    """ Is this token a verb """
    return tok.tag_.startswith('V')
a42e92596de719d55810bbe8012f7418cc7d9be8
688,772
from collections import namedtuple

def tol_cset(colorset=None):
    """
    Discrete color sets for qualitative data.

    Define a namedtuple instance with the colors.
    Examples for: cset = tol_cset(<scheme>)
      - cset.red and cset[1] give the same color (in default 'bright' colorset)
      - cset._fields gives a tuple with all color names
      - list(cset) gives a list with all colors
    """
    namelist = ('bright', 'high-contrast', 'vibrant', 'muted', 'medium-contrast', 'light')
    if colorset is None:
        return namelist
    if colorset not in namelist:
        colorset = 'bright'
        print('*** Warning: requested colorset not defined,',
              'known colorsets are {}.'.format(namelist),
              'Using {}.'.format(colorset))
    if colorset == 'bright':
        cset = namedtuple('Bcset', 'blue cyan green yellow red purple grey black')
        return cset('#4477AA', '#66CCEE', '#228833', '#CCBB44', '#EE6677',
                    '#AA3377', '#BBBBBB', '#000000')
    if colorset == 'high-contrast':
        cset = namedtuple('Hcset', 'blue yellow red black')
        return cset('#004488', '#DDAA33', '#BB5566', '#000000')
    if colorset == 'vibrant':
        cset = namedtuple('Vcset', 'orange blue cyan magenta red teal grey black')
        return cset('#EE7733', '#0077BB', '#33BBEE', '#EE3377', '#CC3311',
                    '#009988', '#BBBBBB', '#000000')
    if colorset == 'muted':
        cset = namedtuple('Mcset', 'rose indigo sand green cyan wine teal olive purple pale_grey black')
        return cset('#CC6677', '#332288', '#DDCC77', '#117733', '#88CCEE',
                    '#882255', '#44AA99', '#999933', '#AA4499', '#DDDDDD', '#000000')
    if colorset == 'medium-contrast':
        cset = namedtuple('Mcset', 'light_blue dark_blue light_yellow dark_red dark_yellow light_red black')
        return cset('#6699CC', '#004488', '#EECC66', '#994455', '#997700',
                    '#EE99AA', '#000000')
    if colorset == 'light':
        cset = namedtuple('Lcset', 'light_blue orange light_yellow pink light_cyan mint pear olive pale_grey black')
        return cset('#77AADD', '#EE8866', '#EEDD88', '#FFAABB', '#99DDFF',
                    '#44BB99', '#BBCC33', '#AAAA00', '#DDDDDD', '#000000')
c6edc57be76c8f53cc897f7be58156a1b326fb40
54,738
def count_files_per_issue(aggregated):
    """Count the number of files for each issue.

    :param aggregated: {issue: [file, ...], ...}

    >>> i = count_files_per_issue(dict((('TEST-1111', ('A', 'B', 'C')), ('TEST-1112', ('A', 'D')))))
    >>> i.sort()
    >>> i
    [('TEST-1111', 3), ('TEST-1112', 2)]
    """
    return [(key, len(v)) for key, v in aggregated.items()]
72b2ade5f169b381ae7492330cca8fec0271e89f
339,987
def find(nda, obj):
    """returns the index of the obj in the given nda(ndarray, list, or tuple)"""
    for i in range(0, len(nda)):
        if nda[i] == obj:
            return i
    return -1
39e7c58809299259fbeae523f81264295766d64b
169,478
def isanyinstance(o, classes):
    """calls isinstance on a list of classes. true if any matches"""
    for cls in classes:
        if isinstance(o, cls):
            return True
    return False
3a8a006e92e7e343d8cad429c798ca31e103143a
59,761
def time_in_words(h, m):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/the-time-in-words/problem

    Given the time in numerals we may convert it into words, as shown below:
    ----------------------------------------------
    | 5:00 | -> | five o' clock                  |
    | 5:01 | -> | one minute past five           |
    | 5:10 | -> | ten minutes past five          |
    | 5:15 | -> | quarter past five              |
    | 5:30 | -> | half past five                 |
    | 5:40 | -> | twenty minutes to six          |
    | 5:45 | -> | quarter to six                 |
    | 5:47 | -> | thirteen minutes to six        |
    | 5:28 | -> | twenty eight minutes past five |
    ----------------------------------------------

    At minutes = 0, use o' clock. For 1 <= minutes <= 30, use past, and for 30 < minutes use to.
    Note the space between the apostrophe and clock in o' clock.
    Write a program which prints the time in words for the input given in the format described.

    Args:
        h (int): hour of the day
        m (int): minutes after the hour

    Returns:
        str: string representation of the time
    """
    time = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten",
            "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen",
            "eighteen", "nineteen", "twenty", "twenty one", "twenty two", "twenty three",
            "twenty four", "twenty five", "twenty six", "twenty seven", "twenty eight",
            "twenty nine"]
    # We check for a certain set of cases:
    # Case 1 - we're on the hour, so we use o' clock
    if m == 0:
        return "{0} o' clock".format(time[h-1])
    # Case 2 - we're one minute after, so we use minute (versus minutes later on to describe the time)
    if m == 1:
        return "{0} minute past {1}".format(time[m-1], time[h-1])
    # Case 3 - we're a quarter past the hour
    if m == 15:
        return "quarter past {0}".format(time[h-1])
    # Case 4 - we're half past the hour
    if m == 30:
        return "half past {0}".format(time[h-1])
    # Case 5 - we're a quarter to the next hour
    if m == 45:
        return "quarter to {0}".format(time[h])
    # Case 6 - we check for minutes after the hour, which is until we hit minute 30
    if m < 30:
        return "{0} minutes past {1}".format(time[m-1], time[h-1])
    # Case 7 - this covers the cases where the minutes are after 30 so we're minutes to the next hour
    return "{0} minutes to {1}".format(time[59-m], time[h])
85f2247f01df36ef499105a9940be63eee189100
706,995
import math

def hsv_to_rgb(hue, saturation, value):
    """
    Convert HSV color (hue, saturation, value) to RGB (red, green, blue)

    @param hue: 0=Red, 1/6=Yellow, 2/6=Green, 3/6=Cyan, 4/6=Blue, etc.
    @param saturation: 0.0=Monochrome to 1.0=Fully saturated
    @param value: 0.0=Black to 1.0=Max brightness
    @returns: red, green, blue each in range 0 to 255
    """
    hue = hue * 6.0          # Hue circle = 0.0 to 6.0
    sxt = math.floor(hue)    # Sextant index is next-lower integer of hue
    frac = hue - sxt         # Fraction-within-sextant is 0.0 to <1.0
    sxt = int(sxt) % 6       # mod6 the sextant so it's always 0 to 5
    if sxt == 0:             # Red to <yellow
        red, green, blue = 1.0, frac, 0.0
    elif sxt == 1:           # Yellow to <green
        red, green, blue = 1.0 - frac, 1.0, 0.0
    elif sxt == 2:           # Green to <cyan
        red, green, blue = 0.0, 1.0, frac
    elif sxt == 3:           # Cyan to <blue
        red, green, blue = 0.0, 1.0 - frac, 1.0
    elif sxt == 4:           # Blue to <magenta
        red, green, blue = frac, 0.0, 1.0
    else:                    # Magenta to <red
        red, green, blue = 1.0, 0.0, 1.0 - frac
    invsat = 1.0 - saturation  # Inverse-of-saturation
    red = int(((red * saturation) + invsat) * value * 255.0 + 0.5)
    green = int(((green * saturation) + invsat) * value * 255.0 + 0.5)
    blue = int(((blue * saturation) + invsat) * value * 255.0 + 0.5)
    return red, green, blue
8f571b14168e220d30042049cdf29fae76cf955b
533,259
def small_angle_diff(a1, a2):
    """ Helper function for getting smallest angle difference between two angles """
    return abs((a1 - a2 + 180) % 360 - 180)
80fd2b471a7ac85c05547e528762bab12fef3fc3
352,176
def array_class_from_type(name):
    """
    Given a DataTypeClass class name (such as "BooleanType"), return the
    corresponding Array class name.
    """
    assert name.endswith("Type")
    return name[:-4] + "Array"
bbc9601c232c89f4361cee712ccfbcd2ca192229
243,876
def _prefix_master(master):
    """Convert user-specified master name to full master name.

    Buildbucket uses full master name (master.tryserver.chromium.linux) as
    bucket name, while the developers always use shortened master name
    (tryserver.chromium.linux) by stripping off the prefix 'master.'. This
    function does the conversion for buildbucket migration.
    """
    prefix = 'master.'
    if master.startswith(prefix):
        return master
    return '%s%s' % (prefix, master)
c4a4f715d1b17733bf6d2d2bae89143f62fdbc87
294,553
from datetime import datetime

def age(yob):
    """ Accepts year of birth and returns the person's age. """
    return datetime.now().year - yob
9e16e3739055fe410a2661d896530f99b30a6692
172,957
def objects_to_dictionaries(iterable, fields):
    """
    Convert an iterable into a list of dictionaries; fields should be set
    to a comma-separated string of properties for each item included in the
    resulting list; e.g. for a queryset:

        {{ queryset | objects_to_dictionaries:"id,name" }}

    will return a list like

        [{'id': 1, 'name': 'foo'}, ...]

    providing queryset has id and name fields

    This is mostly to support serialising querysets or lists of model
    objects to JSON
    """
    objects = []
    if fields:
        fields_list = [field.strip() for field in fields.split(',')]
        for item in iterable:
            out = {}
            for field in fields_list:
                out[field] = getattr(item, field)
            objects.append(out)
    return objects
767fa53729ac32fa05fc237f7dac9d0f91ee50fc
229,880
def _get_json_safely(response):
    """
    Check for JSON response errors, and if all clear, return the JSON data
    """
    json = response.json()  # get the JSON
    if "error" in json:
        raise ValueError(json["error"][0])
    return json
9ae0085f512b89cc4f457c78f5ab8670d7e6165e
200,580
def celctorank(celsius):
    """ This function converts celsius to Rankine, with celsius as parameter."""
    rankine = (celsius + 273.15) * 1.8
    return rankine
8f43b8ae84bb7106b30961774624ecffb9f92c74
142,078
def get_accuracy(data_path):
    """
    Load & parse the accuracy data file

    accuracy data format:
        model:hg_s2_mobile_tiny_192_192;dataset:MPI
        Epoch 0:0.03907726641313506
        Epoch 1:0.3704370004326737
        ...

    return:
        label: training model + dataset, used for plotting curve
        epoch: list of epoch numbers
        accuracy: list of accuracy for every epoch
    """
    with open(data_path) as f:
        acc_data = f.readlines()
    acc_data = [c.strip() for c in acc_data]
    # parse line 1 to get model & dataset name for curve label
    model = acc_data[0].split(';')[0].split(':')[-1]
    dataset = acc_data[0].split(';')[-1].split(':')[-1]
    label = model + '_' + dataset
    # parse accuracy data
    accuracy = [float(c.split(':')[-1]) for c in acc_data[1:]]
    epoch = [int(c.split(':')[0].split(' ')[-1]) for c in acc_data[1:]]
    return label, epoch, accuracy
8ac6daccca3a5c13bc3b7dabd4a7cb5e8a743b45
139,813
from typing import Counter

def sum_dicts(a, b):
    """Merge dictionaries, summing their values.

    For example:
        a = {"x": 1, "y": 1}
        b = {"x": 1, "z": 1}
    returns {"x": 2, "y": 1, "z": 1}

    Args:
        a (dict)
        b (dict)

    Returns:
        dict
    """
    return Counter(a) + Counter(b)
77b17417e53c0cebe8ee254697db639abc858f11
532,042
def _trim_zero_to_3_digits_or_less(trim: bool, exponent: int, ticker: str) -> str:
    """Returns a string representation of the number zero, limited to 3 digits

    This function exists to reduce the cognitive complexity of pretty_ether
    """
    if trim:
        zero = "0"
    else:
        if exponent == -1:
            zero = "0.0"
        else:
            zero = "0.00"
    return zero + " " + ticker if ticker else zero
3a710a1da03daa43bf0ddaefb54f9b9ef613d10d
445,857
from typing import List
import random

def choose_seq_indices(
        input_seqs: int,
        proportion: float = 1.0,
        max_records: int = 10000) -> List[int]:
    """Generate a list of seq indices.

    Args:
        input_seq (int): number of input_sequences
        proportion (float): proportion of seqs to extract if not larger than
            max_records and not larger than nr of input sequences
        max_records (int): max_nr of records to extract if not larger than
            the number of input sequences

    Returns:
        list of indices (list(int))
    """
    # default number of sequences to get: entire set
    nr_seqs_to_get = input_seqs
    # should we extract a specified proportion?
    if proportion < 1.0:
        nr_seqs_to_get = int(proportion * input_seqs)
    elif max_records < nr_seqs_to_get:
        # we want a specific number
        # if it's not larger than the number of sequences we have
        nr_seqs_to_get = max_records
    # get a random subset of sequence indices
    # sorted from lowest to highest
    seqs_ids = list(range(input_seqs))
    random.shuffle(seqs_ids)
    # get the indices of sequences to extract, sorted
    seqs_ids = seqs_ids[:nr_seqs_to_get]
    return sorted(seqs_ids)
abc47c52361a02481e55e81f093052e5e5465f29
580,302
from typing import Dict

def load_env_file(filename: str) -> Dict[str, str]:
    """Load environmental variables from a `.env` file."""
    result = {}
    with open(filename) as f:
        for line in f.readlines():
            if line.startswith('#') or not line.strip():
                # Skip this line. It's a comment or just whitespace.
                continue
            # Remove the end of line character
            line = line.rstrip()
            # Split the line into key and value
            key, value = line.split('=', 1)
            # Add the variable to the result
            result[key] = value
    return result
ffa4a6b99612e8dced78024c1d089958620cca7f
261,852
def _getpricestring(amount, denom):
    """Return a human-readable price string"""
    return '{:g} {}'.format(
        amount, denom + 's' if denom in ('Key', 'Weapon') and amount != 1 else denom)
195f47283f9cc88a89c32d29cda55b643d71be0f
213,717
import requests

def query_test_run(sotest_url, testrun_id):
    """ Queries the status of a test run """
    url = "{}/api/query/{}".format(sotest_url, testrun_id)
    r = requests.get(url)
    return r.json()
a6195066dd6814379a253d85288e2c20f9589846
158,755
def load_dat(filename: str) -> list:
    """
    Loads the contents of the DAT data file into a list with each line
    of the file into an element of the list.
    """
    with open(file=f'{filename}.dat', mode=u'r') as dat_file:
        return dat_file.read().split(u'\n')
25f13fedb8bb0d2927e794b20691c648b8502bc5
70,920
def get_set_members(instance, sets):
    """Get set members relative to a list of sets

    :param instance: Pyomo Instance
    :param sets: List of strings with the set names
    :return: A list with the set members
    """
    sm = []
    for s in sets:
        sm.append([v for v in getattr(instance, s).data()])
    return sm
cb590a248cc01e86611dcfe6b701183f89d2b5b5
499,947
def is_desired_post(post):
    """Determine if a post is a request, self post, or link to an untested host.

    - Request posts usually have extra text, overlays, etc that need to be
      removed from the image, so we'll ignore those.
    - Fulfilled requests require parsing the comments for all edited versions
      of the image, so we'll ignore these too.
    - Most wallpaper subs require walls to be in linked posts, so we'll skip
      self posts.
    - Any url that's a direct link to an image file is fine.
    """
    has_approved_host = False
    is_request = "[request]" in post.title.lower()
    file_types = ("jpg", "jpeg", "png")
    hosts = [
        "imgur.com",
        "iob.imgur.com",
        "i.imgur.com",
        "i.redd.it",
        "i.reddituploads.com",
        "cdn.awwni.me",
        "a.pomf.cat"
    ]
    if (
        any(host in post.url for host in hosts)
        or post.url.endswith(file_types)
    ):
        has_approved_host = True
    if post.link_flair_text:
        flair = post.link_flair_text.lower()
        is_request = ("request" in flair) or ("fulfilled" in flair)
    return (not (is_request or post.is_self)) and has_approved_host
83a67f0c086f352d3575660b1fc4ea165a0278ac
460,733
def ev_strip(short):
    """Strip zeros, taking into account E / V codes."""
    if short.startswith("E") or short.startswith("V"):
        return short[0] + short[1:].lstrip("0")
    else:
        return short.lstrip("0")
9070feb4f55ebb2cda4eb1047a5002368321919a
288,828
import struct

def unpack_int(data):
    """Extract an int from 4 bytes, big endian."""
    return struct.unpack(">i", data[:4])[0]
4c322a0d2a85077f3b80d85f65bfc57f3d182869
54,405
import string

def remove_punct(target):
    """
    Removes punctuation from a text.

    Arguments:
        target: iterable object containing text.

    Returns:
        punctuation stripped text
    """
    output = target.translate(
        str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    ).replace(' ' * 4, ' ').replace(' ' * 3, ' ').replace(' ' * 2, ' ').strip()
    return output
d65e57cbb05e23b7029cb79fe89a885cfcceaf98
360,733
def comp_hex(x):
    """
    Get the components of a hex RGB color as strings (`"00"` to `"ff"`).
    """
    return x[0:2], x[2:4], x[4:6]
ce47cd13b1e68fb68b8063fa92006f27b796b766
393,265
def get_spectral_characteristics_for_lid(local_identifier, label):
    """ Search a PDS4 label for Spectral_Characteristics of a data structure with local_identifier.

    Parameters
    ----------
    local_identifier : str or unicode
        The local identifier of the data structure to which the spectral characteristics belong.
    label : Label or ElementTree Element
        Label for a PDS4 product with-in which to look for the spectral characteristics.

    Returns
    -------
    Label, ElementTree Element or None
        Found Spectral_Characteristics section with same return type as *label*,
        or None if not found.
    """
    matching_spectral = None

    # Find all the Spectral Characteristics classes in the label
    spectra = label.findall('.//sp:Spectral_Characteristics')
    if not spectra:
        return None

    # Find the particular Spectral Characteristics for this LID
    for spectral in spectra:
        # There may be multiple local internal references for each spectral data object sharing
        # these characteristics. Also look in both PDS and SP namespace due to standards changes
        # in the spectral dictionary
        references = spectral.findall('sp:Local_Internal_Reference') + \
                     spectral.findall('Local_Internal_Reference')
        for reference in references:
            # Look in both PDS and DISP namespace due to standards changes in the display dictionary
            lid_sp = reference.findtext('sp:local_identifier_reference')
            lid_pds = reference.findtext('local_identifier_reference')
            if local_identifier in (lid_sp, lid_pds):
                matching_spectral = spectral
                break
    return matching_spectral
5195e85055122c86312177464ba34f716f79a2fc
284,859
def matrix_get_translation(m):
    """
    Get translation component of a 4x4 matrix.

    :param m: Matrix
    :return:
    """
    r = range(3)
    return [m[i][3] for i in r]
a42c24f8a2fa38806e6b74e4c52f6f2926d8aca5
434,048
def _subject_member_format(member_ref):
    """Format a member ref for consumption outside of this module."""
    return {
        'id': member_ref['id'],
        'subject_id': member_ref['subject_id'],
        'member': member_ref['member'],
        'can_share': member_ref['can_share'],
        'status': member_ref['status'],
        'created_at': member_ref['created_at'],
        'updated_at': member_ref['updated_at'],
        'deleted': member_ref['deleted']
    }
24c6ad6123aebb3f0268c7eeed805ccd1a3e5aab
606,039
def count_newlines(x: str, before=True) -> int:
    """
    Count the number of newlines from front or back. Here all newlines are
    counted while ignoring whitespace. Stop at first character that is not
    newline or whitespace. If there are no non-newline or whitespace
    characters, return infinite number of newlines.
    """
    num_nl = 0
    whitespace = [" ", "\r", "\t"]
    y = x if before else reversed(x)
    for ch in y:
        if ch == "\n":
            num_nl += 1
        elif ch in whitespace:
            continue
        else:
            return num_nl
    return num_nl
2396bc072f518c5fff86b74d99086aedcb9cbe1a
251,442
def substitute_variables(cmd, variables):
    """Given a cmd (str, list), substitute variables in it."""
    if isinstance(cmd, list):
        return [s.format(**variables) for s in cmd]
    elif isinstance(cmd, str):
        return cmd.format(**variables)
    else:
        raise ValueError(f"cmd: {cmd}: wrong type")
3a6bbf276c161daf6508c3a8614eb9840d7e3f3c
375,881
def does_flavor_exist(nova_client, flavor_name):
    """
    Check if flavor exists
    """
    for flavor in nova_client.flavors.list():
        if flavor.name == flavor_name:
            return True
    return False
bdd853f606bcdb94c92c4417363bf9d0b8d2c36a
667,428
def construct_node_config(*, name: str, remote_fs: str = '/tmp', executors: int = 2) -> dict:
    """
    Args:
        - name: Node name
        - remote_fs: Remote node root directory
        - executors: Number of node executors

    Returns:
        - dict: return ready to use dict with nodes.create()
    """
    return {
        'name': name,
        'nodeDescription': '',
        'numExecutors': executors,
        'remoteFS': remote_fs,
        'labelString': '',
        'launcher': {
            'stapler-class': 'hudson.slaves.JNLPLauncher',
        },
        'retentionStrategy': {
            'stapler-class': 'hudson.slaves.RetentionStrategy$Always',
        },
        'nodeProperties': {
            'stapler-class-bag': 'true'
        }
    }
2c7255d380c4c080f829dcef613cd772620a0299
81,313
def count_words(wordlst: list):
    """
    count words in tweet text from list of list, dict, or str

    :param wordlst: list of tweets
    :return: word count, tweet count
    """
    wrd_count: int = 0
    tw_count: int = 0
    for tw in wordlst:
        if isinstance(tw, dict):
            tw_wrds: list = tw['text'].split()
        elif isinstance(tw, str):
            tw_wrds: list = tw.split()
        else:
            tw_wrds: list = tw
        tw_count += 1
        wrd_count += len(tw_wrds)
    return wrd_count, tw_count
8f75a6424de22190fafd9cd97bd03decbb5e95c3
448,332
import itertools

def is_valid_ean13(s):
    """Check whether a string is a valid EAN-13.

    EAN = `European Article Number <https://en.wikipedia.org/wiki/European_Article_Number>`_

    The string must not contain any separators; only the characters ``0-9``
    are allowed and the length of the string must be 13.

    :param str s: the string
    :return: ``True`` if the string is a valid EAN-13
    :rtype: bool
    :raise ValueError: if a character is not allowed or the length is wrong
    """
    if len(s) != 13:
        raise ValueError(f'length of string must be 13 not {len(s)}')
    if not s.isdecimal():
        raise ValueError('only characters 0-9 are allowed')
    return sum(int(c) * w for c, w in zip(s, itertools.cycle((1, 3)))) % 10 == 0
86327e93a68040dd181ef77be75cbb3e7196e82b
166,812
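A brief illustrative check for is_valid_ean13 above (not part of the dataset row; the sample code "4006381333931" is, to the best of my knowledge, the commonly cited valid EAN-13 example, and the second value simply alters its check digit):

assert is_valid_ean13("4006381333931")       # weighted digit sum is 90, divisible by 10
assert not is_valid_ean13("4006381333930")   # check digit changed, sum becomes 89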
import json

def _names_to_bytes(names):
    """Reproducibly converts an iterable of strings to bytes

    :param iter[str] names: An iterable of strings
    :rtype: bytes
    """
    names = sorted(names)
    names_bytes = json.dumps(names).encode('utf8')
    return names_bytes
461850e92363fdb53d508b37d3e4e311a994e4d4
503,225
def _to_z_score(scaled_score, expected_score, test):
    """
    Turn scaled and expected score to a z score

    :param scaled_score: scaled score, result from raw_to_scaled function
    :param expected_score: expected score, result from get_expected_score function
    :param test: test of interest
    :return: z-score for the test of interest
    """
    denominator_dict = {'sdmt': 2.790, 'bvmt': 2.793, 'cvlt': 2.801}
    denominator = denominator_dict.get(test)
    z_score = (scaled_score - expected_score) / denominator
    return z_score
ccfe2686481809a4ddc4bbb5e94c4ec4fe5c6318
146,432
def pstres(self, key="", **kwargs):
    """Specifies whether prestress effects are calculated or included.

    APDL Command: PSTRES

    Parameters
    ----------
    key
        Prestress key:

        OFF - Do not calculate (or include) prestress effects (default).

        ON - Calculate (or include) prestress effects.

    Notes
    -----
    The PSTRES command specifies whether or not prestress effects are to be
    calculated or included. The command should be issued after the ANTYPE
    command.

    Prestress effects are calculated in a static or transient analysis for
    inclusion in a buckling, modal, harmonic (Method = FULL), or substructure
    generation analysis. If used in the solution processor (/SOLU), this
    command is valid only within the first load step.

    If you apply thermal body forces during a static analysis to calculate
    prestress effects, do not delete the forces during any subsequent full
    harmonic analyses. If you delete the thermal body forces, the thermal
    prestress effects will not be included in the harmonic analysis.
    Temperature loads used to define the thermal prestress will also be used
    in the full harmonic analysis as sinusoidally time-varying temperature
    loads.

    A prestress effect applied with non-follower loads resists rigid body
    rotation of the model. For example, an unsupported beam with axial
    tensile forces applied to both ends will have two nonzero rotational
    rigid body modes.

    If tabular loading (*DIM,,TABLE) was used in the prestress static
    analysis step, the corresponding value of TIME will be used for tabular
    evaluations in the modal analysis.

    This command is also valid in PREP7.
    """
    command = f"PSTRES,{key}"
    return self.run(command, **kwargs)
507c1599b5a1a70788d248942c1f993e62e0065f
524,297
def prevent_bad_replacement(overrides_dict, df):
    """Checks that there are no values equal to the placeholder-values which are
    used as keys in the overrides dict. If they did exist, this would cause a bad
    replacement. It is very unlikely that this is needed, but just here as an
    extra safety step"""
    place_holder_values = ['FILL_NA', 'OldValue1', 'OldValue2', 'OldValue3', 'to']
    results_dict = {}
    for search_value in place_holder_values:
        if search_value in df.values:
            results_dict[search_value] = True
            raise Exception(f"Value {search_value} has been found in the dataframe")
    return None
11f2fc459d2bd471681eb5846131c4baa84ef273
461,715
from typing import Dict
from typing import List

def sort_dict_desc(word_dict: Dict) -> List:
    """Sort the dictionary in descending order.

    :param word_dict: the dictionary of word.
    :return: sorted list of dictionary.
    """
    # sort by value
    # word_dict.items() to get a list of (key: value) pairs
    sorted_word_dict = sorted(word_dict.items(), key=lambda item: item[1])
    # descending order
    sorted_word_dict.reverse()
    return sorted_word_dict
87ea93145b7d4d7a867845a52ae3ef1c8403075d
54,121
def represent(item):
    """Represents callables and values consistently

    Args:
        item: The item to represent

    Returns:
        Item representation
    """
    return repr(item.__name__) if callable(item) else repr(item)
d98f2f0cf1979b833908bc539486fb2b12669dd1
154,984
from textwrap import dedent

def authorized_keys(tmpdir):
    """Return the filename of an authorized_keys file"""
    authorized_keys_file = tmpdir.join("authorized_keys")
    authorized_keys_content = dedent(
        r'''
        ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6N/eQ1L0MxzzXgVpnnJCn6x+g+3434/ABLgG6IXbekYqBDWFOUfjslt90eTXRv5IVex1eY5RpR1d7dnFhYxW6bCZdrAryu9fPYSidFL3MGWTtijFRmSc9nCJVAP5+DY1xjA5aCtYq0MbhQMTRtBvOGPxFjXeG6sZ3dP698/am7KYjCUSqS2RBInEJ9J9Ym4lpCVptmnHWEJM8mc2PEa0PsuGBtxp2IaD7WO56ekaxy0+FlH2F93GsLDDqksxbcVp0UWoDW111CwFU3218z4TvjnftGoyLHMRDc6UmJallbpv/Ru+WeGCuCbzvzeoGVROxfBhLUji4idtMZlnWy3trQ== user@host
        ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6clJaaa/7QtvyeTtD23AGEBau0BKePGtewVnoQjZ3UxAkJPUYslOIr4tyHbRZFB6mf8U2xUDgVSd99QwIJQIDpA5jHT6ro0lb9hhUGqqaqX0UKKm0s2w3LscuiSgUY+dfBQAhX48T8YNG2MLtx7fCHigV7lTUgJZci44QvcoHkUM9W89SmG1qb7Z4lFE/WFQWkymH+JPnwC4fkKYxBq5FcwoHvn2+Jf0uhHlxnrGbg+xJJjUFbCkL6OdH4XZjkK1Tg5FqS8vL6Wbl7NY7NG0MSDQrVzzDbDSmqvLc7vHnbkENJSg3p/pLTY5ILXL2SOVJOuvBqWgIVjU/AjX18UcYQ== user@host2
        ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5dm/9BAeahUX5kQD90/2TMppY/mNoBHyie2RsKvAptjJBDtq4n9JQz0gKYKUAeeek5blrsoRTsobbDdjvZp4M4PJ1959sNvkyrNgqu9OtkxJRa8l+gpGBxq2bTJ5+UXHmYLCjCtVR+Ln/1BznV525LZac5s9hrtobJrLvFFAuvuIQXdetkJ2FKH+ZL8IJhDUNrPJznaYcHRlCxPfxZmfp6HBByWce5pN1s+p7NkqVFCjdusxr/a+SxeZr6f/yJGBGiIOnxc9tVl2bZ97MbwJ02ayCaTJCXRCtiAs+oKtD4Ev8wTXuLghvT2YiFV0focpRSgV0BMG3uzuklLLyjSLdQ== user@@host3
        '''
    ).strip()
    authorized_keys_file.write(authorized_keys_content)
    return str(authorized_keys_file)
a7d39c1dc4b2b9d5088e8f829db90bd79bba9d18
394,266
def _check_git_access(req, dataset):
    """Validate HTTP token has access to the requested dataset."""
    user = 'user' in req.context and req.context['user'] or None
    # Check that this request includes the correct token
    if user is not None and 'dataset:git' in user['scopes'] and user['dataset'] == dataset:
        return True
    else:
        return False
61c027d637f7040e33d64f0799c993c5c979827c
623,836