content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def print_multi_list(list_ch_info=None, sep=";"):
    """Print the summary of downloaded claims from multiple channels.

    This is meant to be used with the returned list from
    `ch_download_latest_multi`.

    Parameters
    ----------
    list_ch_info : list of lists of dicts
        A list of lists, where each internal list represents one channel,
        and this internal list has as many dictionaries as downloaded claims.
        The information in each dictionary represents the standard output
        of the `lbrynet_get` command for each downloaded claim.
        If the download fails, then the corresponding item in the list
        may be `False`, in which case no claim information is printed.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.

    Returns
    -------
    bool
        It returns `True` if the information was read and printed
        without problems.
        If there is a problem or no list of items, it will return `False`.
    """
    if not list_ch_info or not isinstance(list_ch_info, (list, tuple)):
        print("Print information from a list of lists from multiple "
              "channels obtained from `ch_download_latest_multi`.")
        return False

    if len(list_ch_info) < 1:
        print("Empty list.")
        return False

    # Flatten the channel lists; falsy channels or claims become a None
    # placeholder so the numbering still accounts for failed downloads.
    flat_list = []
    for sublist in list_ch_info:
        if not sublist:
            flat_list.append(None)
            continue
        for item in sublist:
            flat_list.append(item if item else None)

    n_items = len(flat_list)

    print("Summary of downloads")
    out_list = []

    for it, item in enumerate(flat_list, start=1):
        out = f"{it:2d}/{n_items:2d}{sep} "

        if not item:
            out_list.append(out + "empty item. Failure establishing "
                                  "server connection?")
            continue

        if "claim_id" in item:
            out += f"{item['claim_id']}{sep} "
            out += (f"{item['blobs_completed']:3d}/"
                    f"{item['blobs_in_stream']:3d}{sep} ")
            out += f'"{item["channel_name"]}"{sep} '
            out += f'"{item["claim_name"]}"'
            out_list.append(out)
        elif "error" in item:
            out_list.append(out + f'"{item["error"]}"')
        else:
            out_list.append(out + "not downloaded")

    print("\n".join(out_list))
    return True
51aefe169399561e5d0e0f6c21576c9ec8e4813f
53,655
import random


def randBytes(b: int = 2) -> bytes:
    """Return ``b`` random bytes.

    :param b: number of bytes to generate
    :return: a ``bytes`` object of length ``b``
    """
    # One independent 8-bit value per requested byte.
    return bytes(random.getrandbits(8) for _ in range(b))
51af4722e9710f0cff315a09bb9ce2f6e956ee6a
75,884
def pg_utcnow(element, compiler, **kw):
    """Compile to a Postgres UTC timestamp SQL expression."""
    # The element/compiler arguments come from the SQLAlchemy @compiles
    # hook signature; the emitted SQL is a constant.
    return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
3deb31b98b8c75417ff0ecf5c7b5fa9eb0b91df9
695,769
def _to_extended_delta_code(seconds: int) -> str:
    """Return the deltaCode encoding for the ExtendedZoneProcessor.

    The encoding is roughly deltaCode = (deltaSeconds + 1h) / 15m; the
    literal "+ 4" term is the one-hour offset expressed in 15-minute
    units.  Using the lower 4 bits of the uint8_t field, this handles
    deltaOffsets from -1:00 to +2:45.
    """
    quarter_hours = seconds // 900  # 900 s == 15 min
    return f"({quarter_hours} + 4)"
26adc142df031d38cd383ba12c3fac8ac0030e75
212,007
def MemorySizeBytesToGB(memory_size):
    """Return a memory size value in GB.

    Args:
      memory_size: int, size in bytes, or None for default value

    Returns:
      float, size in GB rounded to 3 decimal places (falsy inputs are
      passed through unchanged so callers keep their "unset" marker)
    """
    if not memory_size:
        return memory_size
    gigabytes = memory_size / float(1 << 30)
    return round(gigabytes, 3)
a98f4f96f5d49840077b824324fd98d967dac185
516,136
def _genpoints(xlist, lbound, ubound):
    """Generate the points needed to plot interval-estimate lines.

    Parameters
    ----------
    xlist : array_like
        Values on the x-axis (must support ``.tolist()``).
    lbound : array_like
        Lower bounds of the estimates.
    ubound : array_like
        Upper bounds of the estimates.

    Returns
    -------
    points : list
        One ``[(x, lo), (x, hi)]`` vertical segment per x value.
    """
    xs = xlist.tolist().copy()
    lows = lbound.tolist().copy()
    highs = ubound.tolist().copy()
    # Pair each x with itself (vertical segment) and with its (lo, hi) span.
    return [list(zip((x, x), span)) for x, span in zip(xs, zip(lows, highs))]
e6edb25e7bfce3af5890f68c0d8f865db57acb50
492,091
def yoToZA(yo_i):
    """Return the ZA designator, or special code, for a yo id.

    That is, yo ids ``0..16`` map to::

        0, 1, 1001, 1002, 1003, 2003, 2004, 0, -8, -9, -10,
        1, 1001, 1002, 1003, 2003, 2004

    with the meanings none, n, p, d, t, He3, He, g, b+, b-, EC,
    n, p, d, t, He3, He.  yo_i must be a python integer.
    """
    if yo_i < 0 or yo_i > 16:
        raise Exception("\nError in yoToZA: unsupported yo value = %s" % repr(yo_i))
    # ids 11..16 repeat the particle codes of ids 1..6.
    index = yo_i - 10 if yo_i > 10 else yo_i
    return (0, 1, 1001, 1002, 1003, 2003, 2004, 0, -8, -9, -10)[index]
4a877b729aa9a5ab397409a8f1784844f188fa52
116,484
def undo_reshape_padding(image, orig_shape):
    """Undo the padding added by the `reshape` function in `utils.shape_io`.

    Args:
        image: numpy array that was padded to a larger size by reshape
        orig_shape: original shape before padding (do not include channels)

    Returns:
        the image cropped back to `orig_shape`
    """
    depth, height, width = orig_shape[0], orig_shape[1], orig_shape[2]
    return image[:depth, :height, :width]
6b4629a81b699c610b406801f78e6b953bd29083
498,153
from typing import List
from typing import Dict
from typing import Any
from typing import Optional


def get_entity(entity_id: str, entities: List[Dict[str, Any]]) -> Dict:
    """Retrieve the entity with the given id from a list of entities.

    Parameters
    ----------
    entity_id: String identifier of the relevant entity.
    entities: List of entities.

    Returns
    -------
    Entity from the list whose ``'id'`` matches ``entity_id``.

    Raises
    ------
    Exception
        If no entity in the list has the requested id.
    """
    for candidate in entities:
        if candidate['id'] == entity_id:
            return candidate
    raise Exception(f'The entity_id {entity_id} was not found in:\n {entities}')
28dbc7475ce1bfb38fe44ba6dcd273735af80cf8
587,733
import re


def uppercase_lowercase(a_string):
    """Return True if `a_string` contains one upper-case letter followed
    by one or more lower-case letters, else False.
    """
    # re.search scans anywhere in the string, not just at the start.
    return re.search("[A-Z][a-z]+", a_string) is not None
cd0623fd885b1f6d4833b901852985f4317e9156
677,021
def scale_minmax(X, min=0.0, max=1.0):
    """Min-max scale a numpy array into the range [min, max].

    PARAMS
    ======
        X (numpy array) - array to scale
        min (float) - minimum value of the scaling range (default: 0.0)
        max (float) - maximum value of the scaling range (default: 1.0)
    """
    # NOTE: `min`/`max` shadow the builtins; names kept for keyword callers.
    normalized = (X - X.min()) / (X.max() - X.min())
    return normalized * (max - min) + min
1aed3f5f854d973bd81b821d637c17f741fa895a
77,918
import re


def ngram_parser(text, ngram_parser):
    """Parse out N-grams using a custom regex.

    :param text: a string object
    :param ngram_parser: a compiled regex whose ``findall`` yields tuples;
        the first group of each match is taken as the 1-gram
    :return: a list of non-empty 1-grams
    """
    # Edge cases: pad runs of '--'/'..' and doubled single quotes with
    # spaces so they do not glue neighbouring tokens together.
    padded = re.sub(r'(([\-\.]{2,})|(\'\'))', r' \1 ', text)
    return [match[0] for match in ngram_parser.findall(padded) if match[0] != '']
e8db18a88b4c3ed0c6c469617f7f75e87aef7a65
163,917
def add_prefix(dct, prefix, sep="/"):
    """Add a prefix to all keys in `dct`.

    :param dct: Input dict
    :type dct: dict
    :param prefix: Prefix
    :type prefix: str
    :param sep: Separator between prefix and key
    :type sep: str
    :return: New dict with prefix prepended to all keys
    :rtype: dict
    """
    prefixed = {}
    for key, value in dct.items():
        prefixed[prefix + sep + key] = value
    return prefixed
ae6b8fc74f0c655b14f3c8b8ae29d3d9b67bc601
416,526
def compare_webapps(list1, list2):
    """Return the dicts from `list1` whose (name, id) pair also occurs
    in `list2`.
    """
    # Build the membership set once for O(1) lookups.
    known = {(entry['name'], entry['id']) for entry in list2}
    return [entry for entry in list1 if (entry['name'], entry['id']) in known]
d02bac02e6dea5bbf7188b7e3afa6c6cbb64e054
641,274
def seq_idx(df):
    """Provide a boolean indexer for South East Queensland.

    Parameters:
    -------
    df : dataframe
        a population dataframe

    Raises
    ------
    ValueError
        if the dataframe does not contain an asgs_name column

    Returns
    -------
    idx_seq : boolean indexer
        an indexer to Tudge's definition of SEQ.
    """
    if "asgs_name" not in df.columns:
        raise ValueError("asgs_name column not in dataframe")
    # SEQ = Greater Brisbane plus the three surrounding regions.
    regions = ["Greater Brisbane", "Sunshine Coast", "Gold Coast", "Toowoomba"]
    return df.asgs_name.isin(regions)
d7816d3e21ede1b2d8d46e096b5c25ee5318b8ab
506,297
def unix_time_to_mjd(time_in_unix):
    """Convert a unix timestamp to MJD (Modified Julian Date).

    86400 is the number of seconds per 24 hours, and 40587 is the unix
    epoch (1970-01-01) expressed in MJD.
    """
    SECONDS_PER_DAY = 86400
    UNIX_EPOCH_MJD = 40587
    return time_in_unix / SECONDS_PER_DAY + UNIX_EPOCH_MJD
77dfde227cab75e0db0b38af1a68bfd0d933fa42
152,621
import yaml


def info_dumper(infos):
    """Dump an information dictionary to a formatted string.

    Currently just a thin wrapper around yaml.dump, kept as a single
    customization point should the format ever need to change.
    """
    return yaml.dump(infos)
16a6217054801ef6924741be25512feb828e28a1
306,281
def ref(logical_name):
    """The intrinsic function Ref returns the value of the specified
    parameter or resource.

    When declaring a resource in a template that needs to refer to another
    template resource by name, use Ref. In general Ref returns the name of
    the resource (e.g. an AWS::AutoScaling::AutoScalingGroup name); some
    resources return another significant identifier instead — an
    AWS::EC2::EIP returns the IP address, an AWS::EC2::Instance returns
    the instance ID.

    Args:
        logical_name: The logical name of the resource or parameter you
            want to dereference.

    Returns:
        * For a parameter's logical name: the value of the parameter.
        * For a resource's logical name: a value you can typically use to
          refer to that resource.

    .. note:: You can also use Ref to add values to Output messages.
    """
    return {'Ref': logical_name}
68db97a89dafcf5c087aefefa8868e7d8a60479f
518,174
def normalize_comment(s):
    """Normalize comments, turning things like "????" into "?".

    :param s: The original comment
    :return: None for pure '#' comments, '?' for pure '?' comments,
        otherwise the comment unchanged (falsy inputs pass through).
    """
    if not s:
        return s
    chars = set(s)
    if chars == {'#'}:
        # Pure separator comments carry no information.
        return None
    if chars == {'?'}:
        return '?'
    return s
1b5c3b6071f2861c7c72c4bb53cec2ccb8807324
386,372
import time


def getTimestamp(t=None, fmt='%Y-%m-%d %H:%M:%S'):
    """Returns the timestamp for the given time (defaults to current time).

    The time should be in secs since epoch.  ``t=0`` (the epoch itself) is
    a valid input and is honored; only ``None`` triggers the
    current-time default (the previous truthiness check silently replaced
    ``t=0`` with "now").

    .. note:: The default `fmt` includes spaces and colons.
    """
    if t is None:
        t = time.time()
    return time.strftime(fmt, time.localtime(t))
23c3d106a1a58f6d186318825bf83802112d5f71
263,173
def dataverse_tree_walker(
    data: list,
    dv_keys: list = ["dataverse_id", "dataverse_alias"],
    ds_keys: list = ["dataset_id", "pid"],
    df_keys: list = ["datafile_id", "filename", "pid", "label"],
) -> tuple:
    """Walk through a Dataverse tree by get_children().

    Recursively walk through the tree structure returned by
    ``get_children()`` and extract the keys needed.

    Fixes over the original: uses ``isinstance`` instead of ``type() ==``
    and propagates the caller-supplied key lists into recursive calls
    (previously nested levels silently fell back to the defaults).

    Parameters
    ----------
    data : dict or list
        Tree data structure returned by ``get_children()``.
    dv_keys : list
        Keys to be extracted from each Dataverse element.
    ds_keys : list
        Keys to be extracted from each Dataset element.
    df_keys : list
        Keys to be extracted from each Datafile element.

    Returns
    -------
    tuple
        (List of Dataverses, List of Datasets, List of Datafiles)
    """
    def _pick(elem, keys):
        # Keep only the requested keys that are actually present.
        return {key: elem[key] for key in keys if key in elem}

    dataverses = []
    datasets = []
    datafiles = []
    if isinstance(data, list):
        for elem in data:
            dv, ds, df = dataverse_tree_walker(elem, dv_keys, ds_keys, df_keys)
            dataverses += dv
            datasets += ds
            datafiles += df
    elif isinstance(data, dict):
        dtype = data.get("type")
        if dtype == "dataverse":
            dataverses.append(_pick(data, dv_keys))
        elif dtype == "dataset":
            datasets.append(_pick(data, ds_keys))
        elif dtype == "datafile":
            datafiles.append(_pick(data, df_keys))
        if data.get("children"):
            dv, ds, df = dataverse_tree_walker(
                data["children"], dv_keys, ds_keys, df_keys)
            dataverses += dv
            datasets += ds
            datafiles += df
    return dataverses, datasets, datafiles
a3ce15354e5a9b6c38510eafaec0dfa531b595ef
596,836
def parse_file(file_name):
    """Parse a data file into an image list.

    Args:
        file_name (str): path of the data file, one image path per line

    Returns:
        List of image paths (stripped lines)
    """
    with open(file_name, "r") as handle:
        return [line.strip() for line in handle]
c488727d1e11c08ad0dec775f5f9bb6d8b1de830
170,983
def _sort_baseline_facts(baseline_facts):
    """Sort baseline facts (and each fact's nested "values" list) by
    case-insensitive name before saving to the DB.
    """
    def by_name(fact):
        return fact["name"].lower()

    sorted_facts = sorted(baseline_facts, key=by_name)
    for fact in sorted_facts:
        if "values" in fact:
            fact["values"] = sorted(fact["values"], key=by_name)
    return sorted_facts
571e464b1f6a1631a30529bc67aab1a7584a2cdb
201,095
import fnmatch


def ignore_rule_matches_result(ignore_rule, pa11y_result):
    """Return True when *every* attribute of the ignore rule glob-matches
    the corresponding attribute of the pa11y result.
    """
    for attr, pattern in ignore_rule.items():
        if not fnmatch.fnmatch(pa11y_result.get(attr), pattern):
            return False
    return True
e3596b41fe8fbcc910154d0514c62dc29096fa6c
662,150
def get_slices_by_indices(str, indices):
    """Given a string and a list of start indices, return the substrings
    that start at each index and run up to the next index (the last slice
    runs to the end of the string).  For example, given the arguments::

        str='antidisestablishmentarianism', indices=[4, 7, 16, 20, 25]

    this function returns the list::

        ['dis', 'establish', 'ment', 'arian', 'ism']

    (the original docstring's example wrongly included 'anti' — text
    before the first index is never produced).

    @param str: text
    @type str: string
    @param indices: start indices, in increasing order
    @type indices: list of integers
    """
    # NOTE: the parameter name `str` shadows the builtin; it is kept for
    # backward compatibility with keyword callers.
    if not indices:
        return []
    slices = [str[start:stop] for start, stop in zip(indices, indices[1:])]
    slices.append(str[indices[-1]:])
    return slices
e69bd33abfd3f9f423ed33c3b46841ad0ed1a30e
673,648
def get_gender_id(gender: str) -> int:
    """A helper method for generating gender IDs.

    Maps "FEMALE" -> 0, "MALE" -> 1, anything else -> 2.

    :param gender: a gender string
    :return: a gender id
    """
    ids = {"FEMALE": 0, "MALE": 1}
    return ids.get(gender, 2)
2e44f52ef4c762e483a26ae5d87760f901216011
270,860
def add(x, y, z=0):
    """Add two (or optionally three) objects with the + operator."""
    total = x + y
    return total + z
6f5284d580dc797b22f9465f0b2bd697ce2269e5
331,621
def find_pure_symbol(symbols, unknown_clauses):
    """
    Find a symbol and its value if it appears only as a positive literal
    (or only as a negative) in clauses.

    Arguments are expected to be in integer representation.

    >>> find_pure_symbol({1, 2, 3}, [{1, -2}, {-2, -3}, {3, 1}])
    (1, True)
    """
    all_symbols = set().union(*unknown_clauses)
    positives = all_symbols & set(symbols)
    negatives = all_symbols & {-s for s in symbols}
    # A literal is pure when its negation never occurs.
    for literal in positives:
        if -literal not in negatives:
            return literal, True
    for literal in negatives:
        if -literal not in positives:
            return -literal, False
    return None, None
ffa20cee768e81cd3525483bc2490ada6482b550
48,539
def most_common_start_hour(df):
    """Get the most common start hour for the bike sharing service."""
    hours = df['Start Time'].dt.hour
    # Keep the derived column: callers may rely on this side effect.
    df['Start Hour'] = hours
    return "Most common start hour is {}.".format(hours.mode()[0])
7b87b2b08c6a2fb48f1929757bf8f4846af81696
423,824
def test_single_processor(recipe): """Test for recipe having a single processor. Args: recipe: Recipe object. Returns: Tuple of Bool: Failure or success, and a string describing the test and result. """ description = "Recipe has only a single processor, of type 'JSSImporter'." result = False processors = recipe.get("Process") if len(processors) == 1: processor = processors[0].get("Processor") if processor and processor == "JSSImporter": result = True else: description += " (Processor is not 'JSSImporter')" else: description += " (Too many processors: %s > 1)" % len(processors) return (result, description)
c0404794ce87f4fd1aee8f8f9d7105f377461988
578,219
def stub_read(mote_id, chan_id, read_start_time):
    """A stub read returning nothing of interest; -1 is easily picked up
    as an "invalid" sample value by callers.
    """
    return -1
b3d94d002f9d112540d62ae1f985bf595b3f2558
39,837
import random


def generate_random_pastel() -> tuple:
    """Generate a random pastel color as an (r, g, b) tuple.

    Averaging each random channel with 255 pushes the color toward
    white, which is what makes it pastel.
    """
    def pastel_channel():
        return (random.randint(0, 255) + 255) / 2

    return pastel_channel(), pastel_channel(), pastel_channel()
9a9960d14d960d101cfcb1efa73ea16142cfe84f
382,449
import torch


def _conformal_score_interval1(predictions, values, max_interval=1e+3):
    """Compute the alternative non-conformity score of a set of values
    under interval predictions.

    The score keeps the signed deviation from the per-sample prediction
    mean and offsets it by ``max_interval`` in the direction of the sign.
    """
    centers = predictions.mean(dim=1).view(1, -1)
    deviation = values - centers
    return torch.sign(deviation) * max_interval + deviation
b5e8ef02c90f882f8d66daf091052b5526910cfb
294,475
def get_last_row(sheet_object) -> int:
    """Get the last populated row from a spreadsheet.

    Walks upward from the sheet's max_row (which may include
    formatting-only rows) until column 1 holds a value.

    :param sheet_object: openpyxl worksheet
    :return: int, 1-based index of the last populated row (never below 1)
    """
    row = sheet_object.max_row
    while row > 1 and sheet_object.cell(row, column=1).value is None:
        row -= 1
    return row
840bd2aabc652a33ae2f4848227857eba75c497b
548,697
def uint8_to_bool(np_img):
    """
    Convert NumPy array of uint8 (255, 0) values to bool (True, False) values.

    Args:
      np_img: Binary image as NumPy array of uint8 (255, 0) values.

    Returns:
      NumPy array of bool (True, False) values.
    """
    # Scale to (1.0, 0.0) first, then cast; any nonzero maps to True.
    scaled = np_img / 255
    return scaled.astype(bool)
55b0b5f1376a7619f2691a05890e5840717f9716
614,560
from typing import List


def assign_encoder_layer_ids(
        encoder_names: List[List[str]],
):
    """
    Assign ids to encoder layers.

    The encoder may contain several blocks e.g., block1 and block2.
    This function iterates through all the layers of each block from the
    input end towards the output end. It increases the layer id by 1 when
    the first digit detected in a layer name changes.

    Parameters
    ----------
    encoder_names
        Encoder layer names, one list per block.

    Returns
    -------
    name_to_id
        The encoder layer-to-id mapping.
    encoder_layer_num
        The number of encoder layers.

    Raises
    ------
    ValueError
        If a layer name contains no digit-based component to infer an
        id from.
    """
    name_to_id = {}
    cur_id = 0
    for group_names in encoder_names:
        last_inferred_id = -1
        for name in group_names:
            detect_id = False
            for split in name.split('.'):
                # The first digit encountered is used to infer the layer id.
                if split.isdigit():
                    inferred_id = int(split)
                    # Increase by at most 1 per change (layer.0 -> id 1).
                    if inferred_id != last_inferred_id:
                        cur_id += 1
                        last_inferred_id = inferred_id
                    name_to_id[name] = cur_id
                    detect_id = True
                    break
            if not detect_id:
                # Fixed the original's garbled message
                # ("... not has no id inside").
                raise ValueError(f"parameter name: {name} has no digit-based id inside")
    encoder_layer_num = max(name_to_id.values()) if name_to_id else 0
    return name_to_id, encoder_layer_num
5ab370eaa602063b2d840abf6de108ea2ef9d8ca
205,281
def metrics(table):
    """Calculate the metrics for a set of crater matches.

    Arguments:
        table : Pandas table with the columns N_match, N_detect, N_csv

    Returns:
        table : modified table with the additional columns
            frac_new_csv    : fraction of unmatched craters relative to
                              the matched + diff
            frac_new_detect : fraction of detected craters that are new (FPR)
            precision       : ratio of true positives to all positives
            recall          : ratio of true positives to all craters
            f1              : harmonic mean of precision and recall
    """
    tp = table["N_match"]
    fp = table["N_detect"] - table["N_match"]
    fn = table["N_csv"] - table["N_match"]
    diff = table["N_detect"] - table["N_match"]
    # (removed unused local `g = table["N_csv"]`)
    table["frac_new_csv"] = diff / (table["N_csv"] + diff)
    table["frac_new_detect"] = diff / (table["N_detect"])
    precision = tp / (tp + fp)
    recall = tp / (fn + tp)
    table["precision"] = precision
    table["recall"] = recall
    table["f1"] = 2 * (recall * precision) / (recall + precision)
    return table
a4ff1c019442d8334e26e30bf5126c807083e766
593,437
import re


def lmcp_toprettyxml(obj):
    """Convert an LMCP object to a string that formats correctly with
    xml.dom.minidom's toprettyxml().

    Keyword arguments:
        obj -- an LMCP message object

    toprettyxml() assumes no newlines or whitespace between XML elements,
    so remove them from what LMCP's toXML() returns.
    """
    # Raw string fixes the original's invalid '\s' escape in a plain
    # string literal (DeprecationWarning / future SyntaxError).
    return re.sub(r'\n\s*', '', obj.toXML())
a916fed25573457ac908955a3151bf0c8c993cbe
146,386
def binary_search(sorted_arr, wanted_num):
    """Search a sorted array for `wanted_num`.

    Returns the index of the element if found, otherwise -1.
    (Docstring translated from Polish.)
    """
    lo, hi = 0, len(sorted_arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = sorted_arr[mid]
        if value < wanted_num:
            lo = mid + 1
        elif value > wanted_num:
            hi = mid - 1
        else:
            return mid
    return -1
fc3abd941bc134effc3ce8bbe741ee3a92ea6386
625,992
import json
import logging


def load_json_file(filepath):
    """Load the contents of a .json file on disk into a JSON object.

    Returns None (and logs an error) when the file cannot be read or
    parsed.  The original bare ``except:`` swallowed every exception
    (including KeyboardInterrupt); this catches only I/O and parse errors.
    """
    try:
        with open(filepath, 'r') as jsonfile:
            return json.load(jsonfile)
    except (OSError, ValueError):
        # ValueError covers json.JSONDecodeError; lazy %-args for logging.
        logging.error('could not load .json file %s', filepath)
        return None
dd7f7c4adef2162ec8a617b09864a46cebfd2749
418,101
def expand_rle(rle):
    """Expand a run-length encoding [(char, count), ...] back to a flat list."""
    expanded = []
    for symbol, count in rle:
        expanded += [symbol] * count
    return expanded
21490f3ce7e022d5bcb212d7763909beea5641c8
403,994
def generate_dashboard_link(uuid: str) -> str:
    """Generate a MythX dashboard link for an analysis job.

    Links point at the official MythX dashboard production setup; custom
    deployment locations are currently not supported by this function
    (but available at mythx.io).

    :param uuid: The analysis job's UUID
    :return: The analysis job's dashboard link
    """
    base = "https://dashboard.mythx.io/#/console/analyses/{}"
    return base.format(uuid)
8001568677214c564a120e968a2362b1c5979c34
331,067
import json


def get_end_stream_request(stream_id: str, request_id: int, api_version: str) -> str:
    """Serialize an EndStreamRequest.

    Parameters
    ----------
    stream_id:
        ID of the stream we want to stop.
    request_id:
        ID of the EndStreamRequest.
    api_version:
        API version string to embed in the message envelope
        (the original docstring omitted this parameter).

    Returns
    -------
    Serialized EndStreamRequest as a JSON string.
    """
    api_message = {
        "api_version": api_version,
        "api_request": {
            "request_id": request_id,
            "end_stream_request": {"stream_id": stream_id},
        },
    }
    return json.dumps(api_message)
86265dd7cb7bef04eb9f1c5d876afa85bbac1a72
632,413
def emulator_type(emulator):
    """Identify the type of the given emulator record."""
    return emulator["emulator_type"]
db33730c5f06ff1e50b592dc23050d9c7049c7b0
106,162
def sourceSingleSlitOpaqueWall(a, da):
    """Sample a single slit of width `a` in an opaque wall.

    We return two lists: one with the space coordinates across the slit,
    one with the (constant, normalized) amplitude at each coordinate.
    Amplitude is zero outside of the returned coordinates.
    """
    Na = int(a / da)
    amplitude = 1 / float(Na)  # normalized so the total stays constant
    aCoords = []
    amplitudes = []
    for j in range(Na + 1):
        # Coordinates are centred on the middle of the slit.
        aCoords.append((j - Na / 2) * da)
        amplitudes.append(amplitude)
    return (aCoords, amplitudes)
83429643d3fa9c40fca9a373a376ebae1701bc96
338,117
import csv


def parse_csv_line(line):
    """Parse a string holding one CSV line into a Python list of fields."""
    rows = list(csv.reader([line]))
    return rows[0]
cc0066f790fcda5eaea8a1fed18a1a5ac7f1cd47
473,513
def userfunc(say='Hi'):
    """Test func with one parameter."""
    message = 'SKPAR says {}'.format(say)
    return message
f55264cc50dfcf08fb15b491a052431abff57724
553,255
def file_len(file_name):
    """Return the number of lines in the text-like file `file_name`."""
    with open(file_name) as handle:
        return sum(1 for _ in handle)
702b6b25e550a0fe292315667c71968478a4eff0
217,188
def dict_to_desc(d: dict):
    """
    Transform a dictionary into a text-based model description.

    Parameters
    ----------
    d : dict
        Mapping factors->indicators.

    Returns
    -------
    desc : str
        Text-form description of the model, one "factor =~ a + b" line
        per factor; the special key -1 is skipped.
    """
    lines = []
    for factor, indicators in d.items():
        if factor == -1:
            continue
        lines.append(f"{factor} =~ {' + '.join(indicators)}\n")
    return ''.join(lines)
6aa6c40d1766ade4bb2399c08f93e9fec986bc84
432,251
def get_score_end_time(score):
    """Return the timestamp just after the final note in the score ends.

    An empty/falsy score ends at 0.  An event without a duration
    (``time_delta is None``) ends at its own start time.
    """
    if not score:
        return 0
    final_event = score[-1]
    if final_event.time_delta is None:
        return final_event.time
    return final_event.time + final_event.time_delta.total_beats()
0376d62ca370e7d8bf0d6ffc4bf68f66e0d16e34
168,600
from typing import Dict
from typing import Any


def merge_dicts(*args: Dict[Any, Any]) -> Dict[Any, Any]:
    """
    Successively merge any number of dictionaries; later keys win.

    >>> merge_dicts({'a': 1}, {'b': 2})
    {'a': 1, 'b': 2}
    >>> merge_dicts({'a': 1}, {'a': 2}, {'a': 3})
    {'a': 3}

    Returns:
        Dict: Dictionary of merged inputs.
    """
    merged: Dict[Any, Any] = {}
    for dct in args:
        merged.update(dct)
    return merged
68cf2046f830610a9b386edebc8f16671dfc9293
656,213
def estimate_microturbulence(effective_temperature, surface_gravity):
    """
    Estimate microturbulence from relations between effective temperature
    and surface gravity.  For giants (logg < 3.5) the relationship
    employed is from Kirby et al. (2008), and for dwarfs (logg >= 3.5)
    the Reddy et al. (2003) relation is used.

    :param effective_temperature: effective temperature of the star in Kelvin
    :type effective_temperature: float
    :param surface_gravity: surface gravity of the star
    :type surface_gravity: float
    :returns: estimated microturbulence (km/s)
    :rtype: float
    """
    if surface_gravity < 3.5:
        # Giant branch: Kirby et al. (2008).
        return 2.70 - 0.509 * surface_gravity
    # Dwarfs: Reddy et al. (2003).
    return (1.28
            + 3.3e-4 * (effective_temperature - 6000)
            - 0.64 * (surface_gravity - 4.5))
bf54342e00fc61f042f183c8bbebc01005eb6b4c
15,089
def single(a, b, distance_function):
    """
    Given two collections ``a`` and ``b``, return the distance of the
    points which are closest together, comparing only the extreme
    (min/max) elements of each collection.  ``distance_function`` is used
    to determine the distance between two elements.

    Example::

        >>> single([1, 2], [3, 4], lambda x, y: abs(x-y))
        1  # (distance between 2 and 3)
    """
    d1 = distance_function(min(a), max(b))
    d2 = distance_function(min(b), max(a))
    return min(d1, d2)
a8252954d129d67755ade0cd119a6582dd063ee1
341,074
def remove_by_index(l, index):
    """Return a new sequence equal to `l` with the element at `index`
    removed (the input sequence is not modified).
    """
    head = l[:index]
    tail = l[index + 1:]
    return head + tail
1c55d35d6f37c1180de4eaf1244985734a73a033
662,908
def __get_type_NHC(code):
    """
    Get the intensity category according to the status of system defined
    by the "National Hurricane Center".

    Reference: https://www.nhc.noaa.gov/data/hurdat/hurdat2-format-nov2019.pdf

    0 - Subtropical cyclone of depression or storm intensity; a low that
        is neither a TC, a subtropical cyclone, nor an EC; tropical wave;
        disturbance (OTHERS, unknown intensity)
    1 - Tropical depression (TD, <34 knots)
    2 - Tropical storm (TS, 34-63 knots)
    3 - Hurricane (HU, >64 knots)
    4 - Extratropical cyclone (EC, any intensity)

    Parameters
    ----------
    code : str
        A string code representing the type.

    Returns
    -------
    str
        One of the types in ['TD', 'TS', 'HU', 'EC', 'OTHERS'].
    """
    # The HURDAT2 code already is the desired category label.
    return code
a59d97fa473dc5faea7d8d7c61a5d5766500e6dd
692,425
def xywh2xyxy(xywh, inplace=False):
    """Convert a bbox tensor from (x1, y1, w, h) to (x1, y1, x2, y2).

    A 1-D box is temporarily viewed as a batch of one.  NOTE: the trailing
    in-place squeeze mirrors the long-standing behavior and will also
    collapse a genuine batch of size one back to 1-D.
    """
    xyxy = xywh if inplace else xywh.clone()
    if xywh.dim() == 1:
        xywh = xywh.unsqueeze(0)
        xyxy.unsqueeze_(0)
    # x2/y2 are inclusive pixel coordinates, hence the -1.
    xyxy[:, 2] = xywh[:, 0] + xywh[:, 2] - 1
    xyxy[:, 3] = xywh[:, 1] + xywh[:, 3] - 1
    return xyxy.squeeze_()
ebe063414eb290510b56c2f1bac921f89da333ea
532,607
def could_short_circuit(if_el):
    """
    Is there the potential to take advantage of Python's ability to
    short-circuit and collapse a nested IF into its parent as a single
    expression?

    The if_el must have an IF as its only body child AND that nested IF
    must have exactly one ORELSE child — and that ORELSE must be empty.

    :param element if_el: If element
    :return: True if the If has the potential to be short-circuited
    :rtype: bool
    """
    body_els = if_el.xpath('body')
    if len(body_els) != 1:
        return False
    body_children = body_els[0].getchildren()
    if len(body_children) != 1:
        return False
    nested = body_children[0]
    if nested.tag != 'If':
        return False
    orelse_els = nested.xpath('orelse')
    if len(orelse_els) != 1:
        return False
    # The ORELSE must be empty — otherwise there is an else-branch to keep.
    return not orelse_els[0].getchildren()
f3ede43ae76e7a07cda67999d327aea61203c870
245,585
from typing import Optional
from typing import IO


def openfile(spec: Optional[str]) -> Optional[IO[str]]:
    """Open file helper.

    Args:
        spec: file/mode spec; for example:

            * ``file`` uses mode ``w``
            * ``file+`` uses mode ``a``
            * ``file+r`` uses mode ``r``

    Returns:
        Line-buffered file object, or ``None`` if spec is ``None``.
    """
    if spec is None:
        return None
    name, plus, mode = spec.partition('+')
    if not mode:
        # No '+' at all -> write mode; a bare trailing '+' -> append.
        mode = 'a' if plus else 'w'
    return open(name, mode=mode, buffering=1)
435547e3fa359a787f1f14b378a6eed9a0c3ee53
677,572
def addContent(old_html, raw_html):
    """Concatenate `raw_html` onto `old_html` and return the result."""
    combined = old_html + raw_html
    return combined
cd5a1677ac4f4216d4cbeb7b7565b44429872bcc
633,528
def generate_param(name, location, _type, required=None, _format=None):
    """
    Generate a parameter definition dynamically.

    The 'required' and 'format' entries are only included when the
    corresponding argument was explicitly supplied.
    """
    param = {
        'in': location,
        'type': _type,
        'name': name,
    }
    if required is not None:
        param['required'] = required
    if _format is not None:
        param['format'] = _format
    return param
e2c884e067635cb7c4e999505a60662491400cab
517,151
def get_max_len(in_string, max_len):
    """Return the length of `in_string` if it exceeds `max_len`,
    otherwise return `max_len`.
    """
    return max(len(in_string), max_len)
e252a871600f4ec1ae1bd1694896395dadaca9a3
210,960
def default_none(value, default):
    """Return |value| if it is not None, otherwise return |default|.

    Uses an identity check (``is not None``) instead of the original
    ``!= None`` so objects with custom ``__eq__`` cannot break the test
    and falsy values such as 0 or '' are returned as-is.
    """
    return value if value is not None else default
d36f266f68a472b9781174b36a853a2f2e332b5c
175,363
def _upsert_user(request_session, api_url, user_doc):
    """Insert a user, falling back to update when the insert fails
    because the user already exists.

    Args:
        request_session (requests.Session): Session to use for the request.
        api_url (str): Base url for the API eg. 'https://localhost:8443/api'
        user_doc (dict): Valid user doc defined in user input schema.

    Returns:
        requests.Response: API response.
    """
    create_resp = request_session.post(api_url + '/users', json=user_doc)
    if create_resp.status_code == 409:
        # 409 Conflict -> the user already exists; update in place instead.
        return request_session.put(
            api_url + '/users/' + user_doc['_id'], json=user_doc)
    return create_resp
eebbb5d8bc2244371097955219068135b0a47d36
185,016
def app_name(experiment_uuid):
    """Convert a UUID to a valid Heroku app name ("dlgr-" + first 8 chars)."""
    prefix = "dlgr-"
    return prefix + experiment_uuid[:8]
924ddf4204e4d387b1c2ece5b6ca21d0bea4c0c5
330,021
import six


def is_string(v):
    """Test if a value is currently a string.

    @param v: the value to test
    @returns: True if the value is a string
    """
    # six.string_types is (str,) on Python 3, (basestring,) on Python 2.
    return isinstance(v, six.string_types)
e59aa19a2133ef5969835c866114b13790f10a31
352,355
def substitute_str_idx(s: str, r: str, i: int) -> str:
    """Substitute the char at a given position.

    Arguments:
        s: The str in which to substitute
        r: The char to substitute
        i: index of the substitution

    Returns:
        The string `s` with the i'th char replaced by `r`; `s` is
        returned unchanged when `i` matches no enumerate index
        (e.g. out of range or negative).
    """
    return ''.join(r if idx == i else ch for idx, ch in enumerate(s))
9cdef667249c3447338085ff47de01a58a192be2
673,251
def preformatted(text: str) -> str:
    """Wrap text in triple backticks to make it pre-formatted."""
    return '```{}```'.format(text)
152c9cf6ce78ffed74b23562f7f09195340ab9b0
46,716
def error_response(msg: str = ""):
    """Define an error json response to send to a client.

    :param msg: A message indicating that the request has errors.
    """
    return {
        u'valid': False,
        u'msg': msg,
    }
f52d1a82f7786d1a6f5d1722fdd884d4bb623a7b
281,787
def get_spo_count(spec):
    """Returns the number of spos in the spec file."""
    count = 0
    # Walk every function of every C file; not all functions carry "spos".
    for cfile in spec["cfiles"].values():
        for function in cfile["functions"].values():
            if "spos" in function:
                count += len(function["spos"])
    return count
dfa997d4c68e8973409a4de2d54ee5e8000ce4ec
503,212
def z_score(x, avg, sd):
    """Calculate the z-score (standard score).

    Parameters
    ----------
    x : float
        Standardized variable.
    avg : float
        Average from population.
    sd : float
        Standard deviation from population.

    Returns
    -------
    z : float
        Z-score.
    """
    deviation = x - avg
    return deviation / sd
7f1aa9f47ef685aba72154bf5144620d4a304b9f
262,729
def format_list(list):
    """format a list into a space-separated string"""
    # NOTE(review): the parameter shadows the builtin `list`; the name is
    # kept because keyword callers (format_list(list=...)) depend on it.
    return " ".join(map(str, list))
db67e2fe16ee04883d30476de4d53f99d3335b9a
312,381
def _get_node_count(graph_entry): """ Get number of nodes""" return graph_entry.vcount()
e5e9992aadfa0d2f84c698b1ceeae0c5f500c72e
32,306
def get_multiline_description(description_start, lines):
    """
    Return a multiline description given the ``description_start`` start
    marker of the description and a ``lines`` list. These are common in
    .podspec files.

    https://guides.cocoapods.org/syntax/podspec.html#description

    The description appears as::

        spec.description = <<-DESC
                             Computes the meaning of life.
                             ...
                           DESC

    Collection starts on the line containing '.description' and the start
    marker, and stops at the matching end marker (start marker with '<' and
    '-' stripped, e.g. "<<-DESC" -> "DESC").
    """
    # from "<<-DESC" to "DESC"
    end_marker = description_start.strip('<-')
    collected = []
    in_description = False
    for current in lines:
        if not in_description:
            if '.description' in current and description_start in current:
                in_description = True
            continue
        if current.strip().startswith(end_marker):
            break
        collected.append(current)
    return '\n'.join(collected)
e32f6678e58863c293e1122e95b91ceb5d4f5c55
279,668
from typing import List
from typing import Dict


def order_by_station(order_list: List[Dict], station_name) -> List[tuple]:
    """Returns a list of tuples.

    The first element of each tuple is the index of an order whose station
    list contains 'station_name'. The second element is the position of
    'station_name' within that order's station list. Since a station can be
    used by several orders, these tuples are collected in a list.
    """
    return [
        (order_idx, order['station'].index(station_name))
        for order_idx, order in enumerate(order_list)
        if station_name in order['station']
    ]
7826a01779dbcb4b6ce9874d61c1c430553fedb3
336,911
def wavelength_tlv(wave_period, velocity):
    """Usage: Calculate wavelength (l) using wave period (T) and velocity (v)"""
    wavelength = velocity * wave_period
    return wavelength
35e6292ccf7407336acc33dbb934e7a034b6401b
612,512
def average_throughput(throughputs):
    """Return the average `ops` value of `throughputs`, a list of `Throughput`s."""
    total_ops = sum(t.ops for t in throughputs)
    return float(total_ops) / len(throughputs)
ee3a95b1f90d8faa2bf014d598f82b6a75d01d9c
322,256
def average(numbers):
    """
    Return the average (arithmetic mean) of a sequence of numbers.
    """
    count = len(numbers)
    return sum(numbers) / float(count)
206a490758e8a2ce082029f698ca0d6fbd8cddd6
325,944
def processed_events(object_type: str, subscriber: str) -> str:
    """Return the db key used to store processed events.

    This is the key where processed events for the specified subscriber
    and object type are stored.

    Args:
        object_type (str): Type of object
        subscriber (str): Subscriber name

    Returns:
        str, db key where processed events are stored.
    """
    key_template = 'events:{}:{}:processed'
    return key_template.format(object_type, subscriber)
5cd043759467307b2dfc5574db58b0ac946204df
323,615
def findIndexes(text, subString):
    """
    Returns a set of all (possibly overlapping) indexes of subString in text.
    """
    result = set()
    search_from = 0
    found = text.find(subString, search_from)
    while found != -1:
        result.add(found)
        # Advance by one, not len(subString), so overlapping matches count.
        search_from = found + 1
        found = text.find(subString, search_from)
    return result
54f24abbb74f674b2b5a39dd545a0470c849c652
449,470
def extension(subdir):
    """Does a subdirectory use the .cpp or .cc suffix for its files?"""
    # Vendored sources use .cpp; first-party code uses .cc.
    if subdir.startswith('vendor'):
        return 'cpp'
    return 'cc'
f8480b81a4c07e0902d8c13893196eb0ca4dbdea
286,950
import torch


def squared_distances(x, y):
    r"""Return the matrix of squared distances $\|x_i - y_j\|_2^2$.

    Adapted from the geomloss package.

    parameters:
        - x : Source data (TorchTensor, shape (N, D) or batched (B, N, D))
        - y : Target data (TorchTensor, shape (M, D) or batched (B, M, D))

    returns:
        - Ground cost tensor, shape (N, M) or (B, N, M).

    raises:
        - ValueError if ``x`` is neither 2- nor 3-dimensional.
    """
    if x.dim() == 2:
        D_xx = (x * x).sum(-1).unsqueeze(1)         # (N,1)
        D_xy = torch.matmul(x, y.permute(1, 0))     # (N,D) @ (D,M) = (N,M)
        D_yy = (y * y).sum(-1).unsqueeze(0)         # (1,M)
    elif x.dim() == 3:  # Batch computation
        D_xx = (x * x).sum(-1).unsqueeze(2)         # (B,N,1)
        D_xy = torch.matmul(x, y.permute(0, 2, 1))  # (B,N,D) @ (B,D,M) = (B,N,M)
        D_yy = (y * y).sum(-1).unsqueeze(1)         # (B,1,M)
    else:
        # Put the offending shape in the exception message instead of
        # printing it, so callers that catch ValueError get the diagnostic.
        raise ValueError(
            "Incorrect number of dimensions; x.shape : {}".format(tuple(x.shape)))
    # |x - y|^2 = |x|^2 - 2<x, y> + |y|^2, expanded via broadcasting.
    return D_xx - 2 * D_xy + D_yy
0875ac68321731fd87f0c4a45a185bc4e7b56294
159,371
def length_sqrd(vec):
    """ Returns the squared length of a vector2D.  Faster than Length(), but only marginally """
    vx, vy = vec[0], vec[1]
    return vx * vx + vy * vy
1950a086cd136abc923a4377356b68e7e1c728c2
607,030
def p2a(p, m1, m2):
    """
    It computes the separation (Rsun) given m1 (Msun), m2 (Msun) and p (days).
    """
    days_per_year = 365.24
    au_in_rsun = 214.95
    # Kepler's third law with P in years and masses in Msun gives a in AU;
    # the AU->Rsun factor converts the result.
    period_yr = p / days_per_year
    return au_in_rsun * (period_yr * period_yr * (m1 + m2)) ** (1. / 3.)
a0c5d8c0d7b961e8017217f22f54aa2a70daf5a0
20,231
def version_string(version):
    """Return the string representation of an Aardvark API version"""
    # High byte is the major version, low byte the minor.
    major = (version >> 8) & 0xFF
    minor = version & 0xFF
    return str(major) + '.' + str(minor)
153e422798ed7674ee4df25c98c3cd52920499c3
320,671
import collections


def deep_convert_to_plain_dict(an_odict):
    """
    Recursively convert `an_odict` and any of its dictionary subelements
    from `collections.OrderedDict`:py:class: to plain `dict`:py:class:

    .. note:: This is naive, in that it will not properly handle
        dictionaries with recursive object references, nor dictionaries
        nested inside lists or other containers.

    :Args:
        an_odict
            a (presumably) `collections.OrderedDict`:py:class: to convert

    :Returns:
        an "unordered" (i.e., plain) `dict`:py:class: with all ordered
        dictionaries converted to `dict`:py:class:
    """
    a_dict = {}
    for (key, value) in an_odict.items():
        if isinstance(value, dict):
            # Recurse into every mapping: the original `type(...) is
            # OrderedDict` check skipped OrderedDicts nested inside plain
            # dicts and dict subclasses, leaving them unconverted.
            a_dict[key] = deep_convert_to_plain_dict(value)
        else:
            a_dict[key] = value
    return a_dict
0a463981909153d4beee64fbbf5fad489adf78ac
7,527
def compute_gender(last_gender: str, top_gender_male: int, top_gender_female: int,
                   top_gender_7_days: str):
    """Computes the gender type of a visitor using a majority voting rule."""

    def majority_voting(lst):
        # Ties resolve to whichever candidate `max` sees first over the set.
        return max(set(lst), key=lst.count)

    top_gender = 'M' if top_gender_male > top_gender_female else 'F'
    votes = [last_gender, top_gender, top_gender_7_days]
    return majority_voting(votes)
387f04fae4b593de54894eeac017b7fe124706c9
47,744
import csv


def load_aitlas_format_dataset(file_path):
    """Read image entries from a CSV file. Format: (image_path, class_name).

    Parameters:
        file_path (str): Path of the CSV file to read.

    Returns:
        list[tuple]: One ``(image_path, class_name)`` tuple per row.
    """
    with open(file_path, "r") as f:
        # The original enumerated rows but never used the index; plain
        # iteration in a comprehension is simpler and equivalent.
        return [(row[0], row[1]) for row in csv.reader(f)]
b36b9b213c88e6cd66ee88d0f3b5a76e113c320c
622,878
def multiply(number_1, number_2):
    """
    This function is used to perform finite field multiplication, which is
    the core operation of the MixColumn step.

    Multiplies two bytes in GF(2^8) using the AES reduction polynomial
    x^8 + x^4 + x^3 + x + 1 (0x11b).

    :param number_1: The first byte (0-255) which involve the multiplication.
    :param number_2: The second byte (0-255) which involve the multiplication.
    :return: A decimal integer in 0-255
    """
    # Standard shift-and-xor ("Russian peasant") GF(2^8) multiply; replaces
    # the original precomputed-list + zip-over-bit-string construction with
    # the textbook loop, identical for all byte inputs.
    product = 0
    a = number_1
    b = number_2
    for _ in range(8):
        if b & 1:
            product ^= a
        carry = a & 0x80
        a = (a << 1) & 0xff
        if carry:
            # xtime: reduce modulo the AES polynomial after overflow.
            a ^= 0x1b
        b >>= 1
    return product
d2511fc5a637f8532b3acd63205474b9081b2b24
514,887
def get_task_version(taskf):
    """return a version for a taskf.

    This is wrapper around taskf.get_version() that will not raise an
    exception if a tool is not installed but will return None instead.
    """
    try:
        return taskf().get_version()
    except Exception as ex:
        msg = str(ex)
        if msg.startswith("no version defined for"):
            raise ValueError(msg)
        # A tool that cannot run or cannot be found is tolerated: callers
        # get None instead of an exception.
        if "non-zero exit status" in msg:
            return None
        if "not found" in msg:
            return None
        raise ValueError("version check failed with: {}".format(msg))
cfafc774fa969a0eb3acad13f3acdf47b8de722e
626,993
import re


def regex_finder(topics):
    """ Extracts queries, questions and narratives"""
    # DOTALL lets '.' span newlines inside the tagged bodies.
    dotall = re.DOTALL
    queries = re.findall("<query>(.*?)<\/query>", topics, dotall)
    questions = re.findall("<question>(.*?)<\/question>", topics, dotall)
    narratives = re.findall("<narrative>(.*?)<\/narrative>", topics, dotall)
    return (queries, questions, narratives)
8bd6120835d3c5f95b9b0d59921d53295602096f
346,957
def fringe_spacing(wavelength, slits_distance, screen_distance):
    """
    Compute the fringe spacing in a double-slit experiment.
    ===========================================
    Parameters:
    wavelength      : Wavelength of light
    slits_distance  : Distance between two slits
    screen_distance : Distance between slits and screen
    """
    spacing = wavelength * slits_distance / screen_distance
    return spacing
6f778da0126efda07c18679915a06bc1a4d5b1e0
461,781
def convert_cell_value_to_string(data):
    """
    Converts the value of a cell to a string if it is a list or a dictionary

    Parameters
    ----------
    data
        The value of the cell in the dataframe

    Returns
    -------
    str
        The original data if it is not a list or a dictionary, otherwise
        the string representation of these
    """
    if isinstance(data, list):
        # str() each element: ", ".join(data) alone raises TypeError when
        # the list contains non-string items (e.g. numbers).
        return ", ".join(str(item) for item in data)
    elif isinstance(data, dict):
        return str(data)
    else:
        return data
88e9db6dd9b3aa9d9608d364e5607d9bdd57c48a
253,733
from typing import List
from typing import Optional


def ls_strip_elements(ls_elements: List[str], chars: Optional[str] = None) -> List[str]:
    """
    Strip ``chars`` (whitespace when None) from every element.

    >>> ls_strip_elements([' a','bbb',' '])
    ['a', 'bbb', '']
    >>> ls_strip_elements([])
    []
    """
    # The original's extra `ls_elements is None` test was unreachable:
    # `not ls_elements` is already True for None.
    if not ls_elements:
        return []
    return [element.strip(chars) for element in ls_elements]
830691f26e934d025b96e0d1c3d89d0bb634ed4d
671,958
import ast


def robust_literal_eval(val):
    """Call `ast.literal_eval` without raising on invalid literals.

    Parameters
    ----------
    val : str
        String literal to be evaluated.

    Returns
    -------
    Output of `ast.literal_eval(val)`, or `val` if the string is not a
    valid Python literal.
    """
    try:
        return ast.literal_eval(val)
    except (ValueError, SyntaxError):
        # literal_eval raises SyntaxError (not only ValueError) on
        # malformed input such as "2+"; swallow both to stay robust.
        return val
7c5136d72354ad6018b99c431fc704ed761ea7db
674,993
import click


def verify_operation(txt, msg=None):
    """
    Issue click confirm request and return result. If msg is set and the
    confirm response is negative, output an abort message.

    Parameters:
      txt (:term:`string`):
        String that is prefixed to the prompt text and defines the
        verification request.

      msg (:class:`py:bool`):
        Optional parameter that if True causes an abort msg on the console.

    Returns:
        (:class:`py:bool`) where true corresponds to 'y' prompt response
    """
    confirmed = click.confirm(txt)
    if confirmed:
        return True
    if msg:
        click.echo('Request aborted')
    return False
70554e86b852e939fd40caf654a707ecfd55ec4e
599,854
def queryset_to_dict(qs, key='id'):
    """
    Given a queryset will transform it into a dictionary based on ``key``.
    """
    # Keys are stringified so they are stable regardless of the field type.
    return {str(getattr(obj, key)): obj for obj in qs}
e9c4b9653106b949c887e3ff5d69c490aa769b86
332,818
def _get_more_static_shape(shape0, shape1): """Compare two shapes with the same rank, and return the one with fewer symbolic dimension. """ assert len(shape0) == len(shape1) num_sym_dim0 = 0 num_sym_dim1 = 0 for dim0, dim1 in zip(list(shape0), list(shape1)): if not isinstance(dim0, int): num_sym_dim0 += 1 if not isinstance(dim1, int): num_sym_dim1 += 1 if num_sym_dim0 < num_sym_dim1: return shape0 return shape1
290a1472b96509d8c424153171cd58175fc2d2c4
165,268
def tag_predicate(p):
    """
    Given a full URI predicate, return a tagged predicate.

    So, for example, given
        http://www.w3.org/1999/02/22-rdf-syntax-ns#type
    return
        rdf:type

    Returns None when no known namespace prefix matches.
    """
    ns = {
        "http://www.w3.org/1999/02/22-rdf-syntax-ns#":"rdf:",
        "http://www.w3.org/2000/01/rdf-schema#":"rdfs:",
        "http://www.w3.org/2001/XMLSchema#":"xsd:",
        "http://www.w3.org/2002/07/owl#":"owl:",
        "http://www.w3.org/2003/11/swrl#":"swrl:",
        "http://www.w3.org/2003/11/swrlb#":"swrlb:",
        "http://vitro.mannlib.cornell.edu/ns/vitro/0.7#":"vitro:",
        "http://purl.org/ontology/bibo/":"bibo:",
        "http://purl.org/spar/c4o/":"c4o:",
        "http://purl.org/spar/cito/":"cito:",
        "http://purl.org/dc/terms/":"dcterms:",
        "http://purl.org/NET/c4dm/event.owl#":"event:",
        "http://purl.org/spar/fabio/":"fabio:",
        "http://xmlns.com/foaf/0.1/":"foaf:",
        "http://aims.fao.org/aos/geopolitical.owl#":"geo:",
        "http://purl.obolibrary.org/obo/":"obo:",
        "http://purl.org/net/OCRe/research.owl#":"ocrer:",
        "http://purl.org/net/OCRe/study_design.owl#":"ocresd:",
        "http://www.w3.org/2004/02/skos/core#":"skos:",
        "http://vivo.ufl.edu/ontology/vivo-ufl/":"ufVivo:",
        "http://www.w3.org/2006/vcard/ns#":"vcard:",
        "http://vitro.mannlib.cornell.edu/ns/vitro/public#":"vitro-public:",
        "http://vivoweb.org/ontology/core#":"vivo:",
        "http://vivoweb.org/ontology/scientific-research#":"scires:"
    }
    # First matching namespace wins; its URI is swapped for the short tag.
    for uri, tag in ns.items():
        if uri in p:
            return p.replace(uri, tag)
    return None
bfe1cf2ea83f4faee77299b0e7929571b550b59a
686,876
from pathlib import Path


def files_list(mydir):
    """Create a list of filenames in mydir."""
    # iterdir() yields every direct child of the directory.
    return list(Path(mydir).iterdir())
b6ebaaea602e4bd5d6b80a5b9a810a3a868dba9c
301,759
import string


def convert(value):
    """Get a decimal value and return a tuple of that value in different
    numerical formats. (<str>hex, <str>binary, <str>ASCII, <int>int)
    """
    # Only printable single-byte codes (33..255) get an ASCII rendering.
    if 32 < value < 256 and chr(value) in string.printable:
        ascii_val = chr(value)
    else:
        ascii_val = ""
    return (hex(value), bin(value), ascii_val, value)
54e00fa7227053e9a06593a525d67fad807f1a60
192,275