Columns: content (string, 39 to 9.28k chars); sha1 (string, 40 chars); id (int64, 8 to 710k)
def get_matching_files(url, entry):
    """Given a search entry returned from PilotClient.get_search_entry, find
    all of the partial matches in the entry. Partial matches can happen if a
    base folder matches, but not the files below it. For example:
    * A url: http://example.com/files/foo
    * entry with files:
        http://example.com/files/foo/bar.txt
        http://example.com/files/foo/moo.hdf5"""
    if entry and entry.get('files'):
        return [f for f in entry['files'] if url in f['url']]
    return []
33c7a22dbd66934dd76ae522eade0e64d627fd8c
499,421
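A minimal usage sketch (the entry shape below is inferred from the docstring, not taken from PilotClient itself):

entry = {'files': [
    {'url': 'http://example.com/files/foo/bar.txt'},
    {'url': 'http://example.com/files/foo/moo.hdf5'},
    {'url': 'http://example.com/files/other/baz.txt'},
]}
print(get_matching_files('http://example.com/files/foo', entry))
# -> the two entries under .../files/foo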
from pathlib import Path

def _template(name):
    """Read the specified template file."""
    template = Path(__file__).parent / 'templates' / name
    with open(template, 'r') as file:
        return file.read().strip()
746ba2ae2be6b1abff14953a030e3e5393df1d7b
180,438
import click

def unstyle_output(msg: str) -> str:
    """ A thin wrapper around click.unstyle. """
    s: str = click.unstyle(msg)
    return s
f52dbf774c6d1567da4fb39468460e3197a1a821
456,050
def decode_ber(ber):
    """ Decodes a ber length byte array into an integer

    return: (length, bytes_read) - a tuple of values
    """
    ber = bytearray(ber)
    length = ber[0]
    bytes_read = 1
    if length > 127:
        bytes_read += length & 127  # Strip off the high bit
        length = 0
        for i in range(1, bytes_read):
            length += ber[i] << (8 * (bytes_read - i - 1))
    return length, bytes_read
3ed69a24e2ceef7e157ee2773d40bf1425b7ae99
406,275
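For example, a short-form length is returned as-is, while a long-form length (high bit set in the first byte) pulls in the following bytes:

assert decode_ber(b'\x05') == (5, 1)
assert decode_ber(bytes([0x82, 0x01, 0x00])) == (256, 3)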
def get_nested_attr(obj, attr, default=None):
    """Get nested attributes of an object."""
    attrs = attr.split(".")
    for a in attrs:
        try:
            obj = getattr(obj, a)
        except AttributeError:
            # Compare against None so falsy defaults (0, '', False) are
            # still returned rather than re-raising.
            if default is not None:
                return default
            raise
    return obj
7f8dc82f6d08a763a0a083967ef524acbba8e983
680,775
def base36encode(number):
    """Converts an integer into a base36 string."""
    ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
    base36 = ''
    sign = ''
    if number < 0:
        sign = '-'
        number = -number
    if 0 <= number < len(ALPHABET):
        return sign + ALPHABET[number]
    while number != 0:
        number, i = divmod(number, len(ALPHABET))
        base36 = ALPHABET[i] + base36
    return sign + base36
c69312809f15fc546823e54b33d4198aaa7b3149
394,609
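Python's built-in int() already parses base36 (including the sign), so it can serve as a round-trip check:

assert base36encode(35) == 'z'
assert base36encode(36) == '10'
for n in (0, 42, -99, 10**12):
    assert int(base36encode(n), 36) == n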
def print_network(net, out_f=None):
    """
    Prints the number of learnable parameters.

    :param net: the network.
    :type net: torch.nn.Module
    :param out_f: file to also write the summary to, if given
    :type out_f: file object
    :return: number of learnable parameters.
    :rtype: int
    """
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    if out_f is not None:
        out_f.write(net.__repr__() + "\n")
        out_f.write('Total number of parameters: %d\n' % num_params)
        out_f.flush()
    return num_params
9dcfd7da51eab385dacb90d35bfeb139ed785be1
24,720
from datetime import datetime

def calculate_tz_offset(local_time_str, server_datetime_str):
    """Given a local_time without date and a server_datetime in UTC, calculate
    the user's timezone offset in minutes.

    This is not accurate for all cases, but should be approximately correct
    for the most common ones. It assumes all offsets are less than 12 hours
    in magnitude, so a positive offset of 13 hours is treated as a negative
    offset of 11 hours.
    """
    local_datetime = datetime.strptime(local_time_str, '%H:%M:%S')
    server_datetime = datetime.strptime(server_datetime_str, '%Y-%m-%d %H:%M:%S')
    # minutes since midnight
    local_minutes = local_datetime.hour * 60 + local_datetime.minute
    server_minutes = server_datetime.hour * 60 + server_datetime.minute
    if local_minutes == server_minutes:
        return 0  # local timezone is the same as UTC
    elif local_minutes > server_minutes:
        minutes_difference = local_minutes - server_minutes
        # Differences above 12 hours wrap around to a negative offset,
        # e.g. +13 hours becomes -11 hours.
        if minutes_difference <= 12 * 60:
            return minutes_difference
        return minutes_difference - 24 * 60
    else:
        minutes_difference = server_minutes - local_minutes
        if minutes_difference <= 12 * 60:
            return -minutes_difference
        return 24 * 60 - minutes_difference
79b098ffd6427fc2d5263569e7647d7f4bae6d8b
584,036
def std_url(url):
    """ Standardizes urls by removing protocol and final slash. """
    if url:
        url = url.split("//")[-1]
        if url.endswith("/"):
            url = url[:-1]
    return url
4940052d2feebd92f9cb54e7cee57318445f86c2
603,993
def get_name(metadata):
    """Return the name of an object based on the dictionary metadata.

    By preference: long_name, short_name, 'Unnamed'
    """
    name = metadata.get("long_name", None)
    if name is not None:
        return name
    name = metadata.get("short_name", None)
    if name is not None:
        return name
    return "Unnamed"
f01576f90cc37168009e30ee04283e7ff2a5e927
80,304
from typing import Tuple

def extract_entitlement(entitlement: str, text: str) -> Tuple[str, str, str, str]:
    """
    Extracts entitlement components from an entitlement string

    Args:
        entitlement: The entitlement itself
        text: The actual reply text

    Returns:
        Entitlement components
    """
    parts = entitlement.split('@')
    guid = parts[0]
    id_and_task = parts[1].split('|')
    incident_id = id_and_task[0]
    task_id = ''
    if len(id_and_task) > 1:
        task_id = id_and_task[1]
    content = text.replace(entitlement, '', 1)
    return content, guid, incident_id, task_id
8ecee278f03535f0a3f6426cd400d76c08f8e544
641,460
def extract_name_and_id(user_input):
    """Determines if the string user_input is a name or an id.

    :param user_input: (str): input string from user
    :return: (name, id) pair
    """
    name = id = None
    if user_input.lower().startswith('id:'):
        id = user_input[3:]
    else:
        name = user_input
    return name, id
feb84b022e626bae091bb28e8d3f316bdbf1ba3d
675,496
def bytes2human(n, format='%(value).1f %(symbol)sB'):
    """
    Convert n bytes into a human readable string based on format.
    symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
    see: http://goo.gl/kTQMs

    >>> from datalad.utils import bytes2human
    >>> bytes2human(1)
    '1.0 B'
    >>> bytes2human(1024)
    '1.0 KB'
    >>> bytes2human(1048576)
    '1.0 MB'
    >>> bytes2human(1099511627776127398123789121)
    '909.5 YB'
    >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
    '9.8 K/sec'
    >>> # precision can be adjusted by playing with %f operator
    >>> bytes2human(10000, format="%(value).5f %(symbol)s")
    '9.76562 K'

    Taken from: http://goo.gl/kTQMs and subsequently simplified
    Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
    License: MIT
    """
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return format % locals()
    return format % dict(symbol=symbols[0], value=n)
ffaa1c0d8672c92f9c854ae18bce0026dd38c0d7
488,563
def add_integers(a: int, b: int):
    """Sum two integers

    Args:
        a (int):
        b (int):

    Returns:
        int: sum of the integer inputs a and b
    """
    return a + b
3cdd1be6670a649b69f0eaa4a274ebd4029a6d64
634,390
def _card(item):
    """Handle card entries

    Returns:
        title (append " - Card" to the name),
        username (Card brand),
        password (card number),
        url (none),
        notes (including all card info)
    """
    notes = item.get('notes', "") or ""
    # Add card info to the notes; default to {} so .items() works when the
    # card entry is missing.
    notes = notes + "\n".join([f"{i}: {j}" for i, j in item.get('card', {}).items()])
    return f"{item['name']} - Card", \
        item.get('card', {}).get('brand', '') or "", \
        item.get('card', {}).get('number', "") or "", \
        "", \
        notes
fc7d5e4b960019b05ffe7ca02fd3d1a94d69b303
375
def ndbi(swir, red, nir, blue):
    """
    Converts the swir, red, nir and blue band of a Landsat scene to a
    normalized difference bareness index.

    Source: Zhao and Chen, "Use of Normalized Difference Bareness Index in
    Quickly Mapping Bare Areas from TM/ETM+", IEEE Conference Paper, 2005
    DOI: 10.1109/IGARSS.2005.1526319

    :param swir: numpy.array, shortwave infrared band
    :param red: numpy.array, red band
    :param nir: numpy.array, near infrared band
    :param blue: numpy.array, blue band
    :return: normal difference bareness index
    """
    # bareness index components
    x = (swir + red) - (nir + blue)
    y = (swir + red) + (nir + blue)
    # prevent division by zero error; record which pixels were patched so
    # that legitimate y == 1.0 pixels are not zeroed out below
    zero_mask = (y == 0.0)
    y[zero_mask] = 1.0
    # bareness index
    img = x / y
    img[zero_mask] = 0.0
    # clip range to normal difference
    img[img < -1.0] = -1.0
    img[img > 1.0] = 1.0
    return img
5a79e2af482e56429cfe76d3ab12ae9e487573af
690,863
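A quick sanity check on illustrative toy arrays: a pixel whose denominator is legitimately 1.0 keeps its computed index, while a zero-denominator pixel is forced to 0.0:

import numpy as np

swir = np.array([0.4, 0.0])
red = np.array([0.3, 0.0])
nir = np.array([0.2, 0.0])
blue = np.array([0.1, 0.0])
# first pixel: (0.7 - 0.3) / (0.7 + 0.3) = 0.4; second pixel: 0/0 -> 0.0
print(ndbi(swir, red, nir, blue))  # [0.4 0. ]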
import click

def _log_status(count, model_label):
    """
    Log the output of how many records were created.

    :param count: Amount created
    :type count: int
    :param model_label: Name of the model
    :type model_label: str
    :return: None
    """
    click.echo('Created {0} {1}'.format(count, model_label))
    return None
c1d223b00451abd6cc980ec0b83224d73624f3e8
397,481
def s2time(secs: float, show_secs: bool = True, show_fracs: bool = True) -> str:
    """Convert seconds to time.

    Args:
        secs (float):
        show_secs (bool): Show seconds (default: True)
        show_fracs (bool): Show centiseconds (default: True)

    Returns:
        str: `HH:MM` / `HH:MM:SS` / `HH:MM:SS.CS` string
    """
    try:
        secs = max(0, float(secs))
    except ValueError:
        placeholder = "--:--"
        if show_secs:
            placeholder += ":--"
        if show_fracs:
            placeholder += ".--"
        return placeholder
    wholesecs = int(secs)
    centisecs = int((secs - wholesecs) * 100)
    hh = int(wholesecs / 3600)
    hd = int(hh % 24)
    mm = int((wholesecs / 60) - (hh * 60))
    ss = int(wholesecs - (hh * 3600) - (mm * 60))
    r = f"{hd:02d}:{mm:02d}"
    if show_secs:
        r += f":{ss:02d}"
    if show_fracs:
        r += f".{centisecs:02d}"
    return r
cf15ae732ec0713ea181cfbbf05fba69206967a4
215,707
import tempfile

def get_spooled_file_object(s3_client, bucket, key):
    """Get a temporary spooled file object for an S3 object

    :param s3_client: Boto s3_client object to apply
    :param bucket: Bucket to download from
    :param key: key identifying the object within the bucket
    """
    result = tempfile.SpooledTemporaryFile()
    s3_client.download_fileobj(bucket, key, result)
    result.seek(0)
    return result
51d3478d798fb0e3e6692ef304343f65ce1466a8
69,064
from typing import Dict
from typing import Optional

def get_new_name(attrs: Dict[str, str],
                 mark_name: str = '',
                 name_map: Optional[Dict[str, str]] = None) -> str:
    """Get new name for a node.

    Args:
        attrs (Dict[str, str]): A dict contains attributes of an ONNX node.
        mark_name (str): The input mark op name. Default is ''.
        name_map (Dict[str, str]): A mapping of node names, defaults to `None`.

    Returns:
        str: The new node name.
    """
    if 'name' in attrs:
        new_name = attrs['name']
    else:
        new_name = '_'.join((attrs['func'], attrs['type'], str(attrs['id'])))
    if name_map is not None:
        if new_name in name_map:
            return name_map[new_name]
        if f'{mark_name}:{new_name}' in name_map:
            return name_map[f'{mark_name}:{new_name}']
    return new_name
a541b05a4cd0021815062698d48a2fb155d60f01
645,008
def unsigned_Q(num: int) -> int:
    """Returns unsigned value of signed (or unsigned) 64-bit integer
    (struct fmt 'Q')
    """
    return num & 0xffffffffffffffff
623b3a41e44bb6f66551827a2ef2396650562d0c
230,896
from typing import List
import re

def parse_args(line: str) -> List[str]:
    """Parse the first line of the program for the command line.

    This should have the form

      # cmd: mypy <options>

    For example:

      # cmd: mypy pkg/
    """
    m = re.match('# cmd: mypy (.*)$', line)
    if not m:
        return []  # No args; mypy will spit out an error.
    return m.group(1).split()
d768b5faf2a03a643555f6e508327a45050fca46
481,969
import hashlib
import codecs

def hash256_twice(s):
    """ Hash input with sha256 twice, reverse, encode to hex. """
    bh = hashlib.sha256(hashlib.sha256(s).digest()).digest()
    hh = codecs.encode(bh[::-1], "hex")
    return hh
3b55214077dcfd10b3ffdb28e578ada5e6b06604
379,378
def format_item(item, xmlns, key):
    """
    Format a rss item and return data.

    This properly formats a rss item according to the xmlns attribute and a
    key. It returns the data of the item according to its key.

    :param item: A rss item
    :param xmlns: XML namespace of the rss feed
    :param key: The key to format
    :type item: xml.etree.ElementTree.Element
    :type xmlns: str
    :type key: str
    :return: The item content
    :rtype: str
    """
    formatted_key = key
    if xmlns:
        formatted_key = xmlns + key
    data = item.find(formatted_key)
    if key == 'link' and xmlns:
        return data.attrib.get('href')
    else:
        return ''.join(data.itertext())
10b50932eb71977b76bf4b968227b9d642ee89d5
88,152
import re

def cwb_escape(inname):
    """Replace dots with "-" for CWB compatibility."""
    return re.sub(r"\.", "-", inname)
b552fc8a0ccf8d61c4febbc7617c4c0e3433f013
91,800
async def create_product(http_client, name="Product", description="Description") -> int:
    """
    Create a new product, assert status code, and return product ID.

    This is factored into a helper as many tests perform this operation.
    """
    resp = await http_client.post("/products", json={"name": name, "description": description})
    assert resp.status_code == 202
    resp_payload = resp.json()
    assert "id" in resp_payload
    return resp_payload["id"]
557a054c8ee1dca54d51ce8f94c7b8b04b3d4f5c
209,842
def map_range(value: float, in_min: float, in_max: float,
              out_min: float, out_max: float) -> float:
    """Map a value in one range to another."""
    if in_max - in_min <= 0.0:
        raise ValueError("invalid input range")
    if out_max - out_min <= 0.0:
        raise ValueError("invalid output range")
    if value < in_min or value > in_max:
        raise ValueError("input value out of range")
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
01e05906e32d070a39b202bce1bf3d6446286179
88,805
def view_capture_stream(telstate, capture_block_id, stream_name):
    """Create telstate view based on given capture block ID and stream name.

    It constructs a view on `telstate` with at least the prefixes

      - <capture_block_id>_<stream_name>
      - <capture_block_id>
      - <stream_name>

    Additionally if there is a <stream_name>_inherit key, that stream is
    added too (recursively).

    Parameters
    ----------
    telstate : :class:`katsdptelstate.TelescopeState` object
        Original telescope state
    capture_block_id : string
        Capture block ID
    stream_name : string
        Stream name

    Returns
    -------
    telstate : :class:`~katdal.sensordata.TelescopeState` object
        Telstate with a view that incorporates capture block, stream and combo
    """
    streams = [stream_name]
    while True:
        inherit = telstate.view(streams[-1], exclusive=True).get('inherit')
        if inherit is None:
            break
        streams.append(inherit)
    streams.reverse()
    for stream in streams:
        telstate = telstate.view(stream)
    telstate = telstate.view(capture_block_id)
    for stream in streams:
        capture_stream = telstate.join(capture_block_id, stream)
        telstate = telstate.view(capture_stream)
    return telstate
ebad45cd567dee22f3f7117fffb0fd4b3d376ccc
501,806
import math

def calculate_angle(a, b, c):
    """
    Returns angle (a-b-c), in degrees

    All points are assumed to be locintel.core.datamodel.geo.GeoCoordinate objects
    """
    b_c = math.sqrt((b.lng - c.lng) ** 2 + (b.lat - c.lat) ** 2)
    a_c = math.sqrt((a.lng - c.lng) ** 2 + (a.lat - c.lat) ** 2)
    a_b = math.sqrt((a.lng - b.lng) ** 2 + (a.lat - b.lat) ** 2)
    # law of cosines; clamp to [-1, 1] to guard acos against float drift
    formula = (b_c ** 2.0 + a_c ** 2.0 - a_b ** 2.0) / (2.0 * b_c * a_c)
    formula = -1.0 if formula < -1.0 else formula
    formula = 1.0 if formula > 1.0 else formula
    return (math.acos(formula) * 180.0) / math.pi
8823e1a7d348ab05fd00ea4359ef04224c854fb6
186,957
def attr_proc_title(binary, attrs):
    """
    Make a process title that has attr:<k>=<v> for set of attributes.
    """
    return "%s %s" % (binary, " ".join(["attr:%s=%s" % (k, v) for (k, v) in attrs.items()]))
3e4b28119898e5b868da678cec10637f2c6532ae
263,526
def warc_datetime_str(date):
    """Amendment to warctools' function to fix non-ISO8601 output."""
    iso = date.isoformat()
    if "." in iso:
        iso = iso[:iso.find(".")]
    if "+" in iso:
        iso = iso[:iso.find("+")]
    return iso + "Z"
f5932bedd398ecd5c6f3e346b1fc2ccfc024eaa5
612,158
import json

def get_keys(path):
    """
    Given a path to a json file, returns the keys

    Parameters
    ----------
    path: path with file name of json file

    Returns
    -------
    returns: list of top-level keys
    """
    with open(path) as f:
        return list(json.load(f).keys())
7883661d799eca53a59e663b5248fa872189031c
171,595
def multiply_two_number(a, b):
    """ Returns the result of multiplying two numbers """
    # unused_var = 'blah'
    return a * b
86637659dccdce4d83bdfec5c6bda6e2d922e826
466,462
def _find_tusd_object_name(params: list[dict]) -> str:
    """
    Finds the tusd_objekt_navn from a list of parameters

    :param params: list of workflow parameters
    :return: tusd_objekt_navn
    """
    for param in params:
        if param['name'] == 'TUSD_OBJEKT_NAVN':
            return param['value']
    raise NotImplementedError("TUSD_OBJEKT_NAVN is the only supported parameter")
5bd7dca4b1cc3ca8f109248f5e0d27f002056db3
491,282
def get_access_pdf(pdf_dir):
    """Gets a PDF file from a directory. Fails if there are multiple PDF files

    Args:
        pdf_dir (Path obj): directory containing PDF file
    """
    pdf_files = [f for f in pdf_dir.iterdir()
                 if f.is_file() and not f.name.startswith(".") and f.name.endswith(".pdf")]
    if len(pdf_files) == 0:
        raise Exception("No PDF file found")
    elif len(pdf_files) > 1:
        raise Exception("More than one PDF file found")
    else:
        return pdf_files[0]
7c2c661812e25d0024884aaf302312bde9fa86dc
155,759
def word_flipper(our_string):
    """
    Flip the individual words in a sentence

    Args:
        our_string(string): Strings to have individual words flip

    Returns:
        string: String with words flipped
    """
    word_list = our_string.split(" ")
    for idx in range(len(word_list)):
        word_list[idx] = word_list[idx][::-1]  # [index1:index2:step]
    return " ".join(word_list)
fd484079407342925fc13583fb1fbee9ee472b14
708,675
def basal_metabolic_rate(gender, weight, height, age):
    """Calculate and return a person's basal metabolic rate (bmr) in
    calories per day. weight must be in kilograms, height must be in
    centimeters, and age must be in years.
    """
    if gender.upper() == "F":
        bmr = 447.593 + 9.247 * weight + 3.098 * height - 4.330 * age
    else:
        bmr = 88.362 + 13.397 * weight + 4.799 * height - 5.677 * age
    return bmr
d9a3cb99fbcfbfe4d43075392dfe45bc34d97f9b
415,216
def associative_backdate(now, backdate):
    """Correct non-associative relative-delta month math

    :param now: datetime start point
    :param backdate: relativedelta value to move back from now.

    Asking for a relative delta back in time for a period of months and then
    adding the same number of months does NOT always produce the same
    starting value. For example May 30 - 3 months => Feb 28/29 depending on
    leap year determination. Adding 3 months to Feb 28 always produces
    May 28.

    Work around this problem by returning a backdated datetime value, that
    is by reducing now by the backdate, and also return a ``nowish``
    datetime value, either equal to now or adjusted to make the pair
    associative, such that:

        nowish - backdate == result
        result + backdate == nowish

    Therefore nowish will typically be equal to now, but altered in boundary
    cases for tests needing this associative property.

    :returns: backdated_datetime, nowish
    """
    result = now - backdate
    return result, result + backdate
8ba36900c06710e659f878e4bbc364caeb4eaf67
53,230
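A sketch of the boundary case, assuming backdate is a dateutil relativedelta (which the docstring's month arithmetic implies):

from datetime import datetime
from dateutil.relativedelta import relativedelta

backdate = relativedelta(months=3)
result, nowish = associative_backdate(datetime(2024, 5, 30), backdate)
print(result)  # 2024-02-29 00:00:00 (May 30 minus 3 months, clamped to the leap day)
print(nowish)  # 2024-05-29 00:00:00 (adjusted so the pair is associative)
assert nowish - backdate == result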
import base64

def base64_2_str(base64_encode):
    """
    Args:
        base64_encode: base64-encoded str

    Returns:
        str(utf-8)
    """
    base64_decode = base64.b64decode(base64_encode).decode('utf-8')
    return base64_decode
0f9f520843122d25eae17f6167f589c8fdc81e5f
567,345
import json

def get_inline_policies(iam_client, role_name):
    """ Get inline policies for a role """
    # boto3's IAM calls take the role *name*, not the ARN
    inline_policies = iam_client.list_role_policies(RoleName=role_name)
    return [
        json.dumps(
            iam_client.get_role_policy(RoleName=role_name, PolicyName=policy).get("PolicyDocument")
        )
        for policy in inline_policies.get("PolicyNames", [])
    ]
92f7985381f16c3224be6d3eb725b9772bdc7498
234,130
import re

def is_data_csv(fname):
    """Checks if a file is a data csv"""
    # escape the dot so it matches a literal '.' rather than any character
    return re.search(r"^iter_(\d+)\.csv$", fname) is not None
4f590d472ca1c0cf4bbdea6c8157b0b20d836c4a
505,093
from pathlib import Path

def AbsolutePath(*args, **kargs) -> Path:
    """Create an absolute path (useful as argument 'type')"""
    return Path(*args, **kargs).absolute()
1bf531a2d845ce37e9b34471bc183dcb75369f33
500,248
def prepare_dict(hermes_dict):
    """ Prepare a rhasspy type like dict from a hermes intent dict """
    intent = hermes_dict["intent"]["intentName"]
    out_dict = {}
    out_dict.update({"slots": {s["slotName"]: s["rawValue"] for s in hermes_dict["slots"]}})
    out_dict["intent"] = {"name": intent}
    return out_dict
64a48d88d7a713d3c58fe480632670ef9beb1676
187,151
def _merge_dicts(*dicts, **opts):
    """
    The standard dictionary merge idiom, with a twist: an *opts* keyword
    dictionary serves as the initial merge target.
    """
    target = opts
    for d in dicts:
        target.update(d)
    return target
570224940ac697d532778f4140792af125f8bf35
526,371
def _find_AP_and_RL_diameter(major_axis, minor_axis, orientation, dim):
    """
    This script checks the orientation of the ellipse and assigns the
    major/minor axis to the appropriate dimension, right-left (RL) or
    antero-posterior (AP). It also multiplies by the pixel size in mm.

    :param major_axis: major ellipse axis length calculated by regionprops
    :param minor_axis: minor ellipse axis length calculated by regionprops
    :param orientation: orientation in degree. Ranges between [0, 90]
    :param dim: pixel size in mm.
    :return: diameter_AP, diameter_RL
    """
    if 0 <= orientation < 45.0:
        diameter_AP = minor_axis
        diameter_RL = major_axis
    else:
        diameter_AP = major_axis
        diameter_RL = minor_axis
    # Adjust with pixel size
    diameter_AP *= dim[0]
    diameter_RL *= dim[1]
    return diameter_AP, diameter_RL
403c4c42c845e427422eef839d66323be6b12dba
551,458
def yesno(value):
    """Convert 0/1 or True/False to 'yes'/'no' strings.

    Weka/LibSVM doesn't like labels that are numbers, so this is helpful
    for that.
    """
    if value == 1:  # True == 1 also matches here
        return 'yes'
    return 'no'
050baf0e4dd604644bd595a0432a7f031ef019dd
504,159
import collections

def GetTurnStats(game_results):
    """Returns a histogram of game lengths (in rounds played)."""
    hist = collections.Counter()
    for game_result in game_results:
        hist[game_result.num_rounds_played] += 1
    return hist
78a4980ffa3a71d0181499f6448c75fa7d56c422
689,213
def vcross(self, labxr="", labyr="", labzr="", labx1="", laby1="", labz1="",
           labx2="", laby2="", labz2="", **kwargs):
    """Forms element table items from the cross product of two vectors.

    APDL Command: VCROSS

    Parameters
    ----------
    labxr, labyr, labzr
        Label assigned to X, Y, and Z-component of resultant vector.

    labx1, laby1, labz1
        X, Y, and Z-component of first vector label.

    labx2, laby2, labz2
        X, Y, and Z-component of second vector label.

    Notes
    -----
    Forms labeled result items for the selected element from the cross
    product of two vectors:

        {LabXR, LabYR, LabZR} = {LabX1, LabY1, LabZ1} X {LabX2, LabY2, LabZ2}

    Data must be in a consistent coordinate system. Labels are those
    associated with the ETABLE command.
    """
    command = f"VCROSS,{labxr},{labyr},{labzr},{labx1},{laby1},{labz1},{labx2},{laby2},{labz2}"
    return self.run(command, **kwargs)
ed40fd87eebfae617c1a3406b07d0ec7261c83e5
665,295
def _GetWinLinkRuleNameSuffix(embed_manifest):
    """Returns the suffix used to select an appropriate linking rule depending
    on whether the manifest embedding is enabled."""
    return "_embed" if embed_manifest else ""
2cf42d4a22007baa648842975f61d36406bd84cb
585,008
def get_lname_for_statedict(lname):
    """
    Convert layer name into the form used to address the weight tensor of
    the module through model_statedict().
    """
    lname_bits = lname.split('_')
    lname_bits.append('weight')
    lname_for_statedict = '.'.join(lname_bits)
    return lname_for_statedict
a68276712134ecfb6ca379ede68438c478cd6cb1
299,678
def find_command(command, all_commands):
    """
    Looks up a command in a list of dictionaries; returns the matching
    dictionary from the list (or None if not found).

    :param command: Command to look up
    :param all_commands: list of all commands
    :return: dictionary
    """
    for item in all_commands:
        if item["command"] == command:
            return item
c57590a52acc4698e98137915e8ff46002a2f739
254,169
import hashlib

def compute_md5_hex(data):
    """Return the hex MD5 of the data"""
    h = hashlib.md5()
    h.update(data)
    return h.hexdigest()
a7b8bea7d8f396fb319ffff55dbc05ccf194e284
160,126
def field_to_list(row, key):
    """
    Transforms key in row to a list. We split on semicolons if they exist
    in the string, otherwise we use commas.

    Args:
        row: row of data
        key: key for the field we want

    Returns:
        row: modified row
    """
    if not row[key]:
        return row
    if ";" in row[key]:
        row[key] = [c.strip() for c in row[key].split(";")]
    elif "," in row[key]:
        row[key] = [c.strip() for c in row[key].split(",")]
    else:
        row[key] = [row[key]]
    return row
accc49cbb63a6262ed23ba5e144315fc2ac8517b
462,218
def active_queues(state):
    """List the task queues a worker is currently consuming from."""
    if state.consumer.task_consumer:
        return [dict(queue.as_dict(recurse=True))
                for queue in state.consumer.task_consumer.queues]
    return []
f699478913b8b259ebc881bd728a8f0cbd2bf458
615,011
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
    """Find the position just after the matching endchar.

    Args:
      line: a CleansedLines line.
      startpos: start searching at this position.
      depth: nesting level at startpos.
      startchar: expression opening character.
      endchar: expression closing character.

    Returns:
      On finding matching endchar: (index just after matching endchar, 0)
      Otherwise: (-1, new depth at end of this line)
    """
    for i in range(startpos, len(line)):
        if line[i] == startchar:
            depth += 1
        elif line[i] == endchar:
            depth -= 1
            if depth == 0:
                return (i + 1, 0)
    return (-1, depth)
97ea6f4347c21feb8287b70fbfe0935e9c2301b3
522,113
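For instance, scanning a plain string for the parenthesis that matches the one at index 3, plus an unclosed case:

line = "foo(bar(x), y) + 1"
print(FindEndOfExpressionInLine(line, 3, 0, '(', ')'))            # (14, 0)
print(FindEndOfExpressionInLine("if (a and (b", 3, 0, '(', ')'))  # (-1, 2)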
def alpha_waste(lyambda_cond_waste_avrg, rho_cond_waste_avrg, mu_cond_waste_avrg,
                W_mass, n_pipe_waste, L_waste):
    """
    Calculates the coefficient of heat transfer (alpha) from steam to the
    wall of the pipe.

    Parameters
    ----------
    lyambda_cond_waste_avrg : float
        The thermal conductivity of condensate, [W / (m * degrees Celsius)]
    rho_cond_waste_avrg : float
        The density of condensate, [kg / m**3]
    mu_cond_waste_avrg : float
        The viscosity of condensate, [Pa / s]
    W_mass : float
        The steam flow rate of the feed heat exchanger, [kg/s]
    n_pipe_waste : float
        The number of pipes in the heat exchanger, [dimensionless]
    L_waste : float
        The length of tubes, [m]

    Returns
    -------
    alpha_waste : float
        The coefficient of heat transfer (alpha) from steam to the wall of
        the pipe, [W / (m**2 * degrees Celsius)]

    References
    ----------
    Дытнерский, formula 2.24, p. 53
    """
    return lyambda_cond_waste_avrg * 2.02 * ((rho_cond_waste_avrg ** 2) * L_waste * n_pipe_waste
                                             / (mu_cond_waste_avrg * W_mass)) ** (1 / 3)
b7e6ef03a8ebc62ebdae3dcd1ac196dfb80871da
508,732
import json

def post_to_query_string(dico):
    """
    Transform a dict to a query string

    >>> post_to_query_string({'foo': 'bar', 'where': {'foo': 'bar'}})
    ?foo=bar&where={"foo": "bar"}

    :params dico the dico to convert
    """
    querystring = "?"
    for key in dico:
        querystring += "%s=" % key
        if isinstance(dico[key], dict):
            querystring += json.dumps(dico[key])
        else:
            querystring += str(dico[key])
        querystring += "&"
    return querystring.rstrip('&')
21b2c43a948c148c37ba2649e8032b1f54f88926
586,726
def human_readable_resolution(r_ms: int) -> str:
    """
    Resolves a resolution in milliseconds to a human readable string

    :param r_ms: resolution in milliseconds
    :return: human readable resolution
    """
    switcher = {
        60000: '1 Minute',
        180000: '3 Minutes',
        300000: '5 Minutes',
        600000: '10 Minutes',
        900000: '15 Minutes',
        1800000: '30 Minutes',
        2700000: '45 Minutes',
        3600000: '1 Hour',
        7200000: '2 Hours',
        10800000: '3 Hours',
        14400000: '4 Hours',
        21600000: '6 Hours',
        43200000: '12 Hours',
        86400000: '1 Day',
    }
    return switcher.get(r_ms, f"Undetectable resolution: {r_ms}")
bb11a0de6800d4b0b3c4f3eb848ce8764d36deea
205,443
def split_by_timestamp(xyzrph: dict):
    """
    Takes a Kluster xyzrph (the dictionary object that stores uncertainty,
    offsets, angles, etc. settings) and returns a new dictionary for each
    timestamped entry.

    Parameters
    ----------
    xyzrph
        dict of offsets/angles/tpu parameters from the fqpr instance

    Returns
    -------
    list
        list of dictionaries, one for each timestamp entry in the base xyzrph
    """
    first_sensor = list(xyzrph.keys())[0]
    tstmps = list(xyzrph[first_sensor].keys())
    split_data = [{} for t in tstmps]
    for ky, dictdata in xyzrph.items():
        for tstmp, val in dictdata.items():
            tindex = tstmps.index(tstmp)
            split_data[tindex][ky] = {tstmp: val}
    return split_data
bd57217e82e92b8aa0ea30598426db36788a64b2
66,797
def ensure_overlap(lh, mor, expr):
    """
    ensures label overlap with weights and expression matrix

    Args:
        lh (pandas DataFrame): sparse DataFrame indicating likelihood for regulators
        mor (pandas DataFrame): sparse DataFrame indicating mode of regulation
            for transcription factors
        expr (:obj: `pandas DataFrame`): pandas DataFrame of shape [n_feats, n_samps]

    Returns:
        expression, mor, lh (pandas DataFrame): expression, mode of regulation,
            and likelihood frames, respectively, re-indexed to be concordant
            with weights associated with regulon
    """
    expression = expr.reindex(mor.columns)
    mor = mor.reindex(expression.index, axis=1)
    lh = lh.reindex(expression.index, axis=1)
    return expression, mor, lh
716e577a6748199ad9ef95e5b2fecacca0b24cf3
51,737
def clear_handlers(logger):
    """
    Clear all handlers from logger

    Parameters
    ----------
    logger : logging.logger
        Logger to remove all handlers from

    Returns
    -------
    logger : logging.logger
        Logger with all handlers removed
    """
    handlers = logger.handlers.copy()
    for handler in handlers:
        # Copied from `logging.shutdown`.
        try:
            handler.acquire()
            handler.flush()
            handler.close()
        except (OSError, ValueError):
            pass
        finally:
            handler.release()
        logger.removeHandler(handler)
    logger.handlers.clear()
    return logger
40e1d086df01c1afe4168165e7a59bc0110edef2
454,945
def google_api_query(query_dict: dict) -> str:
    """Join given query args into one string.

    Return query string in format required by googlapis:
    https://developers.google.com/books/docs/v1/using#WorkingVolumes
    """
    if not query_dict:
        return ""

    def allowed_google_item(item):
        # keep only non-empty values for the supported field keywords
        return bool(item[1]) and item[0] in ["intitle", "inauthor"]

    query_string = f"q={query_dict.get('search', '')}"
    for item in query_dict.items():
        if allowed_google_item(item):
            query_string = f"{query_string}+{item[0]}:{item[1]}"
    return query_string
b169c0575874cf96b438d87d3e9305ffc917d96a
561,408
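For example, with an illustrative query dict (empty values and unsupported keys are skipped):

query = {'search': 'python', 'intitle': 'cookbook', 'inauthor': ''}
print(google_api_query(query))  # q=python+intitle:cookbook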
def accuracy(y_hat, y):
    """Get accuracy."""
    return (y_hat.argmax(axis=1) == y).mean().asscalar()
9c63e1d8b7e06dc278b2965abb3931cd0f6208c7
691,321
def add_spaces(x):
    """Add four spaces to every line in x

    This is needed to make html raw blocks in rst format correctly
    """
    y = ''
    if isinstance(x, str):
        x = x.split('\n')
    for q in x:
        # restore the newline that str.split('\n') removed
        y += '    ' + q + '\n'
    return y
8443f4d019022b1e4705abbeca5bbd54bc94699d
691,179
def format_path(path):
    """
    (str) -> str

    Make sure that path ends with a slash '/'
    """
    return path if path.endswith('/') else path + '/'
286193e6a5e775651e241e83792cb17bd2575d27
439,928
def range_overlap(ranges):
    """Return common overlap among a set of [left, right] ranges."""
    lefts = []
    rights = []
    if ranges:
        for (left, right) in ranges:
            lefts.append(left)
            rights.append(right)
        max_left = max(lefts)
        min_right = min(rights)
        if min_right - max_left >= 1:
            return (max_left, min_right)
f8f8a907e6847dc308a537dab5033b8c49149674
651,142
import torch

def cr_matrix_vector_product(m: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Returns the product of a complex matrix and a real vector.

    :param m: Matrix as ``(*, m, n, 2)`` tensor.
    :param x: Vector as ``(*, n)`` tensor.
    :return: Result as ``(*, m, 2)`` tensor.
    """
    xr = x.unsqueeze(-1)
    mr = m[..., 0]
    mi = m[..., 1]
    rr = torch.matmul(mr, xr)
    ir = torch.matmul(mi, xr)
    return torch.cat([rr, ir], dim=-1)
cafc5f19441fc1c47eb9f82d0ae8992918a8df72
664,232
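A quick shape check with illustrative tensors:

import torch

m = torch.randn(4, 3, 2)  # 4x3 complex matrix; last dim holds (real, imag)
x = torch.randn(3)        # real vector
print(cr_matrix_vector_product(m, x).shape)  # torch.Size([4, 2])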
def restrict_chains(data, k):
    """Restrict data to people with at least k rows.

    Parameters
    ----------
    data : pandas.DataFrame
        The `data` from US to be subsetted.
    k : int
        The minimum number of measurements needed for a person to be kept.

    Returns
    -------
    data : pandas.DataFrame
        The subsetted data of people with at least k entries.
    """
    # How many entries does each person have?
    # Take IDs of anyone with at least k values.
    # Subset the main data frame to remove anyone with less than k values.
    id_counts = data["pidp"].value_counts()
    trajectories_ids = list(id_counts.loc[id_counts >= k].index)
    data = data.loc[data["pidp"].isin(trajectories_ids)]
    return data
106fc9c43d12085392a84a188dcc8d40a89ae817
10,871
import math

def xy_yaw_from_quaternion(quat):
    """Calculate the yaw angle in the xy plane from a rotation quaternion.

    :param quat: A unit quaternion representing the rotation
    :type quat: Any container that has numerical values in index 0, 1, 2 and 3
    :return: The yaw angle in radians projected on the xy plane
    :rtype: float
    """
    x, y, z, w = quat[:4]
    return math.atan2(2 * z * w + 2 * x * y, w**2 + x**2 - y**2 - z**2)
255225c80bba64f0d347871584db8a4138c199b8
669,832
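Sanity check with the quaternion for a 90-degree yaw about the z-axis (note the x, y, z, w component order assumed by the unpacking):

import math

quat = (0.0, 0.0, math.sin(math.pi / 4), math.cos(math.pi / 4))
print(xy_yaw_from_quaternion(quat))  # ~1.5708, i.e. pi / 2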
import re

def compile_routes(dynamic_routes):
    """Compiles the regexes in the routes table."""
    routes2 = []
    for (verb, path_pattern, handler_func) in dynamic_routes:
        compiled_path = re.compile("^" + path_pattern + "/*" + "$")
        entry = (verb, path_pattern, compiled_path, handler_func)
        routes2.append(entry)
    return routes2
e6d5445c222fe4320f3bcfb52f94b6f9d677e1e7
507,304
def path_string(obj, **kwargs):
    """return physical path as a string"""
    return "/".join(obj.getPhysicalPath())
bae683d392b519f8c43f5e9d1415abb8cf8de636
20,865
def fancy_vector(v):
    """
    Returns a given 3-vector or array in a cute way on the shell, if you
    use 'print' on the return value.
    """
    return "\n / %5.2F \\\n" % (v[0]) + \
           " | %5.2F |\n" % (v[1]) + \
           " \\ %5.2F /\n" % (v[2])
2340f22aa87da00abad30b9946c374f34b38496d
2,665
import math

def angle_trunc(the_angle):
    """
    Truncate the angle to the range [0, 2*pi)

    Arguments:
        the_angle {float} -- angle in radians

    Returns:
        float -- angle in [0, 2*pi)
    """
    # Modulo handles both negative angles and angles >= 2*pi
    return the_angle % (math.pi * 2)
81eafdf106990a1c0921a840e1b746796fc7b8eb
538,129
def loadOutputList(expt_name, outputType):
    """Loads a file containing all file names with a specified output data
    into a local list

    Args:
        expt_name (String): Name of experiment (which contains how many ever
            simulation output files)
        outputType (String): molpos or simtime. Determines which set of
            output filenames to load.

    Returns:
        TYPE: List
    """
    datalist = []
    if outputType == 'molpos':
        path = 'data/' + expt_name + '/outputMolposList.txt'
    elif outputType == 'simtime':
        path = 'data/' + expt_name + '/outputSimtimeList.txt'
    else:
        raise Exception('outputType required to be either \'molpos\' or \'simtime\'')
    with open(path) as f:
        for outputfile in f:
            datalist.append("data/" + expt_name + "/" + outputfile.rstrip())
    return datalist
22f98f554b7cb6161a893a2f81cc4b916460ccc6
76,194
def _compose_middleware(method, middleware):
    """Apply the given ServiceMiddleware to the provided method and return
    an InvocationHandler.

    Args:
        method: instance method
        middleware: list of ServiceMiddleware

    Returns:
        InvocationHandler
    """
    def base_handler(method, args):
        return method(*args)

    handler = base_handler
    if middleware:
        for m in middleware:
            handler = m(handler)
    return handler
79f6faa929cfbc5e8156f430bc22457cdeef6392
246,582
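A sketch of the wrapping order with a hypothetical logging middleware; the ServiceMiddleware contract is assumed to be "callable that takes a handler and returns a handler", and the last middleware in the list ends up outermost:

def logging_middleware(next_handler):
    def handler(method, args):
        print(f"calling {method.__name__} with {args}")
        return next_handler(method, args)
    return handler

composed = _compose_middleware(len, [logging_middleware])
# Handlers receive the method and its argument tuple at call time
composed(len, (["a", "b"],))  # prints the call, then returns 2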
def remove_stopwords(df, file, path='', column="Word"):
    """ Remove stopwords from a dataframe choosing a specific column in
    which to remove those words

    Parameters:
    -----------
    df : pandas dataframe
        Dataframe of counts per word per user
    file : string
        Name of file that contains the stopwords
    path : string, default ''
        Path of the file that contains the stopwords
    column : string, default 'Word'
        Column to clean

    Returns:
    --------
    df : pandas dataframe
        Dataframe of counts per word per user excluding the stopwords
    """
    # Remove stopwords; rstrip('\n') is safer than word[:-1], which would
    # chop the last character of a final line with no trailing newline
    with open(path + file) as f:
        stopwords = [line.rstrip('\n') for line in f]
    df = df[~df[column].isin(stopwords)]
    return df
001cd22a8d62c53ea074ae0a290ce8974d857343
106,188
def fraunhofer_distance(d=1.0, wavelen=550e-6):
    """computes the Fraunhofer distance from a diffracting aperture. It is
    the limit between the near and far field

    Parameters
    ----------
    d : float
        largest dimension of the aperture, i.e. aperture width/diameter (in mm)
    wavelen : float
        wavelength of light (in mm)

    Returns
    -------
    fdist : float
        the Fraunhofer distance in mm

    Notes
    -----
    This is the more stringent condition for the Fraunhofer Approximation.
    Therefore the far-field is the region where ``z > fraunhofer_distance``
    """
    return 2.0 * d**2 / wavelen
07cac7ee31003c7f31c34209547ec9d845770fe7
490,973
def clean_keys_of_slashes(record):
    """
    Replaces the slashes found in a dataset's keys with underscores

    :param record: dict whose keys may contain slashes
    :return: record with keys without slashes
    """
    for key in list(record):
        value = record[key]
        if '/' in key:
            # replace with _
            record[key.replace('/', '_')] = record.pop(key)
        # Check if the value is a list containing nested dicts and apply same
        if value:
            if isinstance(value, list) and isinstance(value[0], dict):
                for v in value:
                    clean_keys_of_slashes(v)
    return record
126fa995e0cc434365846e14834f8d7a03ca9ccc
521,060
def add(n1, n2):
    """Adds the 2 given numbers"""
    return n1 + n2
ca670819dab8230e355e1b236d9cc74ed0b3b868
314
def remove_max_edge_from_cycle(tree, cycle):
    """
    Remove the max-weighted edge from a *cycle* which is contained in the
    given *tree*.
    """
    cycle = sorted(cycle, key=lambda edge: int(edge.weight), reverse=True)
    return tree.remove(cycle[0])
0fc5b4b725e005f07441f18b7df6e22db06b3087
475,319
def _calculate_positives_negatives(target_details):
    """
    Takes expected and actual target values, generating true and false
    positives and negatives, including the actual correct # of positive
    and negative values.
    """
    true_positive = 0
    true_negative = 0
    false_negative = 0
    false_positive = 0
    actual_positive = 0
    actual_negative = 0
    for idx in range(len(target_details)):
        predicted_target = target_details[idx]["predicted_target"]
        expected_target = target_details[idx]["expected_target"]
        if expected_target == 1:
            actual_positive = actual_positive + 1
        else:
            actual_negative = actual_negative + 1
        if predicted_target == 1 and expected_target == 1:
            true_positive = true_positive + 1
        elif predicted_target == 0 and expected_target == 0:
            true_negative = true_negative + 1
        elif predicted_target == 1 and expected_target == 0:
            false_positive = false_positive + 1
        elif predicted_target == 0 and expected_target == 1:
            false_negative = false_negative + 1
    return {
        "true_positive": float(true_positive),
        "false_positive": float(false_positive),
        "actual_positive": float(actual_positive),
        "true_negative": float(true_negative),
        "false_negative": float(false_negative),
        "actual_negative": float(actual_negative),
    }
43314d34e98c4e9fa959426666f17d1a7d71af44
104,269
def read_network(file):
    """
    Read a (switched) Boolean network from a text file:

    Line 1: number of state variables
    Line 2: number of control inputs
    Line 3: number of sub-networks
    Line 4: transition matrix of the first sub-network (linear representation
        of a logical matrix)
    Line 5: transition matrix of the second sub-network (linear representation
        of a logical matrix)
    ...

    :param file: a text file
    :return: (n, m, w, Ls), where
        n: number of state variables
        m: number of control inputs
        w: number of sub-systems
        Ls: a list of transition matrices, each for one sub-system
    """
    with open(file, 'r') as f:
        n = int(f.readline().strip())
        m = int(f.readline().strip())
        w = int(f.readline().strip())
        N = 2 ** n
        M = 2 ** m
        Ls = []
        for _ in range(w):
            line = f.readline().strip()
            assert line, f'{w} transition matrices must be provided!'
            numbers = line.split()
            assert len(numbers) == M * N, f'The transition matrix must have {M * N} columns'
            Ls.append([int(num) for num in numbers])
    return n, m, w, Ls
76907cb8342e3f96b88f519cb09a2c28bc6e137e
78,297
def read_keywords(args):
    """This function reads the keywords from the input file and creates:

    - a dictionary where the key is the old name and the value is the new
      name; these keywords will be further processed.
    - a list of keywords which will not be processed, typically keywords
      with argument(s) in their names.
    - a list of duplicates - duplicated keyword names or names which are
      parts of another keyword name; they will not be processed.

    :param args: Parsed arguments.
    :type args: ArgumentParser
    :returns: keyword names - dictionary where the key is the old name and
        the value is the new name; ignored keyword names - list of keywords
        which will not be processed; duplicates - duplicated keyword names
        or names which are parts of another keyword name, they will not be
        processed.
    :rtype: tuple(dict, list, list)
    """
    kw_names = dict()
    ignored_kw_names = list()
    duplicates = list()

    for line in args.input:
        old_name, new_name = line.split(args.separator)
        if '$' in old_name:
            ignored_kw_names.append((old_name, new_name[:-1]))
        elif old_name in kw_names.keys():
            duplicates.append((old_name, new_name[:-1]))
        else:
            kw_names[old_name] = new_name[:-1]

    # Remove duplicates. Iterate over a snapshot since we append to the
    # same list inside the loop:
    for old_name, _ in list(duplicates):
        new_name = kw_names.pop(old_name, None)
        if new_name:
            duplicates.append((old_name, new_name))

    # Find KW names which are parts of other KW names. Iterate over a
    # snapshot of the keys: kw_names is mutated inside the loop, and popping
    # from a dict while iterating over it raises RuntimeError.
    for old_name in list(kw_names.keys()):
        count = 0
        for key in kw_names.keys():
            if old_name in key:
                count += 1
            if old_name in kw_names[key]:
                if old_name != key:
                    count += 1
        if count > 1:
            duplicates.append((old_name, kw_names[old_name]))
            kw_names.pop(old_name)

    return kw_names, ignored_kw_names, duplicates
fd261c6819424e171776130e74538b05baf6e830
67,920
def _check_pixel(tup):
    """Check if a pixel is black, supports RGBA"""
    return tup[0] == 0 and tup[1] == 0 and tup[2] == 0
a12b1a8ce51a59e37326ef8f7bc80a5e5a907a1a
679,629
def clean_sort_param(request, date_sort='created'):
    """
    Handles empty and invalid values for sort and sort order.
    'created' by ascending is the default ordering.
    """
    sort = request.GET.get('sort', date_sort)
    order = request.GET.get('order', 'asc')
    if sort not in ('name', 'created', 'nomination'):
        sort = date_sort
    if order not in ('desc', 'asc'):
        order = 'asc'
    return sort, order
3292403c2c1b8b06c2cb8b32fa40ca06b835a1eb
354,778
def write_variable_length(value):
    """
    Write a variable length variable.

    Parameters
    ----------
    value : int
        Value to be encoded as a variable of variable length.

    Returns
    -------
    bytearray
        Variable with variable length.
    """
    result = bytearray()
    result.insert(0, value & 0x7F)
    value >>= 7
    if value:
        result.insert(0, (value & 0x7F) | 0x80)
        value >>= 7
        if value:
            result.insert(0, (value & 0x7F) | 0x80)
            value >>= 7
            if value:
                result.insert(0, (value & 0x7F) | 0x80)
    return result
59683e3028c7de59c309e1eb17377b0207467834
328,512
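This is the MIDI-style variable-length quantity encoding: seven data bits per byte, with the high bit set on every byte except the last. For example:

assert write_variable_length(0x00) == bytearray([0x00])
assert write_variable_length(128) == bytearray([0x81, 0x00])
assert write_variable_length(0x0FFFFFFF) == bytearray([0xFF, 0xFF, 0xFF, 0x7F])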
def date_str2csv_date(date_str):
    """Utility function to convert a JSON date string into a date that is
    consistent with the representation in the csv files we work with.

    Args:
        date_str - the Python date string

    Returns:
        A date string formatted for csv
    """
    return ''.join([date_str[:4], date_str[5:7], date_str[8:10]])
8ea7aac0fba74c451e1b5b0775e1fa99bdec27a3
211,466
def affine_transformation(anchor_x, x0, x1, anchor_y, y0, y1):
    """Construct an affine coordinate transform and its inverse.

    From a GDAL-style specification of the parameters of an affine
    transformation, construct both the affine function that maps integer
    cell indices to the center points of the corresponding cells, and the
    inverse that maps points in a cell to the cell they belong to. The
    point (anchor_x, anchor_y) is a corner of the cell (0, 0).

    Parameters
    ==========
    anchor_x: X-Coordinate of a corner of the cell (0, 0)
    x0: Increase in X-Coordinate between cells (0, 0) and (1, 0)
    x1: Increase in X-Coordinate between cells (0, 0) and (0, 1)
    anchor_y: Y-Coordinate of a corner of the cell (0, 0)
    y0: Increase in Y-Coordinate between cells (0, 0) and (1, 0)
    y1: Increase in Y-Coordinate between cells (0, 0) and (0, 1)

    Returns
    =======
    coordinates_for_cell: Function mapping integer indices to coordinates
    cell_for_coordinates: Function mapping coordinates to the cell they lie in

    Examples
    ========
    >>> integer_grid_mids, integer_grid_cell = affine_transformation(0, 1, 0, 0, 0, 1)
    >>> integer_grid_mids(0, 0)
    (0.5, 0.5)
    >>> integer_grid_cell((9.4, 2.1))
    (9, 2)
    >>> shifted_grid_mids, shifted_grid_cell = affine_transformation(5, 1, 0, 2, 0, 1)
    >>> shifted_grid_mids(0, 0)
    (5.5, 2.5)
    >>> shifted_grid_cell((9.4, 2.1))
    (4, 0)
    >>> import numpy
    >>> forward, back = affine_transformation(*numpy.random.normal(size=6))
    >>> back(forward(0, 0))
    (0, 0)
    >>> back(forward(3, 4))
    (3, 4)
    """
    def coordinates_for_cell(col, row):
        col += 0.5
        row += 0.5
        return (anchor_x + x0 * col + x1 * row,
                anchor_y + y0 * col + y1 * row)

    def cell_for_coordinates(coordinates):
        coeff = 1 / (x0 * y1 - x1 * y0)
        x_raw, y_raw = coordinates
        x = x_raw - anchor_x
        y = y_raw - anchor_y
        col = coeff * (y1 * x - x1 * y)
        row = coeff * (-y0 * x + x0 * y)
        # int() truncates towards 0, so we have to use something else.
        # Negative cell indices *should* not appear, but who knows what
        # else this function will be used for!
        return (int(col), int(row))

    return coordinates_for_cell, cell_for_coordinates
e5b079add0655665b7f02a022993e8f6a5c6b255
530,121
import json

def get_json(file):
    """
    Load JSON data into memory

    file: the path to the data file
    """
    with open(file) as json_file:
        json_data = json.load(json_file)
    return json_data
e50656d07d199b607cb6fbf6053af7e7816f736a
89,567
def select_from_arxivcs_mag(sconn, scur, paper_id):
    """ Queries the sqlite3 table arxivcs_mag on paper_id, returns the
    pdf_url (can be None)"""
    # Parameterized query avoids SQL injection and quoting issues.
    query = """
    SELECT pdf_url
    FROM arxivcs_mag
    WHERE paper_id = ?
    """
    scur.execute(query, (paper_id,))
    # Only get one row (there will only be 1 row in the result). Only 1 field present.
    return scur.fetchone()[0]
5ba697f886a4366b9dd848d1c80b16dac61f255d
274,852
def cleanup_url(url: str) -> str:
    """Cleans up the text to be a valid URL.

    Returns:
        str: The cleaned up URL.
    """
    url = url.replace(" ", "-")
    if url.startswith("http://") or url.startswith("https://"):
        return url
    else:
        return f"https://{url}"
b3eed31d45f7bfaa975c60212949dd8fe0e43729
640,985
def map_code_to_status(http_code: int):
    """Maps HTTP code to human-readable descriptive string"""
    if 100 <= http_code <= 199:
        return 'INFO'
    elif 200 <= http_code <= 299:
        return 'SUCCESS'
    elif 300 <= http_code <= 399:
        return 'REDIRECT'
    elif 400 <= http_code <= 499:
        return 'CLIENT_ERROR'
    elif 500 <= http_code <= 599:
        return 'SERVER_ERROR'
    else:
        return 'OTHER'
c14982cf8d239ca079a30e45b901472b05dca511
478,202
import time

def string_to_date(date_string):
    """ Parse a formatted string into a time.struct_time."""
    return time.strptime(date_string, "%Y/%m/%d:%H:%M:%S")
1d6f15d1bed9b918be4449d8aad9577a0ab9bd87
186,021
def print_value(v):
    """double quote v if v is a string, return str(v) otherwise"""
    if isinstance(v, str):
        quoted = v.replace('"', r'\"')
        return f'"{quoted}"'
    else:
        return str(v)
487305e17639a4fd7ce60870d186f50ef4598de2
198,598
def quadratic_probe(h, i, m):
    """
    Finds a possible next position using quadratic probing

    Note that there are many different ways of doing this, such as with a
    formula in the form ai + bi^2. This is just a simple example

    Quadratic hashing reduces primary clustering: linear probing may lead
    to large chunks of values together, reducing efficiency

    Quadratic hashing may lead to secondary clustering, although this isn't
    as much of an issue. The bigger problem is skipping indices (for
    example, we might only get to even indices in some cases); we need a
    load factor of at most 1/2 and a prime sized table to ensure that an
    empty cell will always be found

    :param h: Computed hash value that has resulted in a collision
    :param i: Offset
    :param m: Size of the table
    :return: The next index to be checked if it is open
    """
    return (h + i * i) % m
3f202a892c2aa76077e79d365626e48b27c64462
300,749
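For instance, the first probe positions after a collision at slot 3 in a table of size 7; repeated values appear quickly, which is why the docstring insists on a load factor of at most 1/2 with a prime table size:

print([quadratic_probe(3, i, 7) for i in range(5)])  # [3, 4, 0, 5, 5]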
import math

def translate_key(key):
    """ Translate from MIDI coordinates to x/y coordinates. """
    vertical = math.floor(key / 16)
    horizontal = key - (vertical * 16)
    return horizontal, vertical
1916fb2abc97f421494c3ced21d9f8ec05936845
675,354
def parse_email(ldap_entry):
    """
    Returns the user's email address from an ldap entry
    """
    if "mail" in ldap_entry:
        email = ldap_entry['mail'][0]
    else:
        email = ldap_entry['uid'][0] + "@pdx.edu"
    return email
fbd25b486aae71828b1e784b5f2b128b747ba3e7
244,532
def yes_or_no(question):
    """Prompt for yes/no question"""
    while True:
        reply = str(input(question + ' (y/n): ')).lower().strip()
        if reply in ['y', 'yes']:
            return True
        if reply in ['n', 'no']:
            return False
72820ceef5730611e7ad11c8754581aae8e2c549
205,348
def filter_labels_by_class(obj_labels, classes):
    """Filters object labels by classes.

    Args:
        obj_labels: List of object labels
        classes: List of classes to keep, e.g. ['Car', 'Pedestrian', 'Cyclist']

    Returns:
        obj_labels: List of filtered labels
        class_mask: Mask of labels to keep
    """
    class_mask = [(obj.type in classes) for obj in obj_labels]
    # Boolean-mask indexing only works on numpy arrays, not plain lists, so
    # obj_labels is expected to be a numpy array of label objects.
    return obj_labels[class_mask], class_mask
854a32da802c794b0622a0a36895590823b7c780
699,860
def extract_glob_pattern(path_arg: str) -> tuple:
    """ Extract glob pattern from a path """
    if not path_arg:
        return None, None
    base_path = pattern = None
    if '*' in path_arg:  # we have globbing in src path
        path_components = path_arg.split("/")
        base_path_arr = []
        # let's establish the base path
        for el in path_components:
            if '*' not in el:
                base_path_arr.append(el)
            else:
                break
        # remove the base path components (those without *) from full path components
        for el in base_path_arr:
            path_components.remove(el)
        # rewrite the source path without the globbing part
        base_path = '/'.join(base_path_arr) + '/'
        # the globbing part is the rest of the elements that contain *
        pattern = '/'.join(path_components)
    else:
        base_path = path_arg
    return (base_path, pattern)
abaa82b35dedd19e3c410a5612cdd7f30dc21b6c
465,713