Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def single_number(nums: list[int]) -> int:
    """Returns the only element in `nums` that appears exactly once.

    Better space than a hashtable: in the worst case every element appears
    once before its duplicate is encountered, so a hashtable could grow to
    O(n/2) = O(n) entries even if entries are deleted once seen twice.
    XOR-folding avoids that entirely.

    Complexity:
        n = len(nums)
        Time: O(n)
        Space: O(1)

    Args:
        nums: array of integers s.t. every element appears twice, except for one

    Returns:
        the only element in `nums` that appears exactly once

    Examples:
        >>> single_number([2,2,1])
        1
        >>> single_number([4,1,2,1,2])
        4
        >>> single_number([1])
        1
    """
    single_num = 0
    for num in nums:
        single_num ^= num
    return single_num
0967dd88e209d9a050b81e020616c07ef5334568
334,638
def get_odd_numbers(num_list: list) -> list:
    """Returns a list of odd numbers from a list."""
    return [num for num in num_list if num % 2 != 0]
e7ff779e18b478bf1717912a1ad5b7130eed9b42
39,914
def _get_tissue(x):
    """
    It extracts the tissue name from a filename.
    """
    if x.endswith("-projection"):
        return x.split("spredixcan-mashr-zscores-")[1].split("-projection")[0]
    else:
        return x.split("spredixcan-mashr-zscores-")[1].split("-data")[0]
3dfec961002039b777bc1cec09cc626ec499190d
356,748
def add_dicts_by_key(in_dict1, in_dict2):
    """
    Combines two dictionaries and adds the values for those keys
    that are shared
    """
    both = {}
    for key1 in in_dict1:
        for key2 in in_dict2:
            if key1 == key2:
                both[key1] = in_dict1[key1] + in_dict2[key2]
    return both
9061e1abf09899e2f8a93c9c02899e2f8ba769a9
656,634
import torch
from typing import Tuple


def dummy_attention(key: torch.Tensor,
                    query: torch.Tensor,
                    value: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Create a dummy tensor which has the same inputs and outputs as
    nn.MultiheadAttention().__call__()

    Args:
        key (T): inputs to be passed through as the output
        query (T): dummy inputs
        value (T): dummy inputs

    Returns:
        Tuple[T, T]: values = (key, dummy outputs = torch.Tensor([]))
    """
    return key, torch.Tensor([])
2be97f984a6589798a3c88a2dbae6e2090b7f181
329,139
def make_variable_names_with_time_steps(number_of_lags, data_column_names):
    """
    lagged W first, instantaneous W last, i.e.,
    ..., x1_{t-2}, x2_{t-2}, ..., x1_{t-1}, x2_{t-1}, ..., x1_{t}, x2_{t}, ...
    """
    variable_names = []
    for i in range(number_of_lags, 0, -1):
        variable_names_lagged = [s + '(t-{})'.format(i) for s in data_column_names]
        variable_names += variable_names_lagged
    variable_names_t = [s + '(t)' for s in data_column_names]
    variable_names += variable_names_t
    return variable_names
6ca75ebafb84fe9640841478ddd06bd6557a3225
468,247
def ChromeOSTelemetryRemote(test_config):
    """Substitutes the correct CrOS remote Telemetry arguments.

    VMs use a hard-coded remote address and port, while physical hardware
    uses a magic hostname.

    Args:
        test_config: A dict containing a configuration for a specific test
            on a specific builder.
    """
    def StringContainsSubstring(s, sub_strs):
        for sub_str in sub_strs:
            if sub_str in s:
                return True
        return False

    VM_POOLS = [
        'chromium.tests.cros.vm',
        'chrome.tests.cros-vm',
    ]
    HW_POOLS = [
        'chrome-cros-dut',
        'chrome.cros-dut',
    ]
    dimensions = test_config.get('swarming', {}).get('dimension_sets', [])
    assert len(dimensions)
    pool = dimensions[0].get('pool')
    if not pool:
        raise RuntimeError(
            'No pool set for CrOS test, unable to determine whether running '
            'on a VM or physical hardware.')
    if StringContainsSubstring(pool, VM_POOLS):
        return [
            '--remote=127.0.0.1',
            # By default, CrOS VMs' ssh servers listen on local port 9222.
            '--remote-ssh-port=9222',
        ]
    if StringContainsSubstring(pool, HW_POOLS):
        return [
            # Magic hostname that resolves to a CrOS device in the test lab.
            '--remote=variable_chromeos_device_hostname',
        ]
    raise RuntimeError('Unknown CrOS pool %s' % pool)
623c37ed71e964ad0c0a7e8315216dbf3219696a
212,254
def setSourcesCategories(df):
    """
    Return the DataFrame df with renamed columns.

    The renaming maps each source's name to its category, i.e.

        Road traffic → Vehicular
        VEH → Vehicular
        Secondary bio → Secondary_bio
        BB → Bio_burning
        Biomass burning → Bio_burning
        etc.
    """
    possible_sources = {
        "Vehicular": "Vehicular",
        "VEH": "Vehicular",
        "VEH ind": "Vehicular_ind",
        "VEH dir": "Vehicular_dir",
        "Oil/Vehicular": "Vehicular",
        "Road traffic": "Vehicular",
        "Bio. burning": "Bio_burning",
        "Bio burning": "Bio_burning",
        "BB": "Bio_burning",
        "BB1": "Bio_burning1",
        "BB2": "Bio_burning2",
        "Sulfate-rich": "Sulfate_rich",
        "Nitrate-rich": "Nitrate_rich",
        "Sulfate rich": "Sulfate_rich",
        "Nitrate rich": "Nitrate_rich",
        "Secondaire": "Secondary_bio",
        "Secondary bio": "Secondary_bio",
        "Secondary biogenic": "Secondary_bio",
        "Marine biogenic/HFO": "Marine_bio/HFO",
        "Marine bio/HFO": "Marine_bio/HFO",
        "Marin bio/HFO": "Marine_bio/HFO",
        "Marine secondary": "Marine_bio",
        "Marin secondaire": "Marine_bio",
        "HFO": "HFO",
        "Marin": "Marine",
        "Sea/road salt": "Salt",
        "Sea salt": "Salt",
        "Aged sea salt": "Aged_salt",
        "Aged seasalt": "Aged_salt",
        "Primary bio": "Primary_bio",
        "Primary biogenic": "Primary_bio",
        "Biogenique": "Primary_bio",
        "Biogenic": "Primary_bio",
        "Mineral dust": "Dust",
        "Resuspended dust": "Dust",
        "Dust": "Dust",
        "Dust (mineral)": "Dust",
        "AOS/dust": "Dust",
        "Industrial": "Industrial",
        "Industry/vehicular": "Indus._veh.",
        "Arcellor": "Industrial",
        "Siderurgie": "Industrial",
        "Débris végétaux": "Plant_debris",
        "Chlorure": "Chloride",
        "PM other": "Other",
    }
    return df.rename(columns=possible_sources)
42e762c2376fe408d11ba7483b80d1cad11acdde
568,466
def class_partition(cls, string):
    """Given a set of characters and a string, take the longest prefix
    made up of the characters in the set, and return a tuple of
    (prefix, remainder).
    """
    ns = string.lstrip(cls)
    lc = len(string) - len(ns)
    return (string[:lc], string[lc:])
cc38eac0560e289c8acaca77db6fc6203b5fb37b
177,335
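Usage sketch for class_partition above, with illustrative inputs (here cls is a string of characters, not a class object):

print(class_partition("0123456789", "42abc"))  # -> ('42', 'abc')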
import requests
import json


def shakespeare_api(text):
    """
    Shakespeare API function to retrieve the translation.
    """
    api_params = {"text": text}
    response = requests.post(
        'https://api.funtranslations.com/translate/shakespeare.json',
        data=api_params)
    # Check for errors in response
    if "error" not in response.text.lower():
        response_dict = json.loads(response.text)
        shakespeare_description = response_dict["contents"]["translated"]
        return shakespeare_description
    else:
        error_text = response.text
        return error_text
f32f257ebb527895a68ab872eb2aed568bcbc111
493,666
def _convert_to_bool(value):
    """
    If the value is `true` or `false`, return the boolean equivalent form of
    the value. This method is case insensitive.

    :param value: the string value
    :type value: str
    :return: the string value or the boolean value
    :rtype: str or bool
    """
    if isinstance(value, str):
        lower_value = value.lower()
        if lower_value == "true":
            value = True
        elif lower_value == "false":
            value = False
    return value
aded942831f3a2808d89c1a77b76952415f6eeab
469,907
def decode_response_version_from_config(confbytes: bytes) -> str:
    """Decode the string version from the bytearray response from Ledger device"""
    return "{}.{}.{}".format(
        confbytes[1],
        confbytes[2],
        confbytes[3],
    )
66dc0b71b2c9a22ca8198fb2a5ecbe69a7a0871b
16,656
import string
import secrets


def generate_password(pass_len):
    """Creating a password of the specified length."""
    chars_set = string.ascii_letters + string.digits
    return ''.join(secrets.choice(chars_set) for _ in range(pass_len))
962e5b82f5fd32806597a61260903439d4f529f1
371,469
def area_to_capacity(statistical_roof_model_area_based, power_density_flat,
                     power_density_tilted):
    """Maps area shares to capacity shares of statistical roof model.

    The statistical roof model defines roof categories (e.g. south-facing with
    tilt 10°) and their shares in a population of roofs. This function maps
    area shares to shares of installable pv capacity. It discriminates between
    flat and tilted roofs, i.e. the power density of flat roofs can be
    different than the one from tilted roofs.

    Parameters:
        * statistical_roof_model_area_based: model as described above, values
          are shares of total roof area
        * power_density_flat: power density of flat pv installations, unit
          must be consistent with next
        * power_density_tilted: power density of tilted pv installations,
          unit must be consistent with previous

    Returns:
        * statistical_roof_model_capacity_based: model as described above,
          values are shares of total installable capacity
    """
    cap_based = statistical_roof_model_area_based.copy()
    flat_roofs = cap_based.index.get_level_values(0) == "flat"
    tilted_roofs = cap_based.index.get_level_values(0) != "flat"
    cap_based[flat_roofs] = cap_based[flat_roofs] * power_density_flat
    cap_based[tilted_roofs] = cap_based[tilted_roofs] * power_density_tilted
    return cap_based / cap_based.sum()
0e57c01bfa7c44743edb260b6a1b406ebf0fb82b
46,305
def getadjacent(graph, vertex):
    """
    vertex is the node index number, i.e. 1, 2, 3, ... or 8,
    and all adjacent nodes are returned.

    For example:
        getadjacent(graph, 1) -> [2, 4, 6]
        getadjacent(graph, 2) -> [1, 3, 7]
        getadjacent(graph, 3) -> [2, 4, 8]
        ....
    """
    nodes = []
    for n1, n2 in graph['edges']:
        if n1 == vertex:
            nodes.append(n2)
        if n2 == vertex:
            nodes.append(n1)
    return sorted(nodes)
bb53103806daee7f641e1df31df8ad000ab10fde
90,284
import pathlib


def _profile_avatar_upload_path(instance, filename):
    """Provides a clean upload path for user avatar images"""
    file_extension = pathlib.Path(filename).suffix
    return f'avatars/profiles/{instance.user.username}{file_extension}'
6b5ddea821f53825a51fb84c327686bc5044c86c
226,963
def force_list(obj):
    """Force object to be a list.

    If ``obj`` is a scalar value then a list with that value as sole element
    is returned, or if ``obj`` is a tuple then it is coerced into a list.
    """
    if isinstance(obj, tuple):
        return list(obj)
    elif not isinstance(obj, list):
        return [obj]
    return obj
25eecea6f5aee1dbd1f6ce23643079c16871f2a2
151,996
def is_casava_v180_or_later(header_line):
    """Check if the header looks like it is Illumina software post-casava v1.8

    Parameters
    ----------
    header_line : bytes
        A header line

    Returns
    -------
    bool
        ``True`` if casava v1.8+, otherwise ``False``

    Examples
    --------
    >>> from skbio.util import is_casava_v180_or_later
    >>> is_casava_v180_or_later(b'@foo')
    False
    >>> id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
    >>> is_casava_v180_or_later(id_)
    True

    """
    if not header_line.startswith(b'@'):
        raise ValueError("Non-header line passed in.")
    fields = header_line.split(b':')

    return len(fields) == 10 and fields[7] in b'YN'
da3c24ef74712590027ad238a2a871dcc10af913
261,209
def get_instance_pci_devs(inst, request_id=None):
    """Get the devices allocated to one or all requests for an instance.

    - For generic PCI request, the request id is None.
    - For sr-iov networking, the request id is a valid uuid
    - There are a couple of cases where all the PCI devices allocated to an
      instance need to be returned. Refer to libvirt driver that handles
      soft_reboot and hard_boot of 'xen' instances.
    """
    pci_devices = inst.pci_devices
    if pci_devices is None:
        return []
    return [device for device in pci_devices
            if device.request_id == request_id or request_id == 'all']
716ca1164e19f0ad052ebdaab38e603864e8ee10
395,610
def arity(argspec):
    """Determine positional arity of argspec."""
    args = argspec.args if argspec.args else []
    defaults = argspec.defaults if argspec.defaults else []
    return len(args) - len(defaults)
f19407943f92a2a4faa4735abf678467861153e7
694,257
import time


def timestamp(dt):
    """
    Return a timestamp for a local, naive datetime instance.
    """
    try:
        return dt.timestamp()
    except AttributeError:
        # Python 3.2 and earlier
        return time.mktime(dt.timetuple())
8d2b8dce2bde5ecaec4f196496e6e643f5b6d225
266,004
import re


def potcar_eatom_list_from_outcar(filename='OUTCAR'):
    """
    Returns a list of EATOM values for the pseudopotentials used.

    Args:
        filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.

    Returns:
        (List(Float)): A list of EATOM values, in the order they appear in
        the OUTCAR.
    """
    with open(filename) as f:
        outcar = f.read()
    eatom_re = re.compile(r"energy of atom\s+\d+\s+EATOM=\s*([-\d\.]+)")
    eatom = [float(e) for e in eatom_re.findall(outcar)]
    return eatom
895baa71772a196f2a34e6fe4d8bc4c846b27368
429,942
def _attrs_to_tuple(obj, attrs):
    """
    Create a tuple of all values of *obj*'s *attrs*.
    """
    return tuple(getattr(obj, a.name) for a in attrs)
3a38119b0f84a90bf42620d3fbcac1f37f59629c
154,389
import requests


def genderize(first_name):
    """
    Use genderize.io to return a dictionary with the result of the
    probability of a first name being of a man or a woman.

    Example:
        {'count': 5856, 'gender': 'male', 'name': 'Alex', 'probability': '0.87'}

    Parameters
    ----------
    first_name: str

    Returns
    -------
    query_result: dict
    """
    return requests.get("https://api.genderize.io/",
                        params={"name": first_name}).json()
dd6d107ce28bec3a5149f815ae15865cf34205be
36,569
def make_id_filter(pdb_ids, pdb_chains):
    """
    Generate a dataset filter only allowing specific PDB ID/Chains.

    Parameters
    ----------
    pdb_ids : list
        List of PDB IDs to accept.
    pdb_chains : list
        List of PDB chains corresponding to the IDs in `pdb_ids`.

    Returns
    -------
    Function
        A function returning True for the given PDB ID/Chain and False otherwise.
    """
    ids = set([f"{pid.upper()}_{chn}" for pid, chn in zip(pdb_ids, pdb_chains)])

    def func(record):
        return f"{record.pdb_id}_{record.pdb_chain}" in ids

    return func
3ec14d143bbbfd5e872f684bea3c4fcb83d42b08
117,188
import torch


def top_n_probs(predictions, k):
    """Get the top k, then remove all that are less than .75x as high
    probability as the most probable"""
    # Get the top k
    topk_probs, topk_indices = torch.topk(predictions, k)

    # Filter the top k to be only those that are at least .75x as likely as top1
    top_n_indices = []
    for prob, idx in zip(topk_probs, topk_indices):
        if prob / topk_probs[0] >= 0.75:
            top_n_indices.append(int(idx.detach().cpu().numpy()))

    return top_n_indices
0677ee668a7e7a428a1e32ec83f717c0b3cbf800
509,570
def settings_outside_clinical_bounds(cir, isf, sbr):
    """
    Identifies whether any of the settings are outside clinical bounds
    (based on medical advisory)

    Parameters
    ----------
    cir: float
        carb to insulin ratio for the particular scenario
    isf: float
        insulin sensitivity factor for the particular scenario
    sbr: float
        scheduled basal rate for the particular scenario

    Returns
    -------
    a boolean for whether any of the settings fall outside of the clinical
    bounds criteria as defined in the function
    """
    return (
        (float(isf) < 10)
        | (float(isf) > 500)
        | (float(cir) < 2)
        | (float(cir) > 150)
        | (float(sbr) < 0.05)
        | (float(sbr) > 30)
    )
7892624f56af40210684e345f84f45ec67e8a866
286,259
def encode_one_hot(target, num_classes):
    """Encode integer labels into one-hot vectors

    Args:
        target (torch.Tensor): (N,)
        num_classes (int): the number of classes

    Returns:
        torch.FloatTensor: (N, C)
    """
    one_hot = target.new_zeros(target.size(0), num_classes)
    one_hot = one_hot.scatter(1, target.unsqueeze(1), 1)
    return one_hot.float()
75236e32cd968c9af634a813ff98c86699a193b2
567,937
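A minimal usage sketch for encode_one_hot above (assumes PyTorch; values are illustrative):

import torch

target = torch.tensor([0, 2, 3])     # three samples
print(encode_one_hot(target, 4))     # four classes
# tensor([[1., 0., 0., 0.],
#         [0., 0., 1., 0.],
#         [0., 0., 0., 1.]])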
def compact(src):
    """Return a list of only truthy values in src."""
    return [i for i in src if i]
20a3c52d2c6beea9a28ed42912162d58270ca62e
112,630
def _find_matching_edge(edge, graph):
    """
    Retrieve an edge matching the edge inserted from the graph.

    :param edge: The edge to find.
    :param graph: The graph to find it in.
    :return: The edge including attributes.
    """
    # Materialise the edge view as a list so it supports indexing below.
    edges_data = list(graph.out_edges([edge[0]], data=True))
    edges = [edge_data[0:2] for edge_data in edges_data]
    matching_edge_index = edges.index(edge)
    return edges_data[matching_edge_index]
ad36b845c53af5e569ac19580788878d300ca957
373,081
import re


def is_email(email: str, only: bool = False) -> bool:
    """
    Check whether a string is a valid email address.

    :param email: the string to be checked
    :param only: indicate whether to allow mailto: at the beginning
    :return: True if a valid email, False otherwise
    """
    if type(email) is not str:
        raise TypeError("The method only takes str as its first input")
    pattern = re.compile(r"^(mailto:)?[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}$")
    if only:
        pattern = re.compile(r"^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}$")
    m = re.search(pattern, email.lower())
    if m:
        return True
    return False
36a4bfb46c197817e8effbd31c006c8b58651671
601,566
from pathlib import Path


def get_file_path(*, basedir, uid, suffix):
    """
    Generates a standardised internal file path
    """
    uid = str(uid)
    return Path(basedir) / uid[:2] / str(uid + suffix)
7f22c9be859aaef645e83fa70560e7bb1a6c592d
177,720
def clean_popest(table):
    """Cleans the population estimate data"""
    return table[["Code", "All ages"]].rename(
        columns={"Code": "geo_code", "All ages": "pop_2020"}
    )
ef0b7bb8d9a61709f03889833baba6e3b0ef7a00
29,067
def is_fixed_variable(config, variable):
    """Determine if a variable is listed in the config as fixed.

    Args:
        config (dict): Configuration dictionary
        variable (str): Variable name.

    Returns:
        bool: True if fixed, False otherwise.
    """
    return variable in config['fixed_variables']
0285bd85a202eef2efecc5b0ec834a287fb32b28
461,953
def strip_enclosing_quotes(string: str) -> str:
    """Strip leading/trailing whitespace and remove any enclosing quotes"""
    stripped = string.strip()

    # Need to check double quotes again in case they're nested inside
    # single quotes
    for quote in ['"', "'", '"']:
        if stripped.startswith(quote) and stripped.endswith(quote):
            stripped = stripped[1:-1]
    return stripped
712211d4a2044fd3c610ef7769ef73016c8569b9
573,696
def _covering_blocks(blocks, start, end=None):
    """Returns the blocks that include the [start:end] portion."""
    if end is None:
        end = start
    return [
        block for block in blocks
        if block.start <= start and block.end >= end
    ]
96275c2d9a25ea8011412139db69401663add5ff
442,983
def get_doc_offset(offset, document):
    """
    :type offset: list[str]
    :type document: dict

    >>> get_doc_offset(['a'], {'a': 4})
    4
    >>> get_doc_offset(['a', 'b'], {'a': {'b': 4}})
    4
    >>> get_doc_offset(['a'], {})
    Traceback (most recent call last):
    ...
    KeyError: 'a'
    """
    value = document
    for key in offset:
        value = value[key]
    return value
c0393a026831d669fadf9593dff4a6c96f80186f
588,541
async def edit_message(message, new_content):
    """
    Modify a message. Most tests and ``send_message`` return the
    ``discord.Message`` they sent, which can be used here.

    **Helper Function**

    :param discord.Message message: The target message. Must be a ``discord.Message``
    :param str new_content: The text to change `message` to.
    :returns: `message` after modification.
    :rtype: discord.Message
    """
    return await message.edit(content=new_content)
6c962c67562140ee9410504caec366bec845dbe8
622,184
def is_country_in_list(country, names):
    """Does country have any name from 'names' list among its
    name/name:en tags of boundary/label.
    """
    return any(
        any(
            country[property].get(name_tag) in names
            for name_tag in ('name', 'name:en')
        )
        for property in ('label', 'boundary')
        if country[property]
    )
82d910d9b950bb03ca490636661b24259179cc0d
162,665
def apply_headers(response):
    """Tell receiving app that data is JSON via response header."""
    response.headers['content-type'] = 'application/json'
    return response
e0fc4fce8a862308fd4011948c1241c3c4b957cc
197,830
def _safe(key, dic):
    """Safe call to a dictionary. Returns value or None if key or
    dictionary does not exist"""
    return dic[key] if dic and key in dic else None
30322ef32a5bed8a6184eff69fd8a938a64074e1
210,163
def is_nonce_too_low_exception(exception):
    """check if the error thrown by web3 is a 'nonce too low' error"""
    if not isinstance(exception, ValueError) or not isinstance(exception.args[0], dict):
        return False
    message = exception.args[0].get("message", "")
    return (
        "There is another transaction with same nonce in the queue" in message
        or "Transaction nonce is too low" in message
    )
f4b465fc222eb68b59e5ea6fef410ac68485966e
694,446
import torch


def mulaw_decode(x_mu, quantization_channels, input_int=True):
    """Adapted from torchaudio
    https://pytorch.org/audio/functional.html mu_law_encoding

    Args:
        x_mu (Tensor): Input tensor
        quantization_channels (int): Number of channels
        input_int: Bool
            True: convert x_mu (int) from int to float, before mu-law decode
            False: directly decode x_mu (float)

    Returns:
        Tensor: Input after mu-law decoding (float-value waveform (-1, 1))
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype, device=x_mu.device)
    if input_int:
        x = ((x_mu) / mu) * 2 - 1.0
    else:
        x = x_mu
    x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
    return x
cdc4f5e35a132853c549ea56fdfc581c0f476a46
613,922
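A quick sketch of mulaw_decode above on 8-bit mu-law levels (assumes PyTorch; values are illustrative):

import torch

levels = torch.tensor([0, 128, 255])  # min, mid, max quantization levels
print(mulaw_decode(levels, 256))      # approximately [-1.0, ~0.0, 1.0]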
import itertools


def create_permutations(length):
    """
    Create the list of permutations for a given length 'length'.
    This makes combinations of the numbers in the list of 1 through 'length'.
    (uses 'itertools' library)
    """
    if length == 1:
        return 1
    else:
        permutations_list = itertools.permutations(range(1, length + 1))
        return '\n'.join([' '.join(map(str, t)) for t in permutations_list])
44f0b7242c78288895881f3039ef7ac4a042d7d3
252,369
def ipv4_reassembly(frame):
    """Make data for IPv4 reassembly.

    Args:
        frame (pcapkit.protocols.pcap.frame.Frame): PCAP frame.

    Returns:
        Tuple[bool, Dict[str, Any]]: A tuple of data for IPv4 reassembly.

        * If the ``frame`` can be used for IPv4 reassembly. A frame can be
          reassembled if it contains IPv4 layer
          (:class:`pcapkit.protocols.internet.ipv4.IPv4`) and the **DF**
          (:attr:`IPv4.flags.df <pcapkit.protocols.internet.ipv4.DataType_IPv4_Flags.df>`)
          flag is :data:`False`.
        * If the ``frame`` can be reassembled, then the :obj:`dict` mapping of
          data for IPv4 reassembly (c.f. :term:`ipv4.packet`) will be returned;
          otherwise, returns :data:`None`.

    See Also:
        :class:`~pcapkit.reassembly.ipv4.IPv4Reassembly`

    """
    if 'IPv4' in frame:
        ipv4 = frame['IPv4'].info
        if ipv4.flags.df:       # dismiss not fragmented frame
            return False, None
        data = dict(
            bufid=(
                ipv4.src,                       # source IP address
                ipv4.dst,                       # destination IP address
                ipv4.id,                        # identification
                ipv4.proto.name,                # payload protocol type
            ),
            num=frame.info.number,              # original packet range number
            fo=ipv4.frag_offset,                # fragment offset
            ihl=ipv4.hdr_len,                   # internet header length
            mf=ipv4.flags.mf,                   # more fragment flag
            tl=ipv4.len,                        # total length, header includes
            header=bytearray(ipv4.packet.header),           # raw bytearray type header
            payload=bytearray(ipv4.packet.payload or b''),  # raw bytearray type payload
        )
        return True, data
    return False, None
b55ac3f9caa0007dd8a70a56de704f25a403755c
33,971
def get_violating_args(contracts, params_args):
    """
    Get all arguments that violate the contracts.

    :param contracts: { string: (any) -> bool, ... }, contracts from parameters to qualifiers
    :param params_args: { string: any, ... }, parameters to arguments
    :return: { string: any, ... }, parameters to contract violating arguments
    """
    error_params_args = {}
    for param, arg in params_args.items():
        # No contract for parameter
        if param not in contracts:
            continue
        # Checking breaks
        try:
            valid = contracts[param](arg)
        except:
            error_params_args[param] = arg
            continue
        # Violate contract
        if not valid:
            error_params_args[param] = arg
    return error_params_args
3e70df860bf1b66f6949b21e5a3dd478a36cf56d
458,814
import inspect


def is_class_attr(full_name, index):
    """Check if the object's parent is a class.

    Args:
        full_name: The full name of the object, like `tf.module.symbol`.
        index: The {full_name:py_object} dictionary for the public API.

    Returns:
        True if the object is a class attribute.
    """
    parent_name = full_name.rsplit('.', 1)[0]
    if inspect.isclass(index[parent_name]):
        return True
    return False
80713ee209d3fa9282ec051779ed5b9ebdddb273
292,270
def split_headers_list(headers):
    """
    Split the headers list into unique elements instead of 1 string.

    Args:
        headers (list): headers output from fasta_parser

    Returns:
        split_headers (list): headers list split into components.
        no_chrom (list): all things that are not the chromosome.
    """
    split_headers = [item.split(" ") for item in headers]
    no_chrom = [item[1:] for item in split_headers]
    return split_headers, no_chrom
9295c66bbd1e85a444447340c83835b4051798c3
209,749
def sparse_batch_mm(m1, m2):
    """
    https://github.com/pytorch/pytorch/issues/14489

    m1: sparse matrix of size N x M
    m2: dense matrix of size B x M x K
    returns m1@m2 matrix of size B x N x K
    """
    batch_size = m2.shape[0]
    # stack m2 into columns: (B x N x K) -> (N, B, K) -> (N, B * K)
    m2_stack = m2.transpose(0, 1).reshape(m1.shape[1], -1)
    result = m1.mm(m2_stack).reshape(m1.shape[0], batch_size, -1) \
        .transpose(1, 0)
    return result
6fe7a5f4b407d27b71b646872d43a78154c594e8
39,073
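Shape check for sparse_batch_mm above (assumes PyTorch; the identity matrix makes the expected output obvious):

import torch

m1 = torch.eye(4).to_sparse()   # N x M = 4 x 4, sparse
m2 = torch.randn(3, 4, 5)       # B x M x K = 3 x 4 x 5, dense
out = sparse_batch_mm(m1, m2)   # B x N x K = 3 x 4 x 5
assert torch.allclose(out, m2)  # identity leaves each batch unchanged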
import re


def txt2list(txt_path=None):
    """
    Load txt file and split the sentences into a list of strings
    """
    if txt_path is None:
        raise ValueError("txt_path must be specified as a named argument! "
                         "E.g. txt_path=../dataset/yourfile.txt")

    # Read input sequences from .txt file and put them in a list
    with open(txt_path) as f:
        text = f.read()
    sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
    try:
        sentences.remove('')  # remove possible empty strings
    except ValueError:
        pass
    return sentences
8fdb9ec2991f820fb58ee9706ff520c132b6ab3b
619,076
def cumulative_sum(v, cumulative):
    """
    This function is used to output a stream where the n-th value on the
    output stream is the cumulative sum of the first n values of the input
    stream.

    The state of the input stream is cumulative. When used to create a
    stream, cumulative is the sum of all the values in the input stream
    received so far. v is the next value received in the input stream.
    """
    cumulative += v
    return (cumulative, cumulative)
bcc7fbffb39fdb2186e81ef0e26bd42cfa684cd4
460,587
from datetime import datetime


def str_to_datetime(s):
    """
    Parse ISO datetime string and return a datetime object

    :param str s: date/time string
    :return: datetime object, None if not a valid date/time string
    :rtype: datetime.datetime | None
    """
    t = None
    for fmt in ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',
                '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d'):
        try:
            t = datetime.strptime(s, fmt)
        except ValueError:
            pass
        else:
            break
    return t
0d250d19f88a74e1778dfc052e9638a15c5635b1
622,526
def pade_21(k, params):
    """
    Pade [2.1] eq.(2)
    """
    l = params[0]
    a = params[1]
    b = params[2]
    a4b2 = a*a*a*a + b*b
    aak2 = a*a*k*2
    return l*(k*k + aak2 + a4b2) / (a4b2 + aak2)
6137b7a39c45e092640738ca3f4180474a7e6043
229,290
def qualify(func: object) -> str:
    """Qualify a function."""
    return ".".join((func.__module__, func.__qualname__))
bfda7050ff94f407a2a0d4b00b87ecb0370e9110
700,179
def _RemoveClone(name):
    """Return name up to the ".clone." marker."""
    clone_index = name.find('.clone.')
    if clone_index != -1:
        return name[:clone_index]
    return name
dc47efdf8dd35a7a7f5c75a0aa2697c01112f453
132,577
import re


def humanize(word):
    """
    Capitalize the first word and turn underscores into spaces and strip a
    trailing ``"_id"``, if any. Like :func:`titleize`, this is meant for
    creating pretty output.

    Examples::

        >>> humanize("employee_salary")
        "Employee salary"
        >>> humanize("author_id")
        "Author"
    """
    word = re.sub(r"_id$", "", word)
    word = word.replace("_", " ")
    word = re.sub(r"(?i)([a-z\d]*)", lambda m: m.group(1).lower(), word)
    word = re.sub(r"^\w", lambda m: m.group(0).upper(), word)
    return word
22ef47a7112a2ac2b0ae14ce1822f3de3c3477c6
647,069
def divisors(n):
    """Return the integers that evenly divide n.

    >>> divisors(1)
    [1]
    >>> divisors(4)
    [1, 2]
    >>> divisors(12)
    [1, 2, 3, 4, 6]
    >>> [n for n in range(1, 1000) if sum(divisors(n)) == n]
    [1, 6, 28, 496]
    """
    return [1] + [x for x in range(2, n) if n % x == 0]
ed8fe578580c1f2b5ceb76f669eacfd2913e894f
98,206
def word_splits(word, min_reminder=3, max_prefix_length=5):
    """
    Return all splits of a word (taking in account min_reminder and
    max_prefix_length).
    """
    max_split = min(max_prefix_length, len(word) - min_reminder)
    split_indexes = range(1, 1 + max_split)
    return [(word[:i], word[i:]) for i in split_indexes]
d9f93f58f419ab154c917d39708daee60b8ba51b
423,466
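Example output for word_splits above (illustrative input): prefixes stop at length 5 (max_prefix_length) and always leave at least 3 characters (min_reminder).

print(word_splits("unhappiness"))
# [('u', 'nhappiness'), ('un', 'happiness'), ('unh', 'appiness'),
#  ('unha', 'ppiness'), ('unhap', 'piness')]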
def read_BED(path, last_col=False):
    """
    This function reads bed files.
    last_col=True: If an extra column for a score (e.g. replication timing
    or gene expression) is given.
    This function returns a list of lists with the BED file information
    and the list of scores.
    """
    if not last_col:
        Data = []
        with open(path) as f:
            for line in f:
                Data.append(line.strip().split()[:6])
        return Data
    elif last_col:
        Data = []
        Score = []
        with open(path) as f:
            for line in f:
                Data.append(line.strip().split()[:6])
                Score.append(float(line.strip().split()[-1]))
        return Data, Score
    else:
        print("ERROR")
a991be939edbdc3ff4506a9d5afac6d9eeb12dc0
474,841
import re


def convert_to_snake_case(value: str) -> str:
    """
    Convert the value from CamelCase to snake_case.

    Regex from:
    https://github.com/jpvanhal/inflection/blob/0.5.1/inflection/__init__.py#L397
    """
    value = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", value)
    value = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", value)
    value = value.replace("-", "_")
    return value.lower()
cec33de8ed5c0dc934ef965b5367fe25b6a01699
509,952
def getclosest_ij(lats, lons, latpt, lonpt):
    """Function to find the index of the closest point to a certain
    lon/lat value."""
    # find squared distance of every point on grid
    dist_sq = (lats - latpt)**2 + (lons - lonpt)**2
    minindex_flattened = dist_sq.argmin()
    # return np.unravel_index(minindex_flattened, lats.shape)
    return minindex_flattened
860d10637dee2d694518da934c209eed08214c6d
617,484
import yaml


def loadfile(f):
    """Loads a YAML file containing several documents into a list of objects

    :param f: string containing the path to the file to load
    :return: list of loaded objects
    """
    try:
        return list(yaml.safe_load_all(open(f)))
    except Exception as e:
        print('Exception caught loading', f, e)
        raise
804e2b24ef924f8f0ac076eba2b7f79330d1acee
386,186
def create_outname(options):
    """ Creates filename for the output GTF that reflects the input options
        that were used. """
    outname = options.outprefix + "_talon_abundance"
    if options.whitelist != None:
        outname = "_".join([outname, "filtered"])
    outname += ".tsv"
    return outname
969357752e64da909e7dbe24b42847c3bf35efa1
558,336
def radio_button_message(n_clicks, value):
    """
    Function that returns a message when the user selects a radio button.

    Args:
        n_clicks: Number of clicks registered by the submit button.
        value: The radio button option selected by the user.

    Returns:
        A message to the user as a response to the radio button input.
    """
    if value is None:
        return 'Please select an option'
    else:
        return 'You have selected: ' + value
9a227348f6a72564491e5d8687627c6e88824350
334,135
def round(addr: int, align: int, up: bool = False) -> int:
    """Round an address up or down based on an alignment.

    Args:
        addr: the address
        align: the alignment value
        up: Whether to round up or not

    Returns:
        The aligned address
    """
    if addr % align == 0:
        return addr
    else:
        if up:
            return (addr + (align - (addr % align)))
        return (addr - (addr % align))
0b77bd25d66a5522fb3dd568cc6f6ce0fedbd59c
655,677
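Usage sketch for the alignment helper round above, which shadows the builtin of the same name (illustrative addresses):

print(hex(round(0x1234, 0x100)))           # 0x1200 (rounds down by default)
print(hex(round(0x1234, 0x100, up=True)))  # 0x1300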
import base64
import zlib


def compress_text_to_b64(text):
    """Returns a compressed and b64 encoded string"""
    if isinstance(text, str):
        text = text.encode()
    return base64.b64encode(zlib.compress(text))
28446cc2b61414d63865347a2d31e3cfcc699778
481,265
def produceLTLStringMeaning(name, props_list):
    """
    Produces a natural language explanation of the LTL

    :param name: a string specifying the LTL template name
    :param props_list: a list of list of strings indicating the propositions
    :return: a string of natural language meaning of the LTL
    """
    connector_str = ' AND '
    template_map = {
        "global": '("{}") is true throughout the entire trace',
        "eventual": '("{}") eventually occurs (may later become false)',
        "stability": '("{}") eventually occurs and stays true forever',
        "response": 'If ("{}") occurs, ("{}") eventually follows',
        "until": '("{}") has to be true until ("{}") eventually becomes true',
        "atmostonce": 'Only one contiguous interval exists where ("{}") is true',
        "sometime_before": 'If ("{}") occurs, ("{}") occurred in the past',
    }
    str_template = template_map[name]
    ltl_meaning = connector_str.join([str_template.format(*x) for x in props_list])
    return ltl_meaning
e69ba2219a36981d2c5a3a3e58dfd733a5d18ea0
491,702
def _ag_checksum(data):
    """
    Compute a telegram checksum.
    """
    checksum = 0
    for c in data:
        checksum ^= c
    return checksum
32d1ef971fc1fb63fb0583c32c309de3de41f39d
690,351
import struct


def _decode_short(data):
    """Decodes two bytes of network data into an unsigned short."""
    return struct.unpack('!H', data)[0]
c775ec423cadafc1b5a631eae12be60174f43101
174,323
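Example for _decode_short above: the big-endian byte pair 0x01 0x02 decodes to 0x0102 = 258.

print(_decode_short(b'\x01\x02'))  # 258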
def obtener_medallas_de_atleta(atletas: list, anio_0: int, anio_f: int,
                               nombre_atleta: str) -> list:
    """Builds the list of medals won by an athlete within a given time period.

    Parameters:
        atletas: list of dicts with each athlete's information.
        anio_0: int, start year.
        anio_f: int, end year.
        nombre_atleta: str

    Returns:
        medallas_atleta: list of dicts, one per medal:
            {'evento': str, 'anio': int, 'medalla': str}.
    """
    # Initialise the list of medals.
    medallas_atleta = []
    # Walk the list of athletes.
    for cada_atleta in atletas:
        # Fields of the current athlete.
        anio_actual = cada_atleta['anio']
        nombre_actual = cada_atleta['nombre']
        evento_actual = cada_atleta['evento']
        medalla_actual = cada_atleta['medalla']
        # Check name and time range.
        if nombre_actual == nombre_atleta:
            if anio_0 <= anio_actual <= anio_f:
                if medalla_actual != "na":
                    # Append this medal's dict to the list.
                    medallas_atleta.append({'evento': evento_actual,
                                            'anio': anio_actual,
                                            'medalla': medalla_actual})
    return medallas_atleta
f48571b37d3363c20a4c4c64744cce120c36e278
597,132
def _get_test_methods(test_case_name, test_class_map):
    """Takes ``test_class_map`` or ``skip_class_map`` and returns the list of
    methods for the test case or ``None`` if no methods were specified for it

    :param test_case_name: Name of the test case to check
    :param test_class_map: Dictionary mapping test names to a list of methods
    :return: List of methods or ``None`` if not specified for this test case
    """
    if test_class_map is None or test_case_name not in test_class_map:
        return None
    return test_class_map[test_case_name]
559792219dafda5b552033daee6756d5156b8969
231,149
def _input_V_calcs(input_V, A2, rho_amp):
    """Get the effective preamp input noise current (ENI) and the output
    noise voltage from the noise voltage at the preamp input.

    :return: output voltage, effective input current (ENI)
    """
    output_V = A2 * input_V
    eni = input_V / rho_amp
    return output_V, eni
7a6ee4a79829b9ab96c6f90ab4b1297885d6c257
385,994
import torch


def _construct_empty_index(array: torch.Tensor) -> torch.Tensor:
    """
    Constructs an empty array of indexes the same shape as ``array``.

    Args:
        array: The array to construct indexes for.
    """
    temp = torch.arange(array.shape[-1], device=array.device)
    return temp * torch.ones_like(array, dtype=temp.dtype)
b0d643260e769ed5f9709461e33abea5eec92e4f
423,590
import typing


def check_key_type(key: typing.Union[str, bytes]):
    """
    Enforce the type of the key to bytes.

    Parameters
    ----------
    key: str | bytes
        The key to check

    Returns
    -------
    bytes
        The key as bytes
    """
    if isinstance(key, bytes):
        return key
    return str(key).encode("utf-8")
07da98b6d296b97d0e454668dc6da9f4840e2788
617,142
def anticodon(data):
    """
    Get the anticodon of this entry.
    """
    return data["metadata"]["anticodon"]
ca776d7bb3f642e837e1195483ac7ae57719b698
400,465
def generate_Q_data(Q):
    """
    Generate the text data for a problem. The line ending is CR+LF.

    Parameters
    ==========
    Q : tuple
        Return value of read_Q()

    Returns
    =======
    txt : str
        Text data of the problem
    """
    crlf = '\r\n'  # DOS line ending
    size, block_num, block_size, block_data, block_type, num_lines = Q
    txt = 'SIZE %dX%d%s' % (size[0], size[1], crlf)
    txt += 'BLOCK_NUM %d%s' % (block_num, crlf)
    for i in range(1, block_num + 1):
        txt += crlf
        txt += 'BLOCK#%d %dX%d%s' % (i, block_size[i][0], block_size[i][1], crlf)
        for row in block_data[i]:
            txt += ','.join(['+' if n == -1 else str(n) for n in row]) + crlf
    return txt
70c3c1c0b2550a6805b1b2abedf0e5b115377e05
41,977
def count(array):
    """
    Returns the count of items in the list, dictionary or tuple.

    This is a wrapper for the PHP count() function that returns the count
    of items in an array.

    Parameters:
        array (list, dict, tuple): A list or a dictionary or a tuple.

    Returns:
        int: Count of the items.
    """
    if not isinstance(array, (list, dict, tuple)):
        raise TypeError('Parameter must be an array or an object that implements Countable')
    return len(array)
bad7ebcd195f32217befeab288942d1d38bce5bb
439,508
def PadBytes(byte_string, n):
    """Prepend a byte string with n zero bytes."""
    # Use a bytes literal so this also works on Python 3 byte strings.
    return n * b'\x00' + byte_string
07333437651a79c1c0a6dbd1e723ecda0bdae894
358,250
def sum_multiples(ceiling, *factors):
    """Find the sum of all multiples of the provided factors below the ceiling."""
    multiples = []
    for number in range(ceiling):
        for factor in factors:
            # Check if the current number is a multiple of the current factor.
            if number % factor == 0:
                multiples.append(number)
    # Eliminate duplicate multiples by converting the list of multiples to a set.
    multiple_sum = sum(set(multiples))
    return multiple_sum
0352782c5f073efec74213a56491c2a8c7cebd62
446,382
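A familiar instance of sum_multiples above (Project Euler problem 1 in miniature):

print(sum_multiples(10, 3, 5))  # 23, i.e. 3 + 5 + 6 + 9 (0 adds nothing)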
def parse_sample_ids(sampleidfile):
    """
    Read the sample ID file and return a hash of SRS to list of SRR (SRS->[SRR])
    """
    srs = {}
    with open(sampleidfile, 'r') as f:
        for l in f:
            p = l.strip().split("\t")
            if p[1] not in srs:
                srs[p[1]] = []
            srs[p[1]].append(p[0])
    return srs
401e44c6bda2273ab8692f68570972ea06116223
84,702
def compute_spike_rate(spikes):
    """Estimate spike rate from a vector of spike times, in seconds.

    Parameters
    ----------
    spikes : 1d array
        Spike times, in seconds.

    Returns
    -------
    float
        Average firing rate.

    Examples
    --------
    Compute spike rate of 6 spikes

    >>> spikes = [0.5, 1, 1.5, 2, 2.5, 3]
    >>> compute_spike_rate(spikes)
    2.4
    """
    return len(spikes) / (spikes[-1] - spikes[0])
3c9ddb4b32f658dc6a375763a6df9f25b67bc51d
90,782
def multi_dict(input_pairs):
    """
    Similar to casting pairs to a dictionary, except that repeated pairs
    are allowed. To show the difference:

    >>> dict( [('a', 1), ('b', 2), ('a', 3)] )
    {'a': 3, 'b': 2}
    >>> multi_dict( [('a', 1), ('b', 2), ('a', 3)] )
    {'a': [1, 3], 'b': [2]}

    :param input_pairs: A list of (key, value) pairs.
    :return: A dictionary mapping keys to lists of values.
    """
    output_dict = {}
    for key, value in input_pairs:
        existing_values = output_dict.get(key, [])
        existing_values.append(value)
        output_dict[key] = existing_values
    return output_dict
5c94fbd92035f10bc6a1eb2016c48b61c41ed421
431,780
def z2a(z):
    """ converts from redshift to scale factor """
    return 1.0 / (1.0 + z)
5f2532e4bc92e43a1685d9dbacf375de1bebc759
429,192
def dmarc_find_organizational_domain(domain, public_suffix_list):
    """
    Find the organizational domain of the given domain.

    Uses mainly the algorithm found at https://publicsuffix.org/list/ to get
    the organizational domain. Could return "" if none is found.
    """
    # The algorithm could be more elaborate but this is simple and fast enough.
    organizational_domain = ""
    matching_rule = []
    matching_count = 0
    matching_exception = False
    labels = domain.split(".")[::-1]
    for rule, exception in public_suffix_list:
        if matching_exception and not exception:
            continue
        # We always need to have the whole rule available. zip (later) will
        # truncate to the shortest list.
        if len(rule) > len(labels):
            continue
        matched = 0
        for a, b in zip(rule, labels):
            if a in ("*", b):
                matched += 1
            else:
                break
        # The whole rule needs to match.
        if matched == len(rule):
            if exception:
                matching_rule = rule
                matching_count = matched
                matching_exception = exception
            elif matched > matching_count:
                matching_rule = rule
                matching_count = matched
    if matching_rule:
        if matching_exception:
            organizational_domain = ".".join(labels[:len(matching_rule)][::-1])
        elif len(labels) > len(matching_rule):
            organizational_domain = ".".join(labels[:matching_count + 1][::-1])
    # Default matching rule is "*"
    elif len(labels) > 1:
        organizational_domain = ".".join(labels[:2][::-1])
    return organizational_domain
a6cf238765d19e578d366396358b0d35d6652f8c
593,901
def make_row(row, lens):
    """Create a single row for an ascii table."""
    cells = ('{row:<{len}}'.format(row=str(row[i]), len=lens[i])
             for i in range(len(row)))
    return ' {0} '.format(' '.join(cells))
ba64a4d58f400b70e6b8411e1f25def3735e0e2a
440,668
def generate_labels_frii(train_i, test_i, labels):
    """Gets train and test labels for FRII vs Random classification.

    # Arguments
        train_i: Training indices. Output of get_random_data function.
            Shape of [samples,].
        test_i: Testing indices. Output of get_random_data function.
            Shape of [samples,].
        labels: label array. 'labels' dataset in data h5py file as numpy
            array with shape [samples,]

    # Returns
        Training and testing labels, with FRII sources as True and FRI
        and Random sources as False.
    """
    train = labels[train_i]
    test = labels[test_i]
    train_y = train == 2
    test_y = test == 2
    return train_y, test_y
5005e4deadb68295299df963a7abc495fe2f0934
489,299
def format_avg_time(value):
    """
    Formats a (hour, minute) tuple to 'hour:minute' string.
    """
    if not isinstance(value, tuple):
        return value if value is not None else ''
    hour, minute = value
    if hour is None or minute is None:
        return ''
    return '{:02}:{:02}'.format(hour, minute)
9738a78a7782aa3bd6578b127c7b53a575ea34bb
326,846
def get_data_mod(data_type, data_name):
    """Get the pythondata-{}-{} module or raise a useful error message."""
    imp = "import pythondata_{}_{} as dm".format(data_type, data_name)
    try:
        l = {}
        exec(imp, {}, l)
        dm = l['dm']
        return dm
    except ImportError as e:
        raise ImportError("""\
pythondata-{dt}-{dn} module not installed! Unable to use {dn} {dt}.
{e}

You can install this by running;
 pip3 install git+https://github.com/litex-hub/pythondata-{dt}-{dn}.git
""".format(dt=data_type, dn=data_name, e=e))
9462d619007665f9e6e5bf0dae609003ef7e3d42
81,178
def recursive_levenshtein(string_1, string_2,
                          len_1=None, len_2=None,
                          offset_1=0, offset_2=0, memo=None) -> float:
    """
    Calculates the Levenshtein distance between two strings.

    Usage::

        >>> recursive_levenshtein('kitten', 'sitting')
        3
        >>> recursive_levenshtein('kitten', 'kitten')
        0
        >>> recursive_levenshtein('', '')
        0
    """
    if len_1 is None:
        len_1 = len(string_1)
    if len_2 is None:
        len_2 = len(string_2)
    if memo is None:
        memo = {}

    key = ','.join([str(offset_1), str(len_1), str(offset_2), str(len_2)])

    if memo.get(key) is not None:
        return memo[key]

    if len_1 == 0:
        return len_2
    elif len_2 == 0:
        return len_1

    cost = 0
    if string_1[offset_1] != string_2[offset_2]:
        cost = 1

    dist = min(
        recursive_levenshtein(string_1, string_2,
                              len_1 - 1, len_2,
                              offset_1 + 1, offset_2, memo) + 1,
        recursive_levenshtein(string_1, string_2,
                              len_1, len_2 - 1,
                              offset_1, offset_2 + 1, memo) + 1,
        recursive_levenshtein(string_1, string_2,
                              len_1 - 1, len_2 - 1,
                              offset_1 + 1, offset_2 + 1, memo) + cost,
    )
    memo[key] = dist
    return dist
2273b1300190b719ea126cb2d7e93ef389c4b609
354,927
from pathlib import Path
from typing import List


def get_acqui_id(mpath: Path, cam_folders: List[str], sync_folder: str) -> List[str]:
    """
    Return the names of all acquisition ids within sync_dir across both cameras

    Parameters
    ----------
    mpath : Path
        The parent micasense data directory that contains the cam_folders
    cam_folders : List[str]
        A list of the camera folder names, e.g. ["red_cam", "blue_cam"]
    sync_folder : str
        The name of the "SYNCXXXXSET" folder

    Returns
    -------
    acqi_ids : List[str]
        List of acquisition id's, e.g. ["0000", "0001", "0002", ....., "XXXX"]
    """
    acqi_ids = []
    for cam in cam_folders:  # cam = "red_cam" or "blue_cam"
        for f in (mpath / cam / sync_folder).glob("**/*.tif"):
            acqi_ids.append(f.stem.split("_")[1])
    return sorted(list(set(acqi_ids)))
d1a1fac85c75060855ccd4d9dabe0b5dcdf8197a
114,600
def label_clusters(target):
    """
    This function will change the cluster labels from numbers to the
    predetermined text labels.

    Parameters:
        target: The dataset target labels.

    Returns:
        target: The transformed dataset target labels.
    """
    target['Target'] = target['Target'].apply(
        lambda x: str("Product-interested Traffic Type 20 "
                      "and/or Rare Browser Users")
        if x == 1
        else (str("Browser 8 Users") if x == 2 else "Others"))
    return target
44ed6459cc4a66f6a982c92c9e77e709cc2176d7
557,437
def attr_superclass(type_, subname=''):
    """An `attr.s` validator for asserting the superclass of a value.

    Args:
      * type_ (object) - The python type object to validate `value` is a
        subclass of.
      * subname (str) - Some sub-element of attrib.name; e.g. if checking a
        dictionary key, this might be ' keys'.

    Returns a validator function which raises TypeError if the value doesn't
    match the value.
    """
    def inner(_self, attrib, value):
        if not issubclass(type(value), type_):
            raise TypeError(
                "'{name}' must be a subclass of {type!r} (got {value!r} that "
                "is a {actual!r}).".format(
                    name=attrib.name + subname,
                    type=type_,
                    actual=value.__class__,
                    value=value,
                ),
                attrib, value, type_, subname,
            )
    return inner
9468ea59fae0b9f162309259537c2a3f3efa8a3f
148,922
import yaml


def getfiles2dirs(files2dirs):
    """Returns dictionary (mapping files to destination directories) from YAML file."""
    with open(files2dirs) as yamlfile:
        config = yaml.safe_load(yamlfile)
    return config
5e71a509915790195148b01b07292f71a614169b
296,313
def fixture_localhost(aiida_localhost):
    """Return a localhost ``Computer``."""
    localhost = aiida_localhost
    localhost.set_default_mpiprocs_per_machine(1)
    return localhost
830db202d0752febd9358288752c2c80beaacf57
376,574
def cm2pt(cm=1):
    """2.54cm => 72pt (1 inch)"""
    return float(cm) * 72.0 / 2.54
fd3aab678cf22e4799cd7d81c1867d912f11683f
608,842
def define_a_plot_name(plot_idx):
    """Define unique plot name"""
    return "PP" + str(plot_idx).zfill(8)
56a03d9b0a69f91b4d2c9322bf4405ad43ed9653
150,236
def split_comma_list(comma_str: str):
    """ Split a comma-separated list of values, stripping whitespace """
    return [item.strip() for item in comma_str.split(',')]
85772bd27c88a83450b6f1fa2b9debe148151820
202,761
from typing import Counter
import math


def conditional_entropy(x_symbols, y_symbols):
    """
    Computes the entropy of `x` given `y`.

    :param list x_symbols: A list of all observed `x` symbols.
    :param list y_symbols: A list of all observed `y` symbols.
    :return float entropy: The conditional entropy of `x` given `y`.
    """
    # Cache counters; while the xy_counter is already computed in other
    # parts, particularly in the scorers, it is worth repeating the code
    # here to have a more general function.
    y_counter = Counter(y_symbols)
    xy_counter = Counter(list(zip(x_symbols, y_symbols)))
    population = sum(y_counter.values())

    # Compute the entropy and return
    entropy = 0
    for xy_pair, xy_count in xy_counter.items():
        p_xy = xy_count / population
        p_y = y_counter[xy_pair[1]] / population
        entropy += p_xy * math.log(p_y / p_xy)

    return entropy
e6433e202b600d7518ee1255bd5f66a43b6781e8
66,545
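Sanity check for conditional_entropy above (illustrative data): when y is constant it carries no information, so H(x|y) reduces to H(x) = ln 2 for a fair binary x.

x = [0, 0, 1, 1]
y = [0, 0, 0, 0]
print(conditional_entropy(x, y))  # ~0.693 (natural log)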
def merge_two_dicts(x, y):
    """
    Merges 2 dictionaries. Overrides values from x with values from y,
    if key is the same. Needed for Python < 3.5

    See: https://stackoverflow.com/a/26853961

    :param x:
    :param y:
    :return:
    """
    z = x.copy()   # start with x's keys and values
    z.update(y)    # modifies z with y's keys and values & returns None
    return z
1d79c0d3a59778b347a6eed4602116ea031cbeb0
612,134
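Usage example for merge_two_dicts above: on key collisions, y wins.

print(merge_two_dicts({'a': 1, 'b': 2}, {'b': 3}))  # {'a': 1, 'b': 3}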
def getStations(station=''):
    """
    Define SPARQL query to return a list of all known stations at the
    Carbon Portal. This can include NON-ICOS stations.
    Note: excluding WDGCC stations ( https://gaw.kishou.go.jp/ ), which
    are visible in the data portal for historic reasons.

    Parameters
    ----------
    station : str, optional, case sensitive
        DESCRIPTION. The default is '', an empty string which returns ALL
        stations. If you provide a station id, be aware that it needs to be
        exactly as provided from the Triple Store....case sensitive.

    Returns
    -------
    query : str, valid sparql query to run against the Carbon Portal
        SPARQL endpoint.
    """
    if station:
        flt = 'FILTER(?id = "' + station + '") . '
    else:
        flt = station

    query = """
    prefix cpmeta: <http://meta.icos-cp.eu/ontologies/cpmeta/>
    select *
    from <http://meta.icos-cp.eu/resources/icos/>
    from <http://meta.icos-cp.eu/resources/extrastations/>
    where {
        ?uri cpmeta:hasStationId ?id .
        %s
        OPTIONAL {?uri cpmeta:hasName ?name } .
        OPTIONAL {?uri cpmeta:countryCode ?country }.
        OPTIONAL {?uri cpmeta:hasLatitude ?lat }.
        OPTIONAL {?uri cpmeta:hasLongitude ?lon }.
        OPTIONAL {?uri cpmeta:hasElevation ?elevation } .
    }
    """ % (flt)

    return query
4fea5918dca13c7892fc426f9c20668e4f71d3d1
545,916