content | sha1 | id
---|---|---|
from pathlib import Path
from typing import List
def _list_files(d: Path) -> List[Path]:
    """Recursively list files in a directory and its subdirectories."""
    if d.is_file():
        return [d]
    else:
        # Avoid shadowing the outer `d` inside the comprehensions.
        sub_dirs = [p for p in d.iterdir() if p.is_dir()]
        files = [f for f in d.iterdir() if f.is_file()]
        for sub_dir in sub_dirs:
            files += _list_files(sub_dir)
        return files
|
e684d81ab49b2b06b257d25e5b304b6e696002d0
| 58,231 |
def _float(value: str) -> float:
"""Convert string to float value
Convert a string to a floating point number (including, e.g. -0.5960D-01). Whitespace or empty value is set to 0.0.
Args:
value: string value
Returns:
Float value
"""
if value.isspace() or not value:
return 0.0
else:
return float(value.replace("D", "e"))
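# A minimal usage sketch (illustrative inputs) showing the Fortran-style
# exponent handling:
assert _float("-0.5960D-01") == -0.0596
assert _float("   ") == 0.0
assert _float("") == 0.0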
|
501e73417178744a6f620438773e34708908c0de
| 662,375 |
def get_usa_veh_id(acc_id, vehicle_index):
"""
    Returns the global vehicle id for USA data, given the accident id and vehicle index.
    The id is constructed as <Acc_id><Vehicle_index>,
    where Vehicle_index is at most three digits.
"""
veh_id = acc_id * 1000
veh_id += vehicle_index
return veh_id
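# Usage sketch (illustrative ids): accident 4321, vehicle index 7.
assert get_usa_veh_id(4321, 7) == 4321007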
|
e89875d5f9f48293e3069a7fe468ee9455d24147
| 660,010 |
import re
def parse_github_url(gh_url):
"""Util to parse Github repo/PR id from a Github PR URL.
Args:
gh_url (string): URL of Github pull request.
Returns:
Tuple of (repo name, pr number)
"""
matches = re.match(r'https://github.com/([\w-]+/[\w-]+)/pull/(\d+)', gh_url)
if matches:
repo, prnum = matches.groups()
return (repo, int(prnum))
return None
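# Usage sketch (hypothetical URLs):
assert parse_github_url('https://github.com/octocat/hello-world/pull/42') == ('octocat/hello-world', 42)
assert parse_github_url('https://example.com/not-a-pr') is None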
|
ce6780c04153174a267ed25015ccb2d8dd9e85ca
| 364,678 |
from datetime import datetime
import pytz
def tumblr_date_to_datetime(timestamp):
"""
2016-01-15 22:01:42 GMT -> datetime()
"""
dt = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S GMT')
return dt.replace(tzinfo=pytz.utc)
|
c9184c05131260e0d4126b9803f45dfb69ecd3e9
| 322,627 |
def make_parameter(name, values, **kwargs):
""" define a new parameter that will be added to a store.
Primarily takes a name and an array of potential values.
May also be given a default value from inside the array.
May also be given a typechoice to help the UI which is required to be
one of 'list', 'range', 'option' or 'hidden'.
    May also be given a user-friendly label.
"""
default = kwargs['default'] if 'default' in kwargs else values[0]
if default not in values:
raise RuntimeError("Invalid default, must be one of %s" % str(values))
typechoice = kwargs['typechoice'] if 'typechoice' in kwargs else 'range'
valid_types = ['list', 'range', 'option', 'hidden']
if typechoice not in valid_types:
raise RuntimeError(
"Invalid typechoice, must be one of %s" % str(valid_types))
label = kwargs['label'] if 'label' in kwargs else name
properties = dict()
properties['type'] = typechoice
properties['label'] = label
properties['values'] = values
properties['default'] = default
return properties
|
a77eb681f9c055c3407b4f5ed88180f90569e44a
| 582,870 |
def check_if_scenario_name_exists(conn, scenario_name):
"""
:param conn: the database connection
:param scenario_name: str; the scenario name
:return: scenario_id; str or None
"""
c = conn.cursor()
sql = "SELECT scenario_id FROM scenarios WHERE scenario_name = ?"
query = c.execute(sql, (scenario_name,)).fetchone()
if query is None:
scenario_id = None
else:
scenario_id = query[0]
c.close()
return scenario_id
|
ece671be889fc112995a08d1fb094470d630f6d9
| 217,795 |
def merge_feed_dict(*feed_dicts):
"""
Merge all feed dicts into one.
Args:
\**feed_dicts: List of feed dicts. The later ones will override
values specified in the previous ones. If a :obj:`None` is
specified, it will be simply ignored.
Returns:
The merged feed dict.
"""
ret = {}
for feed_dict in feed_dicts:
if feed_dict is not None:
ret.update(feed_dict)
return ret
|
9c8a0314dec75f484d86926737dd98ff7a54784d
| 76,249 |
def parse_tags(tags):
"""Parse string containing list of tags."""
if tags is not None:
tags = set(tags.split(","))
return tags
|
50ddf2a3c5d1ac494ed90d0df674f73dce4933ce
| 206,601 |
import csv
def parse_proteq_file(filename, simplify_protein_names=True):
"""Parse the given protein expression file"""
# Example:
# /mnt/e/TCGA-BRCA/b09881e2-d622-4e5c-834a-18b893848de9/mdanderson.org_BRCA.MDA_RPPA_Core.protein_expression.Level_3.F3DF6E4D-CA53-4DEA-B48D-652306B60C77.txt
with open(filename, 'r') as handle:
entries = [e for e in csv.DictReader(handle, delimiter='\t')]
table = {}
for entry in entries:
if simplify_protein_names:
name = entry['Sample REF'].split("-")[0]
assert len(name) > 0
else:
name = entry['Sample REF']
table[name] = entry
return table
|
9093eff93c78e17461e21d34189bd0ef36229dc4
| 489,504 |
def extract_capital_letters(x):
""" Extract capital letters from string """
try:
return "".join([s for s in x if s.isupper()])
except Exception as e:
print(f"Exception raised:\n{e}")
return ""
|
c313c72da7a6c836219a0ae88e0a718102206a0c
| 210,567 |
import sqlite3
def get_db(test=False):
""" Get/create a database connection.
If a local ergal.db file exists, a connection
is established and returned, otherwise a new
database is created and the connection is returned.
:param test: (optional) determines whether or not a test database
should be created.
"""
file = 'ergal_test.db' if test else 'ergal.db'
db = sqlite3.connect(file)
cursor = db.cursor()
db.execute("""
CREATE TABLE IF NOT EXISTS Profile (
id TEXT NOT NULL,
name TEXT NOT NULL,
base TEXT NOT NULL,
auth TEXT,
endpoints TEXT,
PRIMARY KEY(id))""")
return db, cursor
|
5ae1be5b6c74f788b89b260a2597ac543968a392
| 585,322 |
import re
def match_crash_field(page):
"""
Extract the crash number from the content of the fatality page.
:param str page: the content of the fatality page
:return: a string representing the crash number.
:rtype: str
"""
crashes_pattern = re.compile(
r'''
(?:
(?:Traffic\sFatality\s\#(\d{1,3}))
|
(?:Fatality\sCrash\s\#(\d{1,3}))
)
''',
re.VERBOSE,
)
matches = crashes_pattern.search(page)
if not matches:
return None
non_empty_match = [match for match in matches.groups() if match]
return non_empty_match[0]
|
1d11e05f4768f58cd7d785b1788f07d5e2573d20
| 495,689 |
def delete_keys_from_dict(dict_del, key):
"""
    Method to delete a key from a Python dict, recursing into nested dicts
    :param dict_del: Python dictionary with all keys
    :param key: key to be deleted from the Python dictionary
    :returns the same dictionary, modified in place, with the key removed
"""
if key in dict_del.keys():
del dict_del[key]
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, key)
return dict_del
|
27084ba400dc7745636868e13813427cd9e35321
| 226,462 |
def ends_overlap(left, right):
"""Returns whether the left ends with one of the non-empty prefixes of the right"""
for i in range(1, min(len(left), len(right)) + 1):
if left.endswith(right[:i]):
return True
return False
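# Usage sketch: 'abcd' ends with 'cd', a non-empty prefix of 'cdef'.
assert ends_overlap('abcd', 'cdef')
assert not ends_overlap('abcd', 'xyz')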
|
0acf669acc87bd0366d8198dad6fc7e5b3510548
| 450,494 |
def _extractDotSeparatedPair(string):
"""
Extract both parts from a string of the form part1.part2.
The '.' used is the last in the string.
Returns a pair (part1, part2).
Useful for parsing machine.buildType filenames.
"""
i = string.rfind('.')
return string[:i], string[i+1:]
|
a6bcebe7006989778da5f35148681281af8c6da6
| 354,961 |
def NAA_net_area(measurement, energy):
"""
NAA_net_area determines the net area of a given peak without the use
of compton regions. It generates a region of interest (ROI) based on
full width half maximum (FWHM). A line is generated using the edges of the
ROI and an integral of the line is determined. The gross counts of the peak
are then subtracted by the integral and a net area and uncertainty are
determined and returned.
"""
E0 = measurement.energy_cal[0]
Eslope = measurement.energy_cal[1]
sample_counts = measurement.data
energy_channel = int((energy - E0) / Eslope)
region_size = 1.3
# Rough estimate of FWHM.
fwhm = 0.05*energy**0.5
fwhm_channel = int(region_size * (fwhm - E0) / Eslope)
# peak gross area
gross_counts_peak = sum(sample_counts[(energy_channel - fwhm_channel):
(energy_channel + fwhm_channel)])
peak_channels = measurement.channel[(energy_channel - fwhm_channel):
(energy_channel + fwhm_channel)]
# first and last channel counts of peak
start_peak_c = sample_counts[(energy_channel - fwhm_channel)]
end_peak_c = sample_counts[(energy_channel + fwhm_channel)]
# generate line under peak
compton_area = (start_peak_c + end_peak_c) / 2 * len(peak_channels)
net_area = gross_counts_peak - compton_area
# evaluate uncertainty
net_area_uncertainty = (gross_counts_peak + compton_area)**0.5
return net_area, net_area_uncertainty
|
df934c7f63a6ee16e71e917861989f15490a4de1
| 467,333 |
import hashlib
def md5_shard(word):
"""Assign data to servers using a public hash algorithm."""
data = word.encode('utf-8')
return 'server%d' % (hashlib.md5(data).digest()[-1] % 4)
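# Usage sketch (hypothetical key); the result is deterministic and always
# one of the four servers:
assert md5_shard('user:1001') in {'server0', 'server1', 'server2', 'server3'}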
|
ae8ca09af92ca4d1886f4d9489ba50a376038089
| 429,402 |
from pathlib import Path
def upload_path(instance, filename):
"""Helper function. Return the upload path for a file submission."""
return Path(str(instance.thesis.pk)) / filename
|
5cff9a37081a0ad3b6b9d117481a449aad6424fd
| 552,796 |
def risk_cat(risk):
"""Returns the catagory of risk based on risk"""
if risk < 0.0:
return "Low"
elif risk < 0.5:
return "Moderate"
elif risk < 2:
return "High"
elif risk < 100000000:
return "Severe"
else:
return "Could not be calculated"
|
bc760e3792a079172dedda795124fd79abebc950
| 430,106 |
from typing import Dict
from typing import Any
def decode_lwl01(data: bytes) -> Dict[str, Any]:
"""Returns a dictionary of engineering values decoded from a Dragino LWL01 Water Leak
Sensor Uplink Payload.
The payload 'data' is a byte array.
"""
# holds the dictionary of results
res = {}
def int16(ix: int) -> int:
"""Returns a 16-bit integer from the 2 bytes starting at index 'ix' in data byte array.
"""
return (data[ix] << 8) | (data[ix + 1])
# Battery voltage
res['vdd'] = (int16(0) & 0x3FFF) / 1000
# Water Presence, 1 = Wet, 0 = Dry
res['water'] = 1 if data[0] & 0x40 else 0
return res
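# Usage sketch with a hypothetical two-byte uplink payload 0x4BF4: the low
# 14 bits of the first word are the battery voltage in mV, and bit 0x40 of
# byte 0 is the water flag.
assert decode_lwl01(bytes([0x4B, 0xF4])) == {'vdd': 3.06, 'water': 1}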
|
fd4605939119f2460ac64e03ae24da09205ec6de
| 431,344 |
import random
import hmac
def hash_password(password, salthex=None, reps=1000):
"""Compute secure (hash, salthex, reps) triplet for password.
The password string is required. The returned salthex and reps
must be saved and reused to hash any comparison password in
order for it to match the returned hash.
The salthex string will be chosen randomly if not provided, and
if provided must be an even-length string of hexadecimal
digits, recommended length 16 or
greater. E.g. salt="([0-9a-z][0-9a-z])*"
The reps integer must be 1 or greater and should be a
relatively large number (default 1000) to slow down brute-force
attacks."""
if not salthex:
salthex = ''.join([ "%02x" % random.randint(0, 0xFF)
for d in range(0,8) ])
salt = []
for p in range(0, len(salthex), 2):
salt.append(int(salthex[p:p+2], 16))
salt = bytes(salt)
if reps < 1:
reps = 1
msg = password.encode()
for r in range(0,reps):
msg = hmac.HMAC(salt, msg, digestmod='MD5').hexdigest().encode()
return (msg.decode(), salthex, reps)
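# Verification sketch: rehash the candidate password with the stored salthex
# and reps, then compare digests (names here are illustrative).
stored_hash, stored_salt, stored_reps = hash_password('s3cret')
candidate_hash, _, _ = hash_password('s3cret', stored_salt, stored_reps)
assert candidate_hash == stored_hash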
|
cac468818560ed52b415157dde71d5416c34478c
| 3,988 |
def __remove_string_front(index, string):
"""Helper function to cleanup update_helper and improve readability.
Remove the first [index] characters of a string.
:param index: Number of characters to remove from front of string.
:param string: String to remove characters from.
:return: string with first [index] characters removed
"""
return string[index:]
|
6dc7d60e51ef67eab77cdcec3256e2cfc9f7c3b1
| 563,992 |
import math
def deg2pix(cmdist, angle, pixpercm):
"""Returns the value in pixels for given values (internal use)
arguments
cmdist -- distance to display in centimeters
angle -- size of stimulus in visual angle
pixpercm -- amount of pixels per centimeter for display
returns
pixelsize -- stimulus size in pixels (calculation based on size in
visual angle on display with given properties)
"""
cmsize = math.tan(math.radians(angle)) * float(cmdist)
return cmsize * pixpercm
|
1100a347f8021beb051a3a53f32527d9350cb46e
| 488,368 |
from typing import List
def calcular_ganador(tiempos: List[int]) -> int:
"""Calcula el ganador de la competencia
:param tiempos: lista de tiempos
:type tiempos: List[int]
:return: ganador
:rtype: int
"""
ganador = 0
for i, tiempo in enumerate(tiempos):
if tiempo < tiempos[ganador]:
ganador = i
return ganador + 1
|
20f080a2470bd70e34c10e17dbdd5b56d5ab5a19
| 568,632 |
def extract_mg_h(sai):
"""
Given an SAI, this find the most general pattern that will generate a
match.
E.g., ('sai', ('name', '?foa0'), 'UpdateTable', ('value', '?foa1'))
will yield: {('name', '?foa0'), ('value', '?foa1')}
"""
return frozenset({tuple(list(elem) + ['?constraint-val%i' % i])
for i, elem in enumerate(sai)
if isinstance(elem, tuple)})
|
0a1dd85f5fde1c3aeda4762483cfb87cbbc3d58e
| 638,133 |
def f(x):
"""
A function for testing on.
"""
return -(x + 2.0)**2 + 1.0
|
52d2c4a4dec5acaa34371a5cead727d94a36a478
| 39,878 |
def get_log_print_level(client):
"""Get log print level
Returns:
Current log print level
"""
return client.call('get_log_print_level')
|
425a7395904e007e6f894e06b7ee04cd8893b718
| 619,144 |
def _make_entropy_fn(level, num_levels):
"""This returns a function that returns a subrange of entropy.
E.g., if level=1 (medium) and num_levels=3, then the returned function will
map the range [x, x + y] to [x + y/3, x + 2y/3].
Args:
level: Integer in range [0, num_levels - 1].
num_levels: Number of difficulty levels.
Returns:
Function to restrict entropy range.
"""
lower = level / num_levels
upper = (level + 1) / num_levels
def modify_entropy(range_):
assert len(range_) == 2
length = range_[1] - range_[0]
return (range_[0] + lower * length, range_[0] + upper * length)
return modify_entropy
|
bfb087865dedf53049b40f6e6e3c45d3e54783d7
| 434,472 |
def newshape(s, lowdim=0):
"""
Given a shape s, calculate the shape of a new array with the
lowest dimension replaced. If lowdim is zero the lowest dimension
is removed, otherwise it is replaced.
"""
sl = list(s)
if lowdim == 0:
sl.pop()
else:
sl[-1] = lowdim
return tuple(sl)
|
e70b82720c75e63c13d138b5d06de20ba2237ecd
| 549,804 |
def TrimSequence(sequence, front_cutoff, end_cutoff):
    """
    This function takes a sequence and trims the ends off by the specified
    cutoffs.
    Parameters:
    - front_cutoff: the number of positions to trim off at the front
    - end_cutoff: the number of positions to trim off at the end
    - sequence: the sequence to be trimmed
    """
    # Guard against end_cutoff == 0: sequence[front_cutoff:-0] would return
    # an empty sequence, because -0 == 0.
    if end_cutoff == 0:
        return sequence[front_cutoff:]
    return sequence[front_cutoff:-end_cutoff]
|
073c6fa37aa48afb603ce73eb4e42c086e42038d
| 348,643 |
import requests
def get(url, params, proxies, headers):
"""Send a request with the GET method."""
response = requests.get(url, params=params, proxies=proxies, headers=headers)
return response
|
a481a91e5f3fc71f88de8d84efaac3dd666c302e
| 702,137 |
def checkAvailability(payload, age):
"""
Function to check availability in the hospitals based on
user age from the json response from the public API
Parameters
----------
payload : JSON
age: INT
Returns
-------
    available_centers_str : str or False
        Comma-separated names of available hospitals
total_available_centers : Integer
Total available hospitals
"""
    available_centers = set()
    available_centers_str = False
    total_available_centers = 0
    if 'centers' in payload.keys():
        # Check every center; the original guard `length > 1` skipped
        # payloads that contained exactly one center.
        for center in payload['centers']:
            for session in center['sessions']:
                if (session['available_capacity'] > 0
                        and session['min_age_limit'] <= age):
                    available_centers.add(center['name'])
        available_centers_str = ", ".join(available_centers)
        total_available_centers = len(available_centers)
    return available_centers_str, total_available_centers
|
e85494407f102462c5ec269b3ff73ff064e74b9b
| 513,719 |
def parse_rule(line):
"""Parse line with rules to rule and replace tuple."""
rule, replace = line.split(' => ')
return rule, replace
|
0eedde9832217621269037aff7857e7adbc99b47
| 490,368 |
def clean_text(document_string):
"""
Function that takes in a document in
the form of a string, and pre-processes
it, returning a clean string ready
to be used to fit a CountVectorizer.
Pre-processing includes:
- lower-casing text
- eliminating punctuation
- dealing with edge case punctuation
and formatting
- replacing contractions with
the proper full words
:param: document_string: str
:returns: cleaned_text: str
:returns: words: list
"""
# Make text lowercase
raw_text = document_string.lower()
# Replace encoding error with a space
raw_text = raw_text.replace('\xa0', ' ')
# Normalize period formatting
raw_text = raw_text.replace('. ', '.')
raw_text = raw_text.replace('.', '. ')
# Replace exclamation point with a space
raw_text = raw_text.replace('!', ' ')
# Replace slashes with empty
raw_text = raw_text.replace('/', '')
    # Replace question marks with a space
raw_text = raw_text.replace('??', ' ')
raw_text = raw_text.replace('?', ' ')
# Replace dashes with space
raw_text = raw_text.replace('-', ' ')
raw_text = raw_text.replace('—', ' ')
# Replace ... with empty
raw_text = raw_text.replace('…', '')
raw_text = raw_text.replace('...', '')
# Replace = with 'equals'
raw_text = raw_text.replace('=', 'equals')
# Replace commas with empty
raw_text = raw_text.replace(',', '')
# Replace ampersand with and
raw_text = raw_text.replace('&', 'and')
# Replace semi-colon with empty
raw_text = raw_text.replace(';', '')
# Replace colon with empty
raw_text = raw_text.replace(':', '')
# Get rid of brackets
raw_text = raw_text.replace('[', '')
raw_text = raw_text.replace(']', '')
# Replace parentheses with empty
raw_text = raw_text.replace('(', '')
raw_text = raw_text.replace(')', '')
# Replace symbols with letters
raw_text = raw_text.replace('$', 's')
raw_text = raw_text.replace('¢', 'c')
# Replace quotes with nothing
raw_text = raw_text.replace('“', '')
raw_text = raw_text.replace('”', '')
raw_text = raw_text.replace('"', '')
raw_text = raw_text.replace("‘", "")
# Get rid of backslashes indicating contractions
raw_text = raw_text.replace(r'\\', '')
# Replace extra spaces with single space
raw_text = raw_text.replace(' ', ' ')
raw_text = raw_text.replace(' ', ' ')
# Some apostrophes are of a different type --> ’ instead of '
raw_text = raw_text.replace("’", "'")
# Replace contractions with full words, organized alphabetically
raw_text = raw_text.replace("can't", 'cannot')
raw_text = raw_text.replace("didn't", 'did not')
raw_text = raw_text.replace("doesn't", 'does not')
raw_text = raw_text.replace("don't", 'do not')
raw_text = raw_text.replace("hasn't", 'has not')
raw_text = raw_text.replace("he's", 'he is')
raw_text = raw_text.replace("i'd", 'i would')
raw_text = raw_text.replace("i'll", 'i will')
raw_text = raw_text.replace("i'm", 'i am')
raw_text = raw_text.replace("isn't", 'is not')
raw_text = raw_text.replace("it's", 'it is')
raw_text = raw_text.replace("nobody's", 'nobody is')
raw_text = raw_text.replace("she's", 'she is')
raw_text = raw_text.replace("shouldn't", 'should not')
raw_text = raw_text.replace("that'll", 'that will')
raw_text = raw_text.replace("that's", 'that is')
raw_text = raw_text.replace("there'd", 'there would')
raw_text = raw_text.replace("they're", 'they are')
raw_text = raw_text.replace("there's", 'there are')
raw_text = raw_text.replace("we'd", 'we would')
raw_text = raw_text.replace("we'll", 'we will')
raw_text = raw_text.replace("we're", 'we are')
raw_text = raw_text.replace("we've", 'we have')
raw_text = raw_text.replace("wouldn't", 'would have')
raw_text = raw_text.replace("you'd", 'you would')
raw_text = raw_text.replace("you'll", 'you will')
raw_text = raw_text.replace("you're", 'you are')
raw_text = raw_text.replace("you've", 'you have')
# Fix other contractions
raw_text = raw_text.replace("'s", ' is')
cleaned_text = raw_text
# Extract tokens
text_for_tokens = cleaned_text
text_for_tokens = text_for_tokens.replace('.', '')
words = text_for_tokens.split()
return (cleaned_text, words)
|
4965047d64e805db265a14152d39473e74e06e29
| 125,489 |
import hashlib
def double_sha256(ba):
    """
    Perform two SHA256 operations on the input.
    Args:
        ba (bytes): data to hash.
    Returns:
        str: hash as a hex string.
    """
    d1 = hashlib.sha256(ba)
    d2 = hashlib.sha256()
    # Hash the raw bytes of the first digest (the original also computed
    # d1.hexdigest() and discarded the result).
    d2.update(d1.digest())
    return d2.hexdigest()
|
dd4683e5a5ee18f5e1bffa820bf7ed667e16e285
| 253,874 |
def extract_primitives(transitions):
"""
    Extract all the primitives out of the possible transitions, which are defined by the user, and make a list
    of unique primitives.
Args:
transitions (list): indicated start, end primitive and their transition probability
Returns:
list of all primitives
"""
primitives = set()
for transition in transitions:
primitives.add(transition[0]) # collect start primitive
primitives.add(transition[1]) # collect end primitive
return sorted(list(primitives))
|
ed8a52dd72d47edf0039c9ec3eefb48c803735c9
| 402,942 |
from datetime import datetime
import time
def utc2local(utc: datetime) -> datetime:
"""Converts UTC time to local."""
epoch = time.mktime(utc.timetuple())
offset = datetime.fromtimestamp(epoch) - datetime.utcfromtimestamp(epoch)
return utc + offset
|
985b263c0f2dd1fa1840656acdfe008bf84d6828
| 562,067 |
def event_converter(event):
"""
Converts event to string format that shows the event's portait on Reddit.
"""
if event["monsterType"] == "DRAGON":
event = event["monsterSubType"]
else:
event = event["monsterType"]
if event == "BARON_NASHOR":
return "[B](#mt-barons)"
elif event == "RIFTHERALD":
return "[H](#mt-herald)"
elif event == "FIRE_DRAGON":
return "[I](#mt-infernal)"
elif event == "EARTH_DRAGON":
return "[M](#mt-mountain)"
elif event == "WATER_DRAGON":
return "[O](#mt-ocean)"
else:
return "[C](#mt-cloud)"
|
d19a4bd673bdaa1844199d93f2b06a202498ee00
| 386,519 |
def calculate_elo(R1, R2, S, k=32):
"""
Args:
R1 (float): current rating of the first song
R2 (float): current rating of the second song
S (int): 1 for a win, 0 for a loss
k (float): decides how much the elo rating fluctuates
Returns:
float: the newly calculated elo of R1 in (R1 vs R2) where S decides if R1 wins or loses
Source: https://www.researchgate.net/publication/287630111_A_Comparison_between_Different_Chess_Rating_Systems_for_Ranking_Evolutionary_Algorithms
"""
base = 10
# changes how big the elo numbers are, the ratio stays the same
div = 400
# transformed rating
r1 = base ** (R1 / div)
r2 = base ** (R2 / div)
# expected score
e1 = r1 / (r1 + r2)
# new elo
elo = R1 + k * (S - e1)
return elo
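# Worked example (illustrative ratings): a 1400-rated song beats a 1600-rated
# one; e1 = 10**3.5 / (10**3.5 + 10**4.0) ~= 0.2403, so the winner gains
# about 24 points.
assert round(calculate_elo(1400, 1600, 1), 1) == 1424.3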
|
f3437055c93e42b9ef0cd3da4a6ae25bd2f6fa73
| 423,134 |
def execute_javascript(self, script, *args):
"""
Synchronously executes JavaScript in the current window or frame.
:Args:
- script: The JavaScript to execute.
- *args: Any applicable arguments for your JavaScript.
"""
try:
value = self.context.driver.execute_script(script, *args)
self.context.logger.info(f'Successfully executed javascript {script} on the '
f'argument(s) {args if len(args) > 0 else "No args"}')
if value is not None:
self.context.logger.info(f'Result : {value}')
return value
except Exception as ex:
self.context.logger.error(f'Unable to execute javascript {script} on the '
f'argument(s) {args if len(args) > 0 else "No args"}.')
self.context.logger.exception(ex)
        raise Exception(f'Unable to execute javascript {script} on the '
                        f'argument(s) {args if len(args) > 0 else "No args"}. Error: {ex}')
|
681c02d2c2d6f4715cc50dc20b07464e261c05be
| 124,416 |
def compete_policies(game, agent_1, agent_2, episodes=1_000, search_plies_1=1, search_plies_2=1):
"""Compete two agents against each other.
Alternates the agent's colors after each game and agents use epsilon=0 (no exploration).
Args:
game: A game instance, needs `reset()` and `play_move(move)` methods and `turn` attribute.
        agent_1: The first agent, needs a `policy(game, epsilon, plies)` method.
        agent_2: The second agent, needs a `policy(game, epsilon, plies)` method.
        episodes: The number of games to play.
        search_plies_1: How many plies agent_1 searches (1 or 2).
        search_plies_2: How many plies agent_2 searches (1 or 2).
Returns:
Two element list of win fractions.
"""
scores = [0, 0]
for episode in range(episodes):
game.reset()
player_1 = episode % 2
while not game.has_finished():
if game.turn == player_1:
move = agent_1.policy(game, epsilon=0, plies=search_plies_1)
else:
move = agent_2.policy(game, epsilon=0, plies=search_plies_2)
game.play_move(move)
if game.winner == player_1:
scores[0] += 1
else:
scores[1] += 1
return [score / episodes for score in scores]
|
7e0869e994f506d5b3b499c646222ee1441298b3
| 300,245 |
def outlier_removal_drop(dataframe, colname, low_cut, high_cut):
"""Drop rows with outliers on dataframe[colname]"""
col = dataframe[colname]
dataframe = dataframe.loc[
col.isnull()
| col.apply(
lambda x: not isinstance(x, (int, float))
or (x >= low_cut and x <= high_cut)
),
:,
]
return dataframe
|
3501bbfced4750dd51517a35f95023bc3cd0aeb7
| 415,882 |
import math
def primality(n):
""" Check the primality of an integer. Returns True or False. """
if n < 2:
return False
if n < 4:
return True
if n % 2 == 0:
return False
if n % 3 == 0:
return False
root = math.sqrt(n)
f = 5
while f <= root:
if n%f == 0:
return False
if n%(f+2) == 0:
return False
f += 6
return True
|
b69325b3a6a40f1f5477a8de5dcad9f117228254
| 464,964 |
def find_line_of_residue(reslist, resinumber):
"""
Returns the line number where residue n appears in the reslist.
:param reslist: res file as list like:
['C1 1 -0.00146 0.26814 0.06351 11.00 0.05',
'RESI 4 BENZ',
'C2 1 -1.13341 -0.23247 -0.90730 11.00 0.05',]
:type reslist: list of lists
:param resinumber: residue number
:type resinumber: string
:return n: integer
:return line: string
>>> find_line_of_residue(['REM TEST', 'C1 1 -0.00146 0.26814 0.06351 11.00 0.05', \
'RESI 4 BENZ', 'C2 1 -1.13341 -0.23247 -0.90730 11.00 0.05'], "4")
[2, 'RESI 4 BENZ']
"""
for n, line in enumerate(reslist):
if line.upper().startswith('RESI'):
if line.split()[1] == str(resinumber):
return [n, line]
|
82a32e5e1f15f6c59e8285298de6e8e099d3827d
| 285,748 |
def read_input(filename):
"""
>>> template, ins_rules = read_input('example')
>>> template
'NNCB'
>>> len(ins_rules)
16
>>> ins_rules['NH']
'C'
>>> ins_rules['BH']
'H'
"""
template = ""
ins_rules = {}
with open(filename) as fd:
for line in fd:
line = line.strip()
if " -> " in line:
a, b = line.split(" -> ")
ins_rules[a] = b
elif len(line):
template = line
return template, ins_rules
|
df8a49d556bbe567871bc139cff5fb2919979c3e
| 275,427 |
def project(histogram, *args):
"""
    Project to a single axis or several axes on a multidimensional histogram.
    Given a list of axis numbers, this will produce the histogram over those
axes only. Flow bins are used if available.
"""
return histogram._project(*args)
|
7573f670370a1f8a6f0e8e1568bc09d6dea04186
| 320,739 |
def remove_line_break(src_str):
    """Replaces line breaks: newlines (NL) become spaces and carriage
    returns (CR) are removed.
    Args:
        src_str string: A source string.
    Returns:
        A modified string.
    """
    return src_str.replace('\n', ' ').replace('\r', '')
|
4becefdd22b083ab22142f0288d0e467c8ed1ae2
| 156,836 |
import uuid
def from_str(uuid_str):
"""
    Converts a UUID string to a uuid.UUID instance
"""
return uuid.UUID(uuid_str)
|
9bbd2174ae39283e3f8c024c5eccf599dd0186f4
| 411,023 |
import math
def get_candles_number_from_minutes(unit, candle_size, minutes):
"""
Get the number of bars needed for the given time interval
in minutes.
Notes
-----
Supports only "T", "D" and "H" units
Parameters
----------
unit: str
candle_size : int
minutes: int
Returns
-------
int
"""
if unit == "T":
res = (float(minutes) / candle_size)
elif unit == "H":
res = (minutes / 60.0) / candle_size
else: # unit == "D"
res = (minutes / 1440.0) / candle_size
return int(math.ceil(res))
|
5cb7d25483406ea1e23ba9ba6fd05f923e065cb0
| 584,924 |
def str2bool(val):
"""Convert string to boolean value
"""
try:
if val.lower() in ['false', 'off', '0']:
return False
else:
return True
except AttributeError:
raise TypeError('value {0} was not a string'.format(type(val)))
|
01d97b141686a79d310ab59a4acb318250b0746b
| 8,320 |
import struct
import socket
def ip_to_ascii(ip_address):
""" Converts the quad IP format to an integer representation. """
return struct.unpack('!L', socket.inet_aton(ip_address))[0]
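# Usage sketch: '127.0.0.1' as a big-endian integer is 127 * 256**3 + 1.
assert ip_to_ascii('127.0.0.1') == 2130706433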
|
2cb3ccbe70eed2dd2e8ac21d10e180805dec95ea
| 8,555 |
def enum(**named_values):
"""Creates an enum type."""
return type('Enum', (), named_values)
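# Usage sketch (illustrative members):
Color = enum(RED=0, GREEN=1, BLUE=2)
assert Color.RED == 0 and Color.BLUE == 2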
|
fa8390972f4ef343b3cbcc05bca064fe853aa876
| 677,231 |
def method_header(method_name=None, *args):
"""
Get the formatted method header with arguments included
"""
hdr = "%s(" % method_name
if (len(args) > 0):
hdr += args[0]
for arg in args[1:]:
hdr += ", %s" % arg
hdr += ')'
return hdr
|
2c5648c7061dceec5864550938e79a07d5346660
| 528,653 |
import zipfile
import re
def load_zip_file_keys(file, fileNameRegExp=''):
    """Returns an array with the entries of the ZIP file that match the
    regular expression.
    The keys are the file names, or the capturing group defined in
    fileNameRegExp.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        raise Exception('Error loading the ZIP archive.')
    pairs = []
    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp != '':
            m = re.match(fileNameRegExp, name)
            if m is None:
                addFile = False
            elif len(m.groups()) > 0:
                keyName = m.group(1)
        if addFile:
            pairs.append(keyName)
    return pairs
|
af1b9bac8956adea22af3b98c66695f1e20e72e4
| 457,832 |
def virt_imei_shard_bounds(num_physical_imei_shards):
"""
Utility function to determine the virtual IMEI shard ranges that should be created for each physical shard.
Arguments:
num_physical_imei_shards: number of physical imei shards
Returns:
list of virt imei shards bounds
"""
k, m = divmod(100, num_physical_imei_shards)
return [(i * k + min(i, m), (i + 1) * k + min(i + 1, m)) for i in range(num_physical_imei_shards)]
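# Usage sketch: 100 virtual shards split as evenly as possible across
# 4 physical shards.
assert virt_imei_shard_bounds(4) == [(0, 25), (25, 50), (50, 75), (75, 100)]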
|
9ffef0fa9832b2333dfe0da1dce8d9b1bd726d86
| 387,035 |
from typing import Tuple
from typing import List
from typing import Set
def get_sentences_and_labels(path: str) -> Tuple[List[str], List[List[str]], Set[str], Set[str]]:
"""Combines tokens into sentences and create vocab set for train data and labels.
For simplicity tokens with 'O' entity are omitted.
Args:
path: Path to the downloaded dataset file.
Returns:
(sentences, labels, train_vocab, label_vocab)
"""
words, tags = [], []
word_vocab, label_vocab = set(), set()
sentences, labels = [], []
    # Use a context manager so the file is closed when done.
    with open(path) as data:
        for line in data:
            if line != '\n':
                line = line.split()
                words.append(line[1])
                tags.append(line[0])
                word_vocab.add(line[1])
                label_vocab.add(line[0])
            else:
                sentences.append(" ".join(words))
                labels.append(list(tags))
                words.clear()
                tags.clear()
sentences = list(filter(None, sentences))
labels = list(filter(None, labels))
return sentences, labels, word_vocab, label_vocab
|
70d5f28d155e57de11463d739ded54e18c1205b3
| 544,023 |
def slurp(filename):
"""Return the contents of a file as a single string."""
with open(filename, 'r') as fh:
contents = fh.read()
return contents
|
2bac9fd78dbab0cba063d37e86ada00695950377
| 535,776 |
from typing import Counter
def _get_dicts(x_raw, y_raw):
"""Map features and classes to integer values, "indices".
Arguments:
x_raw: [[string]
list of list of string features; outer list represents samples
e.g., [['age_8', 'gender_M', '31', 'V72.3'],
['age_55', 'gender_F', '31']]
y_raw: [string]
list of classes
Returns:
feat_idx_dict: {string: int}
dictionary mapping features to feature indices
idx_feat_dict: {int: string}
dictionary mapping feature indices to features
class_idx_dict: {string: int}
dictionary mapping classes to class indices
idx_class_dict: {int: string}
dictionary mapping class indices to classes
"""
feat_counts = Counter([f for line in x_raw for f in line])
class_counts = Counter(y_raw)
feat_idx_dict, idx_feat_dict, class_idx_dict, idx_class_dict = {}, {}, {}, {}
for idx, (c, _) in enumerate(class_counts.most_common()):
class_idx_dict[c] = idx
idx_class_dict[idx] = c
for idx, (feat, _) in enumerate(feat_counts.most_common()):
feat_idx_dict[feat] = idx
idx_feat_dict[idx] = feat
return feat_idx_dict, idx_feat_dict, class_idx_dict, idx_class_dict
|
3810d0d694ccd0e139ee3898651f88b46cb32c92
| 318,876 |
import math
def _phi(x):
"""Cumulative density function for the standard normal distribution """
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
|
4c681132799d881001b848f5799e36de5ddbec93
| 99,072 |
def eq_xtl(
Cl, D, F,
):
"""
eq_xtl calculates the composition of a trace element in the remaining liquid after a certain amount of
    crystallization has occurred from a source melt when the crystal remains in equilibrium with the melt
as described by White (2013) Chapter 7 eq. 7.81. It then calculates the concentration of that trace element
in a specific solid phase based on partition coefficient input.
Inputs:
Cl = concentration of trace element in original liquid
D = bulk distribution coefficient for trace element of crystallizing assemblage
F = fraction of melt remaining
Returns:
Cl_new = concentration of trace element in the remaining liquid
"""
Cl_new = Cl / (D + F * (1 - D))
return Cl_new
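# Worked example (illustrative values): Cl = 10 ppm, D = 0.1, F = 0.5 gives
# Cl_new = 10 / (0.1 + 0.5 * (1 - 0.1)) = 10 / 0.55 ~= 18.18 ppm.
assert round(eq_xtl(10.0, 0.1, 0.5), 2) == 18.18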
|
298a5557775a953a645aade70f41818e42e761ac
| 701,310 |
def abbrev(text, max_len):
    """
    Return a string of up to max_len characters,
    adding an ellipsis if the string is longer than max_len.
    """
    # The original parameter was named `str`, shadowing the built-in.
    if len(text) > max_len:
        return text[:max_len] + "..."
    return text
|
5cf43fd262f8b872275b50192d945fd66203df07
| 414,650 |
def selected_polygons(polygon_gdf, **kwargs):
"""Creates a plotting instance of the boundaries of all selected polygons.
Args:
polygon_gdf (geo-dataframe): geo-dataframe containing the selected polygons.
Kwargs:
Geopandas-supported keyword arguments.
Returns:
ax: Matplotlib axis object.
"""
ax = polygon_gdf.boundary.plot(**kwargs)
return ax
|
b55295408d08f702cbfccd5236b2d3b6b9d02742
| 205,142 |
def _GetPatternsDistance(pattern_a, pattern_b):
"""Calculates the distance between two patterns.
Args:
pattern_a: a SimilarPattern or TimeSpan.
pattern_b: a SimilarPattern or TimeSpan.
Returns:
Distance in seconds between the two patterns. If they are overlapped, the
distance is 0.
"""
end_a = pattern_a.start_time + pattern_a.duration
end_b = pattern_b.start_time + pattern_b.duration
b_falls_in_a = pattern_a.start_time <= end_b and end_b <= end_a
a_falls_in_b = pattern_b.start_time <= end_a and end_a <= end_b
if b_falls_in_a or a_falls_in_b:
return 0
elif pattern_a.start_time < pattern_b.start_time:
return pattern_b.start_time - end_a
else:
return pattern_a.start_time - end_b
|
394120af9968b7d6b28ec10748ecedf34b5ef3f0
| 495,242 |
def parse_item(item):
"""
Parse a CVE item from the NVD database
"""
parsed_items = {}
cve = item.get('cve', {})
CVE_data_meta = cve.get('CVE_data_meta', {})
ID = CVE_data_meta.get('ID', '')
parsed_items['id'] = ID
affects = cve.get('affects', {})
vendor = affects.get('vendor', {})
vendor_datas = vendor.get('vendor_data', [])
parsed_items['vendors'] = vendor_datas
return parsed_items
|
26d03e5054ceb33d071273d6894cfba0bed58ec5
| 646,935 |
import asyncio
def synchronize_generator(async_generator, *args, **kwargs):
"""
Returns a synchronous generator from an asynchronous generator
"""
ag = async_generator(*args, **kwargs)
async def consume_generator(stop_signal):
r = await ag.asend(stop_signal)
return r
loop = asyncio.new_event_loop()
try:
stop_signal = None
while not stop_signal:
val = loop.run_until_complete(consume_generator(stop_signal))
stop_signal = yield val
if stop_signal:
val = loop.run_until_complete(consume_generator(stop_signal))
loop.close()
loop = None
yield val
except StopAsyncIteration:
loop.close()
loop = None
assert loop is None
|
c301b333491d35cf654ac2eccbc6d4226485e805
| 57,163 |
def get_lines_from_file(file_path: str) -> list:
"""Get all the lines from a file as a list of strings.
:param str file_path: The path to the file.
:Returns: A list of lines (each a `str`).
"""
with open(file_path, encoding="utf-8") as temp:
return temp.readlines()
|
487a9c07bf2d46e84a3be2e1aed91e5df0a8179b
| 69,317 |
def is_verb_relation(token1, token2):
"""Return True if `token1` is a verb with subject `token2`."""
return (
(token1.upos == "VERB")
and (token2.upos in ["NOUN", "PRON"])
and (token2.deprel == "nsubj")
and (token2.head == token1.id)
)
|
1887b109a4c4e87ea94ead24c82d17931f28b676
| 606,939 |
def root_of_number(number: float):
"""
    This function returns the square root of a number, wrapped in a dict under the key "root"
"""
return {"root": number ** 0.5}
|
7d76cc52bdc56a6fe59c014a423b737b41fd4c51
| 252,759 |
def freebsd_translator(value):
"""Translates a "freebsd" target to freebsd selections."""
return {"@com_github_renatoutsch_rules_system//system:freebsd": value}
|
752f3d2b030fd01e0601bcc784dfb66900613a5a
| 610,521 |
def xStr(value, default=''):
"""
Extended str() adding a default result, if the input is None
"""
return default if (value is None) else str(value)
|
3ba950b312051abf94be259b9b4c1274a0fc47d8
| 122,846 |
def top_files(query, files, idfs, n):
"""
Given a `query` (a set of words), `files` (a dictionary mapping names of
files to a list of their words), and `idfs` (a dictionary mapping words
to their IDF values), return a list of the filenames of the the `n` top
files that match the query, ranked according to tf-idf.
"""
tf_idfs = []
for filename, filewords in files.items():
tf_idf = 0
for word in query:
if word not in idfs:
continue
idf = idfs[word]
tf = filewords.count(word)
tf_idf += idf * tf
t = (filename, tf_idf)
tf_idfs.append(t)
    # Sort by tf-idf score, highest first.
    sorted_list = sorted(tf_idfs, key=lambda k: k[1], reverse=True)
    file_list = [item[0] for item in sorted_list]
return file_list[:n]
|
7b0fcc9a8211a7d644ed660c58645932a3b9ac00
| 203,815 |
def prefixed_by(apath, some_paths):
    """Check if a path is a subpath of one of the provided paths"""
    return any(apath.startswith(p) for p in some_paths)
|
efb34300ce04623800b85d1f8e48ef49d335b499
| 540,588 |
def jaccard_distance_numeric(x_or_y: float, x_and_y: float) -> float:
""" Calculated the jaccard distance between two series using pre-computed area values.
Parameters
----------
x_or_y, x_and_y: float
precomputed x_or_y and x_and_y, respectively.
"""
return (x_or_y - x_and_y) / x_or_y
|
a0636b20e45c906b32c76ef9511ed5cbbaf4d161
| 609,123 |
def expand_multi_index(df, new_cols):
"""
Expands a multi-index (and removes the multi-index).
Parameters
----------
df : pandas DataFrame
DataFrame with multi-index to be expanded.
new_cols : list
List of column names for expanded index.
Returns
-------
df : pandas DataFrame
DataFrame with expanded multi-index.
"""
# convert from objects
df = df.convert_dtypes()
multi_index = df.index
df.reset_index(inplace=True)
number_of_indices = len(multi_index[0])
for i in range(number_of_indices):
old_index = 'level_' + str(i)
new_index = new_cols[i]
df.rename(columns={old_index: new_index}, inplace=True)
return df
|
e8588f3648915bb846ac392912ca94a10b02fcb9
| 528,197 |
def GetLastTestedSVNVersion(last_tested_file):
"""Gets the lasted tested svn version from the file.
Args:
last_tested_file: The absolute path to the file that contains the last
tested svn version.
Returns:
The last tested svn version or 'None' if the file did not have a last tested
svn version (the file exists, but failed to convert the contents to an
integer) or the file does not exist.
"""
if not last_tested_file:
return None
last_svn_version = None
# Get the last tested svn version if the file exists.
try:
with open(last_tested_file) as file_obj:
# For now, the first line contains the last tested svn version.
return int(file_obj.read().rstrip())
except IOError:
pass
except ValueError:
pass
return last_svn_version
|
0f589bf03b07cbe7097c925a546cd8f9d0dde86c
| 358,164 |
from pathlib import Path
from datetime import datetime
import shutil
def check_and_backup(filepath):
""" Checks if a file exists at filepath and creates a timestamped backup of the file
Args:
filepath (str, pathlib.Path): Path to file
Returns:
pathlib.Path: Filepath
"""
filepath = Path(filepath)
if filepath.exists():
filename = filepath.stem
extension = filepath.suffix
backup_filename = f"{filename}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
backup_filepath = filepath.parent.joinpath(backup_filename).with_suffix(
extension
)
print(
f"Creating backup of previous version of {filepath.name} at {backup_filepath}"
)
shutil.copy(filepath, backup_filepath)
else:
print(f"{filepath.name} written")
return filepath
|
1c97abadb72fba8295a8e6f8bfa3fd5751011e33
| 273,247 |
def seq2sentence(seq):
"""Convert sequence to string of space-separated letters"""
    return ' '.join(seq)
|
635b7b6a143ae9bbf5b12a598c8b3f0acd20b3a7
| 423,175 |
def uriunsplit(uri):
"""
Reverse of urisplit()
>>> uriunsplit(('scheme','authority','path','query','fragment'))
"scheme://authority/path?query#fragment"
"""
(scheme, authority, path, query, fragment) = uri
result = ''
if scheme:
result += scheme + ':'
if authority:
result += '//' + authority
if path:
result += path
if query:
result += '?' + query
if fragment:
result += '#' + fragment
return result
|
43f5a826ed62838af9fb59c034bf07b695a1b8e0
| 605,433 |
def c2c2DMO(X, choice="NIHAO"):
"""
The ratio between the baryon-influenced concentration c_-2 and the
dark-matter-only c_-2, as a function of the stellar-to-halo-mass
ratio, based on simulation results.
Syntax:
c2c2DMO(X,choice='NIHAO')
where
X: M_star / M_vir (float or array)
choice: choice of halo response --
'NIHAO' (default, Tollet+16, mimicking strong core formation)
'APOSTLE' (Bose+19, mimicking no core formation)
"""
if choice == "NIHAO":
# return 1. + 227.*X**1.45 - 0.567*X**0.131 # <<< Freundlich+20
return 1.2 + 227.0 * X ** 1.45 - X ** 0.131 # <<< test
elif choice == "APOSTLE":
return 1.0 + 227.0 * X ** 1.45
|
4e546add71267c34a9d77a733c5ce99b4ce47d47
| 427,237 |
def _find_version_line_in_file(file_path):
"""
Find and return the line in the given file containing `VERSION`.
:param file_path: Path to file to search.
:return: Line in file containing `VERSION`.
"""
with open(str(file_path), "r") as fileh:
version_lines = [
line for line in fileh.readlines() if line.startswith("VERSION")
]
if len(version_lines) != 1:
raise ValueError(
"Unable to determine 'VERSION' in {file}".format(file=file_path)
)
return version_lines[0]
|
ef3e648ac6b4e9f89e67986507eba74ba65ded2d
| 663,173 |
def get_position_below(original_position):
"""
Given a position (x,y) returns the position below the original position, defined as (x,y-1)
"""
    (x, y) = original_position
    return (x, y - 1)
|
87a52a824dc124474ae734151aa4ff4302467030
| 459,916 |
def physicond(amp, t_phi, t_th):
""" Function enforcing the physical conditions of the system. Namely,
the perturbation in temperature cannot be a negative one. Thus function
assures a positive or null amplitude,
positive or null characteristic times,
and quicker rising exponential than the decaying exponential.
Parameters
----------
amp : float
Amplitude of the exponential pulse.
t_phi : float
Characteristic time of the decaying exponential.
t_th : float
Characteristic time of the rising (thermalisation) exponential.
Returns
-------
Same as parameters with physical constraints applied.
"""
# no negative amplitude
if amp <= 0 :
amp = 0
# no negative char. time
if t_th <= 0:
t_th = 1e-20
# rising time shorter than decay time
if t_phi <= t_th:
t_phi = t_th
return amp, t_phi, t_th
|
b8aafab125ba73408378c052dc8f2c7a883eed97
| 183,940 |
def match(pattern, address):
"""
Match ip address patterns.
This is not regex.
A star at the end of a ip address string does prefix matching.
None or a blank string matches any address
match('192.168.0.1', '192.168.0.1') == True
match('192.168.0.2', '192.168.0.1') == False
match('192.168.*', '192.168.23.56') == True
match('192.168.0.*', '192.168.0.1') == True
match('192.168.0.*', '192.168.0.35') == True
match('193.*', '192.168.0.1') == False
:param pattern: pattern to match against.
:param address: the address to check
:return: True if the address matches the pattern.
"""
if not pattern or pattern == "":
return True
if pattern.endswith('*'):
return address.startswith(pattern[:-1])
else:
return pattern == address
|
5943909fe6c83163700e40c18c9d5e6750764859
| 113,342 |
def create_api_call_headers(_client_id, _auth_token):
"""
Creates the necessary headers to invoke an API call.
@param _client_id: Client ID magic string.
@param _auth_token: Authentication token magic string. This is usually stored in the application state/configuration.
@return: A map of headers.
"""
ret = {}
ret["accept"] = "application/json"
#ret["x-digikey-locale-language"] = "en"
#ret["x-digikey-locale-currency"] = "usd"
ret["authorization"] = str(_auth_token)
ret["content-type"] = "application/json"
ret["x-ibm-client-id"] = str(_client_id)
return ret
|
0a768e25b9552d222197655a0994a9ed4eb9dca1
| 581,512 |
def get_r_env(A_env, A_A):
"""床面積の合計に対する外皮の部位の面積の合計の比 (7)
Args:
A_env(float): 外皮の部位の面積の合計 (m2)
A_A(float): 床面積の合計 (m2)
Returns:
float: 床面積の合計に対する外皮の部位の面積の合計の比
"""
return A_env / A_A
|
eb01ff8f3478e082d5649316445db34524c7735d
| 600,701 |
def exp_required(level: int) -> int:
"""
Computes EXP required for current level
- EXP Required: (8 * Level^3) / 4
"""
BASE_EXP = 8
amount = BASE_EXP * (level ** 3)
return round(amount / 4)
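# Quick check of the formula (illustrative level): round(8 * 10**3 / 4) = 2000.
assert exp_required(10) == 2000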
|
835d1c1c48ffa767edaabd6611b3f5173702bd10
| 301,774 |
import operator
def sequence_eq(sequence1, sequence2):
"""
Compares two sequences.
Parameters
----------
sequence1 : sequence
The first sequence.
sequence2 : sequence
The second sequence.
Returns
-------
bool
`True` iff `sequence1` equals `sequence2`, otherwise `False`.
"""
return len(sequence1) == len(sequence2) and all(map(operator.eq, sequence1, sequence2))
|
3e6520602272873f471c808ad14c6d3a90d31972
| 673,743 |
def _find_assignment(arg_token):
"""
Find the first non-escaped assignment in the given argument token.
Returns -1 if no assignment was found.
:param arg_token: The argument token
:return: The index of the first assignment, or -1
"""
idx = arg_token.find('=')
while idx != -1:
if idx != 0:
if arg_token[idx - 1] != '\\':
# No escape character
return idx
idx = arg_token.find('=', idx + 1)
# No assignment found
return -1
|
e7673c6aa8190f327cafc18b66d4394c38615543
| 236,027 |
def header_is_sorted_by_coordinate(header):
"""Return True if bam header indicates that this file is sorted by coordinate.
"""
return 'HD' in header and 'SO' in header['HD'] and header['HD']['SO'].lower() == 'coordinate'
|
b656770806818abe742be32bc14c31a8a8e3e535
| 703,016 |
def _pandas_rolling_since_018(df, window, *args, **kwargs):
"""Use rolling() to compute a rolling average"""
return df.rolling(window, *args, **kwargs).mean()
|
ef29d860811ff57df9ff9a023bfee2261d24498c
| 473,534 |
def create_readonly_assessment_params(content, answers):
"""Creates parameters for a readonly assessment in the view templates."""
assessment_params = {
'preamble': content['assessment']['preamble'],
'questionsList': content['assessment']['questionsList'],
'answers': answers,
}
return assessment_params
|
b93954ace8c84e894a1eda46d4a5fe3b17c69129
| 445,302 |
def distance_from_camera(bbox, image_shape, real_life_size):
"""
Calculates the distance of the object from the camera.
PARMS
bbox: Bounding box [px]
image_shape: Size of the image (width, height) [px]
real_life_size: Height of the object in real world [cms]
"""
## REFERENCE FOR GOPRO
# Focal Length and Image Size
# https://clicklikethis.com/gopro-sensor-size/
# https://gethypoxic.com/blogs/technical/gopro-hero9-teardown
# https://www.sony-semicon.co.jp/products/common/pdf/IMX677-AAPH5_AAPJ_Flyer.pdf
# http://photoseek.com/2013/compare-digital-camera-sensor-sizes-full-frame-35mm-aps-c-micro-four-thirds-1-inch-type/
# https://www.gophotonics.com/products/cmos-image-sensors/sony-corporation/21-209-imx677
# Camera Sensor: Sony IMX677
# Camera Sensor array pixel size: 1.12[um] X 1.12[um]
# Camera Resolution: 5663(H) X 4223(V)
# Camera Sensor dimensions: 6.343[mm/H] X 4.730[mm/V]
# Camera Focal Length: 2.92 mm
# 5633(px)
# 4 -------------------
# 2 - -
# 2 - -
# 3 - -
# (p - -
# x) - -
# -------------------
    # REFERENCE FOR CALCULATION
# https://www.scantips.com/lights/subjectdistance.html
# GoPro Intrensic Camera Settings #
###################################
focal_length_mm = 5.21
unit_pixel_length = 1.12
sen_res = (5663, 4223)
sensor_height_mm = (unit_pixel_length*sen_res[1])/1000
sensor_width_mm = (unit_pixel_length*sen_res[0])/1000
###################################
# Calculation
image_height_px = image_shape[0]
image_width_px = image_shape[1]
(startX, startY, endX, endY) = bbox
height_of_object_px = endY - startY
width_of_object_px = endX - startX
obj_height_on_sensor_mm = (sensor_height_mm * height_of_object_px) / image_height_px
return (real_life_size * focal_length_mm)/obj_height_on_sensor_mm
|
037adbaba89ae13ed74ac5fc1d1cfc080c5107d1
| 676,041 |
def dotv3(a,b):
"""dot product of 3-vectors a and b"""
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
|
01d3b5bc8cd8e0de6da1617bf0dc300502c099fc
| 298,464 |
import torch
def batch_linear(x, W, b=None):
"""Computes y_i = x_i W_i + b_i where i is each observation index.
This is similar to `torch.nn.functional.linear`, but a version that
supports a different W for each observation.
x: has shape [obs, in_dims]
W: has shape [obs, out_dims, in_dims]
b: has shape [out_dims]
"""
if x.size()[1] != W.size()[-1]:
raise ValueError(
f'the in_dim of x ({x.size()[1]}) does not match in_dim of W ({W.size()[-1]})')
if x.size()[0] != W.size()[0]:
raise ValueError(
f'the obs of x ({x.size()[0]}) does not match obs of W ({W.size()[0]})')
obs = x.size()[0]
in_dims = x.size()[1]
out_dims = W.size()[1]
x = x.view(obs, 1, in_dims)
W = W.transpose(-2, -1)
    if x.size()[0] != W.size()[0]:
        pass  # shape already validated above
    if b is None:
        return torch.bmm(x, W).view(obs, out_dims)
    else:
        b = b.view(1, 1, out_dims)
        # baddbmm computes b + x @ W batch-wise; the legacy positional
        # (beta, mat, alpha, batch1, batch2) calling convention is no longer
        # supported in current PyTorch.
        return torch.baddbmm(b, x, W).view(obs, out_dims)
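# Usage sketch (illustrative shapes): a separate weight matrix per observation.
x = torch.randn(4, 3)      # 4 observations, 3 input dims
W = torch.randn(4, 2, 3)   # one 2x3 weight matrix per observation
b = torch.randn(2)
assert batch_linear(x, W, b).shape == (4, 2)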
|
1cac8de9ad6b0941149f254a925da310f2c67fc6
| 46,428 |
def get_x_y(data):
"""Return the features (X) and target (y) for Ames Housing Dataset.
    These values were chosen initially by investigating univariate comparisons
    (boxplots and scatter plots) between features and log SalePrice.
    Log-transformed features are used in cases where a log transformation
    turned a right-skewed distribution into a normal distribution.
:param data: pd.DataFrame. Dataframe containing the Ames Housing Dataset.
:returns: (pd.DataFrame, pd.Series). The features and target variable
"""
features = [
"1stFlrSF_log",
"2ndFlrSF",
"TotalBsmtSF",
"GarageArea",
"GrLivArea_log",
"LotArea_log",
"LotFrontage_log",
"MasVnrArea",
"WoodDeckSF",
"BsmtFinSF1",
"BsmtUnfSF",
"EnclosedPorch",
"ScreenPorch",
"FullBath",
"TotRmsAbvGrd",
]
indicator_cols_dirty = set(data.filter(like="_")).difference(set(features))
indicator_cols_clean = list(
filter(lambda _: "_log" not in _, indicator_cols_dirty))
X = data[features + indicator_cols_clean]
y = data["SalePrice_log"]
return X, y
|
f772474de88823821244c3edb40dce2b1c49530b
| 352,301 |
from io import StringIO
import json
import hashlib
def digest_object(obj, encoding='utf-8'):
"""Convert any object to md5 hex digest through a ordonned and minified JSON data."""
io = StringIO()
json.dump(
obj, io, skipkeys=False, ensure_ascii=False,
check_circular=True, allow_nan=True, cls=None, indent=None,
separators=(',', ':'), default=None, sort_keys=True)
return hashlib.md5(io.getvalue().encode(encoding)).hexdigest()
|
77bcdfcc0a1a0782220a37cf29d2f86da236dd57
| 251,351 |
def get_labels(fields):
"""
Helper function to get .mat struct keys from `fields`
Parameters
----------
fields : dict_like
Returns
-------
labels : list
Struct keys
"""
labels = [k for k, v in sorted(fields.items(),
key=lambda x: x[-1][-1])]
return labels
|
d9fadbd05b2096c2a909125fbbc657f7e40cc90e
| 466,194 |
def nested_get(dictionary, keys):
"""Get nested fields in a dictionary"""
to_return = dictionary.get(keys[0], None)
for key in keys[1:]:
if to_return:
to_return = to_return.get(key, None)
else:
break
return to_return
|
a01f04aae670f118c97808ab27933799b0613c56
| 631,306 |
def collect_input_files_from_input_dictionary(input_file_dict):
"""Collect all @ids from the input file dictionary"""
all_inputs = []
for an_input_arg in input_file_dict:
if an_input_arg == 'additional_file_parameters':
continue
if isinstance(input_file_dict[an_input_arg], (list, tuple)):
all_inputs.extend(input_file_dict[an_input_arg])
else:
all_inputs.append(input_file_dict[an_input_arg])
return all_inputs
|
865ad6a453b2127a0a553f54a2cdac1d44d6dfc3
| 548,797 |