content | sha1 | id
---|---|---|
from typing import AnyStr
import codecs
def auto_encode(string: AnyStr, encoding: str = "utf-8", errors: str = "strict") -> bytes:
"""Lookup a encoder and encode the string if it is bytes, else return it
untouched if it's already in bytes (for utf). If its an int, etc, it'll try
to wrap it in bytes for you.
:param string: The text to encode
:param encoding: The encoding-type to use; default is `utf-8`
:param errors: optional; pass `replace` or `namereplace` if you don't want
the default `strict` for how to process errors
:return: The encoded text
"""
encoder = codecs.getencoder(encoding=encoding)
if isinstance(string, bytes):
return string
elif isinstance(string, str):
return encoder(string)[0]
else:
return encoder(str(string))[0] | ff5854e843f718adaec728ac11083cfa22604e9e | 73,544 |
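A quick usage sketch (illustrative values only):
auto_encode('héllo')           # b'h\xc3\xa9llo'
auto_encode(b'already bytes')  # returned unchanged
auto_encode(42)                # b'42' (wrapped via str())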
def merge_pages(key, pages):
"""Merge boto3 paginated results into single list.
Args:
key (str): the key used to gather results from each page.
pages (list): a list of pages of typically description dictionaries.
Returns:
list: a single flat list containing results of all pages.
"""
return [item for page in pages for item in page[key]] | 56ffe16f66e6a4d98c7e742d582f62a88d00a4e3 | 395,783 |
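A usage sketch with boto3-style pages (the key name is illustrative):
pages = [{'Instances': ['i-1', 'i-2']}, {'Instances': ['i-3']}]
merge_pages('Instances', pages)  # ['i-1', 'i-2', 'i-3']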
def convert_points_from_homogeneous(points):
"""Converts 3D points from the homogeneous coordinate
Args:
points (numpy.ndarray): The points to convert. It should a 2D array with
shape 4 x num_points.
Returns:
numpy.ndarray: Non-homogeneous points. A 2D array with shape
3 x num_points.
"""
return points[:3, :] | 96a21ae2d03f96f459e29978bb04ec779cbc5194 | 432,798 |
def parsemsg(s):
"""
Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args | 0c553a57c82547ea68d9978580d657c8d432f5d2 | 327,768 |
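For example, a typical IRC PRIVMSG line parses as:
parsemsg(':nick!user@host PRIVMSG #chan :hello world')
# ('nick!user@host', 'PRIVMSG', ['#chan', 'hello world'])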
def get_elements_by(xmldoc, *args, **kwargs):
"""Searches xml for nodes with provided tag, attributes (args) and
attribute key-val pairs (kwargs)
Args:
xmldoc (xml.dom.minidom): minidom object (returned by minidom.parse())
tag (str, optional): name of tag to search for
*args (str, optional): attribute names to check element has
**kwargs (str=str, optional): attributeName=attributeValue to check
element has
Returns:
list: returns list of xml.Elements that have given tag, attribute(s)
and/or attribute key-val(s)
"""
tag = kwargs.pop('tag', None)
if tag is None: # Get all elements
tag = '*'
elements = xmldoc.getElementsByTagName(tag)
if len(args):
for a in args:
elements = [e for e in elements if e.hasAttribute(a)]
if len(kwargs):
for k in kwargs.keys():
elements = [e for e in elements if e.hasAttribute(k) and
e.attributes[k].value == kwargs[k]]
return elements | f9dc65fd48c5911430489adfef06b9943e562e83 | 397,072 |
def get_events(headers, data):
""" Build a list of dictionaries that have the detail and what that detail "contains".
Args:
headers (list): Headers for these type of events (Provides the order and expected output)
data (list): List of event dicts to look up
Returns:
list: list of dictionary objects that maps the data to what is contained.
"""
# Map header names to what is contained in each.
contains_map = {
'source_addr': ['ip'],
'destination_ip': ['ip'],
'username': ['user name'],
'process_table_id': ['threatresponse process table id'],
'process_name': ['file path', 'file name'],
'process_id': ['pid'],
'process_command_line': ['file name'],
'file': ['file path'],
'domain': ['domain'],
'ImageLoaded': ['file path', 'file name'],
'Hashes': ['md5']
}
events = []
for event in data:
event_details = []
for head in headers:
detail = event.get(head, None)
event_details.append({
'data': detail,
'contains': contains_map.get(head, None) if detail else None
})
events.append(event_details)
return events | 3b4ea233fa48deb193c26b74cd53352d6e5c875c | 392,467 |
def parse_plot_set(plot_set_string):
"""
Given one of the string arguments to the --plot-sets option, parse out a
data structure representing which conditions ought to be compared against
each other, and what those comparison plots/tables should be called.
The syntax of a plot set is [title:]condition[,condition[,condition...]].
The first condition is the comparison baseline, when applicable.
Returns a tuple of a plot set title, or None if unspecified, and a list of
condition names.
"""
colon_pos = plot_set_string.find(':')
if colon_pos != -1:
# Pull out the title before the colon
title = plot_set_string[0:colon_pos]
# And the rest of the specifier after it
plot_set_string = plot_set_string[colon_pos + 1:]
else:
# No title given
title = None
# Return the title and condition list tuple
return (title, plot_set_string.split(',')) | 1df83681aa3110dfd9302bd7918f15dfbfa497ab | 706,518 |
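Example invocations (condition names are illustrative):
parse_plot_set('coverage:baseline,fast,slow')  # ('coverage', ['baseline', 'fast', 'slow'])
parse_plot_set('baseline,fast')                # (None, ['baseline', 'fast'])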
def check_for_security_updates_using_scheduled_task(api, configuration, api_version, api_exception):
""" Creates a scheduled task that checks for security updates.
The scheduled task runs immediately after it is created, and is deleted thereafter.
:param api: The Deep Security API modules.
:param configuration: Configuration object to pass to the api client.
:param api_version: The version of the API to use.
:param api_exception: The Deep Security API exception module.
:return: The ID of the scheduled task.
"""
# Set the name and task type
check_for_security_updates = api.ScheduledTask()
check_for_security_updates.name = "Check For Security Updates"
check_for_security_updates.type = "check-for-security-updates"
# Run when the scheduled task is created
check_for_security_updates.run_now = True
# Use a once-only recurrence
schedule_details = api.ScheduleDetails()
schedule_details.recurrence_type = 'none'
# Set the recurrence count to 1 so that the task is deleted after running
schedule_details.recurrence_count = 1
schedule_parameters = api.OnceOnlyScheduleParameters()
# The start time is not important because it is deleted after running
schedule_parameters.start_time = 0
schedule_details.once_only_schedule_parameters = schedule_parameters
check_for_security_updates.schedule_details = schedule_details
# Scan all computers
computer_filter = api.ComputerFilter()
computer_filter.type = "all-computers"
# Create the task parameters object and add the computer filter
task_parameters = api.CheckForSecurityUpdatesTaskParameters()
task_parameters.computer_filter = computer_filter
check_for_security_updates.check_for_security_updates_task_parameters = task_parameters
# Create the scheduled task on Deep Security Manager
scheduled_tasks_api = api.ScheduledTasksApi(api.ApiClient(configuration))
scheduled_task = scheduled_tasks_api.create_scheduled_task(check_for_security_updates, api_version)
return scheduled_task.id | 06b77568781624075fb8d8b4ad765d3e9b616bce | 196,087 |
def name_and_age(name, age):
"""Returns a string stating the person's age."""
if age >= 0:
return name + " is " + str(age) + " years old."
else:
return "Error: Invalid age" | 87e3320324261cf56c3dbb8150d01d95846c7454 | 360,582 |
def table_name(table, column):
"""Compute the table name
table (string) : the original table's name
column : the column name
return : string
"""
return "{0}__{1}_agg".format(table, column) | 3d247d424f558d22559df2dbc9ac9e5e0fa2768f | 118,664 |
def to_hex_string(string: str) -> str:
"""Converts UTF-8 string into its hex representation
:param string: str
The string to convert to hex
:return:
Hex representation of the given string
"""
return string.encode('utf-8').hex() | 62b9b71af31bccdde136aa6d2dabbb2ee3df2ea7 | 25,169 |
from typing import List
def group_anagrams(strs: List[str]) -> List[List[str]]:
"""
LeetCode 49: Group Anagrams
Given an array of strings, group anagrams together.
"""
str_map = {}
res = []
for s in strs:
temp = ''.join(sorted(s))
if temp not in str_map:
str_map[temp] = len(res)
res.append([s])
else:
res[str_map[temp]].append(s)
return res | 847b448f194d798bbc5f201c2864cd1dd0d6c40c | 586,147 |
def generate_dot(dc):
"""
Generates a dot format graph of docker-compose depends_on between services
:param dict dc: Docker compose configuration loaded as a python dict
:rtype: string
"""
lines = []
lines.append("digraph docker {")
for service_name, service in dc["services"].items():
lines.append(
f' "{service_name}" [label="{service_name}",shape=box,fillcolor="paleturquoise",style="filled,rounded"];'
)
for dep in service.get("depends_on", []):
lines.append(f' "{service_name}" -> "{dep}" [label = "depends_on"]')
lines.append("}")
return "\n".join(lines) | 6b9fcfbb939e779593b1e559065ab7daa1a4ba28 | 654,919 |
def set_search_csrf(session):
"""Extract the required CSRF token.
LinkedIn's search function requires a CSRF token equal to the JSESSIONID.
"""
csrf_token = session.cookies['JSESSIONID'].replace('"', '')
session.headers.update({'Csrf-Token': csrf_token})
return session | 1c506f6c7039b4908cadd772c2e7a289b7d18ae3 | 598,727 |
from typing import List
def str_in_list_non_case_sensitive(string: str, list_of_strings: List[str]) -> bool:
"""
>>> str_in_list_non_case_sensitive('aba',['abc','cde'])
False
>>> str_in_list_non_case_sensitive('aBa',['abc','Aba'])
True
"""
string = string.lower()
list_of_strings = [my_string.lower() for my_string in list_of_strings]
return string in list_of_strings
def getattribute(objeto, name: str):
"""Returns the attribute matching passed name."""
# Get internal dict value matching name.
value = objeto.__dict__.get(name)
if not value:
# Return None if the attribute value is not found (or is falsy).
return None
# Return attribute value.
return value | 8235c0c7440d0712d2978cfd72ca9529ba7b80b6 | 528,587 |
def quick_exponent_with_mod(base, power, modulo):
"""Compute quickly the exponent within a given modulo range.
Will apply a modulo with the specified base at every iteration of the
exponentiation algorithm, making sure the result is in the given range."""
# 'powers' will be a list of the base with powers of two applied, i.e.:
# with base==3, powers==[3, 3^2, 3^4, 3^8, 3^16, ...]
powers = [base]
# for each power of two
i = 2
while i <= power:
# compute base^(2^i) and add it to the list
powers.append((powers[-1] * powers[-1]) % modulo)
# next power of two
i *= 2
# list of booleans corresponding to which powers of two to include to make
# up the whole exponent
powers_to_include = list(bool(int(digit)) for digit in bin(power)[2:][::-1])
# accumulator for the product
accumulator = 1
# for each factor==base^(2^index)
for index, factor in enumerate(powers):
# if this power should be included
if powers_to_include[index]:
# multiply and apply modulo
accumulator *= factor
accumulator %= modulo
# return the product accumulator
return accumulator | 404373115f14b3d751c9bb7fb77b101aa7a7bd70 | 55,160 |
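A minimal sanity check against Python's built-in pow (illustrative values):
assert quick_exponent_with_mod(3, 5, 7) == pow(3, 5, 7) == 5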
def get_schc_conf(config):
"""Return a dict of which values are taken from the config instance."""
dict_config = {}
for n in config.__dir__():
dict_config[n] = getattr(config, n)
return dict_config | 9f66d470dde02c0c1c2a12425bc976f03c5392ba | 193,795 |
def _calculate_pycls_momentum(
alpha: float, total_batch_size: int, max_epoch: int, update_period: int
):
"""pycls style momentum calculation which uses a relative model_ema to decouple momentum with
other training hyper-parameters e.g.
* training epochs
* interval to update ema
* batch sizes
Usually the alpha is a tiny positive floating number, e.g. 1e-4 or 1e-5,
with ``max_epoch=100``, ``total_batch_size=1024`` and ``update_period=32``, the ema
momentum should be 0.996723175, which has roughly same behavior to the default setting.
i.e. ``momentum=0.9999`` together with ``update_period=1``
"""
return max(0, 1 - alpha * (total_batch_size / max_epoch * update_period)) | 1928ac4d50e1e2f3acd5b187ef31639e309dfa21 | 518,279 |
def format_numeric_column(column):
"""
Format fn for simple numeric columns.
Returns column (zero-indexed) +1 to give 1-indexed label.
"""
return column + 1 | ff8393d4fb13d9742d3bc636d4720237596b26fe | 595,692 |
import itertools
def flatten(list2d):
"""
flatten nested lists into a single list
"""
return list(itertools.chain.from_iterable(list2d)) | 4df26e37e252c8ddc9bc6794ac66612aa005ef87 | 203,880 |
from typing import Iterable
from typing import Generator
def flatten(collection: Iterable[Iterable]) -> Generator:
"""Flatten list of lists in one plane list"""
return (item for sublist in collection for item in sublist) | 4ae959b5e8446209a3156bfe62e4c46ef4dc55db | 341,692 |
def gen_new_coordinates_from_change_in_coordinates(old_coordinates, change_in_coordinates):
"""Calculate new coordinates given coordinates(lat,lon) and change_in_coordinates(lat,lon)."""
return tuple(map(lambda x, y: x + y, old_coordinates, change_in_coordinates)) | cb12e51e7f7526038758bb1229c6fc8a815949e1 | 432,364 |
def _gt_get_all_nodes(self):
"""
Get all the nodes in this tree
"""
return self.tree.get_all_nodes() | 92793d01b0f4a0c347a7e5a717d3bf760b27288b | 59,960 |
import random
def choice(seq):
""" Choose a random element from a non-empty sequence.
(Based on the Python 2.x code for random.choice, and used for deterministic results across
platforms as Python 3.x changed the way random.choice worked.)
"""
return seq[int(random.random() * len(seq))] | d7fe586187bd873e62f2b25b43491ff2d28544ca | 325,701 |
import traceback
def _extract_stack(count=5):
"""
Helper function to extract the function stack.
:param count: Number of stack frames to extract.
:return: function names as string.
"""
result = ''
for t in traceback.extract_stack()[:count]:
result += '{0}->'.format(t[2])
return result | ca507781ad4a63fbc7d9f567135a1c797990272d | 279,964 |
def order_cols(df, cols):
"""Put columns in cols first, followed by rest of columns"""
rest = [col for col in df.columns if col not in cols]
df = df[cols + rest]
return df | 269e9ecaf976e89af0784daf7052cfd3fe494e27 | 575,667 |
def check_error(http):
"""
Checks for http errors (400 series) and returns True if an error exists.
Parameters
----------
http : addinfourl whose fp is a socket._fileobject
Returns
-------
has_error : bool
"""
err = http.code
if 400 <= err < 500:
print("HTTP error {}. Make sure your PV exists.".format(err))
return True
return False | 20ba1426758e68b196708ec3260ff1f3a86c2820 | 684,573 |
def max_lt(seq, val):
"""
Return greatest item in seq for which item < val applies.
None is returned if seq was empty or all items in seq were >= val.
>>> max_lt([3, 6, 7, 11], 10)
7
>>> max_lt((5, 9, 12, 13), 12)
9
"""
idx = len(seq)-1
while idx >= 0:
if seq[idx] < val:
return seq[idx]
idx -= 1
return None | 0a6152a852c5e2c1421214d876ec6945ac42512b | 532,484 |
def f(value):
"""Format a float value to have 4 digits after the decimal point"""
return "{0:.4f}".format(value) | 1c4107a8f9c6a3b8e450a0c45647dc51816160ba | 195,667 |
def neighbour_coordinates(grid, point_coords):
"""Returns list of coordinates for neighbours of the given point,
within the bounds of the grid"""
grid_width = len(grid[0])
grid_height = len(grid)
point_row, point_col = point_coords
neighbour_coords = [
(point_row, point_col - 1),
(point_row, point_col + 1),
(point_row - 1, point_col),
(point_row + 1, point_col),
]
in_grid_neighbours = [
(row_n, col_n)
for row_n, col_n in neighbour_coords
if row_n >= 0 and row_n < grid_height and col_n >= 0 and col_n < grid_width
]
return in_grid_neighbours | 614e936c0992b734ec47c61405fda64b2a0ad348 | 604,418 |
def format_line(data, linestyle):
"""Formats a list of elements using the given line style"""
return linestyle.begin + linestyle.sep.join(data) + linestyle.end | 6a49a80f876ffe8a8f38e6e987051a0247858c6c | 19,148 |
from pathlib import Path
from typing import Optional
def resolve_absolute_offset(
dataset_path: Path, offset: str, target_path: Optional[Path] = None
) -> str:
"""
Expand a filename (offset) relative to the dataset.
>>> external_metadata_loc = Path('/tmp/target-metadata.yaml')
>>> resolve_absolute_offset(
... Path('/tmp/great_test_dataset'),
... 'band/my_great_band.jpg',
... external_metadata_loc,
... )
'/tmp/great_test_dataset/band/my_great_band.jpg'
>>> resolve_absolute_offset(
... Path('/tmp/great_test_dataset.tar.gz'),
... 'band/my_great_band.jpg',
... external_metadata_loc,
... )
'tar:/tmp/great_test_dataset.tar.gz!band/my_great_band.jpg'
>>> resolve_absolute_offset(
... Path('/tmp/great_test_dataset.tar'),
... 'band/my_great_band.jpg',
... )
'tar:/tmp/great_test_dataset.tar!band/my_great_band.jpg'
>>> resolve_absolute_offset(
... Path('/tmp/great_test_dataset.zip'),
... 'band/other/my_great_band.jpg',
... )
'zip:/tmp/great_test_dataset.zip!band/other/my_great_band.jpg'
>>> resolve_absolute_offset(
... Path('/tmp/MY_DATASET'),
... 'band/my_great_band.jpg',
... Path('/tmp/MY_DATASET/ga-metadata.yaml'),
... )
'band/my_great_band.jpg'
"""
dataset_path = dataset_path.absolute()
if target_path:
# If metadata is stored inside the dataset, keep paths relative.
if str(target_path.absolute()).startswith(str(dataset_path)):
return offset
# Bands are inside a tar file
if ".tar" in dataset_path.suffixes:
return "tar:{}!{}".format(dataset_path, offset)
elif ".zip" in dataset_path.suffixes:
return "zip:{}!{}".format(dataset_path, offset)
else:
return str(dataset_path / offset) | cbeb2dfb6fea41d5a995ada529d964357c4e9fb9 | 653,737 |
def find_single_number(arr: list[int]) -> int:
"""Finds the single number in a non-empty array of integers where every number appears exactly twice, except for one which appears exactly once.
Complexity:
Time: O(n)
Space: O(1)
Args:
arr: array of numbers with all but a single element appearing exactly twice,
the single element that does not appear two times appears exactly once
Returns: the single element that appears exactly once
Examples:
>>> find_single_number([1, 4, 2, 1, 3, 2, 3])
4
"""
"""ALGORITHM"""
## INITIALIZE VARS ##
single_number = 0
# XOR of all values in arr:
# all duplicate elements will cancel out
# leaving `single_number ^ 0` = `single_number`
for num in arr: # REDUCE
single_number ^= num
return single_number | a10f5f8cbb49968073c4a28d4eca883c128fb542 | 642,233 |
def num_active_calls(log, ad):
"""Get the count of current active calls.
Args:
log: Log object.
ad: Android Device Object.
Returns:
Count of current active calls.
"""
calls = ad.droid.telecomCallGetCallIds()
return len(calls) if calls else 0 | a6674df1e8e539478db6ab1a640fbce1cf0b6b4c | 701,482 |
def string_repr(source_string):
"""Return a nice visual representation of the given
SourceString and all its properties.
Any property that isn't populated (e.g. tags
or developer comment) will be omitted.
"""
return (
'[green]"{string}"[end]\n'
'{key}'
'{context}'
'{comment}'
'{charlimit}'
'{tags}'
' [high]occurrences:[end] [file]{occurrences}[end]\n'
).format(
string=source_string.string,
key=(
' [high]key:[end] "[yel]{}[end]"\n'.format(
source_string.key
)
),
context=(
' [high]context:[end] {}\n'.format(
u', '.join(source_string.context)
)
if source_string.context else ''
),
comment=(
' [high]comment:[end] {}\n'.format(
source_string.developer_comment
)
if source_string.developer_comment else ''
),
charlimit=(
' [high]character limit:[end] {}\n'.format(
source_string.character_limit
)
if source_string.character_limit else ''
),
tags=(
' [high]tags:[end] {}\n'.format(
u', '.join(source_string.tags)
)
if source_string.tags else ''
),
occurrences=u', '.join(source_string.occurrences),
) | b3261e9731445819173ab4a12e4f189a157660b8 | 232,209 |
def dice_similarity_coefficient(inter, union):
"""Computes the dice similarity coefficient.
Args:
inter (iterable): iterable of the intersections
union (iterable): iterable of the unions
"""
return 2 * sum(inter) / (sum(union) + sum(inter)) | ae58310528b7c24b7289cb3bcf76c72745c8bacc | 678,115 |
def get_local_node_mapping(tree, last_tree, spr):
"""
Determine the mapping between nodes in local trees across ARG.
A maps across local trees until it is broken (parent of recomb node).
This method assumes tree and last_tree share the same node naming
and do not contain intermediary nodes (i.e. single lineages).
"""
if last_tree is None:
# no mapping if last_tree is None
return None
else:
(rname, rtime), (cname, ctime) = spr
# assert ARG is SMC-style (no bubbles)
assert rname != cname
# recomb_parent is broken and does not map to anyone
recomb_parent = last_tree[rname].parents[0]
mapping = dict((node.name, node.name) for node in last_tree)
mapping[recomb_parent.name] = None
return mapping | b30a95bb0c23fc00005de9474da19e781bc0485e | 26,043 |
import math
def computePairN(att_name, att_value, data):
"""
Helper to compute the number of pairs in which the given attribute value is ranked above others (value > *); used in the Pairwise oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
data: dataframe that stored the data
Return: number of pairs of att_value > * in the input data, the number of such pairs estimated using proportions, and the size of the group with att_value
"""
# input checked_atts includes names of checked sensitive attributes
total_N = len(data)
# get the unique value of this sensitive attribute
values_att = list (data[att_name].unique())
# for each value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name]==att_value].index+1
size_vi = len(position_lists_val)
count_vi_prefered_pairs = 0
for i in range(len(position_lists_val)):
cur_position = position_lists_val[i]
left_vi = size_vi - (i + 1)
count_vi_prefered_pairs = count_vi_prefered_pairs + (total_N - cur_position - left_vi)
# compute estimated fair pairs
total_pairs_vi = size_vi*(total_N-size_vi)
estimated_vi_pair = math.ceil((size_vi / total_N) * total_pairs_vi)
return int(count_vi_prefered_pairs),int(estimated_vi_pair),int(size_vi) | 495fdc105ab0aebf39464dffc50aa1a641973394 | 117,806 |
import json
def load_mapping(filename):
"""
Load mapping dict from file.
:param filename: mapping file name
:return: dictionary of key to one-hot encoding; number of keys
"""
with open(filename) as f:
mapping = json.load(f)
return mapping, len(mapping)
def pvct(pv: float, compr_total: float):
""" Pore Volume times Total Compressibility
Parameters
---
pv : float
pore volume
compr_total : float
total compressibility
Return
pvct : float
pore volume total compressibility
"""
return pv*compr_total | 31c84e4dc94cb2f1c78c9e26ba02cec4c81f0800 | 38,022 |
def s_and(*args):
"""Logical and."""
result = True
for i in args:
result = result and i
if not result:
break
return result | 022f3a14e0430210a636828daf2056c653107c58 | 107,088 |
from typing import OrderedDict
def dictify(od):
"""Recursively replace OrderedDict with dict"""
if isinstance(od, OrderedDict):
return dict((k, dictify(v)) for k, v in od.items())
else:
return od | 886fc727dba02f9029945d5efa947d7573c3df19 | 624,913 |
def simple_format(num):
"""Takes a number and returns the simplest format for the number removing
all trailing 0's and the '.' if it's the trailing character.
>>> simple_format(123)
'123'
>>> simple_format(123.0)
'123'
>>> simple_format(123.01100)
'123.011'
"""
return ('%f' % num).rstrip('0').rstrip('.') | 1c8b27aa2f2d5059babe440a40fa1aeb067a59ec | 446,350 |
from typing import OrderedDict
def dict_sort(in_dict: dict) -> OrderedDict:
"""
Sort a dict by its values in descending order.
Args:
in_dict: dict, e.g. {"games": 132, "travel": 32}
Returns:
OrderedDict
"""
in_dict_sort = sorted(in_dict.items(), key=lambda x:x[1], reverse=True)
return OrderedDict(in_dict_sort) | b32187f78861c2f04c377030ecc45b1892ace708 | 674,947 |
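A quick usage sketch (illustrative values):
dict_sort({"games": 132, "travel": 32, "food": 77})
# OrderedDict([('games', 132), ('food', 77), ('travel', 32)])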
import math
def mutual_information(co_oc, oi, oj, n):
"""
:param co_oc: Number of co occurrences of the terms oi and oj in the corpus
:param oi: Number of occurrences of the term oi in the corpus
:param oj: Number of occurrences of the term oj in the corpus
:param n: Total number of words in the corpus
:return:
"""
e = (oi * oj)/n
return math.log2(co_oc/e) | 76c27295c7e757282573eab71f2bb7cfd3df74cb | 2,906 |
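A worked example with made-up corpus counts:
# e = (100 * 50) / 10000 = 0.5, so MI = log2(10 / 0.5) = log2(20) ≈ 4.32
mutual_information(co_oc=10, oi=100, oj=50, n=10000)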
def apply_rot_to_vec(rot, vec, unstack=False):
"""Multiply rotation matrix by a vector."""
if unstack:
x, y, z = [vec[:, i] for i in range(3)]
else:
x, y, z = vec
return [rot[0][0] * x + rot[0][1] * y + rot[0][2] * z,
rot[1][0] * x + rot[1][1] * y + rot[1][2] * z,
rot[2][0] * x + rot[2][1] * y + rot[2][2] * z] | 6e119bbac62f1ced346f6ac46abd69058aedf698 | 557,893 |
def get_cutoff_class(sample):
"""
Identify which set of cutoffs to use when classifying a given variant call
Arguments
---------
sample : namedtuple('Metrics')
Row from variant metrics table
Returns
-------
(source, svtype) : (str, str)
Key to cutoffs table
"""
if sample.sources == 'depth' and sample.svtype == 'DEL':
return ('depth', 'DEL')
elif sample.sources == 'depth' and sample.svtype == 'DUP':
return ('depth', 'DUP')
elif (sample.svtype in 'DEL DUP'.split() and sample.svsize >= 1000):
return ('pesr', 'CNV')
else:
return ('pesr', 'SV') | f4b6f37f912c638bf9fd70644658a699a4c051b6 | 564,072 |
import re
def original_image_extender(pipeline_index,
finder_image_urls,
extender_image_urls=[],
*args, **kwargs):
"""
Example:
http://media-cache-ec0.pinimg.com/70x/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg
http://media-cache-ec0.pinimg.com/236x/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg
http://media-cache-ec0.pinimg.com/736x/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg
to
http://media-cache-ec0.pinimg.com/originals/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg
"""
now_extender_image_urls = []
search_re = re.compile(r'.com/\d+x/', re.IGNORECASE)
for image_url in finder_image_urls:
if 'pinimg.com/' in image_url.lower():
if search_re.search(image_url):
extender_image_url = search_re.sub('.com/originals/', image_url, count=1)
now_extender_image_urls.append(extender_image_url)
output = {}
output['extender_image_urls'] = extender_image_urls + now_extender_image_urls
return output | 19c4c2f61cf027acbb5ffc75cd72bd2cc802cfab | 650,595 |
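A usage sketch with a sample Pinterest-style URL (pipeline_index is unused here, so any placeholder works):
result = original_image_extender(
None,
['http://media-cache-ec0.pinimg.com/236x/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg'])
result['extender_image_urls']
# ['http://media-cache-ec0.pinimg.com/originals/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg']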
def read_to_whitespace(fptr, track):
"""
Read, skipping over white space. Then read all non-whitespace until one
character of white space is consumed. Return the non-whitespace that was
read. If the end of the file is encountered, return any non-whitespace if
available, or an empty bytes string.
Enter: fptr: file-like object to read from.
track: a dictionary used to hold temporary values
Exit: val: non-whitespace string
"""
if 'data' in track:
if track['next'] < len(track['data']) - 1:
out = track['data'][track['next']]
track['next'] += 1
return out
data = track['data'][-1][:-1]
else:
data = b''
track['data'] = (data + fptr.read(1024 * 1024) + b'X').split()
out = track['data'][0]
track['next'] = 1
return out | 80d246f578af932830169306816acb5f0d6bfffb | 670,190 |
def field_add(field, vector):
""" Add the vector to the field.
Parameters
----------
field: array (x, y, z, 3)
an image of vectors.
vector: array (3, )
the vector that will be added to the field.
Returns
-------
field: array (x, y, z, 3)
the incremented image of vectors.
"""
field[..., 0] += vector[0]
field[..., 1] += vector[1]
field[..., 2] += vector[2]
return field | 06387b1c72b97a926c3e72a83f3c033921d013d7 | 97,439 |
from typing import Tuple
from typing import Mapping
from typing import Counter
def create_mappings(dataset_path: str) -> Tuple[Mapping, Mapping]:
"""Creates separate mappings to indices for entities and relations."""
# counters to have entities/relations sorted from most frequent
entity_counter = Counter()
relation_counter = Counter()
with open(dataset_path, "r") as f:
for line in f:
# -1 to remove newline sign
head, relation, tail = line[:-1].split("\t")
entity_counter.update([head, tail])
relation_counter.update([relation])
entity2id = {}
relation2id = {}
for idx, (mid, _) in enumerate(entity_counter.most_common()):
entity2id[mid] = idx
for idx, (relation, _) in enumerate(relation_counter.most_common()):
relation2id[relation] = idx
return entity2id, relation2id | e1724c485a4cef659d27f75b6e597892f54201af | 192,694 |
import json
def response(message, status_code):
"""Returns a dictionary object for an API Gateway Lambda integration
response.
:param message: Message for JSON body of response
:type message: str or dict
:param int status_code: HTTP status code of response
:rtype: dict
"""
if isinstance(message, str):
message = {'message': message}
return {
'isBase64Encoded': False,
'statusCode': status_code,
'body': json.dumps(message),
'headers': {'Content-Type': 'application/json'}
} | 4928c22b409e7d81f6b287758eb36e7583ba029d | 494,603 |
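A usage sketch of the Lambda proxy response helper:
response('created', 201)
# {'isBase64Encoded': False, 'statusCode': 201,
#  'body': '{"message": "created"}', 'headers': {'Content-Type': 'application/json'}}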
import math
def _estimate_group_size(encoding_size):
"""
Given an encoding size (e.g. 128 B), estimate the number of encodings that will likely
be under 100MiB in data including blocks. Note this is hopefully very conservative
in estimating the average number of blocks each record is in.
"""
network_transaction_size = 104857600 # 100MiB
blocks_per_record_estimate = 50
return math.ceil(network_transaction_size / ((blocks_per_record_estimate * 64) + (encoding_size + 4))) | 7983795c0c6f21212882c888649596d028c6cecf | 461,003 |
def nrow(data):
"""
Return the amount of rows in `data`.
This is a useful shorthand for `data.nrow` in contexts where you don't have
direct access to the data frame in question, e.g. in group-by-aggregate
>>> data = di.DataFrame.read_csv("data/listings.csv")
>>> data.group_by("hood").aggregate(n=di.nrow)
"""
return data.nrow | 6246110df7ff356183258d5232e369ec632c1a1a | 336,143 |
def find_ballot(ballot_num, unique_ballot_manifest):
"""
Find ballot among all the batches
Input
-----
ballot_num : int
a ballot number that was sampled
unique_ballot_manifest : dict
ballot manifest with unique IDs across batches
Returns
-------
tuple : (batch_label, position_in_batch), or None if the ballot is not found
"""
for batch, ballots in unique_ballot_manifest.items():
if ballot_num in ballots:
position = ballots.index(ballot_num) + 1
return (batch, position)
print("Ballot %i not found" % ballot_num)
return None | f7edb4e8d553d30684d608a44c58a675b10d7e9b | 124,722 |
def mass_aspect(self, truncate_ell=max):
"""Compute the Bondi mass aspect of the AsymptoticBondiData.
The Bondi mass aspect is given by
M = -ℜ{ψ₂ + σ ∂ₜσ̄}
Note that the last term is a product between two fields. If, for example, these both have
ell_max=8, then their full product would have ell_max=16, meaning that we would go from
tracking 81 modes to 289. This shows that deciding how to truncate the output ell is
important, which is why this function has the extra argument that it does.
Parameters
==========
truncate_ell: int, or callable [defaults to `max`]
Determines how the ell_max value of the output is determined. If an integer is passed,
each term in the output is truncated to have at most that ell_max. (In particular,
terms that will not be used in the output are simply not computed, without incurring any
errors due to aliasing.) If a callable is passed, it is passed on to the
spherical_functions.Modes.multiply method. See that function's docstring for details.
The default behavior will result in the output having ell_max equal to the largest of
any of the individual Modes objects in the equation for M above -- but not the
product.
"""
if callable(truncate_ell):
return -(self.psi2 + self.sigma.multiply(self.sigma.bar.dot, truncator=truncate_ell)).real
elif truncate_ell:
return -(
self.psi2.truncate_ell(truncate_ell)
+ self.sigma.multiply(self.sigma.bar.dot, truncator=lambda tup: truncate_ell)
).real
else:
return -(self.psi2 + self.sigma * self.sigma.bar.dot).real | 828a70adf812111ccd232a53e4947231e4da87d8 | 427,315 |
def itemize(x):
"""
Extract item from a list/tuple with only one item.
>>> itemize([3])
3
>>> itemize([3, 2, 1])
[3, 2, 1]
>>> itemize([])
[]
:param list|tuple x: An indexable collection
:return: Return item in collection if there is only one, else
returns the collection.
:rtype: object|list|tuple
"""
return x[0] if len(x) == 1 else x | b88473b3ee5e417b9dd50bb066a08998790c09df | 544,174 |
def aggregate(collection, pipeline):
"""Executes an aggregation on a collection.
Args:
collection: a `pymongo.collection.Collection` or
`motor.motor_tornado.MotorCollection`
pipeline: a MongoDB aggregation pipeline
Returns:
a `pymongo.command_cursor.CommandCursor` or
`motor.motor_tornado.MotorCommandCursor`
"""
return collection.aggregate(pipeline, allowDiskUse=True) | 03ea889ea23fb81c6a329ee270df2ac253e90d69 | 5,002 |
def invert_dictionary(aDict):
"""Transforms a dict so that keys become values and values become keys"""
return {v: k for k, v in aDict.items()} | a4ec169b2f8d0857d4b22191c8823b17a03fc954 | 631,073 |
def phie_gaymard(phid, phin):
"""Estimate the effective porosity using Gaymard-Poupon [1]_ method.
Parameters
----------
phid : array_like
Density porosity (porosity calculated using density log)
phin : int, float
Neutron porosity (porosity calculated using neutron log)
Returns
-------
phie : array_like
Effective porosity using Gaymard-Poupon method
References
----------
.. [1] Gaymard, R., and A. Poupon. "Response Of Neutron And Formation
Density Logs In Hydrocarbon Bearing Formations." The Log Analyst 9 (1968).
"""
phie = (0.5 * (phid*phid + phin*phin)) ** 0.5
return phie | 57c59836310baa180b42b265112dc283774759d8 | 455,779 |
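A quick numeric check (illustrative porosities):
phie_gaymard(0.20, 0.30)  # (0.5 * (0.04 + 0.09)) ** 0.5 ≈ 0.2550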
def as_flag(b):
"""Return bool b as a shell script flag '1' or '0'"""
if b:
return '1'
return '0' | 3caf3136b4845b7c6dec2e704744314101215399 | 268,543 |
from typing import OrderedDict
import random
def _downsample_dict(full_dict, num_samples):
"""Returns a down-sampled dictionary with a given number of samples in
each value list.
Args:
full_dict: A dictionary with lists as values to down-sample.
num_samples: The number of elements in each value list from full_dict
to keep.
Returns:
A dictionary with value lengths of num_samples.
"""
downsampled_dict = OrderedDict()
for key, value in full_dict.items():
if len(value) > num_samples:
downsampled_dict[key] = random.sample(value, k=num_samples)
else:
downsampled_dict[key] = value
return downsampled_dict | 4072a0f3cd1755b65a9070089d618992f76a582f | 194,810 |
from typing import Union
import copy
import collections
def dls_sort(orig: Union[dict, list, set]) -> Union[dict, list]:
"""
Given a nested dictionary, set or list, return a sorted version. Note that lists aren't sorted (they merely retain
their original order). Original data is unchanged.
:param orig: original dict or list (may or may not be sorted)
:return: sorted version of orig (note that we never return sets since they are unordered)
"""
orig = copy.deepcopy(orig)
if isinstance(orig, list):
return [dls_sort(e) for e in orig]
elif isinstance(orig, set):
# have to sort sets to be consistent since they have no order
return sorted(list(orig))
elif isinstance(orig, dict) or isinstance(orig, collections.OrderedDict):
sorted_dict = collections.OrderedDict()
for k in sorted(orig):
sorted_dict[k] = dls_sort(orig[k])
return sorted_dict
return orig | 5d77cf35b35d5f297ea16edfc472d87ce9030076 | 391,113 |
def get_util2d_shape_for_layer(model, layer=0):
"""
Define nrow and ncol for array (Util2d) shape of a given layer in
structured and/or unstructured models.
Parameters
----------
model : model object
model for which Util2d shape is sought.
layer : int
layer (base 0) for which Util2d shape is sought.
Returns
---------
(nrow,ncol) : tuple of ints
util2d shape for the given layer
"""
nr, nc, _, _ = model.get_nrow_ncol_nlay_nper()
if nr is None: # unstructured
nrow = 1
ncol = nc[layer]
else: # structured
nrow = nr
ncol = nc
return (nrow, ncol) | a56e00698b6d498800b895e83c84bed5b2ccc09d | 686,234 |
import re
def remove_pii_phone(text: str, replacement: str = '--PHONE--') -> str:
"""
Pass a string and return the text string with phone numbers removed
Parameters
----------
text : str
The text to replace phone numbers in
replacement : str
The text to replace phone numbers with
Returns
-------
replacement_text: str
A text string with the phone numbers removed and
replaced with specified text
Example
-------
text_with_phone = "Give me a call at 109-876-5432"
print(remove_pii_phone(text_with_phone))
> Give me a call at --PHONE--
"""
phone_regex_pattern = re.compile(
# core components of a phone number
r"(?:^|(?<=[^\w)]))(\+?1[ .-]?)?(\(?\d{3}\)?[ .-]?)?(\d{3}[ .-]?\d{4})"
# extensions, etc.
r"(\s?(?:ext\.?|[#x-])\s?\d{2,6})?(?:$|(?=\W))",
flags=re.UNICODE | re.IGNORECASE)
replacement_text = re.sub(phone_regex_pattern, replacement, text)
return replacement_text | 148b0564d2bcc0cae8de8d8f4e4c50fcdd221fc0 | 369,993 |
def normURLPath(path):
"""
Normalise the URL path by resolving segments of '.' and '..'.
"""
segs = []
pathSegs = path.split('/')
for seg in pathSegs:
if seg == '.':
pass
elif seg == '..':
if segs:
segs.pop()
else:
segs.append(seg)
if pathSegs[-1:] in (['.'],['..']):
segs.append('')
return '/'.join(segs) | 78abcc6c1cce17e5678baac10af96db508b3d57a | 647,490 |
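Examples of the segment resolution (hand-checked against the code above):
normURLPath('/a/b/../c/./d')  # '/a/c/d'
normURLPath('/a/b/..')        # '/a/'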
def requests_length(requests):
"""Total number of pageviews across all sessions."""
return len([r for r in requests if 'is_pageview' in r and r['is_pageview'] == 'true']) | 8980ebdf8fe71aa5ffcf3bcc54920d1b0c83cf65 | 53,410 |
import io
def write_pxi(filename, definitions):
"""
Write a cython include file (.pxi), `filename`, with the definitions in the
`definitions` mapping.
"""
with io.open(filename, mode='w', encoding='utf-8') as pxi_file:
for name, val in definitions.items():
pxi_file.write(u"DEF {name} = {val}\n".format(name=name, val=val))
return filename | 0b2c892afc3d639b0d48169bfbe35917c5494110 | 265,485 |
def init_tdl(model, inputs, labels, device):
"""
Initialize TDLs using input and label data.
:param model: NARX model
:param inputs: inputs
:param labels: labels
:param device: device type
:return: input, labels, input TDL, output TDL
"""
max_delay_size = max(model.input_delay_size, model.output_delay_size)
# input tap-delay
itdl = inputs[:, max_delay_size - model.input_delay_size: max_delay_size, :].to(device)
# output tap-delay
otdl = labels[:, max_delay_size - model.output_delay_size: max_delay_size, :].to(device)
# input and label data
input = inputs[:, max_delay_size:, :]
label = labels[:, max_delay_size:, :]
return input, label, itdl, otdl | fcec1fd661e8fe2ccf8cab76ae1399a0fe2b0b87 | 159,591 |
from typing import Any
def is_num(num: Any) -> bool:
"""Function that tests if object can be converted to number
A function that takes any input and detects if it is a number by
attempting to convert the input to a float. This function catches
convertable digit string cases.
Source:
https://stackoverflow.com/questions/354038
:param num: object data to determine if it is conceivably an number
:type num: Any
:return: a boolean determination if the input is a number
:rtype: bool
"""
if isinstance(num, complex):
if num.imag == 0:
num = num.real
else:
return False
elif isinstance(num, bool):
return False
try:
float(num)
return True
except (ValueError, TypeError):
return False
def count_crickMAX(args):
"""Count the number of sequences in the Crick fasta file"""
with open(args.crick, 'r') as crick_in:
count = 0
for line in crick_in:
if line.startswith('>'):
count +=1
return count | c4937613b917107f74aa6658719d3e6e243eebd2 | 32,289 |
def _get_nn_layers(spec):
"""
Returns a list of neural network layers if the model contains any.
Parameters
----------
spec: Model_pb
A model protobuf specification.
Returns
-------
[NN layer]
list of all layers (including layers from elements of a pipeline
"""
layers = []
if spec.WhichOneof('Type') == 'pipeline':
layers = []
for model_spec in spec.pipeline.models:
if not layers:
return _get_nn_layers(model_spec)
else:
layers.extend(_get_nn_layers(model_spec))
elif spec.WhichOneof('Type') in ['pipelineClassifier',
'pipelineRegressor']:
layers = []
for model_spec in spec.pipeline.models:
if not layers:
return _get_nn_layers(model_spec)
else:
layers.extend(_get_nn_layers(model_spec))
elif spec.neuralNetwork.layers:
layers = spec.neuralNetwork.layers
elif spec.neuralNetworkClassifier.layers:
layers = spec.neuralNetworkClassifier.layers
elif spec.neuralNetworkRegressor.layers:
layers = spec.neuralNetworkRegressor.layers
return layers | b992a9fab6b5e042fece06ae384f784f7ec38719 | 615,149 |
def get_func_id(func):
"""Get a quasi-identifier of a given function."""
return func.__qualname__ | 8aba6fdb53d5c1ba89e0e4443b919070516224fa | 641,397 |
from typing import Dict
def decode_internal_tx(raw_internal_tx: bytes) -> Dict:
"""
Decodes bytes representation of an internal transaction into a dictionary.
Args:
raw_internal_tx: Bytes representing an internal transaction.
Returns:
Internal transaction in dictionary form.
"""
internal_tx_items = raw_internal_tx.decode().split('\0')
internal_tx = {}
internal_tx['from'] = internal_tx_items[0]
internal_tx['to'] = internal_tx_items[1]
internal_tx['value'] = internal_tx_items[2]
internal_tx['input'] = internal_tx_items[3]
internal_tx['output'] = internal_tx_items[4]
internal_tx['traceType'] = internal_tx_items[5]
internal_tx['callType'] = internal_tx_items[6]
internal_tx['rewardType'] = internal_tx_items[7]
internal_tx['gas'] = internal_tx_items[8]
internal_tx['gasUsed'] = internal_tx_items[9]
internal_tx['transactionHash'] = internal_tx_items[10]
internal_tx['timestamp'] = internal_tx_items[11]
internal_tx['error'] = internal_tx_items[12]
return internal_tx | 44ab2b062d121f025a5bcde8ce48a49c576a758e | 457,935 |
def _ros_plot_pos(row, censorship, cohn):
"""
ROS-specific plotting positions.
Computes the plotting position for an observation based on its rank,
censorship status, and detection limit index.
Parameters
----------
row : pandas.Series or dict-like
Full observation (row) from a censored dataset. Requires a
'rank', 'detection_limit', and `censorship` column.
censorship : str
Name of the column in the dataframe that indicates that a
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
cohn : pandas.DataFrame
Dataframe of Cohn numbers.
Returns
-------
plotting_position : float
See also
--------
cohn_numbers
"""
DL_index = row['det_limit_index']
rank = row['rank']
censored = row[censorship]
dl_1 = cohn.iloc[DL_index]
dl_2 = cohn.iloc[DL_index + 1]
if censored:
return (1 - dl_1['prob_exceedance']) * rank / (dl_1['ncen_equal']+1)
else:
return (1 - dl_1['prob_exceedance']) + (dl_1['prob_exceedance'] - dl_2['prob_exceedance']) * \
rank / (dl_1['nuncen_above']+1) | 5111aa3223e1a2111c106b8711edc54cee9f878f | 352,995 |
import re
def is_requirements_txt(filename) -> bool:
"""Check if filename is of requirements.txt format
:param filename: filename
:return: True if filename is in requirements.txt format
"""
return bool(re.match(r'.*requirements.*\.txt$', filename))
from typing import Dict
from typing import List
import random
def get_shared_chores(chores: Dict, chore_group: List) -> List:
"""
Find chores that are shared and should be assigned to a pool of several users.
:param chores: All chores
:param chore_group: List of the categories that should be shared
:return: A randomized list of chores to be assigned
"""
shared_chores = []
for group in chores:
if group in chore_group:
for chore in chores[group]:
shared_chores.append((chore, group))
return random.sample(shared_chores, len(shared_chores)) | c45c7c24517922082e9740dc31da2e27e083d46b | 501,687 |
def A_tot_wkend_days_init(M, i, t, e):
"""
Initialize number of weekend days worked in the i'th pattern, tour type t,
and weekend type e.
:param M: Model
:param i: weekend worked pattern
:param t: tour type
:param e: weekend type (1=Sun and Sat, 2=Fri and Sat)
:return: number of weekend days worked over entire schedule cycle (int)
"""
if e == 1:
return sum(M.A_wkend_days[i, 1, w, t, e] + M.A_wkend_days[i, 7, w, t, e] for w in M.WEEKS)
else:
return sum(M.A_wkend_days[i, 6, w, t, e] + M.A_wkend_days[i, 7, w, t, e] for w in M.WEEKS) | f1ee32a30bd4a89c30148c3d0cdf95da6c5836ed | 620,954 |
def exc_info(space):
"""Return the (type, value, traceback) of the most recent exception
caught by an except clause in the current stack frame or in an older stack
frame."""
operror = space.getexecutioncontext().sys_exc_info()
if operror is None:
return space.newtuple([space.w_None,space.w_None,space.w_None])
else:
return space.newtuple([operror.w_type, operror.w_value,
space.wrap(operror.application_traceback)]) | 39a1515476267863832434ca738241bf40fa8812 | 282,290 |
def top_n_filter(peak_set, n=40):
"""Keep only the top `n` most abundant peaks in the spectrum.
Parameters
----------
peak_set : :class:`Iterable` of :class:`~.PeakBase`
The peaks to filter
n : int, optional
The maximum number of peaks to retain, by default 40
Returns
-------
list
The up-to-`n` most intense peaks, re-sorted by m/z.
"""
reduced_peaks = sorted(peak_set, key=lambda x: x.intensity, reverse=True)[:n]
reduced_peaks.sort(key=lambda x: x.mz)
return reduced_peaks | fc12934ccfc6163903f6a8286a3dc9f5219ad484 | 487,481 |
import base64
def get_basic_authorization(request):
"""Gets basic auth username and password
Parameters
----------
request : A django request obj
Returns
-------
auth_username : base64 decoded
auth_password : base64 decoded
"""
http_auth = request.META.get('HTTP_AUTHORIZATION')
if not http_auth:
raise ValueError('http authorization missing')
(auth_method, auth) = http_auth.split(None, 1)
if auth_method.lower() != "basic":
raise ValueError('bad http authorization method')
try:
auth_username, auth_password = base64.b64decode(auth.strip()).decode().split(':')
except Exception:
raise ValueError('bad authorization encoding')
return auth_username, auth_password | 3ea69f5b8a296bce9e03397b279d88ae577f795b | 423,394 |
def all_indices(s, substr):
"""
Find all indices in ``s`` where ``substr`` begins.
Returns results in list.
"""
indices = []
i = s.find(substr)
while i >= 0:
indices.append(i)
i = s.find(substr, i+1)
return indices | 93d199f07f923ff89c87a642edb6ec3cfb99dfbb | 552,927 |
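For example:
all_indices('banana', 'an')  # [1, 3]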
def ints_to_rgb(r, g=None, b=None):
"""Convert ints in the [0...255] range to the standard [0...1] range.
Parameters:
:r:
The Red component value [0...255]
:g:
The Green component value [0...255]
:b:
The Blue component value [0...255]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % ints_to_rgb((255, 128, 0))
'(1, 0.501961, 0)'
"""
if isinstance(r, (list, tuple)):
r, g, b = r
return tuple(float(v) / 255.0 for v in [r, g, b]) | d24e336f13beb3538428724ce56532f1efb8fb6f | 618,197 |
def in_static_dir(filepath, static_dirs):
"""See if filepath is contained within a directory contained in
static_dirs."""
for directory in static_dirs:
if filepath.startswith(directory):
return True
else:
return False | 4f5c7357cefb6de3a3a5cd53fc8e154b12e7c418 | 490,857 |
def _create_query_dict(query_text):
"""
Create a dictionary with query key:value definitions
query_text is a comma delimited key:value sequence
"""
query_dict = dict()
if query_text:
for arg_value_str in query_text.split(','):
if ':' in arg_value_str:
arg_value_list = arg_value_str.split(':')
query_dict[arg_value_list[0].strip()] = arg_value_list[1].strip()
return query_dict | 2e4478bdf110911d4ca9fcc6c409aab3504a0b8a | 8,510 |
import binascii
def shinglify(clean_text):
"""
Generates list of 'shingles': crc sums of word subsequences of default length
:param clean_text: cleaned text to calculate shingles sequence.
:return: shingles sequence
"""
shingle_length = 3
result = []
for idx in range(len(clean_text) - shingle_length + 1):
result.append(
binascii.crc32(
bytes(
u' '.join(
[word for word in clean_text[idx:idx+shingle_length]]
),
'utf-8'
)
)
)
return result | aedd17f3eed48aaf1cbeb0bbe34103d594d6428b | 669,687 |
def layer_sizes(X, Y):
"""
Define the network structure.
Args:
X: input data
Y: output values
Return:
n_x: number of input-layer nodes
n_h: number of hidden-layer nodes
n_y: number of output-layer nodes
"""
n_x = X.shape[0] # input layer size (node count)
n_h = 4
n_y = Y.shape[0] # output layer size (node count)
return (n_x, n_h, n_y) | 62f6d0f3a1cb772091fce3657d3f158bc86bd5a1 | 262,377 |
def label_continues_or_empty(seq, previous_label):
"""Returns True if seq continues previous_label or 'O' (that is no new
tags starting)."""
seq = list(seq) # copy
if previous_label.startswith('B-'):
continuation_label = 'I-' + previous_label[2:]
elif previous_label.startswith('I-'):
continuation_label = previous_label
else:
continuation_label = 'O'
while seq and seq[0] == continuation_label:
seq.pop(0)
return all(x == 'O' for x in seq) | d3a3c198ff8ba1a6e9be60920ab0cff380f9a5e0 | 278,215 |
import socket
def get_host_name() -> str:
"""Return the name of the current host"""
return socket.gethostname() | 70958a59576a20da2fc053f8ff1e8d7f81bf1ba4 | 143,575 |
def binary_scores_from_counts(ntp, nfp, ntn, nfn):
"""
Precision, recall, and F1 scores from counts of TP, FP, TN, FN.
Example usage:
p, r, f1 = binary_scores_from_counts(*map(len, error_sets))
"""
prec = ntp / float(ntp + nfp) if ntp + nfp > 0 else 0.0
rec = ntp / float(ntp + nfn) if ntp + nfn > 0 else 0.0
f1 = (2 * prec * rec) / (prec + rec) if prec + rec > 0 else 0.0
return prec, rec, f1 | cbdff276e5ee765c5154f2632b5a2edb35b72984 | 329,272 |
def plural(word: str, i: int) -> str:
"""Returns either "word" or "words" based on the number."""
return f"{word}{'' if i == 1 else 's'}" | 14efa098cc0a3af809194a5a88b806e6fc9c7b1a | 325,081 |
import random
import string
def rndstr(size=16, alphabet=""):
"""
Returns a string of random ascii characters or digits
:param size: The length of the string
:return: string
"""
rng = random.SystemRandom()
if not alphabet:
alphabet = string.ascii_letters[0:52] + string.digits
return type(alphabet)().join(rng.choice(alphabet) for _ in range(size)) | 03325a4ff88300d42dcbd7b1b58ba001e3581b5a | 392,941 |
def build_table(x,
perceiver,
cache,
cache_emb,
topk,
return_images=False,
index_dir=None):
"""
Maps each image to a linearized row in a table. Each entry in a row
is delimited by "|". Each entry comes from the topk results in the cache
as determined by cosine similarity in CLIP space.
"""
table = []
x = perceiver.encode_image(x).float()
x /= x.norm(dim=-1, keepdim=True)
if index_dir:
indices = cache_emb.search(x.cpu().numpy(), topk)[1]
for idx in range(len(x)):
row = ''
results = [cache[i] for i in indices[idx]]
for r in results:
row += r + ' | '
table.append(row)
else:
#print(x.shape)
#print(cache_emb.shape)
ipt = 100.0 * x.float() @ cache_emb.T.float()
similarity = (ipt).softmax(dim=-1)
for idx in range(len(x)):
row = ''
values, indices = similarity[idx].topk(topk)
for _, index in zip(values, indices):
row += cache[index] + ' | '
table.append(row)
if return_images:
return table, x
return table | 5822dbe6f54a0d582bc19785cf9b1d49a4ac5eda | 677,960 |
def FindRecentBuilds(ab_client, branch, target,
build_type='submitted',
build_attempt_status=None,
build_successful=None):
"""Queries for the latest build_ids from androidbuild.
Args:
ab_client: The androidbuild API client.
branch: The name of the git branch.
target: The name of the build target.
build_type: (Optional) The type of the build, defaults to 'submitted'.
build_attempt_status: (Optional) Status of attempt, use 'complete' to look
for completed builds only.
build_successful: (Optional) Whether to only return successful builds.
Returns:
A list of numeric build_ids, sorted from most recent to oldest (in reverse
numerical order.)
"""
kwargs = {
'branch': branch,
'target': target,
}
if build_type is not None:
kwargs['buildType'] = build_type
if build_attempt_status is not None:
kwargs['buildAttemptStatus'] = build_attempt_status
if build_successful is not None:
kwargs['successful'] = build_successful
builds = ab_client.build().list(**kwargs).execute().get('builds')
# Extract the build_ids, convert to int, arrange newest to oldest.
return sorted((int(build['buildId']) for build in builds), reverse=True) | 089dcb868be50d60dbebe912fe8f68dc73a28c40 | 102,366 |
import torch
def monte_carlo_pose_loss(pose_sample_logweights, cost_target):
"""
Args:
pose_sample_logweights: Shape (mc_samples, num_obj)
cost_target: Shape (num_obj, )
Returns:
Tensor: Shape (num_obj, )
"""
loss_tgt = cost_target
loss_pred = torch.logsumexp(pose_sample_logweights, dim=0) # (num_obj, )
loss_pose = loss_tgt + loss_pred # (num_obj, )
loss_pose[loss_pose.isnan()] = 0
return loss_pose | 93f599ecc04178fe982d88ce39d0bd846534e4fb | 152,970 |
def get_rules(view):
"""Get auto-popup scope rule."""
rules = view.settings().get("color_helper.scan")
return rules if rules is not None and rules.get("enabled", False) else None | 1e04d93514785af0bb569c6b7ea7a8b2458c0175 | 590,529 |
def count_bags_inside(bag_name, rules):
"""Count total number of bags in bag_name."""
if not rules[bag_name]:
return 0
count = 0
for inside_bag, number_of_bags in rules[bag_name].items():
count += (number_of_bags + number_of_bags
* count_bags_inside(inside_bag, rules))
return count | 68dcc6e6b400e1d4bc9c8fdd6ecf71a24caedaee | 171,272 |
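A small worked example in the style this rule dict implies (hypothetical bag names):
rules = {'shiny gold': {'dark red': 2}, 'dark red': {'dark blue': 3}, 'dark blue': {}}
count_bags_inside('shiny gold', rules)  # 2 + 2 * 3 = 8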
def comma_separated_positions(text):
"""
Start and end positions of comma separated text items.
Commas and trailing spaces should not be included.
>>> comma_separated_positions("ABC, 2,3")
[(0, 3), (5, 6), (7, 8)]
"""
chunks = []
start = 0
end = 0
for item in text.split(","):
space_increment = 1 if item[0] == " " else 0
start += space_increment # Is there a space after the comma to ignore? ", "
end += len(item.lstrip()) + space_increment
chunks.append((start, end))
start += len(item.lstrip()) + 1 # Plus comma
end = start
return chunks | 6203ac8839be7718742426474ec093cdce5b58c3 | 101,123 |
def geometry_mismatch(shape1, shape2):
"""Compute relative mismatch of two shapes"""
return shape1.symmetric_difference(shape2).area / shape1.union(shape2).area | 0e56b8c8d8d030dd63d2176ad3f336d85951244b | 476,590 |
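A usage sketch assuming shapely geometries (the function only needs .area, .symmetric_difference and .union):
from shapely.geometry import box
geometry_mismatch(box(0, 0, 1, 1), box(0.5, 0, 1.5, 1))  # 1.0 / 1.5 ≈ 0.667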