content | id
---|---
def flatten(input_ds, output_ds, lax_mode=False, _copy_data=True, copy_slices=None):
"""Flatten an input NetCDF dataset and write the result in an output NetCDF dataset.
    For variables that are too big to fit in memory, the optional "copy_slices" input allows copying some or all of
    the variables in slices.
    :param input_ds: input netcdf4 dataset
    :param output_ds: output netcdf4 dataset
    :param lax_mode: if false (default), not resolving a reference halts the execution. If true, continue with warning.
    :param _copy_data: if true (default), then all data arrays are copied from the input to the output dataset.
        If false, then this does not happen.
        Use this option *only* if the data arrays of the flattened dataset are never to be accessed.
        If false then consider setting the fill mode for the output netcdf4 dataset to "off" for improved performance.
    :param copy_slices: dictionary containing variable_name: shape pairs, where variable_name is the path to the
        variable name in the original Dataset (for instance /group1/group2/my_variable), and shape is either None for
        using the default slice value, or a custom slicing shape in the form of a tuple of the same dimension as the
        variable (for instance (1000,2000,1500,) for a 3-dimensional variable). If a variable from the Dataset is not
        contained in the dict, it will not be sliced and will be copied normally.
"""
_Flattener(input_ds, lax_mode, _copy_data=_copy_data, copy_slices=copy_slices).flatten(output_ds)
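
# --- Hedged usage sketch (not part of the original source) ---
# Flatten a grouped NetCDF file into a flat one. Assumes the netCDF4 package is
# installed; "grouped.nc", "flat.nc" and "/group1/big_var" are hypothetical
# names. The 2-D variable "/group1/big_var" is copied in (1000, 2000) slices to
# limit memory use; all other variables are copied whole.
from netCDF4 import Dataset

with Dataset("grouped.nc", "r") as input_ds, \
        Dataset("flat.nc", "w", format="NETCDF4") as output_ds:
    flatten(input_ds, output_ds,
            lax_mode=True,
            copy_slices={"/group1/big_var": (1000, 2000)})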
| 5,351,000 |
def validate(i):
"""
Input: {
model_name - model name:
earth
lm
nnet
party
randomforest
rpart
svm
model_file - file with model (object) code
features_table - features table (in experiment module format)
(keep_temp_files) - if 'yes', keep temp files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
prediction_table - experiment table with predictions
}
"""
import os
import pickle
mn=i['model_name']
mf=i['model_file']
mf1=i['model_file']+'.model.obj'
mf7=i['model_file']+'.model.decision_tree.json'
ftable=i['features_table']
ktf=i.get('keep_temp_files','')
lftable=len(ftable)
# Convert categorical features to floats
r=convert_categories_to_floats({'table':ftable})
if r['return']>0: return r
fconv=r['conv']
fconv1=r['conv1']
ftable1=r['table']
lt=[]
# Load model object
f=open(mf1, 'rb')
clf=pickle.load(f)
f.close()
sx=''
#############################################################
if mn=='dtc' or mn=='dtr':
from sklearn import tree
pr=clf.predict(ftable1)
# Check if CK decision tree file exists
if os.path.isfile(mf7):
r=ck.load_json_file({'json_file':mf7})
if r['return']>0: return r
labels=r['dict']
prx=[]
q=-1
for ft in ftable1:
q+=1
found=False
value=False
for label in labels:
p=labels[label]
dd=p['decision']
dv=p['value']
skip=False
for k in range(0,len(dd),2):
x=dd[k]
y=dd[k+1]
yc=y['comparison']
yf=int(y['feature'])
yv=float(y['value'])
if yc!='<=': return {'return':1, 'error':'not yet supported condition '+yc+' in decision tree'}
if x=='':
if not ft[yf]<=yv: skip=True
else:
if ft[yf]<=yv: skip=True
if skip: break
if not skip:
found=True
if dv=='true': value=True
else: value=False
break
if not found:
return {'return':1, 'error':'decision tree is incomplete'}
lt.append(label)
# print '**********'
# for z in range(0, len(ftable1[q])):
# zx=ftable1[q][z]
# print 'X['+str(z)+']='+str(zx)
else:
return {'return':1, 'error':'model name '+mn+' is not found in module model.sklearn'}
pr1=[]
for q in pr:
pr1.append([q])
lt1=[]
for q in lt:
lt1.append([q])
return {'return':0, 'prediction_table':pr1, 'label_table':lt1}
| 5,351,001 |
def cluster_absent(
name='localhost',
quiet=None):
"""
Machine is not running as a cluster node
quiet:
execute the command in quiet mode (no output)
"""
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if __salt__['crm.status']():
ret['result'] = True
ret['comment'] = 'Cluster is already not running'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Cluster node {} would be removed'.format(name)
ret['changes']['name'] = name
return ret
try:
# Here starts the actual process
result = __salt__['crm.cluster_remove'](
host=name,
force=True,
quiet=quiet)
if result:
ret['changes']['name'] = name
ret['comment'] = 'Error removing cluster node'
ret['result'] = False
return ret
ret['changes']['name'] = name
ret['comment'] = 'Cluster node removed'
ret['result'] = True
return ret
except exceptions.CommandExecutionError as err:
ret['comment'] = six.text_type(err)
return ret
| 5,351,002 |
def link_symbols_in_code_blocks(path, blocks, symbols):
"""Link symbols appearing a sequence of blocks."""
return [link_symbols_in_code_block(path, block, symbols)
for block in blocks]
| 5,351,003 |
def cem_model_factory(
env, network=mlp, network_params={},
input_shape=None,
min_std=1e-6, init_std=1.0, adaptive_std=False,
model_file_path=None, name='cem'):
"""
    Model factory for the cross-entropy method (CEM)
"""
def build_graph(model, network=network,
input_shape=input_shape,
network_params=network_params):
policy = make_policy(
env, 'pi', model, network_params=network_params,
input_shape=input_shape,
init_std=init_std, adaptive_std=adaptive_std,
min_std=min_std, network=network)
model['policy'] = policy
model.add_output_node(policy.distribution.output_node)
var_list = policy.get_trainable_variables()
shapes = map(tf_utils.var_shape, var_list)
total_size = sum(np.prod(shape) for shape in shapes)
model['theta'] = tf.placeholder(tf.float32, [total_size])
var_list = policy.get_trainable_variables()
model['gf'] = tf_utils.flatten_vars(var_list)
model['sff'] = tf_utils.setfromflat(var_list, model['theta'])
if model_file_path is not None:
return Model.load(model_file_path, name)
return Model(env, build_graph,
empty_feed_dict, name=name)
| 5,351,004 |
def get_job(job_id: UUID) -> Job:
"""
Get job by ID.
Args:
job_id (UUID): ID of the job to be returned.
Returns:
Job
"""
return JobRepository.get_one_by_id(model_id=job_id)
| 5,351,005 |
def set_job_dirs():
"""Sets job directories based on env variables set by Vertex AI."""
model_dir = os.getenv('AIP_MODEL_DIR', LOCAL_MODEL_DIR)
if model_dir[0:5] == 'gs://':
model_dir = model_dir.replace('gs://', '/gcs/')
checkpoint_dir = os.getenv('AIP_CHECKPOINT_DIR', LOCAL_CHECKPOINT_DIR)
if checkpoint_dir[0:5] == 'gs://':
checkpoint_dir = checkpoint_dir.replace('gs://', '/gcs/')
return model_dir, checkpoint_dir
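
# --- Hedged usage sketch (not part of the original source) ---
# Assumes the module-level LOCAL_MODEL_DIR / LOCAL_CHECKPOINT_DIR defaults that
# the function references are defined; "my-bucket" is a hypothetical bucket.
# Vertex AI normally sets the AIP_* variables itself; they are set here only to
# show the gs:// -> /gcs/ rewrite for the Cloud Storage FUSE mount.
os.environ['AIP_MODEL_DIR'] = 'gs://my-bucket/model'
os.environ['AIP_CHECKPOINT_DIR'] = 'gs://my-bucket/checkpoints'
model_dir, checkpoint_dir = set_job_dirs()
print(model_dir)       # -> /gcs/my-bucket/model
print(checkpoint_dir)  # -> /gcs/my-bucket/checkpoints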
| 5,351,006 |
def localize_datetime(input_df, timezone=DEFAULT_TIMEZONE,
tms_gmt_col=DEFAULT_TMS_GMT_COL):
"""
Convert datetime column from UTC to another timezone.
"""
tmz = pytz.timezone(timezone)
df = input_df.copy()
return (df.set_index(tms_gmt_col)
.tz_localize(pytz.utc) # UTC time
.tz_convert(tmz))
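
# --- Hedged usage sketch (not part of the original source) ---
# Assumes pandas/pytz and the module-level DEFAULT_* constants the function
# relies on are available; the column name "tms_gmt" is hypothetical.
import pandas as pd

df = pd.DataFrame({
    'tms_gmt': pd.to_datetime(['2021-01-01 12:00', '2021-01-01 18:00']),
    'value': [1, 2],
})
local = localize_datetime(df, timezone='Europe/Paris', tms_gmt_col='tms_gmt')
print(local.index.tz)  # Europe/Paris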
| 5,351,007 |
def get_available_port() -> int:
"""Finds and returns an available port on the system."""
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
_, port = sock.getsockname()
return int(port)
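
# Example usage: ask the OS for a free ephemeral port. The port is only
# guaranteed free at the moment of the call, so bind to it promptly; another
# process could claim it in the meantime.
port = get_available_port()
print(f"using port {port}")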
| 5,351,008 |
def prettyprint_float(val, digits):
"""Print a floating-point value in a nice way."""
format_string = "%." + f"{digits:d}" + "f"
return (format_string % val).rstrip("0").rstrip(".")
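
# A few illustrative calls; trailing zeros and a dangling decimal point are
# stripped from the fixed-point representation.
print(prettyprint_float(3.14159, 2))  # "3.14"
print(prettyprint_float(2.5000, 3))   # "2.5"
print(prettyprint_float(3.0, 2))      # "3"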
| 5,351,009 |
def year_page(archive_id: str, year: int) -> Any:
"""
Get year page for archive.
Parameters
----------
    archive_id : str
Must be an arXiv archive identifier.
year: int
Must be a two or four digit year.
Returns
-------
dict
Search result response data.
int
HTTP status code.
dict
Headers to add to the response.
"""
thisYear = date.today().year
if year is None:
year = thisYear
if year > thisYear:
# 307 because year might be valid in the future
return {}, status.HTTP_307_TEMPORARY_REDIRECT, {'Location': '/'}
if year < 100:
if year >= 91:
year = 1900 + year
else:
year = 2000 + year
if archive_id not in taxonomy.ARCHIVES:
raise BadRequest("Unknown archive.")
else:
archive = taxonomy.ARCHIVES[archive_id]
listing_service = get_listing_service()
month_listing = listing_service.monthly_counts(archive_id, year)
for month in month_listing['month_counts']:
month['art'] = ascii_art_month(archive_id, month) # type: ignore
month['yymm'] = f"{month['year']}-{month['month']:02}" # type: ignore
month['url'] = url_for('browse.list_articles', # type: ignore
context=archive_id,
subcontext=f"{month['year']}{month['month']:02}")
response_data: Dict[str, Any] = {
'archive_id': archive_id,
'archive': archive,
'months': month_listing['month_counts'],
'listing': month_listing,
'year': str(year),
'stats_by_year': stats_by_year(archive_id, archive, years_operating(archive), year)
}
response_headers: Dict[str, Any] = {}
response_status = status.HTTP_200_OK
return response_data, response_status, response_headers
| 5,351,010 |
def test_delete_artifact_store_works(tmp_path: str) -> None:
"""Test delete_artifact_store works as expected."""
Repo.init(tmp_path)
Repository.init_repo(str(tmp_path))
repo = Repository(str(tmp_path))
local_service = repo.get_service()
artifact_store_dir = os.path.join(tmp_path, "test_store")
local_artifact_store = LocalArtifactStore(
path=artifact_store_dir, repo_path=repo.path
)
local_service.register_artifact_store(
"local_artifact_store_2", local_artifact_store
)
local_service.delete_artifact_store("local_artifact_store_2")
with pytest.raises(DoesNotExistException):
local_service.get_artifact_store("local_artifact_store_2")
| 5,351,011 |
def vert_polyFit2(data, z, bin0, step=1, deg=2):
"""
Trying to use the vertical polynomial fit to clean up the data
    (not really sure about what I'm doing here though)
"""
data = np.squeeze(data)
z = np.squeeze(z)
dz = np.nanmean(np.gradient(np.squeeze(z)))
bin1 = int(np.ceil(bin0/dz))
fits = []
zFits = []
bins = []
for i in range(len(z)):
if 2*i+bin1 < len(z):
bins.append(np.arange(i,2*i+bin1+1))
mask = np.isfinite(data[i:2*i+bin1])
dataIn = data[i:2*i+bin1]
zIn = z[i:2*i+bin1]
dataIn = dataIn[mask]
if dataIn.size == 0:
fits.append(np.nan)
zFits.append(np.nan)
else:
zIn = zIn[mask]
zFits.append(np.nanmean(z[i:2*i+bin1]))
                # use numpy's polyfit/polyval (the scipy.polyfit alias was removed in SciPy 1.8+)
                p = np.polyfit(zIn, dataIn, deg)
                fits.append(np.nanmean(np.polyval(p, z[i:2*i+bin1][mask])))
fits = np.hstack(fits)
zFits = np.hstack(zFits)
mask2 = np.isfinite(fits)
P = scipy.interpolate.interp1d(zFits[mask2], fits[mask2], fill_value='extrapolate')
fitrev = P(z)
return fitrev
| 5,351,012 |
def is_downloadable(url):
"""
Does the url contain a downloadable resource
"""
h = requests.head(url, allow_redirects=True)
    header = h.headers
    content_type = header.get('content-type', '')
    print(content_type)
if 'text' in content_type.lower():
return False
if 'html' in content_type.lower():
return False
return True
| 5,351,013 |
def _main(argv):
"""Command-line interface."""
parser = ArgumentParser()
parser.add_argument('--quiet', action='store_const', default=False,
const=True,
help='Do not display debugging messages')
parser.add_argument('--dbfile', nargs=1, default='imdb.zip',
help='Database file')
parser.add_argument('--rebuild-db', nargs=1, metavar='DIR',
help='Rebuild the database file from IMDb data files')
parser.add_argument('--search', nargs='*',
help='Search the database')
for argname in SUPPORTED_ARGS:
parser.add_argument('--' + argname.replace('_', '-'), nargs='*',
metavar='TITLE',
help='Display ' + argname.replace('_', ' '))
parser.add_argument('--all', nargs='*', metavar='TITLE',
help='Display all information')
if len(argv) == 0:
parser.error('nothing to do.')
args = parser.parse_args(argv)
iface = IMDb(dbfile=args.dbfile, # Database filename
debug=not args.quiet)
if args.rebuild_db:
iface.rebuild_index(args.rebuild_db[0])
titles = []
if args.search:
queries = []
check_for_year = False
for query in args.search:
if check_for_year:
try:
iquery = int(query)
except ValueError:
pass
else:
if iquery > 1850 and iquery < 2100:
queries[-1][1] = iquery
check_for_year = False
continue
queries.append([query, None])
check_for_year = True
print "Search results:"
for query, year in queries:
results = iface.search(query, year=year)
for title, score in results:
print " %s (%s)" % (title, str(score))
if len(results) > 0:
titles.append(results[0][0])
        print('')
for argname in SUPPORTED_ARGS:
argval = args.all if args.all is not None else getattr(args, argname)
if argval is None:
continue
my_titles = [IMDbTitle(i) for i in argval]
if not my_titles:
my_titles = titles
# Populate the requested information
populator = getattr(iface, 'populate_' + argname)
populator(my_titles)
# Print the information
for title in my_titles:
print u"%s for %s:" % (argname.title().replace('_', ' '), title)
val = getattr(title, argname)
if val is None:
val = u'(None)'
elif argname == 'rating':
val = u"%s/10, %7s votes" % (val.score, val.nratings)
elif argname == 'plot':
val = val.summary
# if val.byline: val += u" (by %s)" % (val.byline,)
elif argname == 'genres':
val = u", ".join(val)
elif argname == 'running_time':
val = u'%3d minutes' % val
elif argname == 'cast' or argname == 'writers' or \
argname == 'directors':
val = u"\n ".join(str(i) for i in val)
print u" %s" % (val,)
print ''
| 5,351,014 |
def dataframe_is_one_query_target_pair(dataframe):
"""
make sure there is only one query sequence and reference sequence in the
given dataframe. Used to check that we aren't aggregating % identity
numbers across bin alignment pairs.
:param dataframe:
:return:
"""
num_query_bins = len(dataframe['query bin'].unique())
num_ref_bins = len(dataframe['ref bin'].unique())
    if not num_query_bins == 1:
        print("Dataframe has a mix of {} query bins: {}".format(
            num_query_bins, dataframe['query bin'].unique()))
    if not num_ref_bins == 1:
        print("Dataframe has a mix of {} reference bins: {}".format(
            num_ref_bins, dataframe['ref bin'].unique()))
if (num_query_bins == 1) & (num_ref_bins == 1):
return True
else:
return False
| 5,351,015 |
def update_api_key(
self,
name: str,
permission: str,
expiration: int,
active: bool,
key: str = None,
description: str = None,
ip_list: str = None,
) -> bool:
"""Update existing API key on Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - apiKey
- PUT
- /apiKey/{name}
:param name: API Key Name
:type name: str
:param permission: API Key privileges. Allowed values are
'net_read_write' for RW and 'net_read" for RO
:type permission: str
:param expiration: API Key expiration in UNIX timestamp. Key will
automatically become inactive on expiration date.
:type expiration: int
:param active: API Key state is active (True) or inactive (False).
Inactive keys cannot be used to make requests.
:type active: bool
:param key: API Key value, defaults to None
:type key: str, optional
:param description: API Key description, defaults to None
:type description: str, optional
    :param ip_list: List of allowed IPs to make requests with this API
        Key. Leave blank to allow all IPs, defaults to None
    :type ip_list: str, optional
:return: Returns True/False based on successful call
:rtype: bool
"""
api_key_entry = {
"name": name,
"permission": permission,
"expiration": expiration,
"active": active,
}
if key is not None:
api_key_entry["key"] = key
if description is not None:
api_key_entry["description"] = description
if ip_list is not None:
api_key_entry["ip_list"] = ip_list
return self._put(
"/apiKey/{}".format(name),
data=api_key_entry,
expected_status=[204],
return_type="bool",
)
| 5,351,016 |
def cleanGender(x):
"""
    This is a helper function that will help clean up the gender variable.
"""
if x in ['female', 'mostly_female']:
return 'female'
if x in ['male', 'mostly_male']:
return 'male'
if x in ['couple'] :
return 'couple'
else:
return 'unknownGender'
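
# Example: map raw gender guesses onto the four cleaned categories.
print([cleanGender(x) for x in ['mostly_female', 'male', 'couple', 'andy']])
# ['female', 'male', 'couple', 'unknownGender']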
| 5,351,017 |
def get_servers():
"""
Retrieve all the discord servers in the database
:return: List of servers
"""
session = Session()
servers = session.query(Server).all()
return servers
| 5,351,018 |
def wait_till_postgres_responsive(url):
"""Check if something responds to ``url`` """
engine = sa.create_engine(url)
conn = engine.connect()
conn.close()
return True
| 5,351,019 |
def get_string_display(attr1, attr2, helper1, helper2, attribute_mode):
"""
get the attribute mode for string
attribute mode can be:
'base', 'full', 'partial', 'masked'
    Note that some attributes do not have a partial mode; in that case, partial mode will return the masked mode.
    Remember to call the has_partial_mode function to check whether a partial mode actually exists!
Example:
Input:
attr1: '1000151475'
attr2: '1000151575'
helper1: '*******4**'
helper2: '*******5**'
attribute_mode: 'partial'
Output:
['*******<span style="color:red">4</span>**', '*******<span style="color:red">5</span>**']
"""
if attribute_mode == 'base':
attr1_display = attr1
attr2_display = attr2
return [attr1_display, attr2_display]
elif attribute_mode == 'full':
if not attr1 or not attr2:
if not attr1:
attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
else:
attr1_display = attr1
if not attr2:
attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
else:
attr2_display = attr2
else:
if '*' not in helper1 and '*' not in helper2:
attr1_display = attr1
attr2_display = attr2
else:
attr1_display = ''
attr2_display = ''
i = 0
j = 0
k = 0
while k < len(helper1):
if helper1[k] == '*':
attr1_display += attr1[i]
attr2_display += attr2[j]
k += 1
i += 1
j += 1
elif k+1 < len(helper1) and i+1 < len(attr1) and j+1 < len(attr2) and \
helper1[k] not in ['*', '_', '?'] and helper1[k+1] not in ['*', '_', '?'] and attr1[i] == attr2[j+1] and attr1[i+1] == attr2[j]:
attr1_display += '<span class="transpose_text">' + attr1[i] + attr1[i+1] + '</span>'
attr2_display += '<span class="transpose_text">' + attr2[j] + attr2[j+1] + '</span>'
k += 2
i += 2
j += 2
elif helper1[k] == '_' or helper1[k] == '?':
attr2_display += '<span class="indel_text">' + attr2[j] + '</span>'
k += 1
j += 1
elif helper2[k] == '_' or helper2[k] == '?':
attr1_display += '<span class="indel_text">' + attr1[i] + '</span>'
k += 1
i += 1
else:
attr1_display += '<span class="replace_text">' + attr1[i] + '</span>'
attr2_display += '<span class="replace_text">' + attr2[j] + '</span>'
k += 1
i += 1
j += 1
return [attr1_display, attr2_display]
elif attribute_mode == 'partial':
if not attr1 or not attr2:
if not attr1:
attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
else:
attr1_display = '*'*len(attr1)
if not attr2:
attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
else:
attr2_display = '*'*len(attr2)
else:
if '*' not in helper1 and '*' not in helper2:
attr1_display = len(attr1)*'@'
attr2_display = len(attr2)*'&'
elif helper1 == helper2:
attr1_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
attr2_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
else:
attr1_display = ''
attr2_display = ''
i = 0
j = 0
k = 0
while k < len(helper1):
if helper1[k] == '*':
attr1_display += '*'
attr2_display += '*'
k += 1
i += 1
j += 1
elif k+1 < len(helper1) and i+1 < len(attr1) and j+1 < len(attr2) and \
helper1[k] not in ['*', '_', '?'] and helper1[k+1] not in ['*', '_', '?'] and attr1[i] == attr2[j+1] and attr1[i+1] == attr2[j]:
attr1_display += '<span class="transpose_text">' + attr1[i] + attr1[i+1] + '</span>'
attr2_display += '<span class="transpose_text">' + attr2[j] + attr2[j+1] + '</span>'
k += 2
i += 2
j += 2
elif helper1[k] == '_' or helper1[k] == '?':
attr2_display += '<span class="indel_text">' + attr2[j] + '</span>'
k += 1
j += 1
elif helper2[k] == '_' or helper2[k] == '?':
attr1_display += '<span class="indel_text">' + attr1[i] + '</span>'
k += 1
i += 1
else:
attr1_display += '<span class="replace_text">' + attr1[i] + '</span>'
attr2_display += '<span class="replace_text">' + attr2[j] + '</span>'
k += 1
i += 1
j += 1
return [attr1_display, attr2_display]
elif attribute_mode == 'masked':
if not attr1 or not attr2:
if not attr1:
attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
else:
attr1_display = '*'*len(attr1)
if not attr2:
attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
else:
attr2_display = '*'*len(attr2)
else:
if '*' not in helper1 and '*' not in helper2:
attr1_display = len(attr1)*'@'
attr2_display = len(attr2)*'&'
elif helper1 == helper2:
attr1_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
attr2_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
else:
attr1_display = ''
attr2_display = ''
i = 0
j = 0
k = 0
while k < len(helper1):
if helper1[k] == '*':
attr1_display += '*'
attr2_display += '*'
k += 1
i += 1
j += 1
elif k+1 < len(helper1) and i+1 < len(attr1) and j+1 < len(attr2) and \
helper1[k] not in ['*', '_', '?'] and helper1[k+1] not in ['*', '_', '?'] and attr1[i] == attr2[j+1] and attr1[i+1] == attr2[j]:
attr1_display += '<span class="transpose_text">' + '@&' + '</span>'
attr2_display += '<span class="transpose_text">' + '&@' + '</span>'
k += 2
i += 2
j += 2
elif helper1[k] == '_' or helper1[k] == '?':
attr2_display += '<span class="indel_text">' + '&' + '</span>'
k += 1
j += 1
elif helper2[k] == '_' or helper2[k] == '?':
attr1_display += '<span class="indel_text">' + '@' + '</span>'
k += 1
i += 1
else:
attr1_display += '<span class="replace_text">' + '@' + '</span>'
attr2_display += '<span class="replace_text">' + '&' + '</span>'
k += 1
i += 1
j += 1
return [attr1_display, attr2_display]
| 5,351,020 |
def is_doi(identifier: str) -> bool:
"""Validates if identifier is a valid DOI
Args:
identifier (str): potential doi string
Returns:
bool: true if identifier is a valid DOI
"""
doi_patterns = [
r"(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?![\"&\'])\S)+)",
r"(10.\d{4,9}/[-._;()/:A-Z0-9]+)",
r"(10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d)",
r"(10.1021/\w\w\d+)",
r"(10.1207/[\w\d]+\&\d+_\d+)",
]
for pattern in doi_patterns:
match = bool(re.match(pattern, identifier))
if match:
return True
return False
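
# Quick sanity check with a well-known example DOI and a non-DOI string
# (assumes the module-level `import re` the function relies on).
print(is_doi("10.1000/182"))  # True
print(is_doi("not-a-doi"))    # False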
| 5,351,021 |
def meshgrid_flatten(*X):
"""
Functionally same as numpy.meshgrid() with different output
format. Function np.meshgrid() takes n 1d ndarrays of size
N_1,...,N_n, and returns X_1,...,X_n n-dimensional arrays of shape
(N_1, N_2,... N_n). This returns instead a 2d array of shape
(N_1*...*N_n, n).
"""
if len(X) == 1: # Because np.meshgrid() can't handle
return np.array([X[0]]).T # less than 2 arguments
    # materialize the flattened grids as a list; newer NumPy rejects passing
    # a bare iterator (e.g. map) to np.vstack
    return np.vstack(
        [x.flatten() for x in mylib_meshgrid.meshgrid(*X, indexing='ij')]
    ).T
| 5,351,022 |
def transform_scale(
features,
factor: float,
origin: Union[str, list] = "centroid",
mutate: bool = False,
):
"""
Scale a GeoJSON from a given
point by a factor of scaling
(ex: factor=2 would make the GeoJSON 200% larger).
If a FeatureCollection is provided, the origin
point will be calculated based on each individual Feature.
:param features: GeoJSON to be scaled
:param factor: of scaling, positive or negative values greater than 0
:param origin: Point from which the scaling will occur
(string options: sw/se/nw/ne/center/centroid)
:param mutate: allows GeoJSON input to be mutated
(significant performance increase if true)
:return: Scaled Geojson
Example :-
>>> from turfpy.transformation import transform_scale
>>> from geojson import Polygon, Feature
>>> f = Feature(geometry=Polygon([[[0,29],[3.5,29],[2.5,32],[0,29]]]))
>>> transform_scale(f, 3, origin=[0, 29])
"""
if not features:
raise Exception("geojson is required")
if not factor:
raise Exception("invalid factor")
if not mutate:
features = copy.deepcopy(features)
if features["type"] == "FeatureCollection":
def _callback_feature_each(feature, feature_index):
nonlocal factor, origin, features
features["features"][feature_index] = scale(feature, factor, origin)
feature_each(features, _callback_feature_each)
return features
return scale(features, factor, origin)
| 5,351,023 |
def create_agent(opt):
"""Create an agent from the options model, model_params and model_file.
The input is either of the form "parlai.agents.ir_baseline.agents/IrBaselineAgent"
(i.e. the path followed by the class name) or else just 'IrBaseline' which
assumes the path above, and a class name suffixed with 'Agent'
"""
dir_name = opt['model']
if ':' in dir_name:
s = dir_name.split(':')
module_name = s[0]
class_name = s[1]
else:
module_name = "parlai.agents.%s.agents" % (dir_name)
words = opt['model'].split('_')
class_name = ''
for w in words:
class_name += ( w[0].upper() + w[1:])
class_name += 'Agent'
print(class_name)
my_module = importlib.import_module(module_name)
model_class = getattr(my_module, class_name)
return model_class(opt)
| 5,351,024 |
def check_version(actver, version, cmp_op):
"""
Check version string of an active module against a required version.
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to
date.
Copyright (C) 2013 The IPython Development Team
Distributed under the terms of the BSD License.
"""
if isinstance(actver, tuple):
actver = '.'.join([str(i) for i in actver])
# Hacks needed so that LooseVersion understands that (for example)
# version = '3.0.0' is in fact bigger than actver = '3.0.0rc1'
if is_stable_version(version) and not is_stable_version(actver) and \
actver.startswith(version) and version != actver:
version = version + 'zz'
elif is_stable_version(actver) and not is_stable_version(version) and \
version.startswith(actver) and version != actver:
actver = actver + 'zz'
try:
if cmp_op == '>':
return LooseVersion(actver) > LooseVersion(version)
elif cmp_op == '>=':
return LooseVersion(actver) >= LooseVersion(version)
elif cmp_op == '=':
return LooseVersion(actver) == LooseVersion(version)
elif cmp_op == '<':
return LooseVersion(actver) < LooseVersion(version)
elif cmp_op == '<=':
return LooseVersion(actver) <= LooseVersion(version)
else:
return False
except TypeError:
return True
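
# Illustrative comparisons, assuming the module's is_stable_version helper
# treats suffixes like 'rc1' as unstable (which the padding hack above relies on).
print(check_version('3.0.0rc1', '3.0.0', '>='))  # False: rc1 precedes the final release
print(check_version('1.2.3', '1.2', '>'))        # True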
| 5,351,025 |
def calculate_efficiency_metrics(pipeline, X, y):
"""
Apply KFold to the given data and calculate the follow metrics for each fold :
- accuracy
- precision
- recall
- f1-score
- confusion matrix
Finally, it will calculate for each metric its mean and standard deviation.
:param pipeline: A Sklearn pipeline
:param X: A numpy array with the features
:param y: A numpy array with the target
"""
_logger.info("Calculating efficiency metrics")
with mlflow.start_run():
model_timestamp = str(_create_new_model_path().name)
mlflow.set_tag("model_version", model_timestamp)
kfold = KFold(n_splits=5, random_state=4532, shuffle=True)
results = {}
fold_number = 0
for train_index, test_index in kfold.split(X):
X_train, X_test = X.iloc[train_index, :], X.iloc[test_index]
y_train, y_test = y.iloc[train_index, :], y.iloc[test_index]
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
metrics = {
'accuracy': accuracy_score(y_test, y_pred),
'precision': precision_score(y_test, y_pred, average="weighted", labels=np.unique(y_pred)),
'recall': recall_score(y_test, y_pred, average="weighted", labels=np.unique(y_pred)),
'f1': f1_score(y_test, y_pred, average="weighted", labels=np.unique(y_pred))
}
results[fold_number] = metrics
fold_number += 1
overall_results = get_overall_results(results)
mlflow.log_metrics(overall_results)
_logger.info("Metrics generated successfully")
| 5,351,026 |
def dfa_intersection(dfa_1: dict, dfa_2: dict) -> dict:
""" Returns a DFA accepting the intersection of the DFAs in
input.
Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and
:math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two DFAs.
Then there is a DFA :math:`A_∧` that runs simultaneously both
:math:`A_1` and :math:`A_2` on the input word and
accepts when both accept.
It is defined as:
:math:`A_∧ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, F_1 × F_2 )`
where
:math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff
:math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2}= ρ_2 (s_2 , a)`
Implementation proposed guarantees the resulting DFA has only
**reachable** states.
:param dict dfa_1: first input DFA;
:param dict dfa_2: second input DFA.
:return: *(dict)* representing the intersected DFA.
"""
intersection = {
'alphabet': dfa_1['alphabet'].intersection(dfa_2['alphabet']),
'states': {(dfa_1['initial_state'], dfa_2['initial_state'])},
'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']),
'accepting_states': set(),
'transitions': dict()
}
boundary = set()
boundary.add(intersection['initial_state'])
while boundary:
(state_dfa_1, state_dfa_2) = boundary.pop()
if state_dfa_1 in dfa_1['accepting_states'] \
and state_dfa_2 in dfa_2['accepting_states']:
intersection['accepting_states'].add((state_dfa_1, state_dfa_2))
for a in intersection['alphabet']:
if (state_dfa_1, a) in dfa_1['transitions'] \
and (state_dfa_2, a) in dfa_2['transitions']:
next_state_1 = dfa_1['transitions'][state_dfa_1, a]
next_state_2 = dfa_2['transitions'][state_dfa_2, a]
if (next_state_1, next_state_2) not in intersection['states']:
intersection['states'].add((next_state_1, next_state_2))
boundary.add((next_state_1, next_state_2))
intersection['transitions'][(state_dfa_1, state_dfa_2), a] = \
(next_state_1, next_state_2)
return intersection
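
# --- Minimal usage sketch (not part of the original source) ---
# Two toy DFAs in the dict encoding described above: dfa_even accepts words
# with an even number of 'a', dfa_end_b accepts words ending in 'b'.
dfa_even = {
    'alphabet': {'a', 'b'},
    'states': {'even', 'odd'},
    'initial_state': 'even',
    'accepting_states': {'even'},
    'transitions': {
        ('even', 'a'): 'odd', ('even', 'b'): 'even',
        ('odd', 'a'): 'even', ('odd', 'b'): 'odd',
    },
}
dfa_end_b = {
    'alphabet': {'a', 'b'},
    'states': {'s0', 's1'},
    'initial_state': 's0',
    'accepting_states': {'s1'},
    'transitions': {
        ('s0', 'a'): 's0', ('s0', 'b'): 's1',
        ('s1', 'a'): 's0', ('s1', 'b'): 's1',
    },
}
product = dfa_intersection(dfa_even, dfa_end_b)
print(product['initial_state'])                       # ('even', 's0')
print(('even', 's1') in product['accepting_states'])  # True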
| 5,351,027 |
def change_status(request, page_id):
"""
Switch the status of a page.
"""
perm = PagePermission(request.user).check('change', method='POST')
if perm and request.method == 'POST':
page = Page.objects.get(pk=page_id)
page.status = int(request.POST['status'])
page.invalidate()
page.save()
return HttpResponse(str(page.status))
raise Http404
| 5,351,028 |
def str2bytes(s):
"""
Returns byte string representation of product state.
Parameters
----------
s : str
Representation of a product state, in terms of a string.
"""
return bitarray2bytes(str2bitarray(s))
| 5,351,029 |
def find_links(text, image_only=False):
"""
Find Markdown links in text and return a match object.
    Markdown links are expected to have the form [some txt](A-url.ext) or,
    for images, .
Parameters
----------
text : str
Text in which to search for links.
image_only : bool
If ``True``, find only markdown image links, i.e. those that
begin with an exclamation mark.
Returns
-------
list
List of ``re.Match`` objects, one for each link found. Each object
        has two named groups, 'link_text', which contains the part between
        the square brackets, and 'link_url', which is the URL (or file name for an
image).
"""
if image_only:
markdown_link = \
re.compile(r"!\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
flags=re.MULTILINE)
else:
markdown_link = \
re.compile(r"!?\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
flags=re.MULTILINE)
groups = [m for m in markdown_link.finditer(text)]
return groups
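
# Example (assumes the module-level `import re` the function relies on):
text = "See [the docs](https://example.com) and ![a plot](figs/plot.png)."
all_links = find_links(text)
image_links = find_links(text, image_only=True)
print([m.group('link_url') for m in all_links])    # ['https://example.com', 'figs/plot.png']
print([m.group('link_url') for m in image_links])  # ['figs/plot.png']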
| 5,351,030 |
def least_one_row(data_frame):
"""
checking at least one row in dataframe
Input: pandas dataframe
Output: True or False
"""
    # bool(DataFrame) is ambiguous and raises, so check the length explicitly
    if data_frame is not None and len(data_frame) > 0:
        return True
    return False
| 5,351,031 |
def get_reference_docs():
"""Create reference documentation from the source code.
A bit like Sphinx autodoc, but using Markdown, and more basic.
Returns a str in Markdown format.
Note that this function is used to build the Dash Slicer chapter
in the Dash docs.
"""
methods = []
props = []
sig = str(inspect.signature(dash_slicer.VolumeSlicer.__init__)).replace(
"self, ", ""
)
doc = f"**class `VolumeSlicer{sig}`**"
doc += "\n\n" + dedent(dash_slicer.VolumeSlicer.__doc__).rstrip()
methods.append(doc)
for name in dir(dash_slicer.VolumeSlicer):
val = getattr(dash_slicer.VolumeSlicer, name)
if name.startswith("_") or not hasattr(val, "__doc__"):
pass
elif callable(val):
# Method
sig = str(inspect.signature(val)).replace("self, ", "")
doc = f"**method `VolumeSlicer.{name}{sig}`**"
doc += "\n\n" + dedent(val.__doc__).rstrip()
methods.append(doc)
else:
# Property
doc = f"**property `VolumeSlicer.{name}`**"
try:
typ = val.fget.__annotations__["return"].__name__
doc += f" (`{typ}`)"
except (AttributeError, KeyError):
pass
doc += ": " + dedent(val.__doc__).rstrip()
props.append(doc)
parts = []
parts.append("### The VolumeSlicer class")
parts += methods
parts += props
parts.append(dash_slicer.slicer.__doc__)
return "\n\n".join(parts)
| 5,351,032 |
def stop_nova_openstack_compute():
"""Stop the contrail openstack compute service."""
tsn_nodes = []
tor_nodes = []
host = env.host_string
if 'tsn' in env.roledefs:
tsn_nodes = env.roledefs['tsn']
if 'toragent' in env.roledefs:
tor_nodes = env.roledefs['toragent']
    if host not in tsn_nodes and host not in tor_nodes:
if detect_ostype() in ['ubuntu']:
sudo('service nova-compute stop')
return
sudo('service openstack-nova-compute stop')
| 5,351,033 |
def generate_label_colors(labels: list, colors: list, palette='Set2'):
"""Matches labels with colors
If there are more labels than colors, repeat and cycle through colors
"""
label_colors = defaultdict(dict)
num_repeats = math.ceil(len(labels) / len(colors))
for label in enumerate(labels):
label_colors[label[1]] = (colors * num_repeats)[label[0]]
return {**label_colors}
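
# Example: four labels cycled over three colors (assumes the module-level
# imports of math and collections.defaultdict the function relies on).
print(generate_label_colors(['a', 'b', 'c', 'd'], ['red', 'green', 'blue']))
# {'a': 'red', 'b': 'green', 'c': 'blue', 'd': 'red'}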
| 5,351,034 |
def get_xml_file_path_from_image_name(image_name, xml_dir_or_txt):
"""Retrieve xml filepath from xml dir
Args:
image_name:
xml_dir_or_txt:
Returns:
xml_path:
"""
if os.path.isfile(xml_dir_or_txt):
filepaths = fileio.read_list_from_txt(xml_dir_or_txt, field=-1)
elif os.path.isdir(xml_dir_or_txt):
filepaths = list(glob2.glob(os.path.join(xml_dir_or_txt, '**', '*xml')))
else:
raise ValueError('xml_dir_or_txt is neither a directory nor file')
image_name_no_ext = os.path.splitext(os.path.basename(image_name))[0]
xml_path_list = []
for filepath in filepaths:
if image_name_no_ext in filepath:
xml_path_list.append(filepath)
# print(filepath)
assert len(xml_path_list) <= 1, 'xml_path_list expect 0 or 1 element but found {}!'.format(len(xml_path_list))
if len(xml_path_list) == 1:
xml_path = xml_path_list[0]
else:
xml_path = None
return xml_path
| 5,351,035 |
def cancel_command():
"""Removes current reply keyboard"""
SendMessage(context.user.id, "🔹 What's next?", reply_markup=ReplyKeyboardRemove()).webhook()
| 5,351,036 |
def index():
"""
View root page function that returns the index page and its data
"""
# getting top headlines in sources
topheadlines_sources = get_sources('top-headlines')
business_sources = get_sources('business')
entertainment_sources = get_sources('entertainment')
title = 'Home - Welcome to your online News room'
# print(topheadlines_sources.articles)
# search_source = request.args.get(source_query)
# if search_source:
# return redirect(url_for('search',source_name=search_source))
# else:
return render_template('index.html', title = title , topheadlines_sources = topheadlines_sources, business_sources = business_sources, entertainment_sources = entertainment_sources)
| 5,351,037 |
def func(foo):
"""
Parameters:
foo (Foo): ignored
"""
pass
| 5,351,038 |
def CheckPrerequisites(_):
"""Verifies that the required resources are present.
Raises NotImplementedError.
"""
# currently only support GCS object storage
# TODO(user): add AWS & Azure support for object storage
if FLAGS.ch_network_test_service_type == STORAGE and FLAGS.cloud != 'GCP':
raise NotImplementedError('Benchmark only supports GCS object storage.')
| 5,351,039 |
def start_ltm(tup,
taus,
w=0.1,
add_coh=False,
use_cv=False,
add_const=False,
verbose=False,
**kwargs):
"""Calculate the lifetime density map for given data.
Parameters
----------
tup : datatuple
tuple with wl, t, data
taus : list of floats
Used to build the basis vectors.
w : float, optional
        Sigma used when calculating the basis, by default 0.1.
add_coh : bool, optional
If true, coherent contributions are added to the basis.
By default False.
use_cv : bool, optional
Whether to use cross-validation, by default False
add_const : bool, optional
Whether to add an explict constant, by default False
verbose : bool, optional
        Whether to be verbose, by default False
Returns
-------
tuple of (linear_model, coefs, fit, alphas)
        The linear model is the sklearn model used. Coefs is the array
        of the coefficients, fit contains the resulting fit and alphas
        is an array of the applied alpha values when using cv.
"""
X = _make_base(tup, taus, w=w, add_const=add_const, add_coh=add_coh)
if not use_cv:
mod = lm.ElasticNet(**kwargs, l1_ratio=0.98)
else:
mod = lm.ElasticNetCV(**kwargs, l1_ratio=0.98)
mod.fit_intercept = not add_const
mod.warm_start = 1
coefs = np.empty((X.shape[1], tup.data.shape[1]))
fit = np.empty_like(tup.data)
alphas = np.empty(tup.data.shape[1])
for i in range(tup.data.shape[1]):
if verbose:
print(i, 'ha', end=';')
mod.fit(X, tup.data[:, i])
coefs[:, i] = mod.coef_.copy()
fit[:, i] = mod.predict(X)
if hasattr(mod, 'alpha_'):
alphas[i] = mod.alpha_
return mod, coefs, fit, alphas
| 5,351,040 |
def manhattan_distance(origin, destination):
"""Return the Manhattan distance between the origin and the destination.
@type origin: Location
@type destination: Location
@rtype: int
>>> pt1 = Location(1,2)
>>> pt2 = Location(3,4)
>>> print(manhattan_distance(pt1, pt2))
4
"""
return (abs(origin.row - destination.row) +
abs(origin.column - destination.column))
| 5,351,041 |
def get_release():
"""Get the current release of the application.
By release, we mean the release from the version.json file à la Mozilla [1]
(if any). If this file has not been found, it defaults to "NA".
[1]
https://github.com/mozilla-services/Dockerflow/blob/master/docs/version_object.md
"""
# Try to get the current release from the version.json file generated by the
# CI during the Docker image build
try:
with open(os.path.join(BASE_DIR, "version.json"), encoding="utf8") as version:
return json.load(version)["version"]
except FileNotFoundError:
return "NA"
| 5,351,042 |
def test_BBPSSW_psi_minus_psi_plus():
"""
psi- psi+
0b0
[ 0. +0.j 0.5+0.j -0.5+0.j 0. +0.j]
0b1
[0.+0.j 0.+0.j 0.+0.j 0.+0.j]
0b10
[0.+0.j 0.+0.j 0.+0.j 0.+0.j]
0b11
[ 0. +0.j 0.5+0.j -0.5+0.j 0. +0.j]
"""
counter = 0
for i in range(100):
tl, kept1, kept2, meas1, meas2, ep1, ep2 = create_scenario(psi_minus, psi_plus, i)
assert kept1.entangled_memory == {'node_id': 'a2', 'memo_id': 'kept2'}
assert kept2.entangled_memory == {'node_id': 'a1', 'memo_id': 'kept1'}
assert ep1.meas_res == ep2.meas_res
ket1 = tl.quantum_manager.get(kept1.qstate_key)
ket2 = tl.quantum_manager.get(kept2.qstate_key)
assert id(ket1) == id(ket2)
assert kept1.qstate_key in ket1.keys and kept2.qstate_key in ket1.keys
state = correct_order(ket1.state, ket1.keys)
assert complex_array_equal(psi_minus, state)
if ep1.meas_res == 0:
counter += 1
else:
# assert quantum state
pass
assert abs(counter - 50) < 10
| 5,351,043 |
def add_mclag_member(ctx, domain_id, portchannel_names):
"""Add member MCLAG interfaces from MCLAG Domain"""
db = ctx.obj['db']
entry = db.get_entry('MCLAG_DOMAIN', domain_id)
if len(entry) == 0:
ctx.fail("MCLAG Domain " + domain_id + " not configured, configure mclag domain first")
portchannel_list = portchannel_names.split(",")
for portchannel_name in portchannel_list:
if is_portchannel_name_valid(portchannel_name) != True:
ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))
db.set_entry('MCLAG_INTERFACE', (domain_id, portchannel_name), {'if_type':"PortChannel"} )
| 5,351,044 |
def find_best_resampler(
features_train: pd.DataFrame, labels_train: pd.DataFrame, parameters: Dict
) -> List:
"""Compare several resamplers and find the best one to handle imbalanced labels.
Args:
features_train: Training data of independent features.
labels_train: Training data of next month payment default status.
parameters: Parameters defined in parameters.yml.
Returns:
A list containing the best resampler and the search CV results as DataFrame.
"""
col_dict = _get_column_dtype(features_train)
if labels_train.shape[0] == features_train.shape[0]:
labels_train.index = features_train.index
# Create transformers for each dtype
transformers = [
("num_n_trans", StandardScaler(), col_dict["num_normal"]),
(
"num_s_trans",
QuantileTransformer(random_state=parameters["random_state"]),
col_dict["num_skewed"],
),
("ordi_trans", "passthrough", col_dict["ordinal"]),
("bool_pass", "passthrough", col_dict["boolean"]),
(
"cat_trans",
JamesSteinEncoder(random_state=parameters["random_state"], return_df=False),
col_dict["category"],
),
]
transformers = _remove_unused_transformers(transformers)
# Combine the transformers as preprocessor
preprocessor = ColumnTransformer(transformers=transformers)
num_cols = col_dict["num_normal"] + col_dict["num_skewed"]
nomi_cols = col_dict["ordinal"] + col_dict["boolean"] + col_dict["category"]
# Extract target
target_train = labels_train["DEFAULT_PAY"]
# Initalize samplers
smotenc_smpl = SMOTENC(
categorical_features=[
x for x in range(len(num_cols), len(num_cols) + len(nomi_cols))
],
n_jobs=-1,
)
ro_smpl = RandomOverSampler()
enn_smpl = EditedNearestNeighbours(n_jobs=-1)
tl_smpl = TomekLinks(n_jobs=-1)
ru_smpl = RandomUnderSampler()
# Initalize classifier
clf = ExtraTreesClassifier(max_depth=10, n_jobs=-1)
# Create parameter grid
param_grid = {
"sampler": [None, smotenc_smpl, ro_smpl, enn_smpl, tl_smpl, ru_smpl],
"classifier": [clf],
}
# Create classifier pipeline
resampler = PipelineImb(
steps=[
("preprocessor", preprocessor),
("sampler", smotenc_smpl),
("classifier", clf),
]
)
# Start grid search
search_cv = GridSearchCV(
resampler,
param_grid=param_grid,
scoring=[
"precision",
"recall",
"f1",
"roc_auc",
],
refit="f1",
error_score=0,
verbose=2,
)
timer_start = timer()
search_cv.fit(features_train, target_train)
timer_end = timer()
# Log search duration
logger = logging.getLogger(__name__)
logger.info(
f"Best resampler search elapsed time : {_get_time_delta(timer_end - timer_start)}."
)
# Save search result as DataFrame
search_results = pd.DataFrame(search_cv.cv_results_).sort_values(
by=["rank_test_f1"]
)
# Remove unused steps from resampler
resampler = search_cv.best_estimator_
resampler.set_params(
steps=_remove_unused_steps(steps=resampler.steps, remove_clf=True)
)
return [resampler, search_results]
| 5,351,045 |
def grid(dim, num):
"""Build a one-dim grid of num points"""
if dim.type == "categorical":
return categorical_grid(dim, num)
elif dim.type == "integer":
return discrete_grid(dim, num)
elif dim.type == "real":
return real_grid(dim, num)
elif dim.type == "fidelity":
return fidelity_grid(dim, num)
else:
raise TypeError(
"Grid Search only supports `real`, `integer`, `categorical` and `fidelity`: "
f"`{dim.type}`\n"
"For more information on dimension types, see "
"https://orion.readthedocs.io/en/stable/user/searchspace.html"
)
| 5,351,046 |
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
""" Path of functions between two types """
if not isinstance(source, type):
source = type(source)
if not isinstance(target, type):
target = type(target)
for cls in concatv(source.mro(), _virtual_superclasses):
if cls in graph:
source = cls
break
# If both source and target are Out-Of-Core types then restrict ourselves
# to the graph of out-of-core types
if ooc_types:
oocs = tuple(ooc_types)
if issubclass(source, oocs) and issubclass(target, oocs):
graph = graph.subgraph([n for n in graph.nodes()
if issubclass(n, oocs)])
with without_edges(graph, excluded_edges) as g:
pth = nx.shortest_path(g, source=source, target=target, weight='cost')
edge = adjacency(graph)
def path_part(src, tgt):
node = edge[src][tgt]
return PathPart(src, tgt, node['func'], node['cost'])
return map(path_part, pth, pth[1:])
| 5,351,047 |
def is_resource_sufficient(order_ingredients):
"""Return true or false"""
for item in order_ingredients:
if order_ingredients[item]>=resources[item]:
print(f"Sorry not Enough {item} to Make Coffee.")
return False
return True
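
# Example with a hypothetical module-level `resources` dict, which is where the
# function reads the available quantities from.
resources = {"water": 300, "milk": 200, "coffee": 100}
print(is_resource_sufficient({"water": 50, "coffee": 18}))   # True
print(is_resource_sufficient({"water": 500, "coffee": 18}))  # False, after printing a warning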
| 5,351,048 |
def get_gh_releases_api(project, version=None):
"""
"""
# https://developer.github.com/v3/auth/
# self.headers = {'Authorization': 'token %s' % self.api_token}
# https://api.github.com/repos/pygame/stuntcat/releases/latest
repo = get_repo_from_url(project.github_repo)
if not repo:
return
url = f'https://api.github.com/repos/{repo}/releases'
if version is not None:
url += f'/{version}'
if Config.GITHUB_RELEASES_OAUTH is None:
headers = {}
else:
headers = {'Authorization': 'token %s' % Config.GITHUB_RELEASES_OAUTH}
resp = requests.get(
url,
headers = headers
)
if resp.status_code != 200:
raise ValueError('github api failed')
data = resp.json()
return data
| 5,351,049 |
def set_source(code, filename='__main__.py', sections=False, independent=False,
report=None):
"""
Sets the contents of the Source to be the given code. Can also be
optionally given a filename.
Args:
code (str): The contents of the source file.
filename (str): The filename of the students' code. Defaults to
__main__.py.
sections (str or bool): Whether or not the file should be divided into
sections. If a str, then it should be a
Python regular expression for how the sections
are separated. If False, there will be no
sections. If True, then the default pattern
will be used: '^##### Part (\\d+)$'
report (Report): The report object to store data and feedback in. If
left None, defaults to the global MAIN_REPORT.
"""
if report is None:
report = MAIN_REPORT
report['source']['code'] = code
report['source']['full'] = code
report['source']['lines'] = code.split("\n")
report['source']['filename'] = filename
report['source']['independent'] = independent
report['source']['success'] = True
if not sections:
report['source']['sections'] = None
report['source']['section'] = None
_check_issues(code, report)
else:
if sections:
pattern = DEFAULT_PATTERN
else:
pattern = sections
report.group = 0
report['source']['section_pattern'] = pattern
report['source']['section'] = 0
report['source']['line_offset'] = 0
report['source']['sections'] = re.split(pattern, code,
flags=re.MULTILINE)
report['source']['code'] = report['source']['sections'][0]
| 5,351,050 |
def test_make_tiles():
"""Test make_tiles"""
tiles = make_tiles()
assert len(tiles) == 104
assert len(list(filter(lambda tile: tile[0] == 'R', tiles))) == 26
assert len(list(filter(lambda tile: tile[1] == 1, tiles))) == 8
assert len(
list(filter(lambda tile: tile[0] == 'K' and tile[1] == 10,
tiles))) == 2
| 5,351,051 |
def weak_move(board):
"""Weak AI - makes a random valid move.
Args:
board: (Board) The game board.
Returns:
Array: Our chosen move.
"""
valid_moves = _get_moves(board, Square.black)
# Return a random valid move
our_move = valid_moves[random.randrange(0, len(valid_moves))]
logging.info('Weak AI chose r%sc%s', our_move[0], our_move[1])
return our_move
| 5,351,052 |
def flow_to_image(flow):
"""
Input:
flow:
Output:
Img array:
Description:
Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
maxrad = -1
for i in range(flow.shape[0]):
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(maxrad, np.max(rad))
u = u / (maxrad + np.finfo(float).eps)
v = v / (maxrad + np.finfo(float).eps)
img = compute_color(u, v)
out.append(img)
return np.float32(np.uint8(out))
| 5,351,053 |
def set_engines(N=0):
"""
Called only when read_file is called.
Sets the MC engines that are used in verification according to
if there are 4 or 8 processors. if if_no_bip = 1, we will not use any bip and reachx engines
"""
global reachs,pdrs,sims,intrps,bmcs,n_proc,abs_ratio,ifbip,bmcs1, if_no_bip, allpdrs,allbmcs
bmcs1 = [9] #BMC3
## #for HWMCC we want to set N = 8
## N = 8
if N == 0:
N = n_proc = 1+os.sysconf(os.sysconf_names["SC_NPROCESSORS_ONLN"])
## N = n_proc = 8 ### simulate 4 processors for HWMCC - turn this off a hwmcc.
else:
n_proc = N
## print 'n_proc = %d'%n_proc
#strategy is to use 2x number of processors
N = n_proc = -1+2*N
if N <= 1:
reachs = [24]
pdrs = [7]
## bmcs = [30]
bmcs = [9]
intrps = []
sims = []
slps = [18]
elif N <= 2:
reachs = [24]
pdrs = [7]
bmcs = [30]
intrps = []
sims = []
slps = [18]
elif N <= 4:
reachs = [24] #reachy
pdrs = [7,34] #prdm pdr_abstract
if if_no_bip:
allpdrs = pdrs = [7,19] #pdrm pdrmm
bmcs = [9,30] #bmc3 bmc3 -S
intrps = [23] #unterp_m
sims = [26] #Rarity_sim
slps = [18] #sleep
# 0.PDR, 1.INTERPOLATION, 2.BMC, 3.SIMULATION,
# 4.REACHX, 5.PRE_SIMP, 6.simple, 7.PDRM, 8.REACHM, 9.BMC3
# 10.Min_ret, 11.For_ret, 12.REACHP, 13.REACHN 14.PDRseed 15.prove_part_2,
# 16.prove_part_3, 17.verify, 18.sleep, 19.PDRMm, 20.prove_part_1,
# 21.run_parallel, 22.INTRP_bwd, 23.Interp_m 24.REACHY 25.REACHYc 26.Rarity Sim 27.simplify
# 28.speculate, 29.quick_sec, 30.bmc3 -S, 31.BMC2 32.extract -a 33.extract 34.pdr_abstract
# 35.par_scorr, 36.dsat, 37.iprove
# BIPS = 0.PDR, 1.INTERPOLATION, 2.BMC, 14.PDRseed, 22.INTRP_bwd, 34.pdr_abstract
# also reparam which uses ,reparam
elif N <= 8: #used for HWMCC
reachs = [24] #REACHY
allpdrs = pdrs = [7,34,14] #PDRM pdr_abstract PDR_seed
intrps = [23,1] #Interp_m
allbmcs = bmcs = [9,30,31] #BMC3 bmc3 -S
if if_no_bip:
allpdrs = pdrs = [7,19] #PDRM PDRMm
intrps = allintrps = [23] #Interp_m
bmcs = allbmcs = [2]
sims = [26] #Rarity_Sim
slps = [18] #sleep
else:
reachs = [24] #REACHY REACHX
pdrs = [7,34,14,19,0] #PDRM pdr_abstract PDR_seed PDRMm PDR
intrps = [23,1] #Interp_m INTERPOLATION
bmcs = allbmcs
if if_no_bip:
allpdrs = pdrs = [7,19] #PDRM PDRMm
intrps = allintrps = [23] #Interp_m
reachs = [24] #REACHY
bmcs = [9,30] #BMC3 bmc3 -S
sims = [26] #Rarity_Sim
slps = [18]
| 5,351,054 |
def parse_model_value(value, context):
"""
do interpolation first from context,
"x is {size}" with size = 5 will be interpolated to "x is 5"
then return interpolated string
:param value:
:param context:
:return:
"""
return value.format(**context)
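
# Example: interpolate a context value into the template string.
print(parse_model_value("x is {size}", {"size": 5}))  # "x is 5"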
| 5,351,055 |
def apply_tropomi_operator(
filename,
n_elements,
gc_startdate,
gc_enddate,
xlim,
ylim,
gc_cache,
build_jacobian,
sensi_cache,
):
"""
Apply the tropomi operator to map GEOS-Chem methane data to TROPOMI observation space.
Arguments
filename [str] : TROPOMI netcdf data file to read
n_elements [int] : Number of state vector elements
gc_startdate [datetime64] : First day of inversion period, for GEOS-Chem and TROPOMI
gc_enddate [datetime64] : Last day of inversion period, for GEOS-Chem and TROPOMI
xlim [float] : Longitude bounds for simulation domain
ylim [float] : Latitude bounds for simulation domain
gc_cache [str] : Path to GEOS-Chem output data
build_jacobian [log] : Are we trying to map GEOS-Chem sensitivities to TROPOMI observation space?
sensi_cache [str] : If build_jacobian=True, this is the path to the GEOS-Chem sensitivity data
Returns
output [dict] : Dictionary with one or two fields:
- obs_GC : GEOS-Chem and TROPOMI methane data
- TROPOMI methane
- GEOS-Chem methane
- TROPOMI lat, lon
- TROPOMI lat index, lon index
If build_jacobian=True, also include:
- K : Jacobian matrix
"""
# Read TROPOMI data
TROPOMI = read_tropomi(filename)
    if TROPOMI is None:
print(f"Skipping {filename} due to file processing issue.")
return TROPOMI
# We're only going to consider data within lat/lon/time bounds, with QA > 0.5, and with safe surface albedo values
sat_ind = filter_tropomi(TROPOMI, xlim, ylim, gc_startdate, gc_enddate)
# Number of TROPOMI observations
n_obs = len(sat_ind[0])
print("Found", n_obs, "TROPOMI observations.")
# If need to build Jacobian from GEOS-Chem perturbation simulation sensitivity data:
if build_jacobian:
# Initialize Jacobian K
jacobian_K = np.zeros([n_obs, n_elements], dtype=np.float32)
jacobian_K.fill(np.nan)
# Initialize a list to store the dates we want to look at
all_strdate = []
# For each TROPOMI observation
for k in range(n_obs):
# Get the date and hour
iSat = sat_ind[0][k] # lat index
jSat = sat_ind[1][k] # lon index
time = pd.to_datetime(str(TROPOMI["utctime"][iSat]))
strdate = time.round("60min").strftime("%Y%m%d_%H")
all_strdate.append(strdate)
all_strdate = list(set(all_strdate))
# Read GEOS_Chem data for the dates of interest
all_date_gc = read_all_geoschem(all_strdate, gc_cache, build_jacobian, sensi_cache)
# Initialize array with n_obs rows and 6 columns. Columns are TROPOMI CH4, GEOSChem CH4, longitude, latitude, II, JJ
obs_GC = np.zeros([n_obs, 6], dtype=np.float32)
obs_GC.fill(np.nan)
# For each TROPOMI observation:
for k in range(n_obs):
# Get GEOS-Chem data for the date of the observation:
iSat = sat_ind[0][k]
jSat = sat_ind[1][k]
p_sat = TROPOMI["pressures"][iSat, jSat, :]
dry_air_subcolumns = TROPOMI["dry_air_subcolumns"][iSat, jSat, :] # mol m-2
apriori = TROPOMI["methane_profile_apriori"][iSat, jSat, :] # mol m-2
avkern = TROPOMI["column_AK"][iSat, jSat, :]
time = pd.to_datetime(str(TROPOMI["utctime"][iSat]))
strdate = time.round("60min").strftime("%Y%m%d_%H")
GEOSCHEM = all_date_gc[strdate]
# Find GEOS-Chem lats & lons closest to the corners of the TROPOMI pixel
longitude_bounds = TROPOMI["longitude_bounds"][iSat, jSat, :]
latitude_bounds = TROPOMI["latitude_bounds"][iSat, jSat, :]
corners_lon_index = []
corners_lat_index = []
for l in range(4):
iGC = nearest_loc(longitude_bounds[l], GEOSCHEM["lon"])
jGC = nearest_loc(latitude_bounds[l], GEOSCHEM["lat"])
corners_lon_index.append(iGC)
corners_lat_index.append(jGC)
# If the tolerance in nearest_loc() is not satisfied, skip the observation
if np.nan in corners_lon_index + corners_lat_index:
continue
# Get lat/lon indexes and coordinates of GEOS-Chem grid cells closest to the TROPOMI corners
ij_GC = [(x, y) for x in set(corners_lon_index) for y in set(corners_lat_index)]
gc_coords = [(GEOSCHEM["lon"][i], GEOSCHEM["lat"][j]) for i, j in ij_GC]
# Compute the overlapping area between the TROPOMI pixel and GEOS-Chem grid cells it touches
overlap_area = np.zeros(len(gc_coords))
dlon = GEOSCHEM["lon"][1] - GEOSCHEM["lon"][0]
dlat = GEOSCHEM["lat"][1] - GEOSCHEM["lat"][0]
# Polygon representing TROPOMI pixel
polygon_tropomi = Polygon(np.column_stack((longitude_bounds, latitude_bounds)))
# For each GEOS-Chem grid cell that touches the TROPOMI pixel:
for gridcellIndex in range(len(gc_coords)):
# Define polygon representing the GEOS-Chem grid cell
coords = gc_coords[gridcellIndex]
geoschem_corners_lon = [
coords[0] - dlon / 2,
coords[0] + dlon / 2,
coords[0] + dlon / 2,
coords[0] - dlon / 2,
]
geoschem_corners_lat = [
coords[1] - dlat / 2,
coords[1] - dlat / 2,
coords[1] + dlat / 2,
coords[1] + dlat / 2,
]
polygon_geoschem = Polygon(
np.column_stack((geoschem_corners_lon, geoschem_corners_lat))
)
# Calculate overlapping area as the intersection of the two polygons
if polygon_geoschem.intersects(polygon_tropomi):
overlap_area[gridcellIndex] = polygon_tropomi.intersection(
polygon_geoschem
).area
# If there is no overlap between GEOS-Chem and TROPOMI, skip to next observation:
if sum(overlap_area) == 0:
continue
# =======================================================
# Map GEOS-Chem to TROPOMI observation space
# =======================================================
# Otherwise, initialize tropomi virtual xch4 and virtual sensitivity as zero
area_weighted_virtual_tropomi = 0 # virtual tropomi xch4
area_weighted_virtual_tropomi_sensitivity = 0 # virtual tropomi sensitivity
# For each GEOS-Chem grid cell that touches the TROPOMI pixel:
for gridcellIndex in range(len(gc_coords)):
# Get GEOS-Chem lat/lon indices for the cell
iGC, jGC = ij_GC[gridcellIndex]
# Get GEOS-Chem pressure edges for the cell
p_gc = GEOSCHEM["PEDGE"][iGC, jGC, :]
# Get GEOS-Chem methane for the cell
gc_CH4 = GEOSCHEM["CH4"][iGC, jGC, :]
# Get merged GEOS-Chem/TROPOMI pressure grid for the cell
merged = merge_pressure_grids(p_sat, p_gc)
# Remap GEOS-Chem methane to TROPOMI pressure levels
sat_CH4 = remap(
gc_CH4,
merged["data_type"],
merged["p_merge"],
merged["edge_index"],
merged["first_gc_edge"],
) # ppb
# Convert ppb to mol m-2
sat_CH4_molm2 = sat_CH4 * 1e-9 * dry_air_subcolumns # mol m-2
# Derive the column-averaged XCH4 that TROPOMI would see over this ground cell
# using eq. 46 from TROPOMI Methane ATBD, Hasekamp et al. 2019
virtual_tropomi_gridcellIndex = (
sum(apriori + avkern * (sat_CH4_molm2 - apriori))
/ sum(dry_air_subcolumns)
* 1e9
) # ppb
# Weight by overlapping area (to be divided out later) and add to sum
area_weighted_virtual_tropomi += (
overlap_area[gridcellIndex] * virtual_tropomi_gridcellIndex
) # ppb m2
# If building Jacobian matrix from GEOS-Chem perturbation simulation sensitivity data:
if build_jacobian:
# Get GEOS-Chem perturbation sensitivities at this lat/lon, for all vertical levels and state vector elements
sensi_lonlat = GEOSCHEM["Sensitivities"][iGC, jGC, :, :]
# Map the sensitivities to TROPOMI pressure levels
sat_deltaCH4 = remap_sensitivities(
sensi_lonlat,
merged["data_type"],
merged["p_merge"],
merged["edge_index"],
merged["first_gc_edge"],
) # mixing ratio, unitless
# Tile the TROPOMI averaging kernel
avkern_tiled = np.transpose(np.tile(avkern, (n_elements, 1)))
# Tile the TROPOMI dry air subcolumns
dry_air_subcolumns_tiled = np.transpose(
np.tile(dry_air_subcolumns, (n_elements, 1))
) # mol m-2
# Derive the change in column-averaged XCH4 that TROPOMI would see over this ground cell
tropomi_sensitivity_gridcellIndex = np.sum(
avkern_tiled * sat_deltaCH4 * dry_air_subcolumns_tiled, 0
) / sum(
dry_air_subcolumns
) # mixing ratio, unitless
# Weight by overlapping area (to be divided out later) and add to sum
area_weighted_virtual_tropomi_sensitivity += (
overlap_area[gridcellIndex] * tropomi_sensitivity_gridcellIndex
) # m2
# Compute virtual TROPOMI observation as weighted mean by overlapping area
# i.e., need to divide out area [m2] from the previous step
virtual_tropomi = area_weighted_virtual_tropomi / sum(overlap_area)
# Save actual and virtual TROPOMI data
obs_GC[k, 0] = TROPOMI["methane"][
iSat, jSat
] # Actual TROPOMI methane column observation
obs_GC[k, 1] = virtual_tropomi # Virtual TROPOMI methane column observation
obs_GC[k, 2] = TROPOMI["longitude"][iSat, jSat] # TROPOMI longitude
obs_GC[k, 3] = TROPOMI["latitude"][iSat, jSat] # TROPOMI latitude
obs_GC[k, 4] = iSat # TROPOMI index of longitude
obs_GC[k, 5] = jSat # TROPOMI index of latitude
if build_jacobian:
# Compute TROPOMI sensitivity as weighted mean by overlapping area
# i.e., need to divide out area [m2] from the previous step
jacobian_K[k, :] = area_weighted_virtual_tropomi_sensitivity / sum(
overlap_area
)
# Output
output = {}
# Always return the coincident TROPOMI and GEOS-Chem data
output["obs_GC"] = obs_GC
# Optionally return the Jacobian
if build_jacobian:
output["K"] = jacobian_K
return output
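# --- Illustrative sketch, not part of the retrieval code above ---
# The loop above averages GEOS-Chem columns over a TROPOMI pixel, weighting each
# grid cell by its overlap area with the pixel polygon. The toy example below
# reproduces that weighting with shapely; the pixel corners, grid cells and
# per-cell XCH4 values are made up.
if __name__ == "__main__":
    from shapely.geometry import Polygon

    pixel = Polygon([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
    cells = [
        Polygon([(x0, y0), (x0 + 1, y0), (x0 + 1, y0 + 1), (x0, y0 + 1)])
        for x0 in (-0.5, 0.5)
        for y0 in (-0.5, 0.5)
    ]
    xch4 = [1800.0, 1810.0, 1820.0, 1830.0]  # ppb, one value per grid cell

    weights = [pixel.intersection(cell).area for cell in cells]
    virtual_obs = sum(w * v for w, v in zip(weights, xch4)) / sum(weights)
    print(virtual_obs)  # area-weighted "virtual" observation over the pixel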
| 5,351,056 |
def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
"""Load an analysis configuration from a file.
Args:
yaml: YAML object to use in loading the configuration.
filename: Filename of the YAML configuration file.
Returns:
dict-like object containing the loaded configuration
"""
with open(filename, "r") as f:
config = yaml.load(f)
return cast(DictLike, config)
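# Minimal usage sketch. The YAML object is built directly from ruamel.yaml here;
# the surrounding project may provide its own wrapped `yaml` module. The file
# contents and keys are hypothetical.
if __name__ == "__main__":
    import tempfile
    from ruamel.yaml import YAML

    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tmp:
        tmp.write("task_name: jet_analysis\nn_events: 1000\n")
    config = load_configuration(YAML(typ="safe"), tmp.name)
    print(config["task_name"], config["n_events"])  # jet_analysis 1000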
| 5,351,057 |
def device_list(request):
"""
    List all registered devices.
    :param request: HTTP request object
    :return: rendered back/device_list.html listing all Device objects
"""
device_list = True
list = Device.objects.all()
return render(request, "back/device_list.html", locals())
| 5,351,058 |
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by a factor of 10 every 50 epochs"""
lr = args.lr * (0.1 ** (epoch // 50))
print(lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
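# Minimal runnable sketch. `args` with an `lr` attribute is normally created by
# argparse elsewhere in this script; a stand-in Namespace is used here.
if __name__ == "__main__":
    import argparse
    import torch

    args = argparse.Namespace(lr=0.1)
    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=args.lr)
    for epoch in (0, 49, 50, 100):
        adjust_learning_rate(optimizer, epoch)  # prints 0.1, 0.1, 0.01, 0.001 (up to float rounding)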
| 5,351,059 |
def L008_eval(segment, raw_stack, **kwargs):
""" This is a slightly odd one, because we'll almost always evaluate from a point a few places
after the problem site """
# We need at least two segments behind us for this to work
if len(raw_stack) < 2:
return True
else:
cm1 = raw_stack[-1]
cm2 = raw_stack[-2]
if cm2.name == 'comma':
if cm1.name not in ['whitespace', 'newline']:
# comma followed by something that isn't whitespace!
return cm2
elif cm1.raw not in ['\n', ' '] and not segment.is_comment:
return cm1
return True
| 5,351,060 |
def overwrite_ruffus_args(args, config):
"""
    Overwrite Ruffus command line arguments with values from the [Ruffus]
    section of the configuration file, if that section is present.
    :param args: argparse namespace holding Ruffus options
    :param config: ConfigParser instance
    :return: args with matching attributes overwritten
"""
if config.has_section('Ruffus'):
cmdargs = dict()
cmdargs['draw_horizontally'] = bool
cmdargs['flowchart'] = str
cmdargs['flowchart_format'] = str
cmdargs['forced_tasks'] = lambda x: x.split()
cmdargs['history_file'] = str
cmdargs['jobs'] = int
cmdargs['just_print'] = bool
cmdargs['key_legend_in_graph'] = bool
cmdargs['log_file'] = str
cmdargs['recreate_database'] = bool
cmdargs['target_tasks'] = lambda x: x.split()
cmdargs['touch_files_only'] = bool
cmdargs['use_threads'] = bool
cmdargs['verbose'] = lambda x: x.split()
for k, v in config.items('Ruffus'):
try:
args.__setattr__(k, cmdargs[k](v))
except KeyError:
pass
return args
| 5,351,061 |
def read_sfr_df():
"""Reads and prepares the sfr_df
Parameters:
Returns:
sfr_df(pd.DataFrame): dataframe of the fits file mosdef_sfrs_latest.fits
"""
sfr_df = read_file(imd.loc_sfrs_latest)
sfr_df['FIELD_STR'] = [sfr_df.iloc[i]['FIELD'].decode(
"utf-8").rstrip() for i in range(len(sfr_df))]
return sfr_df
| 5,351,062 |
def redshift(x, vo=0., ve=0.,def_wlog=False):
"""
    x: The measured wavelength.
    vo: Speed of the observer [km/s].
    ve: Speed of the emitter [km/s].
    def_wlog: If True, x is given as logarithmic (ln) wavelength.
Returns:
The emitted wavelength l'.
Notes:
f_m = f_e (Wright & Eastman 2014)
"""
if np.isnan(vo):
vo = 0 # propagate nan as zero (@calibration in fib B)
a = (1.0+vo/c) / (1.0+ve/c)
if def_wlog:
return x + np.log(a) # logarithmic
#return x + a # logarithmic + approximation v << c
else:
return x * a
#return x / (1.0-v/c)
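# Usage sketch: emitted-frame wavelength of H-alpha measured at 6562.8 A from a
# source receding at 30 km/s. Assumes the module-level `c` is the speed of light
# in km/s, consistent with the km/s units in the docstring.
if __name__ == "__main__":
    print(redshift(6562.8, vo=0.0, ve=30.0))                         # ~6562.14, linear wavelength
    print(redshift(np.log(6562.8), vo=0.0, ve=30.0, def_wlog=True))  # same shift in ln(wavelength)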
| 5,351,063 |
def overlay_spectra_plot(array, nrow=5,ncol=5,**kwargs):
"""
Overlay spectra on a collapsed cube.
Parameters
----------
array : 3D numpy array
nrow : int
Number of rows in the figure.
ncol : int
Number of columns in the figure.
**kwargs : dict
Keyword arguments passed to `ax.plot` for the spectra
Returns
-------
fig : matplotlib.figure.Figure
The figure object.
"""
cube = np.nan_to_num(array)
fig,ax = plt.subplots(subplot_kw={'projection':mmap.wcs},figsize=(10,10))
fig.set_constrained_layout(False)
collapsed_cube = np.nanmean(cube,axis=2)
vmin,vmax = np.percentile(collapsed_cube[collapsed_cube>0], [0.1,99.9])
    ax.imshow(collapsed_cube,cmap='Greys',norm=mpl.colors.LogNorm(vmin=vmin,vmax=vmax))
w = 1/ncol # in figure coords
h = 1/nrow # in figure coords
dr,dc = collapsed_cube.shape
# create grid of inset_axes on figure
for i in range(nrow):
for j in range(ncol):
b,l = i*h, j*w
#print(f'left:{l:0.1f} col: {j} bottom:{b:0.1f} row:{i}')
bl = [b,l]
ax2 = ax.inset_axes([l,b,w,h])
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_facecolor('none')
#ax.add_patch(mpl.patches.Rectangle([l,b],w,h,transform=ax.transAxes,color='r',alpha=0.5))
#ju.annotate(f'row:{i} col:{j}',l,b,ha='left',va='bottom',ax=ax,transform='axes')
#print(f'{int(b*dr)}:{int((b+h)*dr)},{int(l*dc)}:{int((l+w)*dc)}')
line = np.nanmean(mmap.co[sl][int(b*dr):int((b+h)*dr),int(l*dc):int((l+w)*dc),vsl],axis=(0,1))
ax2.plot(mmap.v[vsl],ju.scale_ptp(line),'r',lw=1,**kwargs)
ax2.set_ylim(ax2.get_ylim()[0],max(ax2.get_ylim()[1],.3))
#ax2.set_axis_off()
#ax.add_patch(mpl.patches.Rectangle([bl[0],bl[1]],w*dc,h*dr,transform=ax.transData,alpha=0.25))
return fig
| 5,351,064 |
def digest_from_rsa_scheme(scheme, hash_library=DEFAULT_HASH_LIBRARY):
"""
<Purpose>
Get digest object from RSA scheme.
<Arguments>
scheme:
A string that indicates the signature scheme used to generate
'signature'. Currently supported RSA schemes are defined in
`securesystemslib.keys.RSA_SIGNATURE_SCHEMES`
hash_library:
The crypto library to use for the given hash algorithm (e.g., 'hashlib').
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are
improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if an unsupported
hashing algorithm is specified, or digest could not be generated with given
the algorithm.
securesystemslib.exceptions.UnsupportedLibraryError, if an unsupported
library was requested via 'hash_library'.
<Side Effects>
None.
<Returns>
Digest object
e.g.
hashlib.new(algorithm) or
PycaDiggestWrapper object
"""
# Are the arguments properly formatted? If not, raise
# 'securesystemslib.exceptions.FormatError'.
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)
# Get hash algorithm from rsa scheme (hash algorithm id is specified after
# the last dash; e.g. rsassa-pss-sha256 -> sha256)
hash_algorithm = scheme.split('-')[-1]
return digest(hash_algorithm, hash_library)
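# Usage sketch: get a digest object for the 'rsassa-pss-sha256' scheme and hash a
# payload with it; per the docstring it behaves like a hashlib digest object.
if __name__ == "__main__":
    digest_object = digest_from_rsa_scheme('rsassa-pss-sha256', 'hashlib')
    digest_object.update(b'data to be signed')
    print(digest_object.hexdigest())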
| 5,351,065 |
def getconfig(filename):
"""
1. Checks if the config file exists.
2. If not, creates it with the content in default_config.
3. Reads the config file and returns it.
Returns False in case of errors.
"""
global default_config
if os.path.exists(filename):
configfile = open(filename, "r")
else:
try:
f = open(filename, 'w')
f.write(default_config)
f.close()
configfile = open(filename, "r")
except IOError:
return False
ret = []
for line in configfile:
line = line.strip()
if not line or line[0] == '#':
continue
else:
ret.append(line)
configfile.close()
return ret
| 5,351,066 |
def table(content, accesskey:str ="", class_: str ="", contenteditable: str ="",
data_key: str="", data_value: str="", dir_: str="", draggable: str="",
hidden: str="", id_: str="", lang: str="", spellcheck: str="",
style: str="", tabindex: str="", title: str="", translate: str=""):
"""
Returns a table.\n
`content`: Contents of the table.\n
"""
g_args = global_args(accesskey, class_, contenteditable, data_key, data_value,
dir_, draggable, hidden, id_, lang, spellcheck, style,
tabindex, title, translate)
return f"<table {g_args}>{content}</table>\n"
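# Usage sketch: wrap pre-rendered rows in a <table>. The row markup is written
# inline here; companion helpers (e.g. tr()/td()), if the module provides them,
# could be used instead.
if __name__ == "__main__":
    rows = "<tr><th>Name</th><th>Qty</th></tr><tr><td>Bolt</td><td>40</td></tr>"
    print(table(rows, id_="inventory", class_="striped"))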
| 5,351,067 |
def _ev_match(
output_dir, last_acceptable_entry_index, certificate, entry_type,
extra_data, certificate_index):
"""Matcher function for the scanner. Returns the certificate's hash if
it is a valid, non-expired, EV certificate, None otherwise."""
# Only generate whitelist for non-precertificates. It is expected that if
# a precertificate was submitted then the issued SCT would be embedded
# in the final certificate.
if entry_type != client_pb2.X509_ENTRY:
return None
# No point in including expired certificates.
if certificate.is_expired():
return None
# Do not include entries beyond the last entry included in the whitelist
# generated on January 1st, 2015.
if certificate_index > last_acceptable_entry_index:
return None
# Only include certificates that have an EV OID.
matching_policies = find_matching_policies(certificate)
if not matching_policies:
return None
# Removed the requirement that the root of the chain matches the root that
# should be used for the EV policy OID.
# See https://code.google.com/p/chromium/issues/detail?id=524635 for
# details.
# Matching certificate
if output_dir:
_write_cert_and_chain(
output_dir, certificate, extra_data, certificate_index)
return calculate_certificate_hash(certificate)
| 5,351,068 |
def axis_ratio_disklike(scale=0.3, truncate=0.2):
"""Sample (one minus) the axis ratio of a disk-like galaxy from the Rayleigh distribution
Parameters
----------
scale : float
scale of the Rayleigh distribution; the bigger, the smaller the axis ratio
truncate : float
the minimum value of the axis ratio
Note
----
The default parameters are used in Lenspop ([1]_) and are expected for elliptical sources.
References
----------
.. [1] Collett, Thomas E.
"The population of galaxy–galaxy strong lenses in forthcoming optical imaging surveys."
The Astrophysical Journal 811.1 (2015): 20.
Returns
-------
float
the axis ratio
"""
q = 0.0
while q < truncate:
q = 1.0 - np.random.rayleigh(scale, size=None)
return q
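# Usage sketch: draw a few axis ratios; by construction every value lies in
# [truncate, 1].
if __name__ == "__main__":
    samples = [axis_ratio_disklike(scale=0.3, truncate=0.2) for _ in range(5)]
    print(samples)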
| 5,351,069 |
def rk4(y0, t0, te, N, deriv, filename=None):
"""
General RK4 driver for
N coupled differential eq's,
fixed stepsize
Input:
- y0: Vector containing initial values for y
- t0: Initial time
- te: Ending time
- N: Number of steps
- deriv: See rk4_step
- filename: Optional, use if you want to write
data to file at each step.
Format used:
t y[0] y[1] ... (%10.15E)
Output:
If filename=None, return tuple containing:
- time: Array of times at which it has iterated over
- yout: N*len(y0) numpy array containing y for each timestep
If filename specified, None is returned.
"""
h = (te-t0)/float(N)
t = t0;
if filename == None:
#Setup arrays
time = numpy.zeros(N);
yout = []
#Inital values
yout.append(y0);
time[0] = t0;
t = t0;
#Loop over timesteps
for i in xrange(1,N):
yout.append(rk4_step(yout[i-1],t,h,deriv));
t = t0 + h*i;
time[i] = t;
return (time,yout)
else:
ofile = open(filename,'w')
#Format string used for output file
ostring = "%20.8E " + ("%20.8E "*len(y0)) + "\n"
#Initial values
y = y0
t = t0
foo = [t]; foo[1:] = y;
ofile.write(ostring % tuple(foo))
while (t < te):
y = rk4_step(y,t,h,deriv)
t +=h
foo = [t]; foo[1:] = y;
ofile.write(ostring % tuple(foo))
ofile.close()
return None
| 5,351,070 |
def create_txt_substitute_record_rule_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""
Args:
client: Client object
args: Usually demisto.args()
Returns:
Outputs
"""
name = args.get('name')
rp_zone = args.get('rp_zone')
comment = args.get('comment')
text = args.get('text')
infoblox_object_type = 'record:rpz:txt'
raw_response = client.create_substitute_record_rule(infoblox_object_type, name=name, rp_zone=rp_zone,
comment=comment, text=text)
rule = raw_response.get('result', {})
fixed_keys_rule_res = {RESPONSE_TRANSLATION_DICTIONARY.get(key, string_to_context_key(key)): val for key, val in
rule.items()}
title = f'{INTEGRATION_NAME} - Response Policy Zone rule: {name} has been created:'
context = {
f'{INTEGRATION_CONTEXT_NAME}.ModifiedResponsePolicyZoneRules(val.Name && val.Name === obj.Name)': fixed_keys_rule_res}
human_readable = tableToMarkdown(title, fixed_keys_rule_res, headerTransform=pascalToSpace)
return human_readable, context, raw_response
| 5,351,071 |
def pot_rho_linear(SP, t, rho0=1025, a=2e-4, b=7e-4, SP0=35, t0=15):
"""
    Potential density calculated using a linear equation of state:
        rho = rho0 * (1 - a * (t - t0) + b * (SP - SP0))
Parameters
----------
SP : array-like
Salinity [g/kg]
t : array-like
Temperature [°C]
rho0 : float, optional
Constant density [kg/m^3]
a : float, optional
Thermal expansion coefficient [1/°C]
b : float, optional
saline expansion coefficient [kg/g]
SP0 : float, optional
Constant salinity [g/kg]
t0 : float, optional
Constant temperature [°C]
Returns
-------
pot_rho : ndarray
Potential density [kg/m^3]
"""
return rho0 * (1 - a * (t - t0) + b * (SP - SP0))
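# Worked example with the default constants: SP = 35.5 g/kg and t = 10 degC give
#   1025 * (1 - 2e-4 * (10 - 15) + 7e-4 * (35.5 - 35)) = 1025 * 1.00135 ~ 1026.38 kg/m^3
if __name__ == "__main__":
    print(pot_rho_linear(35.5, 10.0))  # ~1026.38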
| 5,351,072 |
def oembed(url, params=""):
"""
Render an OEmbed-compatible link as an embedded item.
:param url: A URL of an OEmbed provider.
    :return: The OEmbed ``<embed>`` code.
"""
# Note: this method isn't currently very efficient - the data isn't
# cached or stored.
kwargs = dict(urlparse.parse_qsl(params))
try:
return mark_safe(get_oembed_data(
url,
**kwargs
)['html'])
except (KeyError, ProviderException):
if settings.DEBUG:
return "No OEmbed data returned"
return ""
| 5,351,073 |
async def create_assc(conn : asyncpg.Connection, name : str, type : str,
base : str, leader : int) -> Association:
"""Create an association with the fields given.
type must be 'Brotherhood','College', or 'Guild'.
"""
psql = """
SELECT assc_id
FROM associations
WHERE assc_name = $1;
"""
if await conn.fetchval(psql, name) is not None:
raise Checks.NameTaken(name)
psql1 = """
WITH rows AS (
INSERT INTO associations
(assc_name, assc_type, leader_id, assc_icon, base)
VALUES ($1, $2, $3, $4, $5)
RETURNING assc_id
)
SELECT assc_id FROM rows;
"""
psql2 = """
UPDATE players
SET assc = $1, guild_rank = 'Leader'
WHERE user_id = $2;
"""
psql3 = """
INSERT INTO brotherhood_champions (assc_id)
VALUES ($1);
"""
assc_id = await conn.fetchval(
psql1, name, type, leader, Vars.DEFAULT_ICON, base)
await conn.execute(psql2, assc_id, leader)
if type == "Brotherhood":
await conn.execute(psql3, assc_id)
return await get_assc_by_id(conn, assc_id)
| 5,351,074 |
def ssx_plot(data):
"""
Plot the current list of ints so far. Data requires the following keys
* xdim (int) X dimension for the plot
* ydim (int) Y dimension for the plot
* int_indices (list of ints) list of int 'hits' to light up on the plot
    * plot_filename (path) full filename to save the plot
"""
import numpy as np
from matplotlib import pyplot as plt
for dim in ('xdim', 'ydim'):
if not isinstance(data.get(dim), int):
raise ValueError(f'"{dim}" not provided as an integer: {data.get(dim)}')
bad_ints = [int_index for int_index in data.get('int_indices', []) if not isinstance(int_index, int)]
if bad_ints:
raise ValueError(f'Got int indices which are not numbers: {bad_ints}')
xdim, ydim = data['xdim'], data['ydim']
lattice_counts = np.zeros(xdim*ydim, dtype=np.dtype(int))
for index in data['int_indices']:
lattice_counts[index] += 1
lattice_counts = lattice_counts.reshape((ydim, xdim))
# reverse the order of alternating rows
lattice_counts[1::2, :] = lattice_counts[1::2, ::-1]
# plot the lattice counts
plt.figure(figsize=(xdim/10., ydim/10.))
plt.axes([0, 0, 1, 1]) # Make the plot occupy the whole canvas
plt.axis('off')
plt.imshow(lattice_counts, cmap='hot', interpolation=None, vmax=4)
plt.savefig(data['plot_filename'])
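# Usage sketch: light up a few indexed 'hits' on a 40x40 serpentine grid and
# write the figure to a hypothetical output path.
if __name__ == "__main__":
    ssx_plot({
        "xdim": 40,
        "ydim": 40,
        "int_indices": [5, 42, 42, 801, 1203],  # a repeated index increments the same cell
        "plot_filename": "ssx_hits.png",
    })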
| 5,351,075 |
def filter_tof_to_csr(
tof_slices: np.ndarray,
push_indices: np.ndarray,
tof_indices: np.ndarray,
push_indptr: np.ndarray,
) -> tuple:
"""Get a CSR-matrix with raw indices satisfying push indices and tof slices.
Parameters
----------
tof_slices : np.int64[:, 3]
Each row of the array is assumed to be a (start, stop, step) tuple.
This array is assumed to be sorted, disjunct and strictly increasing
(i.e. np.all(np.diff(tof_slices[:, :2].ravel()) >= 0) = True).
push_indices : np.int64[:]
The push indices from where to retrieve the TOF slices.
tof_indices : np.uint32[:]
The self.tof_indices array of a TimsTOF object.
push_indptr : np.int64[:]
The self.push_indptr array of a TimsTOF object.
Returns
-------
(np.int64[:], np.int64[:], np.int64[:],)
An (indptr, values, columns) tuple, where indptr are push indices,
values raw indices, and columns the tof_slices.
"""
indptr = [0]
values = []
columns = []
for push_index in push_indices:
start = push_indptr[push_index]
end = push_indptr[push_index + 1]
idx = start
for i, (tof_start, tof_stop, tof_step) in enumerate(tof_slices):
idx += np.searchsorted(tof_indices[idx: end], tof_start)
tof_value = tof_indices[idx]
while (tof_value < tof_stop) and (idx < end):
if tof_value in range(tof_start, tof_stop, tof_step):
values.append(idx)
columns.append(i)
break # TODO what if multiple hits?
idx += 1
tof_value = tof_indices[idx]
indptr.append(len(values))
return np.array(indptr), np.array(values), np.array(columns)
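# Usage sketch with tiny hand-made arrays standing in for a TimsTOF object's
# tof_indices/push_indptr. The returned (indptr, values, columns) triplet can be
# passed to scipy.sparse.csr_matrix if an explicit sparse matrix is needed.
if __name__ == "__main__":
    tof_slices = np.array([[10, 21, 1]])  # one (start, stop, step) slice
    push_indices = np.array([0, 1])
    tof_indices = np.array([10, 20, 30, 15, 25], dtype=np.uint32)
    push_indptr = np.array([0, 3, 5])
    indptr, values, columns = filter_tof_to_csr(
        tof_slices, push_indices, tof_indices, push_indptr
    )
    print(indptr, values, columns)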
| 5,351,076 |
def get_path_from_query_string(req):
"""Gets path from query string
Args:
req (flask.request): Request object from Flask
Returns:
path (str): Value of "path" parameter from query string
Raises:
exceptions.UserError: If "path" is not found in query string
"""
if req.args.get('path') is None:
raise exceptions.UserError('Path not found in query string')
return req.args.get('path')
| 5,351,077 |
def test_cray_bos_sessiontemplateteplate_list(cli_runner, rest_mock):
""" Test cray bos sessiontemplatetemplate list """
runner, cli, config = cli_runner
result = runner.invoke(
cli,
['bos', 'sessiontemplatetemplate', 'list']
)
assert result.exit_code == 0
data = json.loads(result.output)
assert data['method'] == 'GET'
assert data['url'] == '{}/apis/bos/v1/sessiontemplatetemplate'.format(
config['default']['hostname']
)
| 5,351,078 |
def entropy_grassberger(n, base=None):
    """
Estimate the entropy of a discrete distribution from counts per category
n: array of counts
base: base in which to measure the entropy (default: nats)
"""
N = np.sum(n)
entropy = np.log(N) - np.sum(n*scipy.special.digamma(n+1e-20))/N
if base:
entropy /= np.log(base)
return entropy
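# Usage sketch: Grassberger entropy estimate for a small count vector, in bits.
if __name__ == "__main__":
    counts = np.array([30, 20, 10, 5])
    print(entropy_grassberger(counts, base=2))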
| 5,351,079 |
def beaching_kernel(particle, fieldset, time):
"""
Author: Victor Onink, 25/01/2022
"""
# Beaching
if particle.beach == 0:
dist = fieldset.distance2shore[time, particle.depth, particle.lat, particle.lon]
if dist < fieldset.Coastal_Boundary:
if ParcelsRandom.uniform(0, 1) > fieldset.p_beach:
particle.beach = 1
# Resuspension
elif particle.beach == 1:
if ParcelsRandom.uniform(0, 1) > fieldset.p_resus:
particle.beach = 0
| 5,351,080 |
def get_param_layout():
"""Get layout for causality finding parameters definition window
Parameters
----------
Returns
-------
`List[List[Element]]`
Layout for causality finding parameters window
"""
box = [
[
sg.Text('Parameters')
],
[
sg.Text(' Epochs: '),sg.Input(key=cte.EPOCHS,size=(10,1), default_text="1000"),
sg.Text(' Kernel Size: '),sg.Input(key=cte.KERNEL,size=(10,1), default_text="4")
],
[
sg.Text(' Depth: '),sg.Input(key=cte.LEVEL,size=(10,1), default_text="1"),
sg.Text(' Learning Rate: '),sg.Input(key=cte.RATE,size=(10,1), default_text="0.01")
],
[
sg.Text(' Dilation: '),sg.Input(key=cte.DILATION,size=(10,1), default_text="4"),
sg.Text(' Significance: '),sg.Input(key=cte.SIGNIFICANCE,size=(10,1), default_text="0.8")
],
[
sg.Text('Optimizer: '),sg.Input(key=cte.OPTIMIZER,size=(10,1), default_text="Adam"),
sg.Text(' Log Interval: '),sg.Input(key=cte.LOGINT,size=(10,1), default_text="500")
],
[sg.Button('Create Causal Graph', key=cte.CREATE)]
]
return box
| 5,351,081 |
def senti_histplot(senti_df):
"""histogram plot for sentiment"""
senti_hist = (
alt.Chart(senti_df)
.mark_bar()
.encode(alt.Y(cts.SENTI, bin=True), x="count()", color=cts.SENTI,)
.properties(height=300, width=100)
).interactive()
return senti_hist
| 5,351,082 |
def pd_df_sampling(df, coltarget="y", n1max=10000, n2max=-1, isconcat=1):
"""
DownSampler
:param df:
:param coltarget: binary class
:param n1max:
:param n2max:
:param isconcat:
:return:
"""
df1 = df[df[coltarget] == 0].sample(n=n1max)
n2max = len(df[df[coltarget] == 1]) if n2max == -1 else n2max
df0 = df[df[coltarget] == 1].sample(n=n2max)
if isconcat:
df2 = pd.concat((df1, df0))
df2 = df2.sample(frac=1.0, replace=True)
return df2
else:
print("y=1", n2max, "y=0", len(df1))
return df0, df1
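# Usage sketch on a toy imbalanced frame: keep 100 rows of the majority class
# (y == 0) and all rows of the minority class (y == 1). Note the final
# sample(frac=1.0, replace=True) resamples with replacement, so the returned
# class counts are only approximately 100/50.
if __name__ == "__main__":
    df_toy = pd.DataFrame({"y": [0] * 500 + [1] * 50, "x": range(550)})
    df_balanced = pd_df_sampling(df_toy, coltarget="y", n1max=100)
    print(df_balanced["y"].value_counts())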
| 5,351,083 |
def _build_parser():
"""Build parser object."""
from functools import partial
from pathlib import Path
from argparse import (
ArgumentParser,
ArgumentDefaultsHelpFormatter,
)
from packaging.version import Version
from .version import check_latest, is_flagged
from niworkflows.utils.spaces import Reference, OutputReferencesAction
def _path_exists(path, parser):
"""Ensure a given path exists."""
if path is None or not Path(path).exists():
raise parser.error(f"Path does not exist: <{path}>.")
return Path(path).absolute()
def _min_one(value, parser):
"""Ensure an argument is not lower than 1."""
value = int(value)
if value < 1:
raise parser.error("Argument can't be less than one.")
return value
def _to_gb(value):
scale = {"G": 1, "T": 10 ** 3, "M": 1e-3, "K": 1e-6, "B": 1e-9}
digits = "".join([c for c in value if c.isdigit()])
units = value[len(digits):] or "M"
return int(digits) * scale[units[0]]
def _drop_sub(value):
value = str(value)
return value.lstrip("sub-")
def _bids_filter(value):
from json import loads
if value and Path(value).exists():
return loads(Path(value).read_text())
verstr = f"dMRIPrep v{config.environment.version}"
currentv = Version(config.environment.version)
is_release = not any(
(currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease)
)
parser = ArgumentParser(
description="dMRIPrep: dMRI PREProcessing workflows v{}".format(
config.environment.version
),
formatter_class=ArgumentDefaultsHelpFormatter,
)
PathExists = partial(_path_exists, parser=parser)
PositiveInt = partial(_min_one, parser=parser)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument(
"bids_dir",
action="store",
type=PathExists,
help="the root folder of a BIDS valid dataset (sub-XXXXX folders should "
"be found at the top level in this folder).",
)
parser.add_argument(
"output_dir",
action="store",
type=Path,
help="the output path for the outcomes of preprocessing and visual " "reports",
)
parser.add_argument(
"analysis_level",
choices=["participant"],
help='processing stage to be run, only "participant" in the case of '
"dMRIPrep (see BIDS-Apps specification).",
)
# optional arguments
parser.add_argument("--version", action="version", version=verstr)
g_bids = parser.add_argument_group("Options for filtering BIDS queries")
g_bids.add_argument(
"--skip-bids-validation",
action="store_true",
default=False,
help="assume the input dataset is BIDS compliant and skip the validation",
)
g_bids.add_argument(
"--participant-label",
"--participant_label",
action="store",
nargs="+",
type=_drop_sub,
help="a space delimited list of participant identifiers or a single "
"identifier (the sub- prefix can be removed)",
)
g_bids.add_argument(
"--bids-filter-file",
dest="bids_filters",
action="store",
type=_bids_filter,
metavar="PATH",
help="a JSON file describing custom BIDS input filter using pybids "
"{<suffix>:{<entity>:<filter>,...},...} "
"(https://github.com/bids-standard/pybids/blob/master/bids/layout/config/bids.json)",
)
g_bids.add_argument(
"--anat-derivatives", action='store', metavar="PATH", type=PathExists,
help="Reuse the anatomical derivatives from another fMRIPrep run or calculated "
"with an alternative processing tool (NOT RECOMMENDED)."
)
g_perfm = parser.add_argument_group("Options to handle performance")
g_perfm.add_argument(
"--nprocs",
"--nthreads",
"--n_cpus",
"-n-cpus",
action="store",
type=PositiveInt,
help="maximum number of threads across all processes",
)
g_perfm.add_argument(
"--omp-nthreads",
action="store",
type=PositiveInt,
help="maximum number of threads per-process",
)
g_perfm.add_argument(
"--mem",
"--mem_mb",
"--mem-mb",
dest="memory_gb",
action="store",
type=_to_gb,
help="upper bound memory limit for dMRIPrep processes",
)
g_perfm.add_argument(
"--low-mem",
action="store_true",
help="attempt to reduce memory usage (will increase disk usage "
"in working directory)",
)
g_perfm.add_argument(
"--use-plugin",
action="store",
default=None,
help="nipype plugin configuration file",
)
g_perfm.add_argument(
"--anat-only", action="store_true", help="run anatomical workflows only"
)
g_perfm.add_argument(
"--boilerplate_only",
action="store_true",
default=False,
help="generate boilerplate only",
)
g_perfm.add_argument(
"--md-only-boilerplate",
action="store_true",
default=False,
help="skip generation of HTML and LaTeX formatted citation with pandoc",
)
g_perfm.add_argument(
"-v",
"--verbose",
dest="verbose_count",
action="count",
default=0,
help="increases log verbosity for each occurrence, debug level is -vvv",
)
g_conf = parser.add_argument_group("Workflow configuration")
g_conf.add_argument(
"--ignore",
required=False,
action="store",
nargs="+",
default=[],
choices=["fieldmaps", "slicetiming", "sbref"],
help="ignore selected aspects of the input dataset to disable corresponding "
"parts of the workflow (a space delimited list)",
)
g_conf.add_argument(
"--longitudinal",
action="store_true",
help="treat dataset as longitudinal - may increase runtime",
)
g_conf.add_argument(
"--output-spaces",
nargs="*",
action=OutputReferencesAction,
help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<SPACE>[:cohort-<label>][:res-<resolution>][...]``, where ``<SPACE>`` is \
a keyword designating a spatial reference, and may be followed by optional, \
colon-separated parameters. \
Non-standard spaces imply specific orientations and sampling grids. \
The default value of this flag (meaning, if the argument is not include in the command line) \
is ``--output-spaces run`` - the original space and sampling grid of the original DWI run. \
Important to note, the ``res-*`` modifier does not define the resolution used for \
the spatial normalization. To generate no DWI outputs (if that is intended for some reason), \
use this option without specifying any spatial references. For further details, please check out \
https://www.nipreps.org/dmriprep/en/%s/spaces.html"""
% (currentv.base_version if is_release else "latest"),
)
# ANTs options
g_ants = parser.add_argument_group("Specific options for ANTs registrations")
g_ants.add_argument(
"--skull-strip-template",
default="OASIS30ANTs",
type=Reference.from_string,
help="select a template for skull-stripping with antsBrainExtraction",
)
g_ants.add_argument(
"--skull-strip-fixed-seed",
action="store_true",
help="do not use a random seed for skull-stripping - will ensure "
"run-to-run replicability when used with --omp-nthreads 1",
)
# Fieldmap options
g_fmap = parser.add_argument_group("Specific options for handling fieldmaps")
g_fmap.add_argument(
"--fmap-bspline",
action="store_true",
default=False,
help="fit a B-Spline field using least-squares (experimental)",
)
g_fmap.add_argument(
"--fmap-no-demean",
action="store_false",
default=True,
help="do not remove median (within mask) from fieldmap",
)
# SyN-unwarp options
g_syn = parser.add_argument_group("Specific options for SyN distortion correction")
g_syn.add_argument(
"--use-syn-sdc",
action="store_true",
default=False,
help="EXPERIMENTAL: Use fieldmap-free distortion correction",
)
g_syn.add_argument(
"--force-syn",
action="store_true",
default=False,
help="EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to "
"fieldmap correction, if available",
)
# FreeSurfer options
g_fs = parser.add_argument_group("Specific options for FreeSurfer preprocessing")
g_fs.add_argument(
"--fs-license-file",
metavar="PATH",
type=PathExists,
help="Path to FreeSurfer license key file. Get it (for free) by registering"
" at https://surfer.nmr.mgh.harvard.edu/registration.html",
)
g_fs.add_argument(
"--fs-subjects-dir",
metavar="PATH",
type=Path,
help="Path to existing FreeSurfer subjects directory to reuse. "
"(default: OUTPUT_DIR/freesurfer)",
)
# Surface generation xor
g_surfs = parser.add_argument_group("Surface preprocessing options")
g_surfs_xor = g_surfs.add_mutually_exclusive_group()
g_surfs_xor.add_argument(
"--no-submm-recon",
action="store_false",
dest="hires",
help="disable sub-millimeter (hires) reconstruction",
)
g_surfs_xor.add_argument(
"--fs-no-reconall",
action="store_false",
dest="run_reconall",
help="disable FreeSurfer surface preprocessing.",
)
g_other = parser.add_argument_group("Other options")
g_other.add_argument(
"-w",
"--work-dir",
action="store",
type=Path,
default=Path("work").absolute(),
help="path where intermediate results should be stored",
)
g_other.add_argument(
"--clean-workdir",
action="store_true",
default=False,
help="Clears working directory of contents. Use of this flag is not"
"recommended when running concurrent processes of dMRIPrep.",
)
g_other.add_argument(
"--resource-monitor",
action="store_true",
default=False,
help="enable Nipype's resource monitoring to keep track of memory and CPU usage",
)
g_other.add_argument(
"--reports-only",
action="store_true",
default=False,
help="only generate reports, don't run workflows. This will only rerun report "
"aggregation, not reportlet generation for specific nodes.",
)
g_other.add_argument(
"--run-uuid",
action="store",
default=None,
help="Specify UUID of previous run, to include error logs in report. "
"No effect without --reports-only.",
)
g_other.add_argument(
"--write-graph",
action="store_true",
default=False,
help="Write workflow graph.",
)
g_other.add_argument(
"--stop-on-first-crash",
action="store_true",
default=False,
help="Force stopping on first crash, even if a work directory"
" was specified.",
)
g_other.add_argument(
"--notrack",
action="store_true",
default=False,
help="Opt-out of sending tracking information of this run to "
"the dMRIPREP developers. This information helps to "
"improve dMRIPREP and provides an indicator of real "
"world usage crucial for obtaining funding.",
)
g_other.add_argument(
"--sloppy",
dest="debug",
action="store_true",
default=False,
help="Use low-quality tools for speed - TESTING ONLY",
)
latest = check_latest()
if latest is not None and currentv < latest:
print(
"""\
You are using dMRIPrep-%s, and a newer version of dMRIPrep is available: %s.
Please check out our documentation about how and when to upgrade:
https://dmriprep.readthedocs.io/en/latest/faq.html#upgrading"""
% (currentv, latest),
file=sys.stderr,
)
_blist = is_flagged()
if _blist[0]:
_reason = _blist[1] or "unknown"
print(
"""\
WARNING: Version %s of dMRIPrep (current) has been FLAGGED
(reason: %s).
That means some severe flaw was found in it and we strongly
discourage its usage."""
% (config.environment.version, _reason),
file=sys.stderr,
)
return parser
| 5,351,084 |
def get_deliverer(batch_size, max_staleness, session):
    """ Helper function to return the correct deliverer class for the
    batch_size and max_staleness parameters
"""
if batch_size < 1:
return SimpleDeliverer(session)
else:
return BatchDeliverer(session, batch_size, max_staleness)
| 5,351,085 |
def trace_fweight_deprecated(fimage, xinit, ltrace=None, rtraceinvvar=None, radius=3.):
""" Python port of trace_fweight.pro from IDLUTILS
Parameters:
-----------
fimage: 2D ndarray
Image for tracing
xinit: ndarray
Initial guesses for x-trace
    ltrace, rtraceinvvar: ndarray, optional
      Kept for compatibility with the IDL interface; not used in this port
      (the inverse variance is set to unity internally)
radius: float, optional
Radius for centroiding; default to 3.0
"""
# Init
nx = fimage.shape[1]
ny = fimage.shape[0]
ncen = len(xinit)
xnew = copy.deepcopy(xinit)
xerr = np.zeros(ncen) + 999.
ycen = np.arange(ny, dtype=int)
invvar = 0. * fimage + 1.
x1 = xinit - radius + 0.5
x2 = xinit + radius + 0.5
ix1 = np.floor(x1).astype(int)
ix2 = np.floor(x2).astype(int)
fullpix = int(np.maximum(np.min(ix2-ix1)-1, 0))
sumw = np.zeros(ny)
sumxw = np.zeros(ny)
sumwt = np.zeros(ny)
sumsx1 = np.zeros(ny)
sumsx2 = np.zeros(ny)
qbad = np.array([False]*ny)
if invvar is None:
invvar = np.zeros_like(fimage) + 1.
# Compute
for ii in range(0,fullpix+3):
spot = ix1 - 1 + ii
ih = np.clip(spot,0,nx-1)
xdiff = spot - xinit
#
wt = np.clip(radius - np.abs(xdiff) + 0.5,0,1) * ((spot >= 0) & (spot < nx))
sumw = sumw + fimage[ycen,ih] * wt
sumwt = sumwt + wt
sumxw = sumxw + fimage[ycen,ih] * xdiff * wt
var_term = wt**2 / (invvar[ycen,ih] + (invvar[ycen,ih] == 0))
sumsx2 = sumsx2 + var_term
sumsx1 = sumsx1 + xdiff**2 * var_term
#qbad = qbad or (invvar[ycen,ih] <= 0)
qbad = np.any([qbad, invvar[ycen,ih] <= 0], axis=0)
# Fill up
good = (sumw > 0) & (~qbad)
if np.sum(good) > 0:
delta_x = sumxw[good]/sumw[good]
xnew[good] = delta_x + xinit[good]
xerr[good] = np.sqrt(sumsx1[good] + sumsx2[good]*delta_x**2)/sumw[good]
bad = np.any([np.abs(xnew-xinit) > radius + 0.5, xinit < radius - 0.5, xinit > nx - 0.5 - radius], axis=0)
if np.sum(bad) > 0:
xnew[bad] = xinit[bad]
xerr[bad] = 999.0
# Return
return xnew, xerr
| 5,351,086 |
def home():
"""Renders the card page."""
cardStack = model.CardStack()
return render_template(
'cards.html',
title ='POSTIN - Swipe',
cardSet = cardStack.cardList,
year=datetime.now().year,
)
| 5,351,087 |
def uncompress(filename: str, path: str = os.getcwd()) -> None:
"""Uncompress a tar file
Args:
filename: a tar file (tar, tgz, ...)
path: where the filename will be uncompressed
Example:
>>> import robotathome as rh
>>> rh.uncompress('~/WORKSPACE/Robot@Home2_db.tgz')
"""
class ProgressFileObject(io.FileIO):
def __init__(self, path, *args, **kwargs):
self._total_size = os.path.getsize(path)
io.FileIO.__init__(self, path, *args, **kwargs)
def read(self, size):
sys.stdout.write("\rUncompressing %d of %d MB (%d%%)" % (self.tell() / 1048576, self._total_size / 1048576, self.tell()*100/self._total_size))
sys.stdout.flush()
return io.FileIO.read(self, size)
try:
rh.logger.info("Extracting files from {}: ", (os.path.basename(filename)))
file_obj=ProgressFileObject(os.path.expanduser(filename))
tf = tarfile.open(fileobj=file_obj)
tf.extractall(path=os.path.expanduser(path))
file_obj.close()
except Exception as error_code:
rh.logger.info("Error: {}", error_code)
else:
tf.close()
print()
        rh.logger.info("Extraction success. Don't forget to remove {} if you need to free up disk space.",
                       (os.path.basename(filename)))
| 5,351,088 |
def index(request):
"""
User profile page.
"""
user = request.user
profile = user.userprofile
context = collect_view_data(request, 'profile')
context['user'] = user
context['profile'] = profile
context['uform'] = UserForm(request, request.user, init=True)
context['upform'] = UserProfileForm(request, profile, init=True)
context['pform'] = ChangePasswordForm(request.user)
context['sform'] = SSHKeyForm(request, request.user)
context['ssh_keys'] = request.user.usersshkey_set.all().order_by('id')
context['email_aform'] = EmailActivationProfileForm(profile.email_token)
context['phone_aform'] = PhoneActivationProfileForm(profile.phone_token)
return render(request, 'gui/profile/profile.html', context)
| 5,351,089 |
def user_confirm_email(token):
"""Confirm a user account using his email address and a token to approve.
Parameters
----------
token : str
The token associated with an email address.
"""
try:
email = ts.loads(token, max_age=86400)
except Exception as e:
logger.error(str(e))
abort(404)
user = User.query.filter_by(email=email).one_or_none()
if user is None:
flash(
'You did not sign-up yet to RAMP. Please sign-up first.',
category='error'
)
return redirect(url_for('auth.sign_up'))
elif user.access_level in ('user', 'admin'):
flash(
"Your account is already approved. You don't need to confirm your "
"email address", category='error'
)
return redirect(url_for('auth.login'))
elif user.access_level == 'asked':
flash(
"Your email address already has been confirmed. You need to wait "
"for an approval from a RAMP administrator", category='error'
)
return redirect(url_for('general.index'))
User.query.filter_by(email=email).update({'access_level': 'asked'})
db.session.commit()
admin_users = User.query.filter_by(access_level='admin')
for admin in admin_users:
subject = 'Approve registration of {}'.format(
user.name
)
body = body_formatter_user(user)
url_approve = ('http://{}/sign_up/{}'
.format(app.config['DOMAIN_NAME'], user.name))
body += 'Click on the link to approve the registration '
body += 'of this user: {}'.format(url_approve)
send_mail(admin.email, subject, body)
flash(
"An email has been sent to the RAMP administrator(s) who will "
"approve your account"
)
return redirect(url_for('auth.login'))
| 5,351,090 |
def num_zeros_end(num):
"""
Counts the number of zeros at the end
of the number 'num'.
"""
iszero = True
num_zeros = 0
i = len(num)-1
    while iszero and i != 0:
        if num[i] == "0":
            num_zeros += 1
        else:
            iszero = False
i -= 1
return num_zeros
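# Usage sketch: the number must be passed as a string.
if __name__ == "__main__":
    print(num_zeros_end("45000"))  # 3
    print(num_zeros_end("407"))    # 0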
| 5,351,091 |
def batch_summarize(texts: List[str]) -> List[str]:
"""Summarizes the texts (local mode).
:param texts: The texts to summarize.
:type texts: List[str]
:return: The summarized texts.
:rtype: List[str]
"""
if _summarizer is None:
load_summarizer()
assert _summarizer is not None
tokenizer = get_summarizer_tokenizer()
prompts = [summarizer_prompt.format(text=text) for text in texts]
information = {
"prompt_length": max(len(tokenizer.encode(prompt)) for prompt in prompts)
}
parameters = format_parameters_to_local(summarizer_parameters, information)
response = _summarizer(prompts, **parameters)
return [
cut_on_stop(choices[0]["generated_text"], summarizer_parameters["stop"])
for choices in response
]
| 5,351,092 |
def test_list_posts_of_feed(
client: testclient.TestClient, db_session: sa.orm.Session,
):
"""
Test list posts of a specific RSS feed.
"""
with factories.single_commit(db_session):
post_1 = factories.PostFactory()
factories.PostFactory()
response = client.get(f"/api/feeds/{post_1.rss_feed_id}/posts/")
assert response.status_code == 200
content = response.json()
assert len(content) == 1
| 5,351,093 |
def _reader_bytes(handle, count=None, chunk_size=1024):
"""
Read a given number of bytes
Examples:
>>> tmp_dir = getfixture('tmpdir')
>>> tmp_file = tmp_dir.join('test')
>>> tmp_file.write('abcdefghi')
>>> handle = tmp_file.open()
>>> handle.seek(4)
4
>>> list(_reader_bytes(handle, 2))
['ef']
>>> handle = tmp_file.open()
>>> handle.seek(4)
4
>>> list(_reader_bytes(handle, chunk_size=3))
['efg', 'hi']
"""
remain = count
while remain is None or remain > 0:
if remain is not None:
chunk_size = min(chunk_size, remain)
data = handle.read(chunk_size)
if remain is not None:
remain -= len(data)
if len(data) == 0:
return
yield data
| 5,351,094 |
def test_fcos_head_get_bboxes():
"""Test fcos head get_bboxes() in ort."""
fcos_model = fcos_config()
s = 128
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'img_shape': (s, s, 3),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3)
}]
cls_scores = [
torch.rand(1, fcos_model.num_classes, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
bboxes = [
torch.rand(1, 4, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
centerness = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
fcos_model.get_bboxes = partial(
fcos_model.get_bboxes, img_metas=img_metas, with_nms=False)
ort_validate(fcos_model.get_bboxes, (cls_scores, bboxes, centerness))
| 5,351,095 |
def dbg(message: str) -> None:
"""Prints a debug-level log message.
"""
_log(co.GRAY, "🖝 ", message)
| 5,351,096 |
def make_final_legend():
"""Makes the legend of figure 4 of the publication."""
fig = plt.figure(figsize=(10, 1))
me.get_final_graph_legend(fig)
fig.savefig("cumul_shuttle_leg.pdf")
| 5,351,097 |
def Vp_estimation(z, T, x, g=param.g):
""" Estimation of the Vp profile from the results of solving the system.
"""
DT = T - T[-1] # temperature variation in the layer compared to T[ICB]
drhoP = -param.rhoH**2. * g * z / Seismic_observations.calcK(z)
drhoT = -param.rhoH * param.alpha * DT # *(Mp*h+X*Mx)
rhoL = (param.rhoD - (1 - x[0]) * param.rhoH - drhoT[0] - drhoP[0]) / x[0]
# print rhoL
# rhoL2=x[0]/(1/(rhoD-drhoT[0]-drhoP[1])-(1-x[0])/rhoH)
# print rhoL
rho_new = x * rhoL + (1 - x) * param.rhoH + drhoT + drhoP
Vp_new = np.sqrt(Seismic_observations.calcK(z) / rho_new)
return rho_new, Vp_new
| 5,351,098 |
def ShowAllSchedUsage(cmd_args=None):
""" Prints the sched usage information for all threads of each task
"""
out_str = ''
for taskp in kern.tasks:
ShowTask([unsigned(taskp)])
print ShowAllSchedUsage.header
for actp in IterateQueue(taskp.threads, 'thread *', 'task_threads'):
out_str = "{: <#20x}".format(actp)
out_str += "{: ^10s}".format(str(int(actp.sched_pri)))
state = int(actp.state)
thread_state_chars = {0:'', 1:'W', 2:'S', 4:'R', 8:'U', 16:'H', 32:'A', 64:'P', 128:'I'}
state_str = ''
state_str += thread_state_chars[int(state & 0x1)]
state_str += thread_state_chars[int(state & 0x2)]
state_str += thread_state_chars[int(state & 0x4)]
state_str += thread_state_chars[int(state & 0x8)]
state_str += thread_state_chars[int(state & 0x10)]
state_str += thread_state_chars[int(state & 0x20)]
state_str += thread_state_chars[int(state & 0x40)]
state_str += thread_state_chars[int(state & 0x80)]
out_str += "{: ^10s}".format(state_str)
out_str += "{: >15d}".format(actp.sched_usage)
print out_str + "\n"
print "\n\n"
| 5,351,099 |