content (stringlengths 22–815k) | id (int64 0–4.91M) |
---|---|
def expand_template(template, variables, imports, raw_imports=None):
"""Expand a template."""
if raw_imports is None:
raw_imports = imports
env = jinja2.Environment(loader=OneFileLoader(template))
template = env.get_template(template)
return template.render(imports=imports, variables=variables, raw_imports=raw_imports)
| 5,346,800 |
def __contains__(container: Any, item: Any, /) -> bool:
"""Check if the first item contains the second item: `b in a`."""
container_type = type(container)
try:
contains_method = debuiltins._mro_getattr(container_type, "__contains__")
except AttributeError:
# Cheating until `for` is unravelled (and thus iterators).
return debuiltins.any(x is item or x == item for x in container)
else:
if contains_method is None:
raise TypeError(f"{container_type.__name__!r} object is not a container")
is_contained = contains_method(container, item)
return truth(is_contained)
| 5,346,801 |
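A small illustrative sketch (with a made-up `Box` class, not part of the snippet) of the protocol this re-implementation mirrors: `x in obj` dispatches to `__contains__` looked up on the type, and falls back to iteration when the method is absent.

```python
class Box:
    def __init__(self, items):
        self._items = list(items)

    def __contains__(self, item):
        # `x in box` calls type(box).__contains__(box, x)
        return item in self._items

box = Box([1, 2, 3])
assert 2 in box        # True via Box.__contains__
assert 99 not in box   # `not in` negates the same lookup
```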
def generate_search_url(request_type):
"""Given a request type, generate a query URL for kitsu.io."""
url = BASE_URL_KITSUIO.format(request_type)
return url
| 5,346,802 |
def register_magic(func: Callable[[Expr], Expr]):
"""
Make a magic command more like Julia's macro system.
Instead of using string, you can register a magic that uses Expr as the
input and return a modified Expr. It is usually easier and safer to
execute metaprogramming this way.
Parameters
----------
func : Callable[[Expr], Expr]
Function that will be used as a magic command.
Returns
-------
Callable
Registered function itself.
Examples
--------
.. code-block:: python
@register_magic
def print_code(expr):
print(expr)
return expr
The ``print_code`` magic is registered as an ipython magic.
.. code-block:: python
%print_code a = 1
.. code-block:: python
%%print_code
def func(a):
return a + 1
"""
@register_line_cell_magic
@needs_local_scope
@wraps(func)
def _ipy_magic(line: str, cell: str = None, local_ns=None):
if cell is None:
cell = line
block = parse(cell)
block_out = func(block)
return block_out.eval(local_ns, local_ns)
return func
| 5,346,803 |
def flowcellDirFastqToBwaBamFlow(self, taskPrefix="", dependencies=set()) :
"""
Takes as input 'flowcellFastqDir' pointing to the CASAVA 1.8 flowcell
project/sample fastq directory structure. For each project/sample,
the fastqs are aligned using BWA, sorted and merged into a single
BAM file. The bam output is placed in a parallel project/sample
directory structure below 'flowcellBamDir'
params:
samtoolsBin
flowcellFastqDir
flowcellBamDir
calls:
FastqPairToBwaBamFlow
supplies:
bamFile
fastq1File
fastq2File
"""
#
# 1. separate fastqs into matching pairs:
#
fqs = {}
fqDigger = FileDigger(".fastq.gz", ["Project_", "Sample_"])
for (project, sample, fqPath) in fqDigger.getNextFile(self.params.flowcellFastqDir) :
if (self.params.sampleNameList != None) and \
(len(self.params.sampleNameList) != 0) and \
(sample not in self.params.sampleNameList) : continue
fqFile = os.path.basename(fqPath)
w = (fqFile.split(".")[0]).split("_")
if len(w) != 5 :
raise Exception("Unexpected fastq filename format: '%s'" % (fqPath))
(sample2, index, lane, read, num) = w
if sample != sample2 :
raise Exception("Fastq name sample disagrees with directory sample: '%s;" % (fqPath))
key = (project, sample, index, lane, num)
if key not in fqs : fqs[key] = [None, None]
readNo = int(read[1])
if fqs[key][readNo - 1] is not None :
raise Exception("Unresolvable repeated fastq file pattern in sample: '%s'" % (fqPath))
fqs[key][readNo - 1] = fqPath
ensureDir(self.params.flowcellBamDir)
#
# 2. run all fastq pairs through BWA:
#
nextWait = set()
for key in fqs.keys() :
(project, sample, index, lane, num) = key
sampleBamDir = os.path.join(self.params.flowcellBamDir, "Project_" + project, "Sample_" + sample)
ensureDir(sampleBamDir)
keytag = "_".join(key)
self.params.bamFile = os.path.join(sampleBamDir, keytag + ".bam")
self.params.fastq1File = fqs[key][0]
self.params.fastq2File = fqs[key][1]
nextWait.add(self.addWorkflowTask(preJoin(taskPrefix, keytag), FastqPairToBwaBamFlow(self.params), dependencies=dependencies))
return nextWait
| 5,346,804 |
def ValidatePregnum(resp):
"""Validate pregnum in the respondent file.
resp: respondent DataFrame
"""
# read the pregnancy frame
preg = nsfg.ReadFemPreg()
# make the map from caseid to list of pregnancy indices
preg_map = nsfg.MakePregMap(preg)
# iterate through the respondent pregnum series
for index, pregnum in resp.pregnum.items():
caseid = resp.caseid[index]
indices = preg_map[caseid]
# check that pregnum from the respondent file equals
# the number of records in the pregnancy file
if len(indices) != pregnum:
print(caseid, len(indices), pregnum)
return False
return True
| 5,346,805 |
def is_super_admin(view, view_args, view_kwargs, *args, **kwargs):
"""
Permission function for things allowed exclusively to super admin.
Do not use this if the resource is also accessible by a normal admin, use the is_admin decorator instead.
:return:
"""
user = current_user
if not user.is_super_admin:
return ForbiddenError({'source': ''}, 'Super admin access is required').respond()
return view(*view_args, **view_kwargs)
| 5,346,806 |
def normalize_depth(val, min_v, max_v):
"""
print 'nomalized depth value'
nomalize values to 0-255 & close distance value has high value. (similar to stereo vision's disparity map)
"""
return (((max_v - val) / (max_v - min_v)) * 255).astype(np.uint8)
| 5,346,807 |
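A small worked example (illustrative values) of the inversion: nearer points map to larger 8-bit values.

```python
import numpy as np

depth = np.array([0.5, 1.0, 2.0])               # e.g. metres
out = normalize_depth(depth, min_v=0.5, max_v=2.0)
print(out)                                      # [255 170   0]
```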
def main():
"""Build and package OpenSSL."""
parser = argparse.ArgumentParser(prog='OpenSSL setup',
description='This script will compile '
'OpenSSL 1.0.1+ and optionally create '
'a native macOS package.')
parser.add_argument('-b', '--build', action='store_true',
help='Compile the OpenSSL binary')
parser.add_argument('-s', '--skip', action='store_true',
help='Skip recompiling if possible. Only recommended '
'for development purposes.')
parser.add_argument('-p', '--pkg', action='store_true',
help='Package the OpenSSL output directory.')
parser.add_argument('-i', '--install', action='store_true',
help='Install the OpenSSL package.')
parser.add_argument('-v', '--verbose', action='count', default=1,
help="Increase verbosity level. Repeatable up to "
"2 times (-vv)")
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# set argument variables
log.verbose = args.verbose
skip = args.skip
root.root_check()
if args.build:
log.info("Bulding OpenSSL...")
check_dir = os.path.isdir(PKG_PAYLOAD_DIR)
# When the skip option is passed and the build directory exists, skip
# download and compiling of openssl. Note we still do linking.
if (skip and check_dir):
log.debug("Skip flag was provided. We will not compile OpenSSL "
"on this run.")
else:
download_and_extract_openssl()
build()
current_certs()
if args.pkg:
log.info("Building a package for OpenSSL...")
# Change back into our local directory so we can output our package
# via relative paths
os.chdir(CURRENT_DIR)
version = CONFIG['openssl_version']
rc = package.pkg(root=PKG_PAYLOAD_DIR,
version=version,
identifier="{}.openssl".format(CONFIG['pkgid']),
output='openssl-{}.pkg'.format(version),
)
if rc == 0:
log.info("OpenSSL packaged properly")
else:
log.error("Looks like package creation failed")
if args.install:
log.info("Installing OpenSSL pacakge...")
os.chdir(CURRENT_DIR)
cmd = ['/usr/sbin/installer', '-pkg',
'openssl-{}.pkg'.format(version), '-tgt', '/']
runner.Popen(cmd)
| 5,346,808 |
def test_matrix_split_7(interactions_ds):
"""Test if error is thrown with an invalid value of item_test_ratio (> 1)."""
try:
matrix_split(interactions_ds, item_test_ratio=2)
except Exception as e:
assert str(e) == 'Invalid item_test_ratio of 2: must be in the range (0, 1]'
| 5,346,809 |
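As written, the test passes silently if no exception is raised at all; assuming pytest is the runner, `pytest.raises` would make that case fail explicitly — a possible tightening, not the author's code.

```python
import pytest

def test_matrix_split_7_alt(interactions_ds):
    """Same check, but fails if matrix_split does not raise."""
    with pytest.raises(Exception, match=r"Invalid item_test_ratio of 2"):
        matrix_split(interactions_ds, item_test_ratio=2)
```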
def parse_iori_block(block):
"""Turn IORI data blocks into `IoriData` objects.
Convert rotation from Quaternion format to Euler angles.
Parameters
----------
block: list of KVLItem
A list of KVLItem corresponding to a IORI data block.
Returns
-------
iori_data: IoriData
A IoriData object holding the IORI information of a block.
"""
block_dict = {
s.key: s for s in block
}
data = block_dict['IORI'].value * 1.0 / block_dict["SCAL"].value
rotation = np.array([R.from_quat(q).as_euler('zyx', degrees=True) for q in data])
z, y, x = rotation.T
return IoriData(
cts = block_dict['STMP'].value,
z = z,
y = y,
x = x,
)
| 5,346,810 |
def signal_process(logger, pid, signalnum):
"""Signal process with signal, N/A on Windows."""
try:
os.kill(pid, signalnum)
logger.info("Waiting for process to report")
time.sleep(5)
except OSError as err:
logger.error("Hit OS error trying to signal process: %s", err)
except AttributeError:
logger.error("Cannot send signal to a process on Windows")
| 5,346,811 |
def ungap_all(align):
"""
Removes all gaps (``-`` symbols) from all sequences of the :class:`~data.Align`
instance *align* and returns the resulting ~data.Container instance.
"""
result = data.Container()
for n,s,g in align:
result.append(n, s.translate(None, '-'), g)
return result
| 5,346,812 |
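`str.translate(None, '-')` is the Python 2 form; under Python 3 the same degapping could be written as below (a sketch, not part of the original module).

```python
s = "ATG--CC-A"
degapped = s.translate(str.maketrans('', '', '-'))    # Python 3 form
assert degapped == "ATGCCA" == s.replace('-', '')     # simpler alternative for one character
```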
def expandBcv(bcv):
"""If the bcv is an interval, expand if.
"""
if len(bcv) == 6:
return bcv
else:
return "-".join(splitBcv(bcv))
| 5,346,813 |
def plot_makers(args):
"""
Visualize the differential expression of marker genes across clusters.
"""
adata = sc.read_h5ad(input_file)
# choose the plotting types
if len(plot_type) != 0:
if "violin" in plot_type:
print("plotting violin")
sc.pl.violin(adata, gene_list, groupby=groupby, show=show, use_raw=use_raw, save=project+"."+figure_type)
if "dotplot" in plot_type:
print("plotting dotplot")
sc.pl.dotplot(adata, gene_list, groupby=groupby, show=show, use_raw=use_raw, save=project+"."+figure_type)
if "stacked_violin" in plot_type:
print("plotting stacked_violin")
sc.pl.stacked_violin(adata, gene_list, groupby=groupby, rotation=90, show=show, use_raw=use_raw, save=project+"."+figure_type)
if "rank_genes_groups_violin" in plot_type:
print("plotting rank_genes_groups_violin")
sc.pl.rank_genes_groups_violin(adata, groups=groups, n_genes=n_genes, show=show, use_raw=use_raw, save=project+"."+figure_type)
if "umap" in plot_type:
print("plotting umap")
sc.pl.umap(adata, color=gene_list, show=show, use_raw=use_raw, save="_gene_expr"+project+"."+figure_type)
else:
print("No such type of plot")
| 5,346,814 |
def web():
"""Start the salmon web server"""
app = create_app() # noqa: F841
click.secho(f"Running webserver on http://127.0.0.1:{config.WEB_PORT}", fg="cyan")
loop.run_forever()
| 5,346,815 |
def get_starting_dir_abs_path() -> str:
"""
Returns the absolute path to the starting directory of the project. Starting directory is used for example for
turning relative paths (from Settings) into absolute paths (those paths are relative to the starting directory).
"""
if _starting_dir is None:
dir_path = os.getenv("QF_STARTING_DIRECTORY")
if dir_path is None:
raise KeyError("Starting directory wasn't set. Use set_starting_dir_abs_path() function "
"or set the environment variable QF_STARTING_DIRECTORY to the proper value")
else:
return dir_path
else:
return _starting_dir
| 5,346,816 |
def cluster_profile_platform(cluster_profile):
"""Translate from steps.cluster_profile to workflow.as slugs."""
if cluster_profile == 'azure4':
return 'azure'
if cluster_profile == 'packet':
return 'metal'
return cluster_profile
| 5,346,817 |
def get_price_lambda_star_lp_1_cvxpy(w: np.ndarray, c_plus: np.ndarray, psi_plus: np.ndarray) \
-> float:
"""
Computes lambda_star based on dual program of the projection of w_star.
:param w: current state in workload space.
:param c_plus: vector normal to the level set in the monotone region 'right above' the face.
:param psi_plus: vector normal to the closest face.
:return: lambda_star: price of random oscillations along the closest face.
"""
assert not StrategicIdlingHedging._is_w_inside_artificial_monotone_region(w, psi_plus)
num_wl = w.shape[0]
lambda_var = cvx.Variable(1)
v_dagger_var = cvx.Variable((num_wl, 1))
objective = cvx.Maximize(v_dagger_var.T @ w)
constraints = [c_plus - v_dagger_var - lambda_var * psi_plus == 0,
v_dagger_var >= 0]
prob = cvx.Problem(objective, constraints)
_ = prob.solve(solver=cvx.SCS, eps=1e-8)
lambda_star = lambda_var.value[0]
if prob.status != 'optimal':
lambda_star = None
return lambda_star
| 5,346,818 |
def _get_nodes(
network: typing.Union[NetworkIdentifier, Network],
sample_size: typing.Optional[int],
predicate: typing.Callable,
) -> typing.List[Node]:
"""Decaches domain objects: Node.
"""
nodeset = [i for i in get_nodes(network) if predicate(i)]
if sample_size is not None:
sample_size = min(sample_size, len(nodeset))
return nodeset if sample_size is None else random.sample(nodeset, sample_size)
| 5,346,819 |
def cdl_key():
"""Four-class system (grain, forage, vegetable, orchard. Plus 5: non-ag/undefined"""
key = {1: ('Corn', 1),
2: ('Cotton', 1),
3: ('Rice', 1),
4: ('Sorghum', 1),
5: ('Soybeans', 1),
6: ('Sunflower', 1),
7: ('', 5),
8: ('', 5),
9: ('', 5),
10: ('Peanuts', 1),
11: ('Tobacco', 2),
12: ('Sweet Corn', 1),
13: ('Pop or Orn Corn', 1),
14: ('Mint', 2),
15: ('', 5),
16: ('', 5),
17: ('', 5),
18: ('', 5),
19: ('', 5),
20: ('', 5),
21: ('Barley', 1),
22: ('Durum Wheat', 1),
23: ('Spring Wheat', 1),
24: ('Winter Wheat', 1),
25: ('Other Small Grains', 1),
26: ('Dbl Crop WinWht/Soybeans', 1),
27: ('Rye', 1),
28: ('Oats', 1),
29: ('Millet', 1),
30: ('Speltz', 1),
31: ('Canola', 1),
32: ('Flaxseed', 1),
33: ('Safflower', 1),
34: ('Rape Seed', 1),
35: ('Mustard', 1),
36: ('Alfalfa', 3),
37: ('Other Hay/Non Alfalfa', 3),
38: ('Camelina', 1),
39: ('Buckwheat', 1),
40: ('', 5),
41: ('Sugarbeets', 2),
42: ('Dry Beans', 2),
43: ('Potatoes', 2),
44: ('Other Crops', 2),
45: ('Sugarcane', 2),
46: ('Sweet Potatoes', 2),
47: ('Misc Vegs & Fruits', 2),
48: ('Watermelons', 2),
49: ('Onions', 2),
50: ('Cucumbers', 2),
51: ('Chick Peas', 2),
52: ('Lentils', 2),
53: ('Peas', 2),
54: ('Tomatoes', 2),
55: ('Caneberries', 2),
56: ('Hops', 2),
57: ('Herbs', 2),
58: ('Clover/Wildflowers', 3),
59: ('Sod/Grass Seed', 3),
60: ('Switchgrass', 3),
61: ('Fallow/Idle Cropland', 3),
62: ('Pasture/Grass', 3),
63: ('Forest', 5),
64: ('Shrubland', 5),
65: ('Barren', 5),
66: ('Cherries', 4),
67: ('Peaches', 4),
68: ('Apples', 4),
69: ('Grapes', 4),
70: ('Christmas Trees', 4),
71: ('Other Tree Crops', 4),
72: ('Citrus', 4),
73: ('', 5),
74: ('Pecans', 4),
75: ('Almonds', 4),
76: ('Walnuts', 4),
77: ('Pears', 4),
78: ('', 5),
79: ('', 5),
80: ('', 5),
81: ('Clouds/No Data', 5),
82: ('Developed', 5),
83: ('Water', 5),
84: ('', 5),
85: ('', 5),
86: ('', 5),
87: ('Wetlands', 5),
88: ('Nonag/Undefined', 5),
89: ('', 5),
90: ('', 5),
91: ('', 5),
92: ('Aquaculture', 5),
93: ('', 5),
94: ('', 5),
95: ('', 5),
96: ('', 5),
97: ('', 5),
98: ('', 5),
99: ('', 5),
100: ('', 5),
101: ('', 5),
102: ('', 5),
103: ('', 5),
104: ('', 5),
105: ('', 5),
106: ('', 5),
107: ('', 5),
108: ('', 5),
109: ('', 5),
110: ('', 5),
111: ('Open Water', 5),
112: ('Perennial Ice/Snow', 5),
113: ('', 5),
114: ('', 5),
115: ('', 5),
116: ('', 5),
117: ('', 5),
118: ('', 5),
119: ('', 5),
120: ('', 5),
121: ('Developed/Open Space', 5),
122: ('Developed/Low Intensity', 5),
123: ('Developed/Med Intensity', 5),
124: ('Developed/High Intensity', 5),
125: ('', 5),
126: ('', 5),
127: ('', 5),
128: ('', 5),
129: ('', 5),
130: ('', 5),
131: ('Barren', 5),
132: ('', 5),
133: ('', 5),
134: ('', 5),
135: ('', 5),
136: ('', 5),
137: ('', 5),
138: ('', 5),
139: ('', 5),
140: ('', 5),
141: ('Deciduous Forest', 5),
142: ('Evergreen Forest', 5),
143: ('Mixed Forest', 5),
144: ('', 5),
145: ('', 5),
146: ('', 5),
147: ('', 5),
148: ('', 5),
149: ('', 5),
150: ('', 5),
151: ('', 5),
152: ('Shrubland', 5),
153: ('', 5),
154: ('', 5),
155: ('', 5),
156: ('', 5),
157: ('', 5),
158: ('', 5),
159: ('', 5),
160: ('', 5),
161: ('', 5),
162: ('', 5),
163: ('', 5),
164: ('', 5),
165: ('', 5),
166: ('', 5),
167: ('', 5),
168: ('', 5),
169: ('', 5),
170: ('', 5),
171: ('', 5),
172: ('', 5),
173: ('', 5),
174: ('', 5),
175: ('', 5),
176: ('Grassland/Pasture', 5),
177: ('', 5),
178: ('', 5),
179: ('', 5),
180: ('', 5),
181: ('', 5),
182: ('', 5),
183: ('', 5),
184: ('', 5),
185: ('', 5),
186: ('', 5),
187: ('', 5),
188: ('', 5),
189: ('', 5),
190: ('Woody Wetlands', 5),
191: ('', 5),
192: ('', 5),
193: ('', 5),
194: ('', 5),
195: ('Herbaceous Wetlands', 5),
196: ('', 5),
197: ('', 5),
198: ('', 5),
199: ('', 5),
200: ('', 5),
201: ('', 5),
202: ('', 5),
203: ('', 5),
204: ('Pistachios', 4),
205: ('Triticale', 1),
206: ('Carrots', 2),
207: ('Asparagus', 2),
208: ('Garlic', 2),
209: ('Cantaloupes', 2),
210: ('Prunes', 2),
211: ('Olives', 2),
212: ('Oranges', 3),
213: ('Honeydew Melons', 2),
214: ('Broccoli', 2),
215: ('Avocados', 2),
216: ('Peppers', 2),
217: ('Pomegranates', 4),
218: ('Nectarines', 4),
219: ('Greens', 2),
220: ('Plums', 4),
221: ('Strawberries', 2),
222: ('Squash', 2),
223: ('Apricots', 4),
224: ('Vetch', 3),
225: ('Dbl Crop WinWht/Corn', 1),
226: ('Dbl Crop Oats/Corn', 1),
227: ('Lettuce', 2),
228: ('', 1),
229: ('Pumpkins', 2),
230: ('Dbl Crop Lettuce/Durum Wht', 2),
231: ('Dbl Crop Lettuce/Cantaloupe', 2),
232: ('Dbl Crop Lettuce/Cotton', 2),
233: ('Dbl Crop Lettuce/Barley', 2),
234: ('Dbl Crop Durum Wht/Sorghum', 1),
235: ('Dbl Crop Barley/Sorghum', 1),
236: ('Dbl Crop WinWht/Sorghum', 1),
237: ('Dbl Crop Barley/Corn', 1),
238: ('Dbl Crop WinWht/Cotton', 1),
239: ('Dbl Crop Soybeans/Cotton', 1),
240: ('Dbl Crop Soybeans/Oats', 1),
241: ('Dbl Crop Corn/Soybeans', 1),
242: ('Blueberries', 2),
243: ('Cabbage', 2),
244: ('Cauliflower', 2),
245: ('Celery', 2),
246: ('Radishes', 2),
247: ('Turnips', 2),
248: ('Eggplants', 2),
249: ('Gourds', 2),
250: ('Cranberries', 2),
251: ('', 5),
252: ('', 5),
253: ('', 5),
254: ('Dbl Crop Barley/Soybeans', 1),
255: ('', 5)}
return key
| 5,346,820 |
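Reading an assignment out of the returned table, e.g. (the group-code meanings below are inferred from the entries, not stated in the source):

```python
key = cdl_key()
name, group = key[36]      # ('Alfalfa', 3)
# apparent group codes: 1=grain, 2=vegetable, 3=forage, 4=orchard, 5=non-ag/undefined
```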
def create_signal(fs,N):
""" create signal with gaussian noise"""
dt = 1./fs
t = np.linspace(0,N*dt,N)
| 5,346,821 |
def test_ebm(ra, dec, smap=0, nest=False):
"""Make some tests."""
# Parse input
coordinates = SkyCoord(ra=ra, dec=dec, unit=units.degree)
# Convert to galactic coordinates.
l = coordinates.galactic.l.degree
b = coordinates.galactic.b.degree
theta = (90. - b) * np.pi / 180.
phi = l * np.pi / 180.
print("l, b = %.3f, %.3f" % (l, b))
print("theta, phi = %.3f, %.3f" % (theta, phi))
m = load_map(smap)
# from this code
if smap == 5:
nest = True
ebv = healpy.get_interp_val(m, theta, phi, nest=nest)
# from astroquery
t = others.astroquery.get_extinction_table('%.4f %.4f' % (ra, dec))
if smap in [0, 2, 3]:
t = t[9]['A_SFD'] / t[9]['A_over_E_B_V_SFD']
else:
t = t[9]['A_SandF'] / t[9]['A_over_E_B_V_SandF']
print(t)
# from SNf code (ned)
f = snfactory.sfd_ebmv(ra, dec)
# from sncosmo
sn = sncosmo.get_ebv_from_map([ra, dec], mapdir='/home/chotard/.extinction/maps/')
# from other query
ebv_sfd = argonaut.query(ra, dec, coordsys='equ', mode='sfd')['EBV_SFD'][0]
print("\nAll results:")
print(" - Healpy (lambda/nasa map): %.5f" % ebv)
print(" - Astropy/IrsaDust: %.5f" % t)
print(" - SNf code (irsa or ned): %.5f" % f, f)
print(" - sncosmo (local N/S maps): %.5f" % sn)
print(" - argonaut.skypams: %.5f" % ebv_sfd)
| 5,346,822 |
def colmeta(colname, infile=None, name=None, units=None, ucd=None, desc=None,
outfile=None):
"""
Modifies the metadata of one or more columns. Some or all of the name,
units, ucd, utype and description of the column(s),
identified by "colname" can be set by using some or all of the listed flags.
Typically, "colname" will simply be the name of a single column.
:param colname: string, name of the column to change meta data for
:param infile: string, the location and file name for the input file, if
not defined will return the STILTS command string
:param outfile: string, the location and file name for the output file,
if not defined will default to infile
:param name: string, new name for the column
:param units: string, new unit for the column
:param ucd: string, new UCD for the column
:param desc: string, new description for the column
:return:
"""
cmdstr = "colmeta "
if name is None and units is None and ucd is None and desc is None:
return 0
if name is not None:
cmdstr += '-name {0} '.format(__checkq__(str(name)))
if units is not None:
cmdstr += '-units {0} '.format(__checkq__(str(units)))
if ucd is not None:
cmdstr += '-ucd {0} '.format(__checkq__(str(ucd)))
if desc is not None:
cmdstr += '-desc {0} '.format(__checkq__(str(desc)))
cmdstr += '{0}'.format(colname)
if infile is None:
return cmdstr
if outfile is not None:
tpipe(cmdstr, infile=infile, outfile=outfile)
else:
tpipe(cmdstr, infile=infile, outfile=infile)
| 5,346,823 |
def get_confusion_matrix_chart(cm, title):
"""Plot custom confusion matrix chart."""
source = pd.DataFrame([[0, 0, cm['TN']],
[0, 1, cm['FP']],
[1, 0, cm['FN']],
[1, 1, cm['TP']],
], columns=["actual values", "predicted values", "count"])
base = alt.Chart(source).encode(
y='actual values:O',
x='predicted values:O',
).properties(
width=200,
height=200,
title=title,
)
rects = base.mark_rect().encode(
color='count:Q',
)
text = base.mark_text(
align='center',
baseline='middle',
color='black',
size=12,
dx=0,
).encode(
text='count:Q',
)
return rects + text
| 5,346,824 |
def get_ppo_plus_eco_params(scenario):
"""Returns the param for the 'ppo_plus_eco' method."""
assert scenario in DMLAB_SCENARIOS, (
'Non-DMLab scenarios not supported as of today by PPO+ECO method')
if scenario == 'noreward' or scenario == 'norewardnofire':
return md(get_common_params(scenario), {
'action_set': '' if scenario == 'noreward' else 'nofire',
'_gin.create_single_env.run_oracle_before_monitor': True,
'_gin.CuriosityEnvWrapper.scale_task_reward': 0.0,
'_gin.create_environments.scale_task_reward_for_eval': 0,
'_gin.create_environments.scale_surrogate_reward_for_eval': 1,
'_gin.OracleExplorationReward.reward_grid_size': 30,
'r_checkpoint': '',
'_gin.CuriosityEnvWrapper.scale_surrogate_reward':
0.03017241379310345,
'_gin.train.ent_coef': 0.002053525026457146,
'_gin.create_environments.online_r_training': True,
'_gin.RNetworkTrainer.observation_history_size': 60000,
'_gin.RNetworkTrainer.training_interval': -1,
'_gin.CuriosityEnvWrapper.exploration_reward_min_step': 60000,
'_gin.RNetworkTrainer.num_epochs': 10,
})
else:
return md(get_common_params(scenario), {
'action_set': '',
'r_checkpoint': '',
'_gin.EpisodicMemory.capacity': 200,
'_gin.similarity_to_memory.similarity_aggregation': 'percentile',
'_gin.EpisodicMemory.replacement': 'random',
'_gin.CuriosityEnvWrapper.scale_task_reward': 1.0,
'_gin.CuriosityEnvWrapper.scale_surrogate_reward':
0.03017241379310345,
'_gin.train.ent_coef': 0.002053525026457146,
'_gin.create_environments.online_r_training': True,
'_gin.RNetworkTrainer.observation_history_size': 60000,
'_gin.RNetworkTrainer.training_interval': -1,
'_gin.CuriosityEnvWrapper.exploration_reward_min_step': 60000,
'_gin.RNetworkTrainer.num_epochs': 10,
})
| 5,346,825 |
def field_display(name):
"""
Works with Django's get_FOO_display mechanism for fields with choices set. Given
the name of a field, returns a producer that calls get_<name>_display.
"""
return qs.include_fields(name), producers.method(f"get_{name}_display")
| 5,346,826 |
def set_trace(response):
"""
Set a header containing the request duration and push detailed trace to the MQ
:param response:
:return:
"""
if TRACE_PERFORMANCE:
req_time = int((time.time() - g.request_start) * 1000)
trace = {
"duration": req_time,
"depth": g.request_depth,
"method": g.request_method,
"url": g.request_url,
"uuid": g.request_uuid,
"sequence": g.request_seq_this,
"responseCode": response.status_code,
"dbTime": g.db_time
}
if g.first_request:
trace["totalRequestCount"] = g.request_seq_next
trace_publisher.push('trace', trace)
flask_logger.debug(f'request trace: {req_time} ms ({g.request_method} {g.request_url})')
response.headers.add('x-trace-request-time', str(req_time))
response.headers.add('x-trace-seq-next', str(g.request_seq_next))
return response
| 5,346,827 |
def extract_attachments(payload: Dict) -> List[Image]:
"""
Extract images from attachments.
There could be other attachments, but currently we only extract images.
"""
attachments = []
for item in payload.get('attachment', []):
# noinspection PyProtectedMember
if item.get("type") in ("Document", "Image") and item.get("mediaType") in Image._valid_media_types:
if item.get('pyfed:inlineImage', False):
# Skip this image as it's indicated to be inline in content and source already
continue
attachments.append(
ActivitypubImage(
url=item.get('url'),
name=item.get('name') or "",
media_type=item.get("mediaType"),
)
)
return attachments
| 5,346,828 |
def yolo3_mobilenet1_0_custom(
classes,
transfer=None,
pretrained_base=True,
pretrained=False,
norm_layer=BatchNorm, norm_kwargs=None,
**kwargs):
"""YOLO3 multi-scale with mobilenet base network on custom dataset.
Parameters
----------
classes : iterable of str
Names of custom foreground classes. `len(classes)` is the number of foreground classes.
transfer : str or None
If not `None`, will try to reuse pre-trained weights from yolo networks trained on other
datasets.
pretrained_base : boolean
Whether fetch and load pretrained weights for base network.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
Returns
-------
mxnet.gluon.HybridBlock
Fully hybrid yolo3 network.
"""
if transfer is None:
base_net = get_mobilenet(multiplier=1,
pretrained=pretrained_base,
norm_layer=norm_layer, norm_kwargs=norm_kwargs,
**kwargs)
stages = [base_net.features[:33],
base_net.features[33:69],
base_net.features[69:-2]]
anchors = [
[10, 13, 16, 30, 33, 23],
[30, 61, 62, 45, 59, 119],
[116, 90, 156, 198, 373, 326]]
strides = [8, 16, 32]
net = get_yolov3(
'mobilenet1.0', stages, [512, 256, 128], anchors, strides, classes, 'voc',
pretrained=pretrained, norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
else:
from ...model_zoo import get_model
net = get_model(
'yolo3_mobilenet1.0_' +
str(transfer),
pretrained=True,
**kwargs)
net.reset_class(classes)
return net
| 5,346,829 |
def calculate_levenshtein_distance(str_1, str_2):
"""
The Levenshtein distance is a string metric for measuring the difference between two sequences.
It is calculated as the minimum number of single-character edits necessary to transform one string into another
"""
distance = 0
buffer_removed = buffer_added = 0
for x in ndiff(str_1, str_2):
code = x[0]
# Code ? is ignored as it does not translate to any modification
if code == ' ':
distance += max(buffer_removed, buffer_added)
buffer_removed = buffer_added = 0
elif code == '-':
buffer_removed += 1
elif code == '+':
buffer_added += 1
distance += max(buffer_removed, buffer_added)
return distance
| 5,346,830 |
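A quick worked example with the classic pair of strings:

```python
assert calculate_levenshtein_distance("kitten", "sitting") == 3
# k->s (substitution), e->i (substitution), +g (insertion)
```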
def make_datetime(value, *, format_=DATETIME_FORMAT):
"""
>>> make_datetime('2001-12-31T23:59:59')
datetime.datetime(2001, 12, 31, 23, 59, 59)
"""
return datetime.datetime.strptime(value, format_)
| 5,346,831 |
def log_batch_account_list(batch_mgmt_client, config, resource_group=None):
# type: (azure.mgmt.batch.BatchManagementClient, dict, str) -> None
"""Log Batch account properties from ARM
:param azure.mgmt.batch.BatchManagementClient batch_mgmt_client:
batch management client
:param dict config: configuration dict
:param str resource_group: resource group of Batch account
"""
if batch_mgmt_client is None:
raise RuntimeError(
'Batch management client is invalid, please specify management '
'aad credentials and valid subscription_id')
if resource_group is None:
accounts = batch_mgmt_client.batch_account.list()
else:
accounts = batch_mgmt_client.batch_account.list_by_resource_group(
resource_group)
mgmt_aad = settings.credentials_management(config)
log = ['all batch accounts in subscription {}'.format(
mgmt_aad.subscription_id)]
for ba in accounts:
log.extend(_generate_batch_account_log_entry(ba))
if len(log) == 1:
logger.error('no batch accounts found in subscription {}'.format(
mgmt_aad.subscription_id))
else:
logger.info(os.linesep.join(log))
| 5,346,832 |
def named_struct_dict(typename, field_names=None, default=None, fixed=False, *, structdict_module=__name__,
base_dict=None, sorted_repr=None, verbose=False, rename=False, module=None, qualname_prefix=None,
frame_depth=1):
"""Returns a new subclass of StructDict with all fields as properties."""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if fixed:
mixin_type = NamedFixedStructDictMixin.__name__
else:
mixin_type = NamedStructDictMixin.__name__
if inspect.isclass(base_dict):
base_dict = base_dict.__name__
if base_dict is None:
base_dict = 'dict'
elif base_dict not in ('dict', 'OrderedDict', 'SortedDict'):
raise NotImplementedError(f"base_dict: {base_dict} is not supported.")
if sorted_repr is None:
sorted_repr = True if base_dict in ('dict',) else False
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names)) if field_names else []
typename = str(typename)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f"_{index}"
seen.add(name)
for name in [typename, structdict_module] + field_names:
if type(name) is not str:
raise TypeError('Type names, field names and structdict_module must be strings')
if name is not structdict_module and not name.isidentifier():
raise ValueError(f"Type names and field names must be valid identifiers: {name!r}")
if _iskeyword(name):
raise ValueError(f"Type names and field names cannot be a keyword: {name!r}")
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError(f"Field names cannot start with an underscore: {name!r}")
if name in seen:
raise ValueError(f"Encountered duplicate field name: {name!r}")
seen.add(name)
default_val = "None" if default is None else 'default_val'
# Fill-in the class template
class_definition = _struct_prop_dict_class_template.format(
structdict_module=structdict_module,
mixin_type=mixin_type,
base_dict=base_dict,
typename=typename,
field_names=tuple(field_names),
kwargs_map=(", ".join([f"{field_name}={default_val}" for field_name in field_names]).replace("'", "")) + (
"," if field_names else ""),
kwargs_eq_map=(", ".join([f"{field_name}={field_name}" for field_name in field_names]).replace("'", "")) + (
"," if field_names else ""),
sorted_repr=sorted_repr
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__=f"struct_prop_dict_{typename}")
namespace.update(default_val=default)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named structdict is created. Bypass this step in environments where
# _sys._getframe is not defined (Jython for example) or _sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
try:
frame = _sys._getframe(frame_depth)
except (AttributeError, ValueError):
pass
else:
if module is None:
module = frame.f_globals.get('__name__', '__main__')
if qualname_prefix is None:
qualname_prefix = frame.f_locals.get('__qualname__', '')
if module is not None:
result.__module__ = module
if qualname_prefix:
result.__qualname__ = f'{qualname_prefix}.' + result.__qualname__
return result
| 5,346,833 |
def PytorchONNXRuntimeModel(model, input_sample=None, onnxruntime_session_options=None):
"""
Create a ONNX Runtime model from pytorch.
:param model: 1. Pytorch model to be converted to ONNXRuntime for inference
2. Path to ONNXRuntime saved model.
:param input_sample: A set of inputs for trace, defaults to None if you have trace before or
model is a LightningModule with any dataloader attached,
defaults to None.
:param onnxruntime_session_options: A session option for onnxruntime accelerator.
:return: A PytorchONNXRuntimeModel instance
"""
from .pytorch.pytorch_onnxruntime_model import PytorchONNXRuntimeModel
return PytorchONNXRuntimeModel(model, input_sample,
onnxruntime_session_options=onnxruntime_session_options)
| 5,346,834 |
def get_model_python_path():
"""
Returns the python path for a model
"""
return os.path.dirname(__file__)
| 5,346,835 |
def intensity_variance(mask: np.ndarray, image: np.ndarray) -> float:
"""Returns variance of all intensity values in region of interest."""
return np.var(image[mask])
| 5,346,836 |
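A tiny example (made-up values) of the boolean-mask indexing used here:

```python
import numpy as np

image = np.array([[1.0, 2.0], [3.0, 4.0]])
mask = np.array([[True, False], [True, True]])
print(intensity_variance(mask, image))   # variance of [1.0, 3.0, 4.0] ~= 1.556
```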
def permute_array(arr, axis=0):
"""Permute array along a certain axis
Args:
arr: numpy array
axis: axis along which to permute the array
"""
if axis == 0:
return np.random.permutation(arr)
else:
return np.random.permutation(arr.swapaxes(0, axis)).swapaxes(0, axis)
| 5,346,837 |
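Usage sketch: axis 0 shuffles rows directly; any other axis is handled by swapping it to the front, shuffling, and swapping back.

```python
import numpy as np

arr = np.arange(6).reshape(2, 3)
rows_shuffled = permute_array(arr, axis=0)   # row order randomized, each row intact
cols_shuffled = permute_array(arr, axis=1)   # column order randomized, each column intact
```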
def convertGMLToGeoJSON(config, outputDir, gmlFilepath, layerName, t_srs='EPSG:4326',
flip_gml_coords=False):
""" Convert a GML file to a shapefile. Will silently exit if GeoJSON already exists
@param config A Python ConfigParser containing the section 'GDAL/OGR' and option 'PATH_OF_OGR2OGR'
@param outputDir String representing the absolute/relative path of the directory into which GeoJSON should be written
@param gmlFilepath String representing the absolute path of the GML file to convert
@param layerName String representing the name of the layer contained in the GML file to write to a GeoJSON
@param t_srs String representing the spatial reference system of the output GeoJSON, of the form 'EPSG:XXXX'
@return String representing the name of the GeoJSON written
@exception Exception if the conversion failed.
"""
pathToOgrCmd = config.get('GDAL/OGR', 'PATH_OF_OGR2OGR')
if not os.path.isdir(outputDir):
raise IOError(errno.ENOTDIR, "Output directory %s is not a directory" % (outputDir,))
if not os.access(outputDir, os.W_OK):
raise IOError(errno.EACCES, "Not allowed to write to output directory %s" % (outputDir,))
outputDir = os.path.abspath(outputDir)
geojsonFilename = "%s.geojson" % (layerName,)
geojsonFilepath = os.path.join(outputDir, geojsonFilename)
if not os.path.exists(geojsonFilepath):
# Need to flip coordinates in GML as SSURGO WFS now returns coordinates in lat, lon order
# rather than lon, lat order that OGR expects. For more information, see:
# http://trac.osgeo.org/gdal/wiki/FAQVector#HowdoIflipcoordinateswhentheyarenotintheexpectedorder
if flip_gml_coords and t_srs =='EPSG:4326':
ogrCommand = "%s -f 'GeoJSON' -nln %s -s_srs '+proj=latlong +datum=WGS84 +axis=neu +wktext' -t_srs %s %s %s" % (pathToOgrCmd, layerName, t_srs, geojsonFilepath, gmlFilepath)
else:
ogrCommand = "%s -f 'GeoJSON' -nln %s -t_srs %s %s %s" % (pathToOgrCmd, layerName, t_srs, geojsonFilepath, gmlFilepath)
returnCode = os.system(ogrCommand)
if returnCode != 0:
raise Exception("GML to GeoJSON command %s returned %d" % (ogrCommand, returnCode))
return geojsonFilename
| 5,346,838 |
def interactive(pluginpath, preselect=None, content_type="video", compact_mode=False, no_crop=False):
"""
Execute a given kodi plugin
:param unicode pluginpath: The path to the plugin to execute.
:param list preselect: A list of pre selection to make.
:param str content_type: The content type to list, if more than one type is available. e.g. video, audio
:param bool compact_mode: If True the listitems view will be compacted, else full detailed. (default => False)
:param bool no_crop: Disable croping of long lines of text if True, (default => False)
"""
plugin_id = os.path.basename(pluginpath)
callback_url = base_url = u"plugin://{}/".format(plugin_id)
# Keep track of parents so we can have a '..' option to go back
parent_stack = []
while callback_url is not None:
if not callback_url.startswith(base_url):
raise RuntimeError("callback url is outside the scope of this addon: {}".format(callback_url))
# Execute the addon in a separate process
data = execute_addon(pluginpath, callback_url, content_type)
if data["succeeded"] is False:
print("Failed to execute addon. Please check log.")
try:
input_raw("Press enter to continue:")
except KeyboardInterrupt:
break
# Revert back to previous callback if one exists
if parent_stack:
callback_url = parent_stack.pop()
continue
else:
break
# Item list with first item as the previous directory item
items = [{"label": "..", "path": parent_stack[-1]}] if parent_stack else []
# Display listitem selection if listitems are found
if data["listitem"]:
items.extend(item[1] for item in data["listitem"])
elif data["resolved"]:
items.append(data["resolved"])
items.extend(data["playlist"][1:])
# Display the list of listitems for user to select
if compact_mode:
selected_item = compact_item_selector(items, callback_url, preselect)
else:
selected_item = detailed_item_selector(items, preselect, no_crop)
if selected_item:
if parent_stack and selected_item["path"] == parent_stack[-1]:
callback_url = parent_stack.pop()
else:
parent_stack.append(callback_url)
callback_url = selected_item["path"]
else:
break
| 5,346,839 |
def dsmatch(name, dataset, fn):
"""
Fuzzy search best matching object for string name in dataset.
Args:
name (str): String to look for
dataset (list): List of objects to search for
fn (function): Function to obtain a string from a element of the dataset
Returns:
First element with the maximun fuzzy ratio.
"""
max_ratio = 0
matching = None
for e in dataset:
if fuzz and name:
ratio = fuzz.token_sort_ratio(normalize(name), normalize(fn(e)))
if ratio > max_ratio:
max_ratio = ratio
matching = e
elif normalize(name) == normalize(fn(e)):
matching = e
break
return matching
| 5,346,840 |
def populate_listings(num_listings):
"""Populates the database with seed data."""
fake = Faker()
listing_types = []
listing_types_str = ["Houses", "Condos", "Apartments", "Town Houses"]
for listing_type_str in listing_types_str:
listing_type = ListingType(type_string=listing_type_str)
listing_types.append(listing_type)
db.session.add(listing_type)
db.session.commit()
listing_statuses = []
listing_statuses_str = ["Posted", "Verified", "Rejected", "Occupied"]
for listing_status_str in listing_statuses_str:
listing_status = ListingStatus(status_string=listing_status_str)
listing_statuses.append(listing_status)
db.session.add(listing_status)
db.session.commit()
listing_info = [
{
"title": "Isolated House",
"description": "House with a nice garden around",
"media_obj": {"title": "View from the ranch", "image_name": "Alone-house.jpg"},
"house_type": 1
},
{
"title": "Colorful apartment",
"description": "Apartments with balcony and blue borders",
"media_obj": {"title": "View from across the street", "image_name": "Apartments.jpg"},
"house_type": 3
},
{
"title": "Amazing palace",
"description": "Fully furnished palace",
"media_obj": {"title": "View from the garden", "image_name": "Big-building.jpg"},
"house_type": 1
},
{
"title": "Beach side house",
"description": "A safe home with amazing view of a beach",
"media_obj": {"title": "View across the street", "image_name": "Blue-brown.jpg"},
"house_type": 1
},
{
"title": "Victorian house",
"description": "Victorian houses with funky colors",
"media_obj": {"title": "View across the street", "image_name": "Colored-apartments.jpg"},
"house_type": 1
},
{
"title": "Serene country-side cottage",
"description": "Cozy cottage on a hill side",
"media_obj": {"title": "View with chimney", "image_name": "Cottage.jpg"},
"house_type": 1
},
{
"title": "House surrounded by plants",
"description": "House tucked inside a forest",
"media_obj": {"title": "View with pool", "image_name": "Forest-house.jpg"},
"house_type": 1
},
{
"title": "House with big glass windows",
"description": "Futuristic glass walled house",
"media_obj": {"title": "View from garden with snow", "image_name": "Glass-building.jpg"},
"house_type": 1
},
{
"title": "House on a lake between mountains",
"description": "Floating house on the lake, with view of mountain",
"media_obj": {"title": "Good view", "image_name": "good-house.jpg"},
"house_type": 1
},
{
"title": "House with a nice pool",
"description": "House with a nice garden and a pool",
"media_obj": {"title": "View with the pool", "image_name": "House-with-pool.jpg"},
"house_type": 1
},
{
"title": "House with a hill-side view",
"description": "House on a hill side with a nice pool",
"media_obj": {"title": "View with mountains", "image_name": "Infinity-pool.jpg"},
"house_type": 1
},
{
"title": "House on a lake",
"description": "Lake house, ideal for horror movies",
"media_obj": {"title": "View across the lake", "image_name": "Lake-house.jpg"},
"house_type": 1
},
{
"title": "Simple house",
"description": "Simple house, ready to move in",
"media_obj": {"title": "House with pumpkins", "image_name": "Single-house.jpg"},
"house_type": 1
},
{
"title": "Cozy house beside a lake",
"description": "Cozy house in a cold climate beside the lake",
"media_obj": {"title": "House across the lake", "image_name": "Snow-house.jpg"},
"house_type": 1
},
{
"title": "Moroccan house",
"description": "Clean and simple architecture",
"media_obj": {"title": "House with small windows", "image_name": "White-house.jpg"},
"house_type": 1
},
{
"title": "Condo on the corner",
"description": "On the corner, closer to everything",
"media_obj": {"title": "Condo", "image_name": "condo.jpg"},
"house_type": 2
}
]
listings = []
verified_status_id = list(filter(lambda ltype: ltype.status_string == "Verified", listing_statuses))[0].id
for idx in range(num_listings):
listings.append(
Listing(
title=listing_info[idx]["title"],
description=listing_info[idx]["description"],
for_rent_flag=choice([True, False]),
building_number=choice(["#", "No.", ""]) + str(randint(0, 100)),
apartment=choice(["Sofi", "Aragon", "Northpoint", "Southpoint", ""]),
street_name=fake.street_name(),
city=choice(["San Francisco", "San Jose", "Sunnywale"]),
state="California",
zip_code=fake.zipcode(),
country="United States of America",
listing_price=randint(10, 50) * choice([50, 100, 200, 300]),
listing_status=verified_status_id,
listing_type=listing_info[idx]["house_type"],
listing_user=choice([1, 2, 3, 4, 5]),
listing_views=0,
is_furnished=choice([False, True]),
square_footage=choice([10, 30, 40]) * choice([50, 75, 100]),
num_baths=randint(1, 4),
num_beds=randint(1, 4),
distance=randint(1,100),
num_parking_spots=randint(1, 3),
pet_policy=choice([False, True]),
smoking_policy=choice([False, True]),
media=[],
)
)
for listing in listings:
db.session.add(listing)
db.session.commit()
media_list = []
for idx in range(num_listings):
media_obj = listing_info[idx]["media_obj"]
media = Media(
listing_id=(idx+1),
media_title=media_obj["title"],
media_path=media_obj["image_name"],
)
media_list.append(media)
db.session.add(media)
db.session.commit()
| 5,346,841 |
def _generate_flame_clip_name(item, publish_fields):
"""
Generates a name which will be displayed in the dropdown in Flame.
:param item: The publish item being processed.
:param publish_fields: Publish fields
:returns: name string
"""
# this implementation generates names on the following form:
#
# Comp, scene.nk (output background), v023
# Comp, Nuke, v023
# Lighting CBBs, final.nk, v034
#
# (depending on what pieces are available in context and names, names
# may vary)
context = item.context
name = ""
# If we have template fields passed in, then we'll try to extract
# some information from them. If we don't, then we fall back on
# some defaults worked out below.
publish_fields = publish_fields or dict()
# the shot will already be implied by the clip inside Flame (the clip
# file which we are updating is a per-shot file. But if the context
# contains a task or a step, we can display that:
if context.task:
name += "%s, " % context.task["name"].capitalize()
elif context.step:
name += "%s, " % context.step["name"].capitalize()
# If we have a channel set for the write node or a name for the scene,
# add those. If we don't have a name from the template fields, then we
# fall back on the file sequence's basename without the extension or
# frame number on the end (if possible).
default_name, _ = os.path.splitext(
os.path.basename(item.properties["sequence_paths"][0])
)
# Strips numbers off the end of the file name, plus any underscore or
# . characters right before it.
#
# foo.1234 -> foo
# foo1234 -> foo
# foo_1234 -> foo
default_name = re.sub(r"[._]*\d+$", "", default_name)
rp_name = publish_fields.get("name", default_name,)
rp_channel = publish_fields.get("channel")
if rp_name and rp_channel:
name += "%s.nk (output %s), " % (rp_name, rp_channel)
elif not rp_name:
name += "Nuke output %s, " % rp_channel
elif not rp_channel:
name += "%s.nk, " % rp_name
else:
name += "Nuke, "
# Do our best to get a usable version number. If we have data extracted
# using a template, we use that. If we don't, then we can look to see
# if this publish item came with a clip PublishedFile, in which case
# we use the version_number field from that entity +1, as a new version
# of that published clip will be created as part of this update process,
# and that is what we want to associate ourselves with here.
version = publish_fields.get("version")
if version is None and "flame_clip_publish" in item.properties:
version = item.properties["flame_clip_publish"]["version_number"] + 1
version = version or 0
name += "v%03d" % version
return name
| 5,346,842 |
def f(q):
"""Constraint map for the origami."""
return 0.5 * (np.array([
q[0] ** 2,
(q[1] - q[0]) ** 2 + q[2] ** 2 + q[3] ** 2,
(q[4] - q[1]) ** 2 + (q[5] - q[2]) ** 2 + (q[6] - q[3]) ** 2,
q[4] ** 2 + q[5] ** 2 + q[6] ** 2,
q[7] ** 2 + q[8] ** 2 + q[9] ** 2,
(q[7] - q[1]) ** 2 + (q[8] - q[2]) ** 2 + (q[9] - q[3]) ** 2,
(q[7] - q[4]) ** 2 + (q[8] - q[5]) ** 2 + (q[9] - q[6]) ** 2,
q[10] ** 2 + q[11] ** 2,
(q[10] - q[0]) ** 2 + q[11] ** 2,
(q[10] - q[1]) ** 2 + (q[11] - q[2]) ** 2 + q[3] ** 2,
(q[10] - q[7]) ** 2 + (q[11] - q[8]) ** 2 + q[9] ** 2,
]) - lengths2) / (lengths)
| 5,346,843 |
def resize(clip, newsize=None, height=None, width=None):
"""
Returns a video clip that is a resized version of the clip.
Parameters
------------
newsize:
Can be either
- ``(height,width)`` in pixels
- A scaling factor, like 0.5
- A function of time returning one of these.
width:
width of the new clip in pixel. The height is then computed so
that the width/height ratio is conserved.
height:
height of the new clip in pixel. The width is then computed so
that the width/height ratio is conserved.
Examples
----------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
>>> myClip.resize(0.6) # width and heigth multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if newsize != None:
def trans_newsize(ns):
if isinstance(ns, (int, float)):
return [ns * w, ns * h]
else:
return ns
if hasattr(newsize, "__call__"):
newsize2 = lambda t : trans_newsize(newsize(t))
if clip.ismask:
fun = lambda gf,t: (1.0*resizer((255 * gf(t))
.astype('uint8'),
newsize2(t))/255)
else:
fun = lambda gf,t: resizer(gf(t).astype('uint8'),
newsize2(t))
return clip.fl(fun, keep_duration=True, apply_to='mask')
else:
newsize = trans_newsize(newsize)
elif height != None:
newsize = [w * height / h, height]
elif width != None:
newsize = [width, h * width / w]
if clip.ismask:
fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'),
newsize)/255
else:
fl = lambda pic: resizer(pic.astype('uint8'), newsize)
return clip.fl_image(fl, apply_to='mask')
| 5,346,844 |
def test_put_available():
"""Test put available."""
env = simpy.Environment()
container = core.EventsContainer(env=env)
container.initialize_container([{"id": "default", "capacity": 10, "level": 5}])
def process():
at_most_5 = container.get_container_event(level=5, operator="le")
at_most_6 = container.get_container_event(level=6, operator="le")
at_most_3 = container.get_container_event(level=3, operator="le")
assert at_most_5.triggered, "a"
assert at_most_6.triggered, "b"
assert not at_most_3.triggered, "c"
yield container.get(1)
at_most_3 = container.get_container_event(level=3, operator="le")
assert not at_most_3.triggered, "d"
yield container.put(1)
at_most_3 = container.get_container_event(level=3, operator="le")
assert not at_most_3.triggered, "e"
yield container.get(2)
at_most_3 = container.get_container_event(level=3, operator="le")
assert at_most_3.triggered, "f"
env.process(process())
env.run()
| 5,346,845 |
def tearDown(self):
"""
Replacement function of original unittest **tearDown** function
"""
# call options modules function if registered
try:
if hasattr(self, '__libs_options'):
for opt in self.__libs_options:
if 'teardown' in opt.REGISTERED:
self.__syslogger.info('Call teardown() of "%s" options module...' % opt.fullname)
CONFIG.TEST.CURRENT_STATE = 'teardown of "%s" module' % opt.fullname
opt.teardown()
except Exception as e:
self.__syslogger.exception(e)
raise
# change state name
CONFIG.TEST.CURRENT_STATE = "tearDown %s" % self.shortDescription()
# call original function
try:
self.__originalTearDown()
except Exception as e:
self.__syslogger.exception(e)
raise
| 5,346,846 |
def save_yaml(
input_dict: dict,
output_path="output.yaml",
parents: bool = False,
exist_ok: bool = False,
):
"""Save dictionary as yaml.
Args:
input_dict (dict):
A variable of type 'dict'.
output_path (str or pathlib.Path):
Path to save the output file.
parents (bool):
Make parent directories if they do not exist. Default is
False.
exist_ok (bool):
Overwrite the output file if it exists. Default is False.
Returns:
Examples:
>>> from tiatoolbox import utils
>>> input_dict = {'hello': 'Hello World!'}
>>> utils.misc.save_yaml(input_dict, './hello.yaml')
"""
path = pathlib.Path(output_path)
if path.exists() and not exist_ok:
raise FileExistsError("File already exists.")
if parents:
path.parent.mkdir(parents=True, exist_ok=True)
with open( # skipcq: PTC-W6004: PTC-W6004
str(pathlib.Path(output_path)), "w"
) as yaml_file:
yaml.dump(input_dict, yaml_file)
| 5,346,847 |
def calc_rt_pytmm(pol, omega, kx, n, d):
"""API-compatible wrapper around pytmm
"""
vec_omega = omega.numpy()
vec_lambda = C0/vec_omega*2*np.pi
vec_n = n.numpy()
vec_d = d.numpy()
vec_d = np.append(np.inf, vec_d)
vec_d = np.append(vec_d, np.inf)
vec_kx = kx.numpy().reshape([-1,1])
vec_k0 = 2 * np.pi / vec_lambda.reshape([1,-1])
vec_theta = np.arcsin(vec_kx / vec_k0)
r = np.zeros((len(kx), len(omega)), dtype=np.complex64)
t = np.zeros((len(kx), len(omega)), dtype=np.complex64)
for i, theta in enumerate(vec_theta):
for j, lam in enumerate(vec_lambda):
out = coh_tmm(pol, vec_n, vec_d, theta[j], lam)
r[i, j] = out['r']
t[i, j] = out['t']
t = tf.constant(t)
r = tf.constant(r)
return tf.constant(t), tf.constant(r)
| 5,346,848 |
def associate_ms(image_id):
"""
Associate the monitoring sources, i.e., their forced fits,
of the current image with the ones in the running catalog.
These associations are treated separately from the normal
associations and there will only be 1-to-1 associations.
The runcat-monitoring source pairs will be inserted in a
temporary table.
Of these, the runcat and runcat_flux tables are updated with
the new datapoints if the (monitoring) source already existed,
otherwise they are inserted as a new source.
The source pair is appended to the light-curve table
(assocxtrsource), with a type = 8 (for the first occurence)
or type = 9 (for existing runcat sources).
After all this, the temporary table is emptied again.
"""
_del_tempruncat()
_insert_tempruncat(image_id)
_insert_1_to_1_assoc()
_update_1_to_1_runcat()
n_updated = _update_1_to_1_runcat_flux()
if n_updated:
logger.debug("Updated flux for %s monitor sources" % n_updated)
n_inserted = _insert_1_to_1_runcat_flux()
if n_inserted:
logger.debug("Inserted new-band flux measurement for %s monitor sources"
% n_inserted)
_insert_new_runcat(image_id)
_insert_new_runcat_flux(image_id)
_insert_new_1_to_1_assoc(image_id)
_update_monitor_runcats(image_id)
_del_tempruncat()
| 5,346,849 |
def statistic_bbox(dic, dic_im):
""" Statistic number of bbox of seed and image-level data for each class
Parameters
----------
dic: seed roidb dictionary
dic_im: image-level roidb dictionary
Returns
-------
num_bbox: list for number of 20 class's bbox
num_bbox_im: list for number of 20 class's bbox
"""
num_bbox = [0] * 20
num_bbox_im = [0] * 20
for d in dic:
for c in d['gt_classes']:
num_bbox[c-1] += 1
for d in dic_im:
for c in d['gt_classes']:
num_bbox_im[c-1] += 1
print("Statistic for seed data bbox: ", num_bbox)
print("Statistic for image-level data bbox: ", num_bbox_im)
return num_bbox, num_bbox_im
| 5,346,850 |
def test_apply_filter_sweep(
frequencies: List[float], frame_rate: int, kind: str,
bands: List[Tuple[Optional[float], Optional[float]]],
invert: bool, order: int, frequency: float, waveform: str,
spectrogram_params: Dict[str, Any], expected: np.ndarray
) -> None:
"""Test `apply_filter_sweep` function."""
waves = [
generate_mono_wave(
'sine', frequency, np.ones(frame_rate), frame_rate
)
for frequency in frequencies
]
sound = sum(waves)
sound = np.vstack((sound, sound))
event = Event(
instrument='any_instrument',
start_time=0,
duration=1,
frequency=min(frequencies),
velocity=1,
effects='',
frame_rate=frame_rate
)
sound = apply_filter_sweep(
sound, event, kind, bands, invert, order, frequency, waveform
)
spc = spectrogram(sound[0], frame_rate, **spectrogram_params)[2]
result = spc.sum(axis=1)[:len(expected)]
np.testing.assert_almost_equal(result, expected)
| 5,346,851 |
def github_youtube_config_files():
"""
Function that returns a list of pyGithub files with youtube config channel data
Returns:
A list of pyGithub contentFile objects
"""
if settings.GITHUB_ACCESS_TOKEN:
github_client = github.Github(settings.GITHUB_ACCESS_TOKEN)
else:
github_client = github.Github()
repo = github_client.get_repo(CONFIG_FILE_REPO)
return repo.get_contents(CONFIG_FILE_FOLDER, ref=settings.OPEN_VIDEO_DATA_BRANCH)
| 5,346,852 |
def reverse_uint(uint,num_bits=None):
"""
This function takes an unsigned integer and reverses all of its bits.
num_bits is number of bits to assume are present in the unsigned integer.
If num_bits is not specified, the minimum number of bits needed to represent the unsigned integer is assumed.
If num_bits is specified, it must be at least the minimum number of bits needed to represent the unsigned integer.
>>> reverse_uint(3,8)
192
>>> bin(192)
'0b11000000'
"""
if not isinstance(uint,int):
raise Exception('input must be an integer, not %s' % repr(type(uint)))
if uint < 0:
raise Exception('input must be non-negative: %s' % repr(uint))
if num_bits is not None and min_bits_uint(uint) > num_bits:
raise Exception('Input uint must be storable in at most num_bits (%d) number of bits, but requires %d bits' % (num_bits,min_bits_uint(uint)))
result = 0
extracted_bits = 0
while (num_bits is not None and extracted_bits < num_bits) or uint != 0:
uint,rem = divmod(uint,2)
result = (result<<1) | rem
extracted_bits += 1
return result
| 5,346,853 |
def min_by_tail(lhs, ctx):
"""Element ↓
(any) -> min(a, key=lambda x: x[-1])
"""
lhs = iterable(lhs, ctx=ctx)
if len(lhs) == 0:
return []
else:
return min_by(lhs, key=tail, cmp=less_than, ctx=ctx)
| 5,346,854 |
def tests_transaction_is_affordable_agent_is_the_seller():
"""Check if the agent has the goods (the agent=sender is the seller)."""
currency_endowment = {"FET": 0}
good_endowment = {"good_id": 0}
ownership_state = OwnershipState(
amount_by_currency_id=currency_endowment, quantities_by_good_id=good_endowment,
)
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId(AUTHOR, "a_skill", "0.1.0")],
tx_id="transaction0",
tx_sender_addr="agent_1",
tx_counterparty_addr="pk",
tx_amount_by_currency_id={"FET": 10},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 0},
info={"some_info_key": "some_info_value"},
ledger_id="fetchai",
tx_nonce="transaction nonce",
)
assert ownership_state.is_affordable_transaction(
tx_message=tx_message
), "We must reject the transaction."
| 5,346,855 |
def SecureBytesEqual( a, b ):
"""Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks."""
# Consistent timing matters more here than data type flexibility
# We do NOT want to support py2's str type because iterating over them
# (below) produces different results.
if type( a ) != bytes or type( b ) != bytes:
raise TypeError( "inputs must be bytes instances" )
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len( a ) != len( b ):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip( a, b ):
result |= x ^ y
return result == 0
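# Hedged usage sketch (not part of the original snippet): comparing two HMAC
# digests with SecureBytesEqual instead of '==' to reduce timing leakage; the
# key and message values below are illustrative only.
import hashlib
import hmac
expected_digest = hmac.new( b'server-key', b'payload', hashlib.sha256 ).digest()
received_digest = hmac.new( b'server-key', b'payload', hashlib.sha256 ).digest()
assert SecureBytesEqual( expected_digest, received_digest )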
| 5,346,856 |
def encode(something):
"""
We encode all messages as base64-encoded pickle objects in case
later on, we want to persist them or send them to another system.
This is extraneous for now.
"""
return base64.b64encode(pickle.dumps(something))
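# Hedged companion sketch (an assumption, not part of the original snippet):
# the inverse of encode(), recovering the object from a base64-encoded pickle
# payload; base64 and pickle are assumed to be imported at module level.
def decode(encoded):
    return pickle.loads(base64.b64decode(encoded))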
| 5,346,857 |
def scrape_proposal_page(browser, proposal_url):
"""
Navigates to the page giving details about a piece of legislation, scrapes
that data, and adds a model to the database session. Returns the new DB
model.
"""
browser.get(proposal_url)
file_number = int(extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblFile2'
)))
proposal_title = extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblTitle2'
))
proposal_type = extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblIntroduced2'
))
proposal_status = extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblStatus2'
))
introduction_date = parse_date(extract_text(
browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblIntroduced2'
)
))
db_proposal = db.Proposal(file_number, proposal_title)
db_proposal.status = proposal_status
db_proposal.proposal_type = proposal_type
db_proposal.introduction_date = introduction_date
db.session.add(db_proposal)
db.session.flush()
    # TODO: probably should refactor this out at least
return db_proposal
| 5,346,858 |
def installRecommendation(install, uninstall, working_set=working_set, tuples=False):
"""Human Readable advice on which modules have to be installed on
current Working Set.
"""
installList = []
for i in install:
is_in = False
for p in working_set:
if i[0] == p.key and i[1] == p.version:
is_in = True
break
if not is_in:
if not tuples:
print('~~ Install: '+i[0]+' version '+i[1])
else:
installList.append((i[0], i[1]))
for u in uninstall:
is_in = False
for p in working_set:
if u[0] == p.key and u[1] == p.version:
is_in = True
break
if is_in:
if not tuples:
print('~~ Uninstall: '+u[0]+' version '+u[1])
return installList
| 5,346,859 |
def obsangle(thetas, phis, alpha_obs):
"""
    Return the cosine of the observer angle for the different shockwave segments
    and an observer at an angle alpha_obs with respect to the jet axis
    (contained in the yz plane).
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(alpha_obs), cos(alpha_obs)
u_obs_y, u_obs_z = sin(alpha_obs), cos(alpha_obs)
#seg_x =
seg_y = sin(thetas)*sin(phis)
seg_z = cos(thetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z
| 5,346,860 |
def writeJSON(dbFile: str, db: dict, prettyPrint=False):
"""Write the given json-serializable dictionary to the given file path.
All objects in the dictionary must be JSON-serializable.
:param str dbFile: Path to the file which db should be written to
:param dict db: The json-serializable dictionary to write
:param bool prettyPrint: When False, write minified JSON. When true, write JSON with basic pretty printing (indentation)
"""
with open(dbFile, "w") as f:
if prettyPrint:
json.dump(db, f, indent=4, sort_keys=True)
else:
json.dump(db, f)
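# Hedged usage sketch: writing a small dictionary with pretty printing; the
# file path and contents are illustrative only.
writeJSON("guildsDB.json", {"guilds": {"123": {"prefix": "!"}}}, prettyPrint=True)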
| 5,346,861 |
def _image_tensor_input_placeholder(input_shape=None):
"""Returns input placeholder and a 4-D uint8 image tensor."""
if input_shape is None:
input_shape = (None, None, None, 3)
input_tensor = tf.placeholder(
dtype=tf.uint8, shape=input_shape, name='image_tensor')
return input_tensor, input_tensor
| 5,346,862 |
def _load_top_bonds(f, topology, **kwargs):
"""Take a mol2 file section with the heading '@<TRIPOS>BOND' and save to the topology.bonds attribute."""
while True:
line = f.readline()
if _is_end_of_rti(line):
line = line.split()
bond = Bond(
connection_members=(
topology.sites[int(line[1]) - 1],
topology.sites[int(line[2]) - 1],
)
)
topology.add_connection(bond)
else:
break
return line
| 5,346,863 |
def timelength_to_phrase(
timelength: spec.Timelength,
from_representation: spec.TimelengthRepresentation = None,
) -> spec.TimelengthPhrase:
"""convert Timelength to TimelengthPhrase
## Inputs
- timelength: Timelength
- from_representation: str representation name of input timelength
## Returns
- TimelengthPhrase timelength
"""
return convert_timelength(
timelength=timelength,
to_representation='TimelengthPhrase',
from_representation=from_representation,
)
| 5,346,864 |
def order_columns(self: DataFrame, order: str = "asc", by_dtypes: bool = False):
"""
Rearrange the columns in alphabetical order.
An option of rearrangement by dtypes is possible.
    :param self: the DataFrame whose columns are rearranged
    :param order: 'asc' or 'desc' ordering of the column names
    :param by_dtypes: boolean to rearrange by dtypes first
    """
if order not in ['asc', 'desc']:
raise Exception("'{}' is not an acceptable ordering value, you can only use {'asc','desc'}".format(order))
if by_dtypes:
dtypes_dict = dict()
for col, dtype in self.dtypes:
dtypes_dict.setdefault(dtype, list())
dtypes_dict[dtype].append(col)
dtypes_dict = dict(sorted(dtypes_dict.items()))
columns = [col for values in dtypes_dict.values()
for col in sorted(values)]
return self.select(columns)
else:
return self.select(sorted(self.columns, reverse=False if order == "asc" else True))
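# Hedged usage sketch, assuming pyspark is available and order_columns is
# attached to DataFrame (the 'self' parameter suggests it is used as a method).
from pyspark.sql import SparkSession, DataFrame
DataFrame.order_columns = order_columns
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "a", 2.0)], ["b_col", "a_col", "c_col"])
df.order_columns(order="asc", by_dtypes=True).show()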
| 5,346,865 |
def main():
"""
Place videos in test_videos folder
Usage python3 pupil_detect.py --video_file "/test_videos/filename.mkv"
Default video python3 pupil_detect.py
"""
try:
default_video = "/pupil_detect/test_videos/sample.mkv"
parser = argparse.ArgumentParser()
parser.add_argument('--video_file', nargs='?', const=1, type=str, default=default_video)
args = parser.parse_args()
# Run pupil tracking
track_pupil = PupilDetect(args.video_file)
track_pupil.track_frame()
except Exception as e:
print("Error encountered ", e)
| 5,346,866 |
def main():
""" do main work """
cmdArgs = sys.argv[1:]
if not cmdArgs:
msg = "There is no version in args. Current version: "
msg += version.current()
print(msg)
if GIT_EXE:
print("Result of 'git describe': ")
print(_runGitCmd('describe'))
msg = "\nUsage: " + sys.argv[0] + " x.y.z where x,y,z are numbers"
print(msg)
return 0
newVer = cmdArgs[0]
if not version.checkFormat(newVer):
print('Version %r has invalid format' % newVer)
return 1
if not GIT_EXE:
print("There is no 'git'. Install 'git' to use this script.")
return 2
if not _checkChangeLog(newVer):
print('There is no records for the version %r in changelog file' % newVer)
return 3
question = 'Bump version to %s?'
question += ' It will write the version to file,'
question += '\nadd it to git repo, commit it and add git tag with the version.'
answer = _getAnswerYesNo(question % newVer)
if not answer:
return 0
print("Bumping version to %r .." % newVer)
_bumpVersion(newVer)
print("Building distribution ..")
_runPyScript('setup.py clean sdist bdist_wheel')
answer = _getAnswerYesNo('Distribution was built successfully. Publish it to pypi?')
if answer:
print("Publishing distribution ..")
_runPyScript('setup.py publish')
print("Distribution was published.")
answer = _getAnswerYesNo('Publish release to github?')
if answer:
scriptPath = os.path.join('scripts', 'publish-github-release.py')
args = '%s %s' % (scriptPath, newVer)
print("Publishing release to github ..")
_runPyScript(args, tryPy3 = True)
print("Release was published on github.")
print("Writing new dev version to file %r .." % version.VERSION_FILE_PATH)
nextVer = _writeNewDevVersion(newVer)
print("New dev version %r was written to file." % nextVer)
return 0
| 5,346,867 |
def reorderWithinGroup(players_by_wins):
"""Shuffle players with the same score.
Args:
players_by_wins: a dictionary returned by splitByScore().
Returns a list of the re-ordered player ids.
"""
for score in players_by_wins.keys():
random.shuffle(players_by_wins[score])
# players_by_wins is a dictionary with scores as keys. When
# converting to a list, need to make sure it is sorted by score,
# from highest to lowest.
players_ordered = []
    score_keys = sorted(players_by_wins.keys(), reverse=True)
for score in score_keys:
players_ordered.append(players_by_wins[score])
# Convert back to a list.
players_ordered = list(chain.from_iterable(players_ordered))
# Return the ordered ids.
ordered_ids = [x[0] for x in players_ordered]
return(ordered_ids)
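# Hedged usage sketch: players_by_wins maps a score to a list of
# (player_id, name) tuples, as the x[0] indexing above implies; random and
# itertools.chain are assumed to be imported at module level.
players_by_wins = {2: [(11, "ann"), (12, "bob")], 1: [(13, "cara")]}
print(reorderWithinGroup(players_by_wins))  # e.g. [12, 11, 13]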
| 5,346,868 |
def writebr(text):
"""@deprecated: (since='0.8', replacement=logging.debug)"""
write(text + "\n")
| 5,346,869 |
def selection(population, method):
"""Apply selection method of a given population.
Args:
population: (list of) plans to apply the selection on.
method: (str) selection method:
- rws (Roulette Wheel Selection)
            - sus (Stochastic Universal Sampling)
- ts (Tournament Selection)
Returns:
(list of) plans representing the new pool
"""
if method == "rws":
return roulette_wheel_selection(population)
elif method == "sus":
return stochastic_universal_sampling(population)
elif method == "ts":
return tournament_selection(population)
| 5,346,870 |
def PSL_prefix(row, cols):
"""Returns the prefix a domain (www.images for www.images.example.com)"""
psl_data = psl.search_tree(row[cols[0]])
if psl_data:
return(psl_data[1], psl_data[0])
return (None, None)
| 5,346,871 |
def build_model(sess,t,Y,model='sde',sf0=1.0,ell0=[2,2],sfg0=1.0,ellg0=[1e5],
W=6,ktype="id",whiten=True,
fix_ell=False,fix_sf=False,fix_Z=False,fix_U=False,fix_sn=False,
fix_ellg=False,fix_sfg=False,fix_Zg=True,fix_Ug=False):
"""
Args:
sess: TensowFlow session needed for initialization and optimization
t: Python array of numpy vectors storing observation times
Y: Python array of numpy matrices storing observations. Observations
are stored in rows.
model: 'sde' or 'ode'
sf0: Integer initial value of the signal variance of drift GP
ell0: Python/numpy array of floats for the initial value of the
lengthscale of drift GP
sfg0: Integer initial value of the signal variance of diffusion GP
ellg0: Python/numpy array of a single float for the initial value of the
lengthscale of diffusion GP
W: Integer denoting the width of the inducing point grid. If the problem
dimension is D, total number of inducing points is W**D
ktype: Kernel type. We have made experiments only with Kronecker kernel,
denoted by 'id'. The other kernels are not supported.
whiten: Boolean. Currently we perform the optimization only in the
white domain
fix_ell: Boolean - whether drift GP lengthscale is fixed or optimized
fix_sf: Boolean - whether drift GP signal variance is fixed or optimized
fix_Z: Boolean - whether drift GP inducing locations are fixed or optimized
fix_U: Boolean - whether drift GP inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
fix_ellg: Boolean - whether diffusion GP lengthscale is fixed or optimized
fix_sfg: Boolean - whether diffusion GP signal variance is fixed or optimized
fix_Zg: Boolean - whether diffusion GP inducing locations are fixed or optimized
fix_Ug: Boolean - whether diffusion GP inducing vectors are fixed or optimized
Returns:
npde: A new NPDE model
"""
print('Model being initialized...')
def init_U0(Y=None,t=None,kern=None,Z0=None,whiten=None):
Ug = (Y[1:,:] - Y[:-1,:]) / np.reshape(t[1:]-t[:-1],(-1,1))
with tf.name_scope("init_U0"):
tmp = NPODE(Z0=Y[:-1,:],U0=Ug,sn0=0,kern=kern,jitter=0.25,whiten=False,
fix_Z=True,fix_U=True,fix_sn=True)
U0 = tmp.f(X=Z0)
if whiten:
Lz = tf.cholesky(kern.K(Z0))
U0 = tf.matrix_triangular_solve(Lz, U0, lower=True)
U0 = sess.run(U0)
return U0
D = len(ell0)
Nt = len(Y)
x0 = np.zeros((Nt,D))
Ys = np.zeros((0,D))
for i in range(Nt):
x0[i,:] = Y[i][0,:]
Ys = np.vstack((Ys,Y[i]))
maxs = np.max(Ys,0)
mins = np.min(Ys,0)
grids = []
for i in range(D):
grids.append(np.linspace(mins[i],maxs[i],W))
vecs = np.meshgrid(*grids)
Z0 = np.zeros((0,W**D))
for i in range(D):
Z0 = np.vstack((Z0,vecs[i].T.flatten()))
Z0 = Z0.T
tmp_kern = OperatorKernel(sf0,ell0,ktype="id",fix_ell=True,fix_sf=True)
U0 = np.zeros(Z0.shape,dtype=np.float64)
for i in range(len(Y)):
U0 += init_U0(Y[i],t[i],tmp_kern,Z0,whiten)
U0 /= len(Y)
sn0 = 0.5*np.ones(D)
Ug0 = np.ones([Z0.shape[0],1])*0.01
ell0 = np.asarray(ell0,dtype=np.float64)
ellg0 = np.asarray(ellg0,dtype=np.float64)
kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=ktype, fix_ell=fix_ell, fix_sf=fix_sf)
    if model == 'ode':
npde = NPODE(Z0=Z0, U0=U0, sn0=sn0, kern=kern, whiten=whiten, fix_Z=fix_Z, fix_U=fix_U, fix_sn=fix_sn)
sess.run(tf.global_variables_initializer())
return npde
    elif model == 'sde':
diffus = BrownianMotion(sf0=sfg0, ell0=ellg0, U0=Ug0, Z0=Z0, whiten=whiten,\
fix_sf=fix_sfg, fix_ell=fix_ellg, fix_Z=fix_Zg, fix_U=fix_Ug)
npde = NPSDE(Z0=Z0, U0=U0, sn0=sn0, kern=kern, diffus=diffus, whiten=whiten,\
fix_Z=fix_Z, fix_U=fix_U, fix_sn=fix_sn)
sess.run(tf.global_variables_initializer())
return npde
else:
raise NotImplementedError("model parameter should be either 'ode' or 'sde', not {:s}\n".format(model))
| 5,346,872 |
def region():
"""Get current default region. Defaults to the region of the instance on
ec2 if not otherwise defined.
"""
parser = _get_parser()
parser.parse_args()
print(client_region())
| 5,346,873 |
def paginatedUrls(pattern, view, kwargs=None, name=None):
"""
    Extends a single url tuple into a list that also includes paginated urls.
Currently doesn't handle url() compiled patterns.
"""
results = [(pattern, view, kwargs, name)]
tail = ''
    mtail = re.search(r'(/+\+?\*?\??\$?)$', pattern)
if mtail:
tail = mtail.group(1)
pattern = pattern[:len(pattern) - len(tail)]
results += [(pattern + "/(?P<page_number>\d+)" + tail, view, kwargs)]
results += [(pattern + "/(?P<page_number>\d+)\|(?P<page_limit>\d+)" +
tail, view, kwargs)]
if not kwargs:
kwargs = dict()
kwargs['page_limit'] = 0
results += [(pattern + "/?\|(?P<page_limit>all)" + tail, view, kwargs)]
return results
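# Hedged usage sketch: expanding a single pattern into its paginated variants;
# the view is a placeholder string rather than a real view callable, and the
# re module is assumed to be imported at module level.
for route in paginatedUrls(r'^articles/?$', 'views.article_list', name='articles'):
    print(route[0])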
| 5,346,874 |
def invoke(name, region, namespace, eventdata, type, no_color):
"""
\b
Invoke the SCF remote function.
\b
Common usage:
\b
* Invoke the function test in ap-guangzhou and in namespace default
$ scf remote invoke --name test --region ap-guangzhou --namespace default
"""
type_dict = {
"sync": "RequestResponse",
"async": "Event"
}
logtype = "tail"
if type.lower() not in type_dict:
Operation("Log type must in {l}".format(l=INVOCATION_TYPE)).warning()
return
    if type.lower() == "async":
        Operation('Invoke started, you can get the invoke logs by executing `scf logs -r %s -ns %s -n %s`' % (
            region, namespace, name)).information()
if eventdata:
try:
eventdata = get_data(eventdata)
except Exception as e:
raise EventFileException("Read file error: %s" % (str(e)))
else:
eventdata = json.dumps({"key1": "value1", "key2": "value2"})
Invoke.do_cli(name, region, namespace, eventdata, logtype, type_dict[type.lower()])
| 5,346,875 |
def set_context(logger, value):
"""
Walks the tree of loggers and tries to set the context for each handler
:param logger: logger
:param value: value to set
"""
_logger = logger
while _logger:
for handler in _logger.handlers:
try:
handler.set_context(value)
except AttributeError:
# Not all handlers need to have context passed in so we ignore
# the error when handlers do not have set_context defined.
pass
if _logger.propagate is True:
_logger = _logger.parent
else:
_logger = None
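# Hedged usage sketch: pushing a context object onto every handler in a logger
# hierarchy; handlers exposing set_context() (Airflow-style) are assumed, and
# handlers without it are skipped by the AttributeError guard above.
import logging
task_logger = logging.getLogger("my.task")
set_context(task_logger, {"dag_id": "example_dag", "task_id": "extract"})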
| 5,346,876 |
def write_index(feat_values, chrom, start, stop, stranded_genome_index, unstranded_genome_index):
""" Writing the features info in the proper index files. """
# Writing info to the stranded indexes
if feat_values[0] != {}:
write_index_line(feat_values[0], chrom, start, stop, "+", stranded_genome_index)
else:
stranded_genome_index.write("\t".join((chrom, start, stop, "+", "antisense\n")))
if feat_values[1] != {}:
write_index_line(feat_values[1], chrom, start, stop, "-", stranded_genome_index)
else:
stranded_genome_index.write("\t".join((chrom, start, stop, "-", "antisense\n")))
# Writing info to the unstranded index
unstranded_feat = dict(feat_values[0], **feat_values[1])
for name in set(feat_values[0]) & set(feat_values[1]):
unstranded_feat[name] += feat_values[0][name]
write_index_line(unstranded_feat, chrom, start, stop, ".", unstranded_genome_index)
| 5,346,877 |
def _to_intraday_trix(date: pd.Timestamp, provider: providers.DataProvider,
                          period: int) -> Tuple[nd.NDArray, nd.NDArray]:
    """
    Returns an ndarray containing the TRIX for a given +date+ and +provider+,
averaged across a given +period+.
"""
# First, get the triple-smoothed 15 period exponential moving average
data = _get_intraday_data(date, provider)
ewm1 = pd.Series.ewm(data['close'], span=period).mean()
ewm2 = pd.Series.ewm(ewm1, span=period).mean()
ewm3 = pd.Series.ewm(ewm2, span=period).mean()
# Return the percentage change from last period
ewm3_yesterday = ewm3.shift(periods=1, fill_value=ewm3[0])
trix = (ewm3 / ewm3_yesterday) - 1
return nd.array(trix.values, utils.try_gpu(0))
| 5,346,878 |
def _decode(integer):
"""
Decode the given 32-bit integer into a MAX_LENGTH character string according
to the scheme in the specification. Returns a string.
"""
if integer.bit_length() > 32:
raise ValueError("Can only decode 32-bit integers.")
decoded_int = 0
# Since each byte has its bits distributed along the given integer at
# BIT_SHIFT intervals, we'll get the bits from one byte at a time.
for input_start in range(4):
# Move to the beginning of the correct output byte.
output_pos = input_start * 8
# Read the bits from the input at BIT_SHIFT intervals, lowest-order
# bits first.
for input_bit in range(input_start, integer.bit_length(), BIT_SHIFT):
current_bit = getBit(integer, input_bit)
# If the current bit is 1, set the corresponding bit in the result.
# Otherwise, we can leave the result bit as 0.
if current_bit:
decoded_int = setBit(decoded_int, output_pos)
# Move to the next position in the output byte.
output_pos += 1
# Get a byte array from the decoded integer. We're reversing the byte order
# because we read the input integer from lowest-order bit to highest-order.
decoded_bytes = decoded_int.to_bytes(4, byteorder="little")
# Get the characters represented by each byte, ignoring empty bytes.
chars = []
for byte in decoded_bytes:
if byte:
chars.append(chr(byte))
return "".join(chars)
| 5,346,879 |
def register(name):
"""Registers a new data loader function under the given name."""
def add_to_dict(func):
_LOADERS[name] = func
return func
return add_to_dict
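# Hedged usage sketch, assuming _LOADERS is a module-level dict.
@register("csv")
def load_csv(path):
    with open(path) as f:
        return f.read().splitlines()
assert "csv" in _LOADERS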
| 5,346,880 |
def get_api_host(staging):
"""If 'staging' is truthy, return staging API host instead of prod."""
return STAGING_API_HOST if staging else PROD_API_HOST
| 5,346,881 |
def get(path, params={}):
"""Make an authenticated GET request to the GitHub API."""
return requests.get(
os.path.join("https://api.github.com/", path),
auth=(USER, PASS),
params=params
).json()
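# Hedged usage sketch: listing open issues for a repository; the owner/repo
# path is a placeholder and USER/PASS are assumed module-level credentials.
issues = get("repos/octocat/Hello-World/issues", params={"state": "open"})
print(len(issues))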
| 5,346,882 |
def delete_user(auth, client):
"""
Delete a user
:auth: dict
:client: users_client object
"""
log("What user you want to delete?")
user_to_delete = find_user_by_username(auth, client)
if user_to_delete is False:
log("Could not find user.", serv="ERROR")
return False
confirmation = yes_or_no("You really want to delete %s?" % user_to_delete["username"])
if confirmation is False:
log("Aborted...")
return False
try:
client.users_user_id_delete(int(user_to_delete["id"]))
log("Successfully deleted user %s" % user_to_delete["username"], serv="SUCCESS")
return True
    except Exception:
        log("Could not delete user %s. Error from backend." % user_to_delete["username"], serv="ERROR")
        return False
| 5,346,883 |
def downloadComic(url):
"""
Downloads the latest comic from a range of sites.
"""
print('Downloading page ' + url + '...')
# Request the url, check the status
res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'html.parser')
# Find url of comic image, the element is unique to each of these
# sites
if url == 'http://www.lefthandedtoons.com/':
comicElem = soup.select('#comicwrap > div.comicdata > img')
elif url == 'https://www.buttersafe.com/':
comicElem = soup.select('#comic img')
elif url == 'https://www.exocomics.com/':
comicElem = soup.select('img', class_='image-style-main-comic')
# checks if element retrieve, if it is, downloads the image
if comicElem == []:
print('Could not find comic image.')
else:
comicUrl = comicElem[0].get('src')
# Download and save the image.
res = requests.get(comicUrl, headers={'User-Agent': 'Mozilla/5.0'})
res.raise_for_status()
imageFileName = os.path.join('comix', os.path.basename(comicUrl))
        if os.path.exists(imageFileName):
print('Image ' + os.path.basename(comicUrl) +
' has already been downloaded.')
else:
imageFile = open(os.path.join(
'comix',
os.path.basename(comicUrl)),
'wb'
)
print('Downloading image %s...' % (comicUrl))
for chunk in res.iter_content(100000):
imageFile.write(chunk)
imageFile.close()
| 5,346,884 |
def plot_figure(ms_label, ms_counts, left, right, params_dict, save_directory, localizations=None, sumof=None):
"""
Plots amino acid spatistics.
Parameters
----------
ms_label : str
Mass shift in string format.
ms_counts : int
Number of peptides in a mass shift.
left : list
Amino acid statistics data [[values], [errors]]
right : list
Amino acid frequences in peptides
params_dict : dict
Parameters dict.
save_directory: str
Saving directory.
localizations : Counter
Localization counter using ms/ms level.
sumof : List
List of str tuples for constituent mass shifts.
"""
b = 0.1 # shift in bar plots
width = 0.2 # for bar plots
labels = params_dict['labels']
labeltext = ms_label + ' Da mass shift,\n' + str(ms_counts) + ' peptides'
x = np.arange(len(labels))
distributions = left[0]
errors = left[1]
fig, ax_left = plt.subplots()
fig.set_size_inches(params_dict['figsize'])
ax_left.bar(x - b, distributions.loc[labels],
yerr=errors.loc[labels], width=width, color=colors[2], linewidth=0)
ax_left.set_ylabel('Relative AA abundance', color=colors[2])
ax_left.set_xticks(x)
ax_left.set_xticklabels(labels)
ax_left.hlines(1, -1, x[-1] + 1, linestyles='dashed', color=colors[2])
ax_right = ax_left.twinx()
ax_right.bar(x + b, right, width=width, linewidth=0, color=colors[0])
ax_right.set_ylim(0, 125)
ax_right.set_yticks(np.arange(0, 120, 20))
ax_right.set_ylabel('Peptides with AA, %', color=colors[0])
ax_left.spines['left'].set_color(colors[2])
ax_right.spines['left'].set_color(colors[2])
ax_left.spines['right'].set_color(colors[0])
ax_right.spines['right'].set_color(colors[0])
ax_left.tick_params('y', colors=colors[2])
ax_right.tick_params('y', colors=colors[0])
pright = matplotlib.lines.Line2D([], [], marker=None, label=labeltext, alpha=0)
ax_left.set_xlim(-1, x[-1] + 1)
ax_left.set_ylim(0, distributions.loc[labels].max() * 1.4)
logger.debug('Localizations for %s figure: %s', ms_label, localizations)
if localizations:
ax3 = ax_left.twinx()
ax3.spines['right'].set_position(('axes', 1.1))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
ax3.set_ylabel('Localization count', color=colors[3])
for sp in ax3.spines.values():
sp.set_visible(False)
ax3.spines['right'].set_visible(True)
ax3.spines['right'].set_color(colors[3])
ax3.tick_params('y', colors=colors[3])
# plot simple modifications (not sum) with the first style,
# then parts of sum
values = [localizations.get(key + '_' + ms_label) for key in labels]
maxcount = _get_max(values)
label_prefix = 'Location of '
ax3.scatter(x, values, marker=_marker_styles[0], color=colors[3], label=label_prefix + ms_label)
if isinstance(sumof, list):
for pair, (color, style) in zip(sumof, _generate_pair_markers()):
values_1 = [localizations.get(key + '_' + pair[0]) for key in labels]
maxcount = max(maxcount, _get_max(values_1))
ax3.scatter(x, values_1, marker=style[0], color=color, label=label_prefix + pair[0])
if pair[0] != pair[1]:
values_2 = [localizations.get(key + '_' + pair[1]) for key in labels]
if values_2:
maxcount = max(maxcount, _get_max(values_2))
ax3.scatter(x, values_2, marker=style[1], color=color, label=label_prefix + pair[1])
terms = {key for key in localizations if key[1:6] == '-term'}
# logger.debug('Found terminal localizations: %s', terms)
for t in terms:
label = '{} at {}: {}'.format(*reversed(t.split('_')), localizations[t])
p = ax3.plot([], [], label=label)[0]
p.set_visible(False)
pright.set_label(pright.get_label() + '\nNot localized: {}'.format(localizations.get('non-localized', 0)))
if maxcount:
ax3.legend(loc='upper left', ncol=2)
ax3.set_ylim(0, 1.4 * max(maxcount, 1))
ax_right.legend(handles=[pright], loc='upper right', edgecolor='dimgrey', fancybox=True, handlelength=0)
fig.tight_layout()
fig.savefig(os.path.join(save_directory, ms_label + '.png'), dpi=500)
fig.savefig(os.path.join(save_directory, ms_label + '.svg'))
plt.close()
| 5,346,885 |
def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:
"""Get the statistics for the all builders."""
print('getting list of builders...')
stats = BuildStats()
for builder in requests.get(BASE_URL).json().keys():
# TODO: maybe filter the builds to the ones we care about
        stats += get_builder_stats(builder, time_window)
return stats
| 5,346,886 |
def remove_empty(s):
"""\
Remove empty strings from a list.
>>> a = ['a', 2, '', 'b', '']
>>> remove_empty(a)
[{u}'a', 2, {u}'b']
"""
while True:
try:
s.remove('')
except ValueError:
break
return s
| 5,346,887 |
def quantum_ia(nb_stick: int, past: list, backend_sim: Aer) -> list:
"""Quantum IA.
Args:
nb_stick: nb of stick left
past: past turn
backend_sim: backend for quantum
Return: Prediction to use
"""
def quadratibot(nb_stick: int, past: list, backend_sim: Aer) -> list:
"""Quadratic + QAOA function
Args:
nb_stick: nb of stick left
past: past turn
backend_sim: backend for quantum
Return: Gates to use
"""
def get_quantum_solution_for(
quadprog: QuadraticProgram, quantumInstance: QuantumInstance, optimizer=None
):
_eval_count = 0
def callback(eval_count, parameters, mean, std):
nonlocal _eval_count
_eval_count = eval_count
# Create solver and optimizer
solver = QAOA(
optimizer=optimizer,
quantum_instance=quantumInstance,
callback=callback,
max_evals_grouped=3,
)
# Create optimizer for solver
optimizer = MinimumEigenOptimizer(solver)
# Get result from optimizer
result = optimizer.solve(quadprog)
return result, _eval_count
# Check number of stick max
if nb_stick >= 3:
max_stick = 3
else:
max_stick = nb_stick
# Check the past
poten_stick = nb_stick
for i in range(len(past)):
if past[i] == "/":
poten_stick += 0.5
if past[i] == "¬":
u = 1
if len(past) - 1 >= i + u:
while past[i + u] == "¬":
u += 1
if past[i + u] == "/":
poten_stick += 0.5
# Check last turn
last_st = 0
if past[0] == "¬":
u = 1
while past[0 + u] == "¬":
u += 1
if past[0 + u] == "/":
last_st = 0.5
if past[0] == "/":
last_st = 0.5
quadprog = QuadraticProgram(name="qnim")
quadprog.integer_var(name="x", lowerbound=0, upperbound=max_stick)
quadprog.integer_var(name="sup", lowerbound=0, upperbound=max_stick)
quadprog.integer_var(name="intric", lowerbound=0, upperbound=max_stick)
quadprog.maximize(
linear={"x": 1, "sup": 0.5, "intric": last_st},
quadratic={("sup", "intric"): 0.5},
)
# General constraints
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1}, sense=">", rhs=0, name="gen_min"
)
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1},
sense="<=",
rhs=max_stick,
name="gen_max",
)
# Mod4 constraints
if math.ceil(poten_stick % 4) - 0.5 > 0:
quadprog.linear_constraint(
linear={"x": 1, "sup": 1},
sense="<=",
rhs=math.ceil(poten_stick % 4),
name="qua_mod4",
)
if nb_stick % 4 - 1 > 0:
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1},
sense="<=",
rhs=nb_stick % 4 - 1,
name="cla_mod4",
)
# Get QAOA result
final_result = []
simulator_instance = QuantumInstance(backend=backend_sim)
qaoa_result, qaoa_eval_count = get_quantum_solution_for(
quadprog, simulator_instance
)
# Format and print result
for cropHectares, cropName in zip(qaoa_result.x, qaoa_result.variable_names):
for i in range(int(cropHectares)):
final_result.append(cropName)
return final_result
def gronim(output: list, backend_sim: Aer) -> Tuple[Any, ...]:
"""Grover for best predict.
Args:
output: every possible prediction
backend_sim: backend for quantum
        Return: best prediction
"""
def diffuser(nqubits):
qc = QuantumCircuit(nqubits)
for qubit in range(nqubits):
qc.h(qubit)
for qubit in range(nqubits):
qc.x(qubit)
qc.h(nqubits - 1)
qc.mct(list(range(nqubits - 1)), nqubits - 1)
qc.h(nqubits - 1)
for qubit in range(nqubits):
qc.x(qubit)
for qubit in range(nqubits):
qc.h(qubit)
U_s = qc.to_gate()
U_s.name = "$Diff$"
return U_s
def ram(nqubits, lists_final):
list_qram = [i for i in range(nqubits)]
qram = QuantumRegister(nqubits, "qram")
qalgo = QuantumRegister(nqubits, "algo")
qc = QuantumCircuit(qram, qalgo)
control_h = MCMT("h", nqubits, 1).to_gate()
map_ram_2 = [["x", "x"], ["o", "x"], ["x", "o"], ["o", "o"]]
map_ram_3 = [
["x", "x", "x"],
["o", "x", "x"],
["x", "o", "x"],
["o", "o", "x"],
["x", "x", "o"],
["o", "x", "o"],
["x", "o", "o"],
["o", "o", "o"],
]
if len(bin(len(lists_final))[2:]) == 3:
map_ram = map_ram_3
if len(bin(len(lists_final))[2:]) == 2:
map_ram = map_ram_2
for i, m_ram in zip(range(len(lists_final)), map_ram):
# qc.barrier()
for index, gate in enumerate(m_ram):
if gate == "x":
qc.x(qram[index])
if lists_final[i][0] == "x" or lists_final[i][0] == "sup":
qc.mcx(qram, qalgo[0])
else:
qc.append(control_h, [*list_qram, qalgo[0]])
if len(lists_final[i]) == 3:
if lists_final[i][1] == "x":
qc.mcx(qram, qalgo[1])
elif lists_final[i][1] == "intric":
qc.mcx([qram[0], qram[1], qram[2], qalgo[0]], qalgo[1])
else:
qc.append(control_h, [*list_qram, qalgo[1]])
if lists_final[i][-1] == "x":
qc.mcx(qram, qalgo[-1])
elif lists_final[i][-1] == "intric":
if len(lists_final[i]) == 3:
qc.mcx([qram[0], qram[1], qram[2], qalgo[1]], qalgo[-1])
else:
qc.mcx([qram[0], qram[1], qalgo[0]], qalgo[-1])
else:
qc.append(control_h, [*list_qram, qalgo[-1]])
for index, gate in enumerate(m_ram):
if gate == "x":
qc.x(qram[index])
# print(qc.draw())
U_s = qc.to_gate()
U_s.name = "$Qram$"
return U_s
def algo(nqubits):
qc = QuantumCircuit(nqubits)
qc.h(0)
qc.x(0)
U_s = qc.to_gate()
U_s.name = "$Algo$"
return U_s
lists_final = []
lists_full = list(itertools.permutations(output, len(output)))
for u in lists_full:
if u not in lists_final:
lists_final.append(u)
len_qram = len(bin(len(lists_final))[2:])
qram = QuantumRegister(len_qram, "qram")
qalgo = QuantumRegister(len_qram, "algo")
oracle = QuantumRegister(1, "oracle")
c = ClassicalRegister(len_qram, "measurement")
qc = QuantumCircuit(qram, qalgo, oracle, c)
# Init
qc.h(qram)
qc.x(oracle)
qc.h(oracle)
qc.barrier()
# Qram
qc.append(ram(len_qram, lists_final), [*[i for i in range(len_qram * 2)]])
qc.barrier()
# Algorithm
qc.append(algo(len_qram), [*[i for i in range(len_qram, len_qram * 2)]])
qc.barrier()
# Oracle
qc.mcx([qalgo[0], qalgo[-1]], oracle)
qc.barrier()
# Revert Algo + Qram
qc.append(
algo(len_qram).inverse(), [*[i for i in range(len_qram, len_qram * 2)]]
)
qc.append(
ram(len_qram, lists_final).inverse(), [*[i for i in range(len_qram * 2)]]
)
qc.barrier()
# Diffuser
qc.append(diffuser(len_qram), [*[i for i in range(len_qram)]])
# Measure of the outputs
qc.barrier()
qc.measure(qram, c)
job = execute(qc, backend_sim, shots=512, memory=True)
result_job = job.result()
result_count = result_job.get_counts()
result_memory = job.result().get_memory()
if len(result_count) == 1:
final_result = int(result_memory[0], 2)
else:
final_result = max(result_count, key=result_count.get)
final_result = int(final_result, 2)
to_return = lists_final[final_result]
return to_return
gates = quadratibot(nb_stick, past, backend_sim)
if len(gates) < 2:
predict = gates
elif len(set(gates)) != len(gates):
predict = gates
else:
predict = gronim(gates, backend_sim)
return predict
| 5,346,888 |
def test_generate_fn():
"""Test generating filename."""
assert generate_fn(var1="this", var2="that", num=7, zero=0) == "marsdata_var1_this-var2_that-num_7-zero_0.txt"
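# Hedged sketch (an assumption, not the original implementation): a generate_fn
# consistent with the expected string in the test above -- keyword arguments
# joined as key_value pairs with '-' separators inside a fixed prefix/suffix.
def generate_fn(**kwargs):
    parts = "-".join("{}_{}".format(key, value) for key, value in kwargs.items())
    return "marsdata_{}.txt".format(parts)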
| 5,346,889 |
def construct_model_cnn_gram(num_classes, input_shape):
"""
    Construct the model architecture.
    :param num_classes: number of output classes of the model [int]
    :param input_shape: shape of the model input [tuple]
:return: model - Keras model object
"""
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(num_classes, activation='softmax'))
return model
| 5,346,890 |
def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data):
""" This one (as well as the fallback) should fail as the horizon is invalid."""
solar_device1: Asset = Asset.query.filter_by(name="solar-asset-1").one_or_none()
create_forecasting_jobs(
timed_value_type="Power",
start_of_roll=as_server_time(datetime(2015, 1, 1, 21)),
end_of_roll=as_server_time(datetime(2015, 1, 1, 23)),
horizons=[timedelta(hours=18)],
asset_id=solar_device1.id,
custom_model_params=custom_model_params(),
)
work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception)
check_failures(app.queues["forecasting"], 2 * ["InvalidHorizonException"])
| 5,346,891 |
def get_basic_data(match_info, event):
"""input: dictionary | output: dictionary updated"""
match_info['status'] = event['status']['type']['name']
match_info['start_time'] = event['date']
match_info['match_id'] = event['id']
match_info['time'] = event['status']['displayClock']
match_info['period'] = event['status']['period']
match_info['display_period'] = give_display_period(match_info['period'])
match_info['detail'] = event['status']['type']['detail']
match_info['match_type_id'] = event['status']['type']['id']
return match_info
| 5,346,892 |
def run():
"""
Main method run for Cloudkeeper-OS
"""
configuration.configure()
server.serve()
| 5,346,893 |
def handle_solution(f, problem_id, user, lang):
"""
    When a user uploads a solution, this function takes care of it.
    It runs the grader, saves the running time and output, and stores
    the submission info in the database.
:param f: submission file (program)
:param problem_id: the id of the problem user submitted solution to
:param user: user id of user that made submission
:param lang: what language did user use
:return grade: grade that user got in this submission
:return submission.grade: best grade user got on this problem
:return error: -1 if no errors, otherwise output of agrader.sh
"""
# Get directory where files are stored.
directory = os.popen('echo $CG_FILES_UPLOADED').read().strip()
# Create directory where user's problem submission stuff will get stored.
problem_dir = "{0}/{1}/{2}".format(directory, user, problem_id)
try:
os.mkdir(problem_dir)
except OSError:
pass # directory already exists
# Write the submission file to previously created directory, rename it.
_, end = f.name.split('.')
f_local = '{2}_{0}.{1}'.format(user, end, problem_id)
with open('{0}/{1}'.format(problem_dir, f_local), 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
# Grade the task using agrader.sh. First compile the program if necessary and then copy
# files to docker container. Then run the program and check the grade
# with grader.
runner_ret_val = -1
grade = -1
error = -1
compiler_output = \
subprocess.check_output('bash problem/grader/compile_and_copy.sh {0} {1} {2}'.format(f_local, problem_id, user),
shell=True).split('\n')[-2]
if compiler_output == 'OK':
if end == 'py':
if lang == 'Python 2':
runner_ret_val = subprocess.call('bash problem/grader/run_py.sh {0} {1}'.format(user, problem_id),
shell=True)
elif lang == 'Python 3':
runner_ret_val = subprocess.call('bash problem/grader/run_py3.sh {0} {1}'.format(user, problem_id),
shell=True)
elif end == 'java':
runner_ret_val = subprocess.call('bash problem/grader/run_java.sh {0} {1}'.format(user, problem_id),
shell=True)
elif end == 'cs':
runner_ret_val = subprocess.call('bash problem/grader/run_cs.sh {0} {1}'.format(user, problem_id),
shell=True)
else:
runner_ret_val = subprocess.call('bash problem/grader/run_c.sh {0} {1}'.format(user, problem_id), shell=True)
if runner_ret_val == 0:
grader_out = subprocess.check_output('bash problem/grader/grade.sh {0} {1}'.format(user, problem_id),
shell=True).split('\n')[-2]
try:
grade = int(grader_out)
except ValueError:
grade = -1
error = grader_out
else:
error = "RTE"
else:
error = compiler_output
# Add submission
user = User.objects.get(username=user)
today = date.today()
today_str = '{0}-{1}-{2}'.format(today.year, today.month, today.day)
try: # if user has already submitted solution for this problem before
submission = Submission.objects.get(user=user.id, problem=problem_id)
submission.tries += 1
if grade > submission.grade:
submission.grade = grade
submission.date = today_str
# Save newer solution with same points.
if grade >= submission.grade:
os.system('bash problem/grader/move_output.sh {0} {1} {2}'.format(user.username, problem_id, 1))
else:
os.system('bash problem/grader/move_output.sh {0} {1} {2}'.format(user.username, problem_id, 0))
except ObjectDoesNotExist: # this is user's first submission
submission = Submission()
submission.user_id = user.id
submission.problem_id = problem_id
submission.grade = grade
submission.date = today_str
submission.tries = 1
os.system('bash problem/grader/move_output.sh {0} {1} {2}'.format(user.username, problem_id, 1))
finally: # at the end we need to update some data about best submissions
if grade == 10 and submission.tries_until_correct == 0:
submission.tries_until_correct = submission.tries
# Update number of people that solved this problem.
problem = Problem.objects.get(pk=problem_id)
if problem.solved_by_how_many == 0:
problem.first_solved_by = user
problem.first_solved_on = today_str
problem.solved_by_how_many += 1
problem.last_successful_try = today_str
submission.save()
return grade, submission.grade, error
| 5,346,894 |
def convert_blockgrad(node, **kwargs):
""" Skip operator """
return create_basic_op_node('Identity', node, kwargs)
| 5,346,895 |
def is_complete(node):
"""
all children of a sum node have same scope as the parent
"""
assert node is not None
for sum_node in reversed(get_nodes_by_type(node, Sum)):
nscope = set(sum_node.scope)
if len(sum_node.children) == 0:
return False, "Sum node %s has no children" % sum_node.id
for child in sum_node.children:
if nscope != set(child.scope):
return False, "children of (sum) node %s do not have the same scope as parent" % sum_node.id
return True, None
| 5,346,896 |
def step_inplace(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
""" dense gauss newton update with computing similiarity matrix """
pts = pops.inv_project(depth, intrinsics)
pts = pts.permute(0,3,1,2).contiguous()
# tensor representation of SE3
se3 = Ts.data.permute(0,3,1,2).contiguous()
ae = ae / 8.0
# build the linear system
H, b = SE3BuilderInplace.apply(se3, ae, pts, target, weight, intrinsics)
I = torch.eye(6, device=H.device)[...,None,None]
H = H + (lm*H + ep) * I # damping
dx = SE3Solver.apply(H, b)
dx = dx.permute(0,3,4,1,2).squeeze(-1).contiguous()
Ts = SE3.exp(dx) * Ts
return Ts
| 5,346,897 |
def _compute_extent_axis(axis_range, grid_steps):
"""Compute extent for matplotlib.pyplot.imshow() along one axis.
:param axis_range: 1D numpy float array with 2 elements; axis range for plotting
:param grid_steps: positive integer, number of grid steps in each dimension
:return: 1D numpy float array with 2 elements
"""
delta = (axis_range[1] - axis_range[0]) / (2.0 * (grid_steps - 1))
# the range is covered by grid_steps - 1 pixels with one half of a pixel overlapping on each side; delta is half the
# pixel width
return np.array([axis_range[0] - delta, axis_range[1] + delta])
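# Hedged usage sketch: building a full matplotlib imshow() extent from both
# axis ranges; for [0, 1] with 11 grid steps, delta is 0.05 and the extent is
# [-0.05, 1.05]. The ranges chosen here are illustrative only.
x_extent = _compute_extent_axis(np.array([0.0, 1.0]), grid_steps=11)
y_extent = _compute_extent_axis(np.array([-1.0, 1.0]), grid_steps=11)
extent = np.concatenate([x_extent, y_extent])  # [xmin, xmax, ymin, ymax]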
| 5,346,898 |
def normalize_path(filepath, expand_vars=False):
""" Fully normalizes a given filepath to an absolute path.
:param str filepath: The filepath to normalize
:param bool expand_vars: Expands embedded environment variables if True
    :returns: The fully normalized filepath
:rtype: str
"""
filepath = str(pathlib.Path(filepath).expanduser().resolve())
if expand_vars:
filepath = os.path.expandvars(filepath)
return filepath
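# Hedged usage sketch: resolving '~' and '..' components, and expanding an
# environment variable when expand_vars is True.
print(normalize_path("~/projects/../data"))
print(normalize_path("$HOME/data", expand_vars=True))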
| 5,346,899 |