import os
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import time
from subprocess import Popen, PIPE
import numpy as np
import pickle
N = 20
def fail_if_error(error):
if error:
print(error)
raise AssertionError()
def remove_min_max(x):
trimmed = x[:]
trimmed.remove(max(trimmed))
trimmed.remove(min(trimmed))
return trimmed
def extract_runtime(output):
ind = output.find(b'Execution time: ')
stop_ind = output.find(b' seconds', ind)
return float(output[ind + 16:stop_ind])
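# Illustrative check of the parser above. The format "Execution time: <t> seconds" is assumed
# to be what the benchmark binaries print on stdout; the byte string below is a made-up example.
#   extract_runtime(b'done. Execution time: 1.234 seconds\n')  ->  1.234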
def run_benchmark(cmd):
results = []
for _ in range(N):
p = Popen(cmd, stdin=None, stdout=PIPE, stderr=PIPE)
        output, error = p.communicate()
        # fail_if_error(error)  # optionally raise if the benchmark wrote anything to stderr
results.append(extract_runtime(output))
print('Got the following runtimes:', results)
results = remove_min_max(results)
re = 1000 * np.array(results)
return np.mean(re), np.std(re)
def fuse_cmd(command, cid, dim):
cmd = command + [str(x) for x in dim]
print('Running command: ', cmd)
return cmd
def run_benchmark_all_dims(command, cid, dims):
return [run_benchmark(fuse_cmd(command, cid, dim)) for dim in dims]
# The benchmarks.
benchmarks = {
'gd1': [],
'gd2': []
}
results = benchmarks.copy()
name = ['base-native', 'base-byte', 'mirage-base',
'owl-native', 'owl-byte', 'mirage-owl', 'js']
# Commands for running the benchmarks.
for b in benchmarks.keys():
benchmarks[b].append(([b + '/' + 'gd_base.exe'], 0))
benchmarks[b].append(([b + '/' + 'gd_base.bc'], 1))
benchmarks[b].append(([b + '_mirage/' + 'gd_base.native'], 2))
benchmarks[b].append(([b + '/' + 'gd_owl.exe'], 3))
benchmarks[b].append(([b + '/' + 'gd_owl.bc'], 4))
benchmarks[b].append(([b + '_mirage/' + 'gd_owl.native'], 5))
benchmarks[b].append((['node', b + '/' + 'gd_base.js'], 6))
for fig_id, b in enumerate(benchmarks.keys()):
re = [(0, 0)] * len(name)
for command, colour_id in benchmarks[b]:
re[colour_id] = run_benchmark(command)
    results[b] = list(zip(*re))  # materialise so the (mean, std) pairs can be unpacked again below
re_str = "Results:\n"
for b in benchmarks.keys():
mu, std = results[b]
re_str += (b + '\n')
re_str += '\t'.join(name)
re_str += '\n'
re_str += '\t\t'.join(('%.2f' % x) for x in mu)
re_str += '\n'
re_str += '\t\t'.join(('%.2f' % x) for x in std)
re_str += '\n'
results_dir = 'results/' + time.strftime('%d-%m-%Y--%H:%M:%S') + '/'
os.makedirs(results_dir)
pickle.dump(results, open(results_dir + "/results.p", "wb"))
#with open(results_dir + 'results.txt', 'w+') as f:
# f.write(re_str)
font=18
params = {'legend.fontsize': font,
'figure.figsize': (10, 6),
'axes.labelsize': font-2,
'axes.titlesize': font,
'xtick.labelsize':font,
'ytick.labelsize':font}
pylab.rcParams.update(params)
bar_mu = [([0],[0])]*(len(name))
bar_std = [([0],[0])]*(len(name))
blen = len(benchmarks.keys())
mus = [0] * blen
stds = [0] * blen
for i, b in enumerate(benchmarks.keys()):
mus[i], stds[i] = results[b]
bar_mu = list(zip(*mus))  # list() so the per-benchmark columns can be indexed below
bar_std = list(zip(*stds))
n_groups = 2
bar_width = 0.11
fig, ax = plt.subplots()
index = np.arange(n_groups)
opacity = 0.9
error_config = {'ecolor': '0.3'}
for i, n in enumerate(name):
ax.bar(index + i * bar_width, bar_mu[i], bar_width,
alpha=opacity,
yerr=bar_std[i], error_kw=error_config,
label=name[i])
ax.set_xlabel('Use Gradient Descent to find $argmin(f)$')
ax.set_ylabel('Time (ms)')
ax.set_xticks(index + bar_width * 2.3)
ax.set_xticklabels(('$f(x) = sin(x)$', '$f(x)=x^3 - 2x^2 +2$'))
ax.legend()
fig.tight_layout()
plt.savefig(results_dir + 'numcmp.pdf', format='pdf')
plt.close()
|
python
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
from .validators import validate_file_size
# Create your models here.
# This models.py file defines the relational tables (schemas) of the database; title, content, etc. are fields of those tables, just like columns in SQL.
class Post(models.Model):
title = models.CharField(max_length = 120)
content = models.TextField()
    date_posted = models.DateTimeField(default=timezone.now) # the date on which the post was created.
    author = models.ForeignKey(User, on_delete=models.CASCADE) # if the user is deleted, their posts are deleted too, but not vice versa.
    image = models.ImageField(default="default1.jpg", upload_to="profile_pics", blank=True, null=True) # allows an image to be uploaded with the post; upload_to is the folder the files are saved in.
    likes = models.ManyToManyField(User, related_name='likes', blank=True) # a post can be liked by many users and a user can like many posts.
    video = models.FileField(upload_to='profile_vids', blank=True, null=True, validators=[validate_file_size]) # used for uploading a video or a document such as a PDF with the post.
    restrict_comments = models.BooleanField(default=False) # whether or not comments are allowed on the post.
    favorite = models.ManyToManyField(User, related_name="favorite", blank=True) # same idea as likes.
def __str__(self):
return self.title
def total_likes(self):
return self.likes.count()
    # this method returns the URL of the post-detail page for this post, i.e. where users land after clicking a post's title on the website.
def get_absolute_url(self):
return reverse('blog:post-detail', kwargs={'pk' : self.pk})
class Comment(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE) # if the post is deleted, all of its comments are deleted too; deleting a comment does not delete the post.
    user = models.ForeignKey(User, on_delete=models.CASCADE) # same as above: if the user is deleted, all of their comments are deleted, but not vice versa.
comment = models.TextField(max_length=160)
    reply = models.ForeignKey('Comment', null=True, related_name='replies', on_delete=models.CASCADE, blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{}-{}'.format(self.post.title, str(self.user.username))
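# Illustrative usage of the models above, e.g. from `python manage.py shell`. `some_user` is a
# hypothetical existing User; the 'blog:post-detail' URL name is assumed to be wired up as
# referenced in get_absolute_url.
#   post = Post.objects.create(title="Hello", content="First post", author=some_user)
#   post.likes.add(some_user)
#   post.total_likes()       # -> 1
#   post.get_absolute_url()  # -> URL of the post-detail page for this post
#   Comment.objects.create(post=post, user=some_user, comment="Nice post!")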
|
python
|
import pathlib
from typing import Callable, Union
import click
__version__ = "21.5.16"
APP_NAME = "cldb"
app_dir = pathlib.Path(click.get_app_dir(APP_NAME, roaming=False, force_posix=True))
cache_root = app_dir / "cache"
config = {
"bs_features": "lxml",
}
from . import models # noqa: E402
SpecFetcher = Callable[[str, str], Union[models.Lens, models.Camera]]
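# Illustrative only: any callable of the form
#     def fetch(maker: str, name: str) -> Union[models.Lens, models.Camera]: ...
# satisfies the SpecFetcher alias above (maker and model name in, a parsed spec object out);
# `fetch` is a hypothetical name, not part of this package.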
|
python
|
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import numbers
import struct
from histogrammar.defs import Container, Factory, identity, JsonFormatException, ContainerException
from histogrammar.util import n_dim, datatype, serializable, inheritdoc, maybeAdd, floatToJson, hasKeys, numeq, \
xrange, long, basestring
from histogrammar.primitives.count import Count
class Bin(Factory, Container):
"""Split a quantity into equally spaced bins between a low and high threshold and fill exactly one bin per datum.
When composed with :doc:`Count <histogrammar.primitives.count.Count>`, this produces a standard histogram:
::
Bin.ing(100, 0, 10, fill_x, Count.ing())
and when nested, it produces a two-dimensional histogram:
::
Bin.ing(100, 0, 10, fill_x,
Bin.ing(100, 0, 10, fill_y, Count.ing()))
Combining with [Deviate](#deviate-mean-and-variance) produces a physicist's "profile plot:"
::
Bin.ing(100, 0, 10, fill_x, Deviate.ing(fill_y))
and so on.
"""
@staticmethod
def ed(low, high, entries, values, underflow, overflow, nanflow):
"""Create a Bin that is only capable of being added.
Parameters:
low (float): the minimum-value edge of the first bin.
high (float): the maximum-value edge of the last bin; must be strictly greater than `low`.
entries (float): the number of entries.
values (list of :doc:`Container <histogrammar.defs.Container>`): the filled sub-aggregators,
one for each bin.
underflow (:doc:`Container <histogrammar.defs.Container>`): the filled underflow bin.
overflow (:doc:`Container <histogrammar.defs.Container>`): the filled overflow bin.
            nanflow (:doc:`Container <histogrammar.defs.Container>`): the filled nanflow bin.
"""
        if not isinstance(low, numbers.Real) and low not in ("nan", "inf", "-inf"):
            raise TypeError("low ({0}) must be a number".format(low))
        if not isinstance(high, numbers.Real) and high not in ("nan", "inf", "-inf"):
            raise TypeError("high ({0}) must be a number".format(high))
if not isinstance(entries, numbers.Real) and entries not in ("nan", "inf", "-inf"):
raise TypeError("entries ({0}) must be a number".format(entries))
        if not isinstance(values, (list, tuple)) or not all(isinstance(v, Container) for v in values):
raise TypeError("values ({0}) must be a list of Containers".format(values))
if not isinstance(underflow, Container):
raise TypeError("underflow ({0}) must be a Container".format(underflow))
if not isinstance(overflow, Container):
raise TypeError("overflow ({0}) must be a Container".format(overflow))
if not isinstance(nanflow, Container):
raise TypeError("nanflow ({0}) must be a Container".format(nanflow))
if low >= high:
raise ValueError("low ({0}) must be less than high ({1})".format(low, high))
if entries < 0.0:
raise ValueError("entries ({0}) cannot be negative".format(entries))
if len(values) < 1:
raise ValueError("values ({0}) must have at least one element".format(values))
out = Bin(len(values), float(low), float(high), None, None, underflow, overflow, nanflow)
out.entries = float(entries)
out.values = values
out.contentType = values[0].name
return out.specialize()
@staticmethod
def ing(num, low, high, quantity, value=Count(), underflow=Count(), overflow=Count(), nanflow=Count()):
"""Synonym for ``__init__``."""
return Bin(num, low, high, quantity, value, underflow, overflow, nanflow)
def __init__(self, num, low, high, quantity=identity, value=Count(),
underflow=Count(), overflow=Count(), nanflow=Count()):
"""Create a Bin that is capable of being filled and added.
Parameters:
num (int): the number of bins; must be at least one.
low (float): the minimum-value edge of the first bin.
high (float): the maximum-value edge of the last bin; must be strictly greater than `low`.
            quantity (function returning float, or string): function that computes the quantity of interest from
                the data; by default the identity function, which passes values through unchanged. If a string is
                given, quantity is set to identity(string), in which case that column is picked up from a pandas
                DataFrame.
value (:doc:`Container <histogrammar.defs.Container>`): generates sub-aggregators to put in each bin.
underflow (:doc:`Container <histogrammar.defs.Container>`): a sub-aggregator to use for data whose quantity
is less than `low`.
overflow (:doc:`Container <histogrammar.defs.Container>`): a sub-aggregator to use for data whose quantity
is greater than or equal to `high`.
nanflow (:doc:`Container <histogrammar.defs.Container>`): a sub-aggregator to use for data whose quantity
is NaN.
Other parameters:
entries (float): the number of entries, initially 0.0.
values (list of :doc:`Container <histogrammar.defs.Container>`): the sub-aggregators in each bin.
"""
if not isinstance(num, (int, long)):
raise TypeError("num ({0}) must be an integer".format(num))
if not isinstance(low, numbers.Real):
raise TypeError("low ({0}) must be a number".format(low))
if not isinstance(high, numbers.Real):
raise TypeError("high ({0}) must be a number".format(high))
if value is not None and not isinstance(value, Container):
raise TypeError("value ({0}) must be a Container".format(value))
if not isinstance(underflow, Container):
raise TypeError("underflow ({0}) must be a Container".format(underflow))
if not isinstance(overflow, Container):
raise TypeError("overflow ({0}) must be a Container".format(overflow))
if not isinstance(nanflow, Container):
raise TypeError("nanflow ({0}) must be a Container".format(nanflow))
if num < 1:
raise ValueError("num ({0}) must be least one".format(num))
if low >= high:
raise ValueError("low ({0}) must be less than high ({1})".format(low, high))
self.entries = 0.0
self.low = float(low)
self.high = float(high)
self.quantity = serializable(identity(quantity) if isinstance(quantity, str) else quantity)
if value is None:
self.values = [None] * num
self.contentType = "Count"
else:
self.values = [value.zero() for i in xrange(num)]
self.contentType = value.name
self.underflow = underflow.copy()
self.overflow = overflow.copy()
self.nanflow = nanflow.copy()
super(Bin, self).__init__()
self.specialize()
def ascii(self):
"""Prints ascii histogram, for debuging on headless machines"""
underflow = self.underflow.entries
overflow = self.overflow.entries
nanflow = self.nanflow.entries
values = [underflow] + [x.entries for x in self.values] + [overflow, nanflow]
minimum = min(values)
maximum = max(values)
# Map values to number of dots representing them (maximum is 63)
mintomax = maximum - minimum
if mintomax == 0.0:
mintomax = 1.0
prop = 62.0 / mintomax
        length = len(values)
dots = [None] * length
i = 0
while i < length:
dots[i] = int(round((values[i] - minimum)*prop))
i += 1
# Get range of values corresponding to each bin
ranges = ["underflow"] + [None] * (length - 3) + ["overflow", "nanflow"]
i = 1
while i < (length - 2):
ranges[i] = "[" + str(self.range(i))[1:]
i += 1
printedValues = ["{0:<.4g}".format(v) for v in values]
printedValuesWidth = max(len(x) for x in printedValues)
formatter = "{0:<14} {1:<%s} {2:<65}" % printedValuesWidth
print(" " * printedValuesWidth + "{0:>16}{1:>65}".format(minimum, maximum))
print(" " * (16 + printedValuesWidth) + "+" + "-" * 62 + "+")
i = 0
while i < length:
print(formatter.format(ranges[i], printedValues[i], "|" + "*" * dots[i] + " " * (62 - dots[i]) + "|"))
i += 1
print(" " * (16 + printedValuesWidth) + "+" + "-" * 62 + "+")
def histogram(self):
"""Return a plain histogram by converting all sub-aggregator values into Counts"""
out = Bin(len(self.values), self.low, self.high, self.quantity, None,
self.underflow.copy(), self.overflow.copy(), self.nanflow.copy())
out.entries = float(self.entries)
for i, v in enumerate(self.values):
out.values[i] = Count.ed(v.entries)
return out.specialize()
@inheritdoc(Container)
def zero(self):
return Bin(len(self.values), self.low, self.high, self.quantity, self.values[0].zero(), self.underflow.zero(),
self.overflow.zero(), self.nanflow.zero())
@inheritdoc(Container)
def __add__(self, other):
if isinstance(other, Bin):
if self.low != other.low:
raise ContainerException("cannot add Bins because low differs ({0} vs {1})".format(self.low, other.low))
if self.high != other.high:
raise ContainerException(
"cannot add Bins because high differs ({0} vs {1})".format(
self.high, other.high))
if len(self.values) != len(other.values):
raise ContainerException("cannot add Bins because nubmer of values differs ({0} vs {1})".format(
len(self.values), len(other.values)))
if len(self.values) == 0:
raise ContainerException("cannot add Bins because number of values is zero")
out = Bin(len(self.values),
self.low,
self.high,
self.quantity,
self.values[0],
self.underflow + other.underflow,
self.overflow + other.overflow,
self.nanflow + other.nanflow)
out.entries = self.entries + other.entries
out.values = [x + y for x, y in zip(self.values, other.values)]
return out.specialize()
else:
raise ContainerException("cannot add {0} and {1}".format(self.name, other.name))
@inheritdoc(Container)
def __iadd__(self, other):
if isinstance(other, Bin):
if self.low != other.low:
raise ContainerException("cannot add Bins because low differs ({0} vs {1})".format(self.low, other.low))
if self.high != other.high:
raise ContainerException(
"cannot add Bins because high differs ({0} vs {1})".format(
self.high, other.high))
if len(self.values) != len(other.values):
raise ContainerException("cannot add Bins because nubmer of values differs ({0} vs {1})".format(
len(self.values), len(other.values)))
if len(self.values) == 0:
raise ContainerException("cannot add Bins because number of values is zero")
self.entries += other.entries
for x, y in zip(self.values, other.values):
x += y
self.underflow += other.underflow
self.overflow += other.overflow
self.nanflow += other.nanflow
return self
else:
raise ContainerException("cannot add {0} and {1}".format(self.name, other.name))
@inheritdoc(Container)
def __mul__(self, factor):
if math.isnan(factor) or factor <= 0.0:
return self.zero()
else:
out = self.zero()
out.entries = factor * self.entries
for i, v in enumerate(self.values):
out.values[i] = v * factor
out.overflow = self.overflow * factor
out.underflow = self.underflow * factor
out.nanflow = self.nanflow * factor
return out.specialize()
@inheritdoc(Container)
def __rmul__(self, factor):
return self.__mul__(factor)
@property
def num(self):
"""Number of bins."""
return len(self.values)
def bin(self, x):
"""Find the bin index associated with numerical value ``x``.
@return -1 if ``x`` is out of range; the bin index otherwise.
"""
if self.under(x) or self.over(x) or self.nan(x):
return -1
else:
return int(math.floor(self.num * (x - self.low) / (self.high - self.low)))
def under(self, x):
"""Return ``true`` iff ``x`` is in the underflow region (less than ``low``)."""
return not math.isnan(x) and x < self.low
def over(self, x):
"""Return ``true`` iff ``x`` is in the overflow region (greater than ``high``)."""
return not math.isnan(x) and x >= self.high
def nan(self, x):
"""Return ``true`` iff ``x`` is in the nanflow region (equal to ``NaN``)."""
return math.isnan(x)
@property
def indexes(self):
"""Get a sequence of valid indexes."""
return range(self.num)
def range(self, index):
"""Get the low and high edge of a bin (given by index number)."""
return ((self.high - self.low) * index / self.num + self.low,
(self.high - self.low) * (index + 1) / self.num + self.low)
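    # Worked example (illustrative): with num=10, low=0.0, high=1.0,
    # bin(0.57) = floor(10 * (0.57 - 0.0) / (1.0 - 0.0)) = 5, and range(5) = (0.5, 0.6).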
@inheritdoc(Container)
def fill(self, datum, weight=1.0):
self._checkForCrossReferences()
if weight > 0.0:
q = self.quantity(datum)
if not isinstance(q, numbers.Real):
raise TypeError("function return value ({0}) must be boolean or number".format(q))
if self.under(q):
self.underflow.fill(datum, weight)
elif self.over(q):
self.overflow.fill(datum, weight)
elif self.nan(q):
self.nanflow.fill(datum, weight)
else:
self.values[self.bin(q)].fill(datum, weight)
# no possibility of exception from here on out (for rollback)
self.entries += weight
def _cppGenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix,
fillIndent, weightVars, weightVarStack, tmpVarTypes):
return self._c99GenerateCode(parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode,
fillPrefix, fillIndent, weightVars, weightVarStack, tmpVarTypes)
def _c99GenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix,
fillIndent, weightVars, weightVarStack, tmpVarTypes):
normexpr = self._c99QuantityExpr(
parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
None)
initCode.append(" " * initIndent + self._c99ExpandPrefix(*initPrefix) + ".entries = 0.0;")
fillCode.append(" " * fillIndent + self._c99ExpandPrefix(*fillPrefix) +
".entries += " + weightVarStack[-1] + ";")
fillCode.append(" " * fillIndent + "if (std::isnan({0})) {{".format(normexpr))
self.nanflow._c99GenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"nanflow"),
),
initIndent,
fillCode,
fillPrefix + (("var",
"nanflow"),
),
fillIndent + 2,
weightVars,
weightVarStack,
tmpVarTypes)
fillCode.append(" " * fillIndent + "}")
fillCode.append(" " * fillIndent + "else if ({0} < {1}) {{".format(normexpr, self.low))
self.underflow._c99GenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"underflow"),
),
initIndent,
fillCode,
fillPrefix + (("var",
"underflow"),
),
fillIndent + 2,
weightVars,
weightVarStack,
tmpVarTypes)
fillCode.append(" " * fillIndent + "}")
fillCode.append(" " * fillIndent + "else if ({0} >= {1}) {{".format(normexpr, self.high))
self.overflow._c99GenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"overflow"),
),
initIndent,
fillCode,
fillPrefix + (("var",
"overflow"),
),
fillIndent + 2,
weightVars,
weightVarStack,
tmpVarTypes)
fillCode.append(" " * fillIndent + "}")
fillCode.append(" " * fillIndent + "else {")
bin = "bin_" + str(len(tmpVarTypes))
tmpVarTypes[bin] = "int"
initCode.append(" " * initIndent + "for ({0} = 0; {0} < {1}; ++{0}) {{".format(bin, len(self.values)))
fillCode.append(" " * (fillIndent + 2) +
"{0} = floor(({1} - {2}) * {3});".format(bin, normexpr, self.low,
len(self.values)/(self.high - self.low)))
self.values[0]._c99GenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"values"),
("index",
bin)),
initIndent + 2,
fillCode,
fillPrefix + (("var",
"values"),
("index",
bin)),
fillIndent + 2,
weightVars,
weightVarStack,
tmpVarTypes)
initCode.append(" " * initIndent + "}")
fillCode.append(" " * fillIndent + "}")
storageStructs[self._c99StructName()] = """
typedef struct {{
double entries;
{3} underflow;
{4} overflow;
{5} nanflow;
{1} values[{2}];
{1}& getValues(int i) {{ return values[i]; }}
}} {0};
""".format(self._c99StructName(), self.values[0]._c99StorageType(), len(self.values), self.underflow._c99StorageType(),
self.overflow._c99StorageType(), self.nanflow._c99StorageType())
def _clingUpdate(self, filler, *extractorPrefix):
obj = self._clingExpandPrefix(filler, *extractorPrefix)
self.entries += obj.entries
for i in xrange(len(self.values)):
self.values[i]._clingUpdate(obj, ("func", ["getValues", i]))
self.underflow._clingUpdate(obj, ("var", "underflow"))
self.overflow._clingUpdate(obj, ("var", "overflow"))
self.nanflow._clingUpdate(obj, ("var", "nanflow"))
def _c99StructName(self):
return "Bn" + str(len(self.values)) + self.values[0]._c99StructName(
) + self.underflow._c99StructName() + self.overflow._c99StructName() + self.nanflow._c99StructName()
def _cudaGenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix,
fillIndent, combineCode, totalPrefix, itemPrefix, combineIndent, jsonCode, jsonPrefix,
jsonIndent, weightVars, weightVarStack, tmpVarTypes, suppressName):
normexpr = self._cudaQuantityExpr(
parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
None)
initCode.append(" " * initIndent + self._c99ExpandPrefix(*initPrefix) + ".entries = 0.0f;")
fillCode.append(" " * fillIndent +
"atomicAdd(&" + self._c99ExpandPrefix(*fillPrefix) + ".entries, " + weightVarStack[-1] + ");")
        combineCode.append(" " * combineIndent + "atomicAdd(&" + self._c99ExpandPrefix(*totalPrefix) +
                           ".entries, " + self._c99ExpandPrefix(*itemPrefix) + ".entries);")
jsonCode.append(" " *
jsonIndent +
"fprintf(out, \"{\\\"low\\\": " +
str(self.low) +
", \\\"high\\\": " +
str(self.high) +
", \\\"entries\\\": \");")
jsonCode.append(" " * jsonIndent + "floatToJson(out, " + self._c99ExpandPrefix(*jsonPrefix) + ".entries);")
fillCode.append(" " * fillIndent + "if (isnan({0})) {{".format(normexpr))
jsonCode.append(
" " *
jsonIndent +
"fprintf(out, \", \\\"nanflow:type\\\": \\\"" +
self.nanflow.name +
"\\\"\");")
jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"nanflow\\\": \");")
self.nanflow._cudaGenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"nanflow"),
),
initIndent + 2,
fillCode,
fillPrefix + (("var",
"nanflow"),
),
fillIndent + 2,
combineCode,
totalPrefix + (("var",
"nanflow"),
),
itemPrefix + (("var",
"nanflow"),
),
combineIndent,
jsonCode,
jsonPrefix + (("var",
"nanflow"),
),
jsonIndent,
weightVars,
weightVarStack,
tmpVarTypes,
False)
fillCode.append(" " * fillIndent + "}")
fillCode.append(" " * fillIndent + "else if ({0} < {1}) {{".format(normexpr, self.low))
jsonCode.append(
" " *
jsonIndent +
"fprintf(out, \", \\\"underflow:type\\\": \\\"" +
self.underflow.name +
"\\\"\");")
jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"underflow\\\": \");")
self.underflow._cudaGenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"underflow"),
),
initIndent + 2,
fillCode,
fillPrefix + (("var",
"underflow"),
),
fillIndent + 2,
combineCode,
totalPrefix + (("var",
"underflow"),
),
itemPrefix + (("var",
"underflow"),
),
combineIndent,
jsonCode,
jsonPrefix + (("var",
"underflow"),
),
jsonIndent,
weightVars,
weightVarStack,
tmpVarTypes,
False)
fillCode.append(" " * fillIndent + "}")
fillCode.append(" " * fillIndent + "else if ({0} >= {1}) {{".format(normexpr, self.high))
jsonCode.append(
" " *
jsonIndent +
"fprintf(out, \", \\\"overflow:type\\\": \\\"" +
self.overflow.name +
"\\\"\");")
jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"overflow\\\": \");")
self.overflow._cudaGenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"overflow"),
),
initIndent + 2,
fillCode,
fillPrefix + (("var",
"overflow"),
),
fillIndent + 2,
combineCode,
totalPrefix + (("var",
"overflow"),
),
itemPrefix + (("var",
"overflow"),
),
combineIndent,
jsonCode,
jsonPrefix + (("var",
"overflow"),
),
jsonIndent,
weightVars,
weightVarStack,
tmpVarTypes,
False)
fillCode.append(" " * fillIndent + "}")
fillCode.append(" " * fillIndent + "else {")
bin = "bin_" + str(len(tmpVarTypes))
tmpVarTypes[bin] = "int"
initCode.append(" " * initIndent + "for ({0} = 0; {0} < {1}; ++{0}) {{".format(bin, len(self.values)))
fillCode.append(" " * (fillIndent + 2) +
"{0} = floor(({1} - {2}) * {3});".format(bin, normexpr, self.low,
len(self.values)/(self.high - self.low)))
combineCode.append(" " * combineIndent + "for ({0} = 0; {0} < {1}; ++{0}) {{".format(bin, len(self.values)))
jsonCode.append(
" " *
jsonIndent +
"fprintf(out, \", \\\"values:type\\\": \\\"" +
self.values[0].name +
"\\\"\");")
if hasattr(self.values[0], "quantity") and self.values[0].quantity.name is not None:
jsonCode.append(
" " *
jsonIndent +
"fprintf(out, \", \\\"values:name\\\": \\\"" +
self.values[0].quantity.name +
"\\\"\");")
jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"values\\\": [\");")
jsonCode.append(" " * jsonIndent + "for ({0} = 0; {0} < {1}; ++{0}) {{".format(bin, len(self.values)))
self.values[0]._cudaGenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"values"),
("index",
bin)),
initIndent + 2,
fillCode,
fillPrefix + (("var",
"values"),
("index",
bin)),
fillIndent + 2,
combineCode,
totalPrefix + (("var",
"values"),
("index",
bin)),
itemPrefix + (("var",
"values"),
("index",
bin)),
combineIndent + 2,
jsonCode,
jsonPrefix + (("var",
"values"),
("index",
bin)),
jsonIndent + 2,
weightVars,
weightVarStack,
tmpVarTypes,
True)
initCode.append(" " * initIndent + "}")
fillCode.append(" " * fillIndent + "}")
combineCode.append(" " * combineIndent + "}")
jsonCode.append(" " * jsonIndent + " if ({0} != {1})".format(bin, len(self.values) - 1))
jsonCode.append(" " * jsonIndent + " fprintf(out, \", \");")
jsonCode.append(" " * jsonIndent + "}")
if suppressName or self.quantity.name is None:
jsonCode.append(" " * jsonIndent + "fprintf(out, \"]}\");")
else:
jsonCode.append(" " * jsonIndent + "fprintf(out, \"], \\\"name\\\": " +
json.dumps(json.dumps(self.quantity.name))[1:-1] + "}\");")
storageStructs[self._c99StructName()] = """
typedef struct {{
float entries;
{3} underflow;
{4} overflow;
{5} nanflow;
{1} values[{2}];
}} {0};
""".format(self._c99StructName(), self.values[0]._cudaStorageType(), len(self.values),
self.underflow._cudaStorageType(), self.overflow._cudaStorageType(), self.nanflow._cudaStorageType())
def _cudaUnpackAndFill(self, data, bigendian, alignment):
format = "<f"
entries, = struct.unpack(format, data[:struct.calcsize(format)])
self.entries += entries
data = data[struct.calcsize(format):]
data = self.underflow._cudaUnpackAndFill(data, bigendian, alignment)
data = self.overflow._cudaUnpackAndFill(data, bigendian, alignment)
data = self.nanflow._cudaUnpackAndFill(data, bigendian, alignment)
for value in self.values:
data = value._cudaUnpackAndFill(data, bigendian, alignment)
return data
def _numpy(self, data, weights, shape):
q = self.quantity(data)
self._checkNPQuantity(q, shape)
self._checkNPWeights(weights, shape)
weights = self._makeNPWeights(weights, shape)
newentries = weights.sum()
import numpy
selection = numpy.isnan(q)
numpy.bitwise_not(selection, selection)
subweights = weights.copy()
subweights[selection] = 0.0
self.nanflow._numpy(data, subweights, shape)
# avoid nan warning in calculations by flinging the nans elsewhere
numpy.bitwise_not(selection, selection)
q = numpy.array(q, dtype=numpy.float64)
q[selection] = self.high
weights = weights.copy()
weights[selection] = 0.0
numpy.greater_equal(q, self.low, selection)
subweights[:] = weights
subweights[selection] = 0.0
self.underflow._numpy(data, subweights, shape)
numpy.less(q, self.high, selection)
subweights[:] = weights
subweights[selection] = 0.0
self.overflow._numpy(data, subweights, shape)
if all(isinstance(value, Count) and value.transform is identity for value in self.values) and numpy.all(
numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
# Numpy defines histograms as including the upper edge of the last bin only, so drop that
            weights[q == self.high] = 0.0
h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)
for hi, value in zip(h, self.values):
value.fill(None, float(hi))
else:
q = numpy.array(q, dtype=numpy.float64)
numpy.subtract(q, self.low, q)
numpy.multiply(q, self.num, q)
numpy.divide(q, self.high - self.low, q)
numpy.floor(q, q)
q = numpy.array(q, dtype=int)
for index, value in enumerate(self.values):
numpy.not_equal(q, index, selection)
subweights[:] = weights
subweights[selection] = 0.0
value._numpy(data, subweights, shape)
# no possibility of exception from here on out (for rollback)
self.entries += float(newentries)
def _sparksql(self, jvm, converter):
return converter.Bin(len(self.values), self.low, self.high, self.quantity.asSparkSQL(),
self.values[0]._sparksql(jvm, converter), self.underflow._sparksql(jvm, converter),
self.overflow._sparksql(jvm, converter), self.nanflow._sparksql(jvm, converter))
@property
def children(self):
"""List of sub-aggregators, to make it possible to walk the tree."""
return [self.underflow, self.overflow, self.nanflow] + self.values
@inheritdoc(Container)
def toJsonFragment(self, suppressName):
if getattr(self.values[0], "quantity", None) is not None:
binsName = self.values[0].quantity.name
elif getattr(self.values[0], "quantityName", None) is not None:
binsName = self.values[0].quantityName
else:
binsName = None
return maybeAdd({
"low": floatToJson(self.low),
"high": floatToJson(self.high),
"entries": floatToJson(self.entries),
"values:type": self.values[0].name,
"values": [x.toJsonFragment(True) for x in self.values],
"underflow:type": self.underflow.name,
"underflow": self.underflow.toJsonFragment(False),
"overflow:type": self.overflow.name,
"overflow": self.overflow.toJsonFragment(False),
"nanflow:type": self.nanflow.name,
"nanflow": self.nanflow.toJsonFragment(False),
}, **{"name": None if suppressName else self.quantity.name,
"values:name": binsName})
@staticmethod
@inheritdoc(Factory)
def fromJsonFragment(json, nameFromParent):
if isinstance(json, dict) and hasKeys(json.keys(), ["low", "high", "entries", "values:type", "values",
"underflow:type", "underflow", "overflow:type", "overflow",
"nanflow:type", "nanflow"], ["name", "values:name"]):
if json["low"] in ("nan", "inf", "-inf") or isinstance(json["low"], numbers.Real):
low = float(json["low"])
else:
raise JsonFormatException(json, "Bin.low")
if json["high"] in ("nan", "inf", "-inf") or isinstance(json["high"], numbers.Real):
high = float(json["high"])
else:
raise JsonFormatException(json, "Bin.high")
if json["entries"] in ("nan", "inf", "-inf") or isinstance(json["entries"], numbers.Real):
entries = float(json["entries"])
else:
raise JsonFormatException(json, "Bin.entries")
if isinstance(json.get("name", None), basestring):
name = json["name"]
elif json.get("name", None) is None:
name = None
else:
raise JsonFormatException(json["name"], "Bin.name")
if isinstance(json["values:type"], basestring):
valuesFactory = Factory.registered[json["values:type"]]
else:
raise JsonFormatException(json, "Bin.values:type")
if isinstance(json.get("values:name", None), basestring):
valuesName = json["values:name"]
elif json.get("values:name", None) is None:
valuesName = None
else:
raise JsonFormatException(json["values:name"], "Bin.values:name")
if isinstance(json["values"], list):
values = [valuesFactory.fromJsonFragment(x, valuesName) for x in json["values"]]
else:
raise JsonFormatException(json, "Bin.values")
if isinstance(json["underflow:type"], basestring):
underflowFactory = Factory.registered[json["underflow:type"]]
else:
raise JsonFormatException(json, "Bin.underflow:type")
underflow = underflowFactory.fromJsonFragment(json["underflow"], None)
if isinstance(json["overflow:type"], basestring):
overflowFactory = Factory.registered[json["overflow:type"]]
else:
raise JsonFormatException(json, "Bin.overflow:type")
overflow = overflowFactory.fromJsonFragment(json["overflow"], None)
if isinstance(json["nanflow:type"], basestring):
nanflowFactory = Factory.registered[json["nanflow:type"]]
else:
raise JsonFormatException(json, "Bin.nanflow:type")
nanflow = nanflowFactory.fromJsonFragment(json["nanflow"], None)
out = Bin.ed(low, high, entries, values, underflow, overflow, nanflow)
out.quantity.name = nameFromParent if name is None else name
return out.specialize()
else:
raise JsonFormatException(json, "Bin")
def __repr__(self):
return "<Bin num={0} low={1} high={2} values={3} underflow={4} overflow={5} nanflow={6}>".format(
len(self.values), self.low, self.high, self.values[0].name, self.underflow.name, self.overflow.name,
self.nanflow.name)
def __eq__(self, other):
return isinstance(other, Bin) and numeq(self.low, other.low) and numeq(self.high, other.high) and \
self.quantity == other.quantity and numeq(self.entries, other.entries) and \
self.values == other.values and self.underflow == other.underflow and \
self.overflow == other.overflow and self.nanflow == other.nanflow
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.low, self.high, self.quantity, self.entries, tuple(
self.values), self.underflow, self.overflow, self.nanflow))
@property
def n_bins(self):
"""Get number of bins, consistent with SparselyBin and Categorize """
return self.num
def num_bins(self, low=None, high=None):
"""
Returns number of bins of a given (sub-)range
Possible to set range with low and high params
:param low: lower edge of range, default is None
:param high: higher edge of range, default is None
:returns: number of bins in range
:rtype: int
"""
import numpy as np
# trivial cases first
if low is None and high is None:
return len(self.values)
# catch weird cases
elif low is not None and high is not None:
if low > high:
raise RuntimeError('low {low} greater than high {high}'.format(low=low, high=high))
if low < self.low and high < self.low:
# note: all these data end up in the underflow bin, with no real index
return 0
if low >= self.high and high >= self.high:
# note: all these data end up in the overflow bin, with no real index
return 0
# lowest edge
if low is None or low < self.low:
low = self.low
else: # low >= self.low and low < self.high
minBin = self.bin(low)
low = self.low + self.bin_width() * minBin
# highest edge
if high is None or high >= self.high:
high = self.high
else: # high < self.high and high >= self.low
maxBin = self.bin(high)
if np.isclose(high, self.low + self.bin_width() * maxBin):
maxBin -= 1
high = self.low + self.bin_width() * (maxBin + 1)
# number of bins
num_bins = int((high - low) / self.bin_width())
return num_bins
def bin_width(self):
"""
Returns bin width
"""
return (self.high - self.low) / len(self.values)
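    # Worked example (illustrative): for a Bin with low=0.0, high=10.0 and 10 bins,
    # bin_width() -> 1.0 and num_bins(2.0, 7.0) -> 5, covering the bins [2,3), ..., [6,7).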
def bin_entries(self, low=None, high=None, xvalues=[]):
"""
Returns bin values
Possible to set range with low and high params, and list of selected x-values
:param low: lower edge of range, default is None
:param high: higher edge of range, default is None
:param xvalues: list of x-values to get entries of, alternative to low and high
:returns: numpy array with numbers of entries for selected bins
:rtype: numpy.array
"""
import numpy as np
# trivial case
if low is None and high is None and len(xvalues) == 0:
return np.array([x.entries for x in self.values])
# catch weird cases
elif low is not None and high is not None and len(xvalues) == 0:
if low > high:
raise RuntimeError('low {low} greater than high {high}'.format(low=low, high=high))
if low < self.low and high < self.low:
# note: all these data end up in the underflow bin
return np.array([])
if low >= self.high and high >= self.high:
# note: all these data end up in the overflow bin
return np.array([])
# entries at request list of x-values
elif len(xvalues) > 0:
entries = [self.values[self.bin(x)].entries if self.bin(x) in self.indexes else 0.0 for x in xvalues]
return np.array(entries)
# lowest edge
if low is None or low < self.low:
minBin = 0
else: # low >= self.low and low < self.high
minBin = self.bin(low)
# highest edge
if high is None or high >= self.high:
maxBin = len(self.values) - 1
else: # high < self.high and high >= self.low
maxBin = self.bin(high)
if np.isclose(high, self.low + self.bin_width() * maxBin):
maxBin -= 1
return np.array([self.values[i].entries for i in range(minBin, maxBin + 1)])
def bin_edges(self, low=None, high=None):
"""
Returns bin edges
:param low: lower edge of range, default is None
:param high: higher edge of range, default is None
:returns: numpy array with bin edges for selected range
:rtype: numpy.array
"""
import numpy as np
num_bins = self.num_bins(low, high)
# trivial cases first
if low is None and high is None:
return np.linspace(self.low, self.high, num_bins + 1)
# catch weird cases
elif low is not None and high is not None:
if low > high:
raise RuntimeError('low {low} greater than high {high}'.format(low=low, high=high))
if low < self.low and high < self.low:
# note: all these data end up in the underflow bin
return np.linspace(self.low, self.low, num_bins + 1)
if low >= self.high and high >= self.high:
# note: all these data end up in the overflow bin
return np.linspace(self.high, self.high, num_bins + 1)
# lowest edge
if low is None or low < self.low:
low = self.low
else: # low >= self.low and low < self.high
minBin = self.bin(low)
low = self.low + self.bin_width() * minBin
# highest edge
if high is None or high >= self.high:
high = self.high
else: # high < self.high and high >= self.low
maxBin = self.bin(high)
if np.isclose(high, self.low + self.bin_width() * maxBin):
maxBin -= 1
high = self.low + self.bin_width() * (maxBin + 1)
edges = np.linspace(low, high, num_bins + 1)
return edges
def bin_centers(self, low=None, high=None):
"""
Returns bin centers
:param low: lower edge of range, default is None
:param high: higher edge of range, default is None
:returns: numpy array with bin centers for selected range
:rtype: numpy.array
"""
import numpy as np
# trivial case
if low is None and high is None:
return np.array([sum(self.range(i)) / 2.0 for i in self.indexes])
# catch weird cases
elif low is not None and high is not None:
if low > high:
raise RuntimeError('low {low} greater than high {high}'.format(low=low, high=high))
if low < self.low and high < self.low:
# note: all these data end up in the underflow bin
return np.array([])
if low >= self.high and high >= self.high:
# note: all these data end up in the overflow bin
return np.array([])
# lowest edge
if low is None or low < self.low:
minBin = 0
else: # low >= self.low and low < self.high
minBin = self.bin(low)
# highest edge
if high is None or high >= self.high:
maxBin = len(self.values) - 1
else: # high < self.high and high >= self.low
maxBin = self.bin(high)
if np.isclose(high, self.low + self.bin_width() * maxBin):
maxBin -= 1
return np.array([sum(self.range(i)) / 2.0 for i in range(minBin, maxBin + 1)])
def _center_from_key(self, idx):
xc = (idx + 0.5) * self.bin_width() + self.low
return xc
@property
def mpv(self):
"""Return bin-center of most probable value
"""
bin_entries = self.bin_entries()
bin_centers = self.bin_centers()
# if two max elements are equal, this will return the element with the lowest index.
max_idx = max(enumerate(bin_entries), key=lambda x: x[1])[0]
bc = bin_centers[max_idx]
return bc
# extra properties: number of dimensions and datatypes of sub-hists
Bin.n_dim = n_dim
Bin.datatype = datatype
# register extra methods such as plotting
Factory.register(Bin)
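# Illustrative usage, mirroring the docstring examples above: a 10-bin histogram of values in [0, 1).
#   h = Bin.ing(10, 0.0, 1.0, lambda x: x, Count.ing())
#   for v in (0.05, 0.25, 0.55, 0.55, 0.95):
#       h.fill(v)
#   h.entries        # -> 5.0
#   h.bin(0.55)      # -> 5
#   h.bin_entries()  # -> array([1., 0., 1., 0., 0., 2., 0., 0., 0., 1.])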
|
python
|
import time
from environment_server.actor_data import ActorData
import traceback
def replay_buffer_process(params, batch_sizes, batch_addresses, transition_queue, replay_lock):
try:
from replay_buffer import replay_client
import random
batch_data = [ActorData(params, b, a) for b, a in zip(batch_sizes, batch_addresses)]
consecutive_batches = params['Misc']['consecutive_batches']
num_actions = params['Agent57']['dual_heads']['num_actions'] - 1
for b, data in zip(batch_sizes, batch_data):
with data.lock:
for i in range(b):
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
while episode_id < 0 or j < 0:
time.sleep(1)
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
data.episode_ids[i] = episode_id
data.j[i] = j
data.actions[i] = random.randint(0, num_actions)
data.resets[:] = True
data.timer[:] = time.time()
data.status[:] = 1
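        # Status handshake, as inferred from the processes in this module:
        #   0 = transition ready for the replay buffer / uploader,
        #   1 = ready for the environment step,
        #   2 = ready for intrinsic-motivation inference (or the combined Agent57 process),
        #   3 = ready for DQN action selection.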
while True:
for b, data in zip(batch_sizes, batch_data):
while data.status[0] != 0:
pass
with data.lock:
# print("Buffer")
total_time = time.time() - data.timer[0]
data.timer[0] = time.time()
print(f"\rActions per second {consecutive_batches / (total_time * len(batch_sizes))} ", end="")
transition_queue.put([
data.episode_ids.copy(),
data.steps.copy(),
data.extrinsic_rewards.copy(),
data.intrinsic_rewards.copy(),
data.prev_actions.copy(),
data.observations.copy(),
data.hidden.copy(),
data.mu.copy(),
data.q_value.copy(),
data.discounted_q.copy(),
data.resets.copy()
])
for i, reset in enumerate(data.resets):
if reset:
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
while episode_id < 0 or j < 0:
print(f"\rPausing while eps_id or j < 0: {episode_id} {j}", end="")
time.sleep(120)
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
data.episode_ids[i] = episode_id
data.j[i] = j
data.status[0] = 1
except Exception as e:
print(e)
        traceback.print_exc(4)
def transition_upload_process(params, batch_sizes, batch_addresses, replay_lock, config=None):
try:
from replay_buffer import replay_client, database
import random
cm = database.ConnectionManager(config)
batch_data = [ActorData(params, b, a) for b, a in zip(batch_sizes, batch_addresses)]
consecutive_batches = params['Misc']['consecutive_batches']
num_actions = params['Agent57']['dual_heads']['num_actions'] - 1
for b, data in zip(batch_sizes, batch_data):
with data.lock:
for i in range(b):
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
while episode_id < 0 or j < 0:
time.sleep(1)
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
data.episode_ids[i] = episode_id
data.j[i] = j
data.actions[i] = random.randint(0, num_actions)
data.resets[:] = True
data.timer[:] = time.time()
data.status[:] = 1
while True:
for b, data in zip(batch_sizes, batch_data):
while data.status[0] != 0:
pass
with data.lock:
# print("Buffer")
total_time = time.time() - data.timer[0]
data.timer[0] = time.time()
print(f"\rActions per second {consecutive_batches / (total_time * len(batch_sizes))} ", end="")
transitions = []
resets = []
for episode_id, step, extrinsic_reward, intrinsic_reward, action, observation, hidden, mu, value, discounted, reset in zip(
data.episode_ids.copy(),
data.steps.copy(),
data.extrinsic_rewards.copy(),
data.intrinsic_rewards.copy(),
data.prev_actions.copy(),
data.observations.copy(),
data.hidden.copy(),
data.mu.copy(),
data.q_value.copy(),
data.discounted_q.copy(),
data.resets.copy()):
transitions.append([int(episode_id),
int(step),
float(extrinsic_reward),
float(intrinsic_reward),
int(action),
observation.tobytes(),
hidden.tobytes(),
float(mu),
float(value),
float(discounted)])
resets.append([episode_id, reset])
while len(transitions) > 0:
with replay_lock:
allowed_to_upload = replay_client.request_transition_upload(params, len(transitions))
if allowed_to_upload > 0:
packets_1024 = allowed_to_upload // 1024
final_packet = allowed_to_upload - (packets_1024 * 1024)
packets = [1024 for _ in range(packets_1024)]
if final_packet:
packets.append(final_packet)
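                            # Illustrative: allowed_to_upload = 2500 -> packets = [1024, 1024, 452],
                            # i.e. full 1024-transition packets plus one final partial packet.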
for upload in packets:
uploading = transitions[:upload]
resetting = resets[:upload]
transitions = transitions[upload:]
resets = resets[upload:]
success = cm.upload_transitions(uploading)
if success:
for r in resetting:
if r[1]:
with replay_lock:
replay_client.signal_episode_end(params, int(r[0]))
else:
transitions += uploading
resets += resetting
for i, reset in enumerate(data.resets):
if reset:
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
while episode_id < 0 or j < 0:
print(f"\rPausing while eps_id or j < 0: {episode_id} {j}", end="")
time.sleep(120)
with replay_lock:
episode_id, j = replay_client.request_episode_id(params)
data.episode_ids[i] = episode_id
data.j[i] = j
data.status[0] = 1
except Exception as e:
print(e)
        traceback.print_exc(4)
def environment_process(params, batch_sizes, batch_addresses):
try:
from environment_server.environment_wrapper import Environment
batch_data = [ActorData(params, b, a) for b, a in zip(batch_sizes, batch_addresses)]
frameskip = params['Misc']['frameskip'] if type(params['Misc']['frameskip']) == int else False
multi_envs = [[Environment(params['Misc']['environment'], params['Misc']['obs_type'], frameskip,
params['Misc']['max_episode_length'], i, data.actions, data.observations,
data.extrinsic_rewards, data.steps, data.resets, data.loss_of_life,
params['Misc']['reward_scale']) for i in range(b)] for b, data in
zip(batch_sizes, batch_data)]
while True:
for b, multi_env, data in zip(batch_sizes, multi_envs, batch_data):
while data.status[0] != 1:
pass
with data.lock:
# print("Env")
for env in multi_env:
env()
data.prev_actions[:] = data.actions
data.status[0] = 2
except Exception as e:
print(e)
        traceback.print_exc(4)
# environment_process(params, batch_sizes, batch_addresses)
def split_environment_process(params, batch_sizes, batch_addresses, num_splits):
from multiprocessing import Process, Queue
from environment_server.environment_wrapper import multi_env
envs = []
starts = []
ends = []
batch_data = []
for b, address in zip(batch_sizes, batch_addresses):
batch_data.append(ActorData(params, b, address))
env = []
start = []
end = []
for i in range(num_splits):
s = Queue()
e = Queue()
env.append(Process(target=multi_env, args=(params, b, address, s, e, num_splits, i)))
start.append(s)
end.append(e)
envs.append(env)
starts.append(start)
ends.append(end)
for env in envs:
for e in env:
e.start()
while True:
for data, start, end in zip(batch_data, starts, ends):
while data.status[0] != 1:
pass
with data.lock:
for s in start:
s.put(True)
for e in end:
e.get()
data.prev_actions[:] = data.actions
data.status[0] = 2
def intrinsic_motivation_process(params, batch_sizes, path_index, path_template, batch_addresses, device):
import tensorflow as tf
dtype = params['Misc']['dtype']
if dtype == 'float16':
tf.keras.mixed_precision.set_global_policy('mixed_float16')
from models.intrinsic_motivation import IntrinsicMotivation, get_intrinsic_motivation_model
batch_data = [ActorData(params, b, a) for b, a in zip(batch_sizes, batch_addresses)]
with tf.device(device):
with path_index.get_lock():
if path_index.value >= 0:
path = path_template.format("im", path_index.value)
else:
path = None
print(f"Loading model from {path}\n", end="")
model = get_intrinsic_motivation_model(params, batch_sizes, path)
update_every = params['Misc']['actor_weight_update']
next_update = update_every
while True:
for b, batch_size, data in zip(range(len(batch_sizes)), batch_sizes, batch_data):
while data.status[0] != 2:
pass
with data.lock:
observations = tf.cast(data.observations, dtype) / 255
intrinsic_rewards = model(observations, b)
# transition = [resets, x, prev_reward_e, j, beta, prev_reward_i]
data.intrinsic_rewards[:] = intrinsic_rewards.numpy()
resets = data.resets.copy()
data.status[0] = 3
# resetting after intrinsic calculation for final intrinsic reward
model.reset(b, resets)
next_update -= 1
if next_update <= 0:
next_update = update_every
with path_index.get_lock():
if path_index.value >= 0:
path = path_template.format("im", path_index.value)
print(f"Loading model from {path}\n", end="")
model.load_weights(path)
def dqn_process(params, batch_sizes, path_index, path_template, batch_addresses, device):
import tensorflow as tf
dtype = params['Misc']['dtype']
if dtype == 'float16':
tf.keras.mixed_precision.set_global_policy('mixed_float16')
from models.dqn import Agent57, get_agent57_model
from models import dqn
from bandit import policies
batch_data = [ActorData(params, b, a) for b, a in zip(batch_sizes, batch_addresses)]
with tf.device(device):
with path_index.get_lock():
if path_index.value >= 0:
path = path_template.format("dqn", path_index.value)
else:
path = None
print(f"Loading model from {path}\n", end="")
model = get_agent57_model(params, path)
num_actions = params['Agent57']['dual_heads']['num_actions']
hidden_units = 4 * params['Agent57']['lstm']['units']
N = params['Misc']['N']
L = params['Misc']['L']
consecutive_batches = params['Misc']['consecutive_batches']
update_every = params['Misc']['actor_weight_update']
next_update = update_every
zero_discount_on_life_loss = params['Misc']['zero_discount_on_life_loss']
hiddens = [tf.zeros((b, hidden_units), dtype) for b in batch_sizes]
greeds = tf.pow(params['Misc']['greed_e'],
1. + (params['Misc']['greed_a'] * (tf.range(L, dtype=dtype) / (L - 1))))
greeds = tf.tile(greeds, [consecutive_batches])
mu_random = greeds / num_actions
mu_q = 1. - (greeds * ((num_actions - 1) / num_actions))
greeds = tf.split(greeds, num_or_size_splits=batch_sizes)
mu_random = tf.split(mu_random, num_or_size_splits=batch_sizes)
mu_q = tf.split(mu_q, num_or_size_splits=batch_sizes)
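        # Illustrative schedule: with greed_e = 0.4 and greed_a = 8 (values assumed here for
        # illustration), actor i out of L gets epsilon = 0.4 ** (1 + 8 * i / (L - 1)), an
        # Ape-X style per-actor schedule; mu_random / mu_q are the behaviour-policy
        # probabilities assigned to the chosen action when it was picked randomly vs greedily.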
while True:
next_hiddens = []
for b, hidden, greed, m_r, m_q, data in zip(batch_sizes, hiddens, greeds, mu_random, mu_q, batch_data):
while data.status[0] != 3:
pass
with data.lock:
data.hidden[:] = hidden.numpy()
resets = tf.convert_to_tensor(data.resets)
one_hot_js = tf.one_hot(data.j, depth=N, dtype=dtype)
prev_a = tf.one_hot(data.prev_actions, depth=num_actions, dtype=dtype)
beta, gamma = policies.tf_get_policy(tf.cast(data.j, dtype), N, dtype)
beta = tf.expand_dims(beta, axis=-1)
prev_reward_e = dqn.h(tf.convert_to_tensor(data.extrinsic_rewards, dtype))
prev_r_i = dqn.h(tf.convert_to_tensor(data.intrinsic_rewards, dtype=dtype))
observations = tf.cast(data.observations, dtype) / 255
q_values, hidden = model(observations, prev_a, prev_reward_e, prev_r_i,
one_hot_js, beta, hidden)
action = tf.argmax(q_values, axis=-1, output_type=tf.int32)
q_values = dqn.h_inverse(q_values)
random_action = tf.random.uniform(action.shape, 0, num_actions - 1, tf.int32)
random_decision = tf.random.uniform(action.shape, 0., 1., dtype) < greed
random_decision = random_decision | resets
action = tf.where(random_decision, random_action, action)
mu = tf.where(random_decision, m_r, m_q)
selected_q_values = tf.reduce_sum(
tf.one_hot(action, depth=num_actions, dtype=dtype) * q_values, axis=-1)
discounted_q = tf.reduce_sum(tf.multiply(tf.nn.softmax(q_values, axis=-1),
q_values),
axis=-1)
if zero_discount_on_life_loss:
discounted_q = tf.multiply(discounted_q, tf.where(tf.convert_to_tensor(data.loss_of_life),
tf.zeros_like(gamma), gamma))
else:
discounted_q = tf.multiply(discounted_q, gamma)
data.actions[:] = action.numpy()
data.q_value[:] = selected_q_values.numpy()
data.discounted_q[:] = discounted_q.numpy()
data.mu[:] = mu.numpy()
data.status[0] = 0
if tf.reduce_any(resets):
resets = tf.tile(tf.expand_dims(resets, -1), [1, hidden_units])
hidden = tf.where(resets, tf.zeros_like(hidden), hidden)
next_hiddens.append(hidden)
hiddens = next_hiddens
next_update -= 1
if next_update <= 0:
next_update = update_every
with path_index.get_lock():
if path_index.value >= 0:
path = path_template.format("dqn", path_index.value)
print(f"Loading model from {path}\n", end="")
model.load_weights(path)
def Agent57_process(params, batch_sizes, path_index, path_template, batch_addresses, device, splits, split_position):
try:
import tensorflow as tf
dtype = params['Misc']['dtype']
if dtype == 'float16':
tf.keras.mixed_precision.set_global_policy('mixed_float16')
from models.intrinsic_motivation import IntrinsicMotivation, get_intrinsic_motivation_model
from models.dqn import Agent57, get_agent57_model
from models import dqn
from bandit import policies
batch_data = [ActorData(params, b, a) for b, a in zip(batch_sizes, batch_addresses)]
with tf.device(device):
while True:
with path_index.get_lock():
if path_index.value >= 0:
path = path_template.format("{}", path_index.value)
break
else:
time.sleep(3)
loading = path.format("im")
print(f"Loading model from {loading}\n", end="")
im = get_intrinsic_motivation_model(params, batch_sizes, loading)
loading = path.format("dqn")
print(f"Loading model from {loading}\n", end="")
agent57 = get_agent57_model(params, loading)
num_actions = params['Agent57']['dual_heads']['num_actions']
hidden_units = 4 * params['Agent57']['lstm']['units']
N = params['Misc']['N']
L = params['Misc']['L']
consecutive_batches = params['Misc']['consecutive_batches']
zero_discount_on_life_loss = params['Misc']['zero_discount_on_life_loss']
hiddens = [tf.zeros((b, hidden_units), dtype) for b in batch_sizes]
greeds = tf.pow(params['Misc']['greed_e'],
1. + (params['Misc']['greed_a'] * (tf.cast(tf.range(L), dtype) / (L - 1))))
greeds = tf.tile(greeds, [consecutive_batches])
mu_random = greeds / num_actions
mu_q = 1. - (greeds * ((num_actions - 1) / num_actions))
greeds = tf.split(greeds, num_or_size_splits=batch_sizes)
mu_random = tf.split(mu_random, num_or_size_splits=batch_sizes)
mu_q = tf.split(mu_q, num_or_size_splits=batch_sizes)
update_every = params['Misc']['actor_weight_update'] * (len(batch_sizes) / consecutive_batches)
next_update = update_every
for i in range(len(batch_sizes) - 1, -1, -1):
if i % splits != split_position:
batch_sizes.pop(i)
batch_data.pop(i)
greeds.pop(i)
mu_random.pop(i)
mu_q.pop(i)
hiddens.pop(i)
while True:
next_hiddens = []
for b, batch_size, data, greed, m_r, m_q, hidden in zip(range(len(batch_sizes)), batch_sizes,
batch_data, greeds, mu_random, mu_q, hiddens):
while data.status[0] != 2:
pass
with data.lock:
# print("Agent")
data.hidden[:] = hidden.numpy()
observations = tf.cast(data.observations, dtype) / 255
prev_r_i = im(observations, b)
data.intrinsic_rewards[:] = prev_r_i.numpy()
prev_r_i = dqn.h(prev_r_i)
resets = tf.convert_to_tensor(data.resets)
one_hot_js = tf.one_hot(data.j, depth=N, dtype=dtype)
prev_a = tf.one_hot(data.actions, depth=num_actions, dtype=dtype)
beta, gamma = policies.tf_get_policy(tf.cast(data.j, dtype), N, dtype)
beta = tf.expand_dims(beta, axis=-1)
prev_reward_e = dqn.h(tf.convert_to_tensor(data.extrinsic_rewards, dtype))
q_values, hidden = agent57(observations, prev_a, prev_reward_e, prev_r_i,
one_hot_js, beta, hidden)
action = tf.argmax(q_values, axis=-1, output_type=tf.int32)
q_values = dqn.h_inverse(q_values)
random_action = tf.random.uniform(action.shape, 0, num_actions - 1, tf.int32)
random_decision = tf.random.uniform(action.shape, 0., 1., dtype) < greed
random_decision = random_decision | resets
action = tf.where(random_decision, random_action, action)
mu = tf.where(random_decision, m_r, m_q)
selected_q_values = tf.reduce_sum(
tf.one_hot(action, depth=num_actions, dtype=dtype) * q_values, axis=-1)
discounted_q = tf.reduce_sum(tf.multiply(tf.nn.softmax(q_values, axis=-1),
q_values),
axis=-1)
if zero_discount_on_life_loss:
discounted_q = tf.multiply(discounted_q, tf.where(tf.convert_to_tensor(data.loss_of_life),
tf.zeros_like(gamma), gamma))
else:
discounted_q = tf.multiply(discounted_q, gamma)
data.actions[:] = action.numpy()
data.q_value[:] = selected_q_values.numpy()
data.discounted_q[:] = discounted_q.numpy()
data.mu[:] = mu.numpy()
resets = data.resets.copy()
data.status[0] = 0
im.reset(b, resets)
if tf.reduce_any(resets):
resets = tf.tile(tf.expand_dims(resets, -1), [1, hidden_units])
hidden = tf.where(resets, tf.zeros_like(hidden), hidden)
next_hiddens.append(hidden)
hiddens = next_hiddens
next_update -= 1
if next_update <= 0:
next_update = update_every
with path_index.get_lock():
if path_index.value >= 0:
path = path_template.format("{}", path_index.value)
                            # if no new snapshot index is available yet, keep the previously loaded path
loading = path.format("im")
print(f"\nLoading model from {loading}\n", end="")
im.load_weights(loading)
loading = path.format("dqn")
print(f"Loading model from {loading}\n", end="")
agent57.load_weights(loading)
except Exception as e:
print(e)
print(traceback.print_exc(4))
# Agent57_process(params, batch_sizes, path_index, path_template, batch_addresses, device, splits, split_position)
def weight_downloading_process(params, path_index, path_template, path_limit, download_period):
from learning_server.weights_client import download_files
import os
import re
import time
root = "/".join(re.split(r'/|\\', path_template)[:-1])
os.makedirs(root, exist_ok=True)
del root
while True:
with path_index.get_lock():
path = path_template.format("{}", (path_index.value + 1) % path_limit)
try:
download_files(params, path)
path_index.value = (path_index.value + 1) % path_limit
except Exception as e:
print(traceback.print_exc())
print(e)
time.sleep(download_period)
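# Note on the two-stage formatting used above (a sketch of the observed convention, not new logic):
# the downloader fills only the snapshot index and leaves the first slot as a literal "{}" so the
# consumer processes can later pick the sub-model. With path_template = '../weights/agent57_{}_{}.h5':
#     path = path_template.format("{}", 2)   # -> '../weights/agent57_{}_2.h5'
#     path.format("im")                      # -> '../weights/agent57_im_2.h5'
#     path.format("dqn")                     # -> '../weights/agent57_dqn_2.h5'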
if __name__ == "__main__":
import yaml
import tensorflow as tf
import numpy as np
from multiprocessing import Queue, Value, Process, Lock
from environment_server import local_replay_buffer
with open('../params.yml', 'r') as file:
params = yaml.full_load(file)
queue = Queue()
path_index = Value('i', -1)
path_template = '../weights/agent57_{}_{}.h5'
L = params['Misc']['L']
path_limit = 3
download_period = params['Misc']['download_period']
preferred_device_types = ['TPU', 'GPU']
secondary_device_types = ['CPU']
devices = []
for device_type in preferred_device_types:
for device in tf.config.get_visible_devices(device_type):
devices.append(":".join(device.name.split(":")[-2:]))
if len(devices) == 0:
for device_type in secondary_device_types:
for device in tf.config.get_visible_devices(device_type):
devices.append(":".join(device.name.split(":")[-2:]))
if len(devices) == 0:
raise Exception
elif len(devices) < 2:
devices.append(devices[0])
batches = params['Misc']['consecutive_batches'] * params['Misc']['batch_splits']
num_envs = L * params['Misc']['consecutive_batches']
batch_size = num_envs // batches
last_batch_size = num_envs - (batch_size * (batches - 1))
batch_sizes = [batch_size for _ in range(batches - 1)]
batch_sizes.append(last_batch_size)
print(f"Working with batch sizes {batch_sizes}\n", end="")
print(f"on devices {devices}\n", end="")
batch_memory = [ActorData(params, b) for b in batch_sizes]
batch_addresses = [bm.shared_mem.name for bm in batch_memory]
transition_queue = Queue()
replay_lock = Lock()
processes = [
#Process(target=local_replay_buffer.transition_upload_process, args=(params, transition_queue, replay_lock)),
Process(target=weight_downloading_process,
args=(params, path_index, path_template, path_limit, download_period)),
#Process(target=replay_buffer_process,
# args=(params, batch_sizes, batch_addresses, transition_queue, replay_lock)),
Process(target=transition_upload_process,
args=(params, batch_sizes, batch_addresses, replay_lock)),
Process(target=split_environment_process,
args=(params, batch_sizes, batch_addresses, 2))]
# Process(target=environment_process,
# args=(params, batch_sizes, batch_addresses))]
if params['Misc']['split_stream']:
splits = len(devices)
for i, device in enumerate(devices):
processes.append(Process(target=Agent57_process,
args=(
params, batch_sizes, path_index, path_template, batch_addresses, device, splits,
i)))
else:
processes.append(Process(target=intrinsic_motivation_process,
args=(params, batch_sizes, path_index, path_template, batch_addresses,
devices[0])))
processes.append(Process(target=dqn_process,
args=(params, batch_sizes, path_index, path_template, batch_addresses,
devices[1] if len(devices) > 1 else devices[0])))
for p in processes:
p.start()
if params['Misc']['render_actor']:
import cv2
while True:
cv2.imshow("Actor", np.concatenate([batch_memory[0].observations[0], batch_memory[-1].observations[-1]], 1))
cv2.waitKey(1)
|
python
|
# (C) Copyright 2007-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" The interface for objects using the 'Service' trait type. """
# Enthought library imports.
from traits.api import Instance, Interface
# Local imports.
from .i_service_registry import IServiceRegistry
class IServiceUser(Interface):
""" The interface for objects using the 'Service' trait type. """
# The service registry that the object's services are stored in.
service_registry = Instance(IServiceRegistry)
|
python
|
# Models
import tensorflow as tf
import numpy as np
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph
import time
import pickle
import os
import sklearn
from sklearn.ensemble import RandomForestRegressor
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore")
def create_fc(config):
config['hidden_layers'] = len(config['layers'])
input = tf.keras.layers.Input(shape=config['input_shape'])
if config['input_dropout'] is not None:
x = tf.keras.layers.Dropout(config['input_dropout'])(input)
else:
x = input
for i in range(config['hidden_layers']):
dim = config['layers'][i]
act = 'relu'
x = tf.keras.layers.Dense(dim,activation=act)(x)
if config['dropout'] is not None:
x = tf.keras.layers.Dropout(config['dropout'])(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dense(config['output_shape'],activation='softmax')(x)
model = tf.keras.Model(inputs=input, outputs=x)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
metrics=['accuracy'])
return model
def vgg_block(x, filters, layers):
for _ in range(layers):
x = tf.keras.layers.Conv2D(filters, (3,3), padding='same', activation='relu')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.MaxPooling2D((2,2), strides=(2,2))(x)
return x
def create_vgg(config):
config['num_layers'] = len(config['vgg_layers'])
input = tf.keras.layers.Input(shape=config['input_shape'])
x = input
for i in range(config['num_layers']):
block_size = config['vgg_layers'][i]
filter_num = config['filters'][i]
act = 'relu'
x = vgg_block(x,filter_num,block_size)
x = tf.keras.layers.Flatten()(x)
config['num_hidden_layers'] = len(config['hidden_layers'])
for i in range(config['num_hidden_layers']):
dim = config['hidden_layers'][i]
act = 'relu'
x = tf.keras.layers.Dense(dim,activation=act)(x)
x = tf.keras.layers.Dense(config['output_shape'],activation='softmax')(x)
model = tf.keras.Model(inputs=input, outputs=x)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
metrics=['accuracy'])
return model
def inception_block(x, f1, f2_in, f2_out, f3_in, f3_out, f4_out):
# 1x1 conv
conv1 = tf.keras.layers.Conv2D(f1, (1,1), padding='same', activation='relu')(x)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
# 3x3 conv
conv3 = tf.keras.layers.Conv2D(f2_in, (1,1), padding='same', activation='relu')(x)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
conv3 = tf.keras.layers.Conv2D(f2_out, (3,3), padding='same', activation='relu')(conv3)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
# 5x5 conv
conv5 = tf.keras.layers.Conv2D(f3_in, (1,1), padding='same', activation='relu')(x)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
conv5 = tf.keras.layers.Conv2D(f3_out, (5,5), padding='same', activation='relu')(conv5)
conv5 = tf.keras.layers.BatchNormalization()(conv5)
# 3x3 max pooling
pool = tf.keras.layers.MaxPooling2D((3,3), strides=(1,1), padding='same')(x)
pool = tf.keras.layers.Conv2D(f4_out, (1,1), padding='same', activation='relu')(pool)
pool = tf.keras.layers.BatchNormalization()(pool)
# concatenate filters, assumes filters/channels last
layer_out = tf.keras.layers.concatenate([conv1, conv3, conv5, pool], axis=-1)
return layer_out
def create_inception(config):
config['num_layers'] = len(config['inception_layers'])
input = tf.keras.layers.Input(shape=config['input_shape'])
x = tf.keras.layers.Conv2D(64, (7,7), padding='valid', activation='relu', strides=(2,2))(input)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), padding='same')(x)
x = tf.keras.layers.Conv2D(128, (1,1), padding='same', activation='relu', strides=(1,1))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(192, (3,3), padding='same', activation='relu', strides=(1,1))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), padding='same')(x)
for i in range(config['num_layers']):
for j in range(config['inception_layers'][i]):
x = inception_block(x,config['f1'][i][j],config['f2_in'][i][j],config['f2_out'][i][j],
config['f3_in'][i][j],config['f3_out'][i][j],config['f4_out'][i][j])
x = tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), padding='same')(x)
x = tf.keras.layers.Flatten()(x)
config['num_hidden_layers'] = len(config['hidden_layers'])
for i in range(config['num_hidden_layers']):
dim = config['hidden_layers'][i]
act = 'relu'
x = tf.keras.layers.Dense(dim,activation=act)(x)
x = tf.keras.layers.Dense(config['output_shape'],activation='softmax')(x)
model = tf.keras.Model(inputs=input, outputs=x)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
metrics=['accuracy'])
return model
def conv_relu(x, filters, kernel_size, strides=1):
x = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding = 'same')(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.BatchNormalization()(x)
return x
def identity_block(tensor, filters):
x = conv_relu(tensor, filters=filters, kernel_size=1, strides=1)
x = conv_relu(x, filters=filters, kernel_size=3, strides=1)
x = tf.keras.layers.Conv2D(filters=4*filters, kernel_size=1, strides=1)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Add()([tensor,x])
x = tf.keras.layers.ReLU()(x)
return x
def identity_block_small(tensor, filters):
x = conv_relu(tensor, filters=filters, kernel_size=3, strides=1)
x = conv_relu(x, filters=filters, kernel_size=3, strides=1)
x = tf.keras.layers.Add()([tensor,x])
x = tf.keras.layers.ReLU()(x)
return x
def projection_block(tensor, filters, strides):
x = conv_relu(tensor, filters=filters, kernel_size=1, strides=strides)
x = conv_relu(x, filters=filters, kernel_size=3, strides=1)
x = tf.keras.layers.Conv2D(filters=4*filters, kernel_size=1, strides=1)(x)
x = tf.keras.layers.BatchNormalization()(x)
shortcut = tf.keras.layers.Conv2D(filters=4*filters, kernel_size=1, strides=strides)(tensor)
shortcut = tf.keras.layers.BatchNormalization()(shortcut)
x = tf.keras.layers.Add()([shortcut,x])
x = tf.keras.layers.ReLU()(x)
return x
def projection_block_small(tensor, filters, strides):
x = conv_relu(tensor, filters=filters, kernel_size=3, strides=strides)
x = conv_relu(x, filters=filters, kernel_size=3, strides=1)
shortcut = tf.keras.layers.Conv2D(filters=filters, kernel_size=1, strides=strides)(tensor)
shortcut = tf.keras.layers.BatchNormalization()(shortcut)
x = tf.keras.layers.Add()([shortcut,x])
x = tf.keras.layers.ReLU()(x)
return x
def resnet_block(x, filters, reps, strides):
x = projection_block(x, filters, strides)
for _ in range(reps-1):
x = identity_block(x,filters)
return x
def resnet_block_small(x, filters, reps, strides):
x = projection_block_small(x, filters, strides)
for _ in range(reps):
x = identity_block_small(x,filters)
return x
def create_resnet(config):
input = tf.keras.layers.Input(shape=config['input_shape'])
x = conv_relu(input, filters=64, kernel_size=7, strides=2)
x = tf.keras.layers.MaxPool2D(pool_size = 3, strides =2)(x)
if config['small']==False:
x = resnet_block(x, filters=64, reps=config['resnet_layers'][0], strides=1)
x = resnet_block(x, filters=128, reps=config['resnet_layers'][1], strides=2)
x = resnet_block(x, filters=256, reps=config['resnet_layers'][2], strides=2)
x = resnet_block(x, filters=512, reps=config['resnet_layers'][3], strides=2)
else:
x = resnet_block_small(x, filters=64, reps=config['resnet_layers'][0], strides=1)
x = resnet_block_small(x, filters=128, reps=config['resnet_layers'][1], strides=2)
x = resnet_block_small(x, filters=256, reps=config['resnet_layers'][2], strides=2)
x = resnet_block_small(x, filters=512, reps=config['resnet_layers'][3], strides=2)
x = tf.keras.layers.GlobalAvgPool2D()(x)
config['num_hidden_layers'] = len(config['hidden_layers'])
for i in range(config['num_hidden_layers']):
dim = config['hidden_layers'][i]
act = 'relu'
x = tf.keras.layers.Dense(dim,activation=act)(x)
output = tf.keras.layers.Dense(config['output_shape'], activation ='softmax')(x)
model = tf.keras.Model(inputs=input, outputs=output)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
metrics=['accuracy'])
return model
def create_base_vgg_config(params):
config = dict()
config['input_shape'] = (params['input_shape'],params['input_shape'],3)
config['vgg_layers'] = [params['vgg_layers']] * params['vgg_layers_size']
filters = []
for i in range(params['vgg_layers_size']):
filters.append(params['filters']*(2**i))
config['filters'] = filters
config['hidden_layers'] = [params['hidden_layers']] * params['hidden_layers_size']
config['output_shape'] = params['output_shape']
config['input_size'] = params['input_size']
config['batch_size'] = params['batch_size']
config['model'] = "VGG"
return config
def create_base_inception_config(params):
config = dict()
config['input_shape'] = (params['input_shape'],params['input_shape'],3)
config['inception_layers'] = [params['inception_layers']] * 3
config['f1'] = []
config['f2_in'] = []
config['f2_out'] = []
config['f3_in'] = []
config['f3_out'] = []
config['f4_out'] = []
for val in config['inception_layers']:
config['f1'].append([params['f1']]*val)
config['f2_in'].append([params['f2_in']]*val)
config['f2_out'].append([params['f2_out']]*val)
config['f3_in'].append([params['f3_in']]*val)
config['f3_out'].append([params['f3_out']]*val)
config['f4_out'].append([params['f4_out']]*val)
config['hidden_layers'] = [params['hidden_layers']] * params['hidden_layers_size']
config['output_shape'] = params['output_shape']
config['input_size'] = params['input_size']
config['batch_size'] = params['batch_size']
config['model'] = "Inception"
return config
def create_base_resnet_config(params):
config = dict()
config['input_shape'] = (params['input_shape'],params['input_shape'],3)
config['small'] = False
config['resnet_layers'] = [params['resnet_layers']] * 4
config['hidden_layers'] = [params['hidden_layers']] * params['hidden_layers_size']
config['output_shape'] = params['output_shape']
config['input_size'] = params['input_size']
config['batch_size'] = params['batch_size']
config['model'] = "ResNet"
return config
def create_base_fc_config(params):
config = dict()
config['input_shape'] = params['input_shape']
config['input_dropout'] = 0.2
config['dropout'] = 0.5
config['layers'] = [1000] * params['hidden_layers']
config['output_shape'] = params['output_shape']
config['input_size'] = params['input_size']
config['batch_size'] = params['batch_size']
config['model'] = "FC"
return config
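# Minimal usage sketch for the FC config helper above (the concrete parameter values here are
# hypothetical and only illustrate the expected keys):
# params = {'input_shape': (784,), 'hidden_layers': 2, 'output_shape': 10,
#           'input_size': 50000, 'batch_size': 64}
# config = create_base_fc_config(params)   # config['layers'] == [1000, 1000]
# model = create_fc(config)                # 784 -> Dropout -> 1000 -> 1000 -> 10 (softmax)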
def get_flops(model, batch_size=None,allowed_flops=['MatMul', 'Mul', 'Rsqrt', 'BiasAdd', 'Sub', 'Softmax', 'Conv2D', 'MaxPool', 'Mean']):
if batch_size is None:
batch_size = 1
real_model = tf.function(model).get_concrete_function(tf.TensorSpec([batch_size] + model.inputs[0].shape[1:], model.inputs[0].dtype))
frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(real_model)
run_meta = tf.compat.v1.RunMetadata()
opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
opts['output'] = 'none'
flops = tf.compat.v1.profiler.profile(graph=frozen_func.graph,
run_meta=run_meta, cmd='op', options=opts)
ret_val = dict()
for fl in allowed_flops:
ret_val[fl] = 0
f = flops.children
while(len(f) > 0):
if f[0].name in allowed_flops:
ret_val[f[0].name] = f[0].total_float_ops
f = f[0].children
return ret_val
def get_weights(model):
ret_val = dict()
ret_val['trainable'] = np.sum([np.product([xi for xi in x.get_shape()]) for x in model.trainable_weights])
ret_val['non_trainable'] = np.sum([np.product([xi for xi in x.get_shape()]) for x in model.non_trainable_weights])
return ret_val
def get_layers(model):
ret_val = dict()
for l in model.layers:
name = l.__class__.__name__
if name in ret_val:
ret_val[name] += 1
else:
ret_val[name] = 1
return ret_val
allowed_flops = ['MatMul', 'Mul', 'Rsqrt', 'BiasAdd', 'Sub', 'Softmax', 'Conv2D', 'MaxPool', 'Mean']
def get_model_params(model,batch_size = 64,x_shape=[]):
flops = get_flops(model)
weights = get_weights(model)
layers = get_layers(model)
return flops,weights,layers
model_creator = dict()
model_creator['vgg'] = create_vgg
model_creator['resnet'] = create_resnet
model_creator['inception'] = create_inception
model_creator['fc'] = create_fc
base_param_creator = dict()
base_param_creator['vgg'] = create_base_vgg_config
base_param_creator['resnet'] = create_base_resnet_config
base_param_creator['inception'] = create_base_inception_config
base_param_creator['fc'] = create_base_fc_config
def get_single_train_data(output_config):
x = []
y = []
x.append(list(output_config['flops_param'].values()) +
list(output_config['layers_param'].values()) +
list(output_config['weights_param'].values()))
x[-1].append(output_config['input_size'])
x[-1].append(output_config['batch_size'])
x = np.asarray(x).astype('float64')
return x
def get_training_time(config,model_name):
input_config = base_param_creator[model_name](config)
model = model_creator[model_name](input_config)
flops,weights,layers = get_model_params(model)
input_config['flops_param'] = flops
input_config['weights_param'] = weights
input_config['layers_param'] = layers
with open('results/trained_model.pickle', 'rb') as handle:
models = pickle.load(handle)
x = get_single_train_data(input_config)
time = models[model_name].predict(x)[0]
return time * config['epochs']
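# Example call (a sketch; it assumes results/trained_model.pickle exists from a previous
# regression-fitting step, and the parameter values below are hypothetical):
# cfg = {'input_shape': 32, 'vgg_layers': 2, 'vgg_layers_size': 3, 'filters': 32,
#        'hidden_layers': 512, 'hidden_layers_size': 1, 'output_shape': 10,
#        'input_size': 50000, 'batch_size': 128, 'epochs': 10}
# predicted_time = get_training_time(cfg, 'vgg')   # per-epoch prediction scaled by 'epochs'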
|
python
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
n = {'nodes':20,'edges':3}
G = nx.barabasi_albert_graph(n['nodes'],n['edges'])
degrees = dict(G.degree())
max_degree = max(degrees.values())
min_sigmoid = 0.5
max_sigmoid = np.exp(1)/(1.+np.exp(1))
sigmoid = lambda value: (1./(1+np.exp(-value))-min_sigmoid)/(max_sigmoid-min_sigmoid)
for node in degrees:
    tmp = degrees[node]
    degrees[node] = sigmoid(tmp/float(max_degree))
# Use the sigmoid-normalised degree of each node as its weight alpha
alpha = degrees
timesteps = 10
# -- initial conditions
INITIAL = 0
attitudes = np.zeros((n['nodes'], timesteps))
attitudes[:, INITIAL] = np.random.random_sample(size=(n['nodes'],))
# TODO: Influence kernel
for node in G.nodes():
    print(list(G.neighbors(node)))  # the graph is undirected, so use neighbors() rather than successors()
for t in range(1, timesteps):
    for agent in G.nodes():
        neighbours = list(G.neighbors(agent))
        attitudes[agent, t] = (1 - alpha[agent]) * attitudes[agent, t-1] \
            + alpha[agent] * attitudes[neighbours, t-1].mean()
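# Sanity check of the update rule above (illustrative): with alpha[agent] = 0.25, a previous
# attitude of 0.8 and a neighbour mean of 0.4, the new attitude is
# 0.75 * 0.8 + 0.25 * 0.4 = 0.6 + 0.1 = 0.7, i.e. a convex combination of self and neighbours.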
|
python
|
#!/usr/bin/python2
import optparse
import os
import shutil
import stat
import subprocess
import sys
from builds.GpBuild import GpBuild
def install_gpdb(dependency_name):
status = subprocess.call("mkdir -p /usr/local/gpdb", shell=True)
if status:
return status
status = subprocess.call(
"tar -xzf " + dependency_name + "/*.tar.gz -C /usr/local/gpdb",
shell=True)
return status
def create_gpadmin_user():
status = subprocess.call("gpdb_src/concourse/scripts/setup_gpadmin_user.bash")
os.chmod('/bin/ping', os.stat('/bin/ping').st_mode | stat.S_ISUID)
if status:
return status
def copy_output():
shutil.copyfile("gpdb_src/src/test/regress/regression.diffs", "icg_output/regression.diffs")
shutil.copyfile("gpdb_src/src/test/regress/regression.out", "icg_output/regression.out")
def main():
parser = optparse.OptionParser()
parser.add_option("--build_type", dest="build_type", default="RELEASE")
parser.add_option("--mode", choices=['orca', 'codegen', 'orca_codegen', 'planner'])
parser.add_option("--compiler", dest="compiler")
parser.add_option("--cxxflags", dest="cxxflags")
parser.add_option("--output_dir", dest="output_dir", default="install")
parser.add_option("--gpdb_name", dest="gpdb_name")
(options, args) = parser.parse_args()
    ciCommon = GpBuild(options.mode)
for dependency in args:
status = ciCommon.install_dependency(dependency)
if status:
return status
status = install_gpdb(options.gpdb_name)
if status:
return status
status = ciCommon.configure()
if status:
return status
status = create_gpadmin_user()
if status:
return status
status = ciCommon.icg()
if status:
copy_output()
return status
if __name__ == "__main__":
sys.exit(main())
|
python
|
'''
Pulls segmentation masks from the database and exports them
into folders with specifiable format (JPEG, TIFF, etc.).
2019 Benjamin Kellenberger
'''
import os
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Export segmentation map annotations from database to images.')
parser.add_argument('--settings_filepath', type=str, default='config/settings.ini', const=1, nargs='?',
help='Manual specification of the directory of the settings.ini file; only considered if environment variable unset (default: "config/settings.ini").')
parser.add_argument('--target_folder', type=str, default='export', const=1, nargs='?',
help='Export directory for the segmentation image files.')
parser.add_argument('--file_format', type=str, default='TIFF', const=1, nargs='?',
help='File format for segmentation annotations (default: TIFF).')
parser.add_argument('--export_annotations', type=bool, default=True, const=1, nargs='?',
help='Whether to export annotations (default: True).')
parser.add_argument('--limit_users', type=str,
help='Which users (comma-separated list of usernames) to limit annotations to (default: None).')
parser.add_argument('--exclude_users', type=str, default=None, const=1, nargs='?',
help='Comma-separated list of usernames whose annotations not to include (default: None).')
#TODO: implement:
# parser.add_argument('--export_predictions', type=bool, default=False, const=1, nargs='?',
# help='Whether to export predictions (default: False).')
# parser.add_argument('--predictions_min_date', type=str, default=None, const=1, nargs='?',
# help='Timestamp of earliest predictions to consider (default: None, i.e. all).')
args = parser.parse_args()
# setup
print('Setup...')
if not 'AIDE_CONFIG_PATH' in os.environ:
os.environ['AIDE_CONFIG_PATH'] = str(args.settings_filepath)
import glob
from tqdm import tqdm
import datetime
import numpy as np
from PIL import Image
import base64
from io import BytesIO
from util.configDef import Config
from modules import Database
config = Config()
# check if correct type of annotations
exportAnnotations = args.export_annotations
if exportAnnotations and not config.getProperty('Project', 'annotationType') == 'segmentationMasks':
print('Warning: project annotations are not segmentation masks; skipping annotation export...')
exportAnnotations = False
# setup DB connection
dbConn = Database(config)
if dbConn.connectionPool is None:
raise Exception('Error connecting to database.')
dbSchema = config.getProperty('Database', 'schema')
# check if valid file format provided
valid_file_formats = (
'jpg',
'jpeg',
'png',
'gif',
'tif',
'tiff',
'bmp',
'ico',
'jfif',
'pjpeg',
'pjp'
)
if args.file_format.lower().strip().strip('.') not in valid_file_formats:
raise Exception('Error: provided file format ("{}") is not valid.'.format(args.file_format))
os.makedirs(args.target_folder, exist_ok=True)
# query and export label definition
labelQuery = dbConn.execute('SELECT * FROM {schema}.labelclass;'.format(schema=dbSchema), None, 'all')
with open(os.path.join(args.target_folder, 'classDefinitions.txt'), 'w') as f:
f.write('labelclass,index\n')
for labelIdx, l in enumerate(labelQuery):
f.write('{},{}\n'.format(l['name'],labelIdx))
# start querying and exporting
if exportAnnotations:
sql = '''
SELECT * FROM {schema}.annotation AS anno
JOIN (SELECT id AS imID, filename FROM {schema}.image) AS img
ON anno.image = img.imID
'''.format(schema=dbSchema)
queryArgs = []
# included and excluded users
if args.limit_users is not None:
limitUsers = []
for u in args.limit_users.split(','):
limitUsers.append(u.strip())
sql += 'WHERE anno.username IN %s'
queryArgs = []
queryArgs.append(tuple(limitUsers))
if args.exclude_users is not None:
excludeUsers = []
for u in args.exclude_users.split(','):
excludeUsers.append(u.strip())
if args.limit_users is not None:
                sql += ' AND anno.username NOT IN %s'
else:
                sql += ' WHERE anno.username NOT IN %s'
queryArgs.append(tuple(excludeUsers))
if len(queryArgs) == 0:
queryArgs = None
cursor = dbConn.execute_cursor(sql, queryArgs)
# iterate
print('Exporting images...\n')
while True:
nextItem = cursor.fetchone()
if nextItem is None:
break
# parse
imgName = nextItem['filename']
imgName, _ = os.path.splitext(imgName)
targetName = os.path.join(args.target_folder, imgName+'.'+args.file_format)
parent,_ = os.path.split(targetName)
os.makedirs(parent, exist_ok=True)
# convert base64 mask to image
width = nextItem['width']
height = nextItem['height']
segmask = bytes(nextItem['segmentationmask'], 'UTF-8')
raster = np.frombuffer(base64.b64decode(nextItem['segmentationmask']), dtype=np.uint8)
raster = np.reshape(raster, (height,width,))
img = Image.fromarray(raster)
img.save(targetName)
print(targetName)
|
python
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.urls import reverse
from django.test import TestCase, Client
import base64
import mock
import adal
class AuthTestCase(TestCase):
client = Client()
home_url = reverse('home')
auth_url = reverse('auth')
auth_ip_url = reverse('auth_ip')
auth_dual_url = reverse('auth_dual')
username = 'testu'
email = '[email protected]'
password = 'testpass'
def setUp(self):
self.test_user = User.objects.create(username=self.username, email=self.email)
def basic_auth(self, username, password):
return 'Basic {}'.format(base64.b64encode('{}:{}'.format(username, password).encode('utf-8')).decode('utf-8'))
# @mock.patch('adal.AuthenticationContext.acquire_token_with_username_password')
# def test_home_redirects(self, mock_api_call):
# mock_api_call.return_value = {
# 'userId': self.email
# }
# response = self.client.get(self.home_url)
# self.assertRedirects
@mock.patch('adal.AuthenticationContext.acquire_token_with_username_password')
def test_auth_adal_with_username(self, mock_api_call):
mock_api_call.return_value = {
'userId': self.email
}
        # fetch a response using basic auth
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION=self.basic_auth(self.username, self.password)
)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
# fetch again to test auth credential caching
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION=self.basic_auth(self.username, self.password)
)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
# fetch again to test session credential caching
response = self.client.get(self.auth_url)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
@mock.patch('adal.AuthenticationContext.acquire_token_with_username_password')
def test_auth_adal_with_invalid_username(self, mock_api_call):
mock_api_call.side_effect = adal.adal_error.AdalError('Azure AD disagrees!')
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION=self.basic_auth(self.username, self.password)
)
self.assertEqual(response.status_code, 401)
def test_auth_adal_with_bad_creds(self):
# no credentials
response = self.client.get(self.auth_url)
self.assertEqual(response.status_code, 401)
# malformed Authorization Header
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION='Basic'
)
self.assertEqual(response.status_code, 401)
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION='Not a legit header'
)
self.assertEqual(response.status_code, 401)
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION=u'Basic 😭😭😭😕😕😕'
)
self.assertEqual(response.status_code, 401)
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION='Basic ==abcdef/+=='
)
self.assertEqual(response.status_code, 401)
# legit header, but invalid payload
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION='Basic 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/==='
)
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION='Basic '+base64.b64encode(b'notlegit').decode('utf-8')
)
self.assertEqual(response.status_code, 401)
@mock.patch('adal.AuthenticationContext.acquire_token_with_username_password')
def test_auth_ip_with_username(self, mock_api_call):
mock_api_call.return_value = {
'userId': self.email
}
# perform call to auth_ip with full creds
response = self.client.get(self.auth_ip_url,
HTTP_AUTHORIZATION=self.basic_auth(self.username, self.password)
)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
def test_auth_ip_without_creds(self):
response = self.client.get(self.auth_ip_url)
self.assertEqual(response.status_code, 200)
@mock.patch('adal.AuthenticationContext.acquire_token_with_username_password')
def test_auth_ip_with_session(self, mock_api_call):
mock_api_call.return_value = {
'userId': self.email
}
# perform call to auth with full creds
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION=self.basic_auth(self.username, self.password)
)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
# perform call to auth_ip
response = self.client.get(self.auth_ip_url)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
@mock.patch('adal.AuthenticationContext.acquire_token_with_username_password')
def test_auth_dual(self, mock_api_call):
mock_api_call.return_value = {
'userId': self.email
}
# perform call to auth with full creds
response = self.client.get(self.auth_url,
HTTP_AUTHORIZATION=self.basic_auth(self.username, self.password)
)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
# perform call to auth_dual
response = self.client.get(self.auth_dual_url)
self.assertEqual(response.status_code, 200)
self.assertIn('email', response.json())
self.assertEqual(response.json()['email'], self.email)
def test_auth_dual_without_creds(self):
response = self.client.get(self.auth_dual_url)
self.assertEqual(response.status_code, 200)
|
python
|
from tempfile import NamedTemporaryFile
from ..cache import get_cache_key, get_hexdigest, get_hashed_mtime
from ..utils import compile_less
from ..settings import LESS_EXECUTABLE, LESS_USE_CACHE,\
LESS_CACHE_TIMEOUT, LESS_ROOT, LESS_OUTPUT_DIR, LESS_DEVMODE,\
LESS_DEVMODE_WATCH_DIRS
from django.conf import settings
from django.contrib.staticfiles import finders
from django.core.cache import cache
from django.template.base import Library, Node, TemplateSyntaxError
import logging
import subprocess
import os
import sys
STATIC_ROOT = getattr(settings, "STATIC_ROOT", getattr(settings, "MEDIA_ROOT"))
logger = logging.getLogger("less")
register = Library()
class InlineLessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def compile(self, source):
source_file = NamedTemporaryFile(delete=False)
source_file.write(source)
source_file.close()
args = [LESS_EXECUTABLE, source_file.name]
popen_kwargs = dict(
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if os.name == "nt":
popen_kwargs["shell"] = True
p = subprocess.Popen(args, **popen_kwargs)
out, errors = p.communicate()
os.remove(source_file.name)
if out:
return out.decode(settings.FILE_CHARSET)
elif errors:
return errors.decode(settings.FILE_CHARSET)
return u""
def render(self, context):
output = self.nodelist.render(context)
if LESS_USE_CACHE:
cache_key = get_cache_key(get_hexdigest(output))
cached = cache.get(cache_key, None)
if cached is not None:
return cached
output = self.compile(output)
cache.set(cache_key, output, LESS_CACHE_TIMEOUT)
return output
else:
return self.compile(output)
@register.tag(name="inlineless")
def do_inlineless(parser, token):
nodelist = parser.parse(("endinlineless",))
parser.delete_first_token()
return InlineLessNode(nodelist)
def less_paths(path):
full_path = os.path.join(STATIC_ROOT, path)
if settings.DEBUG and not os.path.exists(full_path):
        # while developing it is more comfortable
        # to search for the less files rather than
        # running collectstatic all the time
full_path = finders.find(path)
if full_path is None:
raise TemplateSyntaxError("Can't find staticfile named: {}".format(path))
file_name = os.path.split(path)[-1]
output_dir = os.path.join(LESS_ROOT, LESS_OUTPUT_DIR, os.path.dirname(path))
return full_path, file_name, output_dir
@register.simple_tag
def less(path):
logger.info("processing file %s" % path)
full_path, file_name, output_dir = less_paths(path)
base_file_name = os.path.splitext(file_name)[0]
if LESS_DEVMODE and any(map(lambda watched_dir: full_path.startswith(watched_dir), LESS_DEVMODE_WATCH_DIRS)):
return os.path.join(os.path.dirname(path), "%s.css" % base_file_name)
hashed_mtime = get_hashed_mtime(full_path)
output_file = "%s-%s.css" % (base_file_name, hashed_mtime)
output_path = os.path.join(output_dir, output_file)
encoded_full_path = full_path
if isinstance(full_path, unicode):
filesystem_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
encoded_full_path = full_path.encode(filesystem_encoding)
if not os.path.exists(output_path):
compile_less(encoded_full_path, output_path, path)
# Remove old files
compiled_filename = os.path.split(output_path)[-1]
for filename in os.listdir(output_dir):
if filename.startswith(base_file_name) and filename != compiled_filename:
os.remove(os.path.join(output_dir, filename))
return os.path.join(LESS_OUTPUT_DIR, os.path.dirname(path), output_file)
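# Template usage sketch (assuming this module is loaded as a template tag library named "less",
# which depends on the app's templatetags package layout):
# {% load less %}
# <link rel="stylesheet" href="{{ STATIC_URL }}{% less "css/site.less" %}">
# {% inlineless %} ... LESS source ... {% endinlineless %}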
|
python
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="deepof",
version="0.1.4",
author="Lucas Miranda",
author_email="[email protected]",
description="deepof (Deep Open Field): Open Field animal pose classification tool ",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://gitlab.mpcdf.mpg.de/lucasmir/deepof/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
],
python_requires=">3.7",
platform="Platform independent",
License="MIT",
include_package_data=True,
install_requires=[
"tensorflow>=2.4",
"numpy",
"pandas",
"joblib",
"matplotlib",
"networkx",
"opencv-python",
"regex",
"scikit-learn",
"scipy",
"seaborn",
"sklearn",
"tables",
"tensorflow-probability",
"tqdm",
"umap-learn",
],
)
|
python
|
import pygame
pygame.font.init()
class Font():
title = pygame.font.SysFont('Arial', 60)
place = pygame.font.SysFont('Arial', 32)
normal_text_large = pygame.font.SysFont('Arial', 32)
normal_text_small = pygame.font.SysFont('Arial', 24)
card_energy = pygame.font.SysFont('Arial', 28)
card_name = pygame.font.SysFont('Arial', 16)
card_description = pygame.font.SysFont('Arial', 14)
card_stat = pygame.font.SysFont('Arial', 24)
class Color():
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
yellow = (255, 255, 0)
white = (255, 255, 255)
|
python
|
#!/usr/bin/env python
#
# file: Connection.py
# author: Cyrus Harrison <[email protected]>
# created: 6/1/2010
# purpose:
# Provides a 'Connection' class that interacts with a redmine instance to
# extract results from redmine queries.
#
import urllib2,urllib,csv,getpass,warnings
from collections import namedtuple
from Issue import *
try:
import pyPdf
except:
print "Warning: pyrmine requires the 'pyPdf' ",
print "module for full pdf functionality."
class Connection(object):
def __init__(self,base_url):
"""
Creates a redmine connection object to redmine instance at the given
url.
"""
self.urls = {}
if base_url[-1] == "/":
base_url = base_url[:-1]
self.urls["base"] = base_url
self.urls["login"] = "%s/login/" % base_url
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
def login(self,uname=None,passwd=None):
"""
Login handshake.
If username & passwd are not given this function asks for them via
stdout/stdin.
"""
if uname is None:
uname = raw_input("username:")
if passwd is None:
passwd = getpass.getpass("password:")
f = self.opener.open(self.urls["login"])
data = f.read()
f.close()
split_key = '<input name="authenticity_token" type="hidden" value="'
data = data.split(split_key)[1]
atok= data.split('" />')[0]
params = dict(username=uname,
password=passwd,
authenticity_token=atok)
params = urllib.urlencode(params)
f = self.opener.open(self.urls["login"], params)
data = f.readlines()
f.close()
def open_base_url(self,url):
"""
Constructs and opens an url relative to the base of this connection.
"""
url = "%s/%s" % (self.urls["base"],url)
return self.opener.open(url)
def open_project_url(self,project,url):
"""
Constructs and opens a project url relative to the base of this
connection.
"""
url = "%s/projects/%s/%s" % (self.urls["base"] ,project)
return self.opener.open(url)
def fetch_issues(self,project,query_id=-1,iclass=Issue):
"""
Executes a query and returns a set of Issues holding
the results.
You can specify which class is used to wrap returned issues via 'iclass'.
"""
issues_url = "%s/projects/%s/issues.csv" % (self.urls["base"] ,project)
if int(query_id) >= 0:
params = {}
params['query_id'] = str(query_id)
issues_url += "?" + urllib.urlencode(params)
print "[executing query: %s]" % issues_url
f = self.opener.open(issues_url)
csv_reader = csv.reader(f)
issues = [ row for row in csv_reader]
fields = [self.__format_field_name(val) for val in issues[0]]
issues = issues[1:]
print "[query returned %d issues]" % len(issues)
IssueTuple = namedtuple("Issue",fields)
issues = [iclass(IssueTuple(*i),self) for i in issues]
return fields,issues
def save_query_pdf(self,project,query_id,output_file):
"""
Collects pdfs of all issues returned by a query and combines them into
a single output pdf.
"""
fields,issues = self.fetch_issues(project,query_id)
nissues = len(issues)
if nissues == 0:
print "[query returned no issues -",
print " skipping creation of '%s']" % output_file
return
        # try to ignore some deprecation warnings from pyPdf
with warnings.catch_warnings():
warnings.simplefilter("ignore")
opdf = pyPdf.PdfFileWriter()
for i in issues:
print "[downloading issue %s]" % i.id
idata = i.fetch_pdf_buffer()
ipdf = pyPdf.PdfFileReader(idata)
for p in range(ipdf.numPages):
opdf.addPage(ipdf.getPage(p))
print "[creating %s]" % output_file
opdf.write(file(output_file,"wb"))
def __format_field_name(self,name):
"""
Helper that makes sure field names comply w/ rules required for
creating a 'namedtuple' object.
"""
name = name.lower().replace(" ","_")
if name == "#":
name = "id"
name = name.replace("%","percent")
return name
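# Usage sketch (the host and project identifiers below are hypothetical; they only illustrate
# the API defined above):
# conn = Connection("https://redmine.example.com")
# conn.login()                                          # prompts for username/password
# fields, issues = conn.fetch_issues("myproject", query_id=42)
# conn.save_query_pdf("myproject", 42, "issues.pdf")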
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 17:33:00 2017
@author: spauliuk
"""
"""
File ODYM_Functions
Check https://github.com/IndEcol/ODYM for latest version.
Contains class definitions for ODYM
standard abbreviation: msf (material-system-functions)
dependencies:
numpy >= 1.9
scipy >= 0.14
Repository for this class, documentation, and tutorials: https://github.com/IndEcol/ODYM
"""
import os
import logging
import numpy as np
#import pandas as pd
import xlrd
import openpyxl
import pypandoc
import ODYM_Classes as msc
####################################
# Define functions #
####################################
def __version__(): # return version of this file
return str('1.0')
def function_logger(log_filename, log_pathname, file_level=logging.DEBUG, console_level=logging.WARNING):
"""
    This is the logging routine of the model. It returns a logger that can be used by other functions to write to the
    log (file).
    :param log_filename: The filename for the logfile.
    :param log_pathname: The pathname for the logfile.
    :param file_level: Verbosity level for the logger's output file. This can be logging.DEBUG (default),
                       logging.INFO, or logging.WARNING.
    :param console_level: Verbosity level for the logger's console output (default: logging.WARNING).
    :param logfile_type: Type of file to write. Markdown syntax is the default.
        TODO: If other output types are desired, they can be converted via pandoc.
    :return: A logger that can be used by other files to write to the log (file)
"""
log_file = os.path.join(log_pathname, log_filename)
# logging.basicConfig(format='%(levelname)s (%(filename)s <%(funcName)s>): %(message)s',
# filename=log_file,
# level=logging.INFO)
logger = logging.getLogger()
logger.handlers = [] # required if you don't want to exit the shell
logger.setLevel(file_level)
# The logger for console output
console_log = logging.StreamHandler() #StreamHandler logs to console
console_log.setLevel(console_level)
# console_log_format = logging.Formatter('%(message)s')
console_log_format = logging.Formatter('%(levelname)s (%(filename)s <%(funcName)s>): %(message)s')
console_log.setFormatter(console_log_format)
logger.addHandler(console_log)
# The logger for log file output
file_log = logging.FileHandler(log_file, mode='w', encoding=None, delay=False)
file_log.setLevel(file_level)
file_log_format = logging.Formatter('%(message)s\n')
file_log.setFormatter(file_log_format)
logger.addHandler(file_log)
return logger, console_log, file_log
def ensure_dir(f): # Checks whether a given directory f exists, and creates it if not
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def sort_index(mylist,direction): # returns index that sorts a list, either ascending or descending
if direction == 'ascending':
return sorted(range(len(mylist)), key=lambda k: mylist[k])
elif direction == 'descending':
return sorted(range(len(mylist)), key=lambda k: mylist[k], reverse=True)
else:
return None
def GroupingDict2Array(GroupingDict, ElementList):
'''
Tbd.
'''
NoOfItems = len(GroupingDict.keys())
GroupingList = []
for m in GroupingDict.keys():
GroupingList.append(m)
ElementContentArray = np.zeros((100,NoOfItems))
PosCount = 0
for m in GroupingList:
for n in GroupingDict[m].keys():
ElInd = ElementList.index(n)
ElementContentArray[ElInd,PosCount] = GroupingDict[m][n]
PosCount += 1
return GroupingList, ElementContentArray
def ListStringToListNumbers(ListStr):
"""
    Extracts numbers from a string that looks like a list command in Python, and returns them as a proper list.
Examples: ListStringToListNumbers('[1,2,3]') yields [1,2,3]
"""
return [int(s) for s in ListStr[ListStr.find('['):ListStr.find(']')+1].replace('[',',').replace(']',',').split(',') if s.isdigit()]
def EvalItemSelectString(ItemSelectStr,IndexLength):
'''
Extract index item selection lists from ODYM datafile information
'''
if ItemSelectStr == 'All' or ItemSelectStr == 'ALL' or ItemSelectStr == 'all':
Res = 'all' # Selects all from list
elif ItemSelectStr.find('except') > -1: # type 'All except', return full list [0,1,2,5,6,7]
Res = np.arange(0,IndexLength)
b = ItemSelectStr[ItemSelectStr.find('['):ItemSelectStr.find(']')+1].replace('[',',').replace(']',',')
RemoveList = [int(s) for s in b.split(',') if s.isdigit()]
Res = np.delete(Res,RemoveList)
Res = Res.tolist()
elif ItemSelectStr.find(']') > -1: # type '[...]', return full list
Res = ItemSelectStr[ItemSelectStr.find('[')::]
elif ItemSelectStr.find(')') > -1: # type '[..:..)', return range a:b
Res = ItemSelectStr[ItemSelectStr.find('[')+1:-1]
else:
Res = 'ItemSelectString could not be detected.'
return Res
def MI_Tuple(value, Is):
"""
Define function for obtaining multiindex tuple from index value
value: flattened index position, Is: Number of values for each index dimension
Example: MI_Tuple(10, [3,4,2,6]) returns [0,0,1,4]
MI_Tuple is the inverse of Tuple_MI.
"""
IsValuesRev = []
CurrentValue = value
for m in range(0,len(Is)):
IsValuesRev.append(CurrentValue % Is[len(Is)-m-1])
CurrentValue = CurrentValue // Is[len(Is)-m-1]
return IsValuesRev[::-1]
def Tuple_MI(Tuple, IdxLength):
"""
    Function to return the absolute position of a multiindex when the index tuple
and the index hierarchy and size are given.
Example: Tuple_MI([2,7,3],[100,10,5]) = 138
Tuple_MI is the inverse of MI_Tuple.
"""
# First, generate the index position offset values
A = IdxLength[1:] + IdxLength[:1] # Shift 1 to left
A[-1] = 1 # Replace lowest index by 1
A.reverse()
IdxPosOffset = np.cumproduct(A).tolist()
IdxPosOffset.reverse()
Position = np.sum([a*b for a,b in zip(Tuple,IdxPosOffset)])
return Position
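# Quick consistency sketch for the two index helpers above (values follow the docstring examples):
# MI_Tuple(10, [3, 4, 2, 6]) -> [0, 0, 1, 4]
# Tuple_MI([2, 7, 3], [100, 10, 5]) -> 138
# and, for matching dimension sizes, the two are mutually inverse:
# Tuple_MI(MI_Tuple(10, [3, 4, 2, 6]), [3, 4, 2, 6]) -> 10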
def TableWithFlowsToShares(Table,axis):
"""
Given a 2D-table with flow values that sum up to a total,
either along the columns (= across rows, axis =0) or along the rows (=across the columns, axis =1).
The function then converts the flows into shares (between 0 and 1), that each element has in the column sum (axis =0)
or the row sum (axis =1).
Only makes sense if all table entries have the same sign, that is not checked by the function.
"""
Shares = np.zeros(Table.shape)
if axis == 0: # shares along columns
colsum = Table.sum(axis=0)
Divisor = np.einsum('b,a->ab',colsum,np.ones(Table.shape[0]))
if axis == 1: # shares along rows
rowsum = Table.sum(axis=1)
Divisor = np.einsum('a,b->ab',rowsum,np.ones(Table.shape[1]))
Divided = np.divide(1, Divisor, out=np.zeros_like(Divisor), where=Divisor!=0)
Shares = Table * Divided
return Shares
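# Worked example for TableWithFlowsToShares (illustrative; any table with same-signed entries
# behaves the same way):
# Table = [[1., 3.],
#          [1., 1.]]
# axis=0 divides by the column sums [2, 4]: [[0.5, 0.75], [0.5, 0.25]]
# axis=1 divides by the row sums    [4, 2]: [[0.25, 0.75], [0.5, 0.5]]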
def DetermineElementComposition_All_Oth(me):
"""
Given an array of flows of materials (rows) broken down into chem. elements (columns),
where the first element is "all" and the last element is "other",
the function determines the share of each element in the material, and fills nonexistent rows with a 1 for all and other, resp.
"""
result = np.zeros(me.shape)
Shares = TableWithFlowsToShares(me[:,1::],1)
SharesSum = Shares.sum(axis=1)
result[:,0] = 1
result[:,1::] = Shares.copy()
for m in range(0,me.shape[0]):
if SharesSum[m] == 0:
result[m,-1] = 1
return result
def ModelIndexPositions_FromData(Positions,RowPos,ColPos):
"""
This function is needed to read data files into ODYM. It takes the positions of a given data point
    in the parameter file and checks where in the model index structure this data point belongs,
if it is needed at all.
"""
TargetPosition = []
for m in range(0,len(Positions)):
if m < len(RowPos):
try:
TargetPosition.append(Positions[m].index(RowPos[m]))
except:
break
else:
try:
TargetPosition.append(Positions[m].index(ColPos[m-len(RowPos)]))
except:
break
return TargetPosition
def ParseModelControl(Model_Configsheet,ScriptConfig):
""" Parse the RECC and ODYM model control parameters from the ODYM config sheet. """
SCix = 1
# search for script config list entry
while Model_Configsheet.cell(SCix, 2).value != 'General Info':
SCix += 1
SCix += 2 # start on first data row
while Model_Configsheet.cell(SCix, 4).value != None:
ScriptConfig[Model_Configsheet.cell(SCix, 3).value] = Model_Configsheet.cell(SCix,4).value
SCix += 1
SCix = 1
# search for script config list entry
while Model_Configsheet.cell(SCix, 2).value != 'Software version selection':
SCix += 1
SCix += 2 # start on first data row
while Model_Configsheet.cell(SCix, 4).value != None:
ScriptConfig[Model_Configsheet.cell(SCix, 3).value] = Model_Configsheet.cell(SCix,4).value
SCix += 1
return ScriptConfig
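# Expected sheet layout for ParseModelControl, inferred from the loops above (a sketch, not a
# specification): column 2 holds the section markers 'General Info' and
# 'Software version selection'; two rows below each marker, column 3 holds a setting name and
# column 4 its value, and reading stops at the first empty cell in column 4. Every name/value
# pair found this way is copied into ScriptConfig.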
def ParseClassificationFile_Main(Classsheet,Mylog):
""" Parse the ODYM classification file, format version
"""
ci = 2 # column index to start with
MasterClassification = {} # Dict of master classifications
while Classsheet.cell(1,ci).value != None:
TheseItems = []
ri = 11 # row index to start with
ThisName = Classsheet.cell(1,ci).value
ThisDim = Classsheet.cell(2,ci).value
ThisID = Classsheet.cell(4,ci).value
ThisUUID = Classsheet.cell(5,ci).value
while Classsheet.cell(ri,ci).value != None:
TheseItems.append(Classsheet.cell(ri,ci).value) # read the classification items
ri += 1
MasterClassification[ThisName] = msc.Classification(Name = ThisName, Dimension = ThisDim, ID = ThisID, UUID = ThisUUID, Items = TheseItems)
ci += 1
return MasterClassification
def ParseConfigFile(Model_Configsheet,ScriptConfig,Mylog):
"""
Standard routine to parse the ODYM model config file.
"""
ITix = 0
# search for index table entry
while True:
if Model_Configsheet.cell(ITix+1, 2).value == 'Index Table':
break
else:
ITix += 1
IT_Aspects = []
IT_Description = []
IT_Dimension = []
IT_Classification = []
IT_Selector = []
IT_IndexLetter = []
ITix += 2 # start on first data row
while Model_Configsheet.cell(ITix+1,3).value != None:
IT_Aspects.append(Model_Configsheet.cell(ITix+1,3).value)
IT_Description.append(Model_Configsheet.cell(ITix+1,4).value)
IT_Dimension.append(Model_Configsheet.cell(ITix+1,5).value)
IT_Classification.append(Model_Configsheet.cell(ITix+1,6).value)
IT_Selector.append(Model_Configsheet.cell(ITix+1,7).value)
IT_IndexLetter.append(Model_Configsheet.cell(ITix+1,8).value)
ITix += 1
Mylog.info('Read parameter list from model config sheet.')
PLix = 0
while True: # search for parameter list entry
if Model_Configsheet.cell(PLix+1, 2).value == 'Model Parameters':
break
else:
PLix += 1
PL_Names = []
PL_Description = []
PL_Version = []
PL_IndexStructure = []
PL_IndexMatch = []
PL_IndexLayer = []
PLix += 2 # start on first data row
while Model_Configsheet.cell(PLix+1,3).value != None:
PL_Names.append(Model_Configsheet.cell(PLix+1,3).value)
PL_Description.append(Model_Configsheet.cell(PLix+1,4).value)
PL_Version.append(Model_Configsheet.cell(PLix+1,5).value)
PL_IndexStructure.append(Model_Configsheet.cell(PLix+1,6).value)
PL_IndexMatch.append(Model_Configsheet.cell(PLix+1,7).value)
PL_IndexLayer.append(ListStringToListNumbers(Model_Configsheet.cell(PLix+1,8).value)) # strip numbers out of list string
PLix += 1
Mylog.info('Read process list from model config sheet.')
PrLix = 1
# search for process list entry
while Model_Configsheet.cell(PrLix, 2).value != 'Process Group List':
PrLix += 1
PrL_Number = []
PrL_Name = []
PrL_Comment = []
PrL_Type = []
PrLix += 2 # start on first data row
while True:
if Model_Configsheet.cell(PrLix,3).value is None:
break
PrL_Number.append(int(Model_Configsheet.cell(PrLix,3).value))
PrL_Name.append(Model_Configsheet.cell(PrLix,4).value)
PrL_Type.append(Model_Configsheet.cell(PrLix,5).value)
PrL_Comment.append(Model_Configsheet.cell(PrLix,6).value)
PrLix += 1
# while Model_Configsheet.cell(PrLix,3).value != None:
# print(Model_Configsheet.cell(PrLix,3).value)
# PrL_Number.append(int(Model_Configsheet.cell(PrLix,3).value))
# PrL_Name.append(Model_Configsheet.cell(PrLix,4).value)
# PrL_Type.append(Model_Configsheet.cell(PrLix,5).value)
# PrL_Comment.append(Model_Configsheet.cell(PrLix,6).value)
# PrLix += 1
Mylog.info('Read model run control from model config sheet.')
PrLix = 0
# search for model flow control entry
while True:
if Model_Configsheet.cell(PrLix+1, 2).value == 'Model flow control':
break
else:
PrLix += 1
# start on first data row
PrLix += 2
while True:
if Model_Configsheet.cell(PrLix+1, 3).value != None:
try:
ScriptConfig[Model_Configsheet.cell(PrLix+1, 3).value] = Model_Configsheet.cell(PrLix+1,4).value
except:
None
PrLix += 1
else:
break
Mylog.info('Read model output control from model config sheet.')
PrLix = 0
# search for model flow control entry
while True:
if Model_Configsheet.cell(PrLix+1, 2).value == 'Model output control':
break
else:
PrLix += 1
# start on first data row
PrLix += 2
while True:
if Model_Configsheet.cell(PrLix+1, 3).value != None:
try:
ScriptConfig[Model_Configsheet.cell(PrLix+1, 3).value] = Model_Configsheet.cell(PrLix+1,4).value
except:
None
PrLix += 1
else:
break
return IT_Aspects,IT_Description,IT_Dimension,IT_Classification,IT_Selector,IT_IndexLetter,PL_Names,PL_Description,PL_Version,PL_IndexStructure,PL_IndexMatch,PL_IndexLayer,PrL_Number,PrL_Name,PrL_Comment,PrL_Type,ScriptConfig
def ReadParameter(ParPath, ThisPar, ThisParIx, IndexMatch, ThisParLayerSel, MasterClassification,
IndexTable, IndexTable_ClassificationNames, ScriptConfig, Mylog):
"""
This function reads a model parameter from the corresponding parameter file
"""
Parfile = xlrd.open_workbook(ParPath + '.xlsx')
ParHeader = Parfile.sheet_by_name('Cover')
IM = eval(IndexMatch) # List that matches model aspects to parameter indices
ri = 1 # row index
MetaData = {}
while True: # read cover sheet info
ThisItem = ParHeader.cell_value(ri,0)
if ThisItem != 'Dataset_RecordType':
MetaData[ThisItem] = ParHeader.cell_value(ri,1)
ri += 1
else:
break # terminate while loop when all meta information is read.
# Now we are in the row of Dataset_RecordType
# Check whether parameter file uses same classification:
if 'ODYM_Classifications_Master_' + \
ScriptConfig['Version of master classification'] != MetaData['Dataset_Classification_version_number']:
Mylog.critical('CLASSIFICATION FILE FATAL ERROR: Classification file of parameter ' + ThisPar +
' is not identical to the classification master file used for the current model run.')
if ParHeader.cell_value(ri,1) == 'List':
IList = []
IListMeaning = []
ci = 1 # column index
while True:
if ParHeader.cell_value(ri +1,ci) != '':
IList.append(ParHeader.cell_value(ri +1,ci))
IListMeaning.append(ParHeader.cell_value(ri +2,ci))
ci += 1
else:
break
# Re-Order indices to fit model aspect order:
IList = [IList[i] for i in IM]
IListMeaning = [IListMeaning[i] for i in IM]
ValueList = []
VIComment = []
ci = 1 # column index
while True:
if ParHeader.cell_value(ri +4,ci) != '':
ValueList.append(ParHeader.cell_value(ri +3,ci))
VIComment.append(ParHeader.cell_value(ri +4,ci))
ci += 1
else:
break
# Check whether all indices are present in the index table of the model
if set(IList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Index list of data file for parameter ' + ThisPar +
' contains indices that are not part of the current model run.')
# Check how well items match between model and data, select items to import
IndexSizesM = [] # List of dimension size for model
for m in range(0,len(ThisParIx)):
ThisDim = ThisParIx[m]
# Check whether index is present in parameter file:
ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
if ThisDimClassificationName != IList[m]:
Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
ThisDim + ' of parameter ' + ThisPar +
' must be identical to the specified classification of the corresponding parameter dimension, which is ' + IList[m])
break # Stop parsing parameter, will cause model to halt
IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])
# Read parameter values into array:
Values = np.zeros((IndexSizesM))
ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded
ValuesSheet = Parfile.sheet_by_name('Values_Master')
ColOffset = len(IList)
RowOffset = 1 # fixed for this format, different quantification layers (value, error, etc.) will be read later
cx = 0
while True:
try:
CV = ValuesSheet.cell_value(cx + RowOffset, ColOffset)
except:
break
TargetPosition = []
for mx in range(0,len(IList)): # mx iterates over the aspects of the parameter
CurrentItem = ValuesSheet.cell_value(cx + RowOffset, IM[mx])
try:
TargetPosition.append(IndexTable.set_index('IndexLetter').loc[ThisParIx[mx]].Classification.Items.index(CurrentItem))
except:
break # Current parameter value is not needed for model, outside scope for a certain aspect.
if len(TargetPosition) == len(ThisParIx):
Values[tuple(TargetPosition)] = CV
ValIns[tuple(TargetPosition)] = 1
cx += 1
        Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar + ' were assigned.')
### Table version ###
if ParHeader.cell_value(ri,1) == 'Table': # have 3 while loops, one for row indices, one for column indices, one for value layers
RIList = []
RISize = []
RIListMeaning = []
ci = 1 # column index
while True:
if ParHeader.cell_value(ri +1,ci) != '':
RIList.append(ParHeader.cell_value(ri +1,ci))
RISize.append(int(ParHeader.cell_value(ri +2,1)))
RIListMeaning.append(ParHeader.cell_value(ri +3,ci))
ci += 1
else:
break
RISize = RISize[0]
CIList = []
CISize = []
CIListMeaning = []
ci = 1 # column index
while True:
if ParHeader.cell_value(ri +4,ci) != '':
CIList.append(ParHeader.cell_value(ri +4,ci))
CISize.append(int(ParHeader.cell_value(ri +5,1)))
CIListMeaning.append(ParHeader.cell_value(ri +6,ci))
ci += 1
else:
break
CISize = CISize[0]
# Re-Order indices to fit model aspect order:
ComIList = RIList + CIList
ComIList = [ComIList[i] for i in IM]
ValueList = []
VIComment = []
ci = 1 # column index
while True:
if ParHeader.cell_value(ri +7,ci) != '':
ValueList.append(ParHeader.cell_value(ri +7,ci))
VIComment.append(ParHeader.cell_value(ri +8,ci))
ci += 1
else:
break
# Check whether all indices are present in the index table of the model
if set(RIList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Row index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
if set(CIList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Column index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
# Determine index letters for RIList and CIList
RIIndexLetter = []
for m in range(0,len(RIList)):
RIIndexLetter.append(ThisParIx[IM.index(m)])
CIIndexLetter = []
for m in range(0,len(CIList)):
CIIndexLetter.append(ThisParIx[IM.index(m+len(RIList))])
# Check how well items match between model and data, select items to import
IndexSizesM = [] # List of dimension size for model
for m in range(0,len(ThisParIx)):
ThisDim = ThisParIx[m]
ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
if ThisDimClassificationName != ComIList[m]:
Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
ThisDim + ' of parameter ' + ThisPar +
' must be identical to the specified classification of the corresponding parameter dimension, which is ' +
ComIList[m])
break # Stop parsing parameter, will cause model to halt
IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])
# Read parameter values into array:
Values = np.zeros((IndexSizesM))
ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded
ValuesSheet = Parfile.sheet_by_name(ValueList[ThisParLayerSel[0]])
ColOffset = len(RIList)
RowOffset = len(CIList)
RowNos = RISize
ColNos = CISize
TargetPos_R = []
for m in range(0,RowNos):
TP_RD = []
for mc in range(0,len(RIList)):
try:
CurrentItem = int(ValuesSheet.cell_value(m + RowOffset, mc))
except:
CurrentItem = ValuesSheet.cell_value(m + RowOffset, mc)
try:
IX = ThisParIx.find(RIIndexLetter[mc])
TPIX = IndexTable.set_index('IndexLetter').loc[RIIndexLetter[mc]].Classification.Items.index(CurrentItem)
TP_RD.append((IX,TPIX))
except:
TP_RD.append(None)
break
TargetPos_R.append(TP_RD)
TargetPos_C = []
for n in range(0,ColNos):
TP_CD = []
for mc in range(0,len(CIList)):
try:
CurrentItem = int(ValuesSheet.cell_value(mc, n + ColOffset))
except:
CurrentItem = ValuesSheet.cell_value(mc, n + ColOffset)
try:
IX = ThisParIx.find(CIIndexLetter[mc])
TPIX = IndexTable.set_index('IndexLetter').loc[CIIndexLetter[mc]].Classification.Items.index(CurrentItem)
TP_CD.append((IX,TPIX))
except:
TP_CD.append(None)
break
TargetPos_C.append(TP_CD)
for m in range(0,RowNos):
for n in range(0,ColNos):
TargetPosition = [0 for i in range(0,len(ComIList))]
try:
for i in range(0,len(RIList)):
TargetPosition[TargetPos_R[m][i][0]] = TargetPos_R[m][i][1]
for i in range(0,len(CIList)):
TargetPosition[TargetPos_C[n][i][0]] = TargetPos_C[n][i][1]
except:
TargetPosition = [0]
if len(TargetPosition) == len(ComIList):
Values[tuple(TargetPosition)] = ValuesSheet.cell_value(m + RowOffset, n + ColOffset)
ValIns[tuple(TargetPosition)] = 1
Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar +
' were assigned.')
return MetaData, Values
def ReadParameterV2(ParPath, ThisPar, ThisParIx, IndexMatch, ThisParLayerSel, MasterClassification,
IndexTable, IndexTable_ClassificationNames, ScriptConfig, Mylog, ParseUncertainty):
"""
This function reads a model parameter from the corresponding parameter file
"""
Parfile = xlrd.open_workbook(ParPath + '.xlsx')
ParHeader = Parfile.sheet_by_name('Cover')
IM = eval(IndexMatch) # List that matches model aspects to parameter indices
ri = 1 # row index
MetaData = {}
while True: # read cover sheet info
ThisItem = ParHeader.cell_value(ri,0)
if (ThisItem != '[Empty on purpose]' and ThisItem != 'Dataset_RecordType'):
MetaData[ThisItem] = ParHeader.cell_value(ri,1)
if ThisItem == 'Dataset_Unit':
if ParHeader.cell_value(ri,1) == 'GLOBAL':
MetaData['Unit_Global'] = ParHeader.cell_value(ri,2)
MetaData['Unit_Global_Comment'] = ParHeader.cell_value(ri,3)
if ThisItem == 'Dataset_Uncertainty':
# if LIST is specified, nothing happens here.
if ParHeader.cell_value(ri,1) == 'GLOBAL':
MetaData['Dataset_Uncertainty_Global'] = ParHeader.cell_value(ri,2)
if ParHeader.cell_value(ri,1) == 'TABLE':
MetaData['Dataset_Uncertainty_Sheet'] = ParHeader.cell_value(ri,2)
if ThisItem == 'Dataset_Comment':
if ParHeader.cell_value(ri,1) == 'GLOBAL':
MetaData['Dataset_Comment_Global'] = ParHeader.cell_value(ri,2)
ri += 1
else:
break # terminate while loop when all meta information is read.
# Now we are in the row of Dataset_RecordType
# Check whether parameter file uses same classification:
if ScriptConfig['Version of master classification'] != MetaData['Dataset_Classification_version_number']:
Mylog.critical('CLASSIFICATION FILE FATAL ERROR: Classification file of parameter ' + ThisPar +
' is not identical to the classification master file used for the current model run.')
# Continue parsing until line 'Dataset_RecordType' is found:
while True:
ThisItem = ParHeader.cell_value(ri,0)
if ThisItem == 'Dataset_RecordType':
break
else:
ri += 1
### List version ###
if ParHeader.cell_value(ri,1) == 'LIST':
IList = []
IListMeaning = []
RI_Start = ri + 2
while True:
if ParHeader.cell_value(RI_Start,0) != '':
IList.append(ParHeader.cell_value(RI_Start,0))
IListMeaning.append(ParHeader.cell_value(RI_Start,1))
RI_Start += 1
else:
break
# Re-Order indices to fit model aspect order:
IList = [IList[i] for i in IM]
IListMeaning = [IListMeaning[i] for i in IM]
ValueList = []
VIComment = []
RI_Start = ri + 2
while True:
if ParHeader.cell_value(RI_Start,2) != '':
ValueList.append(ParHeader.cell_value(RI_Start,2))
VIComment.append(ParHeader.cell_value(RI_Start,3))
RI_Start += 1
else:
break
# Check whether all indices are present in the index table of the model
if set(IList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Index list of data file for parameter ' + ThisPar +
' contains indices that are not part of the current model run.')
# Check how well items match between model and data, select items to import
IndexSizesM = [] # List of dimension size for model
for m in range(0,len(ThisParIx)):
ThisDim = ThisParIx[m]
# Check whether index is present in parameter file:
ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
if ThisDimClassificationName != IList[m]:
Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
ThisDim + ' of parameter ' + ThisPar +
' must be identical to the specified classification of the corresponding parameter dimension, which is ' + IList[m])
break # Stop parsing parameter, will cause model to halt
IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])
# Read parameter values into array, uncertainty into list:
Values = np.zeros((IndexSizesM)) # Array for parameter values
        Uncertainty = [None] * np.prod(IndexSizesM) # parameter value uncertainties
ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded
ValuesSheet = Parfile.sheet_by_name('Values_Master')
ColOffset = len(IList)
RowOffset = 1 # fixed for this format, different quantification layers (value, error, etc.) will be read later
cx = 0
while True:
try:
CV = ValuesSheet.cell_value(cx + RowOffset, ColOffset)
except:
break
TargetPosition = []
for mx in range(0,len(IList)): # mx iterates over the aspects of the parameter
CurrentItem = ValuesSheet.cell_value(cx + RowOffset, IM[mx])
try:
TargetPosition.append(IndexTable.set_index('IndexLetter').loc[ThisParIx[mx]].Classification.Items.index(CurrentItem))
except:
break # Current parameter value is not needed for model, outside scope for a certain aspect.
if len(TargetPosition) == len(ThisParIx):
Values[tuple(TargetPosition)] = CV
ValIns[tuple(TargetPosition)] = 1
Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = ValuesSheet.cell_value(cx + RowOffset, ColOffset + 3)
cx += 1
Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar + ' were assigned.')
### Table version ###
if ParHeader.cell_value(ri,1) == 'TABLE': # have 3 while loops, one for row indices, one for column indices, one for value layers
ColNos = int(ParHeader.cell_value(ri,5)) # Number of columns in dataset
RowNos = int(ParHeader.cell_value(ri,3)) # Number of rows in dataset
RI = ri + 2 # row where indices start
RIList = []
RIListMeaning = []
while True:
if ParHeader.cell_value(RI,0) != '':
RIList.append(ParHeader.cell_value(RI,0))
RIListMeaning.append(ParHeader.cell_value(RI,1))
RI += 1
else:
break
RI = ri + 2 # row where indices start
CIList = []
CIListMeaning = []
while True:
if ParHeader.cell_value(RI,2) != '':
CIList.append(ParHeader.cell_value(RI,2))
CIListMeaning.append(ParHeader.cell_value(RI,3))
RI += 1
else:
break
# Re-Order indices to fit model aspect order:
ComIList = RIList + CIList # List of all indices, both rows and columns
ComIList = [ComIList[i] for i in IM]
RI = ri + 2 # row where indices start
ValueList = []
VIComment = []
while True:
if ParHeader.cell_value(RI,4) != '':
ValueList.append(ParHeader.cell_value(RI,4))
VIComment.append(ParHeader.cell_value(RI,5))
RI += 1
else:
break
# Check whether all indices are present in the index table of the model
if set(RIList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Row index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
if set(CIList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Column index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
# Determine index letters for RIList and CIList
RIIndexLetter = []
for m in range(0,len(RIList)):
RIIndexLetter.append(ThisParIx[IM.index(m)])
CIIndexLetter = []
for m in range(0,len(CIList)):
CIIndexLetter.append(ThisParIx[IM.index(m+len(RIList))])
# Check how well items match between model and data, select items to import
IndexSizesM = [] # List of dimension size for model
for m in range(0,len(ThisParIx)):
ThisDim = ThisParIx[m]
ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
if ThisDimClassificationName != ComIList[m]:
Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
ThisDim + ' of parameter ' + ThisPar +
' must be identical to the specified classification of the corresponding parameter dimension, which is ' +
ComIList[m])
break # Stop parsing parameter, will cause model to halt
IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])
# Read parameter values into array:
Values = np.zeros((IndexSizesM)) # Array for parameter values
        Uncertainty = [None] * np.prod(IndexSizesM) # parameter value uncertainties
ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded, contains 0 or 1.
ValuesSheet = Parfile.sheet_by_name(ValueList[ThisParLayerSel[0]])
if ParseUncertainty == True:
if 'Dataset_Uncertainty_Sheet' in MetaData:
UncertSheet = Parfile.sheet_by_name(MetaData['Dataset_Uncertainty_Sheet'])
ColOffset = len(RIList)
RowOffset = len(CIList)
cx = 0
TargetPos_R = [] # Determine all row target positions in data array
for m in range(0,RowNos):
TP_RD = []
for mc in range(0,len(RIList)):
try:
CurrentItem = int(ValuesSheet.cell_value(m + RowOffset, mc)) # in case items come as int, e.g., years
except:
CurrentItem = ValuesSheet.cell_value(m + RowOffset, mc)
try:
IX = ThisParIx.find(RIIndexLetter[mc])
TPIX = IndexTable.set_index('IndexLetter').loc[RIIndexLetter[mc]].Classification.Items.index(CurrentItem)
TP_RD.append((IX,TPIX))
except:
TP_RD.append(None)
break
TargetPos_R.append(TP_RD)
TargetPos_C = [] # Determine all col target positions in data array
for n in range(0,ColNos):
TP_CD = []
for mc in range(0,len(CIList)):
try:
CurrentItem = int(ValuesSheet.cell_value(mc, n + ColOffset))
except:
CurrentItem = ValuesSheet.cell_value(mc, n + ColOffset)
try:
IX = ThisParIx.find(CIIndexLetter[mc])
TPIX = IndexTable.set_index('IndexLetter').loc[CIIndexLetter[mc]].Classification.Items.index(CurrentItem)
TP_CD.append((IX,TPIX))
except:
TP_CD.append(None)
break
TargetPos_C.append(TP_CD)
for m in range(0,RowNos): # Read values from excel template
for n in range(0,ColNos):
TargetPosition = [0 for i in range(0,len(ComIList))]
try:
for i in range(0,len(RIList)):
TargetPosition[TargetPos_R[m][i][0]] = TargetPos_R[m][i][1]
for i in range(0,len(CIList)):
TargetPosition[TargetPos_C[n][i][0]] = TargetPos_C[n][i][1]
except:
TargetPosition = [0]
if len(TargetPosition) == len(ComIList): # Read value if TargetPosition Tuple has same length as indexList
Values[tuple(TargetPosition)] = ValuesSheet.cell_value(m + RowOffset, n + ColOffset)
ValIns[tuple(TargetPosition)] = 1
# Add uncertainty
if ParseUncertainty == True:
if 'Dataset_Uncertainty_Global' in MetaData:
Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = MetaData['Dataset_Uncertainty_Global']
if 'Dataset_Uncertainty_Sheet' in MetaData:
Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = UncertSheet.cell_value(m + RowOffset, n + ColOffset)
cx += 1
Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar +
' were assigned.')
if ParseUncertainty == True:
return MetaData, Values, Uncertainty
else:
return MetaData, Values
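# Note added for clarity: Tuple_MI (defined earlier in this module) is used above to
# map a multi-dimensional TargetPosition onto the flat Uncertainty list. It is assumed
# here to perform a row-major flattening, i.e. roughly equivalent to
# np.ravel_multi_index(TargetPosition, IndexSizesM); whatever the exact convention,
# the bookkeeping works as long as readers and consumers of Uncertainty use the same mapping.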
def ReadParameterXLSX(ParPath, ThisPar, ThisParIx, IndexMatch, ThisParLayerSel, MasterClassification,
IndexTable, IndexTable_ClassificationNames, ScriptConfig, Mylog, ParseUncertainty):
"""
    This function reads a model parameter from the corresponding parameter file and uses openpyxl.
"""
Parfile = openpyxl.load_workbook(ParPath + '.xlsx', data_only=True)
ParHeader = Parfile['Cover']
IM = eval(IndexMatch) # List that matches model aspects to parameter indices
ri = 2 # row index
MetaData = {}
while True: # read cover sheet info
ThisItem = ParHeader.cell(ri,1).value
if (ThisItem != '[Empty on purpose]' and ThisItem != 'Dataset_RecordType'):
MetaData[ThisItem] = ParHeader.cell(ri,2).value
if ThisItem == 'Dataset_Unit':
if ParHeader.cell(ri,2).value == 'GLOBAL':
MetaData['Unit_Global'] = ParHeader.cell(ri,3).value
MetaData['Unit_Global_Comment'] = ParHeader.cell(ri,4).value
if ThisItem == 'Dataset_Uncertainty':
# if LIST is specified, nothing happens here.
if ParHeader.cell(ri,2).value == 'GLOBAL':
MetaData['Dataset_Uncertainty_Global'] = ParHeader.cell(ri,3).value
if ParHeader.cell(ri,2).value == 'TABLE':
MetaData['Dataset_Uncertainty_Sheet'] = ParHeader.cell(ri,3).value
if ThisItem == 'Dataset_Comment':
if ParHeader.cell(ri,2).value == 'GLOBAL':
MetaData['Dataset_Comment_Global'] = ParHeader.cell(ri,3).value
ri += 1
else:
break # terminate while loop when all meta information is read.
# Now we are in the row of Dataset_RecordType
# Check whether parameter file uses same classification:
if ScriptConfig['Version of master classification'] != MetaData['Dataset_Classification_version_number']:
Mylog.critical('CLASSIFICATION FILE FATAL ERROR: Classification file of parameter ' + ThisPar +
' is not identical to the classification master file used for the current model run.')
# Continue parsing until line 'Dataset_RecordType' is found:
while True:
ThisItem = ParHeader.cell(ri,1).value
if ThisItem == 'Dataset_RecordType':
print(ParHeader.cell(ri,2).value)
break
else:
ri += 1
### List version ###
if ParHeader.cell(ri,2).value == 'LIST': # ri = 21
IList = []
IListMeaning = []
RI_Start = ri + 2
while ParHeader.cell(RI_Start,1).value != None:
IList.append(ParHeader.cell(RI_Start,1).value)
IListMeaning.append(ParHeader.cell(RI_Start,2).value)
RI_Start += 1
# Re-Order indices to fit model aspect order:
IList = [IList[i] for i in IM]
IListMeaning = [IListMeaning[i] for i in IM]
ValueList = []
VIComment = []
RI_Start = ri + 2
while ParHeader.cell(RI_Start,3).value != None:
ValueList.append(ParHeader.cell(RI_Start,3).value)
VIComment.append(ParHeader.cell(RI_Start,4).value)
RI_Start += 1
# Check whether all indices are present in the index table of the model
if set(IList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Index list of data file for parameter ' + ThisPar +
' contains indices that are not part of the current model run.')
# Check how well items match between model and data, select items to import
IndexSizesM = [] # List of dimension size for model
for m in range(0,len(ThisParIx)):
ThisDim = ThisParIx[m]
# Check whether index is present in parameter file:
ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
if ThisDimClassificationName != IList[m]:
Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
ThisDim + ' of parameter ' + ThisPar +
' must be identical to the specified classification of the corresponding parameter dimension, which is ' + IList[m])
break # Stop parsing parameter, will cause model to halt
IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])
# Read parameter values into array, uncertainty into list:
Values = np.zeros((IndexSizesM)) # Array for parameter values
        Uncertainty = [None] * np.prod(IndexSizesM) # parameter value uncertainties
ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded
ValuesSheet = Parfile['Values_Master']
ColOffset = len(IList)
RowOffset = 1 # fixed for this format, different quantification layers (value, error, etc.) will be read later
cx = 0
while True:
if ValuesSheet.cell(cx + RowOffset+1, ColOffset+1).value != None:
CV = ValuesSheet.cell(cx + RowOffset+1, ColOffset+1).value
else:
break
TargetPosition = []
for mx in range(0,len(IList)): # mx iterates over the aspects of the parameter
CurrentItem = ValuesSheet.cell(cx + RowOffset+1, IM[mx]+1).value
try:
TargetPosition.append(IndexTable.set_index('IndexLetter').loc[ThisParIx[mx]].Classification.Items.index(CurrentItem))
except:
break # Current parameter value is not needed for model, outside scope for a certain aspect.
if len(TargetPosition) == len(ThisParIx):
Values[tuple(TargetPosition)] = CV
ValIns[tuple(TargetPosition)] = 1
Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = ValuesSheet.cell(cx + RowOffset+1, ColOffset + 3).value
cx += 1
Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar + ' were assigned.')
### Table version ###
if ParHeader.cell(ri,2).value == 'TABLE': # have 3 while loops, one for row indices, one for column indices, one for value layers
ColNos = int(ParHeader.cell(ri,6).value) # Number of columns in dataset
RowNos = int(ParHeader.cell(ri,4).value) # Number of rows in dataset
RI = ri + 2 # row where indices start
RIList = []
RIListMeaning = []
while True:
if ParHeader.cell(RI,1).value != None:
RIList.append(ParHeader.cell(RI,1).value)
RIListMeaning.append(ParHeader.cell(RI,2).value)
RI += 1
else:
break
RI = ri + 2 # row where indices start
CIList = []
CIListMeaning = []
while True:
if ParHeader.cell(RI,3).value != None:
CIList.append(ParHeader.cell(RI,3).value)
CIListMeaning.append(ParHeader.cell(RI,4).value)
RI += 1
else:
break
# Re-Order indices to fit model aspect order:
ComIList = RIList + CIList # List of all indices, both rows and columns
ComIList = [ComIList[i] for i in IM]
RI = ri + 2 # row where indices start
ValueList = []
VIComment = []
while True:
if ParHeader.cell(RI,5).value != None:
ValueList.append(ParHeader.cell(RI,5).value)
VIComment.append(ParHeader.cell(RI,6).value)
RI += 1
else:
break
# Check whether all indices are present in the index table of the model
if set(RIList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Row index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
if set(CIList).issubset(set(IndexTable_ClassificationNames)) is False:
Mylog.error('CLASSIFICATION ERROR: Column index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
# Determine index letters for RIList and CIList
RIIndexLetter = []
for m in range(0,len(RIList)):
RIIndexLetter.append(ThisParIx[IM.index(m)])
CIIndexLetter = []
for m in range(0,len(CIList)):
CIIndexLetter.append(ThisParIx[IM.index(m+len(RIList))])
# Check how well items match between model and data, select items to import
IndexSizesM = [] # List of dimension size for model
for m in range(0,len(ThisParIx)):
ThisDim = ThisParIx[m]
ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
if ThisDimClassificationName != ComIList[m]:
Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
ThisDim + ' of parameter ' + ThisPar +
' must be identical to the specified classification of the corresponding parameter dimension, which is ' +
ComIList[m])
break # Stop parsing parameter, will cause model to halt
IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])
# Read parameter values into array:
Values = np.zeros((IndexSizesM)) # Array for parameter values
        Uncertainty = [None] * np.prod(IndexSizesM) # parameter value uncertainties
ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded, contains 0 or 1.
ValuesSheet = Parfile[ValueList[ThisParLayerSel[0]]]
if ParseUncertainty == True:
if 'Dataset_Uncertainty_Sheet' in MetaData:
UncertSheet = Parfile[MetaData['Dataset_Uncertainty_Sheet']]
ColOffset = len(RIList)
RowOffset = len(CIList)
cx = 0
TargetPos_R = [] # Determine all row target positions in data array
for m in range(0,RowNos):
TP_RD = []
for mc in range(0,len(RIList)):
try:
CurrentItem = int(ValuesSheet.cell(m + RowOffset+1, mc+1).value) # in case items come as int, e.g., years
except:
CurrentItem = ValuesSheet.cell(m + RowOffset+1, mc+1).value
try:
IX = ThisParIx.find(RIIndexLetter[mc])
TPIX = IndexTable.set_index('IndexLetter').loc[RIIndexLetter[mc]].Classification.Items.index(CurrentItem)
TP_RD.append((IX,TPIX))
except:
TP_RD.append(None)
break
TargetPos_R.append(TP_RD)
TargetPos_C = [] # Determine all col target positions in data array
for n in range(0,ColNos):
TP_CD = []
for mc in range(0,len(CIList)):
try:
CurrentItem = int(ValuesSheet.cell(mc+1, n + ColOffset+1).value)
except:
CurrentItem = ValuesSheet.cell(mc+1, n + ColOffset+1).value
try:
IX = ThisParIx.find(CIIndexLetter[mc])
TPIX = IndexTable.set_index('IndexLetter').loc[CIIndexLetter[mc]].Classification.Items.index(CurrentItem)
TP_CD.append((IX,TPIX))
except:
TP_CD.append(None)
break
TargetPos_C.append(TP_CD)
for m in range(0,RowNos): # Read values from excel template
for n in range(0,ColNos):
TargetPosition = [0 for i in range(0,len(ComIList))]
try:
for i in range(0,len(RIList)):
TargetPosition[TargetPos_R[m][i][0]] = TargetPos_R[m][i][1]
for i in range(0,len(CIList)):
TargetPosition[TargetPos_C[n][i][0]] = TargetPos_C[n][i][1]
except:
TargetPosition = [0]
if len(TargetPosition) == len(ComIList): # Read value if TargetPosition Tuple has same length as indexList
Values[tuple(TargetPosition)] = ValuesSheet.cell(m + RowOffset+1, n + ColOffset+1).value
ValIns[tuple(TargetPosition)] = 1
# Add uncertainty
if ParseUncertainty == True:
if 'Dataset_Uncertainty_Global' in MetaData:
Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = MetaData['Dataset_Uncertainty_Global']
if 'Dataset_Uncertainty_Sheet' in MetaData:
                            Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = UncertSheet.cell(m + RowOffset+1, n + ColOffset+1).value
cx += 1
Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar +
' were assigned.')
if ParseUncertainty == True:
return MetaData, Values, Uncertainty
else:
return MetaData, Values
def ExcelSheetFill(Workbook, Sheetname, values, topcornerlabel=None,
rowlabels=None, collabels=None, Style=None,
rowselect=None, colselect=None):
Sheet = Workbook.add_sheet(Sheetname)
if topcornerlabel is not None:
if Style is not None:
Sheet.write(0,0,label = topcornerlabel, style = Style) # write top corner label
else:
Sheet.write(0,0,label = topcornerlabel) # write top corner label
if rowselect is None: # assign row select if not present (includes all rows in that case)
rowselect = np.ones((values.shape[0]))
if colselect is None: # assign col select if not present (includes all columns in that case)
colselect = np.ones((values.shape[1]))
if rowlabels is not None: # write row labels
rowindexcount = 0
for m in range(0,len(rowlabels)):
if rowselect[m] == 1: # True if True or 1
if Style is None:
Sheet.write(rowindexcount +1, 0, label = rowlabels[m])
else:
Sheet.write(rowindexcount +1, 0, label = rowlabels[m], style = Style)
rowindexcount += 1
if collabels is not None: # write column labels
colindexcount = 0
for m in range(0,len(collabels)):
if colselect[m] == 1: # True if True or 1
if Style is None:
Sheet.write(0, colindexcount +1, label = collabels[m])
else:
Sheet.write(0, colindexcount +1, label = collabels[m], style = Style)
colindexcount += 1
# write values:
rowindexcount = 0
for m in range(0,values.shape[0]): # for all rows
if rowselect[m] == 1:
colindexcount = 0
for n in range(0,values.shape[1]): # for all columns
if colselect[n] == 1:
Sheet.write(rowindexcount +1, colindexcount + 1, label=values[m, n])
colindexcount += 1
rowindexcount += 1
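# Hedged usage sketch (added for illustration, not part of the original module):
# ExcelSheetFill expects an xlwt-style Workbook whose sheets support add_sheet()/write().
# The file name, labels, and values below are made up.
def _demo_ExcelSheetFill():
    import xlwt
    DemoBook = xlwt.Workbook()
    DemoVals = np.arange(6.0).reshape(2, 3)  # float values, as xlwt expects numbers or strings
    ExcelSheetFill(DemoBook, 'Demo', DemoVals, topcornerlabel='item',
                   rowlabels=['r0', 'r1'], collabels=['c0', 'c1', 'c2'],
                   rowselect=[1, 1], colselect=[1, 0, 1])  # colselect drops column 'c1'
    DemoBook.save('ExcelSheetFill_demo.xls')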
def ExcelExportAdd_tAB(Sheet,Data,rowoffset,coloffset,IName,UName,RName,FName,REName,ALabels,BLabels):
"""
This function exports a 3D array with aspects time, A, and B to a given excel sheet.
    Same as xlsxExportAdd_tAB but this version writes to xls files via xlwt.
The t dimension is exported in one row, the A and B dimensions as several rows.
Each row starts with IName (indicator), UName (unit), RName (region),
FName (figure where data are used), REName (Resource efficiency scenario),
and then come the values for the dimensions A and B and from coloffset onwards, the time dimension.
Function is meant to be used multiple times, so a rowoffset is given, incremented, and returned for the next run.
"""
for m in range(0,len(ALabels)):
for n in range(0,len(BLabels)):
Sheet.write(rowoffset, 0, label = IName)
Sheet.write(rowoffset, 1, label = UName)
Sheet.write(rowoffset, 2, label = RName)
Sheet.write(rowoffset, 3, label = FName)
Sheet.write(rowoffset, 4, label = REName)
Sheet.write(rowoffset, 5, label = ALabels[m])
Sheet.write(rowoffset, 6, label = BLabels[n])
for t in range(0,Data.shape[0]):
Sheet.write(rowoffset, coloffset + t, label = Data[t,m,n])
rowoffset += 1
return rowoffset
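# Hedged usage sketch (illustrative names and values only): export a small
# time x A x B array with xlwt, which provides the Sheet.write() API assumed above.
def _demo_ExcelExportAdd_tAB():
    import xlwt
    DemoBook = xlwt.Workbook()
    DemoSheet = DemoBook.add_sheet('Export_tAB')
    DemoData = np.random.rand(3, 2, 2)  # (time, A, B)
    NewOffset = ExcelExportAdd_tAB(DemoSheet, DemoData, rowoffset=0, coloffset=8,
                                   IName='steel stock', UName='Mt', RName='Global',
                                   FName='Fig. X', REName='Baseline',
                                   ALabels=['A1', 'A2'], BLabels=['B1', 'B2'])
    DemoBook.save('Export_tAB_demo.xls')
    return NewOffset  # row offset to use for the next indicator block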
def xlsxExportAdd_tAB(Sheet,Data,rowoffset,coloffset,IName,UName,RName,FName,REName,ALabels,BLabels):
"""
This function exports a 3D array with aspects time, A, and B to a given excel sheet.
Same as ExcelExportAdd_tAB but this function is for xlsx files with openpyxl.
The t dimension is exported in one row, the A and B dimensions as several rows.
Each row starts with IName (indicator), UName (unit), RName (region),
FName (figure where data are used), REName (Resource efficiency scenario),
and then come the values for the dimensions A and B and from coloffset onwards, the time dimension.
Function is meant to be used multiple times, so a rowoffset is given, incremented, and returned for the next run.
"""
for m in range(0,len(ALabels)):
for n in range(0,len(BLabels)):
Sheet.cell(row=rowoffset, column=1).value = IName
Sheet.cell(row=rowoffset, column=2).value = UName
Sheet.cell(row=rowoffset, column=3).value = RName
Sheet.cell(row=rowoffset, column=4).value = FName
Sheet.cell(row=rowoffset, column=5).value = REName
Sheet.cell(row=rowoffset, column=6).value = ALabels[m]
Sheet.cell(row=rowoffset, column=7).value = BLabels[n]
for t in range(0,Data.shape[0]):
Sheet.cell(row=rowoffset, column=coloffset + t +1).value = Data[t,m,n]
rowoffset += 1
return rowoffset
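# Hedged usage sketch (illustrative names and values only): the same export, but
# into an openpyxl worksheet; note that openpyxl rows and columns are 1-based,
# so rowoffset must start at 1 or higher.
def _demo_xlsxExportAdd_tAB():
    DemoBook = openpyxl.Workbook()
    DemoSheet = DemoBook.active
    DemoData = np.random.rand(3, 2, 2)  # (time, A, B)
    NewOffset = xlsxExportAdd_tAB(DemoSheet, DemoData, rowoffset=1, coloffset=8,
                                  IName='steel stock', UName='Mt', RName='Global',
                                  FName='Fig. X', REName='Baseline',
                                  ALabels=['A1', 'A2'], BLabels=['B1', 'B2'])
    DemoBook.save('Export_tAB_demo.xlsx')
    return NewOffset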
def convert_log(file, file_format='html'):
"""
Converts the log file to a given file format
:param file: The filename and path
:param file_format: The desired format
"""
output_filename = os.path.splitext(file)[0] + '.' + file_format
output = pypandoc.convert_file(file, file_format, outputfile=output_filename)
assert output == ""
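# Example use (assumes pandoc is installed and a Markdown log such as
# 'ODYM_model_run.md' exists; the file name is only illustrative):
# convert_log('ODYM_model_run.md')          # writes ODYM_model_run.html
# convert_log('ODYM_model_run.md', 'docx')  # any pandoc output format works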
# The End
|
python
|
import inspect
class ExecutionOrder():
"""An Enum that is used to designate whether a TestSet's tests
need to be run sequentially or can be run in any order."""
UNORDERED = 0
SEQUENTIAL = 1
class TestSet():
"""A very general class that represents an arbitrary set of tests. By default
    each test in a TestSet should be an instance method that begins with the string
'test', however the 'load_tests' method may be overridden to modify this
behavior. A TestSet may also include 'setup' and 'teardown' methods, called
before the tests in the set start and after they are finished, respectively."""
execution_order = ExecutionOrder.UNORDERED
configuration = None
environment = None
@classmethod
def load_tests(cls):
"""A method used by the test runner to obtain the list of test functions
this TestSet defines. The list returned should be a list of function
objects which are instance methods available to this class. The order of the
tests returned from the TestSet base class is undefined."""
        # Find all methods prefixed with the testMethodPrefix
        testMethodPrefix = "test"
        test_functions = []
        testFnNames = [n for n in dir(cls) if n.startswith(testMethodPrefix)]
        # In Python 3 undecorated methods retrieved from a class are plain
        # functions, so use isfunction rather than ismethod here.
        members = inspect.getmembers(cls, predicate=inspect.isfunction)
        for function_name, function in members:
            if function_name in testFnNames:
                test_functions.append(function)
        # Recur on superclasses to get their test methods as well
        for baseclass in cls.__bases__:
            if not hasattr(baseclass, "load_tests"):
                continue  # e.g. object, which terminates the recursion
            for function in baseclass.load_tests():
                if function.__name__ not in testFnNames:
                    testFnNames.append(function.__name__)
                    test_functions.append(function)
        return test_functions
@classmethod
def validate(cls, boolean_expression, error_message=""):
"""Asserts that the condition passed into 'boolean_expression' is true and raises the
error message specified by 'error_message' if it is false."""
assert boolean_expression, error_message
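# Illustrative sketch (not part of the framework): a minimal TestSet subclass.
# The test runner is assumed to instantiate the class and invoke each function
# returned by load_tests() on that instance.
class _ExampleTestSet(TestSet):
    """Two trivial tests used only to demonstrate discovery via load_tests()."""
    def test_truth(self):
        self.validate(True, "True should be true")
    def test_arithmetic(self):
        self.validate(1 + 1 == 2, "basic arithmetic failed")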
class SequentialTestSet(TestSet):
"""A TestSet whose tests are called in the order they are written (by line
number). In addition the setup() and teardown() are called before the first and
last test only, instead of after every test method."""
execution_order = ExecutionOrder.SEQUENTIAL
@classmethod
def load_tests(cls):
"""Like the load_test method in TestSet but sorts the test methods by their
line number. The youngest subclass has it's methods run first, and each
ancestor's test methods are called after it's child's."""
ln = lambda f: f.im_func.func_code.co_firstlineno
lncmp = lambda a, b: cmp(ln(a), ln(b))
# Find all methods prefixed with the testMethodPrefix
testMethodPrefix = "test"
test_functions = []
testFnNames = filter(lambda n,p=testMethodPrefix:
n[:len(p)] == p, dir(cls))
members = inspect.getmembers(cls, predicate=inspect.ismethod)
for function_name, function in members:
if function_name in testFnNames:
test_functions.append(function)
test_functions.sort(lncmp)
# Recur on superclasses to get their test methods as well
for baseclass in cls.__bases__:
superclass_functions = []
for function in baseclass.load_tests():
if function.__name__ not in testFnNames:
testFnNames.append(testFnName)
superclass_functions.append(function)
# sort the superclasses functions and add them to the end of the
# test_functions list
superclass_functions.sort(lncmp)
test_functions += superclass_functions
return test_functions
class UnorderedTestSet(TestSet):
"""A TestSet whose tests can be called in any order. Prefer this subclass over
the TestSet superclass, as the TestSet's ExecutionOrder is not defined."""
execution_order = ExecutionOrder.UNORDERED
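# Illustrative sketch: load_tests() on a SequentialTestSet subclass returns the
# test methods sorted by the line on which they are defined, so printing the
# discovered names shows the intended execution order.
class _ExampleSequentialSet(SequentialTestSet):
    def test_step_one(self):
        self.validate(True)
    def test_step_two(self):
        self.validate(2 > 1, "step two runs after step one")
if __name__ == "__main__":
    print([fn.__name__ for fn in _ExampleSequentialSet.load_tests()])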
|
python
|
from . import EncodedNumber
from . import RandomizedIterativeAffine
from phe import paillier
|
python
|
"""
data generator class
"""
import os
import numpy as np
import cv2
import copy
from LibMccnn.util import readPfm
import random
from tensorflow import expand_dims
#import matplotlib.pyplot as plt
from skimage.feature import local_binary_pattern
class ImageDataGenerator:
"""
input image patch pairs generator
"""
def __init__(self, left_image_list_file, radius,shuffle=False,
patch_size=(11, 11),
in_left_suffix='im0.png',
in_right_suffix='im1.png',
gtX_suffix='disp0GT.pfm',
# tunable hyperparameters
# see origin paper for details
dataset_neg_low=1.5, dataset_neg_high=6,
dataset_pos=0.5,nchannels=1
):
"""
left_image_list_file: path to text file containing training set left image PATHS, one path per line
list of left image paths are formed directly by reading lines from file
list of corresponding right image and ground truth disparity image paths are
formed by replacing in_left_suffix with in_right_suffix and gt_suffix from every left image path
"""
# Init params
self.shuffle = shuffle
self.patch_size = patch_size
self.in_left_suffix = in_left_suffix
self.in_right_suffix = in_right_suffix
self.gtX_suffix = gtX_suffix
self.dataset_neg_low = dataset_neg_low
self.dataset_neg_high = dataset_neg_high
self.dataset_pos = dataset_pos
self.nchannels = nchannels
self.radius = radius
# the pointer indicates which image are next to be used
# a mini-batch is fully constructed using one image(pair)
self.pointer = 0
self.read_image_list(left_image_list_file)
self.prefetch()
if self.shuffle:
self.shuffle_data()
    def read_image_list(self, image_list):
        """
        form lists of left, right & ground truth paths
        image_list may either be a path to a text file listing one scene folder
        per line, or an already assembled list of scene folder paths
        """
        if isinstance(image_list, str):
            with open(image_list) as f:
                folders = [line.strip() for line in f if line.strip()]
        else:
            folders = list(image_list)
        self.left_paths = []
        self.right_paths = []
        self.gtX_paths = []
        for folder in folders:
            self.left_paths.append(os.path.join(folder, self.in_left_suffix))
            self.right_paths.append(os.path.join(folder, self.in_right_suffix))
            self.gtX_paths.append(os.path.join(folder, self.gtX_suffix))
        # store total number of data
        self.data_size = len(self.left_paths)
        print("total image num in file {} is {}".format(image_list, self.data_size))
def prefetch(self):
"""
prefetch all images
generally dataset for stereo matching contains small number of images
so prefetch would not consume too much RAM
"""
self.left_images = []
self.right_images = []
self.gtX_images = []
# Create an empty list of lbps
if self.nchannels == 2:
self.left_lbps = []
self.right_lbps = []
for _ in range(self.data_size):
# NOTE: read image as grayscale as the origin paper suggested
#left_image = cv2.imread(self.left_paths[_], cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
#right_image = cv2.imread(self.right_paths[_], cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
left_image = cv2.imdecode(np.fromfile(self.left_paths[_],dtype=np.uint8), cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
right_image = cv2.imdecode(np.fromfile(self.right_paths[_], dtype=np.uint8),cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
# preprocess images by subtracting the mean and dividing by the standard deviation
# as the paper described
left_image = (left_image - np.mean(left_image, axis=(0, 1))) / np.std(left_image, axis=(0, 1))
right_image = (right_image - np.mean(right_image, axis=(0, 1))) / np.std(right_image, axis=(0, 1))
if self.nchannels == 2:
# Load the lbp data
left_lbp = np.load(self.left_lbp_paths[_]).astype(np.float32)
right_lbp = np.load(self.right_lbp_paths[_]).astype(np.float32)
# Normalize using zero mean and unit variance
left_lbp = (left_lbp - np.mean(left_lbp,axis=(0,1))) / np.std(left_lbp, axis=(0,1))
right_lbp = (right_lbp - np.mean(right_lbp, axis=(0,1))) / np.std(right_lbp, axis=(0,1))
# Put the pre-processed images within the lists
self.left_images.append(left_image)
self.right_images.append(right_image)
self.gtX_images.append(readPfm(self.gtX_paths[_]))
if self.nchannels == 2:
self.left_lbps.append(left_lbp)
self.right_lbps.append(right_lbp)
print("prefetch done")
def shuffle_data(self):
"""
Random shuffle the images and labels
"""
# Copy the paths
left_paths = copy.deepcopy(self.left_paths)
right_paths = copy.deepcopy(self.right_paths)
gtX_paths = copy.deepcopy(self.gtX_paths)
if self.nchannels == 2:
left_lbp_paths = copy.deepcopy(self.left_lbp_paths)
right_lbp_paths = copy.deepcopy(self.right_lbp_paths)
# Copy the images
left_images = copy.deepcopy(self.left_images)
right_images = copy.deepcopy(self.right_images)
gtX_images = copy.deepcopy(self.gtX_images)
if self.nchannels == 2:
left_lbps = copy.deepcopy(self.left_lbps)
right_lbps = copy.deepcopy(self.right_lbps)
# Reinitialize the paths
self.left_paths = []
self.right_paths = []
self.gtX_paths = []
if self.nchannels == 2:
self.left_lbp_paths = []
self.right_lbp_paths = []
# Reinitialize the images
self.left_images = []
self.right_images = []
self.gtX_images = []
if self.nchannels == 2:
self.left_lbps = []
self.right_lbps = []
# create list of permutated index and shuffle data accordingly
idx = np.random.permutation(self.data_size)
for i in idx:
# Permute the paths
self.left_paths.append(left_paths[i])
self.right_paths.append(right_paths[i])
self.gtX_paths.append(gtX_paths[i])
if self.nchannels == 2:
self.left_lbp_paths.append(left_lbp_paths[i])
self.right_lbp_paths.append(right_lbp_paths[i])
# Permute the images
self.left_images.append(left_images[i])
self.right_images.append(right_images[i])
self.gtX_images.append(gtX_images[i])
if self.nchannels == 2:
self.left_lbps.append(left_lbps[i])
self.right_lbps.append(right_lbps[i])
def reset_pointer(self):
"""
reset pointer to beginning of the list
"""
self.pointer = 0
if self.shuffle:
self.shuffle_data()
def next_batch(self, batch_size):
"""
This function reads the next left, right and gt images,
and random pick batch_size patch pairs from these images to
construct the next batch of training data
NOTE: one batch consists of 1 left image patch, and 2 right image patches,
which consists of 1 positive sample and 1 negative sample
        NOTE: in the original MC-CNN paper, the authors propose various data augmentation strategies
        to enhance the model generalization. Here I do not implement those strategies, but I believe it's not
        difficult to do that.
"""
# Get next batch of image (path) and labels
left_path = self.left_paths[self.pointer]
right_path = self.right_paths[self.pointer]
gtX_path = self.gtX_paths[self.pointer]
if self.nchannels == 2:
left_lbp_path = self.left_lbp_paths[self.pointer]
right_lbp_path = self.right_lbp_paths[self.pointer]
left_image = self.left_images[self.pointer]
right_image = self.right_images[self.pointer]
gtX_image = self.gtX_images[self.pointer]
if self.nchannels == 2:
left_lbp = self.left_lbps[self.pointer]
right_lbp = self.right_lbps[self.pointer]
assert left_image.shape == right_image.shape
assert left_image.shape[0:2] == gtX_image.shape
height, width = left_image.shape[0:2]
# random choose pixels around which to pick image patchs
rows = np.random.permutation(height)[0:batch_size]
cols = np.random.permutation(width)[0:batch_size]
# rule out those pixels with disparity inf and occlusion
for _ in range(batch_size):
while gtX_image[rows[_], cols[_]] == float('inf') or \
int(gtX_image[rows[_], cols[_]]) > cols[_]:
# random pick another pixel
rows[_] = random.randint(0, height-1)
cols[_] = random.randint(0, width-1)
# augment raw image with zero paddings
# this prevents potential indexing error occurring near boundaries
auged_left_image = np.zeros([height+self.patch_size[0]-1, width+self.patch_size[1]-1, 1], dtype=np.float32)
auged_right_image = np.zeros([height+self.patch_size[0]-1, width+self.patch_size[1]-1, 1], dtype=np.float32)
if self.nchannels == 2:
auged_left_lbp = np.zeros([height+self.patch_size[0]-1, width+self.patch_size[1]-1, 1], dtype=np.float32)
auged_right_lbp = np.zeros([height+self.patch_size[0]-1, width+self.patch_size[1]-1, 1], dtype=np.float32)
# NOTE: patch size should always be odd
rows_auged = int((self.patch_size[0] - 1)/2)
cols_auged = int((self.patch_size[1] - 1)/2)
auged_left_image[rows_auged: rows_auged+height, cols_auged: cols_auged+width, 0] = left_image
auged_right_image[rows_auged: rows_auged+height, cols_auged: cols_auged+width, 0] = right_image
if self.nchannels == 2:
auged_left_lbp[rows_auged: rows_auged+height, cols_auged: cols_auged+width, 0] = left_lbp
auged_right_lbp[rows_auged: rows_auged+height, cols_auged: cols_auged+width, 0] = right_lbp
# pick patches
patches_left = np.ndarray([batch_size, self.patch_size[0], self.patch_size[1], self.nchannels], dtype=np.float32)
patches_right_pos = np.ndarray([batch_size, self.patch_size[0], self.patch_size[1], self.nchannels], dtype=np.float32)
patches_right_neg = np.ndarray([batch_size, self.patch_size[0], self.patch_size[1], self.nchannels], dtype=np.float32)
for _ in range(batch_size):
row = rows[_]
col = cols[_]
# Get the left patch
patch_left = auged_left_image[row:row + self.patch_size[0], col:col+self.patch_size[1]]
if self.nchannels == 2:
patch_lbp_left = auged_left_lbp[row:row + self.patch_size[0], col:col+self.patch_size[1]]
# Put the channels as input for the left network
patches_left[_,:,:,0] = patch_left.reshape((self.patch_size[0],self.patch_size[1]))
if self.nchannels == 2:
patches_left[_,:,:,1] = patch_lbp_left.reshape((self.patch_size[0],self.patch_size[1]))
right_col = col - int(gtX_image[row, col])
# postive example
# small random deviation added
#pos_col = right_col
pos_col = -1
pos_row = -1
while pos_col < 0 or pos_col >= width:
pos_col = int(right_col + np.random.uniform(-1*self.dataset_pos, self.dataset_pos))
# Get the positive right patch
patch_right_pos = auged_right_image[row:row+self.patch_size[0], pos_col:pos_col+self.patch_size[1]]
if self.nchannels == 2:
patch_lbp_right_pos = auged_right_lbp[row:row+self.patch_size[0], pos_col:pos_col+self.patch_size[1]]
# Put the channels as input for the right positive network
patches_right_pos[_,:,:,0] = patch_right_pos.reshape((self.patch_size[0],self.patch_size[1]))
if self.nchannels == 2:
patches_right_pos[_,:,:,1] = patch_lbp_right_pos.reshape((self.patch_size[0],self.patch_size[1]))
# negative example
# large random deviation added
neg_col = -1
while neg_col < 0 or neg_col >= width:
neg_dev = np.random.uniform(self.dataset_neg_low, self.dataset_neg_high)
if np.random.randint(-1, 1) == -1:
neg_dev = -1 * neg_dev
neg_col = int(right_col + neg_dev)
# Get the negative right patch
patch_right_neg = auged_right_image[row:row+self.patch_size[0], neg_col:neg_col+self.patch_size[1]]
if self.nchannels == 2:
patch_lbp_right_neg = auged_right_lbp[row:row+self.patch_size[0], neg_col:neg_col+self.patch_size[1]]
# Put the channels as input for the right negative network
patches_right_neg[_,:,:,0] = patch_right_neg.reshape((self.patch_size[0],self.patch_size[1]))
if self.nchannels == 2:
patches_right_neg[_,:,:,1] = patch_lbp_right_neg.reshape((self.patch_size[0],self.patch_size[1]))
if False:
fig = plt.figure()
plt.subplot(2,2,1)
plt.imshow(np.squeeze(patch_left))
plt.title('Anchor'.format(_))
plt.subplot(2,2,2)
plt.imshow(np.squeeze(patch_right_pos))
plt.title('Positive')
plt.subplot(2,2,4)
plt.imshow(np.squeeze(patch_right_neg))
                plt.title('Negative')
plt.show()
#update pointer
self.pointer += 1
return patches_left, patches_right_pos, patches_right_neg
def next_pair(self):
# Get next images
left_path = self.left_paths[self.pointer]
right_path = self.right_paths[self.pointer]
gtX_path = self.gtX_paths[self.pointer]
# Read images
left_image = self.left_images[self.pointer]
right_image = self.right_images[self.pointer]
gtX_image = self.gtX_images[self.pointer]
assert left_image.shape == right_image.shape
assert left_image.shape[0:2] == gtX_image.shape
#update pointer
self.pointer += 1
return left_image, right_image, gtX_image
def test_mk(self, path):
if os.path.exists(path):
return
else:
os.mkdir(path)
# just used for debug
if __name__ == "__main__" :
    dg = ImageDataGenerator("/scratch/xz/MC-CNN-python/data/list/train.txt", radius=2)  # radius value is illustrative only
patches_left, patches_right_pos, patches_right_neg = dg.next_batch(128)
print(patches_left.shape)
print(patches_right_pos.shape)
print(patches_right_neg.shape)
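    # Added sketch: next_pair() returns the full left, right and ground-truth
    # disparity images (rather than training patches), e.g. for inspection.
    dg.reset_pointer()
    left_img, right_img, gtX_img = dg.next_pair()
    print(left_img.shape, right_img.shape, gtX_img.shape)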
|
python
|
import pytest
xgcm = pytest.importorskip("xgcm")
from xarrayutils.build_grids import rebuild_grid
from numpy.testing import assert_allclose
from .datasets import datagrid_dimtest, datagrid_dimtest_ll
@pytest.mark.parametrize(
"test_coord",
["i", "j", "i_g", "j_g", "XC", "XG", "YC", "YG", "dxC", "dxG", "dyC", "dyG"],
)
# TODO This should be able to read all coord variable from the dataset
# so it's not hardcoded, but I can't get it to work
def test_rebuild_grid(datagrid_dimtest, test_coord):
a = datagrid_dimtest
coords = a.coords.keys()
coords_stripped = [x for x in coords if x not in ["i", "j", "XC", "YC"]]
stripped = a.drop(coords_stripped)
b = rebuild_grid(stripped, x_wrap=360.0, y_wrap=180.0, ll_dist=False)
assert b[test_coord].dims == a[test_coord].dims
assert_allclose(b[test_coord].data, a[test_coord].data)
@pytest.mark.parametrize(
"test_coord",
["i", "j", "i_g", "j_g", "XC", "XG", "YC", "YG", "dxC", "dxG", "dyC", "dyG"],
)
# TODO This should be able to read all coord variable from the dataset
# so it's not hardcoded, but I can't get it to work
def test_rebuild_grid_ll(datagrid_dimtest_ll, test_coord):
a = datagrid_dimtest_ll
coords = a.coords.keys()
coords_stripped = [x for x in coords if x not in ["i", "j", "XC", "YC"]]
stripped = a.drop(coords_stripped)
b = rebuild_grid(stripped, x_wrap=360.0, y_wrap=180.0, ll_dist=True)
assert b[test_coord].dims == a[test_coord].dims
assert_allclose(b[test_coord].data, a[test_coord].data)
|
python
|
#!/usr/bin/env python
from mailanalyzer import statistics
from mailanalyzer import parsing
from collections import defaultdict
import datetime
import csv
import time
# path to directory with Dovecot mailing list archive
dir_name = ''
# output path
output = 'output/dovecot/{}_'.format(time.strftime('%Y%m%d_%H%M'))
# creates stats instance
datestats = statistics.DateStatistic(date_aggregation = '%Y-%m')
parsing.process_mbox_dir(dir_name, datestats)
datestats.write_provider_data(output + 'provider_data.csv')
datestats.write_hidden_provider_data(output + 'hidden_provider_data.csv')
|
python
|
# -*- coding: utf-8 -*-
"""
Train a simple deep ResNet on the UPavia 2D dataset.
Exception: Only layers of same output shape can be merged using sum mode. Layer shapes:
[(None, 7, 7, 128), (None, 4, 4, 128)]
"""
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.utils.np_utils import to_categorical
import keras.callbacks as kcallbacks
from sklearn import metrics
import time
import resnet
from . import zeroPadding
from . import normalization
from . import sampleFixNum
from . import doPCA
import collections
import time
from . import averageAccuracy
batch_size = 32 #training parameters setting
nb_classes = 9
nb_epoch = 400 #400
# input image dimensions
img_rows, img_cols = 27, 27 #27, 27
# The CIFAR10 images are RGB.
INPUT_DIMENSION = 103
VALIDATION_SPLIT = 0.9
PATCH_LENGTH = 13 #Patch_size (13*2+1)*(13*2+1)
#INPUT_DIMENSION = 103
best_weights_path = '/home/finoa/DL-on-HSI-Classification/Best_models/best_3DRAWResNet_UPavia.hdf5'
uPavia = sio.loadmat('/home/finoa/DL-on-HSI-Classification/Datasets/UPavia/PaviaU.mat')
gt_uPavia = sio.loadmat('/home/finoa/DL-on-HSI-Classification/Datasets/UPavia/PaviaU_gt.mat')
data_UP = uPavia['paviaU']
gt_UP = gt_uPavia['paviaU_gt']
print(data_UP.shape)
# def zeroPadding(old_matrix, pad_length):
# new_matrix = np.lib.pad(old_matrix, ((pad_length, pad_length),(pad_length, pad_length)), 'constant', constant_values=0)
# return new_matrix
def trainDataZeroPadding(old_matrx, pad_length):
new_matrix = np.lib.pad(old_matrx, ((0, pad_length), (0, pad_length)), 'constant', constant_values=0)
return new_matrix
def indexToAssignment(index_, Row, Col, pad_length):
new_assign = {}
for counter, value in enumerate(index_):
assign_0 = value // Col + pad_length
assign_1 = value % Col + pad_length
new_assign[counter] = [assign_0, assign_1]
return new_assign
def assignmentToIndex( assign_0, assign_1, Row, Col):
new_index = assign_0 * Col + assign_1
return new_index
def selectNeighboringPatch(matrix, pos_row, pos_col, ex_len):
selected_rows = matrix[list(range(pos_row-ex_len,pos_row+ex_len+1)), :, :]
selected_patch = selected_rows[:, list(range(pos_col-ex_len, pos_col+ex_len+1)), :]
return selected_patch
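# Added sanity sketch (illustrative values, not part of the training pipeline):
# indexToAssignment maps a flat pixel index of a Row x Col image to its (row, col)
# position inside the zero-padded array, and assignmentToIndex inverts the mapping
# once the padding offset has been removed.
assert indexToAssignment([5], 3, 4, PATCH_LENGTH)[0] == [1 + PATCH_LENGTH, 1 + PATCH_LENGTH]
assert assignmentToIndex(1, 1, 3, 4) == 5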
# def sampling(proptionVal, groundTruth): #divide dataset into train and test datasets
# labels_loc = {}
# train = {}
# test = {}
# m = max(groundTruth)
# for i in range(m):
# indices = [j for j, x in enumerate(groundTruth.ravel().tolist()) if x == i + 1]
# np.random.shuffle(indices)
# labels_loc[i] = indices
# nb_val = int(proptionVal * len(indices))
# train[i] = indices[:-nb_val]
# test[i] = indices[-nb_val:]
# # whole_indices = []
# train_indices = []
# test_indices = []
# for i in range(m):
# # whole_indices += labels_loc[i]
# train_indices += train[i]
# test_indices += test[i]
# np.random.shuffle(train_indices)
# np.random.shuffle(test_indices)
# return train_indices, test_indices
data = data_UP.reshape(np.prod(data_UP.shape[:2]),np.prod(data_UP.shape[2:]))
gt = gt_UP.reshape(np.prod(gt_UP.shape[:2]),)
data = normalization.Normalization(data)
data_ = data.reshape(data_UP.shape[0], data_UP.shape[1],data_UP.shape[2])
# data_trans = data.transpose()
# whole_pca = doPCA.dimension_PCA(data_trans, data_UP, INPUT_DIMENSION)
whole_pca = data_ #raw data
print (whole_pca.shape)
padded_data = zeroPadding.zeroPadding_3D(whole_pca, PATCH_LENGTH)
ITER = 10 #ITER = 10
CATEGORY = 9
OA = []
AA = []
TRAINING_TIME = []
TESTING_TIME = []
ELEMENT_ACC = np.zeros((ITER, CATEGORY))
for index_iter in range(ITER):
print ("Iteration #:", index_iter)
millis = int(round(time.time()) * 1000) % 4294967295
np.random.seed(millis)
train_indices, test_indices = sampleFixNum.sampling(VALIDATION_SPLIT, gt)
y_train = gt[train_indices] - 1
y_train = to_categorical(np.asarray(y_train))
y_test = gt[test_indices] - 1
y_test = to_categorical(np.asarray(y_test))
#first principal component training data
train_assign = indexToAssignment(train_indices, whole_pca.shape[0], whole_pca.shape[1], PATCH_LENGTH)
train_data = np.zeros((len(train_assign), 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, INPUT_DIMENSION))
#train_data = np.zeros((len(train_assign), 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1))
for i in range(len(train_assign)):
train_data[i] = selectNeighboringPatch(padded_data,train_assign[i][0],train_assign[i][1],PATCH_LENGTH)
# train_data[i] = trainDataZeroPadding(train_data_origin, img_rows - 2*PATCH_LENGTH - 1)
#first principal component testing data
test_assign = indexToAssignment(test_indices, whole_pca.shape[0], whole_pca.shape[1], PATCH_LENGTH)
test_data = np.zeros((len(test_assign), 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, INPUT_DIMENSION))
#test_data = np.zeros((len(test_assign), 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1))
for i in range(len(test_assign)):
test_data[i] = selectNeighboringPatch(padded_data,test_assign[i][0],test_assign[i][1],PATCH_LENGTH)
# test_data[i] = trainDataZeroPadding(test_data_origin, img_cols - 2*PATCH_LENGTH - 1)
x_train = train_data.reshape(train_data.shape[0], train_data.shape[1], train_data.shape[2], INPUT_DIMENSION)
x_test = test_data.reshape(test_data.shape[0], test_data.shape[1], test_data.shape[2], INPUT_DIMENSION)
model = resnet.ResnetBuilder.build_resnet_50((INPUT_DIMENSION, img_rows, img_cols), nb_classes)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
earlyStopping = kcallbacks.EarlyStopping(monitor='val_loss', patience=100, verbose=1, mode='auto')
saveBestModel = kcallbacks.ModelCheckpoint(best_weights_path, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
tic = time.clock()
history = model.fit(x_train, y_train, validation_data=(x_test[-4800:], y_test[-4800:]), batch_size=batch_size, nb_epoch=nb_epoch, shuffle=True ,callbacks=[earlyStopping, saveBestModel])
toc = time.clock()
tic1 = time.clock()
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=batch_size)
toc1 = time.clock()
print('Training Time: ', toc - tic)
print('Test time:', toc1 - tic1)
print('Test score:', loss_and_metrics[0])
print('Test accuracy:', loss_and_metrics[1])
print(list(history.history.keys()))
pred_test = model.predict(x_test).argmax(axis=1)
collections.Counter(pred_test)
gt_test = gt[test_indices] - 1
overall_acc = metrics.accuracy_score(pred_test, gt_test)
confusion_matrix = metrics.confusion_matrix(pred_test, gt_test)
each_acc, average_acc = averageAccuracy.AA_andEachClassAccuracy(confusion_matrix)
OA.append(overall_acc)
AA.append(average_acc)
TRAINING_TIME.append(toc - tic)
TESTING_TIME.append(toc1 - tic1)
ELEMENT_ACC[index_iter, :] = each_acc
print("Overall Accuracy:", overall_acc)
print("Confusion matrix:", confusion_matrix)
print("Average Accuracy:", average_acc)
print("Each Class Accuracies are listed as follows:")
for idx, acc in enumerate(each_acc):
print("Class %d : %.3e" % (idx, acc))
f = open('/home/finoa/Desktop/record_new_3DRes_32.txt', 'w')
sentence1 = 'OAs, mean_OA ± std_OA for each iteration are:' + str(OA) + str(np.mean(OA)) + ' ± ' + str(np.std(OA)) +'\n'
f.write(sentence1)
sentence2 = 'AAs, mean_AA ± std_AA for each iteration are:' + str(AA) + str(np.mean(AA)) + ' ± ' + str(np.std(AA)) +'\n'
f.write(sentence2)
sentence3 = 'Average Training time is :' + str(np.mean(TRAINING_TIME)) +'\n'
f.write(sentence3)
sentence4 = 'Average Testing time is:' + str(np.mean(TESTING_TIME)) +'\n'
f.write(sentence4)
element_mean = np.mean(ELEMENT_ACC, axis=0)
element_std = np.std(ELEMENT_ACC, axis=0)
sentence5 = "Mean of all elements in confusion matrix:" + str(np.mean(ELEMENT_ACC, axis=0)) +'\n'
f.write(sentence5)
sentence6 = "Standard deviation of all elements in confusion matrix" + str(np.std(ELEMENT_ACC, axis=0)) +'\n'
f.write(sentence6)
f.close()
print_matrix = np.zeros((CATEGORY), dtype=object)
for i in range(CATEGORY):
print_matrix[i] = str(element_mean[i]) + " ± " + str(element_std[i])
np.savetxt("/home/finoa/Desktop/element_acc_raw3d_resnet.txt", print_matrix.astype(str), fmt='%s', delimiter="\t", newline='\n')
print('Test score:', loss_and_metrics[0])
print('Test accuracy:', loss_and_metrics[1])
print(list(history.history.keys()))
plt.figure(2)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.figure(3)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
python
|
import numpy as np
from datetime import datetime
from twisted.internet.defer import inlineCallbacks
from EGGS_labrad.lib.clients.cryovac_clients.RGA_gui import RGA_gui
class RGA_client(RGA_gui):
name = 'RGA Client'
BUFFERID = 289961
def __init__(self, reactor, cxn=None, parent=None):
super().__init__()
self.cxn = cxn
self.gui = self
self.gui.setupUi()
self.reactor = reactor
self.servers = ['RGA Server', 'Data Vault']
# initialization sequence
d = self.connect()
d.addCallback(self.initData)
d.addCallback(self.initializeGUI)
# SETUP
@inlineCallbacks
def connect(self):
"""
Creates an asynchronous connection to labrad.
"""
# create connection to labrad manager
if not self.cxn:
import os
LABRADHOST = os.environ['LABRADHOST']
from labrad.wrappers import connectAsync
self.cxn = yield connectAsync(LABRADHOST, name=self.name)
# get servers
try:
self.dv = self.cxn.data_vault
self.reg = self.cxn.registry
self.rga = self.cxn.rga_server
except Exception as e:
print('Required servers not connected, disabling widget.')
self.setEnabled(False)
# connect to signals
# device parameters
yield self.rga.signal__buffer_update(self.BUFFERID)
yield self.rga.addListener(listener=self.updateBuffer, source=None, ID=self.BUFFERID)
# server connections
yield self.cxn.manager.subscribe_to_named_message('Server Connect', 9898989, True)
yield self.cxn.manager.addListener(listener=self.on_connect, source=None, ID=9898989)
yield self.cxn.manager.subscribe_to_named_message('Server Disconnect', 9898989 + 1, True)
yield self.cxn.manager.addListener(listener=self.on_disconnect, source=None, ID=9898989 + 1)
# set recording stuff
self.c_record = self.cxn.context()
return self.cxn
@inlineCallbacks
def initData(self, cxn):
"""
Get startup data from servers and show on GUI.
"""
# lock while starting up
self.setEnabled(False)
self.buffer_readout.appendPlainText('Initializing client...')
# lockswitches
self.gui.general_lockswitch.setChecked(True)
self.gui.ionizer_lockswitch.setChecked(True)
self.gui.detector_lockswitch.setChecked(True)
self.gui.scan_lockswitch.setChecked(True)
# ionizer
ee_val = yield self.rga.ionizer_electron_energy()
ie_val = yield self.rga.ionizer_ion_energy()
fl_val = yield self.rga.ionizer_emission_current()
vf_val = yield self.rga.ionizer_focus_voltage()
self.gui.ionizer_ee.setValue(ee_val)
self.gui.ionizer_ie.setCurrentIndex(ie_val)
self.gui.ionizer_fl.setValue(fl_val)
self.gui.ionizer_vf.setValue(vf_val)
# detector
hv_val = yield self.rga.detector_cdem_voltage()
nf_val = yield self.rga.detector_noise_floor()
self.gui.detector_hv.setValue(hv_val)
self.gui.detector_nf.setCurrentIndex(nf_val)
# scan
mi_val = yield self.rga.scan_mass_initial()
mf_val = yield self.rga.scan_mass_final()
sa_val = yield self.rga.scan_mass_steps()
self.gui.scan_mi.setValue(mi_val)
self.gui.scan_mf.setValue(mf_val)
self.gui.scan_sa.setValue(sa_val)
# unlock after startup
self.setEnabled(True)
self.buffer_readout.appendPlainText('Initialized.')
return cxn
def initializeGUI(self, cxn):
"""
Connect signals to slots and other initializations.
"""
# general
self.gui.initialize.clicked.connect(lambda: self.rga.initialize())
self.gui.calibrate_detector.clicked.connect(lambda: self.rga.detector_calibrate())
self.gui.general_tp.clicked.connect(lambda: self.rga.tpm_start())
# ionizer
self.gui.ionizer_ee.valueChanged.connect(lambda value: self.rga.ionizer_electron_energy(int(value)))
self.gui.ionizer_ie.currentIndexChanged.connect(lambda index: self.rga.ionizer_ion_energy(index))
self.gui.ionizer_fl.valueChanged.connect(lambda value: self.rga.ionizer_emission_current(value))
self.gui.ionizer_vf.valueChanged.connect(lambda value: self.rga.ionizer_focus_voltage(value))
# detector
self.gui.detector_hv.valueChanged.connect(lambda value: self.rga.detector_cdem_voltage(int(value)))
self.gui.detector_nf.currentIndexChanged.connect(lambda index: self.rga.detector_noise_floor(index))
# scan
self.gui.scan_start.clicked.connect(lambda: self.startScan())
# buffer
self.gui.buffer_clear.clicked.connect(lambda: self.gui.buffer_readout.clear())
return cxn
# SIGNALS
@inlineCallbacks
def on_connect(self, c, message):
server_name = message[1]
if server_name in self.servers:
print(server_name + ' reconnected, enabling widget.')
# get latest values
yield self.initData(self.cxn)
self.setEnabled(True)
def on_disconnect(self, c, message):
server_name = message[1]
if server_name in self.servers:
print(server_name + ' disconnected, disabling widget.')
self.setEnabled(False)
def updateBuffer(self, c, data):
"""
Updates GUI when values are received from server.
"""
param, value = data
self.gui.buffer_readout.appendPlainText('{}: {}'.format(param, value))
# SLOTS
@inlineCallbacks
def startScan(self):
"""
Creates a new dataset to record pressure and
tells polling loop to add data to data vault.
"""
# set up datavault
date = datetime.now()
year = str(date.year)
month = '{:02d}'.format(date.month)
trunk1 = '{0:s}_{1:s}_{2:02d}'.format(year, month, date.day)
trunk2 = '{0:s}_{1:02d}:{2:02d}'.format(self.name, date.hour, date.minute)
yield self.dv.cd(['', year, month, trunk1, trunk2], True, context=self.c_record)
yield self.dv.new('SRS RGA Scan', [('Mass', 'amu')],
[('Scan', 'Current', '1e-16 A')], context=self.c_record)
# get scan parameters from widgets
mass_initial = int(self.gui.scan_mi.value())
mass_final = int(self.gui.scan_mf.value())
mass_step = int(self.gui.scan_sa.value())
type = self.gui.scan_type.currentText()
num_scans = int(self.gui.scan_num.value())
# send scan parameters to RGA
yield self.rga.scan_mass_initial(mass_initial)
yield self.rga.scan_mass_final(mass_final)
yield self.rga.scan_mass_steps(mass_step)
# do scan
self.gui.buffer_readout.appendPlainText('Starting scan...')
self.gui.setEnabled(False)
x, y = yield self.rga.scan_start(type, num_scans)
data_tmp = np.array([x, y]).transpose()
yield self.dv.add_ex(data_tmp, context=self.c_record)
self.gui.buffer_readout.appendPlainText('Scan finished.')
self.gui.setEnabled(True)
def closeEvent(self, event):
self.cxn.disconnect()
if self.reactor.running:
self.reactor.stop()
if __name__ == "__main__":
from EGGS_labrad.lib.clients import runClient
runClient(RGA_client)
|
python
|
import glob
import os
import getpass
print(" ------------------------\n| Piggy's Trolling Tools: |\n| :-: File Deleter :-: |\n ------------------------\n")
where = input("Input directory where files should be deleted: ")
regex = input("Enter regex to match files (example: important*.txt): ")
os.chdir(where)
os.system("cls")
file_list = glob.glob(regex)
print(f"Loaded {file_list.__len__()} files:\n")
print(file_list)
getpass.getpass(prompt="\nPress enter to delete\n")
for file_ in file_list:
try:
os.remove(file_)
print(f"Removed: \"{file_}\"|")
except Exception as error:
print(error)
pass
print("\nFinished.\n")
|
python
|
"""
Basic atmosphere functions
This dark flight model predicts the landing sight of a meteoroid by
propagating the position and velocity through the atmosphere using a
5th-order adaptive step size integrator (ODE45).
Created on Mon Oct 17 10:59:00 2016
@author: Trent Jansen-Sturgeon
"""
import numpy as np
from scipy.interpolate import interp1d
def reynolds(rho, vel, dvisc, length=0.05):
"""returns reynolds no as fn of fluid density,
velocity, dynamic viscocity and diam of body"""
    # the default length of 0.05 m is typical for meteorites
return rho * vel * length / dvisc
def knudsen(mach, re):
"""returns knudsen number, fn of mach and reynolds, for dry air
(take ratio specific heats as 1.40)"""
# Note: 1.4 is only specifically for dry air at atmospheric temperatures.
# We might need to change this in darkflight...
return mach / re * np.sqrt(np.pi * 1.40 / 2.0)
def SoS(T):
"""function input atmospheric temp (T). returns speed of sound in m/s
as fn of temperature (in K).
see:
http://en.wikipedia.org/wiki/Earth's_atmosphere and
http://en.wikipedia.org/wiki/Density_of_air
"""
return 331.3 * np.sqrt( T / 273.15)
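# Quick sanity check (illustrative): SoS(288.15) ~= 340.3 m/s, the familiar
# sea-level speed of sound for a standard-atmosphere temperature of 15 degC.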
def viscosity(T):
"""function input atmospheric T in Kelvin. Dynamic Viscosity of atm in Pa.s
is returned.
Function uses Sutherland's formula.
"""
###viscosty of air as function of height, using Sutherland's formula:
#http://en.wikipedia.org/wiki/Viscosity#Gases
#http://wiki.xtronics.com/index.php/Viscosity#The_dependence_on_pressucd_of_Viscosity
#('With Gases until the pressure is less than 3% of normal air pressure
#the change is negligible on falling bodies.')
C = 120 #Sutherland's constant for air - Kelvin
#see wikipedia http://en.wikipedia.org/wiki/Viscosity
# Gas C[K] T_ref[K] mu_ref[mu_Pa s]
# air 120 291.15 18.27
reference_visc = 18.27e-6
reference_T = 291.15
return reference_visc * (reference_T + C)/(T + C) * (T / reference_T)**1.5
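# Sanity checks (illustrative): by construction viscosity(291.15) returns the
# reference value 18.27e-6 Pa.s, and viscosity(273.15) ~= 1.74e-5 Pa.s.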
def dragcoef(re, mach, kn=0.0001, A=1.21):
"""returns drag coefficient as fn of reynolds number, mach and knudsen no;
for a spherical particle in fluid
"""
##Kn >=10 : ReVelle 1976; Masson et al. 1960
## Transition Flow Regime : 0.1<Kn<10 : Khanukaeva 2005
## Continuum Flow Regime : Kn >= 0.1
    ## -Hypersonic: for spheres (Bronshten 1983; ReVelle 1976; Masson et al. 1960)
    ##              for circular cylinders (Truitt 1959)
    ##              for tiles and bricks (Zhdan et al. 2007)
    ## -Subsonic (see Haider and Levenspiel (1989))
cd_fm = 2.00
cd_cont = 0.98182 * A**2 - 1.78457 * A + 1.641837
cd_mid = 1.0
cd_low = 1.0
cd_rescale = 1.0 #rescale low speed drag as ratio compared to sphere
kn_max = 10.0 # free molecular flow
kn_min = 0.01 #continuum regime
if kn > kn_max:
cd = cd_fm
elif kn > kn_min: # bridging function for transition to continuum
# top = np.log( 1.0/kn) - np.log( 1.0/kn_max)
# bot = np.log( 1.0/kn_min) - np.log( 1.0/kn_max)
# avg = (cd_fm + cd_cont) / 2.0
# diff = (cd_fm - cd_cont) / 2.0
# cd = avg + diff * np.exp( np.pi*top/bot )
cd = cd_cont + (cd_fm - cd_cont) * np.exp(-0.001 * re**2)
#return 1.55 + 0.65 * np.cos( np.pi*top/bot )
else: #now in pure contimuum flow, **the default for most darkflight**
if mach > 8.0:
cd = cd_cont
## if A<0.75:
## cd=2.0;
## elif A<1.21:
## A_min = 0.75;
## cd=0.92 + 1.08 * (1.21-A) / (1.21-A_min);
## elif A<1.75:
## A_max = 1.75;
## cd=0.92 + 1.08 * (A - 1.21) / (A_max- 1.21);
## else:
## cd=2.0;
elif mach > 2.0: # 2 to 8
#arbitrary bridging function
cd = cd_mid + (cd_cont-cd_mid) * (mach-2.0) / (8.0-2.0)
elif mach > 1.1: # 1.1 to 2
cd = cd_mid
elif mach > 0.6: #0.6 to 1.1
cd = cd_low * (0.725 + 0.275*np.sin( np.pi*(mach-0.85)/0.5))
else: # mach < 0.6:
# a = 0.1806; b = 0.6459
# c = 0.4251; d = 6880.95
# cd = 24 / re * (1 + a * re**b) + c / (1 + d / re)
            # Re cut-offs from boundary layer theory
            if re < 2e5:
                # sub-critical sphere drag from Brown & Lawler
                cd = (24/re) * (1.0 + 0.15*re**0.681) + 0.407/(1 + 8710/re)
elif re<3.2e5:
cd =- 2.24229520715170e-17*re**3 \
+ 2.28663611400439e-11*re**2 \
- 7.46988882625855e-06*re \
+ 0.986662115581471
elif re<3.5e6:
cd =0.4/3180000*re+0.15974842767295
else:
cd = 0.6
return cd
######################################
def interp_shape(A, vals):
# Shape (A) corresponding to [sphere, cylinder, brick]
A_vals = np.array([1.21, 1.6, 2.7])
val = interp1d(A_vals, vals, kind='linear', fill_value='extrapolate')(A)
return val
def cd_hypersonic(A):
# Hypersonic drag coefficient for a variety of shapes
cd_hyp_vals = np.array([0.92, 1.3, 2.0])
cd_hyp = interp_shape(A, cd_hyp_vals)
return cd_hyp
def cd_subsonic(re, A):
''' Sub-critical drag coefficient '''
    # Estimate the sphericity ('thi') assuming an ellipsoid
V = 1; sa_eq = (36 * np.pi * V**2)**(1./3)
a_ax = np.sqrt(A * V**(2./3) / np.pi); c_ax = (3 * V**(1./3)) / (4 * A)
thi = sa_eq / (4 * np.pi * ((a_ax**3.2 + 2 * (a_ax * c_ax)**1.6) / 3)**(1./1.6))
thi_perp = (sa_eq / 4) / (np.pi * a_ax**2)
# # Equation 10 of Holzer and Sommerfeld (2008)
# cd_sub = lambda re: 8./(re * thi_perp**0.5) + 16./(re * thi**0.5) +\
# + 3./(re**0.5 * thi**0.75) + 0.4210**(0.4 * (-np.log(thi))**0.2) / thi_perp
# Sub-critical regime - Haider and Levenspiel (1989)
a = np.exp(2.3288 - 6.4581 * thi + 2.4486 * thi**2)
b = 0.0964 + 0.5565 * thi
c = np.exp(4.905 - 13.8944 * thi + 18.4222 * thi**2 - 10.2599 * thi**3)
d = np.exp(1.4681 + 12.2584 * thi - 20.7322 * thi**2 + 15.8855 * thi**3)
cd_subcrit = lambda re: 24./re * (1 + a * re**b) + c/(1 + d/re)
# Forget about the critical / supercritical regions for now...
cd_sub = cd_subcrit(re)
# ''' Super-critical drag coefficient '''
# cd_super_vals = np.array([0.33, 0.6, 2.0]) #<--- need to change this to cd_supercrit
# cd_supercrit = interp_shape(A, cd_super_vals)
# ''' Linking drag coefficient through the critical region '''
# # Magic correction function 1: Logistic fn (in logspace)
# logistic_fn = lambda re: cd_subcrit(re) + (cd_supercrit - cd_subcrit(re)) \
# / (1 + (los*re_c / re)**(1/log_hw))
# # # Smooth bodies - Boundary Layer Theory by Schlichting
# # log_hw = 0.6 # log-half-width of the critical region
# # re_c = 4.5e5 # re at cd_critical
# # los = 2. # logistic function offset
# # cd_dip = [0.1, 0.3, logistic_fn(re_c)] # lowest point within cd_critical for sphere/cylinder
# # Rough bodies -
# log_hw = 0.4 # log-half-width of the critical region
# re_c = 1e5 # re at cd_critical
# los = 2. # logistic function offset
# cd_dip = [0.2, 0.5, logistic_fn(re_c)] # lowest point within cd_critical for sphere/cylinder
# # Magic correction function 2: Gumbel Distribution (in logspace)
# cd_c = interp_shape(A, np.array(cd_dip)) # cd_critical value
# gumbel_dist = lambda re: (cd_c - logistic_fn(re_c)) * \
# np.exp(1 - 1/log_hw * np.log(re/re_c) - (re_c/re)**(1/log_hw))
# # Overall corrected drag equation
# cd_sub = logistic_fn(re) + gumbel_dist(re)
return cd_sub
def cd_fm(vel):
# Calculation of Coefficients in Meteoric Physics Equations - Khanukaeva (2005)
cd_fm = 2. + np.sqrt(1.2) / (2. * vel/1000.) * (1. + (vel/1000.)**2 / 16. + 30.)
return cd_fm
def dragcoeff(vel, temp, rho_a, A): # by Trent
'''
The drag coefficient is bloody annoying!
'''
# Determine some needed variables
mu_a = viscosity(temp) # Air Viscosity (Pa.s)
mach = vel / SoS(temp) # Mach Number
re = reynolds(rho_a, vel, mu_a, 0.1) # Reynolds Number
kn = knudsen(mach, re) # Knudsen Number
''' Determine cd '''
if kn > 10.0: # Free molecular flow
cd = cd_fm(vel)
# ^---- still not fully convinced by this equation - never gets near the traditional value of 2?
elif kn > 0.01: # Bridging function for transition to continuum
cd = cd_subsonic(re, A) + (cd_fm(vel) - cd_subsonic(re, A)) * np.exp(-0.001 * re**2)
# ^---- don't know how to verify yet
# print('Transition region: kn={0:.2e}, cd={1:.2f}'.format(kn, cd))
else: # Pure continuum flow [the default for most darkflight]
# See Miller and Bailey (1979) for details
cd_sub = cd_subsonic(re, A)
cd_hyp = cd_hypersonic(A)
logistic_fn = lambda M: cd_sub + (cd_hyp - cd_sub) \
/ (1 + np.exp(-(M - M_c) / hw))
        # These parameters can be tweaked
hw_vals = np.array([0.5, 0.3, 0.1]) # logistic and gumbel half-widths
hw = interp_shape(A, hw_vals)
M_c_vals = np.array([1.5, 1.2, 1.1])
M_c = interp_shape(A, M_c_vals)
cd_crit_vals = np.array([1, logistic_fn(mach)/0.92, logistic_fn(mach)/0.92])# Critical values
cd_c = interp_shape(A, cd_crit_vals)
gumbel_dist = lambda M: (cd_c - logistic_fn(M))* np.exp(-(M - M_c) / hw \
- np.exp(-(M - M_c) / hw)) / np.exp(-1)
cd = logistic_fn(mach) + gumbel_dist(mach)
# # Miller and Bailey (1979) - page12
# mmach = np.array([0.3, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.5, 2, 3, 4])
# cd_mm = np.array([-1, 0.55, 0.6, 0.65, 0.68, 0.75, 0.85, 0.98, 1.0, 0.97, 0.95, -1])
# print(logistic_fn(mm) + gumbel_dist(mm))
return cd, re, kn, mach
def dragcoefff(m, A):
    # Drag coefficient according to Carter et al. (2011)
# More to be used as upper and lower limits for cd
m = float(m)
# Cube (A = 2.18)
if m >= 1.150:
cd_cube = 2.1 * np.exp(-1.16 * (m+0.35)) - 6.5 * np.exp(-2.23 * (m+0.35)) + 1.67
elif m >= 0:
cd_cube = 0.60 * m**2 + 1.04
# Sphere (A = 1.21)
if m >= 0.722:
cd_sphere = 2.1 * np.exp(-1.2 * (m+0.35)) - 8.9 * np.exp(-2.2 * (m+0.35)) + 0.92
elif m >= 0:
cd_sphere = 0.45 * m**2 + 0.424
shapes = np.array([1.21, 2.18]); cd_shape = np.array([cd_sphere, cd_cube])
cd = interp1d(shapes, cd_shape, kind='linear', fill_value='extrapolate')(A)
return cd
# Test the drag coefficient:
if __name__ == '__main__':
import matplotlib.pyplot as plt
temp = 300.; rho_a = 1.225 # At sea-level
vel = np.logspace(1, 4, 1000)
A = np.linspace(1.21,2.7,5)
fig, axs = plt.subplots(2,1,figsize=(16,9))
# # Plot the original equations ==========================================
# mu_a = viscosity(temp) # Air Viscosity (Pa.s)
# mach = vel / SoS(temp) # Mach Number
# for a in A:
# re = reynolds(rho_a, vel, mu_a, 0.1) # Reynolds Number
# kn = knudsen(mach, re) # Knudsen Number
# cd = np.zeros(len(vel))
# for i in range(len(vel)):
# cd[i] = dragcoef(re[i], mach[i], kn[i], a)
# axs[0].plot(re, cd, '-', label=str(a))
# axs[1].plot(mach, cd, '-', label=str(a))
# # Plot the new equations =============================================
for a in A:
cd = np.zeros(len(vel))
re = np.zeros(len(vel))
kn = np.zeros(len(vel))
mach = np.zeros(len(vel))
for i, v in enumerate(vel):
[cd[i], re[i], kn[i], mach[i]] = \
dragcoeff(v, temp, rho_a, a)
axs[0].plot(re, cd, '-', label=str(a))
axs[1].plot(mach, cd, '-', label=str(a))
axs[0].set_xscale('log')#; axs[0].set_yscale('log')
axs[0].set_xlim([1e4,1e8]); axs[0].set_ylim([0,4])
axs[1].set_xlim([0,5]); axs[1].set_ylim([0,4])
axs[0].set_xlabel('Reynolds'); axs[0].set_ylabel('cd')
axs[1].set_xlabel('Mach No.'); axs[1].set_ylabel('cd')
axs[0].legend(); axs[1].legend(); axs[0].grid(); axs[1].grid()
fig2, axs = plt.subplots(2,1,figsize=(16,9))
mach = np.hstack((np.linspace(0.2,1.2,6), np.linspace(1.5,4,6)))
for m in mach:
v = m * SoS(temp)
RE = np.logspace(4,7,1000)
Rho_a = RE * viscosity(temp) / (v * 0.1)
cd = np.zeros(len(vel))
re = np.zeros(len(vel))
kn = np.zeros(len(vel))
mach = np.zeros(len(vel))
for i, rho_a in enumerate(Rho_a):
[cd[i], re[i], kn[i], mach[i]] = \
dragcoeff(v, temp, rho_a, 1.21)
axs[0].plot(re, cd, '-', label=str(m))
axs[1].plot(mach, cd, '.', label=str(m))
# Plot the new new equation ===========================================
# T = 300; dvisc = viscosity(T)
# mach = vel / SoS(T)
# for a in A:
# cd = np.zeros(len(mach))
# for i, m in enumerate(mach):
# cd[i] = dragcoefff(m, a)
# axs[1].plot(mach, cd, '-', label=str(a))
# re = reynolds(rho_a, vel, dvisc, 0.1)
# axs[0].plot(re, cd, '-', label=str(a))
axs[0].set_xscale('log')#; axs[0].set_yscale('log')
axs[0].set_xlim([1e4,1e8]); axs[0].set_ylim([0,4])
axs[1].set_xlim([0,5]); axs[1].set_ylim([0,4])
axs[0].set_xlabel('Reynolds'); axs[0].set_ylabel('cd')
axs[1].set_xlabel('Mach No.'); axs[1].set_ylabel('cd')
axs[0].legend(); axs[1].legend(); axs[0].grid(); axs[1].grid()
plt.show()
# # Finding the relation between thi and shape [thi(A)]:
# plt.figure()
# M = 10.; rho = 3500.; V = M / rho
# sa_eq = (36 * np.pi * V**2)**(1./3)
# A = np.linspace(1,3,100)
# a = np.sqrt(A * V**(2./3) / np.pi)
# c = (3 * V**(1./3)) / (4 * A)
# thi_elip = sa_eq / (4 * np.pi * ((a**3.2 + 2 * (a * c)**1.6) / 3)**(1./1.6))
# plt.plot(A, thi_elip, label='ellipsoid')
# a = np.sqrt(A * V**(2./3))
# c = V**(1./3) / A
# thi_cube = sa_eq / (2 * (a**2 + 2 * a * c))
# plt.plot(A, thi_cube, label='cube')
# r = (2 * V**(1./3)) / (np.pi * A)
# h = V / (np.pi * r**2)
# thi_cyli = sa_eq / (2 * np.pi * r * h + 2 * np.pi * r**2)
# plt.plot(A, thi_cyli, label='cylinder')
# plt.axvline(x=1.21)
# plt.xlabel('Shape'); plt.ylabel('thi')
# plt.legend(); plt.show()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import _env # noqa
from controller._base import AdminHandler
from misc._route import route
@route('/category')
class Index(AdminHandler):
def get(self):
self.render()
|
python
|
# -*- coding: utf-8 -*-
from pydantic.datetime_parse import parse_datetime
from .. import fhirtypes # noqa: F401
from .. import communication
def test_Communication_1(base_settings):
filename = (
base_settings["unittest_data_dir"] / "communication-example.canonical.json"
)
inst = communication.Communication.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Communication" == inst.resource_type
impl_Communication_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Communication" == data["resourceType"]
inst2 = communication.Communication(**data)
impl_Communication_1(inst2)
def impl_Communication_1(inst):
assert inst.category.coding[0].code == "Alert"
assert inst.category.coding[0].system == "http://acme.org/messagetypes"
assert inst.category.text == "Alert"
assert (
inst.extension[0].url
== "http://hl7.org/fhir/StructureDefinition/communication-reasonNotPerformed"
)
assert inst.extension[0].valueCodeableConcept.coding[0].code == "EIE"
assert (
inst.extension[0].valueCodeableConcept.coding[0].display == "entered in error"
)
assert (
inst.extension[0].valueCodeableConcept.coding[0].system
== "http://hl7.org/fhir/v3/ActReason"
)
assert inst.id == "communication-example"
assert inst.identifier[0].system == "urn:oid:1.3.4.5.6.7"
assert inst.identifier[0].type.text == "Paging System"
assert inst.identifier[0].value == "2345678901"
assert (
inst.payload[0].contentString
== "Patient 1 has a very high serum potassium value (7.2 mmol/L on 2014-Dec-12 at 5:55 pm)"
)
assert inst.payload[1].contentReference.reference == "Observation/643666aa12f"
assert inst.recipient[0].reference == "Practitioner/21"
assert inst.sender.reference == "Device/f001"
assert inst.sent == parse_datetime("2014-12-12T18:01:10-08:00")
assert inst.status == "suspended"
assert inst.subject.reference == "Patient/1"
assert inst.text.div == "<div>Patient has very high serum potassium</div>"
assert inst.text.status == "generated"
|
python
|
import argparse
import os
from util import util
import torch
import models
import numpy as np
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
## task
parser.add_argument('--task', type=str, default='Audio2Headpose', help='|Audio2Feature|Feature2Face|Full|')
## basic parameters
parser.add_argument('--model', type=str, default='audio2headpose', help='trained model')
parser.add_argument('--dataset_mode', type=str, default='audiovisual', help='chooses how datasets are loaded. [unaligned | aligned | single]')
parser.add_argument('--name', type=str, default='Audio2Headpose', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/', help='models are saved here')
# data parameters
        parser.add_argument('--FPS', type=int, default=60, help='video fps')
parser.add_argument('--sample_rate', type=int, default=16000, help='audio sample rate')
parser.add_argument('--audioRF_history', type=int, default=60, help='audio history receptive field length')
parser.add_argument('--audioRF_future', type=int, default=0, help='audio future receptive field length')
parser.add_argument('--feature_decoder', type=str, default='WaveNet', help='|WaveNet|LSTM|')
parser.add_argument('--loss', type=str, default='GMM', help='|GMM|L2|')
# dataset parameters
parser.add_argument('--dataset_names', type=str, default='name', help='chooses how datasets are loaded.')
parser.add_argument('--dataroot', type=str, default='path')
parser.add_argument('--frame_jump_stride', type=int, default=1, help='jump index in audio dataset.')
parser.add_argument('--num_threads', default=0, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--audio_encoder', type=str, default='APC', help='|CNN|LSTM|APC|')
parser.add_argument('--audiofeature_input_channels', type=int, default=80, help='input channels of audio features')
parser.add_argument('--frame_future', type=int, default=15)
parser.add_argument('--predict_length', type=int, default=5)
parser.add_argument('--audio_windows', type=int, default=2)
parser.add_argument('--time_frame_length', type=int, default=240, help='length of training frames in each iteration')
# APC parameters
parser.add_argument('--APC_hidden_size', type=int, default=512)
parser.add_argument('--APC_rnn_layers', type=int, default=3)
parser.add_argument("--APC_residual", action="store_true")
parser.add_argument('--APC_frame_history', type=int, default=60)
## network parameters
# audio2headpose wavenet
parser.add_argument('--A2H_wavenet_residual_layers', type=int, default=7, help='residual layer numbers')
parser.add_argument('--A2H_wavenet_residual_blocks', type=int, default=2, help='residual block numbers')
parser.add_argument('--A2H_wavenet_dilation_channels', type=int, default=128, help='dilation convolution channels')
parser.add_argument('--A2H_wavenet_residual_channels', type=int, default=128, help='residual channels')
parser.add_argument('--A2H_wavenet_skip_channels', type=int, default=256, help='skip channels')
parser.add_argument('--A2H_wavenet_kernel_size', type=int, default=2, help='dilation convolution kernel size')
parser.add_argument('--A2H_wavenet_use_bias', type=bool, default=True, help='whether to use bias in dilation convolution')
parser.add_argument('--A2H_wavenet_cond', type=bool, default=True, help='whether use condition input')
parser.add_argument('--A2H_wavenet_cond_channels', type=int, default=512, help='whether use condition input')
parser.add_argument('--A2H_wavenet_input_channels', type=int, default=12, help='input channels')
parser.add_argument('--A2H_GMM_ncenter', type=int, default=1, help='gaussian distribution numbers, 1 for single gaussian distribution')
parser.add_argument('--A2H_GMM_ndim', type=int, default=12, help='dimension of each gaussian, usually number of pts')
parser.add_argument('--A2H_GMM_sigma_min', type=float, default=0.03, help='minimal gaussian sigma values')
# additional parameters
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
parser.add_argument('--sequence_length', type=int, default=240, help='length of training frames in each iteration')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# save and return the parser
self.parser = parser
return opt
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
if opt.isTrain:
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
# set datasets
if self.isTrain:
opt.train_dataset_names = np.loadtxt(os.path.join(opt.dataroot,
opt.dataset_names,
                                                 opt.train_dataset_names), dtype=str).tolist()
if type(opt.train_dataset_names) == str:
opt.train_dataset_names = [opt.train_dataset_names]
opt.validate_dataset_names = np.loadtxt(os.path.join(opt.dataroot,
opt.dataset_names,
                                                    opt.validate_dataset_names), dtype=str).tolist()
if type(opt.validate_dataset_names) == str:
opt.validate_dataset_names = [opt.validate_dataset_names]
self.opt = opt
return self.opt
|
python
|
def qsort(arr):
if len(arr) <= 1:
return arr
pivot = arr.pop()
greater, lesser = [], []
for item in arr:
if item > pivot:
greater.append(item)
else:
lesser.append(item)
return qsort(lesser) + [pivot] + qsort(greater)
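# Minimal usage sketch (illustrative, not in the original snippet). Note that
# qsort pops the pivot off the list it receives, so the caller's list is mutated;
# pass a copy if the original must be preserved.
if __name__ == '__main__':
    data = [5, 1, 4, 1, 5, 9, 2, 6]
    print(qsort(list(data)))  # -> [1, 1, 2, 4, 5, 5, 6, 9]
    print(data)               # unchanged, because a copy was sorted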
|
python
|
from .MyMplCanvas import *
from .MyMplCanvas import MyMplCanvas
from .DataCanvas import *
from .FitCanvas import *
from .KineticsCanvas import *
from .SpectrumCanvas import *
|
python
|
import pytest
import pathlib
from align.cell_fabric import Canvas, Pdk, Wire, Via
mydir = pathlib.Path(__file__).resolve().parent
pdkfile = mydir.parent.parent / 'pdks' / 'FinFET14nm_Mock_PDK' / 'layers.json'
@pytest.fixture
def setup():
p = Pdk().load(pdkfile)
c = Canvas(p)
c.addGen( Wire( nm='m1', layer='M1', direction='v', clg=None, spg=None))
c.addGen( Wire( nm='m2', layer='M2', direction='h', clg=None, spg=None))
c.addGen( Via( nm="v1", layer="V1", h_clg=None, v_clg=None))
m1 = p['M1']
v1 = p['V1']
m2 = p['M2']
assert 'Width' in m1
assert 'Width' in m2
assert 'VencA_L' in v1
assert 'VencA_H' in v1
assert 'VencP_L' in v1
assert 'VencP_H' in v1
assert 'WidthX' in v1
assert 'WidthY' in v1
assert 'MinL' in m1
assert 'MinL' in m2
assert m1['MinL'] <= 200
assert m2['MinL'] <= 200
def cr( x, y):
assert x%2 == 0
assert y%2 == 0
return [ -x//2, -y//2, x//2, y//2]
c.terminals = [{'layer': 'M1', 'netName': 'x', 'rect': cr( m1['Width'], v1['WidthY']+2*v1['VencA_L'])},
{'layer': 'M2', 'netName': 'x', 'rect': cr( v1['WidthX']+2*v1['VencA_H'], m2['Width'])},
{'layer': 'V1', 'netName': 'x', 'rect': cr( v1['WidthX'], v1['WidthY'])}]
return c
def test_enclosure_ok(setup):
c = setup
c.terminals[0]['rect'][1] -= 200
c.terminals[1]['rect'][0] -= 200
c.gen_data()
assert c.drc.num_errors == 0
def test_enclosure_fail_right(setup):
c = setup
c.terminals[0]['rect'][1] -= 200
c.terminals[1]['rect'][0] -= 200
c.terminals[1]['rect'][2] -= 1
c.gen_data()
assert c.drc.num_errors == 1
def test_enclosure_fail_right_top(setup):
c = setup
c.terminals[0]['rect'][1] -= 200
c.terminals[1]['rect'][0] -= 200
c.terminals[1]['rect'][2] -= 1
c.terminals[0]['rect'][3] -= 1
c.gen_data()
assert c.drc.num_errors == 2
def test_enclosure_fail_left(setup):
c = setup
c.terminals[0]['rect'][3] += 200
c.terminals[1]['rect'][2] += 200
c.terminals[1]['rect'][0] += 1
c.gen_data()
assert c.drc.num_errors == 1
def test_enclosure_fail_bottom(setup):
c = setup
c.terminals[0]['rect'][3] += 200
c.terminals[1]['rect'][2] += 200
c.terminals[0]['rect'][1] += 1
c.gen_data()
assert c.drc.num_errors == 1
|
python
|
from babelsubs.generators.base import register, BaseGenerator
class DFXPGenerator(BaseGenerator):
"""
Since the internal storage is already in dfxp, the generator is just
a small shim to keep the public interface between all generators
regular.
"""
file_type = ['dfxp', 'xml' ]
def __init__(self, subtitle_set, line_delimiter=u'\n', language=None):
super(DFXPGenerator, self).__init__(subtitle_set, line_delimiter,
language)
def __unicode__(self):
return self.subtitle_set.to_xml()
@classmethod
def generate(cls, subtitle_set, language=None):
return unicode(cls(subtitle_set=subtitle_set, language=language))
register(DFXPGenerator)
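# Usage sketch (illustrative; assumes a babelsubs SubtitleSet instance named
# subtitle_set is available). Because the internal storage is already DFXP,
# generation just serialises it back to XML:
#
#     dfxp_text = DFXPGenerator.generate(subtitle_set, language='en')
#
# which is equivalent to unicode(DFXPGenerator(subtitle_set, language='en')).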
|
python
|
from ..iorw import GCSHandler
class MockGCSFileSystem(object):
def __init__(self):
self._file = MockGCSFile()
def open(self, *args, **kwargs):
return self._file
def ls(self, *args, **kwargs):
return []
class MockGCSFile(object):
def __enter__(self):
self._value = 'default value'
return self
def __exit__(self, *args, **kwargs):
pass
def read(self):
return self._value
def write(self, data):
pass
def test_gcs_read(mocker):
mocker.patch('papermill.iorw.GCSFileSystem', MockGCSFileSystem)
gcs_handler = GCSHandler()
client = gcs_handler._get_client()
assert gcs_handler.read('gs://bucket/test.ipynb') == 'default value'
# Check that client is only generated once
assert client is gcs_handler._get_client()
def test_gcs_write(mocker):
mocker.patch('papermill.iorw.GCSFileSystem', MockGCSFileSystem)
gcs_handler = GCSHandler()
client = gcs_handler._get_client()
gcs_handler.write('new value', 'gs://bucket/test.ipynb')
# Check that client is only generated once
assert client is gcs_handler._get_client()
def test_gcs_listdir(mocker):
mocker.patch('papermill.iorw.GCSFileSystem', MockGCSFileSystem)
gcs_handler = GCSHandler()
client = gcs_handler._get_client()
gcs_handler.listdir('testdir')
# Check that client is only generated once
assert client is gcs_handler._get_client()
|
python
|
import boto3
exceptions = boto3.client('sdb').exceptions
AttributeDoesNotExist = exceptions.AttributeDoesNotExist
DuplicateItemName = exceptions.DuplicateItemName
InvalidNextToken = exceptions.InvalidNextToken
InvalidNumberPredicates = exceptions.InvalidNumberPredicates
InvalidNumberValueTests = exceptions.InvalidNumberValueTests
InvalidParameterValue = exceptions.InvalidParameterValue
InvalidQueryExpression = exceptions.InvalidQueryExpression
MissingParameter = exceptions.MissingParameter
NoSuchDomain = exceptions.NoSuchDomain
NumberDomainAttributesExceeded = exceptions.NumberDomainAttributesExceeded
NumberDomainBytesExceeded = exceptions.NumberDomainBytesExceeded
NumberDomainsExceeded = exceptions.NumberDomainsExceeded
NumberItemAttributesExceeded = exceptions.NumberItemAttributesExceeded
NumberSubmittedAttributesExceeded = exceptions.NumberSubmittedAttributesExceeded
NumberSubmittedItemsExceeded = exceptions.NumberSubmittedItemsExceeded
RequestTimeout = exceptions.RequestTimeout
TooManyRequestedAttributes = exceptions.TooManyRequestedAttributes
|
python
|
import re
from basic import *
from amino_acids import amino_acids
from tcr_distances_blosum import blosum
from paths import path_to_db
from translation import get_translation
gap_character = '.'
verbose = False
#verbose = ( __name__ == '__main__' )
## look at imgt cdrs
## http://www.imgt.org/IMGTScientificChart/Nomenclature/IMGT-FRCDRdefinition.html
## imgt_cdr_positions = [ ( 27, 38 ), ( 56, 65 ) ] ## 1-indexed
## these are 1-indexed !!!
##
## note that the TRAV mouse alignment seems to be shifted by 1 relative to IMGT for FR1 and FR2 and by 2 for C104 (->106)
## looks like the insertion happens around TRAV-alpha alignment position 86
## the other three agree at anchor positions C23, W41, C104
##
## indexed by id[2]
pb_cdr_positions = { 'mouse': {'A': [ ( 28, 39 ), ( 57, 66 ), (82, 88) ],
'D': [ ( 28, 39 ), ( 57, 66 ), (82, 88) ], #since we shift
'B': [ ( 27, 38 ), ( 56, 65 ), (81, 86) ],
'G': [ ( 27, 38 ), ( 56, 65 ), (81, 86) ] },
'human': {'A': [ ( 27, 38 ), ( 56, 65 ), (81, 86) ],
'B': [ ( 27, 38 ), ( 56, 65 ), (81, 86) ],
'G': [ ( 27, 38 ), ( 56, 65 ), (81, 86) ],
'D': [ ( 27, 38 ), ( 56, 65 ), (81, 86) ] },
}
CWC_positions = { 'mouse': { 'A': [24,42,106], 'D': [24,42,106], 'B': [23,41,104], 'G': [23,41,104] }, # use id[2]
'human': { 'A': [23,41,104], 'D': [23,41,104], 'B': [23,41,104], 'G': [23,41,104] } } #D-shift
## 1-indexed:
extra_alignment_columns = { 'mouse':{'A':[9,86],'B':[],'G':[],'D':[9,86] }, ## 1-indexed
'human':{'A':[],'B':[],'G':[],'D':[] } }
# core_positions_generic_1indexed = [
# 21, 23, 25, ## 23 is C
# 39, 41, ## 41 is W
# 53, 54, 55,
# 78, ## maybe also 80?
# 89, ## 89 is L
# 102, 103, 104 ## 104 is C
# ]
outfields = "id organism chain region nucseq frame aligned_protseq cdr_columns cdrs".split()
cdrs_sep = ';'
outfile = 'db/gammadelta_db.tsv'
out = open(outfile,'w')
out.write('\t'.join( outfields )+'\n' )
for organism in [ 'mouse','human' ]:
## the alignments:
big_fasta = {}
fastadir = '/home/pbradley/tcr_scripts/db/genedb_090617/'
all_functional = {}
for fasta_tag, big_fasta_file in \
[ ['align',fastadir+'IMGTGENEDB-ReferenceSequences.fasta-AA-WithGaps-F+ORF+inframeP'],
['prot',fastadir+'IMGTGENEDB-ReferenceSequences.fasta-AA-WithoutGaps-F+ORF+inframeP'],
['nuc',fastadir+'IMGTGENEDB-ReferenceSequences.fasta-nt-WithoutGaps-F+ORF+allP']]:
assert exists( big_fasta_file )
desired_species = {'human':'Homo sapiens','mouse':'Mus musculus'}[ organism ]
desired_regions = ['V-REGION','J-REGION','D-REGION']
desired_prefix = 'TR'
fasta = {}
for line in open( big_fasta_file,'rU'):
if line[0] == '>':
id = ''
l = line[1:-1].split('|')
species = l[2]
functional = l[3]
region = l[4]
prefix = l[1][:2]
if desired_species in species and region in desired_regions and prefix == desired_prefix:
id = l[1]
assert region[0] == id[3]
fasta[id] = ''
if id in all_functional:
assert functional == all_functional[id]
else:
all_functional[id] = functional
else:
id=''
if False and desired_species in species and region not in desired_regions:
print 'not region:',region,fasta_tag
if False and desired_species in species and prefix != 'TR':
print 'not prefix:',prefix,fasta_tag
else:
if id:
fasta[id] += line.split()[0]
big_fasta[fasta_tag] = fasta
print 'num_ids:',fasta_tag,len(fasta.keys())
align_fasta = big_fasta['align']
prot_fasta = big_fasta['prot']
nuc_fasta = big_fasta['nuc']
for chain in 'AB':
## get relevant V regions
v_ids = []
for id in align_fasta:
if id[3] == 'V':
if chain == 'A' and id[2] == 'G' or chain == 'B' and id[2] in 'AD':
v_ids.append( id )
maxlen = max( ( len(align_fasta[x]) for x in v_ids ) )
for id in sorted(v_ids):
abdg = id[2]
alseq = align_fasta[id]
alseq += gap_character*( maxlen - len(alseq))
## for mouse delta, adjust alignment to match mouse alpha
if organism=='mouse' and abdg=='D':
pos1=extra_alignment_columns[organism]['A'][0] - 1
pos2=extra_alignment_columns[organism]['A'][1] - 1
alseq = alseq[:pos1]+gap_character+alseq[pos1:]
alseq = alseq[:pos2]+gap_character+alseq[pos2:]
assert alseq.endswith(gap_character+gap_character)
alseq = alseq[:-2]
assert len(alseq) == maxlen
cwc = ''.join( ( alseq[x-1] for x in CWC_positions[organism][abdg] ) )
if cwc != 'CWC' and len(cwc.replace(gap_character,''))==3:
print 'funny CWC:',cwc,alseq, organism, id, all_functional[id]
extraseq = ''.join( ( alseq[x-1] for x in extra_alignment_columns[organism][abdg] ) )
if extraseq and extraseq.replace(gap_character,'') :
print 'extra:',extraseq, organism, id, all_functional[id]
protseq = prot_fasta[id]
nucseq = nuc_fasta[id]
assert protseq == alseq.replace(gap_character,'')
#print ' ',protseq
myframe = -1
for i in range(3):
tseq = get_translation( nucseq, '+{}'.format(i+1) )[0]
#print i,tseq
if protseq in tseq:
myframe = i + 3*tseq.index(protseq)
#print id, myframe
if myframe==-1:
print 'bad frame:',id, myframe,protseq ### NOTE SKIPPING THIS ONE WITH A BAD PROTEIN SEQUENCE
continue
assert myframe >= 0 and myframe<3
cpos = CWC_positions[organism][abdg][-1] # 1-indexed
cdr_columns = pb_cdr_positions[organism][abdg] + [[cpos,maxlen]] ## all 1-indexed
cdrs = [ alseq[x[0]-1:x[1]] for x in cdr_columns ]
region = 'V'
outl = { 'id': id,
'organism': organism,
#'functional': 0 if Ntrunc or Ctrunc else 1,
'chain': chain,
'region': region,
'nucseq': nucseq,
'aligned_protseq': alseq,
'frame': '+{}'.format( myframe+1 ), ## convention for frame is 1-indexed
'cdr_columns':cdrs_sep.join( '{}-{}'.format(x[0],x[1]) for x in cdr_columns ),
'cdrs': cdrs_sep.join( cdrs ),
}
out.write( make_tsv_line( outl, outfields )+'\n' )
## now the J regions
j_ids = []
for id in align_fasta:
if id[3] == 'J':
if chain == 'A' and id[2] == 'G' or chain == 'B' and id[2] == 'D':
j_ids.append( id )
bounds = {}
for id in j_ids:
jseq = prot_fasta[id]
#print 'jseq:',organism, chain, jseq, id
m = re.search( 'F[AG].G', jseq )
assert m
txt = m.group(0)
assert jseq.count(txt)==1
num_in = jseq.index(txt)+1 # in the CDR3
num_out = len(jseq) - num_in
bounds[id] = [ num_in, num_out ]
maxin = max( ( x[0] for x in bounds.values()))
maxout = max( ( x[1] for x in bounds.values()))
#maxlen = max( ( len(align_fasta[x]) for x in v_ids ) )
for id in j_ids:
jseq = prot_fasta[id]
num_in,num_out = bounds[id]
alseq = gap_character*(maxin -num_in) + jseq + gap_character*(maxout-num_out)
print 'jseq:',organism, chain, alseq, id
protseq = prot_fasta[id]
nucseq = nuc_fasta[id]
assert protseq == alseq.replace(gap_character,'')
#print ' ',protseq
myframe = -1
for i in range(3):
tseq = get_translation( nucseq, '+{}'.format(i+1) )[0]
#print i,tseq
if protseq in tseq:
myframe = i + 3*tseq.index(protseq)
#print id, myframe
if myframe==-1:
print 'bad frame:',id, myframe,protseq ### NOTE SKIPPING THIS ONE WITH A BAD PROTEIN SEQUENCE
continue
assert myframe >= 0 and myframe<3
cdr_columns = [[1,maxin]]
cdrs = [ alseq[:maxin]]
region = 'J'
outl = { 'id': id,
'organism': organism,
#'functional': 0 if Ntrunc or Ctrunc else 1,
'chain': chain,
'region': region,
'nucseq': nucseq,
'aligned_protseq': alseq,
'frame': '+{}'.format( myframe+1 ), ## convention for frame is 1-indexed
'cdr_columns':cdrs_sep.join( '{}-{}'.format(x[0],x[1]) for x in cdr_columns ),
'cdrs': cdrs_sep.join( cdrs ),
}
out.write( make_tsv_line( outl, outfields )+'\n' )
if chain == 'B':
## now the D regions
d_ids = []
for id in align_fasta:
if id[2:4] == 'DD':
d_ids.append( id )
maxlen = max( ( len(prot_fasta[x]) for x in d_ids ) )
#maxlen = max( ( len(align_fasta[x]) for x in v_ids ) )
for id in d_ids:
protseq = prot_fasta[id]
alseq = protseq + gap_character*(maxlen-len(protseq))
nucseq = nuc_fasta[id]
assert protseq == alseq.replace(gap_character,'')
#print ' ',protseq
myframe = -1
for i in range(3):
tseq = get_translation( nucseq, '+{}'.format(i+1) )[0]
#print i,tseq
if protseq in tseq:
myframe = i + 3*tseq.index(protseq)
#print id, myframe
if myframe==-1:
print 'bad frame:',id, myframe,protseq ### NOTE SKIPPING THIS ONE WITH A BAD PROTEIN SEQUENCE
continue
assert myframe >= 0 and myframe<3
region = 'D'
outl = { 'id': id,
'organism': organism,
#'functional': 0 if Ntrunc or Ctrunc else 1,
'chain': chain,
'region': region,
'nucseq': nucseq,
'aligned_protseq': alseq,
'frame': '+{}'.format( myframe+1 ), ## convention for frame is 1-indexed
'cdr_columns':'',
'cdrs': ''
}
out.write( make_tsv_line( outl, outfields )+'\n' )
out.close()
|
python
|
token = 'NDY2ODc4MzY3MTg3MDc1MDcz.DiqkoA.JVgJqYhnL6yyCnKeAnCHx5OvR3E' #Put Your bots token here
prefix = '*' #put prefix here
link = 'https://discordapp.com/api/oauth2/authorize?client_id=466878367187075073&permissions=0&scope=bot' #put bot invite link here
ownerid = '329526048553172992' #put your id here
|
python
|
import numpy as np
import torch
from torch.utils import data
from kaldi_io import read_mat
import h5py
# PyTorch Dataset
class SpoofDatsetEval(data.Dataset):
''' Evaluation, no label
'''
def __init__(self, scp_file):
with open(scp_file) as f:
temp = f.readlines()
content = [x.strip() for x in temp]
self.key_dic = {index: i.split()[0]
for (index, i) in enumerate(content)}
self.ark_dic = {index: i.split()[1]
for (index, i) in enumerate(content)}
def __len__(self):
return len(self.key_dic.keys())
def __getitem__(self, index):
utt_id = self.key_dic[index]
X = np.expand_dims(read_mat(self.ark_dic[index]), axis=0)
return utt_id, X
class SpoofLeaveOneOutDatset(data.Dataset):
'''
Leave out
AA (for PA)
SS_1 (for LA)
during training, to test how NN generalizes to new attack condition
classification label becomes:
multi-class classification for PA: AA, AB, AC, BA, BB, BC, CA, CB, CC --> 10 classes
(bonafide: 0), (AB: 1), (AC: 2), (BA: 3), (BB: 4), (BC: 5),
(CA: 6), (CB: 7), (CC: 8) +- (AA:9)
multi-class classification for LA: SS_1, SS_2, SS_4, US_1, VC_1, VC_4 --> 7 classes
(bonafide: 0), (SS_2: 1), (SS_4: 2), (US_1: 3), (VC_1: 4), (VC_4: 5) +- (SS_1: 6)
'''
def __init__(self, scp_file, utt2index_file, mode='train', condition='PA'):
with open(scp_file) as f:
temp = f.readlines()
content = [x.strip() for x in temp]
self.key_dic = {index: i.split()[0]
for (index, i) in enumerate(content)}
self.ark_dic = {index: i.split()[1]
for (index, i) in enumerate(content)}
with open(utt2index_file) as f:
temp = f.readlines()
self.label_dic = {
index: int(
x.strip().split()[1]) for (
index,
x) in enumerate(temp)}
for index, label in self.label_dic.items():
if label == 1:
if mode == 'train': # remove label AA (for PA) or SS_1 (for LA)
self.key_dic.pop(index)
elif mode == 'test':
if condition == 'PA':
self.label_dic[index] = 9
elif condition == 'LA':
self.label_dic[index] = 6
if label > 1:
self.label_dic[index] = label - 1
counter = 0
self.mapping = {}
        for index in self.key_dic.keys():  # because of the popping, indexing is messed up
self.mapping[counter] = index
counter += 1
def __len__(self):
return len(self.mapping.keys())
def __getitem__(self, counter):
index = self.mapping[counter]
utt_id = self.key_dic[index]
X = np.expand_dims(read_mat(self.ark_dic[index]), axis=0)
y = self.label_dic[index]
return utt_id, X, y
class SpoofDatsetSystemID3(data.Dataset):
'''
use hdf5 file instead of ark file to access feats
'''
def __init__(self, raw, scp_file, utt2index_file):
self.h5f = h5py.File(raw, 'r')
with open(scp_file) as f:
temp = f.readlines()
content = [x.strip() for x in temp]
self.key_dic = {index: i.split()[0]
for (index, i) in enumerate(content)}
with open(utt2index_file) as f:
temp = f.readlines()
self.label_dic = {
index: int(
x.strip().split()[1]) for (
index,
x) in enumerate(temp)}
assert len(self.key_dic.keys()) == len(self.label_dic.keys())
def __len__(self):
return len(self.key_dic.keys())
def __getitem__(self, index):
utt_id = self.key_dic[index]
X = np.expand_dims(self.h5f[utt_id][:], axis=0)
y = self.label_dic[index]
return utt_id, X, y
class SpoofDatsetSystemID2(data.Dataset):
'''
read all data onto the disc instead of reading it on the fly
'''
def __init__(self, scp_file, utt2index_file):
with open(scp_file) as f:
temp = f.readlines()
content = [x.strip() for x in temp]
self.key_dic = {index: i.split()[0]
for (index, i) in enumerate(content)}
self.feat_dic = {index: np.expand_dims(read_mat(i.split()[1]), axis=0)
for (index, i) in enumerate(content)}
with open(utt2index_file) as f:
temp = f.readlines()
self.label_dic = {
index: int(
x.strip().split()[1]) for (
index,
x) in enumerate(temp)}
assert len(self.key_dic.keys()) == len(self.label_dic.keys())
def __len__(self):
return len(self.key_dic.keys())
def __getitem__(self, index):
utt_id = self.key_dic[index]
X = self.feat_dic[index]
y = self.label_dic[index]
return utt_id, X, y
class SpoofDatsetSystemID(data.Dataset):
''' multi-class classification for PA: AA, AB, AC, BA, BB, BC, CA, CB, CC --> 10 classes
(bonafide: 0), (AA: 1), (AB: 2), (AC: 3), (BA: 4), (BB: 5), (BC: 6),
(CA: 7), (CB: 8), (CC: 9)
multi-class classification for LA: SS_1, SS_2, SS_4, US_1, VC_1, VC_4 --> 7 classes
(bonafide: 0), (SS_1: 1), (SS_2: 2), (SS_4: 3), (US_1: 4), (VC_1: 5), (VC_4: 6)
if leave_one_out:
for pa: leave out the class with label == 9
for la: leave out the class with label == 6
'''
def __init__(
self,
scp_file,
utt2index_file,
binary_class,
leave_one_out=False):
with open(scp_file) as f:
temp = f.readlines()
content = [x.strip() for x in temp]
self.key_dic = {index: i.split()[0]
for (index, i) in enumerate(content)}
self.ark_dic = {index: i.split()[1]
for (index, i) in enumerate(content)}
with open(utt2index_file) as f:
temp = f.readlines()
temp_dic = {
index: int(
x.strip().split()[1]) for (
index,
x) in enumerate(temp)}
# leave one out
self.all_idx = {}
counter = 0
for index, label in temp_dic.items():
if leave_one_out:
if label != 6:
self.all_idx[counter] = index
counter += 1
else:
self.all_idx[counter] = index
counter += 1
if binary_class:
self.label_dic = {
index: 0 if orig_label == 0 else 1 for (
index, orig_label) in temp_dic.items()}
else:
self.label_dic = temp_dic
if not leave_one_out:
assert len(self.all_idx.keys()) == len(self.label_dic.keys())
def __len__(self):
return len(self.all_idx.keys())
def __getitem__(self, counter):
index = self.all_idx[counter]
utt_id = self.key_dic[index]
X = np.expand_dims(read_mat(self.ark_dic[index]), axis=0)
y = self.label_dic[index]
return utt_id, X, y
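# Usage sketch (illustrative, not part of the original module): the datasets above
# plug straight into a torch DataLoader, assuming fixed-length feature matrices so
# the default collate works. The file paths below are hypothetical.
#
#     from torch.utils.data import DataLoader
#     train_set = SpoofDatsetSystemID('data/train/feats.scp', 'data/train/utt2index',
#                                     binary_class=False)
#     train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
#     for utt_id, X, y in train_loader:
#         ...  # X: (batch, 1, frames, feat_dim); y: system-ID labels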
|
python
|
# lets create a class to hold our category data
class Category:
def __init__(self, name):
self.name = name
def __str__(self):
return f"No Products in {self.name}"
#How can you represent this class data as a string? My guess is __str__
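# A quick check of the question above (illustrative): __str__ is what str() and
# print() use, so printing an instance shows the message built in __str__. For the
# REPL/debugger representation you would additionally define __repr__.
if __name__ == "__main__":
    books = Category("Books")
    print(books)       # -> No Products in Books
    print(str(books))  # same string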
|
python
|
import ConfigParser
import os
from StringIO import StringIO
homedir = os.getenv('APPDATA' if os.name.lower() == 'nt' else 'HOME', None)
default_cfg = StringIO("""
[visual]
background = black
foreground = white
height = 800
width = 800
cortex = classic
default_view = lateral
[overlay]
min_thresh = 2.0
max_thresh = robust_max
[options]
logging_level = INFO
subjects_dir =
""")
config = ConfigParser.ConfigParser()
config.readfp(default_cfg)
config.read([os.path.expanduser('~/.surfer.cfg'), 'surfer.cfg'])
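# Example lookups (illustrative): values come from ~/.surfer.cfg or ./surfer.cfg
# when present, otherwise from the defaults defined above, e.g.
#
#     config.get('visual', 'background')      # 'black' unless overridden
#     config.get('options', 'logging_level')  # 'INFO' unless overridden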
|
python
|
import matplotlib.pyplot as plt
import numpy as np
def plot_bar_from_counter(counter, ax=None):
""""
This function creates a bar plot from a counter.
:param counter: This is a counter object, a dictionary with the item as the key
and the frequency as the value
:param ax: an axis of matplotlib
:return: the axis wit the object in it for further formating if necessary
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
    frequencies = list(counter.values())
    names = list(counter.keys())
# Get the histogram
x_coordinates = np.arange(len(counter))
ax.bar(x_coordinates, frequencies, align='center')
# Format the graph
ax.xaxis.set_major_locator(plt.FixedLocator(x_coordinates))
ax.xaxis.set_major_formatter(plt.FixedFormatter(names))
ax.set_ylim([0, 1.1 * max(frequencies)])
return ax
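# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    from collections import Counter
    letters = Counter('abracadabra')  # a: 5, b: 2, r: 2, c: 1, d: 1
    plot_bar_from_counter(letters)
    plt.show()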
|
python
|
from django import forms
class LoginCustomerForm(forms.Form):
email_address = forms.CharField(label='Email address', max_length=100)
password = forms.CharField(label='Password', max_length=100, widget=forms.PasswordInput)
class RegisterCustomerForm(forms.Form):
first_name = forms.CharField(label='First name', max_length=100)
last_name = forms.CharField(label='Last name', max_length=100)
username = forms.CharField(label='Username', max_length=100)
email_address = forms.CharField(label='Email address', max_length=100)
password = forms.CharField(label='Password', max_length=100, widget=forms.PasswordInput)
class SelectProductQuantity(forms.Form):
def __init__(self, quantity, *args, **kwargs):
if type(quantity) == int:
super(SelectProductQuantity, self).__init__(*args, **kwargs)
self.fields['quantity'] = forms.ChoiceField(choices=tuple([(i+1, i+1) for i in range(quantity)]))
else:
int_quantity = int(quantity['quantity'])
super(SelectProductQuantity, self).__init__(*args, **kwargs)
self.fields['quantity'] = forms.ChoiceField(choices=tuple([(i+1, i+1) for i in range(int_quantity)]))
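# Usage sketch (illustrative): the first positional argument caps the quantity
# choices, so a product with 3 items in stock gets a 1..3 dropdown. 'request' below
# is a hypothetical Django request object.
#
#     form = SelectProductQuantity(3)                               # unbound, choices 1..3
#     form = SelectProductQuantity({'quantity': '3'}, request.POST) # bound, same choices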
|
python
|
import datetime
import textwrap
import uuid
from app_ccf.models import Application, StatusUpdate
from app_ccf.models import VoucherCode, VoucherCodeBatch, VoucherCodeCheckStatus
from shared.test_utils import DEFAULT_CCF_APP_FIELDS
from . import base_test
from . import utils
class ApplicationTests(base_test.CcfBaseTest):
def setUp(self):
super().setUp()
self.fields = DEFAULT_CCF_APP_FIELDS.copy()
self.application = Application(
**self.fields,
)
self.application.status = Application.ApplicationStatus.NEEDS_REVIEW
self.application.save()
def test_save_secondTime_doesNotUpdateSubmittedDate(self):
old_time = self.application.submitted_date
self.application.save()
new_time = self.application.submitted_date
self.assertEqual(new_time, old_time)
def test_save_changedStatus_updatesStatusLastModified(self):
old_time = self.application.status_last_modified
# Different from original status
self.application.status = Application.ApplicationStatus.APPROVED
self.application.save()
new_time = self.application.status_last_modified
self.assertGreater(new_time, old_time)
def test_save_unchangedStatus_doesNotUpdateStatusLastModified(self):
old_time = self.application.status_last_modified
# Same as original status
self.application.status = Application.ApplicationStatus.NEEDS_REVIEW
self.application.save()
new_time = self.application.status_last_modified
self.assertEqual(new_time, old_time)
def test_bulk_update_status(self):
StatusUpdate.objects.all().delete()
apps = [
Application(**self.fields) for _ in range(100)
]
Application.objects.bulk_create(apps)
self.assertEqual(0, len(StatusUpdate.objects.all()))
Application.bulk_update_status(
apps[:50], Application.ApplicationStatus.NEEDS_REVIEW)
self.assertEqual(
Application.ApplicationStatus.NEEDS_REVIEW, apps[49].status)
self.assertEqual(
Application.ApplicationStatus.SUBMITTED, apps[50].status)
self.assertEqual(50, len(StatusUpdate.objects.all()))
# Update all applications to NEEDS_REVIEW even for those whose status
# is already NEEDS_REVIEW. Make sure only 50 new StatusUpdate objects
# were added.
Application.bulk_update_status(
apps, Application.ApplicationStatus.NEEDS_REVIEW)
self.assertEqual(
Application.ApplicationStatus.NEEDS_REVIEW, apps[50].status)
self.assertEqual(100, len(StatusUpdate.objects.all()))
def test_get_full_address(self):
self.application.addr1 = '111 E 11th St'
self.application.addr2 = 'APT 1'
self.application.city = 'New York'
self.application.state = 'NY'
self.application.zip_code = '11111'
expected_full_address = textwrap.dedent("""\
111 E 11th St
APT 1
New York, NY 11111""")
self.assertEqual(expected_full_address,
self.application.get_full_address())
def test_get_full_address_2ndAddressLineBeingEmpty(self):
self.application.addr1 = '111 E 11th St'
self.application.city = 'New York'
self.application.state = 'NY'
self.application.zip_code = '11111'
expected_full_address = textwrap.dedent("""\
111 E 11th St
New York, NY 11111""")
self.assertEqual(expected_full_address,
self.application.get_full_address())
class VoucherCodeTests(base_test.CcfBaseTest):
VALID_CODE = "VALID1234"
EXPIRED_CODE = "EXPIRED12"
USED_CODE = "USED12345"
INVALIDATED_CODE = "INVALID12"
def setUp(self):
super().setUp()
today = datetime.datetime.now(datetime.timezone.utc)
tomorrow = today + datetime.timedelta(days=1)
yesterday = today - datetime.timedelta(days=1)
batch_fields = {
'num_codes': 1,
'code_length': 9,
'base_amount': 400,
'created': yesterday,
}
VoucherCode.objects.create(
code=self.VALID_CODE,
added_amount=400,
batch=VoucherCodeBatch.objects.create(
**batch_fields,
expiration_date=tomorrow))
VoucherCode.objects.create(
code=self.USED_CODE,
added_amount=400,
batch=VoucherCodeBatch.objects.create(
**batch_fields,
expiration_date=tomorrow),
application=Application.objects.create(
**DEFAULT_CCF_APP_FIELDS))
VoucherCode.objects.create(
code=self.EXPIRED_CODE,
added_amount=400,
batch=VoucherCodeBatch.objects.create(
**batch_fields,
expiration_date=yesterday))
VoucherCode.objects.create(
code=self.INVALIDATED_CODE,
added_amount=400,
batch=VoucherCodeBatch.objects.create(
**batch_fields,
expiration_date=tomorrow),
is_active=False)
def test_verify_code_validCode(self):
self.assertEqual(
VoucherCode.verify_code(self.VALID_CODE),
VoucherCodeCheckStatus.SUCCESS)
def test_verify_code_codeDoesNotExist(self):
self.assertEqual(
VoucherCode.verify_code("FAKECODE"),
VoucherCodeCheckStatus.CODE_NOT_FOUND)
def test_verify_code_codeAlreadyUsed(self):
self.assertEqual(
VoucherCode.verify_code(self.USED_CODE),
VoucherCodeCheckStatus.CODE_ALREADY_USED)
def test_verify_code_codeInvalidated(self):
self.assertEqual(
VoucherCode.verify_code(self.INVALIDATED_CODE),
VoucherCodeCheckStatus.CODE_INVALIDATED)
def test_verify_code_codeExpired(self):
self.assertEqual(
VoucherCode.verify_code(self.EXPIRED_CODE),
VoucherCodeCheckStatus.CODE_EXPIRED)
|
python
|
# coding: utf-8
from binance_api.servicebase import ServiceBase
import json
from binance.client import Client
from binance.exceptions import BinanceAPIException
class Order(ServiceBase):
def _get_filter(self, symbol, filter_name):
if symbol is None: return None
exinfo = self.binance.exchange_info
if symbol not in exinfo: return None
return exinfo[symbol][filter_name]
def tick_price(self, symbol_or_pair = None):
filters = self._get_filter(self.to_symbol(symbol_or_pair), "price")
if filters is None: return None
return float(filters["tickSize"])
def min_create_amount(self, symbol_or_pair = None):
filters = self._get_filter(self.to_symbol(symbol_or_pair), "lot")
if filters is None: return None
return float(filters["minQty"])
def tick_amount(self, symbol_or_pair = None):
filters = self._get_filter(self.to_symbol(symbol_or_pair), "lot")
if filters is None: return None
return float(filters["stepSize"])
def create(self, params = {}):
is_test_mode = "test_mode" in self.binance.options and self.binance.options["test_mode"] is True
client = self.client
# convert param from cc to binance style
# SYMBOL
symbol = self.pair_to_symbol(params["pair"])
# SIDE
# ORDER_TYPE (limit or market)
side = None
is_limit_order = None
order_type_str = params["order_type"].lower()
if order_type_str == "buy":
side = Client.SIDE_BUY
is_limit_order = True
elif order_type_str == "sell":
side = Client.SIDE_SELL
is_limit_order = True
elif order_type_str == "market_buy":
side = Client.SIDE_BUY
is_limit_order = False
elif order_type_str == "market_sell":
side = Client.SIDE_SELL
is_limit_order = False
else:
return self._get_fail_retstr("order_type:" + params["order_type"] + " is not supported")
# quantity
quantity = params["amount"]
try:
def make_price_str(price_inst):
if isinstance(price_inst, str):
price = price_inst
else:
price='%.8f' % (price_inst)
return price
price = None
if is_limit_order:
price = make_price_str(params['rate'])
# place an order
if is_test_mode == False:
# make valid order
if price is not None:
ret = client.create_order(
symbol=symbol,
side=side,
timeInForce="GTC",
type=Client.ORDER_TYPE_LIMIT if is_limit_order else Client.ORDER_TYPE_MARKET,
quantity=quantity,
price=price
)
else:
                    # Market order (price is None): Binance rejects timeInForce for
                    # MARKET orders, so it is omitted here.
                    ret = client.create_order(
                        symbol=symbol,
                        side=side,
                        type=Client.ORDER_TYPE_MARKET,
                        quantity=quantity,
                    )
else:
# make test order
if price is not None:
                    ret = client.create_test_order(
                        symbol=symbol,
                        side=side,
                        timeInForce="GTC",
                        type=Client.ORDER_TYPE_LIMIT if is_limit_order else Client.ORDER_TYPE_MARKET,
                        quantity=quantity,
                        price=price
                    )
else:
ret = client.create_test_order(
symbol=symbol,
side=side,
type=Client.ORDER_TYPE_LIMIT if is_limit_order else Client.ORDER_TYPE_MARKET,
quantity=quantity,
)
except BinanceAPIException as e:
return self._get_fail_retstr(str(e.status_code) + ":" + str(e.message))
if not self._check_success(ret):
return self._process_ret(ret)
new_ret = {}
new_ret["success"] = True
new_ret["pair"] = self.symbol_to_pair(ret["symbol"])
new_ret["id"] = ret["orderId"]
new_ret["rate"] = ret["price"]
new_ret["order_type"] = ret["side"].lower()
new_ret["order_type_detail"] = ret["type"]
new_ret["amount"] = ret["origQty"]
new_ret["order_status"] = ret["status"]
new_ret["clientOrderId"] = ret["clientOrderId"]
#created_at = datetime.fromtimestamp(ret["transactTime"], tz=pytz.utc)
new_ret["created_at"] = self._gtctime_to_createdat_str(ret["transactTime"]/1000)
"""
ret example
{
"symbol": "BTCUSDT",
"orderId": 28,
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL"
}
"""
return json.dumps(new_ret)
def cancel(self, params = {}):
defaults = {
'id': ""
}
defaults.update(params)
params = defaults.copy()
params["orderId"] = params["id"]
del params["id"]
if "pair" in params:
params["symbol"] = self.pair_to_symbol(params["pair"])
try:
ret = self.client.cancel_order(symbol=params["symbol"], orderId=params["orderId"])
except BinanceAPIException as e:
return self._get_except_retstr(e)
if not self._check_success(ret):
return self._process_ret(ret)
ret["success"] = True
ret["id"] = ret["orderId"]
return self._process_ret(ret)
def _create_symbol_list_from_param(self, params):
symbols = []
if "pair" not in params and "symbol" not in params:
return False, self._get_fail_retstr("Set 'symbol' or 'pair' when accessing binance api")
if "pair" in params:
if isinstance(params["pair"], list):
symbols = list(map(lambda x:self.to_symbol(x), params["pair"]))
else:
symbols = [params["pair"]]
else:
if isinstance(params["symbol"], list):
symbols = params["symbol"]
else:
symbols = [params["symbol"]]
return True, symbols
def opens(self, params = {}):
ok, symbols = self._create_symbol_list_from_param(params)
if not ok:
return symbols
new_ret = {"success":True}
all_orders = []
force_all_order_request = "__force_all_order__" in params and params["__force_all_order__"]
for symbol in symbols:
try:
if force_all_order_request:
print("****** get all orders (not only opens) ******")
ret = self.client.get_all_orders(symbol=symbol)
else:
ret = self.client.get_open_orders(symbol=symbol)
except BinanceAPIException as e:
return self._get_except_retstr(e)
if not self._check_success(ret):
return self._process_ret(ret)
orders = []
for open_order in ret:
new_order = {}
new_order["symbol"] = open_order["symbol"]
new_order["pair"] = self.symbol_to_pair(open_order["symbol"])
new_order["id"] = open_order["orderId"]
new_order["rate"] = open_order["price"]
new_order["orig_amount"] = float(open_order["origQty"])
new_order["pending_amount"] = float(open_order["origQty"]) - float(open_order["executedQty"])
new_order["order_type"] = open_order["side"].lower()
new_order["created_at"] = self._gtctime_to_createdat_str(open_order["time"]/1000)
new_order["stop_loss_rate"] = None if float(open_order["stopPrice"]) == 0 else open_order["stopPrice"]
new_order["pending_market_buy_amount"] = None
new_order["status"] = open_order["status"]
orders.append(new_order)
all_orders += orders
new_ret["orders"] = list(sorted(all_orders, key=lambda x:x["created_at"], reverse=True))
return json.dumps(new_ret)
# result sample
"""
[
{
"symbol": "LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559,
"isWorking": trueO
}
]
"""
def transactions(self, params = {}):
ok, symbols = self._create_symbol_list_from_param(params)
if not ok:
return symbols
new_ret = {"success": True}
all_trades = []
for symbol in symbols:
try:
ret = self.client.get_my_trades(symbol=symbol)
except BinanceAPIException as e:
return self._get_except_retstr(e)
pair_name = self.symbol_to_pair(symbol)
qty_currency = pair_name.split("_")[0]
base_currency = pair_name.split("_")[1]
# change ret to cc style
trades = []
for t in ret:
"""
#"id": 38,
#"order_id": 49,
#"created_at": "2015-11-18T07:02:21.000Z",
#"funds": {
# "btc": "0.1",
# "jpy": "-4096.135"
#},
#"pair": "btc_jpy",
#"rate": "40900.0",
#"fee_currency": "JPY",
#"fee": "6.135",
#"liquidity": "T",
#"side": "buy"
"""
trade = {}
trade["id"] = t["id"]
trade["pair"] = pair_name
trade["order_id"] = t["orderId"]
trade["rate"] = t["price"]
trade["created_at"] = self._gtctime_to_createdat_str(t["time"]/1000)
trade["fee_currency"] = t["commissionAsset"]
trade["fee"] = t["commission"]
trade["liquidity"] = "M" if t["isMaker"] else "T"
trade["side"] = "buy" if t["isBuyer"] == True else "sell"
funds = {}
if t["isBuyer"] == True:
funds[qty_currency] = float(t["qty"])
funds[base_currency] = - float(t["qty"]) * float(t["price"])
else:
funds[qty_currency] = - float(t["qty"])
funds[base_currency] = float(t["qty"]) * float(t["price"])
if t["commissionAsset"].lower() == qty_currency.lower():
# minus commission from qty currency
funds[qty_currency] -= float(t["commission"])
elif t["commissionAsset"].lower() == base_currency.lower():
# minus commission from base currency
                    # The fee may be deducted from a currency other than the ones being traded (e.g. BNB), so check both.
funds[base_currency] -= float(t["commission"])
trade["funds"] = funds
trades.append(trade)
all_trades += trades
new_ret["transactions"] = list(sorted(all_trades, key=lambda x:x["created_at"], reverse=True))
return json.dumps(new_ret)
    # Could this just be "Check an order's status"?
    # /api/v3/myTrades has a request weight of 5, which is heavy on the usage budget: it costs 5 per symbol.
    # So would it be better to fetch and return the order state on the API side instead of
    # parsing it on the PositionManager side??
    # => i.e. move what was written in PositionManager into the cc api / binance api.
    # Addendum: the current approach seems fine as-is.
    # Work done once per minute:
    #   new order: weight 1 * positions
    #   cancel order: weight 1 * positions
    #
    # Work done every frame:
    #   balance check: weight 5
    #   openOrder check: weight 1 * positions
    #   transactions check: weight 5 * positions
    # => With 10 iterations per minute and 6 currencies in parallel:
    #    2*6 + (6+6*6)*10 = 432/min < 1200/min
    # => Should stay within the limit.
    # The daily limit applies to orders only, so requests such as balance are counted separately?
if __name__ == "__main__":
import os
import sys
print(sys.path)
from binance_api.binance_api import Binance
api_key = None
secret_key = None
with open(os.path.join(os.path.dirname(__file__), "key/test_binance_key.txt")) as r:
api_key = r.readline().strip()
secret_key = r.readline().strip()
api = Binance(api_key, secret_key)
trades_str = api.order.transactions({"symbol": ["BTCUSDT", "LSKBTC"]})
trades = json.loads(trades_str)
print("transactions")
for trade in trades["transactions"]:
print("=====")
for key in sorted(trade.keys()):
print(key + ":" + str(trade[key]))
print("--------all orders---------")
orders_str = api.order.opens({"symbol":["BTCUSDT", "LSKBTC", "OMGBTC"], "__force_all_order__": True})
orders = json.loads(orders_str)
for order in orders["orders"]:
print("=====")
for key in sorted(order.keys()):
print(key + ":" + str(order[key]))
|
python
|
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for fetching the features Oppia provides to its users."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import config_domain
import feconf
class ExplorationFeaturesHandler(base.BaseHandler):
"""Returns features the given exploration is configured to support."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_exploration
def get(self, exploration_id):
"""Handles GET requests for an exploration's features.
Args:
exploration_id: str. The ID of the exploration.
"""
whitelisted_exploration_ids_for_playthroughs = (
config_domain.WHITELISTED_EXPLORATION_IDS_FOR_PLAYTHROUGHS.value)
self.render_json({
'is_improvements_tab_enabled':
config_domain.IS_IMPROVEMENTS_TAB_ENABLED.value,
'is_exploration_whitelisted':
exploration_id in whitelisted_exploration_ids_for_playthroughs,
'always_ask_learners_for_answer_details':
config_domain.ALWAYS_ASK_LEARNERS_FOR_ANSWER_DETAILS.value
})
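        # Example of the JSON payload rendered above (illustrative values only):
        # {
        #     "is_improvements_tab_enabled": false,
        #     "is_exploration_whitelisted": true,
        #     "always_ask_learners_for_answer_details": false
        # }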
|
python
|
# -*- coding: utf-8 -*-
import logging
from datetime import datetime, timedelta
from dateutil import tz
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate
from django.db.models import Q, Prefetch, Sum
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.http import urlquote
from rest_framework import exceptions, serializers
from rest_framework.response import Response
from rest_framework.validators import UniqueValidator
from foundation.models import Invoice, Product
logger = logging.getLogger(__name__)
def get_todays_date_plus_days(days=0):
"""Returns the current date plus paramter number of days."""
return timezone.now() + timedelta(days=days)
class InvoiceItemRetrieveUpdateDestroySerializer(serializers.Serializer):
invoice = serializers.PrimaryKeyRelatedField(read_only=True)
product = serializers.PrimaryKeyRelatedField(read_only=True)
description = serializers.CharField(read_only=True,)
quantity = serializers.IntegerField(required=False, allow_null=True)
unit_price = serializers.FloatField(read_only=True)
total_price = serializers.FloatField(read_only=True)
# Meta Information.
class Meta:
fields = (
'invoice',
'product',
'description',
'quantity',
'unit_price',
'total_price'
)
def validate_quantity(self, data):
if data < 1:
raise exceptions.ValidationError(_("Please pick number greater then zero."))
return data
def update(self, instance, validated_data):
"""
Override this function to include extra functionality.
"""
instance.quantity = validated_data.get('quantity', instance.quantity)
instance.save()
instance.invoice.invalidate('total')
return validated_data
def delete(self, instance):
instance.delete()
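# Hypothetical usage sketch of the partial-update flow above (names are illustrative):
#
#   serializer = InvoiceItemRetrieveUpdateDestroySerializer(
#       invoice_item, data={'quantity': 3}, partial=True)
#   serializer.is_valid(raise_exception=True)  # runs validate_quantity()
#   serializer.save()                          # calls update() and invalidates the invoice total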
|
python
|
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""Textual UI: progress bar and colprint"""
import os
import sys
from datetime import datetime, timedelta
from contextlib import contextmanager
import logging
import math
import six
LOG = logging.getLogger(__name__)
__all__ = ['colprint', 'find_console_width', 'ProgressBar',
'clear_progress_bar', 'redraw_progress_bar', 'safe_output']
if not six.PY3:
input = raw_input
class ProgressBar(object):
"""Show percent progress every 'n' seconds"""
def __init__(self, total, delay=None, show_size=lambda x: x, note=None):
"""
total - total number of items that are going to be processed
delay - update delay, in seconds
show_size - function to return the string to display instead of the number `size`
"""
assert total >= 0, total
assert delay is None or delay >= 0, delay
assert show_size
_set_current_progress_bar(self)
if delay is None:
if _istty():
delay = 0.1
else:
delay = 5 # output redirection; less flood
self.delay = timedelta(seconds=delay)
self.delay_duration = timedelta(seconds=1)
self.start = datetime.now()
self.elapsed = None # time elapsed from start
self.estimated_time_left = None
self.lastprint = None
self.lastprint_duration = None # for updating duration/ETA
self.lastprocessed = 0
self.lastline = ''
self.total = total
self.processed = 0
self.show_size = show_size
self.note = note
self.duration_display = ''
self._length = 0 # current length of the progress display
@classmethod
def iterate(cls, sequence, note=None, post=None):
"""Iterate a sequence and update the progress bar accordingly
The sequence must have a 'len' attribute if it is an arbitrary
generator.
note -- Text to print before the progress bar
post -- Text to print at the end of progress (w/ fmt vars)
"""
p = cls(len(sequence), note=note)
clean_exit = False
try:
for item in sequence:
yield item
p.tick()
clean_exit = True
finally:
p.close()
if post and clean_exit:
line = post.format(**p.__dict__)
if _istty():
sys.stdout.write(line + '\n')
else:
print(line)
def tick(self, items=1):
"""The method that updates the display if necessary.
After creating the ``PercentProgress`` object, this method must be
called for every item processed (or, pass items=ITEMS for every ITEMS
processed).
This method must be called no more than ``self.total`` times (otherwise
you get assertion error .. implying a bug in your code)
Return True if progress bar was redrawn.
"""
self.processed += items
assert self.processed <= self.total, \
'{0} <= {1}'.format(self.processed, self.total)
now = datetime.now()
if (self.lastprint == None or
(now - self.lastprint) > self.delay):
self.lastprint = now
self.redraw()
return True
else:
return False
def clear(self):
"""Erase the entire progress bar and put the cursor at first column"""
# Move cursor to the beginning of current progress line so that further
# messages will overwrite the progress bar. Also overwrite the previous
# progress bar with empty space.
if _istty():
sys.stdout.write('\r' + ' '*self._length + '\r')
sys.stdout.flush()
def close(self):
"""Close (hide) the progress bar
Erase the progress bar and print the closing message in place of the
previous progress bar text.
"""
self.redraw()
self.clear()
_del_current_progress_bar(self)
def redraw(self):
self.clear()
percent = _calculate_percent(self.processed, self.total)
now = datetime.now()
self.elapsed = now - self.start
if self.processed:
self.estimated_time_left = self.elapsed.seconds * (self.total-self.processed)/self.processed
# Update time elapsed/left once a second only (delay_duration = 1s).
if self.elapsed.seconds and (
self.lastprint_duration is None or \
now - self.lastprint_duration > self.delay_duration):
self.lastprint_duration = now
elapsed = _format_duration(self.elapsed.seconds)
if self.estimated_time_left:
self.duration_display = '({0}; {1} left)'.format(
elapsed, _format_duration(self.estimated_time_left))
else:
self.duration_display = '({0})'.format(elapsed)
bar_width = 20
bar_filled = int(round(20.0/100 * percent))
filled = ['='] * bar_filled
if filled:
filled[-1] = '>'
filled = ''.join(filled)
progress_bar = ''.join([
(self.note+': ') if self.note else '',
# header:
'[',
# solid bar
filled,
# empty space
' ' * (bar_width-bar_filled),
# footer
'] {0:-3}% {1}/{2} {3}'.format(
percent,
self.show_size(self.processed),
self.show_size(self.total),
self.duration_display
)
])
self._length = len(progress_bar)
if _istty():
sys.stdout.write('\r' + progress_bar + '\r')
else:
# Only print if different from last progress line, to prevent flood
if self.lastline != progress_bar:
self.lastline = progress_bar
print(progress_bar)
sys.stdout.flush()
def clear_progress_bar():
"""Clear progress bar, if any"""
if _current_progress_bar:
_current_progress_bar.clear()
def redraw_progress_bar():
"""Redraw progress bar, if any"""
if _current_progress_bar:
_current_progress_bar.redraw()
@contextmanager
def safe_output():
"""Wrapper that makes it safe to print to stdout
If a progress bar is currently being shown, this wrapper takes care of
clearing it before .. and then redrawing it after
"""
clear_progress_bar()
yield
redraw_progress_bar()
def askyesno(question, default):
"""Ask (Y/N) type of question to the user"""
assert isinstance(default, bool), '"default" must be a boolean'
s = '{0} ({1}/{2}) '.format(
question,
default and 'Y' or 'y',
default and 'n' or 'N')
while True:
val = input(s).strip().lower()
if val == '':
return default
elif val in ('y', 'yes', 'ok'):
return True
elif val in ('n', 'no'):
return False
# This function was written by Alex Martelli
# http://stackoverflow.com/questions/1396820/
def colprint(table, totwidth=None):
"""Print the table in terminal taking care of wrapping/alignment
- `table`: A table of strings. Elements must not be `None`
- `totwidth`: If None, console width is used
"""
if not table: return
if totwidth is None:
totwidth = find_console_width()
if totwidth is not None:
totwidth -= 1 # for not printing an extra empty line on windows
numcols = max(len(row) for row in table)
# ensure all rows have >= numcols columns, maybe empty
padded = [row+numcols*['',] for row in table]
# compute col widths, including separating space (except for last one)
widths = [ 1 + max(len(x) for x in column) for column in zip(*padded)]
widths[-1] -= 1
# drop or truncate columns from the right in order to fit
if totwidth is not None:
while sum(widths) > totwidth:
mustlose = sum(widths) - totwidth
if widths[-1] <= mustlose:
del widths[-1]
else:
widths[-1] -= mustlose
break
# and finally, the output phase!
for row in padded:
s = ''.join(['%*s' % (-w, i[:w])
for w, i in zip(widths, row)])
LOG.info(s)
def find_console_width():
"""Return the console width
Return ``None`` if stdout is not a terminal (eg: a pipe)
"""
if sys.platform.startswith('win'):
return _find_windows_console_width()
else:
return _find_unix_console_width()
@contextmanager
def longrun(log, finalfn=lambda: None):
"""Decorator for performing a long operation with consideration for the
command line.
1. Catch keyboard interrupts and exit gracefully
2. Print total time elapsed always at the end (successful or not)
3. Call ``finalfn`` always at the end (successful or not)
"""
start_time = datetime.now()
try:
yield
except KeyboardInterrupt:
log.info('*** interrupted by user - Ctrl+c ***')
raise SystemExit(3)
finally:
finalfn()
end_time = datetime.now()
log.info('')
log.info('-----')
log.info('Total time elapsed: %s', end_time-start_time)
def _find_unix_console_width():
import termios, fcntl, struct, sys
# fcntl.ioctl will fail if stdout is not a tty
if sys.stdout.isatty():
s = struct.pack("HHHH", 0, 0, 0, 0)
fd_stdout = sys.stdout.fileno()
size = fcntl.ioctl(fd_stdout, termios.TIOCGWINSZ, s)
height, width = struct.unpack("HHHH", size)[:2]
return width
else:
return None
def _find_windows_console_width():
# http://code.activestate.com/recipes/440694/
from ctypes import windll, create_string_buffer
STDIN, STDOUT, STDERR = -10, -11, -12
h = windll.kernel32.GetStdHandle(STDERR)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex
def _byteshr(bytes):
"""Human-readable version of bytes count"""
for x in ['bytes','KB','MB','GB','TB']:
if bytes < 1024.0:
return "%3.1f%s" % (bytes, x)
bytes /= 1024.0
raise ValueError('cannot find human-readable version')
def _calculate_percent(numerator, denominator):
assert numerator <= denominator, '%d <= %d' % (numerator, denominator)
if denominator == 0:
if numerator == 0:
return 100
else:
raise ValueError('denominator cannot be zero')
return int(round( numerator / float(denominator) * 100 ))
def _format_duration(seconds):
s = []
if seconds > 60:
s.append('{0}m'.format(int(seconds/60)))
s.append('{0}s'.format(int(seconds % 60)))
return ''.join(s)
# Handle to the current progress bar object. There cannot be more than one
# progress bar for obvious reasons.
_current_progress_bar = None
def _set_current_progress_bar(pbar):
global _current_progress_bar
assert _current_progress_bar is None, 'there is already a pbar'
_current_progress_bar = pbar
def _del_current_progress_bar(pbar):
global _current_progress_bar
assert _current_progress_bar is pbar, 'pbar is something else'
_current_progress_bar = None
def _istty():
return hasattr(sys.stdout, 'fileno') and \
os.isatty(sys.stdout.fileno())
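# --- Minimal usage sketch (not part of the original module) ---
# The item count and sleep below are made up purely to demonstrate the call pattern.
if __name__ == '__main__':
    import time
    for _ in ProgressBar.iterate(range(200), note='processing',
                                 post='done: {processed}/{total} items'):
        time.sleep(0.01)  # simulate per-item work
    with safe_output():
        print('printed safely even if a progress bar were active')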
|
python
|
import tensorflow as tf
import numpy as np
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
def neuron_layer(X, n_neurons, name, activation=None):
with tf.name_scope(name):
n_inputs = int(X.get_shape()[1])
stddev = 2/ np.sqrt(n_inputs)
init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
W = tf.Variable(init, name="kernel")
b = tf.Variable(tf.zeros([n_neurons]), name="bias")
Z = tf.matmul(X, W) + b
if activation is not None:
return activation(Z)
else:
return Z
def shuffle_batch(X, y, batch_size):
    # Yield shuffled (X, y) mini-batches covering one epoch.
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        yield X[batch_idx], y[batch_idx]
if __name__ == "__main__":
n_inputs = 28*28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
    # Construction phase (build the graph)
with tf.name_scope("dnn"):
hidden1 = neuron_layer(X, n_hidden1, name="hidden1", activation=tf.nn.relu)
        hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2", activation=tf.nn.relu)
        # The output layer yields raw logits; softmax is applied inside the loss below.
        logits = neuron_layer(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
learning_rate = 0.01
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
    # Execution phase (train the model)
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
    y_train = y_train.astype(np.int32)
    y_test = y_test.astype(np.int32)
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
n_epochs = 40
batch_size = 50
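    # Assumed continuation (not in the original file): a standard TF1 training loop
    # driven by the shuffle_batch() generator defined above.
    with tf.Session() as sess:
        init.run()
        for epoch in range(n_epochs):
            for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
            print(epoch, "validation accuracy:", acc_valid)
        save_path = saver.save(sess, "./my_model_final.ckpt")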
|
python
|
import datetime
import typing
import uuid
from commercetools import types
from commercetools._schemas._api_client import (
ApiClientDraftSchema,
ApiClientPagedQueryResponseSchema,
ApiClientSchema,
)
from commercetools.testing.abstract import BaseModel, ServiceBackend
class ApiClientsModel(BaseModel):
_primary_type_name = "api-client"
_resource_schema = ApiClientSchema
def _create_from_draft(
self, draft: types.ApiClientDraft, id: typing.Optional[str] = None
) -> types.ApiClient:
object_id = str(uuid.UUID(id) if id is not None else uuid.uuid4())
return types.ApiClient(
id=str(object_id),
name=draft.name,
scope=draft.scope,
secret=str(uuid.uuid4()),
created_at=datetime.datetime.now(datetime.timezone.utc),
)
class ApiClientsBackend(ServiceBackend):
service_path = "api-clients"
model_class = ApiClientsModel
_schema_draft = ApiClientDraftSchema
_schema_query_response = ApiClientPagedQueryResponseSchema
_verify_version = False
def urls(self):
return [
("^$", "GET", self.query),
("^$", "POST", self.create),
("^(?P<id>[^/]+)$", "GET", self.get_by_id),
("^(?P<id>[^/]+)$", "DELETE", self.delete_by_id),
]
|
python
|
import os
import pandas as pd
def test_wildcard_filter():
from pachypy.utils import wildcard_filter, wildcard_match
x = ['a', 'ab', 'b']
assert wildcard_match('', None) is True
assert wildcard_match('', '*') is True
assert wildcard_match('', '?') is False
assert wildcard_match('a', '*') is True
assert wildcard_match('a', 'a') is True
assert wildcard_match('b', 'a') is False
assert wildcard_filter(x, '*') == x
assert wildcard_filter(x, None) == x
assert wildcard_filter(x, 'a') == ['a']
assert wildcard_filter(x, ['a']) == ['a']
assert wildcard_filter(x, [['a']]) == ['a']
assert wildcard_filter(x, 'a*') == ['a', 'ab']
assert wildcard_filter(x, 'a?') == ['ab']
assert wildcard_filter(x, [['a*'], 'b']) == x
assert wildcard_filter(x, ['*a', '*b']) == x
assert wildcard_filter(x, ['a', 'b']) == ['a', 'b']
def test_expand_files():
from pathlib import Path
from pachypy.utils import expand_files
mock_dir = lambda glob: os.path.join(os.path.dirname(__file__), 'mock', glob)
assert len(expand_files(None)) == 0
assert len(expand_files(mock_dir('*.csv'))) == 7
assert len(expand_files(Path(mock_dir('*.csv')))) == 7
assert len(expand_files([mock_dir('list_*.csv'), Path(mock_dir('get_*.csv'))])) == 7
def test_invert_dict():
from pachypy.utils import invert_dict
assert invert_dict({'a': '1'}) == {'1': ['a']}
assert invert_dict({'a': '1', 'b': '1'}) == {'1': ['a', 'b']}
assert invert_dict({'a': '1', 'b': '2'}) == {'1': ['a'], '2': ['b']}
def test_to_timestamp():
from pachypy.utils import to_timestamp
assert to_timestamp(0, 0) is None
assert to_timestamp(1554878996, 0) == pd.Timestamp('2019-04-10 06:49:56', tz='utc')
def test_to_timedelta():
from pachypy.utils import to_timedelta
assert to_timedelta(0, 0) == pd.Timedelta(0, unit='s')
assert to_timedelta(0, 1) == pd.Timedelta(0.1, unit='s')
|
python
|
import pytest
def test_rnaseq_expression_init():
from genomic_data_service.rnaseq.domain.expression import Expression
data = [
'POMC',
'ENST000, ENST001',
0.03,
90.1,
]
expression = Expression(
*data
)
assert isinstance(expression, Expression)
assert expression.gene_id == 'POMC'
assert expression.transcript_ids == 'ENST000, ENST001'
assert expression.tpm == 0.03
assert expression.fpkm == 90.1
def test_rnaseq_expression_dict():
from genomic_data_service.rnaseq.domain.expression import Expression
data = [
'POMC',
'ENST000, ENST001',
0.03,
90.1,
]
expression = Expression(
*data
)
assert expression.__dict__ == {
'fpkm': 90.1,
'gene_id': 'POMC',
'tpm': 0.03,
'transcript_ids': 'ENST000, ENST001'
}
def test_rnaseq_expression_as_dict():
from genomic_data_service.rnaseq.domain.expression import Expression
data = [
'POMC',
'ENST000, ENST001',
'0.03',
90.1,
]
expression = Expression(
*data
)
assert expression.as_dict() == {
'fpkm': 90.1,
'gene_id': 'POMC',
'tpm': 0.03,
'transcript_ids': ['ENST000', 'ENST001'],
}
def test_rnaseq_expression_remove_version_from_gene_id():
from genomic_data_service.rnaseq.domain.expression import remove_version_from_gene_id
assert remove_version_from_gene_id('ENSG00000224939') == 'ENSG00000224939'
assert remove_version_from_gene_id('ENSG00000224939.14') == 'ENSG00000224939'
def test_rnaseq_expression_gene_id_without_version():
from genomic_data_service.rnaseq.domain.expression import Expression
data = [
'ENSG00000034677.12',
'ENST000, ENST001',
0.03,
90.1,
]
expression = Expression(
*data
)
assert expression.gene_id_without_version == 'ENSG00000034677'
def test_rnaseq_expression_expressions_init(mock_portal):
from genomic_data_service.rnaseq.expressions import Expressions
from genomic_data_service.rnaseq.repository.memory import Memory
repository = Memory()
portal = mock_portal
expressions = Expressions(portal, repository)
assert isinstance(expressions, Expressions)
def test_rnaseq_expression_prefix_numerical_gene_id():
from genomic_data_service.rnaseq.domain.expression import prefix_numerical_gene_id
assert prefix_numerical_gene_id('ENSG00000224939') == 'ENSG00000224939'
assert prefix_numerical_gene_id('ENSG00000224939.14') == 'ENSG00000224939.14'
assert prefix_numerical_gene_id('21301') == 'tRNAscan:21301'
assert prefix_numerical_gene_id('32719') == 'tRNAscan:32719'
assert prefix_numerical_gene_id(21301) == 'tRNAscan:21301'
|
python
|
import sys, os
import pandas as pd
import numpy as np
file_path = os.path.dirname(os.path.realpath(__file__))
ps_calcs_lib_path = os.path.abspath(os.path.join(file_path, "../../", "lib/ps_calcs"))
sys.path.insert(0, ps_calcs_lib_path)
import ps_calcs
def rdo(ybi, yi, year, depths):
rca_dist_opp = []
for geo_level in depths["bra"]:
print "geo level:", geo_level
ybi_data = ybi.reset_index()
bra_criterion = ybi_data["bra_id"].str.len() == geo_level
cnae_criterion = ybi_data["cnae_id"].str.len() == 6
ybi_data = ybi_data[bra_criterion & cnae_criterion]
# ybi_data = ybi_data.reindex(index=ybi_index)
# ybi_data = ybi_data.drop(["year", "num_emp", "num_est", "wage_avg", "num_emp_est"], axis=1)
ybi_data = ybi_data[["bra_id", "cnae_id", "wage"]]
# ybi_data = ybi_data.unstack()
# levels = ybi_data.columns.levels
# labels = ybi_data.columns.labels
# ybi_data.columns = levels[1][labels[1]]
'''
RCAS
'''
# ybi_data = ybi_data.pivot(index="bra_id", columns="cnae_id", values="wage").fillna(0)
ybi_data = ybi_data.pivot(index="bra_id", columns="cnae_id", values="wage")
rcas = ps_calcs.rca(ybi_data)
rcas_binary = rcas.copy()
rcas_binary[rcas_binary >= 1] = 1
rcas_binary[rcas_binary < 1] = 0
'''
DISTANCES
'''
'''calculate proximity for opportunity gain calculation'''
prox = ps_calcs.proximity(rcas_binary)
'''calculate distances using proximity'''
dist = ps_calcs.distance(rcas_binary, prox).fillna(0)
'''
OPP GAIN
'''
'''calculate product complexity'''
pci = ps_calcs.complexity(rcas_binary)[1]
'''calculate opportunity gain'''
opp_gain = ps_calcs.opportunity_gain(rcas_binary, prox, pci)
rdo = []
for bra in rcas.index:
for cnae in rcas.columns:
rdo.append([year, bra, cnae, rcas[cnae][bra], dist[cnae][bra], opp_gain[cnae][bra]])
rca_dist_opp += rdo
# now time to merge!
print "merging datasets..."
ybi_rdo = pd.DataFrame(rca_dist_opp, columns=["year", "bra_id", "cnae_id", "rca", "distance", "opp_gain"])
ybi_rdo["year"] = ybi_rdo["year"].astype(int)
ybi_rdo["rca"][ybi_rdo["rca"] == 0] = np.nan
ybi_rdo = ybi_rdo.set_index(["year", "bra_id", "cnae_id"])
# get union of both sets of indexes
all_ybi_indexes = set(ybi.index).union(set(ybi_rdo.index))
all_ybi_indexes = pd.MultiIndex.from_tuples(all_ybi_indexes, names=["year", "bra_id", "cnae_id"])
# ybi = ybi.reindex(index=all_ybi_indexes, fill_value=0)
ybi = ybi.reindex(index=all_ybi_indexes)
ybi["rca"] = ybi_rdo["rca"]
ybi["distance"] = ybi_rdo["distance"]
ybi["opp_gain"] = ybi_rdo["opp_gain"]
return ybi
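def _rca_example():
    """Illustrative sketch only (never called by rdo): the Balassa RCA index that
    ps_calcs.rca is assumed to compute, spelled out with plain pandas.
    RCA[b, c] = (wage[b, c] / wage[b, :].sum()) / (wage[:, c].sum() / wage.sum())
    """
    wages = pd.DataFrame([[100.0, 50.0], [20.0, 80.0]],
                         index=["bra_A", "bra_B"], columns=["cnae_1", "cnae_2"])
    share_within_bra = wages.div(wages.sum(axis=1), axis=0)
    share_overall = wages.sum(axis=0) / wages.values.sum()
    return share_within_bra / share_overall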
|
python
|
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides Pyserini's dense search interface to FAISS index.
The main entry point is the ``SimpleDenseSearcher`` class.
"""
import os
from dataclasses import dataclass
from typing import Dict, List, Union, Optional, Tuple
import numpy as np
import pandas as pd
from transformers import (AutoModel, AutoTokenizer, BertModel, BertTokenizer, BertTokenizerFast,
DPRQuestionEncoder, DPRQuestionEncoderTokenizer, RobertaTokenizer)
from transformers.file_utils import is_faiss_available, requires_backends
from pyserini.util import (download_encoded_queries, download_prebuilt_index,
get_dense_indexes_info, get_sparse_index)
from pyserini.search.lucene import LuceneSearcher
from pyserini.index import Document
from ._model import AnceEncoder
import torch
from ..encode import PcaEncoder
if is_faiss_available():
import faiss
class QueryEncoder:
def __init__(self, encoded_query_dir: str = None):
self.has_model = False
self.has_encoded_query = False
if encoded_query_dir:
self.embedding = self._load_embeddings(encoded_query_dir)
self.has_encoded_query = True
def encode(self, query: str):
return self.embedding[query]
@classmethod
def load_encoded_queries(cls, encoded_query_name: str):
"""Build a query encoder from a pre-encoded query; download the encoded queries if necessary.
Parameters
----------
encoded_query_name : str
            Pre-encoded query name.
Returns
-------
QueryEncoder
            Encoder built from the pre-encoded queries.
"""
print(f'Attempting to initialize pre-encoded queries {encoded_query_name}.')
try:
query_dir = download_encoded_queries(encoded_query_name)
except ValueError as e:
print(str(e))
return None
print(f'Initializing {encoded_query_name}...')
return cls(encoded_query_dir=query_dir)
@staticmethod
def _load_embeddings(encoded_query_dir):
df = pd.read_pickle(os.path.join(encoded_query_dir, 'embedding.pkl'))
return dict(zip(df['text'].tolist(), df['embedding'].tolist()))
class TctColBertQueryEncoder(QueryEncoder):
def __init__(self, encoder_dir: str = None, tokenizer_name: str = None,
encoded_query_dir: str = None, device: str = 'cpu'):
super().__init__(encoded_query_dir)
if encoder_dir:
self.device = device
self.model = BertModel.from_pretrained(encoder_dir)
self.model.to(self.device)
self.tokenizer = BertTokenizer.from_pretrained(tokenizer_name or encoder_dir)
self.has_model = True
if (not self.has_model) and (not self.has_encoded_query):
raise Exception('Neither query encoder model nor encoded queries provided. Please provide at least one')
def encode(self, query: str):
if self.has_model:
max_length = 36 # hardcode for now
inputs = self.tokenizer(
'[CLS] [Q] ' + query + '[MASK]' * max_length,
max_length=max_length,
truncation=True,
add_special_tokens=False,
return_tensors='pt'
)
inputs.to(self.device)
outputs = self.model(**inputs)
embeddings = outputs.last_hidden_state.detach().cpu().numpy()
return np.average(embeddings[:, 4:, :], axis=-2).flatten()
else:
return super().encode(query)
class DprQueryEncoder(QueryEncoder):
def __init__(self, encoder_dir: str = None, tokenizer_name: str = None,
encoded_query_dir: str = None, device: str = 'cpu'):
super().__init__(encoded_query_dir)
if encoder_dir:
self.device = device
self.model = DPRQuestionEncoder.from_pretrained(encoder_dir)
self.model.to(self.device)
self.tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(tokenizer_name or encoder_dir)
self.has_model = True
if (not self.has_model) and (not self.has_encoded_query):
raise Exception('Neither query encoder model nor encoded queries provided. Please provide at least one')
def encode(self, query: str):
if self.has_model:
input_ids = self.tokenizer(query, return_tensors='pt')
input_ids.to(self.device)
embeddings = self.model(input_ids["input_ids"]).pooler_output.detach().cpu().numpy()
return embeddings.flatten()
else:
return super().encode(query)
class BprQueryEncoder(QueryEncoder):
def __init__(self, encoder_dir: str = None, tokenizer_name: str = None,
encoded_query_dir: str = None, device: str = 'cpu'):
self.has_model = False
self.has_encoded_query = False
if encoded_query_dir:
self.embedding = self._load_embeddings(encoded_query_dir)
self.has_encoded_query = True
if encoder_dir:
self.device = device
self.model = DPRQuestionEncoder.from_pretrained(encoder_dir)
self.model.to(self.device)
self.tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(tokenizer_name or encoder_dir)
self.has_model = True
if (not self.has_model) and (not self.has_encoded_query):
raise Exception('Neither query encoder model nor encoded queries provided. Please provide at least one')
def encode(self, query: str):
if self.has_model:
input_ids = self.tokenizer(query, return_tensors='pt')
input_ids.to(self.device)
embeddings = self.model(input_ids["input_ids"]).pooler_output.detach().cpu()
dense_embeddings = embeddings.numpy()
sparse_embeddings = self.convert_to_binary_code(embeddings).numpy()
return {'dense': dense_embeddings.flatten(), 'sparse': sparse_embeddings.flatten()}
else:
return super().encode(query)
def convert_to_binary_code(self, input_repr: torch.Tensor):
return input_repr.new_ones(input_repr.size()).masked_fill_(input_repr < 0, -1.0)
@staticmethod
def _load_embeddings(encoded_query_dir):
df = pd.read_pickle(os.path.join(encoded_query_dir, 'embedding.pkl'))
ret = {}
for text, dense, sparse in zip(df['text'].tolist(), df['dense_embedding'].tolist(),
df['sparse_embedding'].tolist()):
ret[text] = {'dense': dense, 'sparse': sparse}
return ret
class DkrrDprQueryEncoder(QueryEncoder):
def __init__(self, encoder_dir: str = None, encoded_query_dir: str = None, device: str = 'cpu',
prefix: str = "question:"):
super().__init__(encoded_query_dir)
self.device = device
self.model = BertModel.from_pretrained(encoder_dir)
self.model.to(self.device)
self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
self.has_model = True
self.prefix = prefix
@staticmethod
def _mean_pooling(model_output, attention_mask):
model_output = model_output[0].masked_fill(1 - attention_mask[:, :, None], 0.)
model_output = torch.sum(model_output, dim=1) / torch.clamp(torch.sum(attention_mask, dim=1), min=1e-9)[:, None]
return model_output.flatten()
def encode(self, query: str):
if self.has_model:
if self.prefix:
query = f'{self.prefix} {query}'
inputs = self.tokenizer(query, return_tensors='pt', max_length=40, padding="max_length")
inputs.to(self.device)
outputs = self.model(input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"])
embeddings = self._mean_pooling(outputs, inputs['attention_mask']).detach().cpu().numpy()
return embeddings.flatten()
else:
return super().encode(query)
class AnceQueryEncoder(QueryEncoder):
def __init__(self, encoder_dir: str = None, tokenizer_name: str = None,
encoded_query_dir: str = None, device: str = 'cpu'):
super().__init__(encoded_query_dir)
if encoder_dir:
self.device = device
self.model = AnceEncoder.from_pretrained(encoder_dir)
self.model.to(self.device)
self.tokenizer = RobertaTokenizer.from_pretrained(tokenizer_name or encoder_dir)
self.has_model = True
self.tokenizer.do_lower_case = True
if (not self.has_model) and (not self.has_encoded_query):
raise Exception('Neither query encoder model nor encoded queries provided. Please provide at least one')
def encode(self, query: str):
if self.has_model:
inputs = self.tokenizer(
[query],
max_length=64,
padding='longest',
truncation=True,
add_special_tokens=True,
return_tensors='pt'
)
inputs.to(self.device)
embeddings = self.model(inputs["input_ids"]).detach().cpu().numpy()
return embeddings.flatten()
else:
return super().encode(query)
def prf_encode(self, query: str):
if self.has_model:
inputs = self.tokenizer(
[query],
max_length=512,
padding='longest',
truncation=True,
add_special_tokens=False,
return_tensors='pt'
)
inputs.to(self.device)
embeddings = self.model(inputs["input_ids"]).detach().cpu().numpy()
return embeddings.flatten()
else:
return super().encode(query)
def prf_batch_encode(self, query: List[str]):
inputs = self.tokenizer(
query,
max_length=512,
padding='longest',
truncation=True,
add_special_tokens=False,
return_tensors='pt'
)
inputs.to(self.device)
embeddings = self.model(inputs["input_ids"]).detach().cpu().numpy()
return embeddings
class AutoQueryEncoder(QueryEncoder):
def __init__(self, encoder_dir: str = None, tokenizer_name: str = None,
encoded_query_dir: str = None, device: str = 'cpu',
pooling: str = 'cls', l2_norm: bool = False):
super().__init__(encoded_query_dir)
if encoder_dir:
self.device = device
self.model = AutoModel.from_pretrained(encoder_dir)
self.model.to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name or encoder_dir)
self.has_model = True
self.pooling = pooling
self.l2_norm = l2_norm
if (not self.has_model) and (not self.has_encoded_query):
raise Exception('Neither query encoder model nor encoded queries provided. Please provide at least one')
@staticmethod
def _mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
return sum_embeddings / sum_mask
def encode(self, query: str):
if self.has_model:
inputs = self.tokenizer(
query,
padding='longest',
truncation=True,
add_special_tokens=True,
return_tensors='pt'
)
inputs.to(self.device)
outputs = self.model(**inputs)
if self.pooling == "mean":
embeddings = self._mean_pooling(outputs, inputs['attention_mask']).detach().cpu().numpy()
else:
embeddings = outputs[0][:, 0, :].detach().cpu().numpy()
if self.l2_norm:
faiss.normalize_L2(embeddings)
return embeddings.flatten()
else:
return super().encode(query)
@dataclass
class DenseSearchResult:
docid: str
score: float
@dataclass
class PRFDenseSearchResult:
docid: str
score: float
    vectors: List[float]
class SimpleDenseSearcher:
"""Simple Searcher for dense representation
Parameters
----------
index_dir : str
Path to faiss index directory.
"""
def __init__(self, index_dir: str, query_encoder: Union[QueryEncoder, str],
prebuilt_index_name: Optional[str] = None):
requires_backends(self, "faiss")
if isinstance(query_encoder, QueryEncoder) or isinstance(query_encoder, PcaEncoder):
self.query_encoder = query_encoder
else:
self.query_encoder = self._init_encoder_from_str(query_encoder)
self.index, self.docids = self.load_index(index_dir)
self.dimension = self.index.d
self.num_docs = self.index.ntotal
assert self.docids is None or self.num_docs == len(self.docids)
if prebuilt_index_name:
sparse_index = get_sparse_index(prebuilt_index_name)
self.ssearcher = LuceneSearcher.from_prebuilt_index(sparse_index)
@classmethod
def from_prebuilt_index(cls, prebuilt_index_name: str, query_encoder: QueryEncoder):
"""Build a searcher from a pre-built index; download the index if necessary.
Parameters
----------
query_encoder: QueryEncoder
the query encoder, which has `encode` method that convert query text to embedding
prebuilt_index_name : str
Prebuilt index name.
Returns
-------
SimpleDenseSearcher
Searcher built from the prebuilt faiss index.
"""
print(f'Attempting to initialize pre-built index {prebuilt_index_name}.')
try:
index_dir = download_prebuilt_index(prebuilt_index_name)
except ValueError as e:
print(str(e))
return None
print(f'Initializing {prebuilt_index_name}...')
return cls(index_dir, query_encoder, prebuilt_index_name)
@staticmethod
def list_prebuilt_indexes():
"""Display information about available prebuilt indexes."""
get_dense_indexes_info()
def search(self, query: Union[str, np.ndarray], k: int = 10, threads: int = 1, return_vector: bool = False) \
-> Union[List[DenseSearchResult], Tuple[np.ndarray, List[PRFDenseSearchResult]]]:
"""Search the collection.
Parameters
----------
query : Union[str, np.ndarray]
query text or query embeddings
k : int
Number of hits to return.
threads : int
Maximum number of threads to use for intra-query search.
return_vector : bool
Return the results with vectors
Returns
-------
Union[List[DenseSearchResult], Tuple[np.ndarray, List[PRFDenseSearchResult]]]
Either returns a list of search results.
Or returns the query vector with the list of PRF dense search results with vectors.
"""
if isinstance(query, str):
emb_q = self.query_encoder.encode(query)
assert len(emb_q) == self.dimension
emb_q = emb_q.reshape((1, len(emb_q)))
else:
emb_q = query
faiss.omp_set_num_threads(threads)
if return_vector:
distances, indexes, vectors = self.index.search_and_reconstruct(emb_q, k)
vectors = vectors[0]
distances = distances.flat
indexes = indexes.flat
return emb_q, [PRFDenseSearchResult(self.docids[idx], score, vector)
for score, idx, vector in zip(distances, indexes, vectors) if idx != -1]
else:
distances, indexes = self.index.search(emb_q, k)
distances = distances.flat
indexes = indexes.flat
return [DenseSearchResult(self.docids[idx], score)
for score, idx in zip(distances, indexes) if idx != -1]
def batch_search(self, queries: Union[List[str], np.ndarray], q_ids: List[str], k: int = 10,
threads: int = 1, return_vector: bool = False) \
-> Union[Dict[str, List[DenseSearchResult]], Tuple[np.ndarray, Dict[str, List[PRFDenseSearchResult]]]]:
"""
Parameters
----------
queries : Union[List[str], np.ndarray]
List of query texts or list of query embeddings
q_ids : List[str]
List of corresponding query ids.
k : int
Number of hits to return.
threads : int
Maximum number of threads to use.
return_vector : bool
Return the results with vectors
Returns
-------
Union[Dict[str, List[DenseSearchResult]], Tuple[np.ndarray, Dict[str, List[PRFDenseSearchResult]]]]
Either returns a dictionary holding the search results, with the query ids as keys and the
corresponding lists of search results as the values.
Or returns a tuple with ndarray of query vectors and a dictionary of PRF Dense Search Results with vectors
"""
if isinstance(queries, np.ndarray):
q_embs = queries
else:
q_embs = np.array([self.query_encoder.encode(q) for q in queries])
n, m = q_embs.shape
assert m == self.dimension
faiss.omp_set_num_threads(threads)
if return_vector:
D, I, V = self.index.search_and_reconstruct(q_embs, k)
return q_embs, {key: [PRFDenseSearchResult(self.docids[idx], score, vector)
for score, idx, vector in zip(distances, indexes, vectors) if idx != -1]
for key, distances, indexes, vectors in zip(q_ids, D, I, V)}
else:
D, I = self.index.search(q_embs, k)
return {key: [DenseSearchResult(self.docids[idx], score)
for score, idx in zip(distances, indexes) if idx != -1]
for key, distances, indexes in zip(q_ids, D, I)}
def load_index(self, index_dir: str):
index_path = os.path.join(index_dir, 'index')
docid_path = os.path.join(index_dir, 'docid')
index = faiss.read_index(index_path)
docids = self.load_docids(docid_path)
return index, docids
def doc(self, docid: Union[str, int]) -> Optional[Document]:
"""Return the :class:`Document` corresponding to ``docid``. Since dense indexes don't store documents
but sparse indexes do, route over to corresponding sparse index (according to prebuilt_index_info.py)
and use its doc API
Parameters
----------
docid : Union[str, int]
Overloaded ``docid``: either an external collection ``docid`` (``str``) or an internal Lucene ``docid``
(``int``).
Returns
-------
Document
:class:`Document` corresponding to the ``docid``.
"""
return self.ssearcher.doc(docid) if self.ssearcher else None
@staticmethod
def _init_encoder_from_str(encoder):
encoder = encoder.lower()
if 'dpr' in encoder:
return DprQueryEncoder(encoder_dir=encoder)
elif 'tct_colbert' in encoder:
return TctColBertQueryEncoder(encoder_dir=encoder)
elif 'ance' in encoder:
return AnceQueryEncoder(encoder_dir=encoder)
elif 'sentence' in encoder:
return AutoQueryEncoder(encoder_dir=encoder, pooling='mean', l2_norm=True)
else:
return AutoQueryEncoder(encoder_dir=encoder)
@staticmethod
def load_docids(docid_path: str) -> List[str]:
        with open(docid_path, 'r') as id_f:
            return [line.rstrip() for line in id_f.readlines()]
class BinaryDenseSearcher(SimpleDenseSearcher):
"""Simple Searcher for binary-dense representation
Parameters
----------
index_dir : str
Path to faiss index directory.
"""
def __init__(self, index_dir: str, query_encoder: Union[QueryEncoder, str],
prebuilt_index_name: Optional[str] = None):
super().__init__(index_dir, query_encoder, prebuilt_index_name)
def search(self, query: str, k: int = 10, binary_k: int = 100, rerank: bool = True, threads: int = 1) \
-> List[DenseSearchResult]:
"""Search the collection.
Parameters
----------
query : str
query text
k : int
Number of hits to return at second stage.
binary_k : int
Number of hits to return at first stage.
rerank: bool
Whether to use dense repr to rerank the binary ranking results.
threads : int
Maximum number of threads to use for intra-query search.
Returns
-------
List[DenseSearchResult]
List of search results.
"""
ret = self.query_encoder.encode(query)
dense_emb_q = ret['dense']
sparse_emb_q = ret['sparse']
assert len(dense_emb_q) == self.dimension
assert len(sparse_emb_q) == self.dimension
dense_emb_q = dense_emb_q.reshape((1, len(dense_emb_q)))
sparse_emb_q = sparse_emb_q.reshape((1, len(sparse_emb_q)))
faiss.omp_set_num_threads(threads)
distances, indexes = self.binary_dense_search(k, binary_k, rerank, dense_emb_q, sparse_emb_q)
distances = distances.flat
indexes = indexes.flat
return [DenseSearchResult(str(idx), score)
for score, idx in zip(distances, indexes) if idx != -1]
def batch_search(self, queries: List[str], q_ids: List[str], k: int = 10, binary_k: int = 100,
rerank: bool = True, threads: int = 1) -> Dict[str, List[DenseSearchResult]]:
"""
Parameters
----------
queries : List[str]
List of query texts
q_ids : List[str]
List of corresponding query ids.
k : int
Number of hits to return.
binary_k : int
Number of hits to return at first stage.
rerank: bool
Whether to use dense repr to rerank the binary ranking results.
threads : int
Maximum number of threads to use.
Returns
-------
Dict[str, List[DenseSearchResult]]
Dictionary holding the search results, with the query ids as keys and the corresponding lists of search
results as the values.
"""
dense_q_embs = []
sparse_q_embs = []
for q in queries:
ret = self.query_encoder.encode(q)
dense_q_embs.append(ret['dense'])
sparse_q_embs.append(ret['sparse'])
dense_q_embs = np.array(dense_q_embs)
sparse_q_embs = np.array(sparse_q_embs)
n, m = dense_q_embs.shape
assert m == self.dimension
faiss.omp_set_num_threads(threads)
D, I = self.binary_dense_search(k, binary_k, rerank, dense_q_embs, sparse_q_embs)
return {key: [DenseSearchResult(str(idx), score)
for score, idx in zip(distances, indexes) if idx != -1]
for key, distances, indexes in zip(q_ids, D, I)}
def binary_dense_search(self, k, binary_k, rerank, dense_emb_q, sparse_emb_q):
num_queries = dense_emb_q.shape[0]
sparse_emb_q = np.packbits(np.where(sparse_emb_q > 0, 1, 0)).reshape(num_queries, -1)
if not rerank:
distances, indexes = self.index.search(sparse_emb_q, k)
else:
raw_index = self.index.index
_, indexes = raw_index.search(sparse_emb_q, binary_k)
sparse_emb_p = np.vstack(
[np.unpackbits(raw_index.reconstruct(int(id_))) for id_ in indexes.reshape(-1)]
)
sparse_emb_p = sparse_emb_p.reshape(
dense_emb_q.shape[0], binary_k, dense_emb_q.shape[1]
)
sparse_emb_p = sparse_emb_p.astype(np.float32)
sparse_emb_p = sparse_emb_p * 2 - 1
distances = np.einsum("ijk,ik->ij", sparse_emb_p, dense_emb_q)
sorted_indices = np.argsort(-distances, axis=1)
indexes = indexes[np.arange(num_queries)[:, None], sorted_indices]
indexes = np.array([self.index.id_map.at(int(id_)) for id_ in indexes.reshape(-1)], dtype=np.int)
indexes = indexes.reshape(num_queries, -1)[:, :k]
distances = distances[np.arange(num_queries)[:, None], sorted_indices][:, :k]
return distances, indexes
def load_index(self, index_dir: str):
index_path = os.path.join(index_dir, 'index')
index = faiss.read_index_binary(index_path)
return index, None
@staticmethod
def _init_encoder_from_str(encoder):
encoder = encoder.lower()
if 'bpr' in encoder:
return BprQueryEncoder(encoder_dir=encoder)
else:
raise NotImplementedError
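# --- Illustrative usage sketch (not part of this module) ---
# The encoder and prebuilt-index names follow the naming pattern of the Pyserini
# docs but are assumptions here; substitute names that exist in your environment.
#
#   encoder = TctColBertQueryEncoder('castorini/tct_colbert-v2-hnp-msmarco')
#   searcher = SimpleDenseSearcher.from_prebuilt_index(
#       'msmarco-passage-tct_colbert-v2-hnp-bf', encoder)
#   hits = searcher.search('what is a lobster roll?', k=10)
#   for i, hit in enumerate(hits):
#       print(f'{i + 1:2} {hit.docid:15} {hit.score:.5f}')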
|
python
|
import string
class caesar():
    @staticmethod
    def caesar_cipher(user_message, user_shift):
alphabet = string.ascii_lowercase
shifted_alphabet = alphabet[user_shift:] + alphabet[:user_shift]
table = str.maketrans(alphabet, shifted_alphabet)
return user_message.translate(table)
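# Minimal usage sketch (assumes the class-level call style shown above):
if __name__ == '__main__':
    secret = caesar.caesar_cipher('attack at dawn', 3)     # -> 'dwwdfn dw gdzq'
    print(secret, '->', caesar.caesar_cipher(secret, -3))  # round-trips to the original text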
|
python
|
from celery import Celery
import consumer
app_name = 'consumer' # take the celery app name
app = Celery(app_name, broker=consumer.redis_url)
# produce
for i in range(100):
a = 1
b = 2
consumer.consume.delay(a, b)
|
python
|
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib import image as mpimg
from scipy import signal
from scipy import fftpack
import scipy.io
class PSNR_calculator:
def __init__(self,original_image,fixed_image):
self.orig = original_image[:,:,0]
self.fixed = np.array(fixed_image)
    def evaluate_PSNR(self):
        # Cast to float before subtracting to avoid uint8 wrap-around in the difference.
        diff = self.fixed.astype(np.float64) - self.orig.astype(np.float64)
        MSE = np.mean(np.power(diff, 2))
        if MSE == 0.:
            return 100
        MAX = 255
        return 20*np.log10(MAX) - 10*np.log10(MSE)
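# Quick self-check sketch with synthetic data (illustrative only; the image size and
# noise level are made up, and this block is not part of the original class).
if __name__ == '__main__':
    np.random.seed(0)
    original = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.uint8)
    noise = np.random.randint(-5, 6, size=(64, 64))
    fixed = np.clip(original[:, :, 0].astype(np.int16) + noise, 0, 255).astype(np.uint8)
    print('PSNR (dB):', PSNR_calculator(original, fixed).evaluate_PSNR())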
|
python
|
"""
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
Needs some environment strings in the live server.
LIVE -- To know its in the live server.
SECRET_KEY -- The secret key to be used.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname( os.path.dirname( os.path.abspath( __file__ ) ) )
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
if os.environ.get( 'LIVE' ):
DEBUG = False
else:
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
if DEBUG:
SECRET_KEY = 'hai'
else:
SECRET_KEY = os.environ[ 'SECRET_KEY' ]
# Hosts/domain names that are valid for this site; required if DEBUG is False
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'accounts',
'start_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'init_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join( BASE_DIR, 'templates' ),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'init_django.wsgi.application'
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join( BASE_DIR, 'db.sqlite3' ),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join( BASE_DIR, 'static_root' )
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join( BASE_DIR, 'static' ),
)
LOGIN_URL = 'accounts:login'
LOGOUT_URL = 'accounts:logout'
LOGIN_REDIRECT_URL = '/'
AUTH_USER_MODEL = 'accounts.Account'
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import cross_section_analysis.utils as cs_utils
class WaterReferenceLinesOnRaster():
"""
Processing of cross-section profiles to find water reference lines on the
topography raster within user-specified topography intervals:
* Low reference line
* mean low water line
* mean intertidal line
* mean high water line
* highest astronomical tide line
"""
def __init__(
self,
raster_pixel_resolution,
raster_origin_refEN,
raster_topo_raw,
searchInterval_lowReferenceLine=(),
searchInterval_meanLowWater=(),
searchInterval_meanIntertidal=(),
searchInterval_meanHighWater=(),
searchInterval_highestAstronomicalTide=()):
self.rPixelResolution_x, self.rPixelResolution_y = raster_pixel_resolution;
self.rPixelResolution_mean = np.mean(
np.absolute([self.rPixelResolution_x, self.rPixelResolution_y])
);
self.rEasting_origin, self.rNorthing_origin = raster_origin_refEN;
self.raster_topo_raw = raster_topo_raw;
self.searchInterval_lowReferenceLine = searchInterval_lowReferenceLine;
self.searchInterval_meanLowWater = searchInterval_meanLowWater;
self.searchInterval_meanIntertidal = searchInterval_meanIntertidal;
self.searchInterval_meanHighWater = searchInterval_meanHighWater;
self.searchInterval_highestAstronomicalTide = searchInterval_highestAstronomicalTide;
def _init_markerPoints_on_crossSectionProfile(self):
# Low Reference Line
# Location (index) on the cross-section of the low reference points
self.np_raster_lowReferenceLine_ixCrossSection = np.array([], dtype=np.int);
# Column numbers (X) of the image containing the low reference points
self.np_raster_lowReferenceLine_x = np.array([], dtype=np.int);
# Row numbers (Y) of the image containing the low reference points
self.np_raster_lowReferenceLine_y = np.array([], dtype=np.int);
# Topography values at the location of the low reference points
self.np_raster_lowReferenceLine_topo = np.array([], dtype=np.float32);
# ID of the cross-section where the low reference points are found on
self.np_raster_lowReferenceLine_crossSection_ID = np.array([], dtype=np.int);
# Mean Low Water
self.np_raster_meanLowWater_ixCrossSection = np.array([], dtype=np.int);
self.np_raster_meanLowWater_x = np.array([], dtype=np.int);
self.np_raster_meanLowWater_y = np.array([], dtype=np.int);
self.np_raster_meanLowWater_topo = np.array([], dtype=np.float32);
self.np_raster_meanLowWater_crossSection_ID = np.array([], dtype=np.int);
# Mean Intertidal Line
self.np_raster_meanIntertidal_ixCrossSection = np.array([], dtype=np.int);
self.np_raster_meanIntertidal_x = np.array([], dtype=np.int);
self.np_raster_meanIntertidal_y = np.array([], dtype=np.int);
self.np_raster_meanIntertidal_topo = np.array([], dtype=np.float32);
self.np_raster_meanIntertidal_crossSection_ID = np.array([], dtype=np.int);
# Mean High Water
self.np_raster_meanHighWater_ixCrossSection = np.array([], dtype=np.int);
self.np_raster_meanHighWater_x = np.array([], dtype=np.int);
self.np_raster_meanHighWater_y = np.array([], dtype=np.int);
self.np_raster_meanHighWater_topo = np.array([], dtype=np.float32);
self.np_raster_meanHighWater_crossSection_ID = np.array([], dtype=np.int);
# Highest Astronomical Tide
self.np_raster_highestAstronomicalTide_ixCrossSection = np.array([], dtype=np.int);
self.np_raster_highestAstronomicalTide_x = np.array([], dtype=np.int);
self.np_raster_highestAstronomicalTide_y = np.array([], dtype=np.int);
self.np_raster_highestAstronomicalTide_topo = np.array([], dtype=np.float32);
self.np_raster_highestAstronomicalTide_crossSection_ID = np.array([], dtype=np.int);
self.marker_colsHeader = [
'CrossSection_ID', 'Point_CrossSectionIndex',
'Point_X', 'Point_Y',
'Point_Easting', 'Point_Northing',
'Point_Topography'
];
def _append_lowReferencePoint_from_crossSection_to_raster(
self, point_ixCrossSection):
point_x = self.cc[point_ixCrossSection];
point_y = self.rr[point_ixCrossSection];
self.np_raster_lowReferenceLine_x = np.append(
self.np_raster_lowReferenceLine_x,
point_x
);
self.np_raster_lowReferenceLine_y = np.append(
self.np_raster_lowReferenceLine_y,
point_y
);
self.np_raster_lowReferenceLine_topo = np.append(
self.np_raster_lowReferenceLine_topo,
self.raster_topo_raw[point_y, point_x]
);
self.np_raster_lowReferenceLine_ixCrossSection = np.append(
self.np_raster_lowReferenceLine_ixCrossSection,
point_ixCrossSection
);
self.np_raster_lowReferenceLine_crossSection_ID = np.append(
self.np_raster_lowReferenceLine_crossSection_ID,
self.raster_crossSection_count
);
def _append_meanLowWaterPoint_from_crossSection_to_raster(
self, point_ixCrossSection):
point_x = self.cc[point_ixCrossSection];
point_y = self.rr[point_ixCrossSection];
self.np_raster_meanLowWater_x = np.append(
self.np_raster_meanLowWater_x,
point_x
);
self.np_raster_meanLowWater_y = np.append(
self.np_raster_meanLowWater_y,
point_y
);
self.np_raster_meanLowWater_topo = np.append(
self.np_raster_meanLowWater_topo,
self.raster_topo_raw[point_y, point_x]
);
self.np_raster_meanLowWater_ixCrossSection = np.append(
self.np_raster_meanLowWater_ixCrossSection,
point_ixCrossSection
);
self.np_raster_meanLowWater_crossSection_ID = np.append(
self.np_raster_meanLowWater_crossSection_ID,
self.raster_crossSection_count
);
def _append_meanIntertidalPoint_from_crossSection_to_raster(
self, point_ixCrossSection):
point_x = self.cc[point_ixCrossSection];
point_y = self.rr[point_ixCrossSection];
self.np_raster_meanIntertidal_x = np.append(
self.np_raster_meanIntertidal_x,
point_x
);
self.np_raster_meanIntertidal_y = np.append(
self.np_raster_meanIntertidal_y,
point_y
);
self.np_raster_meanIntertidal_topo = np.append(
self.np_raster_meanIntertidal_topo,
self.raster_topo_raw[point_y, point_x]
);
self.np_raster_meanIntertidal_ixCrossSection = np.append(
self.np_raster_meanIntertidal_ixCrossSection,
point_ixCrossSection
);
self.np_raster_meanIntertidal_crossSection_ID = np.append(
self.np_raster_meanIntertidal_crossSection_ID,
self.raster_crossSection_count
);
def _append_meanHighWaterPoint_from_crossSection_to_raster(
self, point_ixCrossSection):
point_x = self.cc[point_ixCrossSection];
point_y = self.rr[point_ixCrossSection];
self.np_raster_meanHighWater_x = np.append(
self.np_raster_meanHighWater_x,
point_x
);
self.np_raster_meanHighWater_y = np.append(
self.np_raster_meanHighWater_y,
point_y
);
self.np_raster_meanHighWater_topo = np.append(
self.np_raster_meanHighWater_topo,
self.raster_topo_raw[point_y, point_x]
);
self.np_raster_meanHighWater_ixCrossSection = np.append(
self.np_raster_meanHighWater_ixCrossSection,
point_ixCrossSection
);
self.np_raster_meanHighWater_crossSection_ID = np.append(
self.np_raster_meanHighWater_crossSection_ID,
self.raster_crossSection_count
);
def _append_highestAstronomicalTidePoint_from_crossSection_to_raster(
self, point_ixCrossSection):
point_x = self.cc[point_ixCrossSection];
point_y = self.rr[point_ixCrossSection];
self.np_raster_highestAstronomicalTide_x = np.append(
self.np_raster_highestAstronomicalTide_x,
point_x
);
self.np_raster_highestAstronomicalTide_y = np.append(
self.np_raster_highestAstronomicalTide_y,
point_y
);
self.np_raster_highestAstronomicalTide_topo = np.append(
self.np_raster_highestAstronomicalTide_topo,
self.raster_topo_raw[point_y, point_x]
);
self.np_raster_highestAstronomicalTide_ixCrossSection = np.append(
self.np_raster_highestAstronomicalTide_ixCrossSection,
point_ixCrossSection
);
self.np_raster_highestAstronomicalTide_crossSection_ID = np.append(
self.np_raster_highestAstronomicalTide_crossSection_ID,
self.raster_crossSection_count
);
def run_through_crossSections_over_raster(
self,
np_coordinatesCrossSection_refRaster_start_x,
np_coordinatesCrossSection_refRaster_start_y,
np_coordinatesCrossSection_refRaster_end_x,
np_coordinatesCrossSection_refRaster_end_y):
np_raster_crossSection_count = np.arange(
0, np_coordinatesCrossSection_refRaster_start_x.size, 1,
dtype=np.int
);
self._init_markerPoints_on_crossSectionProfile();
for self.raster_crossSection_count in np_raster_crossSection_count:
# Extract coordinates of selected cross-section from all available
# cross-sections.
self.coordinatesCrossSection_refRaster_start_x = np_coordinatesCrossSection_refRaster_start_x[
self.raster_crossSection_count
];
self.coordinatesCrossSection_refRaster_start_y = np_coordinatesCrossSection_refRaster_start_y[
self.raster_crossSection_count
];
self.coordinatesCrossSection_refRaster_end_x = np_coordinatesCrossSection_refRaster_end_x[
self.raster_crossSection_count
];
self.coordinatesCrossSection_refRaster_end_y = np_coordinatesCrossSection_refRaster_end_y[
self.raster_crossSection_count
];
self.find_water_line_markers_on_crossSection(
self.coordinatesCrossSection_refRaster_start_x,
self.coordinatesCrossSection_refRaster_start_y,
self.coordinatesCrossSection_refRaster_end_x,
self.coordinatesCrossSection_refRaster_end_y
);
def find_water_line_markers_on_crossSection(
self,
coordinatesCrossSection_refRaster_start_x,
coordinatesCrossSection_refRaster_start_y,
coordinatesCrossSection_refRaster_end_x,
coordinatesCrossSection_refRaster_end_y):
self.crossSectionProfile_topo,\
self.rr,\
self.cc = cs_utils.get_crossSectionProfile_from_raster(
self.raster_topo_raw,
coordinatesCrossSection_refRaster_start_x,
coordinatesCrossSection_refRaster_start_y,
coordinatesCrossSection_refRaster_end_x,
coordinatesCrossSection_refRaster_end_y
);
# Low Reference Line
lowReferenceLine_ixCrossSection = self.get_coordinatesThresholdCross(
self.searchInterval_lowReferenceLine
);
if lowReferenceLine_ixCrossSection != -1:
self._append_lowReferencePoint_from_crossSection_to_raster(
lowReferenceLine_ixCrossSection
);
# Mean Low Water
meanLowWater_ixCrossSection = self.get_coordinatesThresholdCross(
self.searchInterval_meanLowWater
);
if meanLowWater_ixCrossSection != -1:
self._append_meanLowWaterPoint_from_crossSection_to_raster(
meanLowWater_ixCrossSection
);
# Mean Intertidal
meanIntertidal_ixCrossSection = self.get_coordinatesThresholdCross(
self.searchInterval_meanIntertidal
);
if meanIntertidal_ixCrossSection != -1:
self._append_meanIntertidalPoint_from_crossSection_to_raster(
meanIntertidal_ixCrossSection
);
# Mean High Water
meanHighWater_ixCrossSection = self.get_coordinatesThresholdCross(
self.searchInterval_meanHighWater
);
if meanHighWater_ixCrossSection != -1:
self._append_meanHighWaterPoint_from_crossSection_to_raster(
meanHighWater_ixCrossSection
);
        # Highest Astronomical Tide
highestAstronomicalTide_ixCrossSection = self.get_coordinatesThresholdCross(
self.searchInterval_highestAstronomicalTide
);
if highestAstronomicalTide_ixCrossSection != -1:
self._append_highestAstronomicalTidePoint_from_crossSection_to_raster(
highestAstronomicalTide_ixCrossSection
);
def get_coordinatesThresholdCross(self, searchInterval):
"""
Get the coordinates of the position of the cross-section where the
value for the topography raster is in the user-specified interval.
Parameters
----------
searchInterval (tuple of float)
Interval of topography where to search for a point
Returns
-------
numpy.int
Index of the first detected point within the interval
If no valid index is found, returns -1
"""
if searchInterval is None:
return -1;
elif len(searchInterval) != 2:
return -1;
else:
thresholdTopo_min, thresholdTopo_max = searchInterval;
for (ixThreshold,), topo in np.ndenumerate(self.crossSectionProfile_topo):
if topo >= thresholdTopo_min and topo <= thresholdTopo_max:
break;
if ixThreshold >= (self.crossSectionProfile_topo.size-1):
return -1;
else:
return ixThreshold;
def get_lowReferencePoints_over_complete_raster(self):
np_raster_lowReferenceLine_easting = self.rEasting_origin\
+ self.np_raster_lowReferenceLine_x*self.rPixelResolution_x;
np_raster_lowReferenceLine_northing = self.rNorthing_origin\
+ self.np_raster_lowReferenceLine_y*self.rPixelResolution_y;
output_lowReferencePoints = np.column_stack((
self.np_raster_lowReferenceLine_crossSection_ID,
self.np_raster_lowReferenceLine_ixCrossSection,
self.np_raster_lowReferenceLine_x,
self.np_raster_lowReferenceLine_y,
np_raster_lowReferenceLine_easting,
np_raster_lowReferenceLine_northing,
self.np_raster_lowReferenceLine_topo
));
return pd.DataFrame(
data=output_lowReferencePoints,
columns=self.marker_colsHeader
);
def get_meanLowWaterPoints_over_complete_raster(self):
np_raster_meanLowWater_easting = self.rEasting_origin\
+ self.np_raster_meanLowWater_x*self.rPixelResolution_x;
np_raster_meanLowWater_northing = self.rNorthing_origin\
+ self.np_raster_meanLowWater_y*self.rPixelResolution_y;
output_meanLowWaterPoints = np.column_stack((
self.np_raster_meanLowWater_crossSection_ID,
self.np_raster_meanLowWater_ixCrossSection,
self.np_raster_meanLowWater_x,
self.np_raster_meanLowWater_y,
np_raster_meanLowWater_easting,
np_raster_meanLowWater_northing,
self.np_raster_meanLowWater_topo
));
return pd.DataFrame(
data=output_meanLowWaterPoints,
columns=self.marker_colsHeader
);
def get_meanIntertidalPoints_over_complete_raster(self):
np_raster_meanIntertidal_easting = self.rEasting_origin\
+ self.np_raster_meanIntertidal_x*self.rPixelResolution_x;
np_raster_meanIntertidal_northing = self.rNorthing_origin\
+ self.np_raster_meanIntertidal_y*self.rPixelResolution_y;
output_meanIntertidalPoints = np.column_stack((
self.np_raster_meanIntertidal_crossSection_ID,
self.np_raster_meanIntertidal_ixCrossSection,
self.np_raster_meanIntertidal_x,
self.np_raster_meanIntertidal_y,
np_raster_meanIntertidal_easting,
np_raster_meanIntertidal_northing,
self.np_raster_meanIntertidal_topo
));
return pd.DataFrame(
data=output_meanIntertidalPoints,
columns=self.marker_colsHeader
);
def get_meanHighWaterPoints_over_complete_raster(self):
np_raster_meanHighWater_easting = self.rEasting_origin\
+ self.np_raster_meanHighWater_x*self.rPixelResolution_x;
np_raster_meanHighWater_northing = self.rNorthing_origin\
+ self.np_raster_meanHighWater_y*self.rPixelResolution_y;
output_meanHighWaterPoints = np.column_stack((
self.np_raster_meanHighWater_crossSection_ID,
self.np_raster_meanHighWater_ixCrossSection,
self.np_raster_meanHighWater_x,
self.np_raster_meanHighWater_y,
np_raster_meanHighWater_easting,
np_raster_meanHighWater_northing,
self.np_raster_meanHighWater_topo
));
return pd.DataFrame(
data=output_meanHighWaterPoints,
columns=self.marker_colsHeader
);
def get_highestAstronomicalTidePoints_over_complete_raster(self):
np_raster_highestAstronomicalTide_easting = self.rEasting_origin\
+ self.np_raster_highestAstronomicalTide_x*self.rPixelResolution_x;
np_raster_highestAstronomicalTide_northing = self.rNorthing_origin\
+ self.np_raster_highestAstronomicalTide_y*self.rPixelResolution_y;
output_highestAstronomicalTidePoints = np.column_stack((
self.np_raster_highestAstronomicalTide_crossSection_ID,
self.np_raster_highestAstronomicalTide_ixCrossSection,
self.np_raster_highestAstronomicalTide_x,
self.np_raster_highestAstronomicalTide_y,
np_raster_highestAstronomicalTide_easting,
np_raster_highestAstronomicalTide_northing,
self.np_raster_highestAstronomicalTide_topo
));
return pd.DataFrame(
data=output_highestAstronomicalTidePoints,
columns=self.marker_colsHeader
);
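# Usage sketch (hypothetical raster and cross-section coordinate arrays; the search
# intervals are topography ranges in the raster's vertical datum):
#   wrl = WaterReferenceLinesOnRaster(
#       raster_pixel_resolution=(1.0, -1.0),
#       raster_origin_refEN=(500000.0, 5700000.0),
#       raster_topo_raw=topo_array,
#       searchInterval_meanHighWater=(1.8, 2.2),
#   )
#   wrl.run_through_crossSections_over_raster(start_x, start_y, end_x, end_y)
#   mhw_points = wrl.get_meanHighWaterPoints_over_complete_raster()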
|
python
|
# 1. This module is only responsible for the views
import os
import datetime
from flask import render_template
from main import app
from models import Curriculum  # import this model (table)
from flask import redirect  # redirect, i.e. Django's redirect functionality
import functools
from flask import session
from models import *
class Calendar:
"""
当前类实现日历功能
1、返回列表嵌套列表的日历
2、安装日历格式打印日历
# 如果一号周周一那么第一行1-7号 0
# 如果一号周周二那么第一行empty*1+1-6号 1
# 如果一号周周三那么第一行empty*2+1-5号 2
# 如果一号周周四那么第一行empty*3+1-4号 3
# 如果一号周周五那么第一行empyt*4+1-3号 4
# 如果一号周周六那么第一行empty*5+1-2号 5
# 如果一号周日那么第一行empty*6+1号 6
# 输入 1月
# 得到1月1号是周几
# [] 填充7个元素 索引0对应周一
# 返回列表
# day_range 1-30
"""
def __init__(self,month = "now"):
self.result = []
big_month = [1, 3, 5, 7, 8, 10, 12]
small_month = [4, 6, 9, 11]
        # get the current month
now = datetime.datetime.now()
if month == "now":
month = now.month
first_date = datetime.datetime(now.year, now.month, 1, 0, 0)
            # year, month, day, hour, minute
else:
#assert int(month) in range(1,13)
first_date = datetime.datetime(now.year,month, 1, 0, 0)
if month in big_month:
            day_range = range(1, 32)  # total number of days in the specified month
elif month in small_month:
day_range = range(1, 31)
else:
day_range = range(1, 29)
        # get the number of days in the specified month
        self.day_range = list(day_range)
        first_week = first_date.weekday()  # weekday of the 1st of the specified month (0=Monday, e.g. 6=Sunday)
        line1 = []  # data for the first row
for e in range(first_week):
line1.append("empty")
for d in range(7 - first_week):
line1.append(
str(self.day_range.pop(0))+"—django开发"
)
self.result.append(line1)
        while self.day_range:  # keep looping while the day list still has entries
            line = []  # each sub-list (one week row)
for i in range(7):
if len(line) < 7 and self.day_range:
line.append(str(self.day_range.pop(0))+"—django开发")
else:
line.append("empty")
self.result.append(line)
def return_month(self):
"""
返回列表嵌套列表的日历
"""
return self.result
def print_month(self):
"""
        Print the calendar in a calendar layout
"""
print("星期一 星期二 星期三 星期四 星期五 星期六 星期日")
for line in self.result:
for day in line:
day = day.center(6)
print(day, end=" ")
print()
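# Usage sketch for the Calendar helper (assumed values, not part of the original views):
#   cal = Calendar(month=5)
#   weeks = cal.return_month()  # list of week lists, each with 7 entries
#   cal.print_month()           # prints a weekday-aligned grid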
def loginValid(fun):  # this is a decorator
    @functools.wraps(fun)  # preserve the original function's name
    def inner(*args,**kwargs):
        username = request.cookies.get('username')  # in Django this would be the upper-case COOKIES
        id = request.cookies.get('id','0')  # defaults to '0'; if it is 0 the lookup below finds no user
        user=User.query.get(int(id))  # fetch the record with this id from the database
        session_username = session.get('username')  # read the session -- dict-style .get works here
        if user:  # check whether a user with this id exists
            if user.user_name == username and username == session_username:  # check the user matches
return fun(*args,**kwargs)
else:
return redirect('/login/')
else:
return redirect('/login')
return inner
@app.route("/")#再进行路由
@loginValid#先执行这个
def index():
name = "laojiu"
return render_template("index.html",**locals())
@app.route("/login/",methods=['GET','POST'])
def login():
    error = ''  # defined here so both GET and POST requests have an error value available
if request.method == 'POST':
form_data = request.form
email = form_data.get('email')
password = form_data.get('password')
        # form validation below
user = User.query.filter_by(email=email).first()
if user:
db_password = user.password
if password == db_password:
response = redirect('/index/')
response.set_cookie('username',user.user_name)
response.set_cookie('email',user.email)
response.set_cookie('id',str(user.id))
                session['username'] = user.user_name  # set the session
                return response  # the cookies set above are validated on subsequent requests
else:
error = '密码错误'
else:
error = '用户名不存在'
return render_template("login.html",error = error)
@app.route('/logout/',methods=['GET','POST'])
def logout():
response = redirect('/login/')
response.delete_cookie('username')
response.delete_cookie('email')
response.delete_cookie('id')
    session.pop('username')  # delete the session, method 1
    # del session['username']  # method 2
return response
@app.route("/base/")
def base():
return render_template("base.html")
@app.route("/index/")
def exindex():
# c = Curriculum()
# c.c_id = '0001'
# c.c_name = 'c++基础'
# c.c_time = datetime.datetime.now()
# c.save()
curr_list=Curriculum.query.all()
return render_template("ex_index.html",curr_list=curr_list)
@app.route("/userinfo/")
def userinfo():
calendar = Calendar().return_month()
now = datetime.datetime.now()
return render_template("userinfo.html",**locals())
from flask import request
from models import User
@app.route('/register/',methods=['GET','POST'])
def register():
    '''
    Data submitted by the form is received via request.form.
    `request` is imported from flask (see the import above).
    :return:
    '''
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
user = User()
user.user_name = username
user.password = password
user.email = email
user.save()
return render_template('register.html')
@app.route('/holiday_leave/',methods=['GET','POST'])
def holiday_leave():
if request.method == 'POST':
data = request.form
request_user = data.get('request_user')
request_type = data.get('request_type')
start_time = data.get('start_time')
end_time = data.get('end_time')
phone = data.get('phone')
request_description = data.get('request_description')
leave = Leave()
        leave.request_id = request_user  # requester id (from the form)
        leave.request_type = request_type  # leave type
        leave.request_start_time = start_time  # start time
        leave.request_end_time = end_time  # end time
        leave.request_description = request_description  # reason for the leave
        leave.request_phone = phone  # contact phone
        leave.request_status = 'pending'  # leave slip status; initial value assumed here
        leave.save()
return render_template('holiday_leave.html')
# # Added on 9-27
# @app.route('/picture/',methods='GET','POST')
# def picture():
# p = {'picture':'img/1.jpg'}
# if request.method == 'POST':
# file = request.files.get('photo')
# file_name = file.filename
# file_path = 'img/%s'%file_name
# file_path = os.path.join(STATICFILES_DIR,'img/%s%filename')
# file.save(file_path)
# p = Picture()
# p.picture = file_path
# p.save()
#
# return render_template('picture.html',p = p)
# from main import api
# from flask_restful import Resource
#
# @api.resource('/Api/v1/leave/')
# class leaveApi(Resource):
# def get(self):  # read
# return {'method':'this is the GET request; it returns all the data'}
# def post(self):  # create
# data = request.form
# request_id = data.get('request_id')
# request_name = data.get('request_name')
# request_type = data.get('request_type')
# request_start_time = data.get('request_start_time')
# request_end_time = data.get('request_end_time')
# request_description = data.get('request_description')
# request_phone = data.get('request_phone')
# request_status = data.get('request_status')
#
# leave = Leave()
# leave.request_id = request_id
# leave.request_name = request_name
# leave.request_type = request_type
# leave.request_start_time = request_start_time
# leave.request_end_time = request_end_time
# leave.request_description = request_description
# leave.request_phone = request_phone
# leave.request_status = request_status
# return {'method':'responsible for saving the data'}
# def put(self):  # update
# return {'method':'responsible for modifying the data'}
# def delete(self):  # delete
# return {'method':'responsible for deleting the data'}
|
python
|
from kaplot import *
import pymedia.audio.acodec as acodec
import pymedia.muxer as muxer
import sys
import wave
if False:
file = open(sys.argv[1], 'rb')
rawdata = file.read()
wfile = wave.open('test.wav', 'wb')
print muxer.extensions
demuxer = muxer.Demuxer('mp3')
frames = demuxer.parse(rawdata)
decoder = acodec.Decoder(demuxer.streams[0]) # mp3 always got 1 audio stream
audio = decoder.decode(frames[0][1])
print audio.sample_rate, audio.channels
wfile.setparams((audio.channels, 2, audio.sample_rate, 0, 'NONE', ''))
for frame in frames[:10]:
#import pdb; pdb.set_trace()
audio = decoder.decode(frame[1])
wfile.writeframes(audio.data)
sys.exit(0)
wavefile = wave.open(sys.argv[1], 'rb')
print "channels: ", wavefile.getnchannels()
print "getsampwidth: ", wavefile.getsampwidth()
print "getframerate: ", wavefile.getframerate()
print "getnframes: ", wavefile.getnframes()
print "getparams: ", wavefile.getparams()
length = 1 # seconds
if wavefile.getsampwidth() == 2:
rawdata = wavefile.readframes(length*wavefile.getframerate() * wavefile.getnchannels())
data = fromstring(rawdata, Int16)
datal = data[arange(len(data)/2) * 2]
datar = data[arange(len(data)/2) * 2+1]
else:
print "unsupported sample width", wavefile.getsampwidth()
sys.exit(0)
interval = 1.0/wavefile.getframerate()
#interval = 0.1
#x = arange(0, 4.+interval/2, interval)
#x = arange(0, 1, interval)
#y = 1+sin(x*2*pi) + sin(x*30*2*pi) + sin(x*10*2*pi)
#y = 0.1+sin(x *2.5 * 2 * pi)
b = box()
#plot(x, y)
ftspectrum(datal, T=interval, shifted=False, logarithmic=False)
#b.world = (0,0), (2000, 0.05)
b.setRange(0, 2000)
b.grow(1.1)
print b.world
#b.ylogarithmic = True
#b.xinterval = 10
#b.xinteger = True
show()
|
python
|
import numpy as np
import pandas as pd
import json
from itertools import groupby
from collections import namedtuple
# NodeEdgeIncidence = namedtuple('NodeEdgeIncidence', ['nid', 'role', 'eid','meta'])
class NodeEdgeIncidence(object):
__slots__ = ("nid", "role", "eid", "meta")
def __init__(self, nid, role, eid, meta):
self.nid = nid
self.role = role
self.eid = eid
self.meta = meta
def __repr__(self):
return "NodeEdgeIncidence({})".format(
", ".join(
[
"{}={}".format(key, getattr(self, key))
for key in self.__class__.__slots__
]
)
)
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
return setattr(self, key, value)
def to_dict(self):
return {key: getattr(self, key) for key in self.__class__.__slots__}
def incidence_list_from_records(data, role_fields):
"""
Construct annotated node-edge incidence list from list of records.
An entry [i,l,e,m] of this list states that "node i has role l in edge e with metadata m"
Input:
data [list]: A list of records (JSON-like)
role_fields [list]: A list of role labels
Output:
IL [list]: A list of node-edge incidence records
"""
IL = []
for role in role_fields:
for record in data:
for i in record[role]:
new_row = NodeEdgeIncidence(
i, role, record["eid"], {"date": record["date"]}
)
IL.append(new_row)
return IL
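# Minimal usage sketch (hypothetical record; role and meta field names follow the docstring above):
#   data = [{"eid": 7, "date": "2020-01-01", "author": [1, 2], "reviewer": [3]}]
#   IL = incidence_list_from_records(data, role_fields=["author", "reviewer"])
#   # -> [NodeEdgeIncidence(nid=1, role=author, eid=7, meta={'date': '2020-01-01'}), ...]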
def records_from_incidence_list(IL, role_fields):
"""
Construct list of records from node-edge incidence list.
"""
IL = sorted(IL, key=lambda x: x.eid, reverse=True)
chunked = [list(v) for eid, v in groupby(IL, lambda x: x.eid)]
records = []
for chunk in chunked:
record = {role: [] for role in role_fields}
record["eid"] = chunk[0].eid
record["date"] = chunk[0].meta[
"date"
] # TODO: We need to accommodate other fields in the future
for line in chunk:
record[line.role].append(line.nid)
records.append(record)
return sorted(records, key=lambda x: x["eid"])
def normalise_counters(counters):
"""Normalise a dictionary of counters inplace."""
for node, d in counters.items():
total = sum(d.values())
if total == 0.0:
counters[node] = None
else:
for key in d:
d[key] /= total
def entropy(iterable):
""" Calculates the entropy of an iterable. """
if iterable[0] is None:
return None
v = np.array([p for p in iterable if p > 0])
v = v / v.sum()
return (-(v * np.log2(v)).sum())
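# Worked example (a fair two-way split carries exactly 1 bit of entropy):
#   entropy([0.5, 0.5])  -> 1.0
#   entropy([1.0, 0.0])  -> 0.0  (zero probabilities are dropped before taking the log)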
def average_entropy(func):
"""
Takes a function that returns one or more probability distributions and returns an average entropy.
"""
def avg_entropy(args, **kwargs):
density = pd.DataFrame(func(args, **kwargs)).T
return density.apply(entropy, axis=1).mean()
return avg_entropy
def average_value(func):
"""
Converts function output to a Pandas series and takes the mean.
"""
def avg_value(args, **kwargs):
return pd.Series(func(args, **kwargs)).mean()
return avg_value
def variance_value(func):
"""
Converts function output to a Pandas series and takes the variance.
"""
def var_value(args, **kwargs):
return pd.Series(func(args, **kwargs)).var()
return var_value
def entropy_value(func):
"""
Converts function output to a Pandas series and calculates the entropy.
"""
def ent_value(args, **kwargs):
values = pd.Series(func(args, **kwargs))
values = values / values.sum()
return entropy(values)
return ent_value
def sort_matrix(A, v):
    """Sort the rows and columns of the square matrix A by the ordering of the values in v."""
    row_sorted = A[np.argsort(v)]
    col_sorted = row_sorted[:, np.argsort(v)]
    return col_sorted
|
python
|
from copy import deepcopy
import csv
from enum import Enum
import inspect
from os.path import join as joinpath
from pprint import pprint
from sys import version_info
from typing import Any, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING, TypeVar, Union
import dictdiffer
from pandas import DataFrame
import stringcase
from mstrio import config
from mstrio.api import objects
from mstrio.api.exceptions import VersionException
from mstrio.connection import Connection
from mstrio.types import ObjectTypes, ObjectSubTypes, ExtendedType # noqa
from mstrio.utils import helper
from mstrio.utils.acl import ACE, ACLMixin, Rights
from mstrio.utils.dependence_mixin import DependenceMixin
from mstrio.utils.time_helper import bulk_str_to_datetime, DatetimeFormats, map_str_to_datetime
if TYPE_CHECKING:
from mstrio.server import Project
T = TypeVar("T")
class EntityBase(helper.Dictable):
"""This class is for objects that do not have a specified MSTR type.
Attributes:
connection: A MicroStrategy connection object
id: Object ID
type: Object type set to None
name: Object name
"""
_OBJECT_TYPE: ObjectTypes = ObjectTypes.NONE # MSTR object type defined in ObjectTypes
_REST_ATTR_MAP: Dict[str, str] = {}
_API_GETTERS: Dict[Union[str, tuple], Callable] = {}
_FROM_DICT_MAP: Dict[str, Callable] = {
'type': ObjectTypes
} # map attributes to Enums and Composites
_AVAILABLE_ATTRIBUTES: Dict[str, type] = {} # fetched on runtime from all Getters
_API_PATCH: dict = {}
_PATCH_PATH_TYPES: Dict[str, type] = {} # used in update_properties method
_API_DELETE: Callable = staticmethod(objects.delete_object)
def __init__(self, connection: Connection, object_id: str, **kwargs) -> None:
self._init_variables(connection=connection, id=object_id, **kwargs)
if config.fetch_on_init and self._find_func("id") is not None:
self.fetch("id")
if config.verbose:
print(self)
def _init_variables(self, **kwargs):
"""Initialize variables given kwargs.
Note: attributes not accepted by any implementation of this function
in the inheritance chain will be disregarded.
"""
kwargs = self._rest_to_python(kwargs)
# create _AVAILABLE_ATTRIBUTES map
self._AVAILABLE_ATTRIBUTES.update({key: type(val) for key, val in kwargs.items()})
self._connection = kwargs.get("connection")
self._id = kwargs.get("id")
self._type = self._OBJECT_TYPE if 'type' in self._FROM_DICT_MAP else kwargs.get(
"type", self._OBJECT_TYPE)
self.name = kwargs.get("name")
self._altered_properties = dict()
def fetch(self, attr: Optional[str] = None) -> None:
"""Fetch the latest object state from the I-Server.
Note:
This method can overwrite local changes made to the object.
Args:
attr: Attribute name to be fetched.
Raises:
ValueError: if `attr` cannot be fetched.
"""
functions = self._API_GETTERS # by default fetch all endpoints
if attr is not None: # if attr is specified fetch endpoint matched to the attribute name
function = self._find_func(attr)
if not function:
raise ValueError(f"The attribute `{attr}` cannot be fetched for this object")
else:
functions = {attr: func for attr, func in functions.items() if func == function}
for key, func in functions.items(): # call respective API getters
param_value_dict = auto_match_args_entity(func, self)
response = func(**param_value_dict)
if response.ok:
response = response.json()
if type(response) == dict:
object_dict = {
key if isinstance(key, str) and len(response) == 1 else k: v
for k, v in response.items()
}
self._set_object(**object_dict)
elif type(response) == list:
self._set_object(**{key: response})
# TODO: consider changing camel_to_snake logic to work with
# list of keys
# keep track of fetched attributes
self._add_to_fetched(key)
def _add_to_fetched(self, keys: Union[str, tuple]) -> None:
if isinstance(keys, str):
keys = [keys]
for key in keys:
key = key[1:] if key.startswith("_") else key
self._fetched_attributes.add(key)
@classmethod
def _find_func(cls, attr: str) -> Optional[Callable]:
"""Try to find API endpoint in `cls._API_GETTERS` responsible for chosen
attribute.
Returns: Function or None if function not found
"""
if not isinstance(attr, str):
raise TypeError("`attr` parameter has to be of type str")
for attributes, func in cls._API_GETTERS.items():
if isinstance(attributes, str):
if attr == attributes:
return func
elif isinstance(attributes, tuple):
if attr in attributes:
return func
else:
raise NotImplementedError
return None
@classmethod
def _rest_to_python(cls, response: dict) -> dict:
"""Map REST API field names to Python API field names."""
for rest_name, python_name in cls._REST_ATTR_MAP.items():
if rest_name in response:
old = response.pop(rest_name)
if python_name:
response[python_name] = old
return response
@classmethod
def _python_to_rest(cls, response: dict) -> dict:
"""Map Python API field names to REST API field names."""
for rest_name, python_name in cls._REST_ATTR_MAP.items():
if python_name and python_name in response:
response[rest_name] = response.pop(python_name)
return response
def _set_object(self, **kwargs) -> None:
"""Set object attributes programmatically by providing keyword args.
Support ENUMs and creating component objects."""
object_info = helper.camel_to_snake(kwargs)
object_info = self._rest_to_python(object_info)
properties = set(
(elem[0]
for elem in inspect.getmembers(self.__class__, lambda x: isinstance(x, property))))
for key, val in object_info.items(): # type: ignore
# if self is a composite, create component instance
if key in self._FROM_DICT_MAP:
# determine which constructor will be used
if isinstance(self._FROM_DICT_MAP[key], DatetimeFormats):
val = map_str_to_datetime(key, val, self._FROM_DICT_MAP)
elif isinstance(self._FROM_DICT_MAP[key], type(Enum)):
val = self._FROM_DICT_MAP[key](val)
elif isinstance(self._FROM_DICT_MAP[key], list) and val is not None:
if isinstance(self._FROM_DICT_MAP[key][0], type(Enum)):
val = [self._FROM_DICT_MAP[key][0](v) for v in val]
else:
val = [
self._FROM_DICT_MAP[key][0](source=v, connection=self.connection)
for v in val
]
else:
val = self._FROM_DICT_MAP[key](source=val, connection=self.connection)
# create _AVAILABLE_ATTRIBUTES map
self._AVAILABLE_ATTRIBUTES.update({key: type(val)})
# check if attr is read-only and if yes return '_' version of it
if key in properties:
key = "_" + key
setattr(self, key, val)
def list_properties(self) -> dict:
"""List all properties of the object."""
if hasattr(self, "_API_GETTERS"): # fetch attributes not loaded on init
attr = [
attr for attr in self._API_GETTERS.keys() if isinstance(attr, str) # type: ignore
]
for a in attr:
try:
getattr(self, a)
except VersionException:
pass
properties = inspect.getmembers(self.__class__, lambda x: isinstance(x, property))
properties = {elem[0]: elem[1].fget(self) for elem in properties}
attributes = {key: val for key, val in vars(self).items() if not key.startswith('_')}
attributes = {**properties, **attributes}
return {
key: attributes[key] for key in sorted(attributes, key=helper.sort_object_properties)
}
def to_dataframe(self) -> DataFrame:
"""Return a `DataFrame` object containing object properties."""
return DataFrame.from_dict(self.list_properties(), orient='index', columns=['value'])
def print(self) -> None:
"""Pretty Print all properties of the object."""
if version_info.major >= 3 and version_info.minor >= 8:
pprint(self.list_properties(), sort_dicts=False) # type: ignore
else:
pprint(self.list_properties())
@classmethod
def from_dict(cls: T, source: Dict[str, Any], connection: Connection,
to_snake_case: bool = True) -> T:
"""Instantiate an object from response without calling any additional
getters."""
obj = cls.__new__(cls) # Does not call __init__
object_source = helper.camel_to_snake(source) if to_snake_case else source
obj._init_variables(connection=connection, **object_source)
return obj
def __str__(self):
if self.__dict__.get("name"):
return "{} object named: '{}' with ID: '{}'".format(self.__class__.__name__, self.name,
self.id)
else:
return "{} object with ID: '{}'".format(self.__class__.__name__, self.id)
def __repr__(self):
param_value_dict = auto_match_args_entity(self.__init__, self, exclude=['self'],
include_defaults=False)
params_list = []
for param, value in param_value_dict.items():
if param == "connection" and isinstance(value, Connection):
params_list.append("connection")
else:
params_list.append(f"{param}={repr(value)}")
formatted_params = ", ".join(params_list)
return f"{self.__class__.__name__}({formatted_params})"
def __hash__(self):
return hash((self.id, self._OBJECT_TYPE.value))
def __eq__(self, other):
"""Equals operator to compare if an entity is equal to another object.
We define 2 Entities as being equal if:
1. the other object is an Entity and also shares the same id
2. the other object is a string and is equal to either the name or
id of the Entity
This allows us to search for Entities in a collection by specifying its
id, name, or another Entity itself. For example:
if "user" in users: ...
if "28ECA8BB11D5188EC000E9ABCA1B1A4F" in users: ...
"""
if isinstance(other, str):
return self.id == other or self.name == other
elif isinstance(other, type(self)):
return self.id == other.id
elif isinstance(other, dict):
# check all items in other dict equal entity object attributes
return all(
(getattr(self, k) == v if hasattr(self, k) else False for k, v in other.items()))
else:
return NotImplemented # don't attempt to compare against unrelated types
@classmethod
def to_csv(cls: T, objects: Union[T, List[T]], name: str, path: Optional[str] = None,
properties: Optional[List[str]] = None) -> None:
"""Export MSTR objects to a csv file.
Optionally, save only the object properties specified in the properties
parameter.
Args:
objects: List of objects of the same type that will be exported
name: name of the csv file ending with '.csv'
path: path to the directory where the file will be saved
properties: list of object attribute names that should be included
in the exported file
"""
file = joinpath(path, name) if path else name
list_of_objects = []
if not name.endswith('.csv'):
msg = ("The file extension is different than '.csv', please note that using a "
"different extension might disrupt opening the file correctly.")
helper.exception_handler(msg, exception_type=Warning)
if isinstance(objects, cls):
properties_dict = objects.list_properties()
if properties:
list_of_objects.append(
{key: value for key, value in properties_dict.items() if key in properties})
else:
list_of_objects.append(properties_dict)
elif isinstance(objects, list):
if not properties:
properties = objects[0].list_properties().keys()
for obj in objects:
if isinstance(obj, cls):
list_of_objects.append({
key: value
for key, value in obj.list_properties().items()
if key in properties
})
else:
helper.exception_handler(
f"Object '{obj}' of type '{type(obj)}' is not supported.",
exception_type=Warning)
else:
raise TypeError((f"Objects should be of type {cls.__name__} or "
f"list of {cls.__name__}."))
with open(file, 'w') as f:
fieldnames = list_of_objects[0].keys()
w = csv.DictWriter(f, fieldnames=fieldnames)
w.writeheader()
w.writerows(list_of_objects)
if config.verbose:
print(f"Object exported successfully to '{file}'")
def is_modified(self, to_list: bool = False) -> Union[bool, list]:
# TODO decide if needed or just deprecate
"""Compare the current object to the object on I-Server.
Args:
to_list: If True, return a list of tuples with object differences
"""
temp = deepcopy(self)
temp.fetch()
differences = list(dictdiffer.diff(temp.__dict__, self.__dict__))
if len(differences) == 0:
if config.verbose:
print("There are no differences between local and remote '{}' object.".format(
ObjectTypes(self.type).name))
return differences if to_list else False
else:
return differences if to_list else True
def update_properties(self) -> None:
"""Save compatible local changes of the object attributes to the
I-Server.
Raises:
requests.HTTPError: if I-Server raises exception
"""
changes = {k: v[1] for k, v in self._altered_properties.items()}
self._alter_properties(**changes)
self._altered_properties.clear()
def _send_proper_patch_request(self, properties: dict, op: str = 'replace') -> List[bool]:
"""Internal method to update objects with the specified patch wrapper.
Used for adding and removing objects from nested properties of an
object like memberships.
Args:
properties: dictionary of required changes
op: operation type, 'replace' by default
Returns:
List of successful or unsuccessful requests.
"""
changed = []
camel_properties = helper.snake_to_camel(properties)
for attrs, (func, func_type) in self._API_PATCH.items():
body = {}
if func_type == 'partial_put':
for name, value in camel_properties.items():
if stringcase.snakecase(name) in attrs:
value = self._maybe_unpack(name, value, camel_case=True)
body[name] = self._validate_type(name, value)
elif func_type == 'put': # Update using the generic update_object()
for name, value in properties.items():
if name in attrs:
setattr(self, name, self._validate_type(name, value))
body = self.to_dict()
elif func_type == 'patch':
body = {"operationList": []}
for name, value in camel_properties.items():
if stringcase.snakecase(name) in attrs:
value = self._maybe_unpack(name, value, camel_case=True)
body['operationList'].append({
"op": op,
"path": "/{}".format(name),
"value": self._validate_type(name, value)
})
else:
msg = f"{func} function is not supported by `_send_proper_patch_request`"
raise NotImplementedError(msg)
if not body:
continue
# send patch request from the specified update wrapper
param_value_dict = auto_match_args_entity(func, self, exclude=["body"])
param_value_dict['body'] = body
response = func(**param_value_dict)
if response.ok:
changed.append(True)
response = response.json()
if type(response) == dict:
self._set_object(**response)
else:
changed.append(False)
return changed
def _alter_properties(self, **properties) -> None:
"""Generic alter method that has to be implemented in child classes
where arguments will be specified."""
if not properties:
if config.verbose:
print(f"No changes specified for {type(self).__name__} '{self.name}'.")
return None
changed = self._send_proper_patch_request(properties)
if config.verbose and all(changed):
msg = (f"{type(self).__name__} '{self.name}' has been modified on the server. "
f"Your changes are saved locally.")
print(msg)
def _update_nested_properties(self, objects, path: str, op: str,
existing_ids: Optional[List[str]] = None) -> Tuple[str, str]:
"""Internal method to update objects with the specified patch wrapper.
Used for adding and removing objects from nested properties of an
object like memberships.
Returns:
IDs of succeeded and failed operations by filtering by existing IDs.
"""
from mstrio.access_and_security.privilege import Privilege
# check whether existing_ids are supplied
if existing_ids is None:
existing_ids = [obj.get('id') for obj in getattr(self, path)]
# create list of objects from strings/objects/lists
objects_list = objects if isinstance(objects, list) else [objects]
object_map = {obj.id: obj.name for obj in objects_list if isinstance(obj, Entity)}
object_ids_list = [
obj.id if isinstance(obj, (EntityBase, Privilege)) else str(obj)
for obj in objects_list
]
# check if objects can be manipulated by comparing to existing values
if op == "add":
filtered_object_ids = sorted(
list(filter(lambda x: x not in existing_ids, object_ids_list)))
elif op == "remove":
filtered_object_ids = sorted(list(filter(lambda x: x in existing_ids,
object_ids_list)))
if filtered_object_ids:
properties = {path: filtered_object_ids}
self._send_proper_patch_request(properties, op)
failed = list(sorted(set(object_ids_list) - set(filtered_object_ids)))
failed_formatted = [object_map.get(object_id, object_id) for object_id in failed]
succeeded_formatted = [
object_map.get(object_id, object_id) for object_id in filtered_object_ids
]
return (succeeded_formatted, failed_formatted)
def _validate_type(self, name: str, value: T) -> T:
"""Validates whether the attribute is set using correct type.
Raises:
TypeError if incorrect.
"""
type_map = {**self._AVAILABLE_ATTRIBUTES, **self._PATCH_PATH_TYPES}
value_type = type_map.get(name, 'Not Found')
if value_type != 'Not Found' and type(value) != value_type:
raise TypeError(f"'{name}' has incorrect type. Expected type: '{value_type}'")
return value
def __setattr__(self, name: str, value: Any) -> None:
"""Overloads the __setattr__ method to validate if this attribute can
be set for current object and verify value data types."""
def track_changes():
already_tracked = name in self._altered_properties
if already_tracked:
current_val = self._altered_properties[name][0]
elif name in self.__dict__:
current_val = self.__dict__[name]
else:
current_val = None
is_changed = not (type(current_val) is type(value) and current_val == value)
if is_changed:
self._altered_properties.update({name: (current_val, value)})
elif already_tracked:
del self._altered_properties[name]
# Keep track of changed properties if value is already fetched
if hasattr(self, "_fetched_attributes"):
if name in self._PATCH_PATH_TYPES and name in self._fetched_attributes:
track_changes()
# if value not equal to None then treat as fetched
if value is not None:
self._add_to_fetched(name)
super(EntityBase, self).__setattr__(name, value)
def __getattribute__(self, name: str) -> Any:
"""Fetch attributes if not fetched."""
val = super(EntityBase, self).__getattribute__(name)
if name in ["_fetched_attributes", "_find_func"]:
return val
if not hasattr(self, "_fetched_attributes"):
self._fetched_attributes = set()
if hasattr(self, "_fetched_attributes") and hasattr(self, "_find_func"):
_name = name[1:] if name.startswith("_") else name
was_fetched = _name in self._fetched_attributes
can_fetch = self._find_func(_name) is not None and "id" in self._fetched_attributes
if can_fetch and not was_fetched:
self.fetch(_name) # fetch the relevant object data
val = super(EntityBase, self).__getattribute__(name)
return val
def _set_dates(self, **kwargs):
"""Transform all date strings provided in kwargs to datetime object.
Date format strings have to be provided in `_FROM_DICT_MAP`"""
kwargs = bulk_str_to_datetime(kwargs, self._FROM_DICT_MAP)
for key, val in self._FROM_DICT_MAP.items():
if isinstance(val, DatetimeFormats):
if kwargs.get(key):
setattr(self, key, kwargs.get(key))
elif len(key) > 1 and key[0] == '_' and kwargs.get(key[1:]):
setattr(self, key, kwargs.get(key[1:]))
# TODO add docstrings
@property
def connection(self) -> Connection:
return self._connection
@property
def id(self) -> str:
return self._id
@property
def type(self) -> ObjectTypes:
return self._type
class Entity(EntityBase, ACLMixin, DependenceMixin):
"""Base class representation of the MSTR object.
Provides methods to fetch, update, and view the object. To implement
this base class all class attributes have to be provided.
Attributes:
connection: A MicroStrategy connection object
id: Object ID
name: Object name
description: Object description
abbreviation: Object abbreviation
type: Object type
subtype: Object subtype
ext_type: Object extended type
date_created: Creation time, DateTime object
date_modified: Last modification time, DateTime object
version: Version ID
owner: Owner ID and name
icon_path: Object icon path
view_media: View media settings
ancestors: List of ancestor folders
        certified_info: Certification status, time of certification, and
information about the certifier (currently only for document and
report)
acg: Access rights (See EnumDSSXMLAccessRightFlags for possible values)
acl: Object access control list
"""
_API_GETTERS: Dict[Union[str, tuple], Callable] = {
('id', 'name', 'description', 'abbreviation', 'type', 'subtype', 'ext_type',
'date_created', 'date_modified', 'version', 'owner', 'icon_path', 'view_media',
'ancestors', 'certified_info', 'acg', 'acl', 'comments', 'project_id', 'hidden',
'target_info'): objects.get_object_info
}
_API_PATCH: dict = {
('name', 'description', 'abbreviation'): (objects.update_object, 'partial_put')
}
_PATCH_PATH_TYPES = {"name": str, "description": str, "abbreviation": str}
_FROM_DICT_MAP = {
**EntityBase._FROM_DICT_MAP,
'ext_type': ExtendedType,
'date_created': DatetimeFormats.FULLDATETIME,
'date_modified': DatetimeFormats.FULLDATETIME,
'acl': [ACE.from_dict],
'acg': Rights,
}
def _init_variables(self, **kwargs) -> None:
"""Initialize variables given kwargs."""
from mstrio.users_and_groups.user import User
from mstrio.utils.certified_info import CertifiedInfo
super(Entity, self)._init_variables(**kwargs)
self._date_created = map_str_to_datetime("date_created", kwargs.get("date_created"),
self._FROM_DICT_MAP)
self._date_modified = map_str_to_datetime("date_modified", kwargs.get("date_modified"),
self._FROM_DICT_MAP)
self.description = kwargs.get("description")
self.abbreviation = kwargs.get("abbreviation")
self._subtype = kwargs.get("subtype")
self._ext_type = ExtendedType(kwargs["ext_type"]) if kwargs.get("ext_type") else None
self._version = kwargs.get("version")
self._owner = User.from_dict(
kwargs.get("owner"),
self.connection,
) if kwargs.get("owner") else None
self._icon_path = kwargs.get("icon_path")
self._view_media = kwargs.get("view_media")
self._ancestors = kwargs.get("ancestors")
self._certified_info = CertifiedInfo.from_dict(
kwargs.get("certified_info"),
self.connection) if kwargs.get("certified_info") else None
self._hidden = kwargs.get("hidden")
self._project_id = kwargs.get("project_id")
self._comments = kwargs.get("comments")
self._target_info = kwargs.get("target_info")
self._acg = Rights(kwargs.get("acg")) if kwargs.get("acg") else None
self._acl = ([ACE.from_dict(ac, self._connection) for ac in kwargs.get("acl")]
if kwargs.get("acl") else None)
# TODO add docstrings to all properties
@property
def subtype(self):
return self._subtype
@property
def ext_type(self):
return self._ext_type
@property
def date_created(self):
return self._date_created
@property
def date_modified(self):
return self._date_modified
@property
def version(self):
return self._version
@property
def owner(self):
return self._owner
@property
def icon_path(self):
return self._icon_path
@property
def view_media(self):
return self._view_media
@property
def ancestors(self):
return self._ancestors
@property
def certified_info(self):
return self._certified_info
@property
def acg(self):
return self._acg
@property
def acl(self):
return self._acl
@property
def hidden(self):
return self._hidden
@property
def project_id(self):
return self._project_id
@property
def comments(self):
return self._comments
@property
def target_info(self):
return self._target_info
class CopyMixin:
"""CopyMixin class adds creating copies of objects functionality.
Currently project objects are not supported. Must be mixedin with
Entity or its subclasses.
"""
def create_copy(self: Entity, name: Optional[str] = None, folder_id: Optional[str] = None,
project: Optional[Union["Project", str]] = None,
application: Optional[Union["Project", str]] = None) -> Any:
"""Create a copy of the object on the I-Server.
Args:
name: New name of the object. If None, a default name is generated,
such as 'Old Name (1)'
folder_id: ID of the destination folder. If None, the object is
saved in the same folder as the source object.
project: By default, the project selected when
creating Connection object. Override `project` to specify
project where the current object exists.
application: deprecated. Use project instead.
Returns:
New python object holding the copied object.
"""
if application:
helper.deprecation_warning(
'`application`',
'`project`',
'11.3.4.101', # NOSONAR
False)
project = project or application
if self._OBJECT_TYPE.value in [32]:
raise NotImplementedError("Object cannot be copied yet.")
# TODO if object uniqueness depends on project_id extract proj_id
# TODO automatically
response = objects.copy_object(self.connection, id=self.id, name=name, folder_id=folder_id,
object_type=self._OBJECT_TYPE.value, project_id=project)
return self.from_dict(source=response.json(), connection=self.connection)
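    # Usage sketch (assumes `doc` is an instance of an Entity subclass that mixes in CopyMixin):
    #   copy = doc.create_copy(name='My copy', folder_id='<destination folder id>')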
class DeleteMixin:
"""DeleteMixin class adds deleting objects functionality.
Must be mixedin with Entity or its subclasses.
"""
    _delete_confirm_msg: Optional[str] = None
    _delete_success_msg: Optional[str] = None
def delete(self: Entity, force: bool = False) -> bool:
"""Delete object.
Args:
force: If True, then no additional prompt will be shown before
deleting object.
Returns:
True on success. False otherwise.
"""
object_name = self.__class__.__name__
user_input = 'N'
if not force:
message = self._delete_confirm_msg or (
f"Are you sure you want to delete {object_name} "
f"'{self.name}' with ID: {self._id}? [Y/N]: ")
user_input = input(message) or 'N'
if force or user_input == 'Y':
param_value_dict = auto_match_args_entity(self._API_DELETE, self)
response = self._API_DELETE(**param_value_dict)
if response.status_code == 204 and config.verbose:
print(self._delete_success_msg
or f"Successfully deleted {object_name} with ID: {self._id}.")
return response.ok
else:
return False
class CertifyMixin:
"""CertifyMixin class adds certifying and decertifying functionality.
    Must be mixed in with Entity or its subclasses.
"""
def _toggle_certification(self: Entity, certify: bool = True, success_msg: str = None) -> bool:
object_name = self.__class__.__name__
expected_result = 'certified' if certify else 'decertified'
self.fetch()
if certify == self.certified_info.certified:
print(f"The {object_name} with ID: '{self._id}' is already {expected_result}")
return True
response = objects.toggle_certification(connection=self._connection, id=self._id,
object_type=self._OBJECT_TYPE.value,
certify=certify)
if response.ok and config.verbose:
self._set_object(**response.json())
print(success_msg
or f"The {object_name} with ID: '{self._id}' has been {expected_result}.")
return response.ok
def certify(self: Entity) -> bool:
"""Certify object.
Args:
success_msg: Custom message displayed on success.
Returns:
True on success, False otherwise."""
return self._toggle_certification(certify=True)
def decertify(self: Entity) -> bool:
"""Decertify object.
Args:
success_msg: Custom message displayed on success.
Returns:
True on success, False otherwise."""
return self._toggle_certification(certify=False)
class VldbMixin:
"""VLDBMixin class adds vldb management for supporting objects.
Objects currently supporting VLDB settings are dataset, document, dossier.
Must be mixedin with Entity or its subclasses.
"""
_parameter_error = "Please specify the project parameter."
def list_vldb_settings(self: Entity, project: Optional[str] = None,
application: Optional[str] = None) -> list:
"""List VLDB settings."""
if application:
helper.deprecation_warning(
'`application`',
'`project`',
'11.3.4.101', # NOSONAR
False)
project = project or application
connection = self.connection if hasattr(self, 'connection') else self._connection
if not project and connection.session.headers.get('X-MSTR-ProjectID') is None:
raise ValueError(self._parameter_error)
response = objects.get_vldb_settings(connection, self.id, self._OBJECT_TYPE.value, project)
return response.json()
def alter_vldb_settings(self: Entity, property_set_name: str, name: str, value: dict,
project: Optional[str] = None,
application: Optional[str] = None) -> None:
"""Alter VLDB settings for a given property set."""
if application:
helper.deprecation_warning(
'`application`',
'`project`',
'11.3.4.101', # NOSONAR
False)
project = project or application
connection = self.connection if hasattr(self, 'connection') else self._connection
if not project and connection.session.headers.get('X-MSTR-ProjectID') is None:
raise ValueError(self._parameter_error)
body = [{"name": name, "value": value}]
response = objects.set_vldb_settings(connection, self.id, self._OBJECT_TYPE.value,
property_set_name, body, project)
if config.verbose and response.ok:
print("VLDB settings altered")
def reset_vldb_settings(self: Entity, project: Optional[str] = None,
application: Optional[str] = None) -> None:
"""Reset VLDB settings to default values."""
if application:
helper.deprecation_warning(
'`application`',
'`project`',
'11.3.4.101', # NOSONAR
False)
project = project or application
connection = self.connection if hasattr(self, 'connection') else self._connection
if not project and connection.session.headers.get('X-MSTR-ProjectID') is None:
raise ValueError(self._parameter_error)
response = objects.delete_vldb_settings(connection, self.id, self._OBJECT_TYPE.value,
project)
if config.verbose and response.ok:
print("VLDB settings reset to default")
def auto_match_args_entity(func: Callable, obj: EntityBase, exclude: list = [],
include_defaults: bool = True) -> dict:
"""Automatically match `obj` object data to function arguments.
Handles default parameters. Extracts value from Enums. Returns matched
arguments as dict.
Args:
function: function for which args will be matched
obj: object to use for matching the function args
exclude: set `exclude` parameter to exclude specific param-value pairs
include_defaults: if `False` then values which have the same value as
default will not be included in the result
Raises:
KeyError: could not match all required arguments
"""
# convert names starting with '_'
obj_dict = {key[1:] if key.startswith("_") else key: val for key, val in obj.__dict__.items()}
kwargs = helper.auto_match_args(func, obj_dict, exclude, include_defaults)
if "object_type" in kwargs:
kwargs.update({"object_type": obj._OBJECT_TYPE.value})
return kwargs
|
python
|
from typing import NamedTuple
class User(NamedTuple):
name: str
age: int = 27
user1 = User('Jan')
print(user1)
print(dir(user1))
# user1.age = 30  # would raise AttributeError: NamedTuple fields are read-only
# print(user1)
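# A new instance must be built to "change" a field (standard namedtuple API):
user2 = user1._replace(age=30)
print(user2)  # User(name='Jan', age=30)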
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Sample Discourse Relation Classifier Train
Train parser for supplementary evaluation.
The training script takes three arguments:
$inputDataset = the folder of the dataset to parse.
The folder structure is the same as in the tar file
$inputDataset/parses.json
$inputDataset/relations-no-senses.json
$inputRun = the folder that contains the word2vec_model file or other resources
$outputDir = the folder that the parser will output 'output.json' to
"""
import codecs
import json
import random
import sys
from datetime import datetime
import logging #word2vec logging
from sklearn import preprocessing
import validator
from Common_Utilities import CommonUtilities
import gensim
from gensim import corpora, models, similarities # used for word2vec
from gensim.models.word2vec import Word2Vec # used for word2vec
from gensim.models.doc2vec import Doc2Vec#used for doc2vec
import time # used for performance measuring
import math
from scipy import spatial # used for similarity calculation
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Phrases
from gensim import corpora # for dictionary
from gensim.models import LdaModel
# from sklearn.svm import libsvm
from sklearn.svm import SVC
sys.path.append('~/semanticz')
from Word2Vec_AverageVectorsUtilities import AverageVectorsUtilities
import pickle
from DiscourseSenseClassification_FeatureExtraction_v1 import DiscourseSenseClassification_FeatureExtraction
class DiscourseParser_Sup_v1(object):
"""Sample discourse relation sense classifier
"""
def __init__(self, valid_senses, input_run, input_dataset, output_dir, input_params, input_features,class_mapping
, scale_range=(-1, 1)):
self.valid_senses = valid_senses
self.input_run = input_run
self.input_dataset = input_dataset
self.output_dir = output_dir
self.input_params = input_params
self.input_features = input_features
self.class_mapping = class_mapping
self.scale_range = scale_range
pass
def train_sense(self, input_dataset, word2vec_model, save_model_file_basename, scale_features, save_scale_file_basename):
class_mapping = self.class_mapping
logging.debug(class_mapping)
word2vec_index2word_set = set(word2vec_model.index2word)
model_dir = self.input_run
relation_file = '%s/relations.json' % input_dataset # with senses to train
relation_dicts = [json.loads(x) for x in open(relation_file)]
parse_file = '%s/parses.json' % input_dataset
parse = json.load(codecs.open(parse_file, encoding='utf8'))
random.seed(10)
clf = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
train_x = []
train_y = []
for i, relation_dict in enumerate(relation_dicts):
curr_features_vec = DiscourseSenseClassification_FeatureExtraction.extract_features_as_vector_from_single_record(\
relation_dict=relation_dict,\
parse=parse,\
word2vec_model=word2vec_model,\
word2vec_index2word_set=word2vec_index2word_set)
if (i+1) % 1000 == 0:
print '%s of %s' % (i, len(relation_dicts))
logging.info('%s of %s' % (i, len(relation_dicts)))
print '%s features:%s'%(i, curr_features_vec)
curr_senseses = relation_dict['Sense'] # list of senses example: u'Sense': [u'Contingency.Cause.Reason']
# logging.debug('%s - %s'%(i, curr_senseses))
for curr_sense in curr_senseses:
if curr_sense in class_mapping:
class_idx = class_mapping[curr_sense]
train_x.append(curr_features_vec)
train_y.append(class_idx)
#else:
# logging.warn('Sense "%s" is not a valid class. Skip'%(curr_sense))
scaler = preprocessing.MinMaxScaler(self.scale_range)
if scale_features:
logging.info('Scaling %s items with %s features..' % (len(train_x),len(train_x[0])))
start = time.time()
train_x = scaler.fit_transform(train_x)
end = time.time()
logging.info("Done in %s s" % (end - start))
pickle.dump(scaler, open(save_scale_file_basename, 'wb'))
logging.info('Scale feats ranges saved to %s' % save_scale_file_basename)
else:
logging.info("No scaling!")
logging.info('Training with %s items' % len(train_x))
start = time.time()
clf.fit(train_x, train_y)
end = time.time()
logging.info("Done in %s s" % (end - start))
pickle.dump(clf, open(save_model_file_basename, 'wb'))
logging.info('Model saved to %s' % save_model_file_basename)
def classify_sense(self, input_dataset, word2vec_model, load_model_file_basename, scale_features, load_scale_file_basename, use_connectives_sim=False):
output_dir = self.output_dir
class_mapping = self.class_mapping
class_mapping_id_to_origtext = dict([(value, key) for key,value in class_mapping.iteritems()])
logging.debug('class_mapping_id_to_origtext:')
logging.debug(class_mapping_id_to_origtext)
word2vec_index2word_set = set(word2vec_model.index2word)
relation_file = '%s/relations-no-senses.json' % input_dataset
parse_file = '%s/parses.json' % input_dataset
parse = json.load(codecs.open(parse_file, encoding='utf8'))
relation_dicts = [json.loads(x) for x in open(relation_file)]
output_file = '%s/output.json' % output_dir
output = codecs.open(output_file, 'wb', encoding='utf8')
clf = SVC()
clf = pickle.load(open(load_model_file_basename, 'rb'))
if scale_features:
# scaler = preprocessing.MinMaxScaler(self.scale_range)
# scaler.transform(feats)
scaler = pickle.load(open(load_scale_file_basename, 'rb'))
logger.info('Scaling is enabled!')
else:
logger.info('NO scaling!')
for i, relation_dict in enumerate(relation_dicts):
# print relation_dict
curr_features_vec = DiscourseSenseClassification_FeatureExtraction.extract_features_as_vector_from_single_record( \
relation_dict=relation_dict, \
parse=parse, \
word2vec_model=word2vec_model, \
word2vec_index2word_set=word2vec_index2word_set)
if len(relation_dict['Connective']['TokenList']) > 0:
relation_dict['Type'] = 'Explicit'
else:
relation_dict['Type'] = 'Implicit'
#sense = valid_senses[random.randint(0, len(valid_senses) - 1)]
if scale_features:
curr_features_vec = scaler.transform([curr_features_vec])[0]
sense = clf.predict([curr_features_vec])[0]
# print 'predicted sense:%s' % sense
            # TODO: map the predicted class id back to the original class label
sense_original = class_mapping_id_to_origtext[sense]
relation_dict['Sense'] = [sense_original]
#set output data
relation_dict['Arg1']['TokenList'] = \
[x[2] for x in relation_dict['Arg1']['TokenList']]
relation_dict['Arg2']['TokenList'] = \
[x[2] for x in relation_dict['Arg2']['TokenList']]
relation_dict['Connective']['TokenList'] = \
[x[2] for x in relation_dict['Connective']['TokenList']]
output.write(json.dumps(relation_dict) + '\n')
if (i+1) % 1000 == 0:
print '%s of %s' % (i, len(relation_dicts))
logging.info('%s of %s' % (i, len(relation_dicts)))
print '%s features:%s' % (i, curr_features_vec)
logging.info('output file written:%s' % output_file)
# Set logging info
logFormatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s]: %(levelname)s : %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Enable file logging
logFileName = '%s/%s-%s.log' % ('logs', 'sup_parser_v1', '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.now()))
fileHandler = logging.FileHandler(logFileName, 'wb')
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
# Enable console logging
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
# SAMPLE RUN:
# TRAIN:
# python sup_parser_v1.py en [dataset_folder_here] [model_folder_here] [output_dir_here] -run_name:sup_v1 -cmd:train -word2vec_model:""
#
#
if __name__ == '__main__':
language = sys.argv[1]
input_dataset = sys.argv[2]
input_run = sys.argv[3]
output_dir = sys.argv[4]
if language == 'en':
valid_senses = validator.EN_SENSES
elif language == 'zh':
valid_senses = validator.ZH_SENSES
cmd = 'train'
cmd = CommonUtilities.get_param_value("cmd", sys.argv, cmd)
logging.info('cmd:%s'%cmd)
#run name for output params
run_name = ""
run_name = CommonUtilities.get_param_value("run_name", sys.argv, run_name)
if run_name != "":
logging.info(('run_name:%s' % run_name))
else:
logging.error('Error: missing input file parameter - run_name')
quit()
# Perform scaling on the features
scale_features = False
scale_features = CommonUtilities.get_param_value_bool("scale_features", sys.argv, scale_features)
logging.info('scale_features:{0}'.format(scale_features))
#w2v/doc2vec params
# word2vec word2vec_model file
word2vec_model_file = "" # "qatarliving\\qatarliving_size400_win10_mincnt10.word2vec.bin"
word2vec_model_file = CommonUtilities.get_param_value("word2vec_model", sys.argv)
logging.info('Word2Vec File:\n\t%s' % word2vec_model_file)
# if word2vec_model_file == "":
# logging.error('Error: missing input file parameter - word2vec_model_file')
# quit()
# wordclusters_mapping_file
wordclusters_mapping_file = "" # "qatarliving\\qatarliving_size400_win10_mincnt10.word2vec.bin"
wordclusters_mapping_file = CommonUtilities.get_param_value("wordclusters_mapping_file", sys.argv)
logging.info('wordclusters_mapping_file:\n\t%s' % wordclusters_mapping_file)
doc2vec_model_file = "" # "qatarliving\\qatarliving_size400_win10_mincnt10.word2vec.bin"
doc2vec_model_file = CommonUtilities.get_param_value("doc2vec_model", sys.argv)
if doc2vec_model_file != "":
logging.info('Doc2Vec File:\n\t%s' % doc2vec_model_file)
if doc2vec_model_file == '' and word2vec_model_file == '':
logging.error('Error: missing input file parameter - either doc2vec_model_file or word2vec_model_file')
quit()
# use id for vector retrieval from doc2vec
use_id_for_vector = False
if sys.argv.count('-use_id_for_vector') > 0:
use_id_for_vector = True
logging.info('use_id_for_vector:{0}'.format(use_id_for_vector))
# load word2vec word2vec_model as binary file
word2vec_load_bin = False
word2vec_load_bin = CommonUtilities.get_param_value_bool("word2vec_load_bin", sys.argv, word2vec_load_bin)
logging.info('word2vec_load_bin:{0}'.format(word2vec_load_bin))
# Brown clusters file
brownclusters_file = ""
brownclusters_file = CommonUtilities.get_param_value("brownclusters_file", sys.argv, brownclusters_file)
logging.info('brownclusters_file:\n\t%s' % brownclusters_file)
# Load Models here
is_doc2vec_model = False
# load word2vec word2vec_model
if doc2vec_model_file != '':
model = Doc2Vec.load(doc2vec_model_file)
is_doc2vec_model = True
else:
if word2vec_load_bin:
            model = Word2Vec.load_word2vec_format(word2vec_model_file, binary=True)  # use this for google vectors
else:
model = Word2Vec.load(word2vec_model_file)
use_id_for_vector = use_id_for_vector and is_doc2vec_model
word2vec_num_features = len(model.syn0[0])
logging.info("Embeddings feature vectors length:%s" % word2vec_num_features)
logging.info("Model syn0 len=%d" % (len(model.syn0)))
# define classes
class_mapping = dict([(val, idx) for idx, val in enumerate(valid_senses)])
class_mapping_file = '%s/%s.classlabels' % (output_dir, run_name)
CommonUtilities.write_dictionary_to_file(class_mapping, class_mapping_file)
#RUN PARSER
parser = DiscourseParser_Sup_v1(valid_senses=valid_senses, input_run=input_run, input_dataset=input_dataset,\
output_dir=output_dir, \
input_params=None, input_features=None,\
class_mapping=class_mapping)
model_file_basename = '%s/%s_model_' % (input_run, run_name)
scale_file_basename = '%s/%s_scalerange_' % (input_run, run_name)
if cmd == 'train':
logging.info('-----------TRAIN---------------------------------')
parser.train_sense(input_dataset=input_dataset, word2vec_model=model, save_model_file_basename=model_file_basename,
scale_features=scale_features, save_scale_file_basename=scale_file_basename)
elif cmd == 'train-test':
logging.debug(class_mapping)
parser.train_sense(input_dataset=input_dataset, word2vec_model=model, save_model_file_basename=model_file_basename,
scale_features=scale_features, save_scale_file_basename=scale_file_basename)
logging.info('-------------------------------------------------------------')
parser.classify_sense(input_dataset=input_dataset, word2vec_model=model, load_model_file_basename=model_file_basename,
scale_features=scale_features, load_scale_file_basename=scale_file_basename)
elif cmd == 'test':
logging.info('-----------TEST----------------------------------')
parser.classify_sense(input_dataset=input_dataset, word2vec_model=model, load_model_file_basename=model_file_basename,
scale_features=scale_features, load_scale_file_basename=scale_file_basename)
else:
logging.error("command unknown: %s. Either -cmd:train or -cmd:test expected"%(cmd))
|
python
|
# -*- coding: utf-8 -*-
"""
Demonstrate a simple data-slicing task: given 3D data (displayed at top), select
a 2D plane and interpolate data along that plane to generate a slice image
(displayed at bottom).
"""
## Add path to library (just for examples; you do not need this)
import initExample
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
app = QtGui.QApplication([])
## Create window with two ImageView widgets
win = QtGui.QMainWindow()
win.resize(800,800)
win.setWindowTitle('pyqtgraph example: DataSlicing')
cw = QtGui.QWidget()
win.setCentralWidget(cw)
l = QtGui.QGridLayout()
cw.setLayout(l)
imv1 = pg.ImageView()
imv2 = pg.ImageView()
l.addWidget(imv1, 0, 0)
l.addWidget(imv2, 1, 0)
win.show()
roi = pg.LineSegmentROI([[10, 64], [120,64]], pen='r')
imv1.addItem(roi)
x1 = np.linspace(-30, 10, 128)[:, np.newaxis, np.newaxis]
x2 = np.linspace(-20, 20, 128)[:, np.newaxis, np.newaxis]
y = np.linspace(-30, 10, 128)[np.newaxis, :, np.newaxis]
z = np.linspace(-20, 20, 128)[np.newaxis, np.newaxis, :]
d1 = np.sqrt(x1**2 + y**2 + z**2)
d2 = 2*np.sqrt(x1[::-1]**2 + y**2 + z**2)
d3 = 4*np.sqrt(x2**2 + y[:,::-1]**2 + z**2)
data = (np.sin(d1) / d1**2) + (np.sin(d2) / d2**2) + (np.sin(d3) / d3**2)
def update():
global data, imv1, imv2
d2 = roi.getArrayRegion(data, imv1.imageItem, axes=(1,2))
imv2.setImage(d2)
roi.sigRegionChanged.connect(update)
## Display the data
imv1.setImage(data)
imv1.setHistogramRange(-0.01, 0.01)
imv1.setLevels(-0.003, 0.003)
update()
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
python
|
from itertools import combinations
from operator import mul
f = open("input.txt")
d = f.readlines()
total = 0
for p in d:
sides = [int(n) for n in p.split("x")]
combos = list(combinations(sides, 2))
areas = [ mul(*a) for a in combos]
areas.sort()
total += areas[0]
total += sum([2*a for a in areas])
print(total)
|
python
|
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.dist import Distribution
import os
from subprocess import call
from django.core.management.base import BaseCommand
DOMAINS = ['django', 'djangojs']
MODULES = ['openstack_dashboard', 'horizon']
class Command(BaseCommand):
help = ('Extract strings that have been marked for translation into .POT '
'files.')
def add_arguments(self, parser):
parser.add_argument('-m', '--module', type=str, nargs='+',
default=MODULES,
help=("The target python module(s) to extract "
"strings from. "
"Default: %s" % MODULES))
parser.add_argument('-d', '--domain', choices=DOMAINS,
nargs='+', default=DOMAINS,
metavar='DOMAIN',
help=("Domain(s) of the .pot file. "
"Default: %s" % DOMAINS))
parser.add_argument('--check-only', action='store_true',
help=("Checks that extraction works correctly, "
"then deletes the .pot file to avoid "
"polluting the source code"))
def handle(self, *args, **options):
cmd = ('python setup.py {quiet} extract_messages '
'-F babel-{domain}.cfg '
'--input-dirs {module} '
'-o {potfile}')
distribution = Distribution()
distribution.parse_config_files(distribution.find_config_files())
quiet = '-q' if int(options['verbosity']) == 0 else ''
if options['check_only']:
cmd += " ; rm {potfile}"
for module in options['module']:
for domain in options['domain']:
potfile = '{module}/locale/{domain}.pot'.format(module=module,
domain=domain)
if not os.path.exists(potfile):
with open(potfile, 'wb') as f:
f.write(b'')
call(cmd.format(module=module, domain=domain, potfile=potfile,
quiet=quiet), shell=True)
|
python
|
import numpy as np
a = np.array([0, 0.5, 1.0, 1.5, 2.0])
type(a)
a[:2] # Slicing works as for lists
# Built in methods
a.sum()
a.std()
a.cumsum()
a.max()
a.argmax()
# Careful with np.max!
np.max(2, 0)
np.max(-2, 0) # silent fail :) second argument is the axis
np.max(0, 2) # fail
np.maximum(-2, 0)
# Vectorized operations: operations are applied to each element
a*2
a**2
np.sqrt(a)
np.log2(a+1)
b = np.array([a, a*2])
b
b.sum(axis=0) # sum along axis 0 ==> columns
b.sum()
b.sum(axis=1)
eye = np.identity(4)
eye
np.ones_like(eye)
np.ones((2,3))
zeros = np.zeros((2,3,4))
zeros.shape
zeros[1]
# Optimized for speed!
import time
start = time.time()
acc = 0
for i in range(1000):
for j in range(1000):
acc += np.random.standard_normal()
end = time.time()
print("It took (ms): ", (end-start)*1000)
# Numpy outsources the loops to underlying C code for performance
# %timeit test = np.random.standard_normal((1000,1000)).sum()
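# A plain-Python version of the %timeit comparison above (a rough sketch;
# exact timings vary by machine): one vectorized call generates the same
# one million draws as the double loop and sums them.
start = time.time()
acc_vectorized = np.random.standard_normal((1000, 1000)).sum()
end = time.time()
print("Vectorized version took (ms): ", (end - start) * 1000)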
# CODE VECTORIZATION
r = np.random.standard_normal((4,3))
s = np.random.standard_normal((4,3))
r+s
# Broadcasting
2*r+3 # same as 2*r+3*np.ones_like(r)
########################################
########################################
######## MOVE TO BROWSER HERE
########################################
########################################
# Functions are applied element-wise.
def f(x):
return 3*x+5
f(3)
f(r)
import math
math.sin(math.pi)
math.sin(r) # Error: this function only takes real numbers!
np.sin(r)
type(np.sin) # ufunc: universal function (works with arrays too)
|
python
|
import os
import sys
import logging
import configparser
import numpy as np
configfile_name = "NAAL_config"
logger = logging.getLogger(__name__)
if sys.platform.startswith('win'):
config_dir = os.path.expanduser(os.path.join("~", ".NAAL_FPGA"))
else:
config_dir = os.path.expanduser(os.path.join("~", ".NAAL_FPGA", "NAAL_FPGA"))
install_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
nengo_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir))
examples_dir = os.path.join(install_dir, "examples")
fpga_config = \
{'NAAL': os.path.join(nengo_dir, 'data', configfile_name),
'system': os.path.join(install_dir, configfile_name),
'user': os.path.join(config_dir, "fpga_config"),
'project': os.path.abspath(os.path.join(os.curdir, configfile_name))}
FPGA_CONFIG_FILES = [fpga_config['NAAL'],
fpga_config['system'],
fpga_config['user'],
fpga_config['project']]
def Is_fpgaboard(fgpaboad_name):
try :
config = configparser.ConfigParser()
config.read(FPGA_CONFIG_FILES[3])
fpga_config = config[fgpaboad_name]
except Exception as ex:
print('config_FPGA config error',ex)
exit()
return fpga_config
def config_parser_board(key,value):
config = configparser.ConfigParser()
config.read(FPGA_CONFIG_FILES[3])
try :
key_config = config[key]
except Exception as ex:
return False
return True
def config_parser(key,value):
config = configparser.ConfigParser()
config.read(FPGA_CONFIG_FILES[3])
try :
key_config = config[key]
except Exception as ex:
print('config_FPGA config error',ex)
exit()
return key_config[value]
def set_config(key,value,insert_value):
config = configparser.ConfigParser()
config.read(FPGA_CONFIG_FILES[3])
try :
key_config = config[key]
except Exception as ex:
print('config_FPGA config error',ex)
exit()
key_config[value]= str(insert_value)
with open(FPGA_CONFIG_FILES[3],'w') as f:
config.write(f)
|
python
|
import socket
import os
from dotenv import load_dotenv
import redis
from flask import Flask, Blueprint, url_for
from flask_caching import Cache
from flask_login import login_required, LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
import dash
import dash_bootstrap_components as dbc
from dask.distributed import Client, LocalCluster
from ai4good.config import FlaskConfig, ModelConfig
from ai4good.runner.facade import Facade
from ai4good.webapp.model_runner import ModelRunner, _sid
from ai4good.utils.logger_util import get_logger
cache_timeout = ModelConfig.CACHE_TIMEOUT
logger = get_logger(__name__,'DEBUG')
load_dotenv()
# register flask components
def register_flask_extensions(server):
db_sqlalchemy.init_app(server)
db_migrate.init_app(server, db_sqlalchemy)
login.init_app(server)
login.login_view = 'main.login'
def register_flask_blueprints(server):
from ai4good.webapp.authenticate.authapp import server_bp
server.register_blueprint(server_bp)
def _protect_dashviews(dashapp):
for view_func in dashapp.server.view_functions:
if view_func.startswith(dashapp.config.url_base_pathname):
dashapp.server.view_functions[view_func] = login_required(dashapp.server.view_functions[view_func])
# initialise dask distributed client
def dask_client() -> Client:
global _client
# client can only have one thread due to scipy ode solver constraints
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html
if _client is None:
if ("DASK_SCHEDULER_HOST" not in os.environ) :
logger.warn("No Dask Sceduler host specified in .env, Running Dask locally ...")
cluster = LocalCluster(n_workers=4, threads_per_worker=1)
_client = Client(cluster)
elif (os.environ.get("DASK_SCHEDULER_HOST")=="127.0.0.1") :
logger.info("Running Dask locally ...")
cluster = LocalCluster(n_workers=4, threads_per_worker=1)
_client = Client(cluster)
elif (os.environ.get("DASK_SCHEDULER_HOST")=='') :
logger.warn("No Dask Sceduler host specified in .env, Running Dask locally ...")
cluster = LocalCluster(n_workers=4, threads_per_worker=1)
_client = Client(cluster)
else :
logger.info("Running Dask Distributed using Dask Scheduler ["+os.environ.get("DASK_SCHEDULER_HOST")+"] ...")
_client = Client(os.environ.get("DASK_SCHEDULER_HOST")+":"+os.environ.get("DASK_SCHEDULER_PORT"))
return _client
# initialise flask extensions
db_sqlalchemy = SQLAlchemy()
db_migrate = Migrate()
login = LoginManager()
# create flask app
flask_app = Flask(__name__)
flask_app.config.from_object(FlaskConfig)
# register flask components
register_flask_extensions(flask_app)
register_flask_blueprints(flask_app)
# create cache
local_cache = Cache(flask_app, config={
'CACHE_TYPE': 'simple',
'CACHE_DEFAULT_TIMEOUT': cache_timeout
})
cache = Cache(flask_app, config={
'DEBUG': True,
'CACHE_DEFAULT_TIMEOUT': cache_timeout,
'CACHE_TYPE': 'redis',
'CACHE_REDIS_URL': os.environ.get("REDIS_URL"),
'CACHE_KEY_PREFIX': socket.gethostname()
})
_redis = redis.Redis.from_url(os.environ.get("REDIS_URL"))
# create dash app under flask
dash_app = dash.Dash(
__name__,
server=flask_app,
#routes_pathname_prefix='/sim/',
url_base_pathname='/sim/',
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.BOOTSTRAP, '/static/css/ai4good.css']
)
dash_app.title = "AI4Good COVID-19 Model Server"
_protect_dashviews(dash_app)
# create dash auth app under flask
dash_auth_app = dash.Dash(
__name__,
server=flask_app,
url_base_pathname='/auth/',
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.BOOTSTRAP, '/static/css/ai4good.css']
)
dash_auth_app.title = "AI4Good COVID-19 Model Server authentication"
_client = None # Needs lazy init
facade = Facade.simple()
model_runner = ModelRunner(facade, _redis, dask_client, _sid)
|
python
|
# coding=utf-8
__author__ = "Dimitrios Karkalousos"
from abc import ABC
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.nn import L1Loss
from mridc.collections.common.losses.ssim import SSIMLoss
from mridc.collections.common.parts.fft import ifft2c
from mridc.collections.common.parts.utils import coil_combination
from mridc.collections.reconstruction.models.base import BaseMRIReconstructionModel, BaseSensitivityModel
from mridc.collections.reconstruction.models.unet_base.unet_block import NormUnet
from mridc.collections.reconstruction.models.varnet.vn_block import VarNetBlock
from mridc.collections.reconstruction.parts.utils import center_crop_to_smallest
from mridc.core.classes.common import typecheck
__all__ = ["VarNet"]
class VarNet(BaseMRIReconstructionModel, ABC):
"""
End-to-end Variational Network (VN) model implementation as presented in [1]_.
References
----------
.. [1] Sriram, A. et al. (2020) ‘End-to-End Variational Networks for Accelerated MRI Reconstruction’.
Available at: https://github.com/facebookresearch/fastMRI.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
cfg_dict = OmegaConf.to_container(cfg, resolve=True)
self.no_dc = cfg_dict.get("no_dc")
self.fft_type = cfg_dict.get("fft_type")
self.num_cascades = cfg_dict.get("num_cascades")
# Cascades of VN blocks
self.cascades = torch.nn.ModuleList(
[
VarNetBlock(
NormUnet(
chans=cfg_dict.get("channels"),
num_pools=cfg_dict.get("pooling_layers"),
padding_size=cfg_dict.get("padding_size"),
normalize=cfg_dict.get("normalize"),
),
fft_type=self.fft_type,
no_dc=self.no_dc,
)
for _ in range(self.num_cascades)
]
)
self.output_type = cfg_dict.get("output_type")
# Initialize the sensitivity network if use_sens_net is True
self.use_sens_net = cfg_dict.get("use_sens_net")
if self.use_sens_net:
self.sens_net = BaseSensitivityModel(
cfg_dict.get("sens_chans"),
cfg_dict.get("sens_pools"),
fft_type=self.fft_type,
mask_type=cfg_dict.get("sens_mask_type"),
normalize=cfg_dict.get("sens_normalize"),
)
# initialize weights if not using pretrained vn
# TODO if not cfg_dict.get("pretrained", False)
self.train_loss_fn = SSIMLoss() if cfg_dict.get("train_loss_fn") == "ssim" else L1Loss()
self.eval_loss_fn = SSIMLoss() if cfg_dict.get("eval_loss_fn") == "ssim" else L1Loss()
self.dc_weight = torch.nn.Parameter(torch.ones(1))
self.accumulate_estimates = False
@typecheck()
def forward(
self,
y: torch.Tensor,
sensitivity_maps: torch.Tensor,
mask: torch.Tensor,
init_pred: torch.Tensor,
target: torch.Tensor,
) -> torch.Tensor:
"""
Forward pass of the network.
Args:
y: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], masked kspace data
sensitivity_maps: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], coil sensitivity maps
mask: torch.Tensor, shape [1, 1, n_x, n_y, 1], sampling mask
init_pred: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for pred
target: torch.Tensor, shape [batch_size, n_x, n_y, 2], target data
Returns:
Final prediction of the network.
"""
sensitivity_maps = self.sens_net(y, mask) if self.use_sens_net else sensitivity_maps
estimation = y.clone()
for cascade in self.cascades:
# Forward pass through the cascades
estimation = cascade(estimation, y, sensitivity_maps, mask)
estimation = ifft2c(estimation, fft_type=self.fft_type)
estimation = coil_combination(estimation, sensitivity_maps, method=self.output_type, dim=1)
estimation = torch.view_as_complex(estimation)
_, estimation = center_crop_to_smallest(target, estimation)
return estimation
|
python
|
"""empty message
Revision ID: 073b3a3e8e58
Revises: 60c735df8d2f
Create Date: 2019-09-06 08:45:01.107447
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '073b3a3e8e58'
down_revision = '60c735df8d2f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('balance_sheets',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('start_date', sa.Date(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('start_date', 'end_date')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('balance_sheets')
# ### end Alembic commands ###
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import select
import time
import json
import requests
import yaml
import sys
import os
__PRODUCT_ID = "logslack 1.0 (c) 4k1/logslack"
def get_value(r, key, default=None):
if key in r:
if type(r[key]) is int:
return int(r[key])
else:
return str(r[key])
    elif default is None:
        raise KeyError(key)
else:
return default
if __name__ == '__main__':
# Initialize
print ("[ ] " + __PRODUCT_ID)
# Check args
if (len(sys.argv) != 2):
print ("[ ] Usage: " + sys.argv[0] + " service_id");
exit(0)
service_id = sys.argv[1]
print ("[ ] Initializing for '" + sys.argv[1] + "'.");
# Load logslack.conf
yml_target = "/etc/logslack.conf"
try:
f = open(yml_target)
conf = yaml.load(f)
f.close()
except:
print ("[-] Cannot load as a yaml by '" + yml_target + "'")
exit(-1)
# Check configuration
if (service_id not in conf):
print ("[-] Unknown service_id.")
exit (-1)
if ("push" not in conf[service_id] or
"poll" not in conf[service_id] or
"rules" not in conf[service_id]):
print ("[-] Illegal yaml format.")
exit (-1)
# Check push engine
try:
eu = service_id + ".stopfile"; stopfile = get_value(conf[service_id], "stopfile")
eu = service_id + ".push.slack"; slack_key = get_value(conf[service_id]["push"], "slack")
eu = service_id + ".poll.target"; poll_target = get_value(conf[service_id]["poll"], "target")
eu = service_id + ".rules"
if ("rules" not in conf[service_id]):
raise
rules = conf[service_id]["rules"]
eu = slack_key + ".webhook"
if ("webhook" not in conf[slack_key]):
raise
eu = slack_key + ".webhook.url"; slack_url = get_value(conf[slack_key]["webhook"], "url")
eu = slack_key + ".webhook.username"; slack_username = get_value(conf[slack_key]["webhook"], "username")
eu = slack_key + ".webhook.icon_emoji"; slack_icon = get_value(conf[slack_key]["webhook"], "icon_emoji")
slackprm = conf[slack_key]["webhook"]
except:
print ("[-] Cannot load an entity of '" + eu + "'.")
exit (-1)
requests.post(slack_url, data = json.dumps({
'text': "*Start logslack for " + poll_target + ".*",
'username': slack_username,
'icon_emoji': slack_icon,
}))
cmd = ('tail -n +1 --follow=name ' + poll_target)
p = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
y = select.poll()
y.register(p.stdout, select.POLLIN)
slack_flag = 0
while True:
if (os.path.exists(stopfile)):
requests.post(slack_url, data = json.dumps({
'text': "*Stop logslack for " + poll_target + ".*",
'username': slack_username,
'icon_emoji': slack_icon,
}))
break
if y.poll(1):
da = p.stdout.readline()
else:
slack_flag = 1
if slack_flag != 1:
continue
for ar in rules:
match = get_value(ar, "match", "")
notice = get_value(ar, "notice", "")
level = get_value(ar, "level", "#dddddd")
if (match == "" or notice == ""):
continue
if (match in da):
push_data = notice
push_data = push_data.replace("{$log}", da)
push_data = push_data.replace("{$keyword}", match)
requests.post(slack_url, data = json.dumps({
'username': slack_username,
'icon_emoji': slack_icon,
'attachments': [
{
"title": "logslack detected",
"text": push_data,
"color": level,
}
]
}))
else:
time.sleep(1)
|
python
|
# Generated by Django 2.2.9 on 2020-02-02 18:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("core", "0027_auto_20200202_1822")]
operations = [
migrations.RemoveField(model_name="office", name="req_nominations"),
migrations.RemoveField(model_name="officesorderable", name="electionPage"),
]
|
python
|
import ctypes
import os
import math
from typing import Dict, List, cast
from abc import ABC, abstractmethod
import sdl2
from helpers import sdl, draw_texture, texture_from_bmp
import config
import entities
class Component(ABC):
"""Interface that each component must adhere to. Strictly speaking, components
in ECS are data buckets (only the __init__() part of subclasses), without
any behavior. In this simple ECS implementation, we make each component
support three events, though they sometimes have a no-op implementation."""
    @abstractmethod
    def update(self) -> None:
        """Called every frame to update component's game state."""
        raise NotImplementedError
    @abstractmethod
    def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
        """Called every frame to draw component."""
        raise NotImplementedError
    @abstractmethod
    def collision(self, other: "entities.Entity") -> None:
        """Called every frame for component to react to collision on container
        entity. other is the Entity with which we collided."""
        raise NotImplementedError
class SpriteRenderer(Component):
"""Rendering a sprite is a piece of functionality shared among components."""
def __init__(self, renderer: sdl2.render.SDL_Renderer,
container: "entities.Entity", filename: str):
# Container of this component
self.container = container
# Never explicitly deallocated, but reclaimed by the operating system
# when the game exits.
self.texture = texture_from_bmp(renderer, filename)
        # Dynamically determine width and height of sprite rather than using constants.
w = ctypes.pointer(ctypes.c_int(0))
h = ctypes.pointer(ctypes.c_int(0))
sdl(sdl2.SDL_QueryTexture(self.texture, None, None, w, h))
self.width = float(w.contents.value)
self.height = float(h.contents.value)
def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
con = self.container
draw_texture(renderer, self.texture, con.position, con.rotation)
def update(self) -> None:
pass
def collision(self, other: "entities.Entity") -> None:
pass
class Sequence():
def __init__(self, renderer: sdl2.render.SDL_Renderer, filepath: str,
sample_rate: int, loop: bool):
""" Creates a sequence from a list of files in filepath."""
self.textures: List[sdl2.render.SDL_Texture] = []
for filename in sorted(os.listdir(filepath)):
self.textures.append(
texture_from_bmp(renderer, os.path.join(filepath, filename)))
# Number of times to update current_frame per second
self.sample_rate = sample_rate
self.loop = loop
# Index into textures list
self.current_frame = 0
def current_texture(self) -> sdl2.render.SDL_Texture:
return self.textures[self.current_frame]
def next_frame(self) -> bool:
if self.current_frame == len(self.textures) - 1:
if self.loop:
self.current_frame = 0
else:
return True
else:
self.current_frame += 1
return False
class Animator(Component):
def __init__(self, container: "entities.Entity", sequences:
Dict[str, Sequence], default_sequence: str):
self.container = container
self.sequences = sequences
self.last_frame_change = sdl2.SDL_GetTicks()
self.finished = False
# Key used to index into self.sequences dictionary
self.current_animation_playing = default_sequence
def set_sequence(self, name: str) -> None:
self.current_animation_playing = name
self.last_frame_change = sdl2.SDL_GetTicks()
def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
texture = self.sequences[
self.current_animation_playing].current_texture()
con = self.container
draw_texture(renderer, texture, con.position, con.rotation)
def update(self) -> None:
sequence = self.sequences[self.current_animation_playing]
frame_interval = 1000.0 / sequence.sample_rate
if (sdl2.SDL_GetTicks() - self.last_frame_change) >= frame_interval:
self.finished = sequence.next_frame()
self.last_frame_change = sdl2.SDL_GetTicks()
def collision(self, other: "entities.Entity") -> None:
pass
class VulnerableToBullets(Component):
def __init__(self, container: "entities.Entity") -> None:
self.container = container
self.animator: Animator = cast(
Animator, container.get_component(Animator))
def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
pass
def update(self) -> None:
if self.animator.finished and self.animator.current_animation_playing == "destroy":
self.container.active = False
def collision(self, other: "entities.Entity") -> None:
if other.tag == "bullet":
self.animator.set_sequence("destroy")
class BulletMover(Component):
def __init__(self, container: "entities.Entity", speed: float):
self.container = container
self.speed = speed
def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
pass
def update(self) -> None:
# Compute how much of bullet's speed should go in x and y directions.
con = self.container
pos = con.position
pos.x += self.speed * math.cos(con.rotation) * config.delta_time
pos.y += self.speed * math.sin(con.rotation) * config.delta_time
if pos.x > config.SCREEN_WIDTH or pos.x < 0 or pos.y > config.SCREEN_HEIGHT or pos.y < 0:
con.active = False
# We know there's only ever one collision point for bullet.
con.collisions[0].center = con.position
def collision(self, other: "entities.Entity") -> None:
"""When bullet collides with enemy, make bullet invisible."""
# Deactivation of bullet on collision with other entity strictly
# speaking isn't related to the bullet movement component. Argument
        # could be made that this functionality ought to be placed in a separate
        # component; a hedged sketch of such a component follows this class.
self.container.active = False
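# Hedged sketch, not part of the original game code: if the deactivate-on-
# collision behaviour discussed above were split out of BulletMover, a
# dedicated component could look roughly like this (the name is hypothetical).
class DeactivateOnCollision(Component):
    def __init__(self, container: "entities.Entity") -> None:
        self.container = container
    def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
        pass
    def update(self) -> None:
        pass
    def collision(self, other: "entities.Entity") -> None:
        # Any collision simply deactivates the containing entity.
        self.container.active = False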
class KeyboardMover(Component):
def __init__(self, container: "entities.Entity", speed: float):
# Component to be applied to any element to allow it to be moved around
# based on keyboard input. Therefore we store a reference to parent
# element that this component is a part of.
self.container = container
# KeyboardMover must know container element's speed to make different
# KeyboardMovers for elements that move at different speeds.
self.speed = speed
# To detect when container element moves outside the screen we must know
# the screen's height and width. It's stored inside the element's
# SpriteRenderer which we must locate. This implies that when we attach
# a KeyboardMover component to an element, it's assumed that a
# SpriteRenderer component is also attached.
self.sprite_renderer: SpriteRenderer = cast(
SpriteRenderer, container.get_component(SpriteRenderer))
def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
pass
def update(self) -> None:
con = self.container
keys = sdl(sdl2.SDL_GetKeyboardState(None))
if keys[sdl2.SDL_SCANCODE_LEFT] == 1:
if con.position.x - self.sprite_renderer.width/2 > 0:
con.position.x -= self.speed * config.delta_time
elif keys[sdl2.SDL_SCANCODE_RIGHT] == 1:
if con.position.x + self.sprite_renderer.width/2 < config.SCREEN_WIDTH:
con.position.x += self.speed * config.delta_time
def collision(self, other: "entities.Entity") -> None:
pass
class KeyboardShooter(Component):
def __init__(self, container: "entities.Entity", cooldown: int) -> None:
self.container: entities.Entity = container
self.cooldown = cooldown
self.last_shot = 0
def draw(self, renderer: sdl2.render.SDL_Renderer) -> None:
pass
def update(self) -> None:
pos = self.container.position
keys = sdl(sdl2.SDL_GetKeyboardState(None))
if keys[sdl2.SDL_SCANCODE_SPACE] == 1:
if (sdl2.SDL_GetTicks() - self.last_shot) >= self.cooldown:
# Player has two turrets
self.shoot(pos.x + 25, pos.y - 20)
self.shoot(pos.x - 25, pos.y - 20)
self.last_shot = sdl2.SDL_GetTicks()
def shoot(self, x: float, y: float) -> None:
"""Creates a bullet at (x,y) to allow bullet to originate from left and
right gun turret instead of the player's center."""
bullet = entities.bullet_from_pool()
if bullet is not None:
bullet.active = True
bullet.position.x = x
bullet.position.y = y
bullet.rotation = 270 * (math.pi / 180) # degrees to radians
bullet.update()
def collision(self, other: "entities.Entity") -> None:
pass
|
python
|
import os
import re
import time
import random
import hashlib
from multiprocessing import cpu_count
from tandems import _tandems
try:
from urllib.parse import unquote
except ImportError:
from urlparse import unquote
UNK_ID = 0
SQL_RESERVED_WORDS = []
with open("./SQL_reserved_words.txt", "rb") as f:
for line in f:
if line[0] != bytes(b"#")[0]:
SQL_RESERVED_WORDS.append(line.replace(b"\n", b""))
def oracle_q_encoder(row):
def dashrepl(m):
if m.group(0):
return " q"+re.sub(r"[ |!|\'|\"|\;|\:|\(|\)|\{|\}]", "", m.group(0))+"q "
else:
return ""
row = re.sub(r"nq\'.*\'", dashrepl, row)
row = re.sub(r"nQ\'.*\'", dashrepl, row)
row = re.sub(r"Nq\'.*\'", dashrepl, row)
row = re.sub(r"NQ\'.*\'", dashrepl, row)
row = re.sub(r"q\'.*\'", dashrepl, row)
row = re.sub(r"Q\'.*\'", dashrepl, row)
return row
def mysql_n_encoder(row):
def dashrepl(m):
if m.group(0):
return " N"+re.sub(r"[ |!|\'|\"|\;|\:|\(|\)|\{|\}]", "", m.group(0))+"N "
else:
return ""
row = re.sub(r"[N|n]\'.*\'", dashrepl, row)
return row
def postgres_q_encoder(row):
def dashrepl(m):
if m.group(0):
return " "+re.sub(r"[ |!|\'|\"|\;|\:|\(|\)|\{|\}]", "", m.group(0))+" "
else:
return ""
row = re.sub(r"\$.*\$", dashrepl, row)
return row
def escape_byte(b):
if b in [
bytearray(b"-"),
bytearray(b"+"),
bytearray(b"."),
bytearray(b"?"),
bytearray(b"*"),
bytearray(b"}"),
bytearray(b"{"),
bytearray(b"["),
bytearray(b"]"),
bytearray(b")"),
bytearray(b"("),
bytearray(b"\\"),
bytearray(b"^"),
bytearray(b"$")]:
return b'\\'+b
else:
return b
"""
br_sub - byte range sub
Args:
ba - bytearray
    repl - bytearray used as the replacement
    start_b - start byte like 0x3c
    end_b - end byte like 0x3e. Has to be greater than start_b
    multy_replace - True or False. If True, a run of matching bytes is collapsed into a single token.
return:
    bytearray with repl substituted for every byte in the range start_b to end_b
"""
def br_sub(ba, repl, start_b, end_b=None, multy_replace=True):
if end_b == None:
end_b = start_b
reg_tokens = []
for i in range(start_b, end_b+1):
reg_tokens.append(escape_byte(bytearray([i])))
if len(reg_tokens) > 1:
regex = b"|".join(reg_tokens)
if multy_replace:
regex = b"["+regex+b"]+"
else:
regex = bytes(reg_tokens[0])
if multy_replace:
regex = regex+b"+"
regex = re.compile(regex)
ba = re.sub(regex, b" "+repl+b" ", ba)
return ba
def replace_tokens(row, multy_replace=True, replace_unk=True, strict_unquote=False):
row = row.lower()
row = re.sub(re.compile(rb"\_"), b" UNDRSCR ", row)
row = br_sub(row, b"BT_CLNS", 0x3a, 0x3b, multy_replace=multy_replace)
row = br_sub(row, b"BT_CMPRSN", 0x3c, 0x3e, multy_replace=multy_replace)
row = br_sub(row, b"BT_SIGNS", 0x7f, 0x9f, multy_replace=multy_replace)
row = br_sub(row, b"BT_SIGNS", 0xa1, 0xff, multy_replace=multy_replace)
row = br_sub(row, b"BT_SPCIAL", 0x22, 0x27, multy_replace=multy_replace)
# row = br_sub(row, b"BTOKEN_FOUR", 0x41, 0x5a, multy_replace=multy_replace) # A-Z
# row = br_sub(row, b"BTOKEN_FOUR", 0x61, 0x7a, multy_replace=multy_replace) # a- z
row = br_sub(row, b"BT_BAR_BRCS", 0x7b, 0x7d, multy_replace=multy_replace)
row = br_sub(row, b"BT_PRNTHS", 0x28, 0x29, multy_replace=multy_replace)
row = re.sub(re.compile(rb"(--|#)+"), b" CMMNT_LINE ", row)
row = br_sub(row, b"BT_MATH_SIGNS", 0x2b, 0x2f, multy_replace=multy_replace)
row = br_sub(row, b"BT_BSLAH_BRCTS", 0x5b, 0x5d, multy_replace=multy_replace)
row = br_sub(row, b" ", 0x07, 0x0d, multy_replace=multy_replace)
row = br_sub(row, b" ", 0xa0, multy_replace=multy_replace)
# row = br_sub(row, b"BTOKEN_TEN", 0x20, multy_replace=multy_replace) # ' '
row = br_sub(row, b"BT_TRSH", 0x01, 0x06, multy_replace=multy_replace)
row = br_sub(row, b"BT_TRSH", 0x0e, 0x1f, multy_replace=multy_replace)
#row = br_sub(row, b"BTOKEN_ANOTHER_SEPARATORS", 0x5f, multy_replace=multy_replace)#_
row = re.sub(rb'\s{2,}', b' ', row)
row = row.decode()
if strict_unquote:
while row != unquote(row):
row = unquote(row)
row = oracle_q_encoder(row)
row = mysql_n_encoder(row)
row = postgres_q_encoder(row)
def ord_256(c):
c = ord(c)
        if c > 255:
return 255
else:
return c
row = bytearray(map(ord_256, row))
tokens = [] #we are using lists to follow the order during parsing
tokens.append([re.compile(rb"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"), b"UUID"])
tokens.append([re.compile(rb"/\*\!"), b'X'])
tokens.append([re.compile(rb"\?+"), b'QUSTIN'])
tokens.append([re.compile(rb"/\*.*\*/"), b'CMMNT'])
tokens.append([re.compile(rb"/\*.*"), b'CMMNT_PART'])
tokens.append([re.compile(rb"@@?[a-zA-Z0-9]+"), b'VAR'])
tokens.append([re.compile(rb"(^|(?<=[(|)| |:|\"|\'|\[|\]|\{|\}|,|.|\?|\/|=|-]))([a-z]{1})([(|)| |:|\"|\'|\[|\]|\{|\}|,|.|\?|\/|=|-]|$)"), rb"SINGLE_CHAR \3"])
tokens.append([re.compile(rb"[0-9]+"), b'NUM'])
tokens.append([re.compile(rb"[0-9a-z]{8,}"), b'STRING'])
for token in tokens:
row = re.sub(token[0], b" "+token[1]+b" ", row)
row = br_sub(row, b"BT_NUMS", 0x30, 0x39, multy_replace=multy_replace)
if replace_unk:
row = row.split()
new_row = []
for w in row:
if (w.upper() == w) or (w.upper() in SQL_RESERVED_WORDS):
new_row.append(w)
else:
new_row.append(b'unk')
row = b" ".join(new_row)
return row
def replace_tandems(row):
return _tandems.replace_tandems(row.decode()).encode()
def norm_len(seq, max_len, UNK_ID=UNK_ID):
if len(seq) >= max_len:
return seq[:max_len]
else:
return seq+[UNK_ID]*(max_len-len(seq))
def prepare_sen(row, multy_replace, max_seq_len):
sen = replace_tokens(row)
if multy_replace:
if len(sen.split()) > max_seq_len:
sen = b" ".join(sen.split()[:max_seq_len])
sen = replace_tandems(sen)
return sen
def file_as_bytes(file):
with file:
return file.read()
def calculate_model_hash(checkpoint_file):
return hashlib.md5(file_as_bytes(open(checkpoint_file+".index", 'rb'))).hexdigest()
def batch(data, n=cpu_count()):
l = len(data)
for ndx in range(0, l, n):
yield data[ndx:min(ndx + n, l)]
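# Hedged usage sketch (assumes the module's data files and the `tandems`
# package are available, since both are loaded at import time). Shown only to
# illustrate the byte-range substitution and padding helpers defined above.
if __name__ == "__main__":
    # Bytes 0x3c-0x3e ('<', '=', '>') are collapsed into one comparison token.
    print(br_sub(b"a<b>=c", b"BT_CMPRSN", 0x3c, 0x3e))  # b'a BT_CMPRSN b BT_CMPRSN c'
    # Sequences are padded (or truncated) to a fixed length with UNK_ID.
    print(norm_len([5, 6, 7], 5))  # [5, 6, 7, 0, 0]
    # Full tokenization of a raw query fragment (output depends on the rules above).
    print(replace_tokens(b"select * from users where id=1 or 1=1"))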
|
python
|
#!/usr/bin/env python
# coding: utf-8
from typing import List
from typing import Optional
from typing import Union
import numpy as np
import pandas as pd
from dataclasses import dataclass
from sklearn import metrics
from evidently import ColumnMapping
from evidently.analyzers.base_analyzer import Analyzer
from evidently.options import QualityMetricsOptions
from evidently.analyzers.utils import process_columns
from evidently.analyzers.utils import DatasetColumns
@dataclass
class ConfusionMatrix:
labels: List[str]
values: list
@dataclass
class ClassificationPerformanceMetrics:
"""Class for performance metrics values"""
accuracy: float
precision: float
recall: float
f1: float
roc_auc: float
log_loss: float
metrics_matrix: dict
confusion_matrix: ConfusionMatrix
roc_aucs: Optional[list] = None
roc_curve: Optional[dict] = None
pr_curve: Optional[dict] = None
pr_table: Optional[Union[dict, list]] = None
@dataclass
class ProbClassificationPerformanceAnalyzerResults:
columns: DatasetColumns
quality_metrics_options: QualityMetricsOptions
reference_metrics: Optional[ClassificationPerformanceMetrics] = None
current_metrics: Optional[ClassificationPerformanceMetrics] = None
class ProbClassificationPerformanceAnalyzer(Analyzer):
@staticmethod
def get_results(analyzer_results) -> ProbClassificationPerformanceAnalyzerResults:
return analyzer_results[ProbClassificationPerformanceAnalyzer]
def calculate(self,
reference_data: pd.DataFrame,
current_data: Optional[pd.DataFrame],
column_mapping: ColumnMapping) -> ProbClassificationPerformanceAnalyzerResults:
if reference_data is None:
raise ValueError('reference_data should be present')
columns = process_columns(reference_data, column_mapping)
target_column = columns.utility_columns.target
prediction_column = columns.utility_columns.prediction
quality_metrics_options = self.options_provider.get(QualityMetricsOptions)
result = ProbClassificationPerformanceAnalyzerResults(
columns=columns,
quality_metrics_options=quality_metrics_options,
)
classification_threshold = quality_metrics_options.classification_threshold
if target_column is not None and prediction_column is not None:
reference_data.replace([np.inf, -np.inf], np.nan, inplace=True)
reference_data.dropna(axis=0, how='any', inplace=True)
binaraized_target = (reference_data[target_column].values.reshape(-1, 1) == prediction_column).astype(int)
array_prediction = reference_data[prediction_column].to_numpy()
if len(prediction_column) > 2:
prediction_ids = np.argmax(array_prediction, axis=-1)
prediction_labels = [prediction_column[x] for x in prediction_ids]
else:
maper = {True: prediction_column[0], False: prediction_column[1]}
prediction_labels = (reference_data[prediction_column[0]] >= classification_threshold).map(maper)
labels = sorted(set(reference_data[target_column]))
# calculate quality metrics
roc_auc = metrics.roc_auc_score(binaraized_target, array_prediction, average='macro')
log_loss = metrics.log_loss(binaraized_target, array_prediction)
accuracy_score = metrics.accuracy_score(reference_data[target_column], prediction_labels)
avg_precision = metrics.precision_score(reference_data[target_column], prediction_labels, average='macro')
avg_recall = metrics.recall_score(reference_data[target_column], prediction_labels, average='macro')
avg_f1 = metrics.f1_score(reference_data[target_column], prediction_labels, average='macro')
# calculate class support and metrics matrix
metrics_matrix = metrics.classification_report(
reference_data[target_column], prediction_labels, output_dict=True
)
roc_aucs = None
if len(prediction_column) > 2:
roc_aucs = metrics.roc_auc_score(binaraized_target, array_prediction, average=None).tolist()
# calculate confusion matrix
conf_matrix = metrics.confusion_matrix(reference_data[target_column], prediction_labels)
result.reference_metrics = ClassificationPerformanceMetrics(
accuracy=accuracy_score,
precision=avg_precision,
recall=avg_recall,
f1=avg_f1,
roc_auc=roc_auc,
log_loss=log_loss,
metrics_matrix=metrics_matrix,
confusion_matrix=ConfusionMatrix(labels=labels, values=conf_matrix.tolist()),
roc_aucs=roc_aucs
)
# calculate ROC and PR curves, PR table
if len(prediction_column) <= 2:
binaraized_target = pd.DataFrame(binaraized_target[:, 0])
binaraized_target.columns = ['target']
fpr, tpr, thrs = metrics.roc_curve(binaraized_target, reference_data[prediction_column[0]])
result.reference_metrics.roc_curve = {
'fpr': fpr.tolist(),
'tpr': tpr.tolist(),
'thrs': thrs.tolist()
}
pr, rcl, thrs = metrics.precision_recall_curve(binaraized_target, reference_data[prediction_column[0]])
result.reference_metrics.pr_curve = {
'pr': pr.tolist(),
'rcl': rcl.tolist(),
'thrs': thrs.tolist()
}
pr_table = []
step_size = 0.05
binded = list(zip(binaraized_target['target'].tolist(),
reference_data[prediction_column[0]].tolist()))
binded.sort(key=lambda item: item[1], reverse=True)
data_size = len(binded)
target_class_size = sum([x[0] for x in binded])
offset = max(round(data_size * step_size), 1)
for step in np.arange(offset, data_size + offset, offset):
count = min(step, data_size)
prob = round(binded[min(step, data_size - 1)][1], 2)
top = round(100.0 * min(step, data_size) / data_size, 1)
tp = sum([x[0] for x in binded[:min(step, data_size)]])
fp = count - tp
precision = round(100.0 * tp / count, 1)
recall = round(100.0 * tp / target_class_size, 1)
pr_table.append([top, int(count), prob, int(tp), int(fp), precision, recall])
result.reference_metrics.pr_table = pr_table
else:
binaraized_target = pd.DataFrame(binaraized_target)
binaraized_target.columns = prediction_column
result.reference_metrics.roc_curve = {}
result.reference_metrics.pr_curve = {}
result.reference_metrics.pr_table = {}
for label in prediction_column:
fpr, tpr, thrs = metrics.roc_curve(binaraized_target[label], reference_data[label])
result.reference_metrics.roc_curve[label] = {
'fpr': fpr.tolist(),
'tpr': tpr.tolist(),
'thrs': thrs.tolist()
}
pr, rcl, thrs = metrics.precision_recall_curve(binaraized_target[label], reference_data[label])
result.reference_metrics.pr_curve[label] = {
'pr': pr.tolist(),
'rcl': rcl.tolist(),
'thrs': thrs.tolist()
}
pr_table = []
step_size = 0.05
binded = list(zip(binaraized_target[label].tolist(),
reference_data[label].tolist()))
binded.sort(key=lambda item: item[1], reverse=True)
data_size = len(binded)
target_class_size = sum([x[0] for x in binded])
offset = max(round(data_size * step_size), 1)
for step in np.arange(offset, data_size + offset, offset):
count = min(step, data_size)
prob = round(binded[min(step, data_size - 1)][1], 2)
top = round(100.0 * min(step, data_size) / data_size, 1)
tp = sum([x[0] for x in binded[:min(step, data_size)]])
fp = count - tp
precision = round(100.0 * tp / count, 1)
recall = round(100.0 * tp / target_class_size, 1)
pr_table.append([top, int(count), prob, int(tp), int(fp), precision, recall])
result.reference_metrics.pr_table[label] = pr_table
if current_data is not None:
current_data.replace([np.inf, -np.inf], np.nan, inplace=True)
current_data.dropna(axis=0, how='any', inplace=True)
binaraized_target = (current_data[target_column].values.reshape(-1, 1) == prediction_column).astype(int)
array_prediction = current_data[prediction_column].to_numpy()
if len(prediction_column) > 2:
prediction_ids = np.argmax(array_prediction, axis=-1)
prediction_labels = [prediction_column[x] for x in prediction_ids]
else:
maper = {True: prediction_column[0], False: prediction_column[1]}
prediction_labels = (current_data[prediction_column[0]] >= classification_threshold).map(maper)
# calculate quality metrics
roc_auc = metrics.roc_auc_score(binaraized_target, array_prediction, average='macro')
log_loss = metrics.log_loss(binaraized_target, array_prediction)
accuracy_score = metrics.accuracy_score(current_data[target_column], prediction_labels)
avg_precision = metrics.precision_score(current_data[target_column], prediction_labels, average='macro')
avg_recall = metrics.recall_score(current_data[target_column], prediction_labels, average='macro')
avg_f1 = metrics.f1_score(current_data[target_column], prediction_labels, average='macro')
# calculate class support and metrics matrix
metrics_matrix = metrics.classification_report(
current_data[target_column], prediction_labels, output_dict=True
)
roc_aucs = None
if len(prediction_column) > 2:
roc_aucs = metrics.roc_auc_score(binaraized_target, array_prediction, average=None).tolist()
# calculate confusion matrix
conf_matrix = metrics.confusion_matrix(current_data[target_column], prediction_labels)
result.current_metrics = ClassificationPerformanceMetrics(
accuracy=accuracy_score,
precision=avg_precision,
recall=avg_recall,
f1=avg_f1,
roc_auc=roc_auc,
log_loss=log_loss,
metrics_matrix=metrics_matrix,
confusion_matrix=ConfusionMatrix(labels=labels, values=conf_matrix.tolist()),
roc_aucs=roc_aucs
)
# calculate ROC and PR curves, PR table
if len(prediction_column) <= 2:
binaraized_target = pd.DataFrame(binaraized_target[:, 0])
binaraized_target.columns = ['target']
fpr, tpr, thrs = metrics.roc_curve(binaraized_target, current_data[prediction_column[0]])
result.current_metrics.roc_curve = {
'fpr': fpr.tolist(),
'tpr': tpr.tolist(),
'thrs': thrs.tolist()
}
pr, rcl, thrs = metrics.precision_recall_curve(binaraized_target,
current_data[prediction_column[0]])
result.current_metrics.pr_curve = {
'pr': pr.tolist(),
'rcl': rcl.tolist(),
'thrs': thrs.tolist()
}
pr_table = []
step_size = 0.05
binded = list(zip(
binaraized_target['target'].tolist(), current_data[prediction_column[0]].tolist()
))
binded.sort(key=lambda item: item[1], reverse=True)
data_size = len(binded)
target_class_size = sum([x[0] for x in binded])
offset = max(round(data_size * step_size), 1)
for step in np.arange(offset, data_size + offset, offset):
count = min(step, data_size)
prob = round(binded[min(step, data_size - 1)][1], 2)
top = round(100.0 * min(step, data_size) / data_size, 1)
tp = sum([x[0] for x in binded[:min(step, data_size)]])
fp = count - tp
precision = round(100.0 * tp / count, 1)
recall = round(100.0 * tp / target_class_size, 1)
pr_table.append([top, int(count), prob, int(tp), int(fp), precision, recall])
result.current_metrics.pr_table = pr_table
else:
binaraized_target = pd.DataFrame(binaraized_target)
binaraized_target.columns = prediction_column
result.current_metrics.roc_curve = {}
result.current_metrics.pr_curve = {}
result.current_metrics.pr_table = {}
for label in prediction_column:
fpr, tpr, thrs = metrics.roc_curve(binaraized_target[label], current_data[label])
result.current_metrics.roc_curve[label] = {
'fpr': fpr.tolist(),
'tpr': tpr.tolist(),
'thrs': thrs.tolist()
}
pr, rcl, thrs = metrics.precision_recall_curve(binaraized_target[label], current_data[label])
result.current_metrics.pr_curve[label] = {
'pr': pr.tolist(),
'rcl': rcl.tolist(),
'thrs': thrs.tolist()
}
pr_table = []
step_size = 0.05
binded = list(zip(binaraized_target[label].tolist(),
current_data[label].tolist()))
binded.sort(key=lambda item: item[1], reverse=True)
data_size = len(binded)
target_class_size = sum([x[0] for x in binded])
offset = max(round(data_size * step_size), 1)
for step in np.arange(offset, data_size + offset, offset):
count = min(step, data_size)
prob = round(binded[min(step, data_size - 1)][1], 2)
top = round(100.0 * min(step, data_size) / data_size, 1)
tp = sum([x[0] for x in binded[:min(step, data_size)]])
fp = count - tp
precision = round(100.0 * tp / count, 1)
recall = round(100.0 * tp / target_class_size, 1)
pr_table.append([top, int(count), prob, int(tp), int(fp), precision, recall])
result.current_metrics.pr_table[label] = pr_table
return result
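# Hedged, self-contained sketch of the cumulative PR-table logic repeated in
# `calculate` above: predictions are sorted by probability and a row is emitted
# at roughly `step_size` population steps. The helper name is hypothetical and,
# like the code above, it assumes at least one positive target.
def _pr_table_sketch(targets: List[int], probas: List[float], step_size: float = 0.05) -> List[list]:
    binded = sorted(zip(targets, probas), key=lambda item: item[1], reverse=True)
    data_size = len(binded)
    target_class_size = sum(x[0] for x in binded)
    offset = max(round(data_size * step_size), 1)
    table = []
    for step in range(offset, data_size + offset, offset):
        count = min(step, data_size)
        prob = round(binded[min(step, data_size - 1)][1], 2)
        top = round(100.0 * count / data_size, 1)
        tp = sum(x[0] for x in binded[:count])
        fp = count - tp
        precision = round(100.0 * tp / count, 1)
        recall = round(100.0 * tp / target_class_size, 1)
        table.append([top, int(count), prob, int(tp), int(fp), precision, recall])
    return table
# Example: _pr_table_sketch([1, 0, 1, 0], [0.9, 0.8, 0.7, 0.1]) yields one row
# per element here, because the 5% offset is clamped to a minimum of 1.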
|
python
|
from django.shortcuts import render, redirect
from django.contrib import messages
from .forms import EveTimerForm
from .models import EveTimer, EveTimerType
from datetime import datetime, timedelta
from django.utils import timezone
from django.contrib.auth.decorators import login_required, permission_required
@login_required
@permission_required('django_eveonline_timerboard.add_evetimer', raise_exception=True)
def add_timer(request):
if request.method == "POST":
form = EveTimerForm(request.POST)
if form.is_valid():
EveTimer(
name=form.cleaned_data['name'],
                timer=timezone.now() + timedelta(days=form.cleaned_data['days'], hours=form.cleaned_data['hours'], minutes=form.cleaned_data['minutes'], seconds=form.cleaned_data['seconds']),
type=form.cleaned_data.get('type'),
location=form.cleaned_data['location'],
user=request.user,
).save()
            messages.success(request, 'Successfully added timer: %s' % form.cleaned_data['name'])
return redirect('django-eveonline-timerboard-view')
else:
            messages.error(request, 'Failed to add timer: %s' % form.errors.as_text())
return redirect('django-eveonline-timerboard-view')
@login_required
@permission_required('django_eveonline_timerboard.delete_evetimer', raise_exception=True)
def remove_timer(request, pk):
EveTimer.objects.get(pk=pk).delete()
return redirect('django-eveonline-timerboard-view')
@login_required
@permission_required('django_eveonline_timerboard.view_evetimer', raise_exception=True)
def view_timerboard(request):
timers = EveTimer.objects.filter(timer__gte=timezone.now())
context = {
'timers': timers,
'types': EveTimerType.objects.all(),
'form': EveTimerForm()
}
return render(request, 'django_eveonline_timerboard/adminlte/timerboard.html', context)
|
python
|
#!/usr/bin/env python
"""
# ==============================================================================
# Author: Carlos A. Ruiz Perez
# Email: [email protected]
# Institution: Georgia Institute of Technology
# Version: 1.0.0
# Date: Nov 13, 2020
# Description: Builds the search databases depending on the search method
# selected. Can be one of blast, sword or diamond. These must be in the PATH
# or the path must be provided.
# ==============================================================================
"""
# ==============================================================================
# Import modules
# ==============================================================================
from microbeannotator.utilities.logging import setup_logger
from pathlib import Path
from shutil import which
from sys import exit
from sys import argv
import subprocess
import argparse
# ==============================================================================
# ==============================================================================
# Initialize logger
# ==============================================================================
logger = setup_logger(__name__)
# ==============================================================================
# ==============================================================================
# Define functions
# ==============================================================================
# Function to create blast databases
def blastp_db_creator(input_directory: Path, bin_path: Path = None) -> None:
logger.info('Building blast databases')
database_files = []
if bin_path != None:
makeblasdb_call = Path(bin_path) / 'makeblastdb'
else:
makeblasdb_call = 'makeblastdb'
if which(makeblasdb_call) == None:
logger.error(
f"BLAST's binary {makeblasdb_call} not found.\n"
f"Plase make sure BLAST is installed and if possible in PATH.\n"
)
exit(1)
else:
for protein_file in Path(input_directory).iterdir():
if protein_file.suffix == '.fasta':
output_file_name = protein_file.with_suffix("")
database_files.append(output_file_name)
subprocess.call(
[makeblasdb_call, '-in', protein_file, '-dbtype', 'prot',
'-out', output_file_name])
with open(Path(input_directory)/'blast_db.list', 'w') as db_list:
for file in database_files:
db_list.write(f"{file.name}\n")
logger.info('Finished')
# Function to create diamond databases
def diamond_db_creator(input_directory, threads=1, bin_path=None):
logger.info('Building diamond databases')
database_files = []
if bin_path != None:
diamond_call = Path(bin_path) / 'diamond'
else:
diamond_call = 'diamond'
if which(diamond_call) == None:
logger.error(
f"Diamond's binary {diamond_call} not found.\n"
f"Plase make sure Diamond is installed and if possible in PATH.\n"
)
exit(1)
else:
for protein_file in Path(input_directory).iterdir():
if protein_file.suffix == '.fasta':
output_file_name = protein_file.with_suffix("")
final_db_name = Path(output_file_name).with_suffix('.dmnd')
database_files.append(final_db_name)
subprocess.call(
[diamond_call, 'makedb', '--in', protein_file, '-d',
output_file_name, '--threads', str(threads)])
with open(Path(input_directory)/'diamond_db.list', 'w') as db_list:
for file in database_files:
db_list.write(f"{file.name}\n")
logger.info('Finished')
# Function to create sword database
def sword_db_creator(input_directory, bin_path=None):
logger.info('Building sword databases')
logger.info('No DB needed for sword, just checking if sword is in PATH')
if bin_path != None:
sword_call = Path(bin_path) / 'sword'
else:
sword_call = 'sword'
if which(sword_call) == None:
logger.warning(
f"Sword's binary {sword_call} not found.\n"
f"Plase make sure Sword is installed and if possible in PATH.\n"
)
with open(Path(input_directory)/'sword_db.list', 'w') as db_list:
for file in Path(input_directory).iterdir():
if file.suffix == '.fasta':
db_list.write(f"{file.name}\n")
logger.info('Finished')
# ==============================================================================
# ==============================================================================
# Define main function
# ==============================================================================
def main():
# Setup parser for arguments.
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=(
f"This script creates the search databases used by tools\n"
f"in MicrobeAnnotator.\n"
f"Mandatory parameters: -d [directory proteins] -m [method/tool]\n"
f"-d [database directory] -s [sqlite database]\n"
f"Optional parameters: See {argv[0]} -h"))
# Setup mandatory arguments
mandatory_arguments = parser.add_argument_group("Mandatory")
mandatory_arguments.add_argument(
'-d', '--dirprot', dest='dirprot', action='store', required=True,
help='Directory where all raw fasta files are located.')
mandatory_arguments.add_argument(
'-m', '--method', dest='method', action='store', required=True,
help='Search (and DB creation) method. One of blast, diamond or sword')
# Setup optional arguments
optional_arguments = parser.add_argument_group("Optional")
optional_arguments.add_argument(
'--bin_path', dest='bin_path', action='store', required=False,
help='Path to binary folder for selected method/tool.')
# If no arguments are provided
if len(argv) == 1:
parser.print_help()
exit(0)
arguments = parser.parse_args()
# Parse arguments
dirprot = arguments.dirprot
method = arguments.method
bin_path = arguments.bin_path
method = method.lower()
# Run functions
if method == 'blast':
blastp_db_creator(dirprot, bin_path)
elif method == 'diamond':
        diamond_db_creator(dirprot, bin_path=bin_path)
elif method == 'sword':
sword_db_creator(dirprot, bin_path)
else:
logger.error(
f"Search method not recognized. Must be blast, diamond, or sword.")
exit(1)
# ==============================================================================
# ==============================================================================
# Run main function
# ==============================================================================
if __name__ == "__main__":
main()
# ==============================================================================
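# Example invocations (illustrative only: the script file name below is hypothetical,
# and the chosen tool must be available either in PATH or via --bin_path):
#
#     python make_search_dbs.py -d proteins/ -m blast
#     python make_search_dbs.py -d proteins/ -m diamond --bin_path /opt/diamond/bin
#
# Each .fasta file in the protein directory gets its own database, and the list of
# created databases is written to <protein directory>/<method>_db.list.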
|
python
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class SignUpForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
phone = forms.CharField(max_length=30, required=False, help_text='Optional.')
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'phone', 'email', 'password1', 'password2', )
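# A minimal sketch of how this form might be wired into a view (illustrative only;
# the view name, URL name and template path are hypothetical, not defined in this app):
#
#     from django.shortcuts import render, redirect
#
#     def signup(request):
#         form = SignUpForm(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()
#             return redirect('login')
#         return render(request, 'registration/signup.html', {'form': form})
#
# Note: 'phone' is declared on the form but is not a field of the User model, so it
# is validated but not persisted by form.save().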
# # from django import forms
# # from django.contrib.auth.models import User
# # from django.contrib.auth.forms import UserCreationForm
# # from accounts.models import Profile
# from django import forms
# from django.contrib.auth.forms import UserCreationForm
# from django.contrib.auth.models import User
# # class SignUpForm(UserCreationForm):
# # first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
# # last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
# # email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
# # birth_date = forms.DateField(help_text='Required. Format: YYYY-MM-DD')
# # class Meta:
# # model = User
# # fields = ('username', 'birth_date', 'first_name', 'last_name', 'email', 'password1', 'password2', )
# #
# class CustomUserCreationForm(UserCreationForm):
# class Meta(UserCreationForm.Meta):
# fields = UserCreationForm.Meta.fields + ("email",)
# # class RegisterForm(UserCreationForm):
# # # fields we want to include and customize in our form
# # first_name = forms.CharField(max_length=100,
# # required=True,
# # widget=forms.TextInput(attrs={'placeholder': 'First Name',
# # 'class': 'form-control',
# # }))
# # last_name = forms.CharField(max_length=100,
# # required=True,
# # widget=forms.TextInput(attrs={'placeholder': 'Last Name',
# # 'class': 'form-control',
# # }))
# # username = forms.CharField(max_length=100,
# # required=True,
# # widget=forms.TextInput(attrs={'placeholder': 'Username',
# # 'class': 'form-control',
# # }))
# # email = forms.EmailField(required=True,
# # widget=forms.TextInput(attrs={'placeholder': 'Email',
# # 'class': 'form-control',
# # }))
# # password1 = forms.CharField(max_length=50,
# # required=True,
# # widget=forms.PasswordInput(attrs={'placeholder': 'Password',
# # 'class': 'form-control',
# # 'data-toggle': 'password',
# # 'id': 'password',
# # }))
# # password2 = forms.CharField(max_length=50,
# # required=True,
# # widget=forms.PasswordInput(attrs={'placeholder': 'Confirm Password',
# # 'class': 'form-control',
# # 'data-toggle': 'password',
# # 'id': 'password',
# # }))
# # class Meta:
# # model = User
# # fields = ['first_name', 'last_name', 'username', 'email', 'password1', 'password2']
# # class ProfileForm(forms.ModelForm):
# # first_name = forms.CharField(max_length=255)
# # last_name = forms.CharField(max_length=255)
# # email = forms.EmailField()
# # class Meta:
# # model = Profile
# # fields = '__all__'
# # exclude = ['user']
# # def form_validation_error(form):
# # """
# # Form Validation Error
# # If any error happened in your form, this function returns the error message.
# # """
# # msg = ""
# # for field in form:
# # for error in field.errors:
# # msg += "%s: %s \\n" % (field.label if hasattr(field, 'label') else 'Error', error)
# # return msg
|
python
|
from odc.geo.data import country_geom, data_path, gbox_css, ocean_geojson, ocean_geom
def test_ocean_gjson():
g1 = ocean_geojson()
g2 = ocean_geojson()
assert g1 is g2
assert len(g1["features"]) == 2
def test_ocean_geom():
g = ocean_geom()
assert g.crs == "epsg:4326"
g = ocean_geom("epsg:3857", (-180, -80, 180, 80))
assert g.crs == "epsg:3857"
def test_country_geom():
g = country_geom("AUS")
assert g.crs == "epsg:4326"
g = country_geom("AUS", "epsg:3577")
assert g.crs == "epsg:3577"
def test_gbox_css():
assert isinstance(gbox_css(), str)
def test_data_path():
assert data_path().exists()
assert data_path("--no-such-thing--").exists() is False
|
python
|
import os
from hashkernel.bakery import CakeRole
from hashstore.bakery.lite import dal
from hashstore.bakery.lite.node import (
ServerConfigBase, GlueBase, CakeShardBase, User, UserType,
UserState, Permission, Portal, ServerKey, PermissionType as PT)
from hashstore.bakery.lite.node.blobs import BlobStore
from hashstore.utils.db import Dbf
from hashkernel.hashing import shard_name_int, SaltedSha
class CakeStore:
def __init__(self, store_dir):
self.store_dir = store_dir
self._blob_store = None
self.srvcfg_db = Dbf(
ServerConfigBase.metadata,
os.path.join(self.store_dir, 'server.db')
)
self.glue_db = Dbf(
GlueBase.metadata,
os.path.join(self.store_dir, 'glue.db')
)
self.max_shards = None
self.shards_db = None
def cake_shard_db(self, cake):
if self.max_shards is None:
self.max_shards = self.server_config().num_cake_shards
self.shards_db = [Dbf(
CakeShardBase.metadata,
os.path.join(self.store_dir,
'shard_' + shard_name_int(i) + '.db')
) for i in range(self.max_shards)]
db = self.shards_db[cake.shard_num(self.max_shards)]
if not(db.exists()):
db.ensure_db()
return db
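    # Illustrative example of the lazy sharding above: with num_cake_shards == 4,
    # a cake for which cake.shard_num(4) == 2 is routed to the database file
    # os.path.join(store_dir, 'shard_' + shard_name_int(2) + '.db'); the file is
    # only created (ensure_db) the first time that shard is touched.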
def blob_store(self):
if self._blob_store is None:
self._blob_store = BlobStore(
os.path.join(self.store_dir, 'backend')
)
return self._blob_store
def initdb(self, external_ip, port, num_cake_shards=10):
if not os.path.exists(self.store_dir):
os.makedirs(self.store_dir)
self.srvcfg_db.ensure_db()
os.chmod(self.srvcfg_db.path, 0o600)
self.glue_db.ensure_db()
self.blob_store()
with self.srvcfg_db.session_scope() as srv_session:
skey = srv_session.query(ServerKey).one_or_none()
if skey is None:
skey = ServerKey()
skey.num_cake_shards = num_cake_shards
elif skey.num_cake_shards != num_cake_shards:
raise ValueError(
f'reshard required: '
f'{skey.num_cake_shards} != {num_cake_shards}')
skey.port = port
skey.external_ip = external_ip
srv_session.merge(skey)
with self.glue_db.session_scope() as glue_session:
make_system_user = lambda n: User(
email=f'{n}@' ,
user_type=UserType[n],
user_state=UserState.active,
passwd=SaltedSha.from_secret('*'),
full_name=f'{n} user'
)
#ensure guest
guest = dal.query_users_by_type(
glue_session,UserType.guest).one_or_none()
if guest is None:
guest = make_system_user('guest')
glue_session.add(guest)
glue_session.flush()
index_portal = guest.id.transform_portal(
role=CakeRole.NEURON)
with self.cake_shard_db(index_portal).session_scope() as \
shard_session:
shard_session.add(Portal(id=index_portal))
#ensure system
system = dal.query_users_by_type(
glue_session, UserType.system).one_or_none()
if system is None:
system = make_system_user('system')
glue_session.add(system)
glue_session.add(
Permission(permission_type=PT.Admin,
user=system))
def server_config(self):
with self.srvcfg_db.session_scope() as session:
return session.query(ServerKey).one()
|
python
|
# -*- coding: utf-8 -*-
import os, sys
import time
import pathlib
import glob
import cv2
import configparser
import copy
from datetime import datetime, timedelta
from matplotlib.lines import Line2D
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
#from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from PyQt5.QtWidgets import QMainWindow, QHeaderView, QCheckBox, QHBoxLayout, QLineEdit, QTableWidget, \
QTableWidgetItem, QDoubleSpinBox, QStyle, QSlider, QSizePolicy, QAction, QTextEdit, QMessageBox, \
QComboBox, QProgressBar, QSpinBox, QFileDialog, QTabWidget, QWidget, QLabel, QVBoxLayout, QGridLayout, \
QPushButton, QApplication, QDesktopWidget, QDialogButtonBox, QListWidget, QAbstractItemView
from PyQt5.QtGui import QIcon, QPixmap, QFont, QColor, QImage
from PyQt5.QtCore import Qt, pyqtSignal
#from PyQt5 import QtGui
from libraries import MultipleFoldersByUser, UserDialogs, Filters, helpfunctions, QuiverExportOptions, plotfunctions, OHW
from libraries.gui import tab_input, tab_motion, tab_TA, tab_quiver, tab_batch, tab_kinetics
class TableWidget(QWidget):
def __init__(self, parent):
super(QWidget, self).__init__(parent)
#read config file
self.config = helpfunctions.read_config()
self.current_ohw = OHW.OHW()
self.layout = QGridLayout(self)
self.tabs = QTabWidget()
self.layout.addWidget(self.tabs)
self.setLayout(self.layout)
self.plotted_peaks = False
self.tab_input = tab_input.TabInput(self)
self.tab_motion = tab_motion.TabMotion(self)
self.tab_kinetics = tab_kinetics.TabKinetics(self)
self.tab_quiver = tab_quiver.TabQuiver(self)
self.tab_TA = tab_TA.TabTA(self)
self.tab_batch = tab_batch.TabBatch(self)
self.tabROIs = QWidget()
self.tabs.resize(800,800)
# Add tabs
self.tabs.addTab(self.tab_input,"Video Input ")
#self.tabs.addTab(self.tabROIs, "Manage ROIs")
self.tabs.addTab(self.tab_motion,"Compute motion")
self.tabs.addTab(self.tab_kinetics,"Beating kinetics")
self.tabs.addTab(self.tab_quiver,"Heatmaps and Quiverplots")
self.tabs.addTab(self.tab_TA,"Time averaged motion")
self.tabs.addTab(self.tab_batch,"Batch analysis")
self.init_ohw()
#self.ROI_coordinates = []
#self.ROI_names = []
#self.ROI_OHWs = []
curr_date = datetime.now().date() # move updatecheck into function
last_check = datetime.strptime(self.config['UPDATE']['last_check'],"%Y-%m-%d").date()
if curr_date > last_check + timedelta(days=1): #older than a day
helpfunctions.check_update(self, self.config['UPDATE']['version'])# self needed for msgbox... get rid at some point?
self.config['UPDATE']['last_check'] = str(curr_date)
helpfunctions.save_config(self.config) # save curr_date back into config
# default values for quiver export
# self.config.getboolean(section='DEFAULT QUIVER SETTINGS', option='one_view')
self.quiver_settings = {}# self.config['DEFAULT QUIVER SETTINGS']
for item in ['one_view', 'three_views', 'show_scalebar']:
self.quiver_settings[item] = self.config.getboolean(section='DEFAULT QUIVER SETTINGS', option=item)
for item in ['quiver_density']:
self.quiver_settings[item] = self.config.getint(section='DEFAULT QUIVER SETTINGS', option=item)
for item in ['video_length']:
self.quiver_settings[item] = self.config.getfloat(section='DEFAULT QUIVER SETTINGS', option=item)
def close_Window(self):
''' called by closing event'''
self.current_ohw.save_ohw() #save on exit (MVs should be automatically saved, peaks might have changed)
def init_ohw(self):
''' init tabs to changed ohw '''
self.tab_input.init_ohw()
self.tab_motion.init_ohw()
self.tab_kinetics.init_ohw()
self.tab_TA.init_ohw()
self.tab_quiver.init_ohw()
"""
########### fill the ROI selection tab ###########
info_ROI = QTextEdit()
info_ROI.setText('In this tab you can add, edit and choose Regions of Interest.')
info_ROI.setReadOnly(True)
info_ROI.setMaximumHeight(40)
info_ROI.setMaximumWidth(800)
info_ROI.setStyleSheet("background-color: LightSkyBlue")
#Button for ROI selection
self.button_selectROI = QPushButton('Add a Region of Interest (ROI)')
self.button_selectROI.resize(self.button_selectROI.sizeHint())
self.button_selectROI.clicked.connect(self.on_selectROI)
self.button_selectROI.setEnabled(False)
#add a table to display ROIs and corresponding names
#self.ROI_tableWidget = QTableWidget()
#self.ROI_tableWidget.setRowCount(2)
#self.ROI_tableWidget.setColumnCount(2)
##titles
#header = self.ROI_tableWidget.horizontalHeader()
#header.setSectionResizeMode(QHeaderView.ResizeToContents)
#header.setSectionResizeMode(0, QHeaderView.Stretch)
#
#header_vert = self.ROI_tableWidget.verticalHeader()
#header_vert.setSectionResizeMode(QHeaderView.ResizeToContents)
#header_vert.setSectionResizeMode(0, QHeaderView.Stretch)
#header_vert.setSectionResizeMode(1, QHeaderView.Stretch)
#set exemplary item
self.ROI = QLabel()
self.ROI.setPixmap(QPixmap('icons/dummy_image.png').scaledToWidth(self.pixmap_width))
#self.ROI_tableWidget.setItem(0,0,
#QTableWidgetItem(QIcon(QPixmap('icons/dummy_image.png')), 'meee'))#.scaledToWidth(self.pixmap_width)),
#'meeee')
self.fig_ROI, self.ax_ROI = plt.subplots(1,1)
self.ax_ROI.axis('off')
self.canvas_ROI = FigureCanvas(self.fig_ROI)
#dict for ROIs
#save ROI names and corresponding OHWs!
self.ROI_Management = {}
self.ROIManagement_title = QLabel('Currently available ROIs:')
self.ROIManagement_title.setFont(QFont("Times",weight=QFont.Bold))
self.tabROIs.layout = QGridLayout(self)
self.tabROIs.layout.setSpacing(25)
self.tabROIs.layout.addWidget(info_ROI, 0,0)
self.tabROIs.layout.addWidget(self.button_selectROI, 1,0)
self.tabROIs.layout.addWidget(self.ROIManagement_title, 2,0)
self.tabROIs.layout.addWidget(QLineEdit('Example ROI'), 3,0)
self.tabROIs.layout.addWidget(self.ROI, 3,1)
self.tabROIs.setLayout(self.tabROIs.layout)
"""
"""
###############################################################################
def change_ROI_names(self, ROI_nr):
'''
emitted when name of one of the ROIs is changed by the user in one of the lineedits
Parameters:
ROI_nr index of the ROI
'''
#get the new name from the LineEdit which send the signal
new_name = self.sender().text()
self.ROI_names[ROI_nr] = new_name
#change the resultsfolder name in the corresponding ROI_OHW
self.ROI_OHWs[ROI_nr].results_folder = self.current_ohw.results_folder.joinpath(self.ROI_names[ROI_nr])
#change the items in all the comboboxes, first item is the full image
self.ekg_combobox.setItemText(ROI_nr+1, new_name)
self.advanced_combobox.setItemText(ROI_nr+1, new_name)
self.timeavg_combobox.setItemText(ROI_nr+1, new_name)
def on_chooseROI(self, current_index):
''' choose a ROI for displaying and analyzing results in beating_kinetics, heatmaps and quiverplots '''
if current_index == 0:
self.current_ohw = self.current_ohw
#self.current_ROI is specified as ROI_nr, index in self.ROI_OHWs!
else:
self.current_ROI_idx = current_index-1
self.current_ohw = self.ROI_OHWs[self.current_ROI_idx]
print(self.ROI_names[self.current_ROI_idx])
if self.sender() == self.ekg_combobox:
self.current_ohw.initialize_calculatedMVs()
self.initialize_kinetics()
elif self.sender() == self.advanced_combobox:
self.initialize_MV_graphs()
self.button_succeed_heatmaps.setStyleSheet("background-color: IndianRed")
self.button_succeed_quivers.setStyleSheet("background-color: IndianRed")
self.progressbar_heatmaps.setValue(0)
self.progressbar_quivers.setValue(0)
#elif self.sender() == self.timeavg_combobox:
# self.init_TAMotion()
def on_selectROI(self):
''' select a ROI from the first image of the rawImageStack, calculation of MVs will be performed on ROI only after this '''
widget_height = self.frameSize().height()
#take the first image of rawImageStack and scale to fit on display
img = cv2.cvtColor(self.current_ohw.rawImageStack[0], cv2.COLOR_GRAY2RGB)
hpercent = (widget_height / float(img.shape[1]))
wsize = int((float(img.shape[0]) * float(hpercent)))
image_scaled = cv2.resize(img, (wsize, widget_height))
#convert to uint8 if needed
if img.dtype != 'uint8':
image_norm = image_scaled
image_norm = cv2.normalize(image_scaled, image_norm, 0, 1, cv2.NORM_MINMAX)*255
image_scaled = image_norm.astype(np.uint8)
#open the ROI selection
r = cv2.selectROI('Press Enter to save the currently selected ROI:', image_scaled, fromCenter=False)
#transform the coordinates back to match the image of original size
r_transf = [r[idx]/hpercent for idx in range(0,len(r))]
#add the new ROI to the OHWs, the ROI_names
new_nr = len(self.ROI_names) + 1
new_name = 'ROI_{}'.format(new_nr)
self.ROI_names.append(new_name)
self.ekg_combobox.addItem(new_name)
self.advanced_combobox.addItem(new_name)
self.timeavg_combobox.addItem(new_name)
#create new OHW
self.manageROIs(r_transf)
cv2.destroyAllWindows()
self.check_scaling.setEnabled(False)
self.scaling_status = False
def manageROIs(self, r):
''' Parameters: r coordinates returned by cv2.selectROI '''
self.ROI_coordinates.append(r)
self.ROI_OHWs = []
for nr_ROI in range(0, len(self.ROI_coordinates)):
#create new OHW object for each ROI
current_ROI_OHW = copy.deepcopy(self.current_ohw)
#create new subfolder for storing ROI analysis
current_ROI_OHW.results_folder = self.current_ohw.results_folder.joinpath(self.ROI_names[nr_ROI])
#mark as ROI_OHW
current_ROI_OHW.isROI_OHW = True
current_ROI_OHW.createROIImageStack(self.ROI_coordinates[nr_ROI])
#display ROI in tab
current_row = 3 + nr_ROI#len(self.ROI_coordinates)
self.display_ROI(current_ROI_OHW.ROIImageStack[0], nr_ROI, current_row)
#add OHW to list of all ROI OHWs
self.ROI_OHWs.append(current_ROI_OHW)
def perform_batchAnalysis(self, progressSignal=None):
#number of signals to be emitted
self.maxNumberSignals = self.getMaximumSignals_batch()
#internal counter of current batch signals
self.count_batch_signals = 0
print('Current folders for batch analysis:')
print(self.batch_folders)
for folder in self.batch_folders:
print('Start Analysis for folder %s:' %folder)
#### perform analysis for one folder:
current_ohw = OHW.OHW()
# create a subfolder for the results
# save_subfolder = self.results_folder_batch / folder.split('/')[-1]
save_subfolder = str(pathlib.PureWindowsPath(self.results_folder_batch)) + '/' + folder.split('/')[-1] #+ '/results'
if not os.path.exists(str(save_subfolder)):
os.makedirs(str(save_subfolder))
current_ohw.results_folder = save_subfolder
# read data
current_ohw.read_imagestack(folder)
print(' ... finished reading data.')
#progress signal for finishing reading data
if progressSignal != None:
self.count_batch_signals += 1
progressSignal.emit(self.count_batch_signals/self.maxNumberSignals)
# scale data if desired
if self.batch_scaling_status == True:
current_ohw.scale_ImageStack()
else:
# current_ohw.scale_ImageStack(current_ohw.rawImageStack.shape[0][1]) # too hacky, refactor...
if current_ohw.ROIImageStack is not None:
current_ohw.scale_ImageStack(current_ohw.ROIImageStack.shape[1], current_ohw.ROIImageStack[2])
else:
current_ohw.scale_ImageStack(current_ohw.rawImageStack.shape[1], current_ohw.rawImageStack[2])
# calculate MVs
current_ohw.calculate_MVs(blockwidth=self.blockwidth_batch, delay=self.delay_batch, max_shift=self.maxShift_batch)
print(' ... finished calculating motion vectors.')
#progress signal for finishing calc MVs
if progressSignal != None:
self.count_batch_signals += 1
progressSignal.emit(self.count_batch_signals/self.maxNumberSignals)
# plot beating kinetics
current_filename = str(save_subfolder) +'/' + 'beating_kinetics.png'
current_ohw.plot_beatingKinetics(filename=current_filename, keyword='batch')
print(' ... finished plotting beating kinetics.')
# create heatmap video if chosen by user
if self.batch_heatmap_status == True:
current_ohw.save_heatmap(subfolder=pathlib.Path(save_subfolder), keyword='batch')
print(' ... finished saving heatmaps.')
#progress signal for finishing heatmap data
if progressSignal != None:
self.count_batch_signals += 1
progressSignal.emit(self.count_batch_signals/self.maxNumberSignals)
# create quiver video if chosen by user
if self.batch_quiver_status == True:
current_ohw.save_quiver(subfolder=pathlib.Path(save_subfolder), keyword='batch')
print(' ... finished saving quivers.')
#progress signal for finishing quiver data
if progressSignal != None:
self.count_batch_signals += 1
progressSignal.emit(self.count_batch_signals/self.maxNumberSignals)
def display_ROI(self, ROI, ROI_nr, row):
fig_ROI, ax_ROI = plt.subplots(1,1)
ax_ROI.axis('off')
canvas_ROI = FigureCanvas(fig_ROI)
#create frame
frame = patches.Rectangle((0,0),ROI.shape[1],ROI.shape[0],linewidth=2,edgecolor='k',facecolor='none')
# Add the patch to the Axes
ax_ROI.add_patch(frame)
# canvas_ROI.drawRectangle([0,0, ROI.shape[1], ROI.shape[0]])
imshow_ROI = ax_ROI.imshow(ROI, cmap = 'gray', vmin = self.current_ohw.videometa["Blackval"], vmax = self.current_ohw.videometa["Whiteval"])
# #adapt size
# fig_size = plt.rcParams["figure.figsize"]
# ratio = fig_size[1]/fig_size[0]
# #change height
# fig_size[1] = 4
# fig_size[0] = fig_size[1] * ratio
canvas_ROI.draw()
current_lineedit = QLineEdit()
current_lineedit.setText(self.ROI_names[ROI_nr])
#if text is changed by user, save it to ROI_names:
current_lineedit.textEdited.connect(lambda: self.change_ROI_names(ROI_nr=ROI_nr))
self.tabROIs.layout.addWidget(current_lineedit, row, 0)
self.tabROIs.layout.addWidget(canvas_ROI,row,1)
def on_change_quiverSettings(self):
#calculate maximum video length
# del self.quiver_settings['video_length']
self.quiver_settings['video_length'] = str(
1/self.current_ohw.videometa["fps"] * self.current_ohw.absMotions.shape[0])
# open new window and let user change export settings
self.settingsWindow = QuiverExportOptions.QuiverExportOptions(prevSettings = self.quiver_settings)
self.settingsWindow.table_widget.got_settings.connect(self.save_quiver_settings)
self.settingsWindow.show()
def save_quiver_settings(self, settings):
''' receive the signals from quiver settings changed by user '''
self.quiver_settings = settings
#save quiver settings to config.ini:
#convert to string suitable for configparser
for item in ['one_view', 'three_views', 'show_scalebar']:
self.config.set("DEFAULT QUIVER SETTINGS", option=item, value=str(self.quiver_settings[item]).lower())
for item in ['quiver_density', 'video_length']:
self.config.set("DEFAULT QUIVER SETTINGS", option=item, value=str(self.quiver_settings[item]))
helpfunctions.save_config(self.config)
self.settingsWindow.close()
#self.initialize_MV_graphs()
"""
|
python
|
#!/usr/bin/python3
import asyncio
import random
import traceback
import aiomas
import hashlib
import logging
import errno
from helpers.validator import *
from helpers.chordInterval import *
from helpers.storage import Storage
from helpers.replica import Replica
from helpers.messageDefinitions import *
from jsonschema import validate, Draft3Validator
from jsonschema.exceptions import ValidationError, SchemaError
def filter_node_response(data, immediate_neighbors=False, trace_log=False):
if data is None:
return None
output = {
"node_id": data["node_id"],
"node_address": data["node_address"]
}
if immediate_neighbors and "successor" in data:
output["successor"] = filter_node_response(data["successor"])
if immediate_neighbors and "predecessor" in data:
output["predecessor"] = filter_node_response(data["predecessor"])
if trace_log:
output["trace"] = data["trace"]
return output
class InitiatorAgent(aiomas.Agent):
"""The initiator sends a *Call for Proposal (CfP)* to all bidders and
accepts the best proposal."""
async def run(self, bidder_addrs, target):
proposals = []
for addr in bidder_addrs:
# Connect to the BidderAgent
bidder_proxy = await self.container.connect(addr)
# Send a CfP to the agent.
proposal = await bidder_proxy.cfp(target)
# The reply is a list, so we need to make a "Proposal" from it:
proposal = Proposal(*proposal)
if proposal.value is not None:
proposals.append(proposal)
if proposals:
# Select the proposal that is closest to "target"
            proposal_best = min(proposals, key=lambda p: abs(target - p.value))
# Proposal.bidder is a proxy to the respective agent. We can use
# it to send a message to it:
result = await proposal_best.bidder.accept(proposal_best.value)
proposals.remove(proposal_best)
for proposal in proposals:
# The same as before. "Proposal.bidder" is an agent proxy that
# we can use to reject the agent:
await proposal.bidder.reject(proposal.value)
return result
return None
class Node(aiomas.Agent):
"""
Node
"""
@aiomas.expose
def cfp(self, cfp):
"""Reply to a *cfp* with a "Proposal"."""
print('%s was called for proposal to %s.' % (self, cfp))
# Randomly choose "None" (no proposal) or a random number:
value = random.choice([None, random.random()])
# "self" can be sent to other agents and will be deserialized as
# a proxy to this agent:
return Proposal(bidder=self, value=value)
@aiomas.expose
def reject(self, reject):
"""Our proposal got rejected :-(."""
print('%s was rejected for proposal %.2f.' % (self, reject))
@aiomas.expose
def accept(self, accept):
"""Our proposal was the best. Do we accept this outcome?"""
print('%s was accepted for proposal %.2f.' % (self, accept))
return random.choice(['nay', 'yay'])
class Successor:
"""
List manager for successor references.
It is responsible that entries in the successor list and first finger are consistent.
"""
def __init__(self, finger_table_ref):
self.list = []
self._backup = None # List backup before ``update_others``
self.max_entries = 3
self._fingertable = finger_table_ref
def set(self, new_successor, replace_old=False):
if len(self.list) == 0:
self.list = [new_successor]
else:
self.list[0] = new_successor
# Maintain first finger to represent correct successor
self._correct_finger_table(new_successor, replace_old=replace_old)
# self._fingertable[0]["successor"] = new_successor
def get(self):
return self.list[0]
def update_others(self, successors, ignore_key=-1):
if successors:
self._backup = self.list
self.list = [self.get()] + [x for x in successors if x["node_id"] != ignore_key]
del self.list[self.max_entries:]
else:
print("[Node:update_others] Not able to update successor list based on input.")
def revert_update(self):
self.list = self._backup
def delete_first(self):
del self.list[0]
self._correct_finger_table(self.get(), replace_old=True)
def count_occurrence(self, successor):
# Increase efficiency by using collections.OrderedDict with last=False
return self.list.count(successor)
def _correct_finger_table(self, new_successor, replace_old=False, offset=0):
old_peer = self._fingertable[offset].get("successor")
self._fingertable[offset]["successor"] = new_successor
if old_peer is None or not replace_old:
return
for entry in self._fingertable[offset+1:]:
# if entry["successor"] is None:
# break
if entry and entry["successor"].get("node_id") == old_peer["node_id"]:
entry["successor"] = new_successor
else:
break
def print_list(self, pre_text="Successor list"):
print(pre_text)
print(" ID | Address")
print("----------------------")
for successor in self.list:
print("%s %s" % (str(successor["node_id"]).ljust(9), successor["node_address"]))
print("")
def __init__(self, container, node_address):
# Async RPC init
super().__init__(container, node_address)
self.node_address = node_address
self.log = logging.getLogger(__name__)
self.log.info("Node server listening on %s.", node_address)
# Node state
self.bootup_finished = False
self.activated = True
self.network_timeout = 7
self.storage = Storage()
# Wide-range Overlay network
self.fingertable = []
self.fix_interval = 4 + random.randint(0, 5)
self.fix_next = 0
# Short-range Successor list (manages finger[0] in fingertable)
self.successor = Node.Successor(self.fingertable)
@asyncio.coroutine
def _check_running_state(self):
"""
Delay operation if booting process is not finished yet.
This assures that internal data structures are not accessed before.
"""
while not self.bootup_finished:
self.log.info("Delaying request. Bootup not finished.")
yield from asyncio.sleep(1)
def as_dict(self, serialize_neighbors=False, additional_data=False):
dict_node = {
"node_id": self.id,
"node_address": self.node_address,
}
if serialize_neighbors:
dict_node["successor"] = self.successor.get()
if serialize_neighbors and self.predecessor:
dict_node["predecessor"] = self.predecessor
if additional_data:
dict_node["additional_data"] = self.additional_data
return dict_node
@staticmethod
def generate_key(address):
"""
Generates a node identifier (key) based on the network address for this instance.
:param address:
The network address as string
:return:
Generated node id
:rtype: int
"""
return int(hashlib.sha256(address.encode()).hexdigest(), 16) % CHORD_RING_SIZE
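    # Illustrative: generate_key() is deterministic, so the same network address
    # always maps to the same ring position in [0, CHORD_RING_SIZE); the address
    # string below is hypothetical:
    #     Node.generate_key("tcp://127.0.0.1:5555/0")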
@asyncio.coroutine
def join(self, node_id=None, node_address=None, bootstrap_address=None, additional_data=None):
"""
Joins an existing Chord network or creates a new one.
        It sets up all internal state variables needed for operation.
        It needs to be called before any other function or RPC call.
:param node_id:
Optional node ID.
If not supplied, it will be generated automatically.
:param node_address:
Optional node address formatted as an aiomas agent address (IPv4 or IPv6 address)
:param bootstrap_address:
If not given, a new Chord network is created. Otherwise, the new node
will gather the required information to integrate into the Chord network.
:param additional_data:
Optional additional data as dict that is added to the trace log if a node calls
:func:`find_successor_rec` with tracing enabled.
"""
self.id = node_id or self.generate_key(self.node_address)
self.node_address = node_address or self.node_address # normally already set in __init__
self.bootstrap_address = bootstrap_address
self.predecessor = None
self.log.info("[Configuration] node_id: %d, bootstrap_node: %s", self.id, self.bootstrap_address)
self.additional_data = additional_data or {}
if self.bootstrap_address:
# Regular node joining via bootstrap node
self.__generate_fingers(None)
# Try joining later if our successor does not respond
successor = None
while True:
successor, status = yield from self.run_rpc_safe(self.bootstrap_address, "rpc_find_successor_rec",
self.fingertable[0]["start"])
if status == 0:
if successor["status"] == 0:
# Successors seems to be reachable: we can proceed
break
else:
self.log.warn("Successor node not responding.")
else:
self.log.warn("Bootstrap node not responding.")
self.log.warn("Will retry in 3 seconds.")
yield from asyncio.sleep(3)
# Proceed with a working successor
successor = filter_node_response(successor)
self.successor.set(successor)
yield from self.init_successor_list(successor)
yield from self.init_finger_table()
self.bootup_finished = True
yield from self.update_others()
else:
# This is the bootstrap node
successor_node = self.as_dict()
self.__generate_fingers(successor_node)
self.successor.set(successor_node) # bootstrap first references itself
self.bootup_finished = True
self.print_finger_table()
# if self.bootstrap_address:
# remote_peer = yield from self.container.connect(self.bootstrap_address)
# ft = yield from remote_peer.rpc_get_fingertable()
# print("Bootstrap Finger Table: ")
# self.print_finger_table(ft)
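    # Illustrative bootstrap sequence (the addresses are hypothetical): the first
    # node calls join() without a bootstrap_address and becomes its own successor;
    # every later node passes the address of a peer that is already in the ring, e.g.
    #     yield from node.join(bootstrap_address="tcp://10.0.0.1:5555/0")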
@asyncio.coroutine
def init_finger_table(self):
"""Generates a basic finger table for this node joining an existing Chord network.
"""
self.print_finger_table()
# Fix references to our direct neighbors
# This is necessary that find_successor works correctly.
yield from self.update_neighbors(initialization=True)
# Retrieve successor node for each finger 0 -> m-1 (finger 0 is already retrieved from bootstrap node)
for k in range(CHORD_FINGER_TABLE_SIZE - 1):
finger = self.fingertable[k]
finger_next = self.fingertable[k + 1]
if in_interval(finger_next["start"], self.id, finger["successor"]["node_id"], inclusive_left=True):
self.log.info("Copy previous finger: %d in between [%d, %d)",
finger_next["start"],
self.id,
finger["successor"]["node_id"])
# Reuse previous finger
finger_next["successor"] = finger["successor"]
else:
self.log.info("Exceeding our successor, need a RPC.")
# TODO: validate data
                # BUG: if only 2 nodes are in the network, the node responsible for the requested start ID
                # is wrong because the bootstrap node has not updated its table yet
finger_successor, status = yield from self.run_rpc_safe(self.bootstrap_address, "rpc_find_successor_rec",
finger_next["start"])
self.log.info("Node for %d: %s", finger_next["start"], finger_successor)
finger_next["successor"] = filter_node_response(finger_successor)
# Optimization for joining node (if not bootstrap node)
# - Find close node to myself (e.g., successor)
# - Request finger table and store temporary_entries
# - for each of my needed finger table starts, use closest entries and directly ask this node.
# - Fallback to node asked previously (or bootstrap node as last fallback) if node is not responding
def __generate_fingers(self, successor_reference):
for k in range(0, CHORD_FINGER_TABLE_SIZE):
entry = {
"start": ((self.id + 2**k) % CHORD_RING_SIZE),
"successor": successor_reference
}
# TODO: add successor if not bootstrap node
self.fingertable.append(entry)
self.log.debug("Default finger table: %s", str(self.fingertable)+"\n\n")
def print_finger_table(self, fingerTableToPrint=None):
if not fingerTableToPrint:
fingerTableToPrint = self.fingertable
print(" START | ID ")
print("-----------------------")
for tableEntry in fingerTableToPrint:
if tableEntry["successor"]:
print("%s %s" % (str(tableEntry["start"]).ljust(4), tableEntry["successor"]["node_id"]))
else:
print(str(tableEntry["start"]).ljust(4) + " - ")
if self.predecessor:
print("Predecessor ID: %d \n" % self.predecessor["node_id"])
else:
print("Predecessor ID: - \n")
@asyncio.coroutine
def init_successor_list(self, successor):
"""Fetch successor list from our immediate successor when joining a network.
"""
successor_details, status = yield from self.run_rpc_safe(successor["node_address"], "rpc_get_node_info",
successor_list=True)
self.successor.update_others(successor_details.get("successor_list"), self.id)
self.log.info("New successor list: %s", self.successor.list)
@asyncio.coroutine
def update_finger_table(self, origin_node, i):
# Do not include self.id in contrast to original paper to abort the recursive call if starting
# node tries to update itself
if in_interval(origin_node["node_id"], self.id, self.fingertable[i]["successor"]["node_id"]):
self.log.info("For finger %d: origin_node is %s; successor was %s",
i, origin_node, self.fingertable[i]["successor"]["node_id"])
self.fingertable[i]["successor"] = origin_node
# Only forward to predecessor if it is not the peer that started this update cascade
if self.predecessor["node_id"] != origin_node["node_id"]:
yield from self.run_rpc_safe(self.predecessor["node_address"],
"rpc_update_finger_table", origin_node, i)
@asyncio.coroutine
def update_neighbors(self, initialization=False):
""" Update immediate neighbors.
Update our successor's pointer to reference us as immediate predecessor
(according to default Chord specification).
        Notify our direct predecessor about our presence. This allows it to stabilize
        its immediate successor finger[0] early.
Requires that finger[0] is set properly.
"""
successor = self.successor.get()
if successor["node_id"] == self.id:
return
# Fix predecessor reference on our immediate successor
update_pred, conn_err = yield from self.run_rpc_safe(successor["node_address"], "rpc_update_predecessor",
self.as_dict())
if conn_err != 0:
# Immediate successor is not responding (should not happen as checked before)
self.log.warn("Immediate successor %s not responding.", successor)
return # TODO: better error handling
self.log.debug("Predecessor update result: %s", update_pred)
if update_pred["node_address"] == self.node_address and "old_predecessor" in update_pred:
# Successfully integrated into Chord overlay network
# Successor already references us at this point.
if initialization:
self.predecessor = filter_node_response(update_pred["old_predecessor"])
self.log.info("Set predecessor: %s", self.predecessor)
# Notify our predecessor to be aware of us (new immediate successor)
# It might already know. In that case, this call is useless.
            # However, for a Chord network consisting of only one node, it is crucial that this node's
            # successor references us. Only then is the circle closed in the forward direction.
yield from self.run_rpc_safe(self.predecessor["node_address"], "rpc_update_successor",
self.as_dict())
# Merge received key,values into own storage
print("Keys received:", update_pred.get("storage"))
self.storage.merge(update_pred.get("storage"))
# elif update_pred["node_address"] != self.node_address:
# # Fix concurrent joins in the same area:
        # # Seems that our successor got a closer predecessor in the meantime.
# # We trust our original successor that it tells the truth and correct our successor reference.
# new_successor = filter_node_response(update_pred)
#
# if in_interval(new_successor["node_id"], self.id, successor["node_id"]):
# self.successor.set(new_successor)
# self.log.info("Periodic fix: updated successor reference to node %d (%s)",
# new_successor["node_id"], new_successor["node_address"])
#
# # Notify our new successor to change its predecessor reference to us
# # If this successor is still not the right one, it will be corrected in the next round.
# yield from self.run_rpc_safe(new_successor["node_address"], "rpc_update_predecessor",
# self.as_dict())
#
# else:
# self.log.warn("Could not stabilize. Our original successors sends rubbish.")
elif update_pred["node_address"] == self.node_address:
self.log.info("Predecessor and successor references ok. Nothing to do.")
else:
# Something went wrong during update. This is only relevant if it happened during startup
# of this node.
# A new node might have joined in the meantime -> TODO: update our reference or clean exit
print("[Update_neighbors] Response:", update_pred)
print("[Update_neighbors] Myself:", self.as_dict())
self.log.error("Could not update predecessor reference of our successor. Try restarting.")
@asyncio.coroutine
def update_successor(self, new_node):
"""Updates the reference to our immediate successor triggered by other peer's hint.
A neighboring successor uses this function to notify us about its presence. This ensures
that the Chord ring is correct.
The parameter ``new_node`` gives a hint about the new successor in this case. To verify
this hint, this node contacts its old successor.
:param new_node:
Successor hint.
"""
old_successor = self.successor.get()
# No other peers yet in the network -> no maintenance possible
if old_successor["node_id"] == self.id and new_node is None:
return
# New successor before old one or old one not responding anymore (last option is TODO)
if in_interval(new_node["node_id"], self.id, old_successor["node_id"]):
# Check old successor whether it already accepted new node
# TODO: validation + timeout catch
successor_view, peer_err = yield from self.run_rpc_safe(old_successor["node_address"],
"rpc_get_node_info")
if peer_err != 0:
# Immediate successor is not responding
self.log.warn("Immediate successor %s not responding.", old_successor)
return # TODO: better error handling, e.g., update on peer_err > 0
if successor_view["predecessor"]["node_address"] == new_node["node_address"]:
# Update finger table to point to new immediate successor
new_node = filter_node_response(new_node)
self.successor.set(new_node)
self.log.info("Updated successor reference to node %d (%s)",
new_node["node_id"], new_node["node_address"])
else:
# Do not update, only mention suspicious observation
self.log.error("Node %d (%s) wants to be our immediate successor, but original successor %d (%s) "
"does not reference it. Looks malicious. Or our view is not fresh anymore :(",
new_node["node_id"], new_node["node_address"],
old_successor["node_id"], old_successor["node_address"])
@asyncio.coroutine
def update_others(self):
"""Update peers' finger table that should refer to our node and notify them.
"""
for k in range(0, CHORD_FINGER_TABLE_SIZE):
id = (self.id - 2**k) % CHORD_RING_SIZE
# Find predecessor
successor = yield from self.find_successor(id, with_neighbors=True)
p = successor["predecessor"]
# In rare cases with id exactly matching the node's key, successor is more correct to reduce hops.
# Ex: 116 is looking for node 114 (finger 2), predecessor would be node 249 with successor 114
# In this case, finger in node 114 should be changed, too.
# if p["successor"]["node_id"] == id:
# p = p["successor"]
self.log.info("Update peer: %s", p)
if self.id != p["node_id"]:
yield from self.run_rpc_safe(p["node_address"], "rpc_update_finger_table",
self.as_dict(), k)
@asyncio.coroutine
def fix_finger(self, finger_id=-1):
"""
Resolves the responsible node for the given finger and updates it accordingly.
:param finger_id:
index of the finger table to update.
The value should be between 0 and length of the finger table.
"""
if not (0 <= finger_id < len(self.fingertable)):
raise IndexError("No valid finger ID.")
cur_finger = self.fingertable[finger_id]
successor = yield from self.find_successor(cur_finger["start"])
print("For start %d, successor is '%s'" % (cur_finger["start"], successor))
if successor is None:
self.log.warn("No suitable node found for start %d. Do not update finger.", cur_finger["start"])
elif successor != cur_finger["successor"]:
self.log.info("Finger %d updated: successor is now %s (old: %s)",
finger_id, successor, cur_finger["successor"])
cur_finger["successor"] = filter_node_response(successor)
# else:
# self.log.warn("Received successor for finger %d not fitting to ID ranges in finger table: %d not in [%d, %d)",
# finger_id, successor["node_id"], cur_finger["start"], next_finger["start"])
@asyncio.coroutine
def update_successor_list(self):
"""Periodically checks availability of our successor peer, maintains a list of possible successors
and swaps to another successor if the first fails.
"""
if len(self.successor.list) == 0 or self.successor.get() == self.as_dict():
return
while len(self.successor.list) > 0:
cur_successor = self.successor.get()
# Query our successor about its current successor list
successor_details, status = yield from self.run_rpc_safe(cur_successor["node_address"], "rpc_get_node_info",
successor_list=True)
if status == 0:
# TODO: filter successor_details
self.successor.print_list()
self.successor.update_others(successor_details["successor_list"], ignore_key=self.id)
# Predecessor of a successor can be missing (None)
new_successor = successor_details.get("predecessor")
print("[update_successor_list] New successor would be:", new_successor)
if new_successor and in_interval(new_successor["node_id"], self.id, cur_successor["node_id"]):
# Our successor already has a different and closer predecessor than us
new_successor, status = yield from self.run_rpc_safe(new_successor["node_address"], "rpc_get_node_info",
successor_list=True)
print("[update_successor_list] SPECIAL CASE: would move to:", new_successor)
if status == 0 and "successor_list" in new_successor:
# Linking to the new peer being our successor now.
print("update_successor_list] SPECIAL CASE: moved to new successor")
self.successor.set(filter_node_response(new_successor))
self.successor.update_others(new_successor["successor_list"], ignore_key=self.id)
# Successor view must contain at least our previous successor in its list.
                # Otherwise, this peer seems to behave strangely
if self.successor.count_occurrence(cur_successor) == 0:
self.log.warn("Reverting successor list as new successor does not include previous one. "
"Looks suspicious to me.")
self.successor.revert_update()
# Notify our successor here to accelerate the stabilization
yield from self.update_neighbors()
break
else:
# Try next successor as current one does not respond appropriate
self.log.info("Successor ID %d not responding. Trying next.", self.successor.get()["node_id"])
if len(self.successor.list) > 1:
self.successor.delete_first()
else:
self.log.warn("No evidence of any other peers alive. Going over to act as bootstrap for others")
self.successor.set(self.as_dict())
@asyncio.coroutine
def check_predecessor(self):
"""Verifies this node's immediate predecessor's live.
If it is lost, remove reference to give new nodes a chance to repair it.
"""
if self.predecessor is None or self.predecessor["node_id"] == self.id:
return
predecessor, status = yield from self.run_rpc_safe(self.predecessor["node_address"],
"rpc_get_node_info")
print("[check_predecessor] Connected to pred: %s" % predecessor)
print("[check_predecessor] Previous pred was: %s" % self.predecessor)
if status != 0 or \
(status == 0 and predecessor["successor"]["node_address"] != self.node_address):
# Predecessor not reachable anymore or our predecessor does not reference us -> Clean up.
self.predecessor = None
self.log.warn("Removing invalid predecessor reference.")
@asyncio.coroutine
def find_successor(self, node_id, with_neighbors=False):
"""Wrapper for :func:`find_successor_rec` to clean responses.
:param node_id:
Key ``node_id`` whose responsible successor is interesting.
:param with_neighbors:
If ``True``, the immediate successor and predecessor nodes augment the result of
the responsible successor.
:return:
Responsible successor node for given key ``node_id``.
:rtype: dict or None
"""
result = yield from self.find_successor_rec(node_id, with_neighbors=with_neighbors)
# Check for problems during lookup
if "status" in result and result["status"] != 0:
self.log.warn("Could not resolve responsible peer. Err: %s", result)
result = None
result = filter_node_response(result, immediate_neighbors=with_neighbors)
return result
@asyncio.coroutine
def find_successor_trace(self, node_id):
"""Wrapper for :func:`find_successor_rec` with trace log enabled for intermediate hops.
:param node_id:
Key ``node_id`` whose responsible successor is interesting.
:return:
Responsible successor node for given key ``node_id``.
:rtype: dict or None
"""
result = yield from self.find_successor_rec(node_id, tracing=True)
result = filter_node_response(result, trace_log=True)
return result
@asyncio.coroutine
def find_successor_rec(self, node_id, with_neighbors=False, tracing=False):
"""Recursively locate the responsible node for a given ``node_id`` (key).
This function is the heart of the Chord DHT.
It is used locally and by remote peers.
:param node_id:
Key ``node_id`` whose responsible successor is interesting.
:param with_neighbors:
If ``True``, the immediate successor and predecessor nodes augment the result of
the responsible successor.
This is useful if the predecessor of the responsible node is needed.
:return:
Responsible successor node for given key ``node_id``.
"""
successor = self.successor.get()
if in_interval(node_id, self.id, successor["node_id"], inclusive_right=True):
# Check live of successor node and augment its information with successor and predecessor links
# if required
successor_details = successor.copy()
successor_neighborhood, status = yield from self.run_rpc_safe(successor["node_address"], "rpc_get_node_info",
additional_data=tracing)
if status == 0:
# Successor node is alive
if with_neighbors:
successor_details.update(filter_node_response(successor_neighborhood, immediate_neighbors=True))
successor_details["status"] = 0
else:
# Successor node is dead
successor_details.update({"status": 1, "message": "last hop not responding"})
# Add list for tracing
if tracing:
last_hop = successor.copy()
last_hop.update({"additional_data": successor_neighborhood.get("additional_data", {}) if successor_neighborhood else {}})
successor_details["trace"] = [last_hop]
# Include our own additional data to be integrated by our preceding hop
successor_details["additional_data"] = self.additional_data
return successor_details
else:
# Find closest finger to node_id and forward recursive query.
# If the current finger's node does not respond, try a less optimal one -> requires more hops.
# TODO: remember faulty nodes and replace if it happens too often
this_node = self.as_dict()
i = 1
next_hop = self.get_closest_preceding_finger(node_id, fall_back=0)
while next_hop != this_node:
print("[find_successor_rec] Closest finger node for %d: %s" % (node_id, next_hop))
# TODO: validate and check for None
peer_data, status = yield from self.run_rpc_safe(next_hop["node_address"], "rpc_find_successor_rec",
node_id, with_neighbors=with_neighbors, tracing=tracing)
if status == 0:
print("[find_successor_rec] Remote result for id %d: %s" % (node_id, peer_data))
# Tracing
# If the recursion tree is built completely, the touched peers are inserted in a trace list on
# the way back.
# The preceding node inserts its next hop in the trace. This provides a basic protection that a
# malicious node cannot prevent being visible in the list.
# Regarding the order, the goal peer is at position 0 in the list and the first hop from the sender
# is at the last position n-1 (n describes all involved nodes).
if tracing:
if peer_data is None:
peer_data = {"status": 1, "message": "trace incomplete."}
successor_node = next_hop.copy()
successor_node["additional_data"] = peer_data["additional_data"]
peer_data["trace"].append(successor_node)
return peer_data
print("[find_successor_rec] Remote id %d with '%s' failed. Try next [%d]." %
(next_hop["node_id"], next_hop["node_address"], i))
next_hop = self.get_closest_preceding_finger(node_id, fall_back=i)
i += 1
# Already reached end of unique peers in our finger table: we are isolated right now
self.log.info("No suitable alternatives as next hop.")
return {"status": 1, "message": "no suitable alternatives found, giving up."}
# @asyncio.coroutine
# def find_successor(self, node_id):
# """ Iterative find successor
#
# .. warning::
# Deprecated: use recursive :func:`find_successor_rec` instead.
# """
# successor = self.fingertable[0]["successor"]
# if in_interval(node_id, self.id, successor["node_id"], inclusive_right=True):
# return successor
# else:
# node = yield from self.find_predecessor(node_id)
# print("[find_successor_rec] Calculated node for %d: %s" % (node_id, node))
# return node["successor"] # Attention: relies on available successor information which has to be
# # retrieved by closest_preceding_finger()
#
# @asyncio.coroutine
# def find_predecessor(self, node_id):
# """Find predecessor
#
# .. warning::
# Deprecated: use :func:`find_successor_rec` instead. It also contains a reference to the node's predecessor.
# """
# selected_node = self.as_dict(serialize_neighbors=True)
# previous_selected_node = None
#
# while not in_interval(node_id, selected_node["node_id"], selected_node["successor"]["node_id"], inclusive_right=True):
# self.log.info("Node ID %d not in interval (%d, %d]",
# node_id,
# selected_node["node_id"],
# selected_node["successor"]["node_id"])
# if selected_node["node_id"] == self.id:
# # Typically in first round: use our finger table to locate close peer
# print("Looking for predecessor of %d in first round." % node_id)
# selected_node = yield from self.get_closest_preceding_finger(node_id)
#
# print("Closest finger: %s" % selected_node)
# # If still our self, we do not know closer peer and should stop searching
# # if selected_node["node_id"] == self.id:
# # break
#
# else:
# # For all other remote peers, we have to do a RPC here
# self.log.debug("Starting remote call.")
# peer = yield from self.container.connect(selected_node["node_address"])
# selected_node = yield from peer.rpc_get_closest_preceding_finger(node_id)
# # TODO: validate received input before continuing the loop
# self.log.info("Remote closest node for ID %d: %s", node_id, str(selected_node))
#
# # Detect loop without progress
# if previous_selected_node == selected_node:
# self.log.error("No progress while looking for node closer to ID %d than node %s", node_id, selected_node)
# raise aiomas.RemoteException("Remote peer did not return more closer node to given Id " + str(node_id), "")
# previous_selected_node = selected_node
#
# return selected_node
def get_closest_preceding_finger(self, node_id, fall_back=0, start_offset=CHORD_FINGER_TABLE_SIZE-1):
"""
Find closest preceding finger within m -> 0 fingers.
:param node_id:
node ID as an integer.
:param fall_back:
chooses less optimal finger nodes if value increases.
            This allows finding a slower, but still working, lookup path if the best matching finger
            is no longer responding.
            In the worst case, this function falls back to this node itself. For example, this is the
            case if our immediate successor is responsible for all of our fingers but did not respond
            to previous requests.
        :param start_offset:
            index of the finger to start the downward search from (defaults to the last finger).
:return:
returns the interesting node descriptor as a dictionary with successor and predecessor.
:rtype: dict
"""
prev_successor = None
for k in range(start_offset, -1, -1):
finger = self.fingertable[k]
finger_successor = self.fingertable[k]["successor"]
self.log.debug("Iterate finger %d: %d in %s", k, node_id, self.fingertable[k])
# Alternative: find entry with node_id > finger["start"] and already contact this node.
# In all cases, it will fall back to a less optimal predecessor if this node does not respond.
# Advantage: can reduce hops to a destination and is more stable in our 8-bit fingers test environment.
# if in_interval(finger["start"], self.id, node_id, inclusive_right=True):
if in_interval(finger_successor["node_id"], self.id, node_id):
if fall_back == 0:
return finger_successor
else:
if prev_successor is not None and prev_successor != finger_successor["node_address"]:
fall_back -= 1
prev_successor = finger_successor["node_address"]
continue
return self.as_dict()
@asyncio.coroutine
def stabilize(self):
"""
Stabilize routine
"""
yield from self._check_running_state()
while self.activated:
yield from asyncio.sleep(self.fix_interval)
self.log.info("Running periodic fix up.")
print("Current finger table:")
self.print_finger_table()
self.log.info("[This node] %s", self.as_dict())
print("Stored entries: ", len(self.storage.data))
# Assure that successor still references us as immediate predecessor
yield from self.update_successor_list()
# yield from self.update_neighbors() # called in update_successor_list
# Update fingers 1 -> m one after each other (finger[0] managed by update_successor)
self.fix_next = max(1, (self.fix_next + 1) % CHORD_FINGER_TABLE_SIZE)
yield from self.fix_finger(self.fix_next)
# Check predecessor and remove reference if wrong
yield from self.check_predecessor()
@asyncio.coroutine
def put_data(self, key, data, ttl, replication_count=-1):
replica = Replica(CHORD_RING_SIZE)
keys = replica.get_key_list(key, replicationCount=replication_count)
print("\n\n\n\nPUT KEYS ARE ", keys) # [197, 210, 70]
successes = 0
for keyWithReplicaIndex in keys:
storage_node = yield from self.find_successor(keyWithReplicaIndex)
print("Found successor for storage: ", storage_node)
if storage_node["node_id"] == self.id:
self.storage.put(keyWithReplicaIndex, data, ttl=ttl)
successes += 1
else:
# Directly connect to remote peer and store it there
# TODO: validate
result, status = yield from self.run_rpc_safe(storage_node["node_address"],
"rpc_dht_put_data", keyWithReplicaIndex, data, ttl)
                if status == 0 and result and result["status"] == 0:
successes += 1
else:
pass # TODO: FAIL MESSAGE
        print("\n\n\n\nPUTS OK: ", successes)
if successes >= 1:
return {
"status": 0,
"successes": successes
}
else:
return {
"status": 1,
"successes": successes,
"message": "Data could not be saved."
}
@asyncio.coroutine
def get_data(self, key, replication_count=-1):
replica = Replica(CHORD_RING_SIZE)
        keys = replica.get_key_list(key, replicationCount=replication_count)  # replica keys that are tried in order before giving up
for keyWithReplicaIndex in keys:
storage_node = yield from self.find_successor(keyWithReplicaIndex)
print("got storage_node:", storage_node)
if storage_node.get("node_id") == self.id:
# Note the case that this node received the responsibility for a failed node.
# Given that the missing data might not be available on this node, continue the replica loop.
result = self.rpc_dht_get_data(keyWithReplicaIndex)
print("[rpc_dht_get_data] Result is:", result)
if result["status"] == 0:
return result
else:
# Directly connect to remote peer and fetch data from there
# TODO: validate
result, status = yield from self.run_rpc_safe(storage_node.get("node_address"),
"rpc_dht_get_data", keyWithReplicaIndex)
if status == 0 and result["status"] == 0:
return result
else:
print("result ERROR", result)
# Lookup was not successful. Try locating other replica.
return {"status": 1, "data": []}
@asyncio.coroutine
def get_trace(self, key):
"""Information about the hops involved in the path for the lookup of the given ``key``.
The list is in reverse order:
        The target peer is at index 0. The node that started the request is at the last position.
:param key:
Node ID to lookup.
:return:
Array with dicts containing the address information of all involved hops.
"""
nodes = yield from self.find_successor_trace(key)
print("Get_trace result:", nodes)
trace_list = nodes["trace"]
        # Add ourselves as the last hop to the list
trace_list.append(self.as_dict(additional_data=True))
for hop_index, node in enumerate(trace_list):
print("Hop %d : node %s" % (len(trace_list) - hop_index - 1, node))
return trace_list
##########################################################################
### RPC wrappers and functions for maintaining Chord's network overlay ###
@asyncio.coroutine
def run_rpc_safe(self, remote_address, func_name, *args, **kwargs):
if remote_address is None or func_name is None:
return None, errno.EINVAL
data = None
err = 1
try:
fut_peer = self.container.connect(remote_address)
remote_peer = yield from asyncio.wait_for(fut_peer, timeout=self.network_timeout)
# Invoke remote function
data = yield from getattr(remote_peer, func_name)(*args, **kwargs)
# Validate schema
validate(data, SCHEMA_OUTGOING_RPC[func_name])
err = 0
except (asyncio.TimeoutError, asyncio.CancelledError):
err = errno.ETIMEDOUT
self.log.warn("AsyncIO error: connection timed out to remote peer %s", remote_address)
except TimeoutError:
err = errno.ETIMEDOUT
self.log.warn("Connection timed out to remote peer %s", remote_address)
except ConnectionRefusedError:
err = errno.ECONNREFUSED
self.log.warn("Connection refused by remote peer %s", remote_address)
except ConnectionError:
# Base for connection related issues
err = errno.ECOMM
self.log.warn("Error connecting to %s", remote_address)
except ValidationError as ex:
err = 2
self.log.error("Validation error: %s", str(ex))
data = None
except SchemaError as ex:
err = 1
data = None
self.log.error("Schema validation error: %s", str(ex))
except Exception as ex:
err = 1
data = None
self.log.error("Unhandled error during RPC function %s to %s: %s", func_name, remote_address, ex)
traceback.print_exc()
return data, err
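    # Note on the contract used throughout this class: run_rpc_safe() returns a
    # (data, err) pair. err == 0 means the remote call succeeded and `data` passed
    # schema validation; any other value is an errno-style code (or 1/2 for
    # generic/schema failures) and `data` may be None, so callers check the error
    # code before touching the payload.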
@aiomas.expose
def rpc_get_node_info(self, successor_list=False, additional_data=False):
node_info = self.as_dict(serialize_neighbors=True, additional_data=additional_data)
if successor_list:
node_info["successor_list"] = self.successor.list
return node_info
# @aiomas.expose
# def rpc_get_fingertable(self):
# # Check: might be critical to be published completely
# return self.fingertable
@aiomas.expose
def rpc_update_predecessor(self, remote_node):
yield from self._check_running_state()
if not isinstance(remote_node, dict):
raise TypeError('Invalid type in argument.')
remote_id = remote_node["node_id"]
# TODO: connect old predecessor if new node ID is not closer to us
if self.predecessor is None or in_interval(remote_id, self.predecessor["node_id"], self.id):
# If this is a bootstrap node and this is the first node joining,
# set predecessor of new node to us. Like this, the ring topology is properly maintained
old_predecessor = self.predecessor or self.as_dict()
self.predecessor = filter_node_response(remote_node)
self.log.info("Predecessor now links to requester %s (old: %s)", remote_node, old_predecessor)
res = self.predecessor.copy()
res["old_predecessor"] = old_predecessor
# Get storage between old and new node. Delete data from our node.
res["storage"] = self.storage.get_storage_data_between(old_predecessor["node_id"], remote_id)
self.storage.delete_storage_data_between(old_predecessor["node_id"], remote_id)
return res
else:
# No change for this node's predecessor, because it is closer to our node than the asking peer.
            # Its liveness is checked periodically by ``check_predecessor``.
return self.predecessor
@aiomas.expose
def rpc_update_successor(self, node_hint):
yield from self._check_running_state()
if not isinstance(node_hint, dict):
raise TypeError('Invalid type in argument.')
yield from self.update_successor(node_hint)
@aiomas.expose
def rpc_update_finger_table(self, origin_node, i):
yield from self._check_running_state()
origin_node = filter_node_response(origin_node)
validate(origin_node, SCHEMA_INCOMING_RPC["rpc_update_finger_table"])
i = i % CHORD_RING_SIZE
yield from self.update_finger_table(origin_node, i)
return {"status": 0}
@aiomas.expose
def rpc_find_successor_rec(self, node_id, with_neighbors=False, tracing=False):
yield from self._check_running_state()
# TODO: validate params to prevent attacks!
res = yield from self.find_successor_rec(node_id, with_neighbors=with_neighbors, tracing=tracing)
return res
### RPC Data storage ###
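    # A node is responsible for every key in the interval (predecessor_id, own_id];
    # the in_interval() checks in the two RPCs below enforce exactly that.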
@aiomas.expose
def rpc_dht_put_data(self, key, data, ttl):
# TODO: validate
if in_interval(key, self.predecessor["node_id"], self.id, inclusive_right=True):
self.storage.put(key, data, ttl=ttl)
return {
"status": 0
}
else:
self.log.warn("This node %d is not responsible for storing data with key %d.",
self.id, key)
return {
"status": 1,
"message": "not responsible"
}
@aiomas.expose
def rpc_dht_get_data(self, key):
if in_interval(key, self.predecessor["node_id"], self.id, inclusive_right=True):
data = self.storage.get(key)
status = 0 if len(data) > 0 else 1
return {
"status": status,
"data": data
}
else:
return {
"status": 1
}
### RPC tests ###
@asyncio.coroutine
def test_get_node_id(self, addr):
# RPC to remote node
remote_agent = yield from self.container.connect(addr)
id = yield from remote_agent.get_node_id()
print("%s got answer from %s: ID is %d" % (self.node_address, addr, id))
@asyncio.coroutine
def test_get_closest_preceding_finger(self, addr, node_id):
# RPC to remote node
remote_agent = yield from self.container.connect(addr)
res_node = yield from remote_agent.rpc_get_closest_preceding_finger(node_id)
print("%s got answer from %s: closest node is %s" % (self.node_address, addr, str(res_node)))
@asyncio.coroutine
def test_find_my_successor(self, addr):
# RPC to remote node
remote_agent = yield from self.container.connect(addr)
res_node = yield from remote_agent.rpc_find_successor_rec(self.id + 1)
print("%s got answer from %s: my successor is %s" % (self.node_address, addr, str(res_node)))
@asyncio.coroutine
def test_stresstest(self, message):
message = "ok"
# force some exceptions
try:
yield from self.rpc_update_predecessor({"node_id": 213}) # missing node address
except Exception as e:
message = str(e)
return message.encode("utf-8")
# if message == TESTMESSAGES_MESSAGE_FAKE_WRONGVALUE:
# return "test wrong value ok".encode("utf-8")
# if message == TESTMESSAGES_MESSAGE_FAKE_MISSINGVALUE:
# return "test missing value ok".encode("utf-8")
|
python
|
from Metodos import pontofixo
from prettytable import PrettyTable
from mpmath import *
import random
import matplotlib.pyplot as plt
valerror1 = []
valerror2 = []
valerror3 = []
valerrors = [valerror1,valerror2,valerror3]
i = 20
val = 2
def f(x):
return (x-1)*exp(x-2)**2 - 1
def g1(x):
return (x - (f(x)/10))
def g2(x):
return x-(f(x)/2)
def g3(x):
return x - (f(x)/1000)
gs = [g1,g2,g3]
erro=1e-6
max_it=1000000000000
tabela = PrettyTable(["Dado Inicial","xbarra","fi(x)","Erro","Número de Iterações"])
def pontofixo(f,val,eps,max_i,valerror):
i=0
x = val
old = None
valores = []
while(i < max_i and old != x):
old = x
x=float(x - f(x))
i+=1
valerror.append(abs(old-x))
if(abs(x)>1e+20):
return None
if(abs(old-x)<eps):
x = random.uniform(old,x)
valores.append(old)
valores.append(x)
valores.append(i)
return valores
for i in range(3):
pf = pontofixo(gs[i],val,erro,max_it,valerrors[i])
if(pf == None):
tabela.add_row([val,"-",i+1,"-","-"])
else:
tabela.add_row([val,pf[1],i+1,(abs(pf[0]-pf[1])),pf[2]])
print(tabela)
plt.plot(valerror1, color="#FF0080")
plt.plot(valerror2, color="#008080")
plt.plot(valerror3, color="#FF00FF")
plt.show()
|
python
|
import logging
from typing import Callable, Sequence, Set
from . import formats
logger = logging.getLogger(__name__)
# type aliases
FormatterType = Callable[[bool], formats.IFormatter]
def negotiate(accepts_headers: Sequence[str]) -> FormatterType:
"""Negotiate a response format by scanning through a list of ACCEPTS
headers and selecting the most efficient format.
The formatter returned by this function is used to render a response.
:param accepts_headers: a list of ACCEPT headers fields extracted from a request.
:returns: a formatter class to form up the response into the
appropriate representation.
"""
accepts = parse_accepts(accepts_headers)
    formatter = formats.text.TextFormatter  # type: FormatterType
if formats.binary is not None:
if formats.binary.BINARY_ACCEPTS.issubset(accepts):
formatter = formats.binary.BinaryFormatter # type: ignore
logger.debug(f"negotiating {accepts} resulted in choosing {formatter.__name__}")
return formatter
def parse_accepts(accept_headers: Sequence[str]) -> Set[str]:
    """ Return the set of accept items found in the request headers """
accepts = set() # type: Set[str]
for accept_items in accept_headers:
if ";" in accept_items:
accept_items = [i.strip() for i in accept_items.split(";")]
else:
accept_items = [accept_items]
accepts.update(accept_items)
return accepts
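# Hypothetical usage sketch (not part of the original module): parse_accepts
# merges every ACCEPT header field into one set, splitting "key; value" style
# fields on ";".
def _example_parse_accepts() -> Set[str]:
    # Expected result: {"text/plain", "version=0.0.4", "application/json"}
    return parse_accepts(["text/plain; version=0.0.4", "application/json"])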
|
python
|
import redis
redis_db = redis.StrictRedis(host="redis", port=6379, db=0)
print(redis_db.keys())
redis_db.set('CT', 'Connecticut')
redis_db.set('OH', 'Ohio')
print(redis_db.keys())
ctstr = redis_db.get("CT").decode("utf-8")
print(ctstr)
print( "Good bye!" )
|
python
|
from django.test import TestCase
from .models import Category
# Create your tests here.
# category models test
class CategoryTestCase(TestCase):
def setUp(self):
"""
Create a category for testing
"""
Category.objects.create(name="Test Category")
def test_category_name(self):
"""
Test that the category name is correct
"""
category = Category.objects.get(name="Test Category")
self.assertEqual(category.name, "Test Category")
def test_category_str(self):
"""
Test that the category string representation is correct
"""
category = Category.objects.get(name="Test Category")
self.assertEqual(str(category), "Test Category")
|
python
|
from logging import ERROR, error
from os import terminal_size
import socket
import threading
import json
HEADER = 1024*4
PORT = 5544
# SERVER = "192.168.1.104"
SERVER = "localhost" # socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def validate_json(msg):
json_msg = json.loads(msg)
timestamp = []
temperature = []
sensorId = []
try:
for i in range(len(json_msg)):
timestamp.append(json_msg[i]["timestamp"])
temperature.append(json_msg[i]["payload"]["temperature"])
sensorId.append(json_msg[i]["payload"]["sensorId"])
except Exception as e:
        print(f"exception occurred: {e}")
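# Illustrative example of the message shape validate_json expects, inferred from
# the fields accessed above (the concrete values here are assumptions):
#   [{"timestamp": "2021-01-01T00:00:00Z",
#     "payload": {"temperature": 21.5, "sensorId": 1}}]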
def handle_client(conn, addr):
    print(f"[NEW CONNECTION] {addr} connected.")
msg = conn.recv(HEADER)
print(msg)
msg = json.loads(msg)
print(msg)
print(msg[0]["timestamp"])
connected = True
"""
while connected:
if msg_lenth:
msg_lenth = int(msg_lenth)
msg = conn.recv(msg_lenth).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
conn.send("Msg received".encode(FORMAT))
"""
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.active_count() - 1}")
print("[STARTING] server is starting...")
start()
|
python
|
# A murder has happened, and the program will ask the user 5 questions.
# If 2 answers are positive, the user is considered a "SUSPEITO" (suspect).
# With 3 or 4 positive answers: "CÚMPLICE" (accomplice).
# With 5 positive answers, the user is considered an "ASSASSINO" (murderer).
# If the user gives at most 1 positive answer, they are "INOCENTE" (innocent).
# A function that asks all the questions
def perguntas(res, tel, loc, mor, div, tra):
tel = res.append(telefone(tel))
loc = res.append(local(loc))
mor = res.append(morada(mor))
div = res.append(divida(div))
tra = res.append(trabalho(tra))
return res, tel, loc, mor, div, tra
# Question about the phone call
def telefone(tel):
tel = input('Telefonou para a vítima? ').lower().strip()
if tel != 'sim' and tel != 'não' and tel != 'nao':
print('Por favor, responda apenas com sim ou não.')
        return telefone(tel)
return tel
# Question about the user's location during the crime
def local(loc):
loc = input('Esteve no local do crime? ').lower().strip()
if loc != 'sim' and loc != 'não' and loc != 'nao':
print('Por favor, responda apenas com sim ou não.')
        return local(loc)
return loc
# Question about where the suspect lives
def morada(mor):
mor = input('Mora perto da vítima? ').lower().strip()
if mor != 'sim' and mor != 'não' and mor != 'nao':
print('Por favor, responda apenas com sim ou não.')
        return morada(mor)
return mor
# Question about debts
def divida(div):
div = input('Devia para a vítima? ').lower().strip()
if div != 'sim' and div != 'não' and div != 'nao':
print('Por favor, responda apenas com sim ou não.')
        return divida(div)
return div
# Question about the user's work
def trabalho(tra):
tra = input('Já trabalhou com a vítima? ').lower().strip()
if tra != 'sim' and tra != 'não' and tra != 'nao':
print('Por favor, responda apenas com sim ou não.')
        return trabalho(tra)
return tra
# Function to collect the answers and add them to a list
def respostas(nome,sexo):
res = []
tel = ''
loc = ''
mor = ''
div = ''
tra = ''
perguntas(res, tel, loc, mor, div, tra)
    # The program counts how many SIM (yes) and NÃO (no) answers were given
sim = res.count('sim')
    nao = res.count('não') + res.count('nao') # 'não' is counted whether or not the user types the accent
    # Analysis of the 'SIM' answers and the verdict shown to the user
    # When the user enters SEXO as HOMEM (man), the messages use the masculine form
    # If the user enters MULHER (woman), the messages use the feminine form.
if sim <= 1 and sexo == 'homem':
print('\n{}, você foi considerado inocente'.format(nome))
if sim <= 1 and sexo == 'mulher':
print('\n{}, você foi considerada inocente'.format(nome))
if sim == 2 and sexo == 'homem':
print('\n{}, você é um suspeito'.format(nome))
if sim == 2 and sexo == 'mulher':
print('\n{}, você é uma suspeita'.format(nome))
if sim > 2 and sim <= 4:
print('\n{}, você é cúmplice do crime!'.format(nome))
if sim == 5 and sexo == 'homem':
print('\n{}, você será detido por assassinato!'.format(nome))
if sim == 5 and sexo == 'mulher':
print('\n{}, você será detida por assassinato!'.format(nome))
# Program introduction
def main():
print('Olá, obrigado pela sua presença.')
nome = input('Para começar, qual o seu nome? ').strip()
sexo = input('Você é homem ou mulher? ').lower().strip()
if sexo != 'homem' and sexo != 'mulher':
print('Não entendi. Vou tratar você como homem')
sexo = 'homem'
print('\nO{}, por gentileza, durante as perguntas responda apenas com \033[1mSim\033[m ou \033[1mNão\033[m'.format(nome))
comeco = input('\nPodemos começar? Responda com Sim ou Não: ').lower().strip()
if comeco == 'sim':
print('')
respostas(nome,sexo)
else:
print('Desculpa, mas não podemos demorar. Vamos ter que continuar assim mesmo')
print('')
respostas(nome,sexo)
return nome,sexo
main()
|
python
|
from checkpoint.types import ModelVersionStage
ANONYMOUS_USERNAME = "Anonymous"
CHECKPOINT_REDIRECT_PREFIX = "checkpoint_redirect"
CHECKPOINT_REDIRECT_SEPARATOR = ":"
NO_VERSION_SENTINAL = "no_version"
STAGES_WITH_CHAMPIONS = {
ModelVersionStage.STAGING,
ModelVersionStage.PRODUCTION,
}
INJECT_SCRIPT_TEMPLATE = """
<script>
function checkRequests () {
var req = new XMLHttpRequest();
req.onreadystatechange = function() {
if (this.readyState == 4 && this.status == 200) {
var requests = JSON.parse(this.responseText);
setRequests(requests);
}
};
req.open("GET", "/checkpoint/api/requests", true);
req.send();
}
function setRequests(requests) {
var openRequests = requests.filter(function (request) {
return request.status == "open";
}).length;
element = document.getElementById('requests-count');
element.innerHTML = openRequests;
if (openRequests > 0) {
element.style.display = "inline";
} else {
element.style.display = "none";
}
}
// https://stackoverflow.com/a/17076120
function decodeHTMLEntities(text) {
var entities = [
['amp', '&'],
['apos', '\\''],
['#x27', '\\''],
['#x2F', '/'],
['#39', '\\''],
['#47', '/'],
['lt', '<'],
['gt', '>'],
['nbsp', ' '],
['quot', '"']
];
for (var i = 0, max = entities.length; i < max; ++i) {
text = text.replace(new RegExp('&'+entities[i][0]+';', 'g'), entities[i][1]);
}
return text;
}
function checkRedirect() {
elements = document.getElementsByClassName("ant-message-error");
if (elements.length > 0) {
for (i = 0; i< elements.length; i++) {
const msg = elements[i].children[1].innerHTML;
if (msg.startsWith("CHECKPOINT_REDIRECT_PREFIX")) {
elements[i].style.display = "none";
const parts = msg.split("CHECKPOINT_REDIRECT_SEPARATOR");
window.location = window.location.protocol + '//' + window.location.host + decodeHTMLEntities(parts[1]);
}
}
}
}
window.onload = function () {
checkRequests();
setInterval(checkRequests, 5000);
setInterval(checkRedirect, 1000);
}
</script>
""" # noqa: E501
INJECT_ELEMENT = """
<button style="position: fixed; min-width: 165px; width: 10vw; left: 45vw; top: 0px;" type="button" class="ant-btn">
<a href="/checkpoint/requests">
🛂 Checkpoint
<span id="requests-count" style="background-color: red; color: white; margin-left: 10px;" class="ant-tag"></span>
</a>
</button>
""" # noqa: E501
INJECT_SCRIPT = INJECT_SCRIPT_TEMPLATE.replace(
"CHECKPOINT_REDIRECT_PREFIX", CHECKPOINT_REDIRECT_PREFIX
).replace("CHECKPOINT_REDIRECT_SEPARATOR", CHECKPOINT_REDIRECT_SEPARATOR)
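# Illustrative note: after the substitutions above, the injected script treats a
# client-side error message of the form "checkpoint_redirect:<target-path>"
# (the target path itself is chosen by the server) as a redirect instruction and
# navigates the browser to that path.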
GTM_HEAD_SCRIPT = """
<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-5QQCHV7');</script>
<!-- End Google Tag Manager -->
""" # noqa: E501
GTM_BODY_SCRIPT = """
<!-- Google Tag Manager (noscript) -->
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-5QQCHV7"
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
<!-- End Google Tag Manager (noscript) -->
""" # noqa: E501
|
python
|
import numpy as np
import torch
import torchvision.transforms as transforms
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
def convert_image_to_tensor(image):
"""convert an image to pytorch tensor
Parameters:
----------
image: numpy array , h * w * c
Returns:
-------
image_tensor: pytorch.FloatTensor, c * h * w
"""
return transform(image)
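# Hypothetical usage sketch (not part of the original module): a random
# 3-channel h * w * c float image becomes a c * h * w tensor. Three channels
# are assumed because the Normalize transform above uses per-channel stats.
def _example_convert_image():
    image = np.random.rand(8, 8, 3).astype(np.float32)  # h * w * c
    return convert_image_to_tensor(image)  # tensor with shape (3, 8, 8)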
def convert_chwTensor_to_hwcNumpy(tensor):
    """convert a group of images as a pytorch tensor (count * c * h * w) to a numpy array of images (count * h * w * c)
Parameters:
----------
    tensor: pytorch.FloatTensor, count * c * h * w
Returns:
-------
numpy array images: count * h * w * c
"""
if isinstance(tensor, torch.FloatTensor):
return np.transpose(tensor.detach().numpy(), (0, 2, 3, 1))
else:
        raise Exception(
            "convert b*c*h*w tensor to b*h*w*c numpy error. This tensor must have 4 dimensions of float data type.")
|
python
|
import argparse
from clusterize import k_means
def main():
args = parse_args()
dataset = read_dataset(args.dataset)
k_means(args.k, dataset, args.i)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-dataset", default="normal",
choices=["normal", "unbalance"], help="Choose which dataset to use.")
parser.add_argument("-k", default=4, type=int, help="Number of clusters.")
parser.add_argument("-i", default=10, type=int, help="Number of iterations.")
return parser.parse_args()
def read_dataset(dataset):
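    # Each line of the dataset file is expected to hold the whitespace-separated
    # coordinates of one point, e.g. "1.5 2.0" -> [1.5, 2.0] (values illustrative).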
if(dataset == "normal"):
dataset_file_path = "./data/normal/normal.txt"
else:
dataset_file_path = "./data/unbalance/unbalance.txt"
with open(dataset_file_path, newline='') as dataset_file:
rows = [line.split() for line in dataset_file]
dataset = [[float(coordinate) for coordinate in position] for position in rows]
return dataset
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
"""stem package.
Word lemmatization and stemming.
API:
* PorterStemmer (class): Uses porter algorithm to find word stems.
* porter_stem (function): For instant stemming a word using porter algorithm.
* RegexStemmer (class): Uses regex pattern to find word stems.
* regex_stem (function): For instant stemming a word using regex pattern.
"""
from ._porter import PorterStemmer, porter_stem
from ._regex import RegexStemmer, regex_stem
|
python
|
import wave
import pyaudio
import webrtcvad
class AudioRecorder(object):
def __init__(self):
self.pyaudio = pyaudio.PyAudio()
self.format = self.pyaudio.get_format_from_width(width=2)
self.channels = 1
self.rate = 16000
self.chunk = 160
self.max_frame_count = 500
self.vad = webrtcvad.Vad(3)
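        # With 160-sample chunks at 16 kHz each read covers 10 ms of audio, so
        # max_frame_count = 500 caps a recording at ~5 s and the silence cut-off
        # in record() (75 consecutive non-speech chunks) corresponds to ~750 ms.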
def record(self, is_interrupted):
stream = self.pyaudio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer=self.chunk)
print("* Recording audio...")
frames = []
silence_count = 0
while len(frames) < self.max_frame_count and not is_interrupted():
data = stream.read(self.chunk)
frames.append(data)
if self.__is_speech(data):
silence_count = 0
else:
silence_count += 1
print(silence_count)
if silence_count >= 75:
break
print("* done")
stream.stop_stream()
stream.close()
return {'data': b''.join(frames), 'is_silent': len(frames) < 100}
def __is_speech(self, audio_data):
return self.vad.is_speech(audio_data, self.rate)
def play_stream(self, audio_stream):
stream = self.pyaudio.open(format=self.format,
channels=self.channels,
rate=self.rate,
output=True)
stream.write(audio_stream.read())
stream.stop_stream()
stream.close()
def play_wave(self, file):
wf = wave.open(file, 'rb')
stream = self.pyaudio.open(format=self.pyaudio.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
data = wf.readframes(1024)
while len(data) > 0:
stream.write(data)
data = wf.readframes(1024)
stream.stop_stream()
stream.close()
|
python
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import unittest
import mock
from twitter.common.contextutil import temporary_file
from apache.aurora.admin.admin_util import make_admin_client, parse_script
from apache.aurora.common.cluster import Cluster
from apache.aurora.common.clusters import Clusters
class TestAdminUtil(unittest.TestCase):
TEST_CLUSTER_NAME = 'west'
TEST_CLUSTER = Cluster(
name=TEST_CLUSTER_NAME,
zk='zookeeper.example.com',
scheduler_zk_path='/foo/bar',
auth_mechanism='UNAUTHENTICATED')
TEST_CLUSTERS = Clusters([TEST_CLUSTER])
@mock.patch("apache.aurora.admin.admin_util.subprocess", spec=subprocess)
def test_parse_script(self, mock_subprocess):
with temporary_file() as fp:
mock_popen = mock.Mock()
mock_popen.wait.return_value = 0
mock_subprocess.Popen.return_value = mock_popen
parse_script(fp.name)('h1')
assert mock_popen.wait.call_count == 1
def test_parse_script_invalid_filename(self):
self.assertRaises(SystemExit, parse_script, "invalid filename")
def test_make_admin_client_cluster_string(self):
with mock.patch('apache.aurora.admin.admin_util.CLUSTERS', new=self.TEST_CLUSTERS):
self.assertIsNotNone(make_admin_client(self.TEST_CLUSTER_NAME))
def test_make_admin_client_cluster_object(self):
with mock.patch('apache.aurora.admin.admin_util.CLUSTERS', new=self.TEST_CLUSTERS):
self.assertIsNotNone(make_admin_client(self.TEST_CLUSTER))
def test_make_admin_client_cluster_unknown(self):
with mock.patch('apache.aurora.admin.admin_util.CLUSTERS', new=self.TEST_CLUSTERS):
self.assertRaises(SystemExit, make_admin_client, 'east')
|
python
|
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Union
import pytest
import tornado
from graphql import parse
from opencensus.trace import execution_context
from opencensus.trace import tracer as tracer_module
from opencensus.trace.base_exporter import Exporter
from opencensus.trace.propagation.google_cloud_format import GoogleCloudFormatPropagator
from opencensus.trace.samplers import AlwaysOnSampler
from graphene_tornado.apollo_tooling.operation_id import (
default_engine_reporting_signature,
)
from graphene_tornado.ext.apollo_engine_reporting.tests.schema import schema
from graphene_tornado.ext.apollo_engine_reporting.tests.test_engine_extension import (
QUERY,
)
from graphene_tornado.ext.opencensus.opencensus_tracing_extension import (
OpenCensusExtension,
)
from graphene_tornado.graphql_extension import GraphQLExtension
from graphene_tornado.tests.http_helper import HttpHelper
from graphene_tornado.tests.test_graphql import GRAPHQL_HEADER
from graphene_tornado.tests.test_graphql import response_json
from graphene_tornado.tests.test_graphql import url_string
from graphene_tornado.tornado_graphql_handler import TornadoGraphQLHandler
class GQLHandler(TornadoGraphQLHandler):
def initialize(
self,
schema=None,
middleware: Optional[Any] = None,
root_value: Any = None,
graphiql: bool = False,
pretty: bool = False,
batch: bool = False,
extensions: List[
Union[Callable[[], GraphQLExtension], GraphQLExtension]
] = None,
exporter=None,
):
super().initialize(
schema, middleware, root_value, graphiql, pretty, batch, extensions
)
execution_context.set_opencensus_tracer(
tracer_module.Tracer(
sampler=AlwaysOnSampler(),
exporter=exporter,
propagator=GoogleCloudFormatPropagator(),
)
)
def on_finish(self) -> None:
tracer = execution_context.get_opencensus_tracer()
tracer.finish()
class ExampleOpenCensusApplication(tornado.web.Application):
def __init__(self, exporter):
extension = lambda: OpenCensusExtension()
handlers = [
(
r"/graphql",
GQLHandler,
dict(
graphiql=True,
schema=schema,
extensions=[extension],
exporter=exporter,
),
),
]
tornado.web.Application.__init__(self, handlers)
@pytest.fixture
def app(exporter):
return ExampleOpenCensusApplication(exporter)
@pytest.fixture
def http_helper(http_client, base_url):
return HttpHelper(http_client, base_url)
@pytest.fixture
def exporter():
return CapturingExporter()
@pytest.mark.gen_test()
def test_traces_match_query(http_helper, exporter):
response = yield http_helper.get(url_string(query=QUERY), headers=GRAPHQL_HEADER)
assert response.code == 200
assert "data" in response_json(response)
parent = exporter.spans.pop()[0]
assert parent.name == "gql[b5c7307ba564]"
assert parent.parent_span_id is None
assert parent.attributes.get(
"signature", None
) == default_engine_reporting_signature(parse(QUERY), "")
spans = [span for span_list in exporter.spans for span in span_list]
expected = [
"gql_parsing",
"gql_validation",
"author",
"aBoolean",
"author.name",
"author.posts",
"author.posts.0.id",
"author.posts.1.id",
]
for span, exp in zip(spans, expected):
assert span.name == exp
assert span.parent_span_id == parent.span_id
class CapturingExporter(Exporter):
def __init__(self):
super(CapturingExporter, self).__init__()
self._spans = []
@property
def spans(self):
return self._spans
def emit(self, span_datas):
pass
def export(self, span_datas):
self._spans.append(span_datas)
|
python
|
import os
from distutils.util import strtobool
from os.path import join
import dj_database_url
from configurations import Configuration
from corsheaders.defaults import default_headers
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class Common(Configuration):
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# Third party apps
"django_extensions",
"rest_framework", # utilities for rest apis
"django_filters", # for filtering rest endpoints
"drf_yasg",
"computedfields", # Allows for computed fields on models
"corsheaders",
# Your apps
"scpca_portal",
)
# https://docs.djangoproject.com/en/2.0/topics/http/middleware/
MIDDLEWARE = (
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
)
ALLOWED_HOSTS = ["*"]
ROOT_URLCONF = "scpca_portal.urls"
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY")
WSGI_APPLICATION = "scpca_portal.wsgi.application"
ADMINS = (("Author", "[email protected]"),)
# Postgres
DATABASES = {
"default": dj_database_url.config(
default="postgres://postgres:@postgres:5432/postgres",
conn_max_age=int(os.getenv("POSTGRES_CONN_MAX_AGE", 600)),
)
}
# Caching: for now we're only caching a single record and its not
# even intense to compute so the locally memory cache is
# sufficient and memcache would be overkill.
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "unique-snowflake",
"TIMEOUT": None,
}
}
# General
APPEND_SLASH = True
TIME_ZONE = "UTC"
LANGUAGE_CODE = "en-us"
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
USE_L10N = True
USE_TZ = True
LOGIN_REDIRECT_URL = "/"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = "/tmp/www/static/"
STATICFILES_DIRS = []
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
# Media files
MEDIA_ROOT = join(os.path.dirname(BASE_DIR), "media")
MEDIA_URL = "/media/"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": STATICFILES_DIRS,
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
# Set DEBUG to False as a default for safety
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = strtobool(os.getenv("DJANGO_DEBUG", "no"))
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"django.server": {
"()": "django.utils.log.ServerFormatter",
"format": "[%(server_time)s] %(message)s",
},
"verbose": {
"format": "%(asctime)s %(levelname)s %(module)s %(process)d %(thread)d %(message)s"
},
"simple": {"format": "%(asctime)s %(levelname)s %(message)s"},
},
"filters": {"require_debug_true": {"()": "django.utils.log.RequireDebugTrue",},},
"handlers": {
"django.server": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "django.server",
},
"console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "simple",},
"mail_admins": {"level": "ERROR", "class": "django.utils.log.AdminEmailHandler",},
},
"loggers": {
"django": {"handlers": ["console"], "propagate": True,},
"django.server": {"handlers": ["django.server"], "level": "INFO", "propagate": False,},
"django.request": {
"handlers": ["mail_admins", "console"],
"level": "ERROR",
"propagate": False,
},
"django.db.backends": {"handlers": ["console"], "level": "INFO"},
},
}
# Django Rest Framework
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": int(os.getenv("DJANGO_PAGINATION_LIMIT", 10)),
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"DEFAULT_RENDERER_CLASSES": (
"rest_framework.renderers.JSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
),
}
# CORS - unrestricted
CORS_ORIGIN_ALLOW_ALL = True
API_KEY_HEADER = "api-key"
CORS_ALLOW_HEADERS = default_headers + (API_KEY_HEADER,)
TERMS_AND_CONDITIONS = "PLACEHOLDER"
|
python
|
import numpy as np
from matplotlib import pyplot as plt
from os import listdir
def exp_moving_average(vec, a):
"""
Calculates EMA from given vector and alpha parameter.
:param vec: input vector
:param a: alpha parameter
:return: calculated average
"""
# Create elements multipliers vector. 1 for first element in vec and alpha
# for every other element.
multi_vec = np.ones(vec.shape)
multi_vec[1:] = a
exp_vec = np.flip(np.arange(vec.shape[0]), 0)
avg = np.sum(np.multiply(multi_vec, np.multiply(vec, (1 - a) ** exp_vec)))
return avg
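# Illustrative sketch (defined here but never called by this script): for
# vec = [v0, v1, v2] the vectorised expression above evaluates to
# v0*(1-a)**2 + a*v1*(1-a) + a*v2, i.e. a standard EMA where the oldest
# element carries the remaining weight.
def _ema_example():
    # Expected value for a = 0.5: 1*0.25 + 0.5*2*0.5 + 0.5*3 = 2.25
    return exp_moving_average(np.array([1.0, 2.0, 3.0]), 0.5)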
def average_mat(mat, avg_range):
"""
Average open and close prices in given matrix with avg range.
:param mat: input matrix
:param avg_range: range of EMA
:return: new matrix with averaged open and close column
"""
# Indices of open and close columns.
open_i = 4
close_i = 5
avg_mat = np.zeros([mat.shape[0] - avg_range + 1, mat.shape[1]])
for i in range(avg_mat.shape[0]):
avg_mat[i] = mat[i + avg_range - 1]
# Calculate averaged open and close prices.
avg_mat[i, open_i] = exp_moving_average(
mat[i:i + avg_range, open_i], 1 / avg_range
)
avg_mat[i, close_i] = exp_moving_average(
mat[i:i + avg_range, close_i], 1 / avg_range
)
return avg_mat
def average_mats(in_dir_path, out_dir_path, avg_range):
"""
Average all matrices present in in_dir_path with given range.
:param in_dir_path: path to input dir
:param out_dir_path: path to output dir
:param avg_range: length of exp moving average
:return:
"""
# Average and save every company matrix.
for filename in listdir(in_dir_path):
mat = np.load(in_dir_path + "/" + filename)
# Convert matrix only if it contains enough rows.
if mat.shape[0] >= avg_range:
avg_mat = average_mat(mat, avg_range)
np.save(out_dir_path + "/" + filename, avg_mat)
def main():
"""
Main function of this script.
:return: -
"""
# Parameters for averaging with length 5, 25 and 50.
in_dir_path = "../../data/split"
out_dir_path_1 = "../../data/averaged_50"
out_dir_path_2 = "../../data/averaged_25"
out_dir_path_3 = "../../data/averaged_5"
avg_range_1 = 50
avg_range_2 = 25
avg_range_3 = 5
average_mats(in_dir_path, out_dir_path_1, avg_range_1)
average_mats(in_dir_path, out_dir_path_2, avg_range_2)
average_mats(in_dir_path, out_dir_path_3, avg_range_3)
if __name__ == "__main__":
main()
|
python
|
from random import randint
class King:
def __init__(self, start, size):
self.start = start
self.size = size
self.hole = (randint(0, size-1), randint(0, size-1))
def check(self, a, b):
return a >= 0 and b >= 0 and a <= self.size - 1 and b <= self.size - 1
def near(self, where):
s = []
for y in [-1, 0, 1]:
for x in [-1, 0, 1]:
if self.check(where[0] + x, where[1] + y):
s.append((where[0] + x, where[1] + y))
s.remove(where)
return s
def find(y_king):
aue = []
aue.append(y_king.start)
searched = []
while aue:
where = aue.pop(0) # 0 ---- width // None ---- depth
if where not in searched:
if where == y_king.hole:
return where
else:
aue += y_king.near(where)
searched.append(where)
"""
y_king = King((0, 0), 5)
print(find(y_king))
print(y_king.hole)
"""
|
python
|
# -*- coding: utf-8 -*-
"""
Requires Python 3.8 or later
"""
__author__ = "Jorge Morfinez Mojica ([email protected])"
__copyright__ = "Copyright 2021"
__license__ = ""
__history__ = """ """
__version__ = "1.21.H28.1 ($Rev: 1 $)"
import re
import time
import threading
from flask import Blueprint, json, request, render_template, redirect
from flask_jwt_extended import jwt_required
from db_controller.database_backend import *
from .UsersAuthModel import UsersAuthModel
from handler_controller.ResponsesHandler import ResponsesHandler as HandlerResponse
from handler_controller.messages import SuccessMsg, ErrorMsg
from auth_controller.api_authentication import *
from logger_controller.logger_control import *
from utilities.Utility import *
from datetime import datetime
cfg_app = get_config_settings_app()
authorization_api = Blueprint('authorization_api', __name__)
# jwt = JWTManager(bancos_api)
logger = configure_logger('ws')
# The app is initialized with a thread to avoid execution problems
# (validation is still missing for the case where a thread is already running)
def activate_job():
def run_job():
while True:
time.sleep(2)
thread = threading.Thread(target=run_job)
thread.start()
# Contains the call to the HTML page that backs the API documentation,
# its methods, and endpoints with the I/O data models
# @authorization_api.before_app_first_request(activate_job())
@authorization_api.route('/doc')
def main():
return render_template('api_manage_product.html')
@authorization_api.route('/logout/')
def logout():
return redirect('/')
@authorization_api.route('/login/', methods=['POST'])
def get_authentication():
conn_db, session_db = init_db_connection()
data = dict()
json_token = dict()
if request.method == 'POST':
data = request.get_json(force=True)
if not data or str(data) is None:
return HandlerResponse.request_conflict(ErrorMsg.ERROR_REQUEST_DATA_CONFLICT, data)
user_name = data['username']
password = data['password']
is_super_user = data['is_superuser']
        regex_username = r"^[a-z0-9_.\-]+@[a-z0-9_.\-]+\.[a-z]{2,15}$"
        regex_passwd = r"^[A-Za-z0-9_.\-$#&*]+"
match_username = re.match(regex_username, user_name, re.M | re.I)
match_passwd = re.match(regex_passwd, password, re.M | re.I)
if match_username and match_passwd and bool(is_super_user):
password = user_name + '_' + password + '_' + cfg_app.api_key
data['password'] = password
json_token = user_registration(session_db, data)
else:
return HandlerResponse.request_conflict(ErrorMsg.ERROR_REQUEST_DATA_CONFLICT, data)
logger.info('Data User to Register on DB: %s', str(data))
if not json_token:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, json_token)
return HandlerResponse.response_resource_created(SuccessMsg.MSG_CREATED_RECORD, json_token)
else:
return HandlerResponse.request_not_found(ErrorMsg.ERROR_REQUEST_NOT_FOUND)
@authorization_api.route('/', methods=['PUT', 'DELETE'])
@jwt_required
def manage_user_data():
conn_db, session_db = init_db_connection()
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return HandlerResponse.request_unauthorized(ErrorMsg.ERROR_REQUEST_UNAUTHORIZED, auth)
else:
data = dict()
if request.method == 'PUT':
data = request.get_json(force=True)
is_super_user = data['is_superuser']
user_model = UsersAuthModel(data)
if not data or str(data) is None and (bool(is_super_user) is False):
return HandlerResponse.request_conflict(ErrorMsg.ERROR_REQUEST_DATA_CONFLICT, data)
logger.info('Data Json User to Manage on DB: %s', str(data))
user_response = user_model.user_update(session_db, data)
logger.info('Data User to Update on DB: %s', str(data))
if not user_response:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, user_response)
return HandlerResponse.response_resource_created(SuccessMsg.MSG_UPDATED_RECORD, user_response)
elif request.method == 'DELETE':
data = request.get_json(force=True)
user_model = UsersAuthModel(data)
if not data or str(data) is None:
return HandlerResponse.request_conflict(ErrorMsg.ERROR_REQUEST_DATA_CONFLICT, data)
logger.info('Data Json Driver to Manage on DB: %s', str(data))
user_response = user_model.user_inactivate(session_db, data)
logger.info('Data User to Update on DB: %s', str(data))
if not user_response:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, user_response)
return HandlerResponse.response_resource_created(SuccessMsg.MSG_DELETED_RECORD, user_response)
else:
return HandlerResponse.request_not_found(ErrorMsg.ERROR_REQUEST_NOT_FOUND)
@authorization_api.route('/list', methods=['GET'])
@jwt_required
def get_list_users_auth():
conn_db, session_db = init_db_connection()
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return HandlerResponse.request_unauthorized(ErrorMsg.ERROR_REQUEST_UNAUTHORIZED, auth)
else:
data = dict()
json_token = dict()
if request.method == 'GET':
# To GET ALL Data of the Users:
data['offset'] = request.args.get('offset', 1)
data['limit'] = request.args.get('limit', 10)
users_on_db = None
user_model = UsersAuthModel(data)
users_on_db = user_model.get_all_users(session_db, data)
if not bool(users_on_db) or not users_on_db or "[]" == users_on_db:
return HandlerResponse.response_success(ErrorMsg.ERROR_DATA_NOT_FOUND, users_on_db)
return HandlerResponse.response_success(SuccessMsg.MSG_GET_RECORD, users_on_db)
else:
return HandlerResponse.request_not_found(ErrorMsg.ERROR_REQUEST_NOT_FOUND)
|
python
|