import os
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rlib.rposix import is_valid_fd
from rpython.rlib.rarithmetic import widen, ovfcheck_float_to_longlong
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rtyper.annlowlevel import llhelper
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.error import exception_from_saved_errno
from pypy.interpreter.gateway import unwrap_spec
from pypy.module.faulthandler import cintf, dumper
class Handler(object):
def __init__(self, space):
"NOT_RPYTHON"
self.space = space
self._cleanup_()
def _cleanup_(self):
self.fatal_error_w_file = None
self.dump_traceback_later_w_file = None
self.user_w_files = None
def check_err(self, p_err):
if p_err:
raise oefmt(self.space.w_RuntimeError, 'faulthandler: %s',
rffi.charp2str(p_err))
def get_fileno_and_file(self, w_file):
space = self.space
if space.is_none(w_file):
w_file = space.sys.get('stderr')
if space.is_none(w_file):
raise oefmt(space.w_RuntimeError, "sys.stderr is None")
elif space.isinstance_w(w_file, space.w_int):
fd = space.c_int_w(w_file)
if fd < 0 or not is_valid_fd(fd):
raise oefmt(space.w_ValueError,
"file is not a valid file descriptor")
return fd, None
fd = space.c_int_w(space.call_method(w_file, 'fileno'))
try:
space.call_method(w_file, 'flush')
except OperationError as e:
if e.async(space):
raise
pass # ignore flush() error
return fd, w_file
def setup(self):
dump_callback = llhelper(cintf.DUMP_CALLBACK, dumper._dump_callback)
self.check_err(cintf.pypy_faulthandler_setup(dump_callback))
def enable(self, w_file, all_threads):
fileno, w_file = self.get_fileno_and_file(w_file)
self.setup()
self.fatal_error_w_file = w_file
self.check_err(cintf.pypy_faulthandler_enable(
rffi.cast(rffi.INT, fileno),
rffi.cast(rffi.INT, all_threads)))
def disable(self):
cintf.pypy_faulthandler_disable()
self.fatal_error_w_file = None
def is_enabled(self):
return bool(widen(cintf.pypy_faulthandler_is_enabled()))
def dump_traceback(self, w_file, all_threads):
fileno, w_file = self.get_fileno_and_file(w_file)
self.setup()
cintf.pypy_faulthandler_dump_traceback(
rffi.cast(rffi.INT, fileno),
rffi.cast(rffi.INT, all_threads),
llmemory.NULL)
keepalive_until_here(w_file)
def dump_traceback_later(self, timeout, repeat, w_file, exit):
space = self.space
timeout *= 1e6
try:
microseconds = ovfcheck_float_to_longlong(timeout)
except OverflowError:
raise oefmt(space.w_OverflowError, "timeout value is too large")
if microseconds <= 0:
raise oefmt(space.w_ValueError, "timeout must be greater than 0")
fileno, w_file = self.get_fileno_and_file(w_file)
self.setup()
self.check_err(cintf.pypy_faulthandler_dump_traceback_later(
rffi.cast(rffi.LONGLONG, microseconds),
rffi.cast(rffi.INT, repeat),
rffi.cast(rffi.INT, fileno),
rffi.cast(rffi.INT, exit)))
self.dump_traceback_later_w_file = w_file
def cancel_dump_traceback_later(self):
cintf.pypy_faulthandler_cancel_dump_traceback_later()
self.dump_traceback_later_w_file = None
def check_signum(self, signum):
err = rffi.cast(lltype.Signed,
cintf.pypy_faulthandler_check_signum(signum))
if err < 0:
space = self.space
if err == -1:
raise oefmt(space.w_RuntimeError,
"signal %d cannot be registered, "
"use enable() instead", signum)
else:
raise oefmt(space.w_ValueError, "signal number out of range")
def register(self, signum, w_file, all_threads, chain):
self.check_signum(signum)
fileno, w_file = self.get_fileno_and_file(w_file)
self.setup()
self.check_err(cintf.pypy_faulthandler_register(
rffi.cast(rffi.INT, signum),
rffi.cast(rffi.INT, fileno),
rffi.cast(rffi.INT, all_threads),
rffi.cast(rffi.INT, chain)))
if self.user_w_files is None:
self.user_w_files = {}
self.user_w_files[signum] = w_file
def unregister(self, signum):
self.check_signum(signum)
change = cintf.pypy_faulthandler_unregister(
rffi.cast(rffi.INT, signum))
if self.user_w_files is not None:
self.user_w_files.pop(signum, None)
return rffi.cast(lltype.Signed, change) == 1
def finish(self):
cintf.pypy_faulthandler_teardown()
self._cleanup_()
def finish(space):
"Finalize the faulthandler logic (called from shutdown())"
space.fromcache(Handler).finish()
@unwrap_spec(all_threads=int)
def enable(space, w_file=None, all_threads=1):
"enable(file=sys.stderr, all_threads=True): enable the fault handler"
space.fromcache(Handler).enable(w_file, all_threads)
def disable(space):
"disable(): disable the fault handler"
space.fromcache(Handler).disable()
def is_enabled(space):
"is_enabled()->bool: check if the handler is enabled"
return space.newbool(space.fromcache(Handler).is_enabled())
@unwrap_spec(all_threads=int)
def dump_traceback(space, w_file=None, all_threads=0):
"""dump the traceback of the current thread into file
including all threads if all_threads is True"""
space.fromcache(Handler).dump_traceback(w_file, all_threads)
@unwrap_spec(timeout=float, repeat=int, exit=int)
def dump_traceback_later(space, timeout, repeat=0, w_file=None, exit=0):
"""dump the traceback of all threads in timeout seconds,
or each timeout seconds if repeat is True. If exit is True,
call _exit(1) which is not safe."""
space.fromcache(Handler).dump_traceback_later(timeout, repeat, w_file, exit)
def cancel_dump_traceback_later(space):
"""cancel the previous call to dump_traceback_later()."""
space.fromcache(Handler).cancel_dump_traceback_later()
@unwrap_spec(signum=int, all_threads=int, chain=int)
def register(space, signum, w_file=None, all_threads=1, chain=0):
space.fromcache(Handler).register(signum, w_file, all_threads, chain)
@unwrap_spec(signum=int)
def unregister(space, signum):
return space.newbool(space.fromcache(Handler).unregister(signum))
# for tests...
@unwrap_spec(release_gil=int)
def read_null(space, release_gil=0):
if release_gil:
cintf.pypy_faulthandler_read_null_releasegil()
else:
cintf.pypy_faulthandler_read_null()
@unwrap_spec(release_gil=int)
def sigsegv(space, release_gil=0):
if release_gil:
cintf.pypy_faulthandler_sigsegv_releasegil()
else:
cintf.pypy_faulthandler_sigsegv()
def sigfpe(space):
cintf.pypy_faulthandler_sigfpe()
def sigabrt(space):
cintf.pypy_faulthandler_sigabrt()
@unwrap_spec(levels=int)
def stack_overflow(space, levels=100000000):
levels = float(levels)
return space.newfloat(cintf.pypy_faulthandler_stackoverflow(levels))
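# Example app-level usage (a hypothetical sketch mirroring CPython's faulthandler
# API, which this module implements; it assumes the module is exposed to
# application code as `faulthandler`):
#
#     import faulthandler
#     faulthandler.enable()                            # dump traceback on fatal signals
#     faulthandler.dump_traceback_later(5.0, repeat=True)
#     ...
#     faulthandler.cancel_dump_traceback_later()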
from .CreateSource import CreateSource
from .DropSource import DropSource
from .InputKeys import InputKeys
from .OutputKeys import OutputKeys
from .ProducedKeys import (
ProducedKeys,
ProducedLinkKeys,
ProducedHubKeys
)
from .SatelliteQuery import SatelliteQuery
from .SerialiseSatellite import SerialiseSatellite
from .SerialiseSatelliteOwner import SerialiseSatelliteOwner
from .StarData import StarData
from .SatelliteOwnerKeys import SatelliteOwnerKeys
from .StarMerge import StarMerge
"""Array with time epochs
"""
# Standard library imports
from collections import namedtuple
from datetime import datetime, timedelta
from typing import Callable, Dict, List, Optional, Tuple, Any, TypeVar
from functools import lru_cache
try:
import importlib.resources as importlib_resources # Python >= 3.7
except ImportError:
import importlib_resources # Python <= 3.6: pip install importlib_resources
# Third party imports
import numpy as np
# Midgard imports
from midgard.dev import exceptions
from midgard.math.unit import Unit
from midgard.math.constant import constant
_SCALES: Dict[str, Dict[str, Callable]] = dict() # Populated by register_scale()
_CONVERSIONS: Dict[str, Dict[Tuple[str, str], Callable]] = dict() # Populated by register_scale()
_CONVERSION_HOPS: Dict[str, Dict[Tuple[str, str], List[str]]] = dict() # Cache for to_scale()
_FORMATS: Dict[str, Dict[str, Callable]] = dict() # Populated by register_format()
_FORMAT_UNITS: Dict[str, Dict[str, str]] = dict() # Populated by register_format()
# Type specification: scalar float or numpy array
np_float = TypeVar("np_float", float, np.ndarray)
#######################################################################################################################
# Module functions
#######################################################################################################################
def read_tai_utc():
package, _, _ = __name__.rpartition(".")
with importlib_resources.path(package, "_taiutc.txt") as path:
return np.genfromtxt(
path,
names=["start", "end", "offset", "ref_epoch", "factor"],
comments="#",
dtype=("f8", "f8", "f8", "f8", "f8"),
autostrip=True,
)
def register_scale(
convert_to: Dict[str, Callable] = None, convert_from: Dict[str, Callable] = None
) -> Callable[[Callable], Callable]:
"""Decorator used to register new time scales
The scale name is read from the .scale attribute of the Time class.
Args:
convert_to: Functions used to convert to other scales.
convert_from: Functions used to convert from other scales.
Returns:
Decorator registering scale.
"""
def wrapper(cls: Callable) -> Callable:
name = cls.scale
_SCALES[cls.cls_name][name] = cls
conversions = _CONVERSIONS.setdefault(cls.cls_name, dict())
if convert_to:
for to_scale, converter in convert_to.items():
conversions[(name, to_scale)] = converter
if convert_from:
for from_scale, converter in convert_from.items():
conversions[(from_scale, name)] = converter
return cls
return wrapper
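# Example (hypothetical): a new scale would be registered roughly like this,
# assuming a converter `_ut12tai` that maps a UT1 time array to (jd1, jd2) in TAI:
#
#     @register_scale(convert_to=dict(tai=_ut12tai))
#     class Ut1Time(TimeArray):
#         scale = "ut1"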
def register_format(cls: Callable) -> Callable:
"""Decorator used to register new time formats
The format name is read from the .format attribute of the TimeFormat class.
"""
name = cls.fmt
_FORMATS[cls.cls_name][name] = cls
_FORMAT_UNITS[cls.cls_name][name] = cls.unit
return cls
def _find_conversion_hops(cls: str, hop: Tuple[str, str]) -> List[Tuple[str, str]]:
"""Calculate the hops needed to convert between scales using breadth first search"""
start_scale, target_scale = hop
queue = [(start_scale, [])]
visited = set()
if start_scale == target_scale:
return [hop]
while queue:
from_scale, hops = queue.pop(0)
for to_scale in [t for f, t in _CONVERSIONS[cls] if f == from_scale]:
one_hop = (from_scale, to_scale)
if to_scale == target_scale:
return hops + [one_hop]
if one_hop not in visited:
visited.add(one_hop)
queue.append((to_scale, hops + [one_hop]))
raise exceptions.UnknownConversionError(f"Can't convert TimeArray from {start_scale!r} to {target_scale!r}")
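# Example: with the conversions registered further down in this module
# (utc->tai, tai->tt, tt->tcg, ...), _find_conversion_hops("TimeArray", ("utc", "tcg"))
# returns [("utc", "tai"), ("tai", "tt"), ("tt", "tcg")].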
######################################################################################################################
# Time classes
######################################################################################################################
class TimeBase(np.ndarray):
"""Base class for TimeArray and TimeDeltaArray"""
scale = None
def __new__(cls, val, fmt, val2=None, _jd1=None, _jd2=None):
"""Create a new TimeArray"""
if cls.scale is None:
raise ValueError(f"{cls.__name__} cannot be instantiated. Use Time(val=..., scale={cls.scale!r}) instead")
if fmt not in cls._formats():
formats = ", ".join(cls._formats())
raise exceptions.UnknownSystemError(f"Format {fmt!r} unknown. Use one of {formats}")
# Convert to numpy array and read format
fmt_values = cls._formats()[fmt](val, val2, cls.scale)
val = np.asarray(val)
if val2 is not None:
val2 = np.asarray(val2)
if val2.shape != val.shape:
raise ValueError(f"'val2' must have the same shape as 'val': {val.shape}")
if val2.ndim == 0:
val2 = val2.item()
if val.ndim == 0:
val = val.item()
# Store values on array
obj = np.asarray(fmt_values.value).view(cls)
jd1 = fmt_values.jd1 if _jd1 is None else _jd1
jd2 = fmt_values.jd2 if _jd2 is None else _jd2
# Validate shape
fmt_ndim = cls._formats()[fmt].ndim
if obj.ndim > fmt_ndim:
raise ValueError(
f"{type(self).__name__!r} must be a {fmt_ndim - 1} or {fmt_ndim}-dimensional array for format type {obj_fmt}"
)
# Freeze
if isinstance(jd1, np.ndarray):
jd1.flags.writeable = False
if isinstance(jd2, np.ndarray):
jd2.flags.writeable = False
super(TimeBase, obj).__setattr__("fmt", fmt)
super(TimeBase, obj).__setattr__("jd1", jd1)
super(TimeBase, obj).__setattr__("jd2", jd2)
if isinstance(obj, np.ndarray):
obj.flags.writeable = False
return obj
def __array_finalize__(self, obj):
"""Called automatically when a new TimeArray is created"""
if obj is None:
return
obj_fmt = getattr(obj, "fmt", None)
# Copy attributes from the original object
super().__setattr__("fmt", obj_fmt)
jd1_sliced = getattr(obj, "_jd1_sliced", None)
if jd1_sliced is not None:
super().__setattr__("jd1", jd1_sliced)
else:
super().__setattr__("jd1", getattr(obj, "jd1", None))
jd2_sliced = getattr(obj, "_jd2_sliced", None)
if jd2_sliced is not None:
super().__setattr__("jd2", jd2_sliced)
else:
super().__setattr__("jd2", getattr(obj, "jd2", None))
# Validate shape for arrays not created with __new__
if obj_fmt and self.ndim > 1:
fmt_ndim = self._formats()[obj_fmt].ndim
if self.ndim > fmt_ndim:
raise ValueError(
f"{type(self).__name__!r} must be a {fmt_ndim - 1} or {fmt_ndim}-dimensional array for format type {obj_fmt}"
)
# Freeze
self.flags.writeable = False
if self.jd1 is not None and self.jd2 is not None:
if isinstance(self.jd1, np.ndarray):
self.jd1.flags.writeable = False
if isinstance(self.jd2, np.ndarray):
self.jd2.flags.writeable = False
def __lt__(self, other):
return self.jd < other.jd
@lru_cache()
def to_scale(self, scale: str) -> "TimeBase":
"""Convert to a different scale
Returns a new array with the same time in the new scale.
Args:
scale: Name of new scale.
Returns:
TimeBase representing the same times in the new scale.
"""
# Don't convert if not necessary
if scale == self.scale:
return self
# Raise error for unknown scales
if scale not in self._scales():
scales = ", ".join(self._scales())
raise exceptions.UnknownSystemError(f"Scale {scale!r} unknown. Use one of {scales}")
# Simplified conversion if time is None
if self.shape == () and self.item() is None: # time is None
return _SCALES[self.cls_name][scale](val=None, fmt=self.fmt, _jd1=None, _jd2=None)
# Convert to new scale
hop = (self.scale, scale)
if hop in _CONVERSIONS[self.cls_name]:
jd1, jd2 = _CONVERSIONS[self.cls_name][hop](self)
try:
return self._scales()[scale].from_jds(jd1, jd2, self.fmt)
except ValueError:
# Given format does not exist for selected time scale, use default jd
return self._scales()[scale].from_jds(jd1, jd2, "jd")
if hop not in _CONVERSION_HOPS.setdefault(self.cls_name, {}):
_CONVERSION_HOPS[self.cls_name][hop] = _find_conversion_hops(self.cls_name, hop)
converted_time = self
for one_hop in _CONVERSION_HOPS[self.cls_name][hop]:
jd1, jd2 = _CONVERSIONS[self.cls_name][one_hop](converted_time)
try:
converted_time = self._scales()[one_hop[-1]].from_jds(jd1, jd2, self.fmt)
except ValueError:
# Given format does not exist for selected time scale, use default jd
converted_time = self._scales()[one_hop[-1]].from_jds(jd1, jd2, "jd")
return converted_time
def subset(self, idx, memo):
"""Create a subset"""
old_id = id(self)
if old_id in memo:
return memo[old_id]
new_time = self._scales()[self.scale](
np.asarray(self)[idx], fmt=self.fmt, _jd1=self.jd1[idx], _jd2=self.jd2[idx]
)
memo[old_id] = new_time
return new_time
@classmethod
def insert(cls, a, pos, b, memo):
"""Insert b into a at index pos"""
id_a = id(a)
if id_a in memo:
return memo[id_a][-1]
id_b = id(b)
if id_b in memo:
return memo[id_b][-1]
b = b if a.scale == b.scale else getattr(b, a.scale)
b_formatted = np.asarray(b) if a.fmt == b.fmt else getattr(b, a.fmt)
val = np.insert(np.asarray(a), pos, b_formatted)
jd1 = np.insert(a.jd1, pos, b.jd1)
jd2 = np.insert(a.jd2, pos, b.jd2)
new_time = cls._scales()[a.scale](val, fmt=a.fmt, _jd1=jd1, _jd2=jd2)
memo[id(a)] = (a, new_time)
memo[id(b)] = (b, new_time)
return new_time
@property
def val(self):
return np.asarray(self)
@classmethod
def _cls_scale(cls, scale: str) -> "Type[TimeArray]":
"""Check that scale is valid and return corresponding type"""
if scale not in cls._scales():
scales = ", ".join(sorted(cls._scales()))
raise exceptions.UnknownSystemError(f"Scale {scale!r} unknown. Use one of {scales}")
return cls._scales()[scale]
@classmethod
def create(
cls,
val: np.ndarray,
scale: str,
fmt: str,
val2: Optional[np.ndarray] = None,
_jd1: Optional[np.ndarray] = None,
_jd2: Optional[np.ndarray] = None,
) -> "TimeArray":
"""Factory for creating TimeArrays for different scales
See each time class for exact optional parameters.
Args:
val: Array of time values.
scale: Name of time scale.
pos_args: Additional arguments used to create the TimeArray.
Returns:
Array with times in the given scale.
"""
return cls._cls_scale(scale)(val, val2=val2, fmt=fmt, _jd1=_jd1, _jd2=_jd2)
@classmethod
def from_jds(cls, jd1: np.ndarray, jd2: np.ndarray, fmt: str) -> "TimeArray":
"""Create a new time array with given Julian dates and format, keep scale
"""
fmt_value = cls._formats()[fmt].from_jds(jd1, jd2, cls.scale)
return cls(val=fmt_value, fmt=fmt, _jd1=jd1, _jd2=jd2)
@classmethod
def _scales(cls):
return _SCALES.get(cls.cls_name, dict())
@classmethod
def _conversions(cls):
return _CONVERSIONS.get(cls.cls_name, dict())
@property
def SCALES(self):
return list(self._scales().keys())
@property
def FORMATS(self):
return list(self._formats().keys())
@property
def CONVERSIONS(self):
return list(self._conversions().keys())
def fieldnames(self):
"""Return list of valid attributes for this object"""
# Pick one element to avoid doing calculations on a large array
obj = self if len(self) == 1 else self[0]
scales_and_formats = []
for scale in obj._scales():
try:
_find_conversion_hops(self.cls_name, (obj.scale, scale))
# Add scales
scales_and_formats.append(scale)
scale_time = getattr(obj, scale)
fmt_cls = obj.cls_name.replace("Array", "Format")
for fmt in _FORMATS.get(fmt_cls, {}):
# Add system fields
try:
fmt_time = getattr(scale_time, fmt)
if isinstance(fmt_time, tuple) and hasattr(fmt_time, "_fields"):
for f in fmt_time._fields:
scales_and_formats.append(f"{scale}.{fmt}.{f}")
else:
scales_and_formats.append(f"{scale}.{fmt}")
except ValueError:
pass # Skip formats that are invalid for that scale
except exceptions.UnknownConversionError:
pass # Skip systems that cannot be converted to
return scales_and_formats
@lru_cache()
def plot_fields(self):
"""Returns list of attributes that can be plotted"""
obj = self if len(self) == 1 else self[0]
fieldnames = set(self.fieldnames())
text_fields = set()
for f in fieldnames:
attr_value = getattr(obj, f)
if isinstance(attr_value, np.ndarray) and attr_value.dtype.type is np.str_:
text_fields.add(f)
elif isinstance(attr_value, str):
text_fields.add(f)
return list(fieldnames - text_fields)
def unit(self, field: str = "") -> Tuple[str, ...]:
"""Unit of field"""
# mainfield, _, subfield = field.partition(".")
# Units of formats
field = self.fmt if not field else field
if field in self._formats():
return _FORMAT_UNITS[self.cls_name.replace("Array", "Format")][field]
# Units of properties
else:
return self._unit
@lru_cache()
def to_format(self, fmt: str):
return self._formats()[fmt].from_jds(self.jd1, self.jd2, scale=self.scale)
def __hash__(self):
try:
return hash(self.jd1.data.tobytes()) + hash(self.jd2.data.tobytes())
except AttributeError:
return hash(str(self.jd1)) + hash(str(self.jd2))
def __eq__(self, other):
if isinstance(other, self.__class__):
return np.all(self.jd1 == other.jd1) and np.all(self.jd2 == other.jd2)
else:
return NotImplemented
def __getattr__(self, key):
"""Get attributes with dot notation
Add time scales and formats to attributes on Time arrays.
"""
if "." in key:
mainfield, _, subfield = key.partition(".")
return getattr(getattr(self, mainfield), subfield)
# Convert to a different scale
if key in self._scales():
return self.to_scale(key)
# Convert to a different format
elif key in self._formats():
return self.to_format(key)
# Raise error for unknown attributes
else:
raise AttributeError(f"{type(self).__name__!r} has no attribute {key!r}") from None
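# Example (illustrative): given t = TimeArray.create(val, scale="utc", fmt="isot"),
# t.tai converts the scale, t.mjd converts the format, and getattr(t, "tai.mjd")
# chains both through the dot-notation handling above.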
def __len__(self):
fmt_ndim = self._formats()[self.fmt].ndim
return int(self.size / fmt_ndim)
def __setattr__(self, name, value):
raise AttributeError(f"{self.__class__.__name__} object does not support item assignment ")
def __copy__(self):
return self.create(val=self.val.copy(), fmt=self.fmt, scale=self.scale, _jd1=self.jd1, _jd2=self.jd2)
def __deepcopy__(self, memo):
"""Deep copy a TimeArray
"""
time = self.create(val=self.val.copy(), fmt=self.fmt, scale=self.scale, _jd1=self.jd1, _jd2=self.jd2)
memo[id(time)] = time
return time
# Override numpys copy method # Might not be needed for numpy 1.16 or higher
copy = __copy__
def __getitem__(self, item):
"""Update _jd*_sliced with correct shape, used by __array_finalize__"""
fmt_ndim = self._formats()[self.fmt].ndim
if isinstance(item, tuple) and fmt_ndim > 1 and len(item) > 1:
# Only use item row to slice jds
super().__setattr__("_jd1_sliced", self.jd1[item[-1]])
super().__setattr__("_jd2_sliced", self.jd2[item[-1]])
else:
if isinstance(self.jd1, np.ndarray):
super().__setattr__("_jd1_sliced", self.jd1[item])
if isinstance(self.jd2, np.ndarray):
super().__setattr__("_jd2_sliced", self.jd2[item])
if isinstance(item, (int, np.int_)):
return self._scales()[self.scale].from_jds(self._jd1_sliced, self._jd2_sliced, self.fmt)
return super().__getitem__(item)
@classmethod
def _read(cls, h5_group, memo):
scale = h5_group.attrs["scale"]
fmt = h5_group.attrs["fmt"]
jd1 = h5_group["jd1"][...]
jd2 = h5_group["jd2"][...]
time = cls._cls_scale(scale).from_jds(jd1, jd2, fmt)
memo[f"{h5_group.attrs['fieldname']}"] = time
return time
def _write(self, h5_group, memo):
h5_group.attrs["scale"] = self.scale
h5_group.attrs["fmt"] = self.fmt
h5_field = h5_group.create_dataset("jd1", self.jd1.shape, dtype=self.jd1.dtype)
h5_field[...] = self.jd1
h5_field = h5_group.create_dataset("jd2", self.shape, dtype=self.jd2.dtype)
h5_field[...] = self.jd2
def __dir__(self):
"""List all fields and attributes on the Time array"""
return super().__dir__() + list(self._scales()) + list(self._formats())
def __repr__(self):
cls_name = type(self).__name__
repr_str = super().__repr__()
return repr_str.replace(f"{cls_name}(", f"{cls_name}(scale={self.scale!r}, fmt={self.fmt!r}, ")
#
# The main Time class
#
class TimeArray(TimeBase):
"""Base class for time objects. Is immutable to allow the data to be hashable"""
cls_name = "TimeArray"
type = "time"
_SCALES.setdefault(cls_name, dict())
_unit = None
@classmethod
def now(cls, scale="utc", fmt="datetime") -> "TimeArray":
"""Create a new time representing now"""
jd1, jd2 = cls._formats()["datetime"].to_jds(datetime.now(), scale=scale)
return cls._cls_scale("utc").from_jds(jd1, jd2, fmt=fmt).to_scale(scale)
@classmethod
def empty_from(cls, other: "TimeArray") -> "TimeArray":
"""Create a new time of the same type as other but with empty(datetime.min) values
"""
return cls._scales()[other.scale](np.full(other.shape, fill_value=datetime.min), fmt="datetime")
@classmethod
def _formats(cls):
return _FORMATS["TimeFormat"]
@property
@Unit.register(("year",))
@lru_cache()
def year(self):
if isinstance(self.datetime, datetime):
return self.datetime.year
return np.array([d.year for d in self.datetime])
@property
@lru_cache()
@Unit.register(("month",))
def month(self):
if isinstance(self.datetime, datetime):
return self.datetime.month
return np.array([d.month for d in self.datetime])
@property
@lru_cache()
@Unit.register(("day",))
def day(self):
if isinstance(self.datetime, datetime):
return self.datetime.day
return np.array([d.day for d in self.datetime])
@property
@lru_cache()
@Unit.register(("hour",))
def hour(self):
if isinstance(self.datetime, datetime):
return self.datetime.hour
return np.array([d.hour for d in self.datetime])
@property
@lru_cache()
@Unit.register(("minute",))
def minute(self):
if isinstance(self.datetime, datetime):
return self.datetime.minute
return np.array([d.minute for d in self.datetime])
@property
@lru_cache()
@Unit.register(("second",))
def second(self):
if isinstance(self.datetime, datetime):
return self.datetime.second
return np.array([d.second for d in self.datetime])
@property
@lru_cache()
@Unit.register(("day",))
def doy(self):
if isinstance(self.datetime, datetime):
return self.datetime.timetuple().tm_yday
return np.array([d.timetuple().tm_yday for d in self.datetime])
@property
@lru_cache()
@Unit.register(("second",))
def sec_of_day(self):
"""Seconds since midnight
Note - Does not support leap seconds
Returns:
Seconds since midnight
"""
if isinstance(self.datetime, datetime):
return self.datetime.hour * 60 * 60 + self.datetime.minute * 60 + self.datetime.second
return np.array([d.hour * 60 * 60 + d.minute * 60 + d.second for d in self.datetime])
@property
@lru_cache()
def mean(self):
"""Mean time
Returns:
Time: Time object containing the mean time
"""
if self.size == 1:
return self
return self._cls_scale(self.scale)(np.mean(self.utc.jd), fmt="jd")
@property
@lru_cache()
def min(self):
return self[np.argmin(self.jd)]
@property
@lru_cache()
def max(self):
return self[np.argmax(self.jd)]
@property
@lru_cache()
def jd_int(self):
"""Integer part of Julian Day
To ensure consistency, we therefore add two properties `jd_int` and `jd_frac` where the integer part is
guaranteed to be a "half-integer" (e.g. 2457617.5) and the fractional part is guaranteed to be a float in the
range [0., 1.). The parts are calculated from `jd1` and `jd2` to preserve precision.
Returns:
Numpy-float scalar or array with (half-)integer part of Julian Day.
"""
return self.jd1 - self._jd_delta
@property
@lru_cache()
def jd_frac(self):
"""Fractional part of Julian Day
See the docstring of `jd_int` for more information.
Returns:
Numpy-float scalar or array with fractional part of Julian Day, in the range [0., 1.).
"""
return self.jd2 + self._jd_delta
@property
@lru_cache()
def _jd_delta(self):
"""Delta between jd1 and jd_int
This is a helper function used by `jd_int` and `jd_frac` to find the difference to `jd1` and `jd2`
respectively. See the docstring of `jd_int` for more information.
Returns:
Numpy-float scalar or array with difference between `jd1` and the integer part of Julian Day.
"""
return self.jd1 - (np.floor(self.jd - 0.5) + 0.5)
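# Worked example: jd1 = 2457617.0 and jd2 = 0.25 represent JD 2457617.25.
# Here _jd_delta = 2457617.0 - 2457616.5 = 0.5, so jd_int = 2457616.5 (a
# half-integer) and jd_frac = 0.75, and jd_int + jd_frac equals the original JD.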
@property
@lru_cache()
def mjd_int(self):
"""Integer part of Modified Julian Day
In general, we have that MJD = JD - 2400000.5. See the docstring of `jd_int` for more information.
Returns:
Numpy-float scalar or array with the integer part of Modified Julian Day.
"""
return self.jd_int - 2_400_000.5
@property
@lru_cache()
def mjd_frac(self):
"""Fractional part of Modified Julian Day
See the docstring of `jd_int` for more information. The way we have defined `jd_int` and `jd_frac` means that
`mjd_frac` will be equal to `jd_frac`.
Returns:
Numpy-float scalar or array with the fractional part of Modified Julian Day, in the range [0., 1.).
"""
return self.jd_frac
def __add__(self, other):
"""self + other"""
if self.scale != other.scale:
return NotImplemented
if isinstance(other, TimeDeltaArray):
# time + timedelta
jd2 = self.jd2 + other.days
return self.from_jds(self.jd1, jd2, self.fmt)
elif isinstance(other, TimeArray):
# time1 + time2 does not make sense
return NotImplemented
return NotImplemented
def __sub__(self, other):
"""self - other"""
if self.scale != other.scale:
return NotImplemented
if isinstance(other, TimeDeltaArray):
# time - timedelta -> time
jd1 = self.jd1 - other.jd1
jd2 = self.jd2 - other.jd2
return self.from_jds(jd1, jd2, self.fmt)
elif isinstance(other, TimeArray):
# time - time -> timedelta
jd1 = self.jd1 - other.jd1
jd2 = self.jd2 - other.jd2
fmt = "timedelta" if self.fmt == other.fmt == "datetime" else "jd"
return _SCALES["TimeDeltaArray"][self.scale].from_jds(jd1, jd2, fmt)
return NotImplemented
# Turn off remaining arithmetic operations
def __rsub__(self, _):
""" other - self"""
return NotImplemented
def __radd__(self, _):
"""other + self"""
return NotImplemented
def __iadd__(self, _):
"""Immutable object does not support this operation"""
return NotImplemented
def __isub__(self, _):
"""Immutable object does not support this operation"""
return NotImplemented
class TimeDeltaArray(TimeBase):
"""Base class for time delta objects. Is immutable to allow the data to be hashable"""
cls_name = "TimeDeltaArray"
type = "time_delta"
_SCALES.setdefault(cls_name, dict())
_unit = None
@classmethod
def empty_from(cls, other: "TimeDeltaArray") -> "TimeDeltaArray":
"""Create a new time of the same type as other but with empty(datetime.min) values
"""
return cls._scales()[other.scale](np.full(other.shape, fill_value=timedelta(seconds=0)), fmt="timedelta")
@lru_cache()
def plot_fields(self):
"""Returns list of attributes that can be plotted"""
obj = self if len(self) == 1 else self[0]
scales_and_formats = []
try:
# Add scale
scales_and_formats.append(obj.scale)
fmt_cls = obj.cls_name.replace("Array", "Format")
for fmt in _FORMATS.get(fmt_cls, {}):
# Add system fields
try:
fmt_time = getattr(obj, fmt)
if isinstance(fmt_time, np.ndarray) and fmt_time.dtype.type is np.str_:
# Skip string formats
continue
if isinstance(fmt_time, str):
# Skip string formats
continue
scales_and_formats.append(f"{obj.scale}.{fmt}")
except ValueError:
pass # Skip formats that are invalid for that scale
except exceptions.UnknownConversionError:
pass # Skip systems that cannot be converted to
return scales_and_formats
@classmethod
def _formats(cls):
return _FORMATS["TimeDeltaFormat"]
def __add__(self, other):
"""self + other """
if self.scale != other.scale:
return NotImplemented
if isinstance(other, TimeDeltaArray):
# timedelta + timedelta -> timedelta
jd1 = self.jd1 + other.jd1
jd2 = self.jd2 + other.jd2
return self.from_jds(jd1, jd2, fmt=self.fmt)
elif isinstance(other, TimeArray):
# timedelta + time -> time
jd1 = self.jd1 + other.jd1
jd2 = self.jd2 + other.jd2
return other.from_jds(jd1, jd2, fmt=other.fmt)
return NotImplemented
def __sub__(self, other):
"""self - other"""
if self.scale != other.scale:
return NotImplemented
if isinstance(other, TimeArray):
# timedelta - time -> time
jd1 = self.jd1 - other.jd1
jd2 = self.jd2 - other.jd2
return other.from_jds(jd1, jd2, fmt=other.fmt)
elif isinstance(other, TimeDeltaArray):
# timedelta - timedelta -> timedelta
jd1 = self.jd1 - other.jd1
jd2 = self.jd2 - other.jd2
return self.from_jds(jd1, jd2, fmt=self.fmt)
return NotImplemented
# Turn off remaining arithmetic operations
def __radd__(self, _):
"""other - self"""
return NotImplemented
def __rsub__(self, _):
"""other - self"""
return NotImplemented
def __iadd__(self, _):
"""Immutable object does not support this operation"""
return NotImplemented
def __isub__(self, _):
"""Immutable object does not support this operation"""
return NotImplemented
#######################################################################################################################
# Time scales
#######################################################################################################################
# Time deltas
def delta_tai_utc(time: "TimeArray") -> "np_float":
try:
idx = [np.argmax(np.logical_and(t.jd >= _TAIUTC["start"], t.jd < _TAIUTC["end"])) for t in time]
except TypeError:
idx = np.argmax(np.logical_and(time.jd >= _TAIUTC["start"], time.jd < _TAIUTC["end"]))
delta = _TAIUTC["offset"][idx] + (time.mjd - _TAIUTC["ref_epoch"][idx]) * _TAIUTC["factor"][idx]
if time.scale == "utc":
return delta * Unit.seconds2day
else:
# time.scale is tai
tmp_utc_jd = time.tai.jd - delta * Unit.seconds2day
tmp_utc_mjd = time.tai.mjd - delta * Unit.seconds2day
try:
idx = [np.argmax(np.logical_and(t >= _TAIUTC["start"], t < _TAIUTC["end"])) for t in tmp_utc_jd]
except TypeError:
idx = np.argmax(np.logical_and(tmp_utc_jd >= _TAIUTC["start"], tmp_utc_jd < _TAIUTC["end"]))
delta = _TAIUTC["offset"][idx] + (tmp_utc_mjd - _TAIUTC["ref_epoch"][idx]) * _TAIUTC["factor"][idx]
return -delta * Unit.seconds2day
def delta_tai_tt(time: "TimeArray") -> "np_float":
delta = 32.184 * Unit.seconds2day
if time.scale == "tt":
return -delta
else:
# time.scale is tai
return delta
def delta_tcg_tt(time: "TimeArray") -> "np_float":
dt = time.jd1 - constant.T_0_jd1 + time.jd2 - constant.T_0_jd2
if time.scale == "tt":
return constant.L_G / (1 - constant.L_G) * dt
else:
# time.scale is tcg
return -constant.L_G * dt
def delta_gps_tai(time: "TimeArray") -> "np_float":
delta = 19 * Unit.seconds2day
if time.scale == "gps":
return delta
else:
# time.scale is tai
return -delta
#
# Time scale conversions
#
def _utc2tai(utc: "TimeArray") -> ("np_float", "np_float"):
"""Convert UTC to TAI"""
return utc.jd1, utc.jd2 + delta_tai_utc(utc)
def _tai2utc(tai: "TimeArray") -> ("np_float", "np_float"):
"""Convert TAI to UTC"""
return tai.jd1, tai.jd2 + delta_tai_utc(tai)
def _tai2tt(tai: "TimeArray") -> ("np_float", "np_float"):
"""Convert TAI to UTC"""
return tai.jd1, tai.jd2 + delta_tai_tt(tai)
def _tt2tai(tt: "TimeArray") -> ("np_float", "np_float"):
"""Convert TT to TAI"""
return tt.jd1, tt.jd2 + delta_tai_tt(tt)
def _tt2tcg(tt: "TimeArray") -> ("np_float", "np_float"):
"""Convert TT to TCG"""
return tt.jd1, tt.jd2 + delta_tcg_tt(tt)
def _tcg2tt(tcg: "TimeArray") -> ("np_float", "np_float"):
"""Convert TCG to TT"""
return tcg.jd1, tcg.jd2 + delta_tcg_tt(tcg)
def _gps2tai(gps: "TimeArray") -> ("np_float", "np_float"):
"""Convert GPS to TAI"""
return gps.jd1, gps.jd2 + delta_gps_tai(gps)
def _tai2gps(tai: "TimeArray") -> ("np_float", "np_float"):
"""Convert TAI to GPS"""
return tai.jd1, tai.jd2 + delta_gps_tai(tai)
#
# Time scales
#
@register_scale(convert_to=dict(tai=_utc2tai))
class UtcTime(TimeArray):
scale = "utc"
@register_scale(convert_to=dict(utc=_tai2utc, tt=_tai2tt, gps=_tai2gps))
class TaiTime(TimeArray):
scale = "tai"
@register_scale(convert_to=dict(tt=_tcg2tt))
class TcgTime(TimeArray):
scale = "tcg"
@register_scale(convert_to=dict(tai=_gps2tai))
class GpsTime(TimeArray):
scale = "gps"
@register_scale(convert_to=dict(tai=_tt2tai, tcg=_tt2tcg))
class TtTime(TimeArray):
scale = "tt"
#
# Time Delta scales
#
@register_scale(convert_to=dict())
class UtcTimeDelta(TimeDeltaArray):
scale = "utc"
@register_scale(convert_to=dict())
class TaiTimeDelta(TimeDeltaArray):
scale = "tai"
@register_scale(convert_to=dict())
class TcgTimeDelta(TimeDeltaArray):
scale = "tcg"
@register_scale(convert_to=dict())
class GpsTimeDelta(TimeDeltaArray):
scale = "gps"
@register_scale(convert_to=dict())
class TtTimeDelta(TimeDeltaArray):
scale = "tt"
######################################################################################################################
# Formats
######################################################################################################################
#
# Time formats
#
class TimeFormat:
cls_name = "TimeFormat"
_FORMATS.setdefault(cls_name, dict())
_FORMAT_UNITS.setdefault(cls_name, dict())
fmt = None
unit = None
ndim = 1
day2seconds = Unit.day2seconds
week2days = Unit.week2days
def __init__(self, val, val2=None, scale=None):
"""Convert val and val2 to Julian days"""
self.scale = scale
if val is None:
self.jd1 = None
self.jd2 = None
elif np.asarray(val).size == 0 and np.asarray(val).ndim == 1: # Empty array
self.jd1 = np.array([])
self.jd2 = np.array([])
else:
self.jd1, self.jd2 = self.to_jds(val, val2=val2, scale=scale)
@classmethod
def to_jds(cls, val, val2=None, scale=None):
"""Convert val and val2 to Julian days and set the .jd1 and .jd2 attributes"""
if val is None and val2 is None:
return None, None
return cls._to_jds(val, val2, scale)
@classmethod
def _to_jds(cls, val, val2, scale):
"""Convert val and val2 to Julian days and set the .jd1 and .jd2 attributes"""
raise NotImplementedError
@classmethod
def from_jds(cls, jd1, jd2, scale):
"""Convert Julian days to the right format"""
if jd1 is None and jd2 is None:
return None
return cls._from_jds(jd1, jd2, scale)
@classmethod
def _from_jds(cls, jd1, jd2, scale):
"""Convert Julian days to the right format"""
raise NotImplementedError
@property
def value(self):
"""Convert Julian days to the right format"""
if self.jd1 is None and self.jd2 is None:
return None
return self.from_jds(self.jd1, self.jd2, self.scale)
class TimeDeltaFormat(TimeFormat):
"""Base class for Time Delta formats"""
cls_name = "TimeDeltaFormat"
_FORMATS.setdefault(cls_name, dict())
_FORMAT_UNITS.setdefault(cls_name, dict())
@register_format
class TimeJD(TimeFormat):
fmt = "jd"
unit = ("day",)
@classmethod
def _to_jds(cls, val, val2, scale=None):
if val2 is None:
try:
val2 = np.zeros(val.shape)
except AttributeError:
val2 = 0
val = np.asarray(val)
_delta = val - (np.floor(val + val2 - 0.5) + 0.5)
jd1 = val - _delta
jd2 = val2 + _delta
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
return jd1 + jd2
@register_format
class TimeMJD(TimeFormat):
"""Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
fmt = "mjd"
unit = ("day",)
_mjd0 = 2_400_000.5
@classmethod
def _to_jds(cls, val, val2, scale=None):
if val2 is None:
try:
val2 = np.zeros(val.shape)
except AttributeError:
val2 = 0
val = np.asarray(val)
_delta = val - (np.floor(val + val2 - 0.5) + 0.5)
jd1 = cls._mjd0 + val - _delta
jd2 = val2 + _delta
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
return jd1 - cls._mjd0 + jd2
@register_format
class TimeDateTime(TimeFormat):
fmt = "datetime"
unit = None
_jd2000 = 2_451_544.5
_dt2000 = datetime(2000, 1, 1)
@classmethod
def _to_jds(cls, val, val2=None, scale=None):
try:
if val2 is not None:
val = np.asarray(val) + np.asarray(val2)
return np.array([cls._dt2jd(dt) for dt in val]).T
except TypeError:
if val2 is not None:
val = val + val2
return cls._dt2jd(val)
@classmethod
@lru_cache()
def _dt2jd(cls, dt):
"""Convert one datetime to one Julian date pair"""
delta = dt - cls._dt2000
jd1 = cls._jd2000 + delta.days
delta -= timedelta(days=delta.days)
jd2 = delta.total_seconds() / cls.day2seconds
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
try:
return np.array([cls._jd2dt(j1, j2) for j1, j2 in zip(jd1, jd2)])
except TypeError:
return cls._jd2dt(jd1, jd2)
@classmethod
@lru_cache()
def _jd2dt(cls, jd1, jd2):
"""Convert one Julian date to a datetime"""
return cls._dt2000 + timedelta(days=jd1 - cls._jd2000) + timedelta(days=jd2)
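# Worked example: datetime(2000, 1, 1) lies exactly on _dt2000, so _dt2jd
# returns (jd1, jd2) = (2451544.5, 0.0), and _jd2dt inverts this exactly.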
# @register_format
# class TimePlotDate(TimeFormat):
# """Matplotlib date format
#
# Matplotlib represents dates using floating point numbers specifying the number
# of days since 0001-01-01 UTC, plus 1. For example, 0001-01-01, 06:00 is 1.25,
# not 0.25. Values < 1, i.e. dates before 0001-01-01 UTC are not supported.
#
# Warning: This requires matplotlib version 3.2.2 or lower
# """
#
# fmt = "plot_date"
# unit = None
# _jd0001 = 1721424.5 # julian day 0001-01-01 minus 1
#
# def __init__(self, val, val2=None, scale=None):
# """Convert val and val2 to Julian days"""
# print(f"Warning: TimeFormat {self.fmt} is deprecated and requires matplotlib version 3.2.2 or lower. Will be removed in future versions.")
# super().__init__(val, val2, scale)
#
# @classmethod
# def _to_jds(cls, val, val2=None, scale=None):
# print(f"Warning: TimeFormat {cls.fmt} is deprecated and requires matplotlib version 3.2.2 or lower. Will be removed in future versions.")
# if val2 is None:
# try:
# val2 = np.zeros(val.shape)
# except AttributeError:
# val2 = 0
#
# _delta = val - (np.floor(val + val2 - 0.5) + 0.5)
# jd1 = cls._jd0001 + val - _delta
# jd2 = val2 + _delta
# return jd1, jd2
#
# @classmethod
# def _from_jds(cls, jd1, jd2, scale=None):
# print(f"Warning: TimeFormat {cls.fmt} is deprecated and requires matplotlib version 3.2.2 or lower. Will be removed in future versions.")
# return jd1 - cls._jd0001 + jd2
@register_format
class TimeGPSWeekSec(TimeFormat):
"""GPS weeks and seconds."""
fmt = "gps_ws"
unit = ("week", "second")
_jd19800106 = 2_444_244.5
WeekSec = namedtuple("week_sec", ["week", "seconds", "day"])
ndim = len(WeekSec._fields)
@classmethod
def _to_jds(cls, val, val2, scale=None):
if scale != "gps":
raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
if isinstance(val, cls.WeekSec):
week = np.asarray(val.week)
sec = np.asarray(val.seconds)
elif val2 is None:
raise ValueError(f"val2 should be seconds (not {val2}) for format {cls.fmt}")
else:
week = np.asarray(val)
sec = np.asarray(val2)
# Determine GPS day
wd = np.floor((sec + 0.5 * cls.day2seconds) / cls.day2seconds) # 0.5 d = 43200.0 s
# Determine remainder
fracSec = sec + 0.5 * cls.day2seconds - wd * cls.day2seconds
# Conversion GPS week and day to from Julian Date (JD)
jd_day = week * Unit.week2days + wd + cls._jd19800106 - 0.5
jd_frac = fracSec / cls.day2seconds
return jd_day, jd_frac
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
if scale != "gps":
raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
if np.any(jd1 + jd2 < cls._jd19800106):
raise ValueError(f"Julian Day exceeds the GPS time start date of 6-Jan-1980 (JD {cls._jd19800106})")
# See Time.jd_int for explanation
_delta = jd1 - (np.floor(jd1 + jd2 - 0.5) + 0.5)
jd_int = jd1 - _delta
jd_frac = jd2 + _delta
# .. Conversion from Julian Date (JD) to GPS week and day
wwww = np.floor((jd_int - cls._jd19800106) / cls.week2days)
wd = np.floor(jd_int - cls._jd19800106 - wwww * cls.week2days)
gpssec = (jd_frac + wd) * cls.day2seconds
return cls.WeekSec(wwww, gpssec, wd)
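# Worked example: week 0, second 0 is the GPS epoch 1980-01-06. In _to_jds,
# wd = floor(43200 / 86400) = 0 and fracSec = 43200.0, giving jd_day = 2444244.0
# and jd_frac = 0.5, i.e. JD 2444244.5 = _jd19800106, as expected.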
@register_format
class TimeGPSSec(TimeFormat):
"""Number of seconds since the GPS epoch 1980-01-06 00:00:00 UTC."""
fmt = "gps_seconds"
unit = "second"
_jd19800106 = 2_444_244.5
@classmethod
def _to_jds(cls, val, val2, scale=None):
if scale != "gps":
raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
if val2 is not None:
raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
days = np.asarray(val) * Unit.second2day
days_int = np.floor(days)
days_frac = days - days_int
return cls._jd19800106 + days_int, days_frac
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
if scale != "gps":
raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
if np.any(jd1 + jd2 < cls._jd19800106):
raise ValueError(f"Julian Day exceeds the GPS time start date of 6-Jan-1980 (JD {cls._jd19800106})")
# See Time.jd_int for explanation
_delta = jd1 - (np.floor(jd1 + jd2 - 0.5) + 0.5)
days_int = jd1 - _delta - cls._jd19800106
days_frac = jd2 + _delta
return (days_int + days_frac) * Unit.day2second
@register_format
class TimeJulianYear(TimeFormat):
""" Time as year with decimal number. (Ex: 2000.0). Fixed year length."""
fmt = "jyear"
unit = ("julian_year",)
_jd2000 = 2_451_545.0
_j2000 = 2000
@classmethod
def _to_jds(cls, val, val2=None, scale=None):
"""Based on epj2jd.for from SOFA library"""
if val2 is not None:
raise ValueError(f"val2 should be None (not {val2}) for format {fmt}")
int_part, fraction = np.divmod((val - cls._j2000) * Unit.julian_year2day, 1)
return cls._jd2000 + int_part, fraction
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
"""Based on epj.for from SOFA library"""
return cls._j2000 + ((jd1 - cls._jd2000) + jd2) * Unit.day2julian_year
@register_format
class TimeDecimalYear(TimeFormat):
"""Time as year with decimal number. (Ex: 2000.0). Variable year length."""
fmt = "decimalyear"
unit = None # Year length is variable so this does not make sense to apply one value
@classmethod
def _to_jds(cls, val, val2=None, scale=None):
if val2 is not None:
raise ValueError(f"val2 should be None (not {val2}) for format {fmt}")
if scale is None:
raise ValueError(f"scale must be defined for format {fmt}")
try:
return np.array([cls._dy2jd(t, scale) for t in val]).T
except TypeError:
return cls._dy2jd(val, scale)
@classmethod
@lru_cache()
def _dy2jd(cls, decimalyear, scale):
year_int = int(decimalyear)
year_frac = decimalyear - year_int
t_start_of_year = TimeArray.create(datetime(year_int, 1, 1), scale=scale, fmt="datetime")
days = year_frac * cls._year2days(year_int, scale)
jd = t_start_of_year.jd1 + days # t_start.jd2 is zero for start of year
jd1 = int(jd)
jd2 = jd - jd1
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
try:
return np.array([cls._jd2dy(j1, j2, scale) for j1, j2 in zip(jd1, jd2)]).T
except TypeError:
return cls._jd2dy(jd1, jd2, scale)
@classmethod
@lru_cache()
def _jd2dy(cls, jd1, jd2, scale):
year = TimeDateTime._jd2dt(jd1, jd2).year
t_start_of_year = TimeArray.create(datetime(year, 1, 1), scale=scale, fmt="datetime")
year2days = cls._year2days(year, scale)
days = jd1 - t_start_of_year.jd1 + jd2 # t_start.jd2 is zero for start of year
decimalyear = year + days / year2days
return decimalyear
@classmethod
@lru_cache()
def _year2days(cls, year, scale):
"""Computes number of days in year, including leap seconds"""
t_start = TimeArray.create(datetime(year, 1, 1), scale=scale, fmt="datetime")
t_end = TimeArray.create(datetime(year + 1, 1, 1), scale=scale, fmt="datetime")
if scale == "utc":
# Account for leap seconds in UTC by differencing one TAI year
t_start = getattr(t_start, "tai")
t_end = getattr(t_end, "tai")
return (t_end - t_start).days
@register_format
class TimeYyDddSssss(TimeFormat):
""" Time as 2 digit year, doy and second of day.
Text based format "yy:ddd:sssss"
yy - decimal year without century
ddd - zero padded decimal day of year
sssss - zero padded seconds since midnight
Note - Does not support leap seconds
Returns:
Time converted to yydddsssss format
"""
fmt = "yydddsssss"
unit = None
@classmethod
def _to_jds(cls, val, val2=None, scale=None):
if val2 is not None:
raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
try:
return np.array([cls._yds2jd(v) for v in val]).T
except TypeError:
return cls._yds2jd(val)
@classmethod
@lru_cache()
def _yds2jd(cls, val):
dt = datetime.strptime(val[:7], "%y:%j:") + timedelta(seconds=float(val[7:]))
return TimeDateTime._dt2jd(dt)
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
try:
return np.array([cls._jd2yds(j1, j2) for j1, j2 in zip(jd1, jd2)])
except TypeError:
return cls._jd2yds(jd1, jd2)
@classmethod
@lru_cache()
def _jd2yds(cls, jd1, jd2):
dt = TimeDateTime._jd2dt(jd1, jd2)
delta = (dt - datetime(dt.year, dt.month, dt.day)).seconds
return dt.strftime("%y:%j:") + str(delta).zfill(5)
@register_format
class TimeYyyyDddSssss(TimeFormat):
""" Time as 4-digit year, doy and second of day.
Text based format "yyyy:ddd:sssss"
yyyy - decimal year with century
ddd - zero padded decimal day of year
sssss - zero padded seconds since midnight
Note - Does not support leap seconds
Returns:
Time converted to yyyydddsssss format
"""
fmt = "yyyydddsssss"
unit = None
@classmethod
def _to_jds(cls, val, val2=None, scale=None):
if val2 is not None:
raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
try:
return np.array([cls._yds2jd(v) for v in val]).T
except TypeError:
return cls._yds2jd(val)
@classmethod
@lru_cache()
def _yds2jd(cls, val):
dt = datetime.strptime(val[:9], "%Y:%j:") + timedelta(seconds=float(val[9:]))
return TimeDateTime._dt2jd(dt)
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
try:
return np.array([cls._jd2yds(j1, j2) for j1, j2 in zip(jd1, jd2)])
except TypeError:
return cls._jd2yds(jd1, jd2)
@classmethod
@lru_cache()
def _jd2yds(cls, jd1, jd2):
dt = TimeDateTime._jd2dt(jd1, jd2)
delta = (dt - datetime(dt.year, dt.month, dt.day)).seconds
return dt.strftime("%Y:%j:") + str(delta).zfill(5)
# Text based time formats
class TimeStr(TimeFormat):
""" Base class for text based time. """
unit = None
_dt_fmt = None
@classmethod
def _to_jds(cls, val, val2=None, scale=None):
if val2 is not None:
raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
if isinstance(val, str):
return TimeDateTime._dt2jd(cls._str2dt(val))
else:
return np.array([TimeDateTime._dt2jd(cls._str2dt(isot)) for isot in val]).T
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
try:
return np.array([cls._dt2str(TimeDateTime._jd2dt(j1, j2)) for j1, j2 in zip(jd1, jd2)])
except TypeError:
return cls._dt2str(TimeDateTime._jd2dt(jd1, jd2))
@classmethod
@lru_cache()
def _dt2str(cls, dt):
return dt.strftime(cls._dt_fmt)
@classmethod
@lru_cache()
def _str2dt(cls, time_str):
# fractional parts are optional
main_str, _, fraction = time_str.partition(".")
if fraction and set(fraction) != "0":
# Truncate fraction to 6 digits due to limits of datetime
frac = float(f"0.{fraction}")
fraction = f"{frac:8.6f}"[2:]
time_str = f"{main_str}.{fraction}"
return datetime.strptime(time_str, cls._dt_fmt)
else:
fmt_str, _, _ = cls._dt_fmt.partition(".")
return datetime.strptime(main_str, fmt_str)
@register_format
class TimeIsot(TimeStr):
"""ISO 8601 compliant date-time format “YYYY-MM-DDTHH:MM:SS.sss…” """
fmt = "isot"
_dt_fmt = "%Y-%m-%dT%H:%M:%S.%f"
@register_format
class TimeIso(TimeStr):
"""ISO 8601 compliant date-time format “YYYY-MM-DD HH:MM:SS.sss…” without the T"""
fmt = "iso"
_dt_fmt = "%Y-%m-%d %H:%M:%S.%f"
@register_format
class TimeYearDoy(TimeStr):
fmt = "yday"
_dt_fmt = "%Y:%j:%H:%M:%S.%f"
@register_format
class TimeDate(TimeStr):
fmt = "date"
_dt_fmt = "%Y-%m-%d"
# Time Delta Formats
@register_format
class TimeDeltaJD(TimeDeltaFormat):
"""Time delta as Julian days"""
fmt = "jd"
unit = ("day",)
@classmethod
def _to_jds(cls, val, val2, scale=None):
if val2 is None:
try:
val2 = np.zeros(val.shape)
except AttributeError:
val2 = 0
_delta = val - (np.floor(val + val2))
jd1 = val - _delta
jd2 = val2 + _delta
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
return jd1 + jd2
@register_format
class TimeDeltaSec(TimeDeltaFormat):
"""Time delta in seconds"""
fmt = "seconds"
unit = ("second",)
@classmethod
def _to_jds(cls, val, val2, scale=None):
if val2 is None:
try:
val2 = np.zeros(val.shape)
except AttributeError:
val2 = 0
val *= Unit.second2day
val2 *= Unit.second2day
_delta = val - (np.floor(val + val2))
jd1 = val - _delta
jd2 = val2 + _delta
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
return (jd1 + jd2) * Unit.day2second
@register_format
class TimeDeltaDay(TimeDeltaFormat):
"""Time delta in days"""
fmt = "days"
unit = ("day",)
@classmethod
def _to_jds(cls, val, val2, scale=None):
if val2 is None:
try:
val2 = np.zeros(val.shape)
except AttributeError:
val2 = 0
_delta = val - (np.floor(val + val2))
jd1 = val - _delta
jd2 = val2 + _delta
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
return jd1 + jd2
@register_format
class TimeDeltaDateTime(TimeDeltaFormat):
"""Time delta as datetime's timedelta"""
fmt = "timedelta"
unit = None
@classmethod
def _to_jds(cls, val, val2, scale=None):
if val2 is None:
try:
val2 = [timedelta(seconds=0)] * len(val)
except TypeError:
val2 = timedelta(seconds=0)
try:
days = (val + val2).total_seconds() * Unit.second2day
except AttributeError:
seconds = [v1.total_seconds() + v2.total_seconds() for v1, v2 in zip(val, val2)]
days = np.array(seconds) * Unit.second2day
jd1 = np.floor(days)
jd2 = days - jd1
return jd1, jd2
@classmethod
def _from_jds(cls, jd1, jd2, scale=None):
try:
return timedelta(days=jd1 + jd2)
except TypeError:
return np.array([timedelta(days=j1 + j2) for j1, j2 in zip(jd1, jd2)])
#######################################################################################################################
# Execute on import
#######################################################################################################################
_TAIUTC = read_tai_utc()
import sys
from collections import deque

input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)

n = int(input())
p = list(map(int, input().split()))

# Print the MEX (minimum excluded non-negative integer) of each prefix of p.
_min = 0                           # current MEX candidate
used = set()                       # values seen so far
num = deque(range(1, 200000 + 2))  # larger candidates, consumed in increasing order
for v in p:
    used.add(v)
    if _min not in used:
        print(_min)
    else:
        # The candidate was taken; advance to the next value not yet used.
        # The MEX never decreases, so the deque never needs rewinding.
        while num:
            candidate = num.popleft()
            if candidate not in used:
                _min = candidate
                break
        print(_min)
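# Example (illustrative): for the input "3\n2 0 1" the program prints 0, 1, 3,
# the MEX after each prefix [2], [2, 0] and [2, 0, 1].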
class Solution:
    def plusOne(self, digits):
        """Add one to a non-negative integer given as a list of digits."""
        length = len(digits)
        for i in range(length - 1, -1, -1):
            if digits[i] < 9:
                digits[i] += 1  # no carry needed beyond this digit
                return digits
            digits[i] = 0  # 9 rolls over to 0 and the carry moves left
        return [1] + [0] * length  # all digits were 9, e.g. 999 -> 1000
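# Example usage:
#     Solution().plusOne([1, 2, 9])  # -> [1, 3, 0]
#     Solution().plusOne([9, 9])     # -> [1, 0, 0]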
# coding: utf-8
# **Appendix D – Autodiff**
# _This notebook contains toy implementations of various autodiff techniques, to explain how they work._
# # Setup
# First, let's make sure this notebook works well in both python 2 and 3:
# In[1]:
# To support both python 2 and python 3
from __future__ import absolute_import, division, print_function, unicode_literals
# # Introduction
# Suppose we want to compute the gradients of the function $f(x,y)=x^2y + y + 2$ with regards to the parameters x and y:
# In[2]:
def f(x,y):
return x*x*y + y + 2
# One approach is to solve this analytically:
#
# $\dfrac{\partial f}{\partial x} = 2xy$
#
# $\dfrac{\partial f}{\partial y} = x^2 + 1$
# In[3]:
def df(x,y):
return 2*x*y, x*x + 1
# So for example $\dfrac{\partial f}{\partial x}(3,4) = 24$ and $\dfrac{\partial f}{\partial y}(3,4) = 10$.
# In[4]:
df(3, 4)
# Perfect! We can also find the equations for the second order derivatives (also called Hessians):
#
# $\dfrac{\partial^2 f}{\partial x \partial x} = \dfrac{\partial (2xy)}{\partial x} = 2y$
#
# $\dfrac{\partial^2 f}{\partial x \partial y} = \dfrac{\partial (2xy)}{\partial y} = 2x$
#
# $\dfrac{\partial^2 f}{\partial y \partial x} = \dfrac{\partial (x^2 + 1)}{\partial x} = 2x$
#
# $\dfrac{\partial^2 f}{\partial y \partial y} = \dfrac{\partial (x^2 + 1)}{\partial y} = 0$
# At x=3 and y=4, these Hessians are respectively 8, 6, 6, 0. Let's use the equations above to compute them:
# In[5]:
def d2f(x, y):
return [2*y, 2*x], [2*x, 0]
# In[6]:
d2f(3, 4)
# Perfect, but this requires some mathematical work. It is not too hard in this case, but for a deep neural network, it is practically impossible to compute the derivatives this way. So let's look at various ways to automate this!
# # Numeric differentiation
# Here, we compute an approximation of the gradients using the equation: $\dfrac{\partial f}{\partial x} = \displaystyle{\lim_{\epsilon \to 0}}\dfrac{f(x+\epsilon, y) - f(x, y)}{\epsilon}$ (and there is a similar definition for $\dfrac{\partial f}{\partial y}$).
# In[7]:
def gradients(func, vars_list, eps=0.0001):
partial_derivatives = []
base_func_eval = func(*vars_list)
for idx in range(len(vars_list)):
tweaked_vars = vars_list[:]
tweaked_vars[idx] += eps
tweaked_func_eval = func(*tweaked_vars)
derivative = (tweaked_func_eval - base_func_eval) / eps
partial_derivatives.append(derivative)
return partial_derivatives
# In[8]:
def df(x, y):
return gradients(f, [x, y])
# In[9]:
df(3, 4)
# It works well!
# The good news is that it is pretty easy to compute the Hessians. First let's create functions that compute the first order derivatives (also called Jacobians):
# In[10]:
def dfdx(x, y):
return gradients(f, [x,y])[0]
def dfdy(x, y):
return gradients(f, [x,y])[1]
dfdx(3., 4.), dfdy(3., 4.)
# Now we can simply apply the `gradients()` function to these functions:
# In[11]:
def d2f(x, y):
return [gradients(dfdx, [x, y]), gradients(dfdy, [x, y])]
# In[12]:
d2f(3, 4)
# So everything works well, but the result is approximate, and computing the gradients of a function with regards to $n$ variables requires calling that function $n$ times. In deep neural nets, there are often thousands of parameters to tweak using gradient descent (which requires computing the gradients of the loss function with regards to each of these parameters), so this approach would be much too slow.
# ## Implementing a Toy Computation Graph
# Rather than this numerical approach, let's implement some symbolic autodiff techniques. For this, we will need to define classes to represent constants, variables and operations.
# In[13]:
class Const(object):
def __init__(self, value):
self.value = value
def evaluate(self):
return self.value
def __str__(self):
return str(self.value)
class Var(object):
def __init__(self, name, init_value=0):
self.value = init_value
self.name = name
def evaluate(self):
return self.value
def __str__(self):
return self.name
class BinaryOperator(object):
def __init__(self, a, b):
self.a = a
self.b = b
class Add(BinaryOperator):
def evaluate(self):
return self.a.evaluate() + self.b.evaluate()
def __str__(self):
return "{} + {}".format(self.a, self.b)
class Mul(BinaryOperator):
def evaluate(self):
return self.a.evaluate() * self.b.evaluate()
def __str__(self):
return "({}) * ({})".format(self.a, self.b)
# Good, now we can build a computation graph to represent the function $f$:
# In[14]:
x = Var("x")
y = Var("y")
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
# And we can run this graph to compute $f$ at any point, for example $f(3, 4)$.
# In[15]:
x.value = 3
y.value = 4
f.evaluate()
# Perfect, it found the ultimate answer.
# ## Computing gradients
# The autodiff methods we will present below are all based on the *chain rule*.
# Suppose we have two functions $u$ and $v$, and we apply them sequentially to some input $x$, and we get the result $z$. So we have $z = v(u(x))$, which we can rewrite as $z = v(s)$ and $s = u(x)$. Now we can apply the chain rule to get the partial derivative of the output $z$ with regards to the input $x$:
#
# $ \dfrac{\partial z}{\partial x} = \dfrac{\partial s}{\partial x} \cdot \dfrac{\partial z}{\partial s}$
# Now if $z$ is the output of a sequence of functions which have intermediate outputs $s_1, s_2, ..., s_n$, the chain rule still applies:
#
# $ \dfrac{\partial z}{\partial x} = \dfrac{\partial s_1}{\partial x} \cdot \dfrac{\partial s_2}{\partial s_1} \cdot \dfrac{\partial s_3}{\partial s_2} \cdot \dots \cdot \dfrac{\partial s_{n-1}}{\partial s_{n-2}} \cdot \dfrac{\partial s_n}{\partial s_{n-1}} \cdot \dfrac{\partial z}{\partial s_n}$
# In forward mode autodiff, the algorithm computes these terms "forward" (i.e., in the same order as the computations required to compute the output $z$), that is from left to right: first $\dfrac{\partial s_1}{\partial x}$, then $\dfrac{\partial s_2}{\partial s_1}$, and so on. In reverse mode autodiff, the algorithm computes these terms "backwards", from right to left: first $\dfrac{\partial z}{\partial s_n}$, then $\dfrac{\partial s_n}{\partial s_{n-1}}$, and so on.
#
# For example, suppose you want to compute the derivative of the function $z(x)=\sin(x^2)$ at x=3, using forward mode autodiff. The algorithm would first compute the partial derivative $\dfrac{\partial s_1}{\partial x}=\dfrac{\partial x^2}{\partial x}=2x=6$. Next, it would compute $\dfrac{\partial z}{\partial x}=\dfrac{\partial s_1}{\partial x}\cdot\dfrac{\partial z}{\partial s_1}= 6 \cdot \dfrac{\partial \sin(s_1)}{\partial s_1}=6 \cdot \cos(s_1)=6 \cdot \cos(3^2)\approx-5.46$.
# Let's verify this result using the `gradients()` function defined earlier:
# In[16]:
from math import sin
def z(x):
return sin(x**2)
gradients(z, [3])
# Looks good. Now let's do the same thing using reverse mode autodiff. This time the algorithm would start from the right-hand side so it would compute $\dfrac{\partial z}{\partial s_1} = \dfrac{\partial \sin(s_1)}{\partial s_1}=\cos(s_1)=\cos(3^2)\approx -0.91$. Next it would compute $\dfrac{\partial z}{\partial x}=\dfrac{\partial s_1}{\partial x}\cdot\dfrac{\partial z}{\partial s_1} \approx \dfrac{\partial s_1}{\partial x} \cdot -0.91 = \dfrac{\partial x^2}{\partial x} \cdot -0.91=2x \cdot -0.91 = 6\cdot-0.91\approx-5.46$.
# Of course both approaches give the same result (except for rounding errors), and with a single input and output they involve the same number of computations. But when there are several inputs or outputs, they can have very different performance. Indeed, if there are many inputs, the right-most terms will be needed to compute the partial derivatives with regards to each input, so it is a good idea to compute these right-most terms first. That means using reverse-mode autodiff. This way, the right-most terms can be computed just once and used to compute all the partial derivatives. Conversely, if there are many outputs, forward-mode is generally preferable because the left-most terms can be computed just once to compute the partial derivatives of the different outputs. In Deep Learning, there are typically thousands of model parameters, meaning there are lots of inputs, but few outputs. In fact, there is generally just one output during training: the loss. This is why reverse mode autodiff is used in TensorFlow and all major Deep Learning libraries.
# There's one additional complexity in reverse mode autodiff: the value of $s_i$ is generally required when computing $\dfrac{\partial s_{i+1}}{\partial s_i}$, and computing $s_i$ requires first computing $s_{i-1}$, which requires computing $s_{i-2}$, and so on. So basically, a first pass forward through the network is required to compute $s_1$, $s_2$, $s_3$, $\dots$, $s_{n-1}$ and $s_n$, and then the algorithm can compute the partial derivatives from right to left. Storing all the intermediate values $s_i$ in RAM is sometimes a problem, especially when handling images, and when using GPUs which often have limited RAM: to limit this problem, one can reduce the number of layers in the neural network, or configure TensorFlow to make it swap these values from GPU RAM to CPU RAM. Another approach is to only cache every other intermediate value, $s_1$, $s_3$, $s_5$, $\dots$, $s_{n-4}$, $s_{n-2}$ and $s_n$. This means that when the algorithm computes the partial derivatives, if an intermediate value $s_i$ is missing, it will need to recompute it based on the previous intermediate value $s_{i-1}$. This trades off CPU for RAM (if you are interested, check out [this paper](https://pdfs.semanticscholar.org/f61e/9fd5a4878e1493f7a6b03774a61c17b7e9a4.pdf)).
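# To make this trade-off concrete, here is a minimal sketch of the "cache every other value" idea, assuming a plain chain of unary functions: the lists `fs` and `dfs` are hypothetical placeholders holding the functions $f_i$ (with $s_i = f_i(s_{i-1})$) and their derivatives. For simplicity this variant checkpoints the even-indexed values $s_0, s_2, s_4, \dots$ and recomputes the odd ones on the way back:
def chain_gradient_with_checkpoints(fs, dfs, x):
    cached = {0: x}  # s_0 = x; only every other intermediate value is kept
    s = x
    for i, f in enumerate(fs, start=1):  # forward pass
        s = f(s)
        if i % 2 == 0:
            cached[i] = s
    grad = 1.0
    for i in range(len(fs), 0, -1):  # backward pass, right to left
        if i - 1 in cached:
            s_prev = cached[i - 1]
        else:
            # missing intermediate value: recompute it from the checkpoint just below
            s_prev = fs[i - 2](cached[i - 2])
        grad *= dfs[i - 1](s_prev)  # multiply by ds_i/ds_{i-1}, evaluated at s_{i-1}
    return grad

from math import sin, cos
chain_gradient_with_checkpoints([lambda s: s**2, sin], [lambda s: 2 * s, cos], 3.)  # ≈ -5.46, as above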
# ### Forward mode autodiff
# In[17]:
Const.gradient = lambda self, var: Const(0)
Var.gradient = lambda self, var: Const(1) if self is var else Const(0)
Add.gradient = lambda self, var: Add(self.a.gradient(var), self.b.gradient(var))
Mul.gradient = lambda self, var: Add(Mul(self.a, self.b.gradient(var)), Mul(self.a.gradient(var), self.b))
x = Var(name="x", init_value=3.)
y = Var(name="y", init_value=4.)
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
dfdx = f.gradient(x) # 2xy
dfdy = f.gradient(y) # x² + 1
# In[18]:
dfdx.evaluate(), dfdy.evaluate()
# Since the output of the `gradient()` method is fully symbolic, we are not limited to first-order derivatives; we can also compute second-order derivatives, and so on:
# In[19]:
d2fdxdx = dfdx.gradient(x) # 2y
d2fdxdy = dfdx.gradient(y) # 2x
d2fdydx = dfdy.gradient(x) # 2x
d2fdydy = dfdy.gradient(y) # 0
# In[20]:
[[d2fdxdx.evaluate(), d2fdxdy.evaluate()],
[d2fdydx.evaluate(), d2fdydy.evaluate()]]
# Note that the result is now exact, not an approximation (up to the limit of the machine's float precision, of course).
# ### Forward mode autodiff using dual numbers
# A nice way to apply forward mode autodiff is to use [dual numbers](https://en.wikipedia.org/wiki/Dual_number). In short, a dual number $z$ has the form $z = a + b\epsilon$, where $a$ and $b$ are real numbers, and $\epsilon$ is an infinitesimal number, positive but smaller than every positive real number, and such that $\epsilon^2=0$.
# It can be shown that $f(x + \epsilon) = f(x) + \dfrac{\partial f}{\partial x}\epsilon$, so simply by computing $f(x + \epsilon)$ we get both the value of $f(x)$ and the partial derivative of $f$ with regards to $x$.
# Dual numbers have their own arithmetic rules, which are generally quite natural. For example:
#
# **Addition**
#
# $(a_1 + b_1\epsilon) + (a_2 + b_2\epsilon) = (a_1 + a_2) + (b_1 + b_2)\epsilon$
#
# **Subtraction**
#
# $(a_1 + b_1\epsilon) - (a_2 + b_2\epsilon) = (a_1 - a_2) + (b_1 - b_2)\epsilon$
#
# **Multiplication**
#
# $(a_1 + b_1\epsilon) \times (a_2 + b_2\epsilon) = (a_1 a_2) + (a_1 b_2 + a_2 b_1)\epsilon + b_1 b_2\epsilon^2 = (a_1 a_2) + (a_1b_2 + a_2b_1)\epsilon$
#
# **Division**
#
# $\dfrac{a_1 + b_1\epsilon}{a_2 + b_2\epsilon} = \dfrac{a_1 + b_1\epsilon}{a_2 + b_2\epsilon} \cdot \dfrac{a_2 - b_2\epsilon}{a_2 - b_2\epsilon} = \dfrac{a_1 a_2 + (b_1 a_2 - a_1 b_2)\epsilon - b_1 b_2\epsilon^2}{{a_2}^2 + (a_2 b_2 - a_2 b_2)\epsilon - {b_2}^2\epsilon^2} = \dfrac{a_1}{a_2} + \dfrac{b_1 a_2 - a_1 b_2}{{a_2}^2}\epsilon$
#
# **Power**
#
# $(a + b\epsilon)^n = a^n + (n a^{n-1}b)\epsilon$
#
# etc.
# Let's create a class to represent dual numbers, and implement a few operations (addition and multiplication). You can try adding some more if you want (one possible sketch follows the multiplication example below).
# In[21]:
class DualNumber(object):
def __init__(self, value=0.0, eps=0.0):
self.value = value
self.eps = eps
def __add__(self, b):
return DualNumber(self.value + self.to_dual(b).value,
self.eps + self.to_dual(b).eps)
def __radd__(self, a):
return self.to_dual(a).__add__(self)
def __mul__(self, b):
return DualNumber(self.value * self.to_dual(b).value,
self.eps * self.to_dual(b).value + self.value * self.to_dual(b).eps)
def __rmul__(self, a):
return self.to_dual(a).__mul__(self)
def __str__(self):
if self.eps:
return "{:.1f} + {:.1f}ε".format(self.value, self.eps)
else:
return "{:.1f}".format(self.value)
def __repr__(self):
return str(self)
@classmethod
def to_dual(cls, n):
if hasattr(n, "value"):
return n
else:
return cls(n)
# $3 + (3 + 4 \epsilon) = 6 + 4\epsilon$
# In[22]:
3 + DualNumber(3, 4)
# $(3 + 4ε)\times(5 + 7ε)$ = $3 \times 5 + 3 \times 7ε + 4ε \times 5 + 4ε \times 7ε$ = $15 + 21ε + 20ε + 28ε^2$ = $15 + 41ε + 28 \times 0$ = $15 + 41ε$
# In[23]:
DualNumber(3, 4) * DualNumber(5, 7)
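# As a sketch of the "try adding some more" exercise above, subtraction and integer powers can be patched in (the same monkey-patching style used for the `gradient()` methods earlier), following the subtraction and power rules listed above:
DualNumber.__sub__ = lambda self, b: DualNumber(
    self.value - self.to_dual(b).value,
    self.eps - self.to_dual(b).eps)
DualNumber.__pow__ = lambda self, n: DualNumber(
    self.value ** n,
    n * self.value ** (n - 1) * self.eps)  # (a + bε)ⁿ = aⁿ + (n·aⁿ⁻¹·b)ε
DualNumber(3, 4) - DualNumber(5, 7)  # -2.0 + -3.0ε
DualNumber(3, 4) ** 2                # 9.0 + 24.0ε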
# Now let's see if the dual numbers work with our toy computation framework:
# In[24]:
x.value = DualNumber(3.0)
y.value = DualNumber(4.0)
f.evaluate()
# Yep, sure works. Now let's use this to compute the partial derivatives of $f$ with regards to $x$ and $y$ at x=3 and y=4:
# In[25]:
x.value = DualNumber(3.0, 1.0) # 3 + ε
y.value = DualNumber(4.0) # 4
dfdx = f.evaluate().eps
x.value = DualNumber(3.0) # 3
y.value = DualNumber(4.0, 1.0) # 4 + ε
dfdy = f.evaluate().eps
# In[26]:
dfdx
# In[27]:
dfdy
# Great! However, in this implementation we are limited to first order derivatives.
# Now let's look at reverse mode.
# ### Reverse mode autodiff
# Let's rewrite our toy framework to add reverse mode autodiff:
# In[28]:
class Const(object):
def __init__(self, value):
self.value = value
def evaluate(self):
return self.value
def backpropagate(self, gradient):
pass
def __str__(self):
return str(self.value)
class Var(object):
def __init__(self, name, init_value=0):
self.value = init_value
self.name = name
self.gradient = 0
def evaluate(self):
return self.value
def backpropagate(self, gradient):
self.gradient += gradient
def __str__(self):
return self.name
class BinaryOperator(object):
def __init__(self, a, b):
self.a = a
self.b = b
class Add(BinaryOperator):
def evaluate(self):
self.value = self.a.evaluate() + self.b.evaluate()
return self.value
def backpropagate(self, gradient):
self.a.backpropagate(gradient)
self.b.backpropagate(gradient)
def __str__(self):
return "{} + {}".format(self.a, self.b)
class Mul(BinaryOperator):
def evaluate(self):
self.value = self.a.evaluate() * self.b.evaluate()
return self.value
def backpropagate(self, gradient):
self.a.backpropagate(gradient * self.b.value)
self.b.backpropagate(gradient * self.a.value)
def __str__(self):
return "({}) * ({})".format(self.a, self.b)
# In[29]:
x = Var("x", init_value=3)
y = Var("y", init_value=4)
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
result = f.evaluate()
f.backpropagate(1.0)
# In[30]:
print(f)
# In[31]:
result
# In[32]:
x.gradient
# In[33]:
y.gradient
# Again, in this implementation the outputs are just numbers, not symbolic expressions, so we are limited to first order derivatives. However, we could have made the `backpropagate()` methods return symbolic expressions rather than values (e.g., return `Add(2,3)` rather than 5). This would make it possible to compute second order gradients (and beyond). This is what TensorFlow does, as do all the major libraries that implement autodiff.
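# For instance, a symbolic variant might look like the following rough sketch (one possible approach, not any particular library's implementation): `backpropagate()` builds `Add`/`Mul` nodes instead of multiplying floats, and the seed gradient becomes `Const(1.0)`:
def symbolic_var_backpropagate(self, gradient):
    # accumulate into an Add tree instead of a float (self.gradient starts at 0, which is falsy)
    self.gradient = Add(self.gradient, gradient) if self.gradient else gradient
def symbolic_mul_backpropagate(self, gradient):
    self.a.backpropagate(Mul(gradient, self.b))  # ∂(ab)/∂a = b
    self.b.backpropagate(Mul(gradient, self.a))  # ∂(ab)/∂b = a
# With these patched in, `x.gradient` after `f.backpropagate(Const(1.0))` would be an
# expression node, so it could be evaluated or differentiated a second time.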
# ### Reverse mode autodiff using TensorFlow
# In[34]:
import tensorflow as tf
# In[35]:
tf.reset_default_graph()
x = tf.Variable(3., name="x")
y = tf.Variable(4., name="y")
f = x*x*y + y + 2
jacobians = tf.gradients(f, [x, y])
init = tf.global_variables_initializer()
with tf.Session() as sess:
init.run()
f_val, jacobians_val = sess.run([f, jacobians])
f_val, jacobians_val
# Since everything is symbolic, we can compute second order derivatives, and beyond. However, when we compute the derivative of a tensor with regards to a variable that it does not depend on, instead of returning 0.0, the `gradients()` function returns None, which cannot be evaluated by `sess.run()`. So beware of `None` values. Here we just replace them with zero tensors.
# In[36]:
hessians_x = tf.gradients(jacobians[0], [x, y])
hessians_y = tf.gradients(jacobians[1], [x, y])
def replace_none_with_zero(tensors):
return [tensor if tensor is not None else tf.constant(0.)
for tensor in tensors]
hessians_x = replace_none_with_zero(hessians_x)
hessians_y = replace_none_with_zero(hessians_y)
init = tf.global_variables_initializer()
with tf.Session() as sess:
init.run()
hessians_x_val, hessians_y_val = sess.run([hessians_x, hessians_y])
hessians_x_val, hessians_y_val
# And that's all folks! Hope you enjoyed this notebook.
|
python
|
from django.db import models
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model as user_model
User = user_model()
from apps.lobby.main.models import Lobby
class Room(models.Model):
title = models.CharField(max_length=30, primary_key=True)
description = models.CharField(max_length=200)
members = models.ManyToManyField(User, related_name='room_members')
creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='room_creator')
admins = models.ManyToManyField(User, blank=True, related_name='room_admins')
onlineUsers = models.ManyToManyField(User, blank=True, related_name='room_online_users')
requests = models.ManyToManyField(User, blank=True, related_name='room_requests')
    lobby = models.ForeignKey(Lobby, on_delete=models.CASCADE, related_name='room_lobby')
def __str__(self):
return self.title
|
python
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Aarón Abraham Velasco Alvarez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import state_table, reserved
def print_error(verbose, message, line):
if verbose == 1:
print "{} on line {}".format(message, line)
def get_event(input):
try:
if input.isalpha():
key = "alpha"
elif unicode(input).isnumeric():
key = "numeric"
else:
key = input
except UnicodeDecodeError:
key = get_event(input.decode('unicode_escape'))
return state_table.input.get(key, len(state_table.input))
def get_tokens(string, verbose):
tokens = []
result = True
next_state = lambda state, event : state_table.table[state][0][event]
is_final = lambda state : state_table.table[state][1]
token = lambda state : state_table.table[state][2]
error = lambda state : state_table.table[state][3]
error_message = lambda state : state_table.errors[state_table.table[state][3]]
current_state = 0
lexeme = ''
index = 0
count = 0
checkpoint = 0
line = 1
new_lines = 0
while index < len(string):
char = string[index]
if char == "\n":
if current_state == 0:
line += 1
else:
new_lines += 1
event = get_event(char)
if verbose >= 3:
print "debug:", line, current_state, char, next_state(current_state, event)
if next_state(current_state, event) > 0:
current_state = next_state(current_state, event)
index += 1
lexeme += char
checkpoint = current_state
elif next_state(current_state, event) < 0:
current_state = next_state(current_state, event)
index += 1
count += 1
else:
if current_state > 0:
if is_final(current_state):
tokens.append((reserved.words.get(lexeme, token(current_state)), lexeme, line))
else:
print_error(verbose, error_message(current_state), line)
result = False
current_state = 0
lexeme = ''
line += new_lines
new_lines = 0
if char.isspace():
index += 1
elif current_state < 0:
if is_final(current_state):
tokens.append((reserved.words.get(lexeme, token(checkpoint)), lexeme, line))
lexeme = ''
current_state = token(current_state)
index -= count
count = 0
checkpoint = 0
new_lines = 0
else:
if not char.isspace():
tokens.append((reserved.words.get(char, None), char, line))
index += 1
if current_state > 0 and not is_final(current_state):
print_error(verbose, error_message(current_state), line)
result = False
elif current_state > 0 and is_final(current_state):
tokens.append((reserved.words.get(lexeme, token(current_state)), lexeme, line))
return tokens, result
|
python
|
""" Npy module
This module implements all npy file management class and methods
...
Classes
-------
Npy
Class that manages npy file's reading and writing functionalities
"""
import numpy as np
import os
class Npy(object):
""" Class that manages npy file's reading and writing functionalities
Methods
-------
read(path: str)
Method that reads all npy files in the dir's path. Npy files in this dir
should be form one numpy array when read, this method is going to create
one np array with the npy files found
write(path: str, npy_fname: str, npy: np.ndarray)
        Method that writes a new npy file in the path passed with the npy_fname as
the file's name
count_npy_files(path: str)
Method that counts the number of npy files in a dir
"""
def read(self, path: str) -> np.ndarray:
""" Method to read all npy files in the path passed. It expects npy fi-
-les to be in the following format XXXX_<name>.npy, where XXXX is a
number ordering the files
Parameters
----------
path: str
path to the folder containing the npy files
Returns
--------
npy_arrays: np.ndarray
np.array with shape (N, S) where N is the number of files in
the path and S is the shape of each npy file read
"""
npy_fnames = os.listdir(path) # getting all npy files in path
assert len(npy_fnames) != 0, "No files in path {0}".format(path)
npy_fnames = [ # making sure each file is npy with index prefix
fn for fn in npy_fnames if self.__assert_is_npy(fn)
]
assert len(npy_fnames) == len(os.listdir(path)), "There must be only npy files in path {0}".format(path)
# sorting the files by the index prefix in each file name
npy_fnames = sorted(npy_fnames, key=lambda p: int(p.split("_")[0]))
features = [np.load(os.path.join(path, fname))
for fname in npy_fnames]
features = np.array(features)
return features.reshape((features.shape[0] * features.shape[1],
features.shape[2]))
def write(self, path: str, npy_fname: str, npy: np.ndarray) -> None:
""" Method to write a npy file in the path passed
Parameters:
path: str
path to the folder to write the new npy file
npy_fname: str
name of the new npy file
npy: np.ndarray
content of the new npy file
"""
        assert self.__assert_is_npy(npy_fname), "npy_fname must end with .npy"
if os.path.isdir(path) is False:
os.mkdir(path)
write_path = os.path.join(path, npy_fname)
np.save(write_path, npy)
def count_npy_files(self, path: str) -> int:
""" Method that counts the number of files in a folder
Parameters
----------
path: str
path to the folder where the method should count the number of npy
files
Returns
-------
int
Returns the number of files in a folder
"""
if os.path.isdir(path) is False:
return 0
return len([
fn for fn in os.listdir(path) if self.__assert_is_npy(fn)
])
def __assert_is_npy(self, fname: str):
""" Method that returns true if the extension of a file name is npy
Parameters
----------
fname: str
a file name
Returns
-------
boolean
Returns True if the extension is npy otherwise False
"""
return "npy" == fname.split(".")[-1]
|
python
|
from machine import I2C
import LIS2MDL
i2c = I2C(1)
mdl = LIS2MDL.LIS2MDL(i2c)
mdl.x()
mdl.get()
|
python
|
#!/usr/bin/env python3
#==================
# gmail_pycamera
#==================
import os
import json
import datetime
import shutil
from devices import CameraMount
from h264tomp4 import h264tomp4
from gmail import Gmail
from command import parse_command
class GmailPiCamera:
"""
gmail_picamera
"""
def __init__(self, video_setting=None, gmail_setting=None, command_setting=None):
self.vsetting = self._load_video_setting(video_setting)
self.gsetting = self._load_gmail_setting(gmail_setting)
self.csetting = self._load_command_setting(command_setting)
self.history_json = './history.json'
self.history = self._load_history()
self.fname = './video.mp4'
self.tfname = './tmp.h264'
self.video_store = './videos'
self.now = None
def _load_video_setting(self, setting_json):
"""
loading setting file
"""
setting = {
"width": 240,
"height": 320,
"store": False
}
if setting_json is not None and os.path.isfile(setting_json):
with open(setting_json) as f:
setting = json.load(f)
return setting
def _load_gmail_setting(self, setting_json):
"""
loading setting file
"""
setting = {
"sender_address": "SENDER_ADDRESS",
"user_addresses": [
"USER_ADDRESS1",
"USER_ADDRESS2"
],
"credential": "CREDENTIAL",
"subject": "SUBJECT",
"message": "MESSAGE"
}
if setting_json is not None and os.path.isfile(setting_json):
with open(setting_json) as f:
setting = json.load(f)
return setting
def _load_command_setting(self, setting_json):
"""
loading setting file
"""
setting = {
"execute": "EXECUTE_COMMAND",
"pan": "PAN_COMMAND",
"tilt": "TILT_COMMAND"
}
if setting_json is not None and os.path.isfile(setting_json):
with open(setting_json) as f:
setting = json.load(f)
return setting
def _load_history(self):
"""
loading history
"""
setting = {}
setting_json = self.history_json
if setting_json is not None and os.path.isfile(setting_json):
with open(setting_json) as f:
setting = json.load(f)
return setting
def save_history(self):
"""
save history
"""
setting_json = self.history_json
with open(setting_json, 'w') as f:
json.dump(self.history, f)
def video(self, motion):
"""
video pan or tilt
"""
self.now = None
width, height, store = self._get_video_setting()
with CameraMount() as camera:
if motion == 'pan':
camera.video_pan(width, height, self.tfname)
elif motion == 'tilt':
camera.video_tilt(width, height, self.tfname)
else:
raise ValueError("Invalid motion value!")
camera.center()
h264tomp4(self.tfname, self.fname)
d = datetime.datetime.today()
year = d.strftime("%Y")
month = d.strftime("%m")
day = d.strftime("%d")
now = d.strftime("%Y%m%d%H%M%S")
if self.vsetting["store"] is True:
if not os.path.isdir(self.video_store):
os.mkdir(self.video_store)
if not os.path.isdir(self.video_store + "/" + year):
os.mkdir(self.video_store + "/" + year)
if not os.path.isdir(self.video_store + "/" + year + "/" + month):
os.mkdir(self.video_store + "/" + year + "/" + month)
shutil.copyfile(self.fname, self.video_store + "/" + year + "/" + month + "/" + now + ".mp4")
self.now = now
def _get_video_setting(self):
"""
get video setting
"""
width = self.vsetting["width"]
height = self.vsetting["height"]
store = self.vsetting["store"]
return (width, height, store)
def send(self, to_index=None):
"""
send gmail
"""
if self.now is not None:
gmail = Gmail(self.gsetting)
gmail.send(to_index, self.fname, self.now + '.mp4')
def receive(self, from_address=None):
"""
receive gmail
"""
gmail = Gmail(self.gsetting)
date, message = gmail.receive(from_address)
return date, message
def parse(self, message=None):
"""
parse command
"""
return parse_command(self.csetting, message)
if __name__ == '__main__':
gcamera = GmailPiCamera('./video_setting.json', './gmail_setting.json', './command_setting.json')
for index, address in enumerate(gcamera.gsetting['user_addresses']):
# receive
date, message = gcamera.receive(address)
# check history
if not (address in gcamera.history and date == gcamera.history[address]):
# save history
gcamera.history[address] = date
gcamera.save_history()
# parse command
command = gcamera.parse(message)
print(command)
# execute command
if command:
gcamera.video(command)
gcamera.send(index)
|
python
|
# Generated by Django 3.2.8 on 2021-11-02 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('annotations', '0004_auto_20211102_1819'),
]
operations = [
migrations.RemoveField(
model_name='imagegroup',
name='group_size',
),
migrations.AlterField(
model_name='image',
name='image_name',
field=models.CharField(max_length=200, unique=True),
),
migrations.AlterField(
model_name='imagegroup',
name='group_name',
field=models.CharField(max_length=200, unique=True),
),
migrations.AlterField(
model_name='organization',
name='organization_name',
field=models.CharField(max_length=40, unique=True),
),
migrations.AlterField(
model_name='user',
name='UNI',
field=models.CharField(max_length=8, unique=True),
),
]
|
python
|
import unittest
from solver import sort_colors
class TestSolver(unittest.TestCase):
def test_sort_colors(self):
self.assertEqual(sort_colors([0, 0, 0, 0, 0, 0]), [0, 0, 0, 0, 0, 0])
self.assertEqual(sort_colors([2, 2, 2, 2, 2, 2]), [2, 2, 2, 2, 2, 2])
self.assertEqual(sort_colors([0, 0, 2, 0, 0, 0]), [0, 0, 0, 0, 0, 2])
self.assertEqual(sort_colors([0, 0, 0, 0, 1, 0]), [0, 0, 0, 0, 0, 1])
self.assertEqual(sort_colors([2, 2, 1, 2, 2, 2]), [1, 2, 2, 2, 2, 2])
self.assertEqual(sort_colors([0, 2, 2, 0, 2, 0]), [0, 0, 0, 2, 2, 2])
self.assertEqual(sort_colors([2, 0, 2, 1, 1, 0]), [0, 0, 1, 1, 2, 2])
if __name__ == "__main__":
unittest.main()
|
python
|
from django import template
from raids.utils import get_instances
register = template.Library()
def get_loot_history(character):
    # Only filter items for the main specialization -> where an entitlement is set for the character's specialization
# Dict for easy checking if instance is already a key
instances = {instance: 0 for instance in get_instances()}
for acquisition in character.loot_history.all():
for entitlement in acquisition.item.entitlement.all():
if entitlement.specialization == character.specialization:
if acquisition.item.encounter.all()[0].instance in instances:
instances[acquisition.item.encounter.all()[0].instance] += 1
    # Return the loot history as a list of (instance, count) tuples
return [(instance, count) for instance, count in instances.items()]
register.filter('get_loot_history', get_loot_history)
|
python
|
# Copyright (c) Open-MMLab. All rights reserved.
from .checkpoint import (_load_checkpoint, load_checkpoint, load_state_dict,
save_checkpoint, weights_to_cpu)
from .dist_utils import get_dist_info, init_dist, master_only
from .hooks import (CheckpointHook, ClosureHook, DistSamplerSeedHook, Hook,
IterTimerHook, LoggerHook, LrUpdaterHook, OptimizerHook,
TensorboardLoggerHook, TextLoggerHook, WandbLoggerHook)
from .log_buffer import LogBuffer
from .parallel_test import parallel_test
from .priority import Priority, get_priority
from .runner import Runner
from .utils import get_host_info, get_time_str, obj_from_dict
__all__ = [
'Runner', 'LogBuffer', 'Hook', 'CheckpointHook', 'ClosureHook',
'LrUpdaterHook', 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook',
'LoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', 'WandbLoggerHook',
'_load_checkpoint', 'load_state_dict', 'load_checkpoint', 'weights_to_cpu',
'save_checkpoint', 'parallel_test', 'Priority', 'get_priority',
'get_host_info', 'get_time_str', 'obj_from_dict', 'init_dist',
'get_dist_info', 'master_only'
]
|
python
|
# Standard imports
from types import SimpleNamespace
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
#Constructing grid of x and q in specific ranges
q_values = np.linspace(0.0, 0.9)
x_values = np.linspace(0.01, 0.9)
#Creating a class of the known parameters
par = SimpleNamespace()
par.theta = -2
par.y = 1
par.p = 0.2
#Utility function
def u(z, par):
""" Calculates utility of assets
Args:
par.theta (int): Degree of risk aversion
z (float or int): Input in utility function
par: SimpleNamespace
Returns:
u (float): Utility of assets
"""
return z**(1+par.theta)/(1+par.theta)
#Premium policy
def pi(q, par):
""" Calculates premium for better coverage
Args:
q (float): Insurance coverage
par.p: Probability that the loss is incurred
par: SimpleNamespace
Returns:
Premium for better coverage
"""
return par.p*q
#Expected utility function if not insured
def V0(x, par):
""" Calculates the expected utility of a non-insured agent with insurance coverage (q) and premium (pi)
Args:
x (float): Monetary loss in the case of a bad outcome
par.y (int): Total assets
par.p (float): Probability that the loss is incurred
par: SimpleNamespace
Returns:
V0 (float): Expected utility from not buying an insurance.
"""
u0_loss = u(par.y-x, par)
u0_win = u(par.y, par)
return par.p*u0_loss + (1-par.p)*u0_win
#Expected utility function if insured
def V(q, x, par):
""" Calculates the expected utility of an insured agent with insurance coverage (q) and premium (pi)
Args:
q (float): Insurance coverage
pi (float): Insurance premium
x (float): Monetary loss in the case of a bad outcome
par.y (int): Total assets
par.p (float): Probability that the loss is incurred
par: SimpleNamespace
Returns:
V (float): Expected utility from buying an insurance.
"""
u_loss = u(par.y-x+q-pi(q, par), par)
u_win = u(par.y-pi(q, par), par)
return par.p*u_loss + (1-par.p)*u_win
#Constructing a grid of qs over [0.01; 0.6]
q_vec = np.linspace(0.01,0.6)
#Creating a new class of the known parameters letting x=0.6
par1 = SimpleNamespace()
par1.theta = -2
par1.y = 1
par1.p = 0.2
par1.x = 0.6
#Expected utility function if not insured with x=0.6
def V0_function(par1):
""" Calculates the expected utility of a non-insured agent with insurance coverage (q) and premium (pi)
Args:
par1.x (float): Monetary loss in the case of a bad outcome
par1.y (int): Total assets
par1.p (float): Probability that the loss is incurred
par1: SimpleNamespace
Returns:
V0_function (float): Expected utility from not buying an insurance with a monetary loss equal to 0.6
"""
u0_loss = u(par1.y-par1.x, par1)
u0_win = u(par1.y, par1)
return par1.p*u0_loss + (1-par1.p)*u0_win
#Expected utility function if insured with x=0.6
def V_function(q, pi, par1):
""" Calculates the expected utility of an insured agent with insurance coverage (q) and premium (pi)
Args:
q (float): Insurance coverage
pi (float): Insurance premium
par1.x (float): Monetary loss in the case of a bad outcome
par1.y (int): Total assets
par1.p (float): Probability that the loss is incurred
par1: SimpleNamespace
Returns:
V_function (float): Expected utility from buying an insurance with a monetary loss equal to 0.6
"""
u_loss = u(par1.y-par1.x+q-pi, par1)
u_win = u(par1.y-pi, par1)
return par1.p*u_loss + (1-par1.p)*u_win
#Defining a function that subtracts the expected utility from buying an insurance with the expected utility from not buying an insurance
def pi_tilde_acc(q, pi, par1):
""" Calculates the difference in the expected utility from buying an insurance vs. not buying an insurance
Args:
V0_function (float): Expected utility from not buying an insurance with a monetary loss equal to 0.6
V_function (float): Expected utility from buying an insurance with a monetary loss equal to 0.6
q (float): Insurance coverage
pi (float): Insurance premium
par1: SimpleNamespace
Returns:
pi_tilde_acc (float): the difference in the expected utility from buying an insurance vs. not buying an insurance
"""
return V_function(q, pi, par1)-V0_function(par1)
#Creating a class of the known parameters
par2 = SimpleNamespace()
par2.theta = -2
par2.y = 1
par2.p = 0.2
#Creating a class of the known parameters of the two insurance policies
par3 = SimpleNamespace()
par3.gamma1 = 0.9
par3.gamma2 = 0.2
par3.pi1 = 0.45
par3.pi2 = 0.1
def g1(x, par2, par3):
""" Calculates the agents value of policy 1
Args:
x (float): Drawn from a beta distribution (X)
par2.y (int):Total assets
par3.gamma1 (float): Coverage ratio of policy 1
par3.pi1 (float): Insurance premium of policy 1
par2: SimpleNamespace
par3: SimpleNamespace
Returns:
g1 (float): Agents value of policy 1
"""
return u(par2.y-(1-par3.gamma1)*x-par3.pi1, par2)
def MC1(N,g1,x):
""" Calculating the numerical solution to the integral of the agents value by Monte Carlo of policy 1
Args:
N (int): Number of iterations/draws
g1 (float): Agents value of policy 1
x (float): Drawn from a beta distribution (X)
Returns:
MC1 (float): Agents value of policy 1
"""
#Draw N random values from a beta distribution (x)
X = np.random.beta(2, 7, size=N)
return np.mean(g1(X, par2, par3))
def g2(x, par2, par3):
""" Calculates the agents value of policy 2
Args:
x (float): Drawn from a beta distribution (X)
par2.y (int):Total assets
par3.gamma2 (float): Coverage ratio of policy 2
par3.pi2 (float): Insurance premium of policy 2
par2: SimpleNamespace
par3: SimpleNamespace
Returns:
g2 (float): Agents value of policy 2
"""
return u(par2.y-(1-par3.gamma2)*x-par3.pi2, par2)
def MC2(N,g2,x):
""" Calculating the numerical solution to the integral of the agents value by Monte Carlo of policy 2
Args:
N (int): Number of iterations/draws
g2 (float): Agents value of policy 2
x (float): Drawn from a beta distribution (X)
Returns:
MC2 (float): Agents value of policy 2
"""
    X = np.random.beta(2, 7, size=N) # draw N random values from a beta distribution (X)
return np.mean(g2(X, par2, par3))
#Defining known parameters globally
N = 10_000
a = 0
b = 1
#Creating a class of the known parameters
par4 = SimpleNamespace()
par4.gamma = 0.95
par4.y = 1
par4.theta = -2
def g3(x, pi, N, par4):
""" Calculates the agents value of a policy
Args:
x (float): Drawn from a beta distribution (X)
pi (float): Insurance Premium
N (int): Iterations
par4.y (int):Total assets
par4.gamma (float): Coverage ratio of policy 2
par4: SimpleNamespace
Returns:
g3 (float): Agents value of a policy
"""
X = np.random.beta(2, 7, size=N)
    return np.mean(u(par4.y-(1-par4.gamma)*X-pi, par4)) # Monte Carlo estimate of V(gamma, pi) = E[u(y - (1 - gamma)X - pi)]
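#A small usage sketch (illustrative only): Monte Carlo estimates of the agent's
#expected utility under the policies defined above
if __name__ == "__main__":
    print("Policy 1 expected utility:", MC1(100_000, g1, x_values))
    print("Policy 2 expected utility:", MC2(100_000, g2, x_values))
    print("gamma=0.95 policy at pi=0.1:", g3(0.0, 0.1, 100_000, par4))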
|
python
|
from core.config.setting import static_setting
from core.resource.pool import ResourceSetting
static_setting.setting_path = "/Users/lilen/mySetting"
ResourceSetting.load()
print(f"资源文件路径{ResourceSetting.resource_path}")
print(f"配置文件路径{ResourceSetting.setting_path}")
ResourceSetting.resource_path = "/User/user/new_resource"
static_setting.save_all()
|
python
|
import pygame
from settings import *
class Entity:
def __init__(self, x, y, w, h, speed, begin_act):
self.x = x
self.y = y
self.w = w
self.h = h
self.rect = pygame.Rect(x, y, w, h)
self.life = 100
self.life_rect = pygame.Rect(x, y, w, 4)
self.action = begin_act
self.animations_database = {}
self.frame = 0
self.flip = False
self.velocity = [0, 0]
self.speed = speed
@staticmethod
def animations(path, frame_length):
db_animation = []
for i in range(len(frame_length)):
animation_dir = path + "_" + str(i) + ".png"
animation_image = pygame.transform.scale2x(pygame.image.load(animation_dir))
animation_image.set_colorkey(WHITE_COLOR)
for frame in range(frame_length[i]):
db_animation.append(animation_image)
return db_animation
def move_right(self):
self.rect.x += self.velocity[0]
def move_left(self):
self.rect.x += self.velocity[0]
def move_up(self):
self.rect.y += self.velocity[1]
def move_down(self):
self.rect.y += self.velocity[1]
def update(self):
self.frame += 1
if self.frame >= len(self.animations_database[self.action]):
self.frame = 0
self.life_rect.x = self.rect.x
self.life_rect.y = self.rect.y - 26
def damage(self, life_decrease):
self.life -= life_decrease
self.life_rect.width -= self.w / (100 / life_decrease)
def check_die(self):
if self.life <= 0:
return True
def draw(self, window):
window.blit(pygame.transform.flip(self.animations_database[self.action][self.frame], self.flip, False), (self.rect.x, self.rect.y - 20))
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mail.py: email server utils
'''
import sys
import argparse
import getpass
from os import getenv
try:
import mysql.connector as mariadb
except ImportError:
    print('mysql client not found, please install it: pip install mysql-connector-python')
sys.exit(2)
_version = '2.1'
parser = argparse.ArgumentParser(description='Purpose: add mailboxes, delete mailboxes, list mailboxes, change passwords')
parser.add_argument(
'-v',
'--version',
action='version',
version=f"%(prog)s version: {_version}",
    help='show version and exit'
)
args = parser.parse_args()
username = getenv('DB_USERNAME')
password = getenv('DB_PASSWORD')
dbname = getenv('DB_DATABASE')
dbhost = getenv('DB_HOST')
try:
conn = mariadb.connect(
host=dbhost,
user=username,
passwd=password,
database=dbname,
use_pure=True
)
except mariadb.Error as err:
print("Error: {}".format(err))
    sys.exit(1)
all_mails = []
cursor = conn.cursor()
cursor.execute("SELECT * FROM users")
myresult = cursor.fetchall()
for x in myresult:
all_mails.append(x[0])
prompt_text = """\
0. Exit
1. Create a mailbox
2. Delete a mailbox
3. List all mailboxes
4. Change a password
5. Show available mail domains
Enter a number:
"""
var1 = input(prompt_text)
while var1 != "0":
    # Create a mailbox
if var1 == "1":
        str0 = input("Enter the new mailbox: ")
        print("You entered: ", str0)
        print("Enter the password: ")
        str1 = getpass.getpass()
        print("Enter the password again: ")
        str_1 = getpass.getpass()
        if str1 != str_1:
            print("The two passwords do not match")
continue
try:
cursor = conn.cursor(prepared=True)
val = (str0, str1)
cursor.execute("INSERT INTO users (email, password) VALUES (?, ENCRYPT(?))", val)
conn.commit()
            print(cursor.rowcount, "mailbox(es) inserted")
except mariadb.Error as error:
print("Error: {}".format(error))
input("Press Enter to continue...")
    # Delete a mailbox
elif var1 == "2":
        str2 = input("Enter the mailbox to delete: ")
if str2 not in all_mails:
print(str2, "不存在")
input("Press Enter to continue...")
else:
try:
cursor = conn.cursor(prepared=True)
sql1 = "DELETE FROM users WHERE email = %s"
val1 = (str2)
cursor.execute(sql1, (val1,))
conn.commit()
print (str2, "已删除")
except mariadb.Error as error:
print("Error: {}".format(error))
input("Press Enter to continue...")
    # List all mailboxes
elif var1 == "3":
cursor = conn.cursor()
cursor.execute("SELECT email FROM users")
myresult = cursor.fetchall()
for x in myresult:
print(x[0])
    # Change a password
elif var1 == "4":
        str3 = input("Enter the mailbox: ")
        print("You entered: ", str3)
        str4 = getpass.getpass("Enter the new password: ")
        str_4 = getpass.getpass("Enter the password again: ")
        if str4 != str_4:
            print("The two passwords do not match")
continue
try:
cursor = conn.cursor(prepared=True)
val2 = (str4, str3)
cursor.execute("UPDATE users SET password = ENCRYPT(?) WHERE email = ?", val2)
conn.commit()
print(str3, "密码已修改")
except mariadb.Error as error:
print("Errot: {}".format(error))
input("Press Enter to continue...")
    # Show available domains
elif var1 == "5":
cursor = conn.cursor()
cursor.execute("SELECT domain FROM domains")
myresult = cursor.fetchall()
for x in myresult:
print(x[0])
else:
print("请输入有效数字")
input("Press Enter to continue...")
input("Press Enter to continue...")
var1 = input(prompt_text)
|
python
|
from .android_component_builder import AndroidComponentBuilder
class LabelBuilder(AndroidComponentBuilder):
def __init__(self, options, component):
super().__init__(options, component)
self.constraints = {}
self.name = ''
self.text = ''
self.text_align = 'left'
self.font = {
'face': 'Arial',
'size': 20,
'color': '#000000',
'weight': 'normal'
}
self.lines = []
self.load_attributes()
def load_lines(self):
self.lines = [
' <TextView',
f" android:id=\"@+id/{self.name}\"",
f" android:text=\"{self.text}\"",
]
if self.text_align != 'left':
self.lines += [f" android:textAlignment=\"{self.text_align}\""]
self.lines += self.constraint_lines()
self.lines += self.text_styling_lines()
self.lines += [" />\n"]
return ("\n").join(self.lines)
def text_styling_lines(self):
return [
f" fontPath=\"font/{self.font['face']}.ttf\"",
f" android:textStyle=\"{self.font['weight']}\"",
f" android:textSize=\"{self.font['size']}sp\"",
f" android:textColor=\"{self.font['color']}\""
]
|
python
|
import http
import secrets
import pytest
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.http import urlencode
from guildmaster import views
from guildmaster.utils import reverse
@pytest.fixture()
def user():
user, __ = User.objects.get_or_create(username='henry', email='[email protected]')
return user
def test_authorization(rf, user, tf_client):
request = rf.get(
reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}), username=user.username
)
response = views.AuthorizationView.as_view()(request, client_name=tf_client.name)
assert response.status_code == 302
assert response.url.startswith(tf_client.authorization_url)
def test_authorization_state(rf, user, tf_client, settings):
request = rf.get(
reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}), username=user.username
)
response = views.AuthorizationView.as_view()(request, client_name=tf_client.name)
assert request.session[settings.GUILDMASTER_SESSION_STATE_KEY] in response.url
def test_authorization_return_url(rf, user, tf_client, settings):
request = rf.get(
reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}), username=user.username
)
views.AuthorizationView.as_view()(request, client_name=tf_client.name)
assert request.session[settings.GUILDMASTER_SESSION_RETURN_KEY] == settings.GUILDMASTER_RETURN_URL
request = rf.get(
'{}?{}'.format(
reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}),
urlencode({f'{settings.GUILDMASTER_RETURN_FIELD_NAME}': '/other'}),
),
username=user.username,
)
views.AuthorizationView.as_view()(request, client_name=tf_client.name)
assert request.session[settings.GUILDMASTER_SESSION_RETURN_KEY] == '/other'
def test_token(rf, settings, user, tf_client, requests_mock):
expected = {
'access_token': secrets.token_urlsafe(64),
'refresh_token': secrets.token_urlsafe(64),
'token_type': 'bearer',
'expires_in': 3600,
}
requests_mock.post(
tf_client.token_url, json=expected, headers={'Date': timezone.now().strftime('%a, %d %b %Y %H:%M:%S %Z')}
)
userinfo = {'username': 'henry', 'discriminator': '1234', 'battletag': 'henry#1234'}
requests_mock.get(tf_client.userinfo_url, json=userinfo)
code = secrets.token_urlsafe(64)
state = secrets.token_urlsafe(64)
request = rf.get(
reverse('guildmaster:token', kwargs={'client_name': tf_client.name}),
{'code': code, 'state': state},
username=user.username,
)
request.user = user
request.session[settings.GUILDMASTER_SESSION_STATE_KEY] = state
response = views.TokenView.as_view()(request, client_name=tf_client.name)
assert response.status_code == 302
assert requests_mock.called
assert requests_mock.call_count == 2
assert requests_mock.request_history[0].method == 'POST'
assert requests_mock.request_history[0].url == tf_client.token_url
assert requests_mock.request_history[1].method == 'GET'
assert requests_mock.request_history[1].url == tf_client.userinfo_url
def test_token_error(rf, settings, user, tf_client):
secrets.token_urlsafe(64)
state = secrets.token_urlsafe(64)
request = rf.get(
reverse('guildmaster:token', kwargs={'client_name': tf_client.name}),
{'error': 'access_denied', 'state': state},
username=user.username,
)
request.session[settings.GUILDMASTER_SESSION_STATE_KEY] = state
response = views.TokenView.as_view()(request, client_name=tf_client.name)
assert response.status_code == 403
def test_token_bogus(rf, settings, user, tf_client):
code = secrets.token_urlsafe(64)
state = secrets.token_urlsafe(64)
request = rf.get(
reverse('guildmaster:token', kwargs={'client_name': tf_client.name}),
{'code': code, 'state': state},
username=user.username,
)
request.session[settings.GUILDMASTER_SESSION_STATE_KEY] = secrets.token_urlsafe(64)
response = views.TokenView.as_view()(request, client_name=tf_client.name)
assert response.status_code == 403
def test_discord_list(settings, client, user):
client.force_login(user)
response = client.get(reverse('guildmaster:discord-list'))
assert response.status_code == http.HTTPStatus.OK
|
python
|
# coding: utf-8
"""
Json-serializers for books-rest-api.
"""
from rest_framework.serializers import (
ModelSerializer, ReadOnlyField
)
from books.models import *
__author__ = "Vladimir Gerasimenko"
__copyright__ = "Copyright (C) 2017, Vladimir Gerasimenko"
__version__ = "0.0.1"
__maintainer__ = "Vladimir Gerasimenko"
__email__ = "[email protected]"
class BookSerializer(ModelSerializer):
"""
Book serializer class.
"""
owner = ReadOnlyField(source='owner.username')
class Meta:
model = Book
fields = "__all__"
class CategorySerializer(ModelSerializer):
"""
    Category serializer class.
"""
owner = ReadOnlyField(source='owner.username')
class Meta:
model = Category
fields = "__all__"
|
python
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from faker import Faker
from members.models.members import Member
from members.models.services import Service
from members.models.countries import City
from django.template.defaultfilters import slugify
import random
from django_countries import countries
import os
import csv
module_dir = os.path.dirname(__file__) # get current directory
file_path = os.path.join(module_dir, 'members_city.csv')
class Command(BaseCommand):
help = "Command information"
def handle(self, *args, **kwargs):
        try:
            user = User.objects.get(username='admin')
        except User.DoesNotExist:
user = User.objects.create_superuser(
username='admin',
email='[email protected]',
password='testpass123'
)
fake = Faker()
with open(file_path) as f:
reader = csv.reader(f)
for row in reader:
_, created = City.objects.get_or_create(
name=row[3],
user=user,
country='EG'
)
member_obj = Member.objects.create(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.unique.email(),
mobile=fake.unique.phone_number(),
address=fake.address(),
birthday=fake.date_of_birth(),
user=user,
city=City.objects.filter(name='New Cairo').first(),
job="Engineer",
marital_status="S",
gender="F",
height='175'
)
# for _ in range(10):
# membership_obj = Membership.objects.create(
# name=fake.word(),
# type=random.choice(['Clinic', 'Online']),
# period=random.randint(1, 12),
# sessions=random.randint(12, 120),
# price=random.randint(1000, 5000),
# user=user
|
python
|
# Stack implemented on top of two queues (plain lists used as FIFO queues):
# push appends to the currently active queue; pop/Top shuttle the first n-1
# elements to the other queue to reach the most recently pushed element.
q1 = []
q2 = []
n = 5  # stack capacity
def push(data):
    # Overflow when whichever queue is active already holds n elements
    if len(q1) == n or len(q2) == n:
        print("Overflow")
        return
    # Append to whichever queue currently holds the elements
    if q2 == []:
        q1.append(data)
    else:
        q2.append(data)
def pop():
    if q1 == [] and q2 == []:
        print("Underflow")
        return
if(not q1 == []):
while(len(q1)>1):
q2.append(q1[0])
q1.pop(0)
q1.pop()
return
if(not q2 == []):
while(len(q2)>1):
q1.append(q2[0])
q2.pop(0)
q2.pop()
return
def Top():
    if q1 == [] and q2 == []:
        print("Underflow")
        return
if(not q1 == []):
while(len(q1)>1):
q2.append(q1[0])
q1.pop(0)
x = q1[0]
q1.pop(0)
q2.append(x)
return x
if(not q2 == []):
while(len(q2)>1):
q1.append(q2[0])
q2.pop(0)
x = q2[0]
q2.pop(0)
q1.append(x)
return x
if __name__ == '__main__':
push(1)
push(2)
push(3)
push(4)
push(5)
print(q1)
print(q2)
pop()
print(q1)
print(q2)
print(Top())
|
python
|
import time
from Sensor import Sensor
if __name__ == "__main__":
sensor = Sensor("/dev/ttyUSB0",9600)
while True:
data = sensor.PM25_Hex(10).split(" ")
pm = int(data[3]+data[2], 16)/10
print str(time.strftime("%H:%M:%S", time.localtime())) + ' PM2.5: ', pm
|
python
|
from test_helper import run_common_tests, failed, passed, check_tests_pass
from maximum_salary import largest_number
def reference(numbers):
numbers = list(map(str, numbers))
for _ in numbers:
for i in range(len(numbers) - 1):
if numbers[i] + numbers[i + 1] < numbers[i + 1] + numbers[i]:
t = numbers[i]
numbers[i] = numbers[i + 1]
numbers[i + 1] = t
return int("".join(numbers))
if __name__ == '__main__':
run_common_tests()
check_tests_pass("maximum_salary_unit_tests.py")
all_tests_passed = True
for numbers in [
[2, 21, 23, 211, 213, 231, 232],
[56, 5, 6, 556, 566, 666, 665, 656]
]:
if reference(numbers) != largest_number(numbers):
all_tests_passed = False
failed("Wrong answer for n={}".format(numbers))
break
if all_tests_passed:
passed()
|
python
|
#!/usr/bin/python3
import requests
s = requests.Session()
#url = "http://127.0.0.1:8080"
url = "http://52.76.131.184/_mijkweb/"
#payload = {'request' : '{"type":"login", "login":"[email protected]", "password":"test", "as":"admin"}'}
payload = {'request' : '{"type":"login", "login":"mijkenator", "password":"test"}'}
#payload = {'request' : '{"type":"login", "login":"[email protected]", "password":"lalala", "as":"contractor"}'}
r = s.post(url, data=payload)
print(r.text)
#payload = {'request' : '{"type":"get_orders", "uid":"18", "cid":"4"}'}
#r = s.post(url+"admin/order/", data=payload)
#print(r.text)
payload = {'request' : '{"type":"cancel_order","order_id":5}'}
r = s.post(url+"user", data=payload)
print(r.text)
|
python
|
import pytest
from anticrlf.types import SubstitutionMap
from anticrlf.exception import UnsafeSubstitutionError
def test_substitution_assign():
smap = SubstitutionMap(key="value")
assert type(smap) == SubstitutionMap
assert smap['key'] == 'value'
assert smap["\n"] == "\\n"
assert smap["\r"] == "\\r"
smap["key"] = "2value"
assert type(smap) == SubstitutionMap
assert smap['key'] == '2value'
def test_bad_substitution():
with pytest.raises(UnsafeSubstitutionError):
SubstitutionMap(x="hex")
smap = SubstitutionMap(x="y")
with pytest.raises(UnsafeSubstitutionError):
smap['x'] = 'hex'
smap = SubstitutionMap()
smap["x"] = "r"
with pytest.raises(UnsafeSubstitutionError):
smap["r"] = "\\r"
assert "\r" in smap.keys()
with pytest.raises(UnsafeSubstitutionError):
smap["x"] = "\r" # any use of \r as a value should trigger this
with pytest.raises(UnsafeSubstitutionError):
smap["x"] = "\n" # any use of \n as a value should trigger this
def test_delete():
smap = SubstitutionMap()
del smap["\n"]
assert smap["\n"] == "\\n"
smap["x"] = "y"
del smap["x"]
assert "x" not in smap
|
python
|
from gitlab.base import RESTManager, RESTObject
from gitlab.mixins import (
AccessRequestMixin,
CreateMixin,
DeleteMixin,
ListMixin,
ObjectDeleteMixin,
)
__all__ = [
"GroupAccessRequest",
"GroupAccessRequestManager",
"ProjectAccessRequest",
"ProjectAccessRequestManager",
]
class GroupAccessRequest(AccessRequestMixin, ObjectDeleteMixin, RESTObject):
pass
class GroupAccessRequestManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):
_path = "/groups/{group_id}/access_requests"
_obj_cls = GroupAccessRequest
_from_parent_attrs = {"group_id": "id"}
class ProjectAccessRequest(AccessRequestMixin, ObjectDeleteMixin, RESTObject):
pass
class ProjectAccessRequestManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):
_path = "/projects/{project_id}/access_requests"
_obj_cls = ProjectAccessRequest
_from_parent_attrs = {"project_id": "id"}
|
python
|
#n = ''
#while n != 'MnFf':
#print('What is your sex? ')
#x = input('[M/F]').upper().strip()
#if x in 'MmFf':
#print('THANKS FOR VISITING.')
#else:
#print('TRY AGAIN, INVALID.')
sexo = str(input('What is your SEX? [M/F] ')).strip()
while sexo not in 'FfMm':
    sexo = str(input('Invalid option. Try again.\nWhat is your SEX? [M/F] ')).strip()
if sexo in 'Mm':
    print('One hell of a man, huh lol')
else:
    print('Curvy little princess, huh')
|
python
|
"""
Package for working with JSON-format configuration files.
"""
from ._JSONObject import JSONObject
from ._StrictJSONObject import StrictJSONObject
from ._typing import (
Absent,
OptionallyPresent,
PropertyValueType
)
|
python
|
import itertools
from datetime import datetime, timedelta
from notifications_utils.polygons import Polygons
from notifications_utils.template import BroadcastPreviewTemplate
from orderedset import OrderedSet
from werkzeug.utils import cached_property
from app.broadcast_areas import CustomBroadcastAreas, broadcast_area_libraries
from app.formatters import round_to_significant_figures
from app.models import JSONModel, ModelList
from app.models.user import User
from app.notify_client.broadcast_message_api_client import (
broadcast_message_api_client,
)
class BroadcastMessage(JSONModel):
ALLOWED_PROPERTIES = {
'id',
'service_id',
'template_id',
'content',
'created_by',
'personalisation',
'starts_at',
'finishes_at',
'created_at',
'approved_at',
'cancelled_at',
'updated_at',
'created_by_id',
'approved_by_id',
'cancelled_by_id',
}
libraries = broadcast_area_libraries
def __lt__(self, other):
if self.starts_at and other.starts_at:
return self.starts_at < other.starts_at
if self.starts_at and not other.starts_at:
return True
if not self.starts_at and other.starts_at:
return False
if self.updated_at and not other.updated_at:
return self.updated_at < other.created_at
if not self.updated_at and other.updated_at:
return self.created_at < other.updated_at
if not self.updated_at and not other.updated_at:
return self.created_at < other.created_at
return self.updated_at < other.updated_at
@classmethod
def create(cls, *, service_id, template_id):
return cls(broadcast_message_api_client.create_broadcast_message(
service_id=service_id,
template_id=template_id,
content=None,
reference=None,
))
@classmethod
def create_from_content(cls, *, service_id, content, reference):
return cls(broadcast_message_api_client.create_broadcast_message(
service_id=service_id,
template_id=None,
content=content,
reference=reference,
))
@classmethod
def from_id(cls, broadcast_message_id, *, service_id):
return cls(broadcast_message_api_client.get_broadcast_message(
service_id=service_id,
broadcast_message_id=broadcast_message_id,
))
@property
def areas(self):
library_areas = self.get_areas(areas=self._dict['areas'])
if library_areas:
if len(library_areas) != len(self._dict['areas']):
raise RuntimeError(
f'BroadcastMessage has {len(self._dict["areas"])} areas '
f'but {len(library_areas)} found in the library'
)
return library_areas
return CustomBroadcastAreas(
areas=self._dict['areas'],
polygons=self._dict['simple_polygons'],
)
@property
def parent_areas(self):
return sorted(set(self._parent_areas_iterator))
@property
def _parent_areas_iterator(self):
for area in self.areas:
for parent in area.parents:
yield parent
@cached_property
def polygons(self):
return Polygons(
list(itertools.chain(*(
area.polygons for area in self.areas
)))
)
@cached_property
def simple_polygons(self):
return self.get_simple_polygons(areas=self.areas)
@property
def reference(self):
if self.template_id:
return self._dict['template_name']
return self._dict['reference']
@property
def template(self):
return BroadcastPreviewTemplate({
'template_type': BroadcastPreviewTemplate.template_type,
'name': self.reference,
'content': self.content,
})
@property
def status(self):
if (
self._dict['status']
and self._dict['status'] == 'broadcasting'
and self.finishes_at < datetime.utcnow().isoformat()
):
return 'completed'
return self._dict['status']
@cached_property
def created_by(self):
return User.from_id(self.created_by_id) if self.created_by_id else None
@cached_property
def approved_by(self):
return User.from_id(self.approved_by_id)
@cached_property
def cancelled_by(self):
return User.from_id(self.cancelled_by_id)
@property
def count_of_phones(self):
return round_to_significant_figures(
sum(area.count_of_phones for area in self.areas),
1
)
@property
def count_of_phones_likely(self):
area_estimate = self.simple_polygons.estimated_area
bleed_area_estimate = self.simple_polygons.bleed.estimated_area - area_estimate
return round_to_significant_figures(
self.count_of_phones + (self.count_of_phones * bleed_area_estimate / area_estimate),
1
)
def get_areas(self, areas):
return broadcast_area_libraries.get_areas(
*areas
)
def get_simple_polygons(self, areas):
polygons = Polygons(
list(itertools.chain(*(
area.simple_polygons for area in areas
)))
)
# If we’ve added multiple areas then we need to re-simplify the
# combined shapes to keep the point count down
return polygons.smooth.simplify if len(areas) > 1 else polygons
def add_areas(self, *new_areas):
areas = list(OrderedSet(
self._dict['areas'] + list(new_areas)
))
simple_polygons = self.get_simple_polygons(areas=self.get_areas(areas=areas))
self._update(areas=areas, simple_polygons=simple_polygons.as_coordinate_pairs_lat_long)
def remove_area(self, area_to_remove):
areas = [
area for area in self._dict['areas']
if area != area_to_remove
]
simple_polygons = self.get_simple_polygons(areas=self.get_areas(areas=areas))
self._update(areas=areas, simple_polygons=simple_polygons.as_coordinate_pairs_lat_long)
def _set_status_to(self, status):
broadcast_message_api_client.update_broadcast_message_status(
status,
broadcast_message_id=self.id,
service_id=self.service_id,
)
def _update(self, **kwargs):
broadcast_message_api_client.update_broadcast_message(
broadcast_message_id=self.id,
service_id=self.service_id,
data=kwargs,
)
def request_approval(self):
self._set_status_to('pending-approval')
def approve_broadcast(self):
self._update(
starts_at=datetime.utcnow().isoformat(),
finishes_at=(
datetime.utcnow() + timedelta(hours=4, minutes=0)
).isoformat(),
)
self._set_status_to('broadcasting')
def reject_broadcast(self):
self._set_status_to('rejected')
def cancel_broadcast(self):
self._set_status_to('cancelled')
class BroadcastMessages(ModelList):
model = BroadcastMessage
client_method = broadcast_message_api_client.get_broadcast_messages
def with_status(self, *statuses):
return [
broadcast for broadcast in self if broadcast.status in statuses
]
|
python
|
#!/usr/bin/env python
'''
Dane Warren
Obtain the k nearest neighbors of the given player and use the second seasons of these neighbors to predict the second season of the given player.
'''
import cPickle as pickle
'''
@param rbID: The ID of the running back to get the stats for
@param rrbStats: A list containing dictionaries of rookie running back statlines
@returns: The rookie season statline of the given ID
'''
def getIndividualRookieStats(rbID, rrbStats):
for player in rrbStats:
if player["ID"] == rbID:
print(player)
return player
return 0
'''
@param x: Number to compare to y
@param y: Number to compare to x
@returns: The similarity of the two numbers
'''
def similarityScore(x, y):
if x < y and y != 0:
similarity = float(x) / float(y)
elif x > y and x != 0:
similarity = float(y) / float(x)
else:
similarity = 1
if x < 0 or y < 0:
similarity = 0
if x == 0 and y == 0:
similarity = 1
return similarity
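# Illustrative examples (not in the original): similarityScore(4, 5) == 0.8,
# similarityScore(5, 4) == 0.8, similarityScore(0, 0) == 1, and any negative
# input yields a similarity of 0.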
'''
@param rb: The rookie-season stats of one candidate running back
@param stats: The given running back's rookie season stats
@returns: The similarity rating between the candidate and the given running back
'''
def getSimilarity(rb, stats):
similarity = 0
if rb["gamesPlayed"] >= 8:
rushYpASim = similarityScore(rb["rushYpA"], stats["rushYpA"])
rushYpGSim = similarityScore(rb["rushYpG"], stats["rushYpG"])
rushTDpGSim = similarityScore(rb["rushTDpG"], stats["rushTDpG"])
rushTDpASim = similarityScore(rb["rushTDpA"], stats["rushTDpA"])
airYpGSim = similarityScore(rb["airYpG"], stats["airYpG"])
airYpRSim = similarityScore(rb["airYpR"], stats["airYpR"])
airTDpGSim = similarityScore(rb["airTDpG"], stats["airTDpG"])
airTDpRSim = similarityScore(rb["airTDpR"], stats["airTDpR"])
similarity = .125 * (rushYpASim + rushYpGSim + rushTDpGSim + rushTDpASim + airYpGSim + airYpRSim + airTDpGSim + airTDpRSim)
if rb["ID"] == stats["ID"]:
similarity = 0
return similarity
'''
@param k: The number of neighbors to return
@param inputRB: The stats of the given running back
@param rrbStats: The stats of every rookie running back
@returns: k nearest neighbors and the similarity scores
'''
def getNearestNeighbors(k, inputRB, rrbStats):
nearestNeighbors = {}
for rb in rrbStats:
similarity = getSimilarity(rb, inputRB)
if len(nearestNeighbors) < k:
nearestNeighbors[rb["ID"]] = similarity
else:
sortedNums = sorted(nearestNeighbors.values())
if(similarity > sortedNums[0]):
sortedNames = sorted(nearestNeighbors, key=nearestNeighbors.get)
del nearestNeighbors[sortedNames[0]]
nearestNeighbors[rb["ID"]] = similarity
return nearestNeighbors
def getNeighbors(inputRB, rrbStats):
neighbors = {}
for rb in rrbStats:
neighbors[rb["ID"]] = getSimilarity(rb, inputRB)
return neighbors
def getSophomoreStats(srbStats, ID):
for player in srbStats:
if player["ID"] == ID:
if player["fantasyPoints"] == 0:
return -1 #Player did not touch the ball in their second season
return player
return -1
def main():
file = open("../../datasets/pkl_datasets/rookie_rbStats.pkl","rb")
rrbStats = pickle.load(file)
file.close()
file = open("../../datasets/pkl_datasets/sophomore_rbStats.pkl","rb")
srbStats = pickle.load(file)
file.close()
file = open("../../datasets/pkl_datasets/runningbacksNameID.pkl")
rbs = pickle.load(file)
file.close()
#GET INPUT RB
rbID = raw_input("Enter a running back. (Name or ID) \n")
if rbID.isdigit():
rbID = int(rbID)
else:
rbID = rbs[rbID]
    inputRB = getIndividualRookieStats(rbID, rrbStats)
    if inputRB == 0:
        print("Running back not found.")
        return
    if inputRB["fantasyPoints"] == 0:
        print("This player did not touch the ball in their rookie season.")
        return
    if getSophomoreStats(srbStats, rbID) == -1:
        print("This player did not touch the ball in their sophomore season.")
        return
#GET NEIGHBORS
    # filter with a new list; removing from rrbStats while iterating over it skips elements
    rrbStats = [rb for rb in rrbStats if rb["fantasyPoints"] != 0]
neighbors = getNeighbors(inputRB, rrbStats)
#SORT NEIGHBORS
    # sort items directly: the old value-then-key scan duplicated entries
    # whenever two players shared the same similarity score
    sortedNeighbors = [{ID: similarity} for ID, similarity in
                       sorted(neighbors.items(), key=lambda pair: pair[1])]
#GET K NEAREST NEIGHBORS
k = 5
kNeighbors = sortedNeighbors[-k:]
#GET NEIGHBOR SOPHOMORE STATS AND COMPARE
stats = {}
neighborStats = []
maxPredictiveRating = 0
bestK = 0
bestNeighborFPPG = 0
while k <= 40:
kNeighbors = sortedNeighbors[-k:]
totalNeighborFP = 0
totalNeighborGames = 0
for neighbor in kNeighbors:
            stats = getSophomoreStats(srbStats, neighbor.keys()[0])
            if stats != -1 and stats["fantasyPoints"] != 0:  # skip neighbors with no sophomore record
totalNeighborFP += stats["fantasyPoints"]
totalNeighborGames += stats["gamesPlayed"]
neighborFPPG = float(totalNeighborFP) / float(totalNeighborGames)
inputRBSophomoreStats = getSophomoreStats(srbStats, rbID)
predictiveRating = similarityScore(neighborFPPG, inputRBSophomoreStats["fantasyPointsPerGame"])
if predictiveRating > maxPredictiveRating:
maxPredictiveRating = predictiveRating
bestK = k
bestNeighborFPPG = neighborFPPG
k += 1
print(bestK)
print(maxPredictiveRating)
print(bestNeighborFPPG)
print(inputRBSophomoreStats["fantasyPointsPerGame"])
print(getSophomoreStats(srbStats, 4418))
if __name__ == "__main__":
main()
|
python
|
import FWCore.ParameterSet.Config as cms
# list of crystal indices
ics = cms.untracked.vint32(1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
21, 22, 23, 24, 25,
26, 27, 28, 29, 30,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70,
81, 82, 83, 84, 85,
86, 87, 88, 89, 90,
101, 102, 103, 104, 105,
106, 107, 108, 109, 110,
121, 122, 123, 124, 125,
126, 127, 128, 129, 130,
141, 142, 143, 144, 145,
146, 147, 148, 149, 150,
161, 162, 163, 164, 165,
166, 167, 168, 169, 170,
181, 182, 183, 184, 185,
186, 187, 188, 189, 190)
# list of tower IDs (DQM numbering scheme)
towerIDs = cms.untracked.vint32(1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6)
# list of corresponding strip (VFE) numbers
stripIDs = cms.untracked.vint32(1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1,
1, 2, 3, 4, 5,
5, 4, 3, 2, 1)
# list of channel IDs
channelIDs = cms.untracked.vint32(1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
3, 3, 3, 3, 3,
3, 3, 3, 3, 3,
4, 4, 4, 4, 4,
4, 4, 4, 4, 4,
5, 5, 5, 5, 5,
5, 5, 5, 5, 5,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
3, 3, 3, 3, 3,
3, 3, 3, 3, 3,
4, 4, 4, 4, 4,
4, 4, 4, 4, 4,
5, 5, 5, 5, 5,
5, 5, 5, 5, 5)
# list of status IDs
statusIDs = cms.untracked.vint32(1, 2, 3, 4)
# list of tower CCUIDs
ccuIDs = cms.untracked.vint32(1, 71, 80, 45)
# list of tower DQM position IDs
positionIDs = cms.untracked.vint32(6, 2, 5, 1)
|
python
|
import urllib.parse
import urllib.request
from doodledashboard.component import MissingRequiredOptionException, NotificationCreator, \
ComponentCreationException
from doodledashboard.filters.contains_text import ContainsTextFilter
from doodledashboard.filters.matches_regex import MatchesRegexFilter
from doodledashboard.notifications.image.file_downloader import FileDownloader
from doodledashboard.notifications.notification import Notification
from doodledashboard.notifications.outputs import ImageNotificationOutput
class ImageDependingOnMessageContent(Notification):
"""
    Displays the image whose filter matches the most recent message,
    falling back to the default image when no filter matches.
"""
def __init__(self):
super().__init__()
self._filtered_images = []
self._default_image_path = None
self._chosen_image_path = None
def add_image_filter(self, absolute_path, choice_filter=None):
if choice_filter:
self._filtered_images.append({"path": absolute_path, "filter": choice_filter})
else:
self._default_image_path = absolute_path
def create_output(self, messages):
if not messages:
if self._default_image_path:
return ImageNotificationOutput(self._default_image_path)
else:
return None
last_message = messages[-1]
for image_filter in self._filtered_images:
if image_filter["filter"].filter(last_message):
self._chosen_image_path = image_filter["path"]
if self._chosen_image_path:
image_path = self._chosen_image_path
else:
image_path = self._default_image_path
return ImageNotificationOutput(image_path) if image_path else None
@property
def default_image(self):
return self._default_image_path
@property
def filtered_images(self):
return self._filtered_images
def get_output_types(self):
return [ImageNotificationOutput]
def __str__(self):
notification_name = "ImageDependingOnMessageContent"
if self.name:
            notification_name += " (%s)" % self.name
return notification_name
class ImageDependingOnMessageContentCreator(NotificationCreator):
def __init__(self, file_downloader=FileDownloader()):
super().__init__()
self._file_downloader = file_downloader
@staticmethod
def get_id():
return "image-depending-on-message-content"
def create(self, options, secret_store):
notification = ImageDependingOnMessageContent()
has_images = "images" in options
has_default_image = "default-image" in options
if not has_images and not has_default_image:
raise MissingRequiredOptionException("Expected 'images' list and/or default-image to exist")
if has_default_image:
image_url = self._encode_url(options["default-image"])
image_path = self.download(image_url)
notification.add_image_filter(image_path)
if has_images:
for image_config_section in options["images"]:
if "path" not in image_config_section:
raise MissingRequiredOptionException("Expected 'path' option to exist")
image_url = self._encode_url(image_config_section["path"])
image_filter = self._create_filter(image_config_section)
image_path = self.download(image_url)
notification.add_image_filter(image_path, image_filter)
return notification
def download(self, url):
try:
return self._file_downloader.download(url)
except Exception as err:
raise ImageUnavailable(url, err)
@staticmethod
def _encode_url(full_url):
"""
        Encode characters that are invalid in a URL, such as spaces.
This implements code from the following URLs
https://bugs.python.org/issue14826
https://hg.python.org/cpython/rev/ebd37273e0fe
"""
return urllib.parse.quote(full_url, safe="%/:=&?~#+!$,;'@()*[]|")
@staticmethod
def _create_filter(image_config_section):
pattern_exists = "if-matches" in image_config_section
contains_exists = "if-contains" in image_config_section
if not pattern_exists and not contains_exists:
raise MissingRequiredOptionException("Expected either 'if-contains' or 'if-matches' option to exist")
if pattern_exists and contains_exists:
raise MissingRequiredOptionException("Expected either 'if-contains' or 'if-matches' option, but not both")
if pattern_exists:
return MatchesRegexFilter(image_config_section["if-matches"])
else:
return ContainsTextFilter(image_config_section["if-contains"])
class ImageUnavailable(ComponentCreationException):
def __init__(self, url, error):
super().__init__("Error downloading '%s'" % url)
self._url = url
self._error = error
@property
def url(self):
return self._url
@property
def error(self):
return self._error
|
python
|
from asyncio import Future, ensure_future
from typing import Any, Callable
from websockets import WebSocketCommonProtocol as WebSocket
from .models import Notification
__all__ = [
'Notifier',
]
_Sender = Callable[[Notification], Future]
_Finalizer = Callable[[Future], Any]
class Notifier:
def __init__(self, ws: WebSocket, sender: _Sender, finalizer: _Finalizer):
self._ws = ws
self._sender = sender
self._finalizer = finalizer
self._pending = set()
@property
def closed(self) -> bool:
return self._ws.closed
    def _done_callback(self, fut: Future):
        self._finalizer(fut)
        self._pending.discard(fut)  # tolerate callbacks for futures already removed
    def send(self, notification: Notification) -> Future:
        # If the socket is open, dispatch the notification; otherwise return a
        # future that surfaces the close error via ensure_open().
        fut = self._sender(notification) if self._ws.open else ensure_future(self._ws.ensure_open())
fut.add_done_callback(self._done_callback)
self._pending.add(fut)
return fut
def cancel(self):
for fut in self._pending:
fut.cancel()
|
python
|
"""Function for building a diatomic molecule."""
def create_diatomic_molecule_geometry(species1, species2, bond_length):
"""Create a molecular geometry for a diatomic molecule.
Args:
species1 (str): Chemical symbol of the first atom, e.g. 'H'.
species2 (str): Chemical symbol of the second atom.
        bond_length (float): Bond distance between the two atoms.
Returns:
dict: a dictionary containing the coordinates of the atoms.
"""
geometry = {"sites": [
{'species': species1, 'x': 0, 'y': 0, 'z': 0},
{'species': species2, 'x': 0, 'y': 0, 'z': bond_length}
]}
return geometry
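# Example usage (hypothetical values): hydrogen fluoride with a bond length
# of roughly 0.92 Angstrom.
if __name__ == '__main__':
    print(create_diatomic_molecule_geometry('H', 'F', 0.92))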
|
python
|
from abc import abstractmethod
from typing import Iterable
from .common import PipelineContext, RecordEnvelope
class Transformer:
@abstractmethod
def transform(
self, record_envelopes: Iterable[RecordEnvelope]
) -> Iterable[RecordEnvelope]:
"""
Transforms a sequence of records.
        :param record_envelopes: the records to be transformed
:return: 0 or more transformed records
"""
@classmethod
@abstractmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> "Transformer":
pass
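# A minimal illustrative subclass (hypothetical, not part of the library),
# showing the expected contract: consume envelopes and yield zero or more.
class IdentityTransformer(Transformer):
    """Passes every record through unchanged."""
    def transform(
        self, record_envelopes: Iterable[RecordEnvelope]
    ) -> Iterable[RecordEnvelope]:
        for envelope in record_envelopes:
            yield envelope
    @classmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "Transformer":
        return cls()  # the identity transform needs no configuration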
|
python
|
from pygame.surface import Surface, SurfaceType
from typing import Union, List, Tuple
from pygame.color import Color
GRAVITY_LEFT = 0
GRAVITY_RIGHT = 1
GRAVITY_TOP = 0
GRAVITY_BOTTOM = 2
GRAVITY_CENTER_HORIZONTAL = 4
GRAVITY_CENTER_VERTICAL = 8
STYLE_NORMAL = 0
STYLE_BOLD = 1
STYLE_ITALIC = 2
MOUSE_MODE_CONFINED = 1
MOUSE_MODE_CAPTURED = 2
_pyi_Color_type = Union[Color, str, Tuple[int, int, int], List[int], int, Tuple[int, int, int, int]]
_pyi_Surface_type = Union[Surface, SurfaceType]
|
python
|
import collections
from reclist.abstractions import RecList, rec_test
from typing import List
import random
class CoveoCartRecList(RecList):
@rec_test(test_type='stats')
def basic_stats(self):
"""
Basic statistics on training, test and prediction data
"""
from reclist.metrics.standard_metrics import statistics
return statistics(self._x_train,
self._y_train,
self._x_test,
self._y_test,
self._y_preds)
@rec_test(test_type='price_homogeneity')
def price_test(self):
"""
Measures the absolute log ratio of ground truth and prediction price
"""
from reclist.metrics.price_homogeneity import price_homogeneity_test
return price_homogeneity_test(y_test=self.sku_only(self._y_test),
y_preds=self.sku_only(self._y_preds),
product_data=self.product_data,
price_sel_fn=lambda x: float(x['price_bucket'])
if x['price_bucket']
else None
)
@rec_test(test_type='Coverage@10')
def coverage_at_k(self):
"""
Coverage is the proportion of all possible products which the RS
recommends based on a set of sessions
"""
from reclist.metrics.standard_metrics import coverage_at_k
return coverage_at_k(self.sku_only(self._y_preds),
self.product_data,
k=10)
@rec_test(test_type='HR@10')
def hit_rate_at_k(self):
"""
Compute the rate in which the top-k predictions contain the item to be predicted
"""
from reclist.metrics.standard_metrics import hit_rate_at_k
return hit_rate_at_k(self.sku_only(self._y_preds),
self.sku_only(self._y_test),
k=10)
@rec_test(test_type='hits_distribution')
def hits_distribution(self):
"""
Compute the distribution of hit-rate across product frequency in training data
"""
from reclist.metrics.hits import hits_distribution
return hits_distribution(self.sku_only(self._x_train),
self.sku_only(self._x_test),
self.sku_only(self._y_test),
self.sku_only(self._y_preds),
k=10,
debug=True)
@rec_test(test_type='distance_to_query')
def dist_to_query(self):
"""
Compute the distribution of distance from query to label and query to prediction
"""
from reclist.metrics.distance_metrics import distance_to_query
return distance_to_query(self.rec_model,
self.sku_only(self._x_test),
self.sku_only(self._y_test),
self.sku_only(self._y_preds), k=10, bins=25, debug=True)
def sku_only(self, l: List[List]):
return [[e['product_sku'] for e in s] for s in l]
class SpotifySessionRecList(RecList):
@rec_test(test_type='basic_stats')
def basic_stats(self):
"""
Basic statistics on training, test and prediction data for Next Event Prediction
"""
from reclist.metrics.standard_metrics import statistics
return statistics(self._x_train,
self._y_train,
self._x_test,
self._y_test,
self._y_preds)
@rec_test(test_type='HR@10')
def hit_rate_at_k(self):
"""
Compute the rate at which the top-k predictions contain the item to be predicted
"""
from reclist.metrics.standard_metrics import hit_rate_at_k
return hit_rate_at_k(self.uri_only(self._y_preds),
self.uri_only(self._y_test),
k=10)
@rec_test(test_type='perturbation_test')
def perturbation_at_k(self):
"""
Compute average consistency in model predictions when inputs are perturbed
"""
from reclist.metrics.perturbation import session_perturbation_test
from collections import defaultdict
from functools import partial
# Step 1: Generate a map from artist uri to track uri
substitute_mapping = defaultdict(list)
for track_uri, row in self.product_data.items():
substitute_mapping[row['artist_uri']].append(track_uri)
# Step 2: define a custom perturbation function
def perturb(session, sub_map):
last_item = session[-1]
last_item_artist = self.product_data[last_item['track_uri']]['artist_uri']
substitutes = set(sub_map.get(last_item_artist,[])) - {last_item['track_uri']}
if substitutes:
similar_item = random.sample(substitutes, k=1)
new_session = session[:-1] + [{"track_uri": similar_item[0]}]
return new_session
return []
# Step 3: call test
return session_perturbation_test(self.rec_model,
self._x_test,
self._y_preds,
partial(perturb, sub_map=substitute_mapping),
self.uri_only,
k=10)
@rec_test(test_type='shuffle_session')
def perturbation_shuffle_at_k(self):
"""
Compute average consistency in model predictions when inputs are re-ordered
"""
from reclist.metrics.perturbation import session_perturbation_test
# Step 1: define a custom perturbation function
def perturb(session):
return random.sample(session, len(session))
# Step 2: call test
return session_perturbation_test(self.rec_model,
self._x_test,
self._y_preds,
perturb,
self.uri_only,
k=10)
@rec_test(test_type='hits_distribution_by_slice')
def hits_distribution_by_slice(self):
"""
Compute the distribution of hit-rate across various slices of data
"""
from reclist.metrics.hits import hits_distribution_by_slice
len_map = collections.defaultdict(list)
for idx, playlist in enumerate(self._x_test):
len_map[len(playlist)].append(idx)
slices = collections.defaultdict(list)
bins = [(x * 5, (x + 1) * 5) for x in range(max(len_map) // 5 + 1)]
for bin_min, bin_max in bins:
for i in range(bin_min + 1, bin_max + 1, 1):
slices[f'({bin_min}, {bin_max}]'].extend(len_map[i])
del len_map[i]
assert len(len_map) == 0
return hits_distribution_by_slice(slices,
self.uri_only(self._y_test),
self.uri_only(self._y_preds),
debug=True)
@rec_test(test_type='Coverage@10')
def coverage_at_k(self):
"""
Coverage is the proportion of all possible products which the RS
recommends based on a set of sessions
"""
from reclist.metrics.standard_metrics import coverage_at_k
return coverage_at_k(self.uri_only(self._y_preds),
self.product_data,
# this contains all the track URIs from train and test sets
k=10)
@rec_test(test_type='Popularity@10')
def popularity_bias_at_k(self):
"""
Compute average frequency of occurrence across recommended items in training data
"""
from reclist.metrics.standard_metrics import popularity_bias_at_k
return popularity_bias_at_k(self.uri_only(self._y_preds),
self.uri_only(self._x_train),
k=10)
@rec_test(test_type='MRR@10')
def mrr_at_k(self):
"""
MRR calculates the mean reciprocal of the rank at which the first
relevant item was retrieved
"""
from reclist.metrics.standard_metrics import mrr_at_k
return mrr_at_k(self.uri_only(self._y_preds),
self.uri_only(self._y_test))
def uri_only(self, playlists: List[dict]):
return [[track['track_uri'] for track in playlist] for playlist in playlists]
class MovieLensSimilarItemRecList(RecList):
@rec_test(test_type="stats")
def basic_stats(self):
"""
Basic statistics on training, test and prediction data
"""
from reclist.metrics.standard_metrics import statistics
return statistics(
self._x_train,
self._y_train,
self._x_test,
self._y_test,
self._y_preds
)
@rec_test(test_type='HR@10')
def hit_rate_at_k(self):
"""
Compute the rate at which the top-k predictions contain the movie to be predicted
"""
from reclist.metrics.standard_metrics import hit_rate_at_k
return hit_rate_at_k(
self.movie_only(self._y_preds),
self.movie_only(self._y_test),
k=10
)
@rec_test(test_type='Coverage@10')
def coverage_at_k(self):
"""
Coverage is the proportion of all possible movies which the RS
recommends based on a set of movies and their respective ratings
"""
from reclist.metrics.standard_metrics import coverage_at_k
return coverage_at_k(
self.movie_only(self._y_preds),
self.product_data,
k=10
)
@rec_test(test_type='hits_distribution')
def hits_distribution(self):
"""
Compute the distribution of hit-rate across movie frequency in training data
"""
from reclist.metrics.hits import hits_distribution
return hits_distribution(
self.movie_only(self._x_train),
self.movie_only(self._x_test),
self.movie_only(self._y_test),
self.movie_only(self._y_preds),
k=10,
debug=True
)
@rec_test(test_type="hits_distribution_by_rating")
def hits_distribution_by_rating(self):
"""
Compute the distribution of hit-rate across movie ratings in testing data
"""
from reclist.metrics.hits import hits_distribution_by_rating
return hits_distribution_by_rating(
self._y_test,
self._y_preds,
debug=True
)
def movie_only(self, movies):
return [[x["movieId"] for x in y] for y in movies]
|
python
|
"""
Method Resolution Order (MRO)
MRO é a ordem de execução dos métodos, ou seja quem será executado primeiro.
MRO tem 3 formas:
- Via propriedade da clase
- Via método MRO()
- Via help
Polimorfismo - Objetos que podem se comportar de diferentes formas
"""
class Animal:
    def __init__(self, nome):
        # single underscore: a double underscore would be name-mangled to
        # _Animal__nome and break attribute access in the subclasses below
        self._nome = nome
    def falar(self):
        raise NotImplementedError('The child class must implement this method')
    def comer(self):
        print(f'{self._nome} is eating')
class Cachorro(Animal):
    def __init__(self, nome):
        super().__init__(nome)
    def falar(self):
        print(f'{self._nome} says woof woof')
class Gato(Animal):
    def __init__(self, nome):
        super().__init__(nome)
    def falar(self):
        print(f'{self._nome} says meow meow')
class Formiga(Animal):
    def __init__(self, nome):
        super().__init__(nome)
    def falar(self):
        print(f'{self._nome} says something')
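# A short demonstration (illustrative, not in the original) of the three ways
# to inspect the MRO, plus polymorphism through the shared falar() interface.
if __name__ == '__main__':
    print(Cachorro.__mro__)   # via the class property
    print(Cachorro.mro())     # via the mro() method
    help(Cachorro)            # via help()
    for animal in (Cachorro('Rex'), Gato('Felix'), Formiga('Zoe')):
        animal.falar()        # each subclass answers differently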
|
python
|
"""
[2014-11-19] Challenge #189 [Intermediate] Roman Numeral Conversion
https://www.reddit.com/r/dailyprogrammer/comments/2ms946/20141119_challenge_189_intermediate_roman_numeral/
Your friend is an anthropology major who is studying roman history. They have never been able to quite get a handle for
roman numerals and how to read them, so they've asked you to come up with a simple program that will let them input
some numbers and return roman numerals, as well as the opposite, to input roman numerals and return base-10 numbers.
They are bribing you with Indiana Jones memorabilia, so you are totally up for the challenge!
#Description
Most people learn about roman numerals at a young age. If you look at many analog clocks, you will find that many of
them actually use roman numerals for the numbers. Roman numerals do not just stop at 12 though, they actually can
represent numbers as high as 4999 using their most basic form.
The challenge, is to create a program that will allow you to convert decimal (base-10) numbers to roman numerals as
well as roman numerals to decimal numbers. The history of roman numerals is a bit debated because of their varied use
throughout history and a seeming lack of a standard definition. Some rules are well accepted and some less-so. Here are
the guidelines for your implementation:
| I | V | X  | L  | C   | D   | M    |
|:--|:--|---:|:---|:----|:----|:-----|
| 1 | 5 | 10 | 50 | 100 | 500 | 1000 |
#Rules
You cannot repeat the same roman numeral more than three times in a row, except for M, which can be added up to four
times. (Note: Some descriptions of roman numerals allows for IIII to represent 4 instead of IV. For the purposes of
this exercise, that is not allowed.)
When read from left to right, if successive roman numerals decrease or stay the same in value, you add them to the
total sum.
When read from left to right, if successive roman numerals increase in value, you subtract the smaller value from the
larger one and add the result to the total sum.
#Restrictions
I can only be subtracted from V or X
X can only be subtracted from L or C
C can only be subtracted from D or M
Only one smaller value can be subtracted from a following larger value. (e.g. 'IIX' would be an invalid way to
represent the number 8)
#Examples
XII = 10 + 1 + 1 = 12
MDCCLXXVI = 1000 + 500 + 100 + 100 + 50 + 10 + 10 + 5 + 1 = 1776
IX = "1 from 10" = 10 - 1 = 9
XCIV = "10 from 100" + "1 from 5" = (100 - 10) + (5 - 1) = 90 + 4 = 94
#Inputs & Outputs
Your program should be able to accept numbers in either integer or roman numeral format to return the other. You may
want to add validation checks on the input.
When converting to a roman numeral, the maximum number is 4999.
When converting from a roman numeral, I,V,X,L,C,D,M are the only valid characters.
You should be able to accept one or many numbers or numerals and convert to the other direction.
#Challenge
Some historical accounts state that roman numerals could actually go much higher than 4999. There are incredibly varied
explanations and syntactical requirements for them. Some state that an over-line (vinculum) would be used over a number
to multiply it by 1000, some say that you would put a curved line on either side of a number to multiply it by 1000.
For the challenge, see if you can add support to your code to allow parenthesis to encapsulate parts of a number that
can be multiplied by one thousand. You can nest parenthesis as well to allow for numbers that are incredibly large.
#Restriction
The last roman numeral digit inside a set of parenthesis can not be an "I". There are two reasons for this (1) because
historical accounts claimed that confusion would happen with the curved lines that encapsulate a number to be
multiplied by one thousand and (2) because the easiest way to validate your numbers is with Wolfram Alpha and they do
not allow it either.
#Examples
(V)M = 5*1000 + 1000 = 6000
(X)MMCCCXLV = 10*1000 + 1000 + 1000 + 100 + 100 + 100 + (50 - 10) + 5 = 10000 + 2000 + 300 + 40 + 5 = 12345
((XV)M)DCC = ((10 + 5) * 1000 + 1000) * 1000 + 500 + 100 + 100 = (15000 + 1000) * 1000 + 1700 = 16000000 + 1700 =
16001700
#Hints
You can visit Wolfram Alpha to validate some of your numbers if you are having any trouble.
http://www.wolframalpha.com/input/?i=314+in+roman+numerals
#Sample Data
##Basic
IV = 4
XXXIV = 34
CCLXVII = 267
DCCLXIV = 764
CMLXXXVII = 987
MCMLXXXIII = 1983
MMXIV = 2014
MMMM = 4000
MMMMCMXCIX = 4999
##Challenge
(V) = 5000
(V)CDLXXVIII = 5478
(V)M = 6000
(IX) = 9000
(X)M = 11000
(X)MM = 12000
(X)MMCCCXLV = 12345
(CCCX)MMMMCLIX = 314159
(DLXXV)MMMCCLXVII = 578267
(MMMCCXV)CDLXVIII = 3215468
(MMMMCCX)MMMMCDLXVIII = 4214468
(MMMMCCXV)CDLXVIII = 4215468
(MMMMCCXV)MMMCDLXVIII = 4218468
(MMMMCCXIX)CDLXVIII = 4219468
((XV)MDCCLXXV)MMCCXVI = 16777216
((CCCX)MMMMCLIX)CCLXV = 314159265
((MLXX)MMMDCCXL)MDCCCXXIV = 1073741824
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
Thanks to /u/pshatmsft for the submission!
"""
def main():
pass
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from .equipment_port_type_fragment import EquipmentPortTypeFragment, QUERY as EquipmentPortTypeFragmentQuery
QUERY: List[str] = EquipmentPortTypeFragmentQuery + ["""
query EquipmentPortTypeQuery($id: ID!) {
port_type: node(id: $id) {
... on EquipmentPortType {
...EquipmentPortTypeFragment
}
}
}
"""]
@dataclass
class EquipmentPortTypeQuery(DataClassJsonMixin):
@dataclass
class EquipmentPortTypeQueryData(DataClassJsonMixin):
@dataclass
class Node(EquipmentPortTypeFragment):
pass
port_type: Optional[Node]
data: EquipmentPortTypeQueryData
@classmethod
# fmt: off
def execute(cls, client: GraphqlClient, id: str) -> EquipmentPortTypeQueryData:
# fmt: off
variables = {"id": id}
response_text = client.call(''.join(set(QUERY)), variables=variables)
return cls.from_json(response_text).data
|
python
|
# from .base import Base
#
# def initDB(engine):
# metadata = Base.metadata
# metadata.create_all(engine)
# print ('Database structure created')
#
#
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
import json
import requests
import argparse
import lxml.html
import io
from lxml.cssselect import CSSSelector
YOUTUBE_COMMENTS_URL = 'https://www.youtube.com/all_comments?v={youtube_id}'
YOUTUBE_COMMENTS_AJAX_URL = 'https://www.youtube.com/comment_ajax'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'
def find_value(html, key, num_chars=2):
pos_begin = html.find(key) + len(key) + num_chars
pos_end = html.find('"', pos_begin)
return html[pos_begin: pos_end]
def extract_comments(html):
tree = lxml.html.fromstring(html)
item_sel = CSSSelector('.comment-item')
text_sel = CSSSelector('.comment-text-content')
time_sel = CSSSelector('.time')
author_sel = CSSSelector('.user-name')
for item in item_sel(tree):
yield {'cid': item.get('data-cid'),
'text': text_sel(item)[0].text_content(),
'time': time_sel(item)[0].text_content().strip(),
'author': author_sel(item)[0].text_content()}
def extract_reply_cids(html):
tree = lxml.html.fromstring(html)
sel = CSSSelector('.comment-replies-header > .load-comments')
return [i.get('data-cid') for i in sel(tree)]
def ajax_request(session, url, params, data, retries=10, sleep=20):
for _ in range(retries):
response = session.post(url, params=params, data=data)
if response.status_code == 200:
response_dict = json.loads(response.text)
return response_dict.get('page_token', None), response_dict['html_content']
        else:
            time.sleep(sleep)
    return None  # all retries failed; callers treat a None result as terminal
def download_comments(youtube_id, sleep=1):
session = requests.Session()
session.headers['User-Agent'] = USER_AGENT
# Get Youtube page with initial comments
response = session.get(YOUTUBE_COMMENTS_URL.format(youtube_id=youtube_id))
html = response.text
reply_cids = extract_reply_cids(html)
ret_cids = []
for comment in extract_comments(html):
ret_cids.append(comment['cid'])
yield comment
page_token = find_value(html, 'data-token')
session_token = find_value(html, 'XSRF_TOKEN', 4)
first_iteration = True
# Get remaining comments (the same as pressing the 'Show more' button)
while page_token:
data = {'video_id': youtube_id,
'session_token': session_token}
params = {'action_load_comments': 1,
'order_by_time': True,
'filter': youtube_id}
if first_iteration:
params['order_menu'] = True
else:
data['page_token'] = page_token
response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
if not response:
break
page_token, html = response
reply_cids += extract_reply_cids(html)
for comment in extract_comments(html):
if comment['cid'] not in ret_cids:
ret_cids.append(comment['cid'])
yield comment
first_iteration = False
time.sleep(sleep)
# Get replies (the same as pressing the 'View all X replies' link)
for cid in reply_cids:
data = {'comment_id': cid,
'video_id': youtube_id,
'can_reply': 1,
'session_token': session_token}
params = {'action_load_replies': 1,
'order_by_time': True,
'filter': youtube_id,
'tab': 'inbox'}
response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
if not response:
break
_, html = response
for comment in extract_comments(html):
if comment['cid'] not in ret_cids:
ret_cids.append(comment['cid'])
yield comment
time.sleep(sleep)
def main(argv):
parser = argparse.ArgumentParser(add_help=False, description=('Download Youtube comments without using the Youtube API'))
parser.add_argument('--help', '-h', action='help', default=argparse.SUPPRESS, help='Show this help message and exit')
parser.add_argument('--youtubeid', '-y', help='ID of Youtube video for which to download the comments')
parser.add_argument('--output', '-o', help='Output filename (output format is line delimited JSON)')
parser.add_argument('--limit', '-l', type=int, help='Limit the number of comments')
try:
args = parser.parse_args(argv)
youtube_id = args.youtubeid
output = args.output
limit = args.limit
if not youtube_id or not output:
parser.print_usage()
raise ValueError('you need to specify a Youtube ID and an output filename')
print('Downloading Youtube comments for video:', youtube_id)
count = 0
with io.open(output, 'w', encoding='utf8') as fp:
for comment in download_comments(youtube_id):
print(json.dumps(comment, ensure_ascii=False), file=fp)
count += 1
sys.stdout.write('Downloaded %d comment(s)\r' % count)
sys.stdout.flush()
if limit and count >= limit:
break
print('\nDone!')
except Exception as e:
print('Error:', str(e))
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
|
python
|
from django.db import models
# Create your models here.
from blog.models import Artikel
from django.contrib.auth.models import User
class Comment(models.Model):
message = models.TextField()
artikel_creator_username = models.CharField(max_length=200, null=True, blank=True)
    artikel_creator = models.ForeignKey(Artikel, on_delete=models.CASCADE, blank=True, null=True)
    comment_creator = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
created_at = models.CharField(max_length=50,null=True, blank=True)
comment_creator_username = models.CharField(max_length=200, null=True, blank=True)
id_forum = models.CharField(max_length=200, null=True, blank=True)
id_user = models.CharField(max_length=200, null=True, blank=True)
|
python
|
import json
from http import HTTPStatus
from unittest.mock import patch
from bridges.tests.api.basic_test import BasicTest
DUMMY_USER_FULL_NAME = 'John Doe'
DUMMY_USER_EMAIL = '[email protected]'
class GetWhoAmITest(BasicTest):
"""
Class to test whoami endpoint.
"""
@patch('bridges.api.endpoints.info.get_user_name_and_email_from_session',
return_value={'userFullName': DUMMY_USER_FULL_NAME, 'userEmail': DUMMY_USER_EMAIL})
def test_empty(self, _):
future = self.make_future_get_request("info/whoami")
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.OK)
data = json.loads(http_response.get_data(as_text=True))
        self.assertEqual(DUMMY_USER_FULL_NAME, data['userFullName'])
        self.assertEqual(DUMMY_USER_EMAIL, data['userEmail'])
|
python
|
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#importing the Magics module (assumed to be Magics.macro, the standard Python interface)
from Magics.macro import *
#Setting of the output file name
files = output({"output_formats":['ps', 'png'],
'output_name':'cloud_cover_asia'})
#Setting the coordinates of the geographical area
asia = mmap({ ???})
#Coastlines setting
coast = mcoast( {???})
#Import the cloud data
cloud_cover = mgrib({ "grib_input_file_name" : "cloud_cover.grb" })
colour_list= ['HSL(0,0,1)','HSL(29,0.14,0.92)',
'HSL(29,0.29,0.83)','HSL(29,0.43,0.75)','HSL(300,0.08,0.92)',
'HSL(360,0.16,0.84)','HSL(13,0.3,0.75)','HSL(18,0.44,0.67)',
'HSL(300,0.16,0.83)','HSL(340,0.22,0.75)','HSL(360,0.34,0.67)',
'HSL(8,0.47,0.58)','HSL(300,0.24,0.75)','HSL(330,0.28,0.67)',
'HSL(349,0.38,0.58)','HSL(360,0.5,0.5)','HSL(180,0.17,0.92)',
'HSL(120,0.08,0.84)','HSL(57,0.17,0.75)','HSL(44,0.3,0.67)',
'HSL(209,0.14,0.84)','HSL(187,0,0.75)','HSL(29,0.15,0.67)',
'HSL(29,0.29,0.59)','HSL(239,0.16,0.75)','HSL(299,0.08,0.67)',
'HSL(360,0.17,0.58)','HSL(13,0.3,0.5)','HSL(258,0.21,0.67)',
'HSL(299,0.16,0.59)','HSL(341,0.22,0.5)','HSL(360,0.33,0.42)',
'HSL(180,0.34,0.83)','HSL(161,0.22,0.75)','HSL(120,0.16,0.67)',
'HSL(78,0.21,0.58)','HSL(193,0.3,0.75)','HSL(180,0.17,0.67)',
'HSL(120,0.08,0.58)','HSL(59,0.16,0.5)','HSL(209,0.29,0.67)',
'HSL(209,0.15,0.58)','HSL(217,0,0.5)','HSL(29,0.14,0.42)',
'HSL(224,0.3,0.58)','HSL(237,0.17,0.5)','HSL(299,0.08,0.42)',
'HSL(360,0.16,0.33)','HSL(180,0.5, 0.75)','HSL(169,0.38,0.67)',
'HSL(150,0.28,0.58)','HSL(120,0.24,0.5)','HSL(188,0.47,0.67)',
'HSL(180,0.34,0.59)','HSL(160,0.22,0.5)','HSL(120,0.16,0.42)',
'HSL(198,0.44,0.58)','HSL(193,0.3,0.5)','HSL(180,0.17,0.42)',
'HSL(120,0.08,0.33)','HSL(209,0.43,0.5)','HSL(209,0.29,0.42)',
'HSL(209,0.14,0.33)','HSL(191,0,0.25)']
#Define the cloud cover
cloud_cover_contour = mcont({
????
'contour_shade_technique': 'cell_shading',
'contour_shade_colour_method': 'list',
'contour_shade_colour_list': colour_list,
})
texts = [ [colour_list[3], "Low"],
[colour_list[15], "L+M"],
[colour_list[12], "Medium"],
[colour_list[60], "M+H"],
[colour_list[48], "High"],
[colour_list[51], "H+L"]
]
line = " "
for t in texts:
line = line + "<font colour='" + t[0] + "'> " + t[1] + " </font>"
#Here we configure the title and add some colours
lines = ["Cloud cover valid for <grib_info key='valid-date'/>",
line
]
title = mtext({
"text_lines" : lines,
?????})
#To plot (argument order assumed from typical Magics examples)
plot(files, asia, coast, cloud_cover, cloud_cover_contour, title)
|
python
|
"""
This class is responsible for storing all the information about the current state of a chess game.
It will also be responsible for determining the valid moves at the current state.
It will also keep a move log.
"""
class GameState():
def __init__(self):
# board is an 8x8 2d list, each element of the list has 2 characters.
        # The first character represents the color of the piece, 'b' or 'w'
# The second character represents the type of the piece, 'K', 'Q' 'R', 'B', 'N' or 'P'
# "--" - represents an empty space with no piece.
self.board = [
["bR", "bN", "bB", "bQ", "bK", "bB", "bN", "bR"],
["bp", "bp", "bp", "bp", "bp", "bp", "bp", "bp"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["wp", "wp", "wp", "wp", "wp", "wp", "wp", "wp"],
["wR", "wN", "wB", "wQ", "wK", "wB", "wN", "wR"]]
self.moveFunctions = {'p': self.getPawnMoves, 'R': self.getRookMoves, 'N': self.getKnightMoves,
'B': self.getBishopMoves, 'Q': self.getQueenMoves, 'K': self.getKingMoves}
self.whiteToMove = True
self.moveLog = []
self.whiteKingLocation = (7, 4)
self.blackKingLocation = (0, 4)
self.checkMate = False # the king has no valid moves and is in check
self.staleMate = False # the king has no valid moves and is not in check
"""
Takes a Move as a parameter and executes it (this will not work for castling, pawn promotion and en-passant)
"""
def makeMove(self, move):
self.board[move.startRow][move.startCol] = "--" # leave square of moved piece blank
self.board[move.endRow][move.endCol] = move.pieceMoved
self.moveLog.append(move) # log the move so we can undo it later or display the history
self.whiteToMove = not self.whiteToMove #swap players
# update the king's location if moved
if move.pieceMoved == 'wK':
self.whiteKingLocation = (move.endRow, move.endCol)
elif move.pieceMoved == 'bK':
self.blackKingLocation = (move.endRow, move.endCol)
# pawn promotion
if move.isPawnPromotion:
self.board[move.endRow][move.endCol] = move.pieceMoved[0] + 'Q'
"""
Undo the last move made
"""
def undoMove(self):
if len(self.moveLog) != 0: # make sure that there is a move to undo
move = self.moveLog.pop()
self.board[move.startRow][move.startCol] = move.pieceMoved
self.board[move.endRow][move.endCol] = move.pieceCaptured
self.whiteToMove = not self.whiteToMove # switch turns back
            # restore the king's position if the undone move moved a king
if move.pieceMoved == 'wK':
self.whiteKingLocation = (move.startRow, move.startCol)
elif move.pieceMoved == 'bK':
self.blackKingLocation = (move.startRow, move.startCol)
            self.checkMate = False
            self.staleMate = False
"""
All moves considering checks
"""
def getValidMoves(self):
# algorithm
# 1) generate all possible moves
moves = self.getAllPossibleMoves()
        # 2) for each move, make the move
for i in range(len(moves)-1, -1, -1):
self.makeMove(moves[i])
            # # why we remove from a list backwards: it avoids index-shifting bugs
# nums = [0, 1, 2, 3, 4, 5]
# for num in nums:
# if num == 3:
# nums.remove(num)
# 3) generate all opponents moves
# 4) for each of your opponents moves, see if they attack your king
self.whiteToMove = not self.whiteToMove
if self.inCheck():
moves.remove(moves[i]) # 5) if they do attack your king, not a valid move
self.whiteToMove = not self.whiteToMove
self.undoMove()
if len(moves) == 0: # either checkmate or stalemate, there are 0 valid moves left
if self.inCheck():
self.checkMate = True
else:
self.staleMate = True
else:
self.checkMate = False
self.staleMate = False
return moves
"""
Determine if the current player is in check
"""
def inCheck(self):
if self.whiteToMove:
return self.squareUnderAttack(self.whiteKingLocation[0], self.whiteKingLocation[1])
else:
return self.squareUnderAttack(self.blackKingLocation[0], self.blackKingLocation[1])
"""
Determine if the enemy can attack the square r, c
"""
def squareUnderAttack(self, r, c):
self.whiteToMove = not self.whiteToMove # switch to opponent's turn
oppMoves = self.getAllPossibleMoves()
for move in oppMoves:
            if move.endRow == r and move.endCol == c: # the king's square is under attack
self.whiteToMove = not self.whiteToMove
return True
self.whiteToMove = not self.whiteToMove # switch turns back
return False
"""
All moves without considering checks
"""
def getAllPossibleMoves(self):
moves = []
for r in range(len(self.board)): # number of rows
for c in range(len(self.board[r])): # number of cols in given row
                turn = self.board[r][c][0] # first character of the square: 'w', 'b', or '-'
if (turn == 'w' and self.whiteToMove) or (turn == 'b' and not self.whiteToMove):
piece = self.board[r][c][1]
                    self.moveFunctions[piece](r, c, moves) # calls the appropriate move function on piece type
return moves
"""
Get all the pawn moves for the pawn located at row, col and add these moves to the list
"""
def getPawnMoves(self, r, c, moves):
if self.whiteToMove: # white pawn moves
if self.board[r-1][c] == "--": # 1 square pawn advance
moves.append(Move((r,c), (r-1,c), self.board))
if r == 6 and self.board[r-2][c] == "--": # 2 square pawn advance
moves.append(Move((r,c), (r-2,c), self.board))
if c-1 >= 0:
if self.board[r-1][c-1][0] == 'b': # there exists an enemy piece to capture to the left
moves.append(Move((r,c), (r-1,c-1), self.board))
if c+1 <= 7:
if self.board[r-1][c+1][0] == 'b': # there exists an enemy piece to capture to the right
moves.append(Move((r,c), (r-1,c+1), self.board))
else: # black pawn moves
if self.board[r+1][c] == "--": # 1 square pawn advance
moves.append(Move((r,c), (r+1,c), self.board))
if r == 1 and self.board[r+2][c] == "--": # 2 square pawn advance
moves.append(Move((r,c), (r+2,c), self.board))
if c-1 >= 0:
if self.board[r+1][c-1][0] == 'w': # there exists an enemy piece to capture to the left
moves.append(Move((r,c), (r+1,c-1), self.board))
if c+1 <= 7:
if self.board[r+1][c+1][0] == 'w': # there exists an enemy piece to capture to the right
moves.append(Move((r,c), (r+1,c+1), self.board))
"""
Get all the rook moves for the rook located at row, col and add these moves to the list
"""
def getRookMoves(self, r, c, moves): # either get to friendly, enemy, or empty moves
directions = ((-1,0), (0,-1), (1,0), (0,1)) # up, left, down, right
enemyColor = "b" if self.whiteToMove else "w"
for d in directions:
for i in range(1,8):
endRow = r + d[0] * i
endCol = c + d[1] * i
if 0 <= endRow < 8 and 0 <= endCol < 8: # on board
endPiece = self.board[endRow][endCol]
if endPiece == "--": # empty space valid
moves.append(Move((r,c), (endRow,endCol), self.board))
elif endPiece[0] == enemyColor: # enemy piece valid
moves.append(Move((r,c), (endRow, endCol), self.board))
break
else: # friendly piece invalid
break
else:
break
"""
Get all the bishop moves for the bishop located at row, col and add these moves to the list
"""
def getBishopMoves(self, r, c, moves):
directions = ((-1,-1),(-1,1),(1,-1),(1,1)) # TL, TR, BL, BR
enemyColor = "b" if self.whiteToMove else "w"
for d in directions:
            for i in range(1,8): # a bishop can slide at most 7 squares, so i runs 1-7
endRow = r + d[0] * i
endCol = c + d[1] * i
if 0 <= endRow < 8 and 0 <= endCol < 8: # on board
endPiece = self.board[endRow][endCol]
if endPiece == "--": # empty space valid
moves.append(Move((r,c), (endRow,endCol), self.board))
elif endPiece[0] == enemyColor: # enemy piece valid
moves.append(Move((r,c), (endRow, endCol), self.board))
break
else: # friendly piece invalid
break
else:
break
"""
Get all the queen moves for the queen located at row, col and add these moves to the list
"""
def getQueenMoves(self, r, c, moves):
self.getRookMoves(r, c, moves)
self.getBishopMoves(r, c, moves)
"""
Get all the knight moves for the knight located at row, col and add these moves to the list
"""
def getKnightMoves(self, r, c, moves):
knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))
allyColor = "w" if self.whiteToMove else "b"
for i in range(8):
endRow = r + knightMoves[i][0]
endCol = c + knightMoves[i][1]
if 0 <= endRow < 8 and 0 <= endCol < 8:
endPiece = self.board[endRow][endCol]
if endPiece[0] != allyColor:
moves.append(Move((r,c), (endRow, endCol), self.board))
"""
Get all the king moves for the king located at row, col and add these moves to the list
"""
def getKingMoves(self, r, c, moves):
kingMoves = ((-1, -1),(-1, 0),(-1, 1),(0, -1),(0, 1),(1, -1),(1, 0),(1, 1))
allyColor = "w" if self.whiteToMove else "b"
for i in range(8):
endRow = r + kingMoves[i][0]
endCol = c + kingMoves[i][1]
if 0 <= endRow < 8 and 0 <= endCol < 8:
endPiece = self.board[endRow][endCol]
if endPiece[0] != allyColor:
moves.append(Move((r,c), (endRow, endCol), self.board))
class Move():
# maps keys to values
# key : value
ranksToRows = {"1": 7, "2": 6, "3": 5, "4": 4,
"5": 3, "6": 2, "7": 1, "8": 0}
rowToRanks = {v: k for k, v in ranksToRows.items()}
filesToCols = {"a": 0, "b": 1, "c": 2, "d": 3,
"e": 4, "f": 5, "g": 6, "h": 7}
colsToFiles = {v: k for k, v in filesToCols.items()}
def __init__(self, startSq, endSq, board):
self.startRow = startSq[0]
self.startCol = startSq[1]
self.endRow = endSq[0]
self.endCol = endSq[1]
self.pieceMoved = board[self.startRow][self.startCol]
self.pieceCaptured = board[self.endRow][self.endCol]
self.isPawnPromotion = False
if (self.pieceMoved == 'wp' and self.endRow == 0) or (self.pieceMoved == 'bp' and self.endRow == 7):
self.isPawnPromotion = True
self.moveID = self.startRow * 1000 + self.startCol * 100 + self.endRow * 10 + self.endCol
"""
Overriding the equals method
"""
def __eq__(self, other):
if isinstance(other, Move):
return self.moveID == other.moveID
return False
def getChessNotation(self):
# you can add to make this real chess notation
return self.getRankFile(self.startRow, self.startCol) + " -> " + self.getRankFile(self.endRow, self.endCol)
def getRankFile(self,r,c):
return self.colsToFiles[c] + self.rowToRanks[r]
|
python
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Power flow data for IEEE 118 bus test case.
"""
from numpy import array
def case118():
"""Power flow data for IEEE 118 bus test case.
Please see L{caseformat} for details on the case file format.
This data was converted from IEEE Common Data Format
(ieee118cdf.txt) on 20-Sep-2004 by cdf2matp, rev. 1.11
See end of file for warnings generated during conversion.
Converted from IEEE CDF file from:
U{http://www.ee.washington.edu/research/pstca/}
    With baseKV data taken from the PSAP format file from the same site,
added manually on 10-Mar-2006.
08/25/93 UW ARCHIVE 100.0 1961 W IEEE 118 Bus Test Case
@return: Power flow data for IEEE 118 bus test case.
"""
ppc = {"version": '2'}
##----- Power Flow Data -----##
## system MVA base
ppc["baseMVA"] = 100.0
## bus data
# bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
ppc["bus"] = array([
[1, 2, 51, 27, 0, 0, 1, 0.955, 10.67, 138, 1, 1.06, 0.94],
[2, 1, 20, 9, 0, 0, 1, 0.971, 11.22, 138, 1, 1.06, 0.94],
[3, 1, 39, 10, 0, 0, 1, 0.968, 11.56, 138, 1, 1.06, 0.94],
[4, 2, 39, 12, 0, 0, 1, 0.998, 15.28, 138, 1, 1.06, 0.94],
[5, 1, 0, 0, 0, -40, 1, 1.002, 15.73, 138, 1, 1.06, 0.94],
[6, 2, 52, 22, 0, 0, 1, 0.99, 13, 138, 1, 1.06, 0.94],
[7, 1, 19, 2, 0, 0, 1, 0.989, 12.56, 138, 1, 1.06, 0.94],
[8, 2, 28, 0, 0, 0, 1, 1.015, 20.77, 345, 1, 1.06, 0.94],
[9, 1, 0, 0, 0, 0, 1, 1.043, 28.02, 345, 1, 1.06, 0.94],
[10, 2, 0, 0, 0, 0, 1, 1.05, 35.61, 345, 1, 1.06, 0.94],
[11, 1, 70, 23, 0, 0, 1, 0.985, 12.72, 138, 1, 1.06, 0.94],
[12, 2, 47, 10, 0, 0, 1, 0.99, 12.2, 138, 1, 1.06, 0.94],
[13, 1, 34, 16, 0, 0, 1, 0.968, 11.35, 138, 1, 1.06, 0.94],
[14, 1, 14, 1, 0, 0, 1, 0.984, 11.5, 138, 1, 1.06, 0.94],
[15, 2, 90, 30, 0, 0, 1, 0.97, 11.23, 138, 1, 1.06, 0.94],
[16, 1, 25, 10, 0, 0, 1, 0.984, 11.91, 138, 1, 1.06, 0.94],
[17, 1, 11, 3, 0, 0, 1, 0.995, 13.74, 138, 1, 1.06, 0.94],
[18, 2, 60, 34, 0, 0, 1, 0.973, 11.53, 138, 1, 1.06, 0.94],
[19, 2, 45, 25, 0, 0, 1, 0.963, 11.05, 138, 1, 1.06, 0.94],
[20, 1, 18, 3, 0, 0, 1, 0.958, 11.93, 138, 1, 1.06, 0.94],
[21, 1, 14, 8, 0, 0, 1, 0.959, 13.52, 138, 1, 1.06, 0.94],
[22, 1, 10, 5, 0, 0, 1, 0.97, 16.08, 138, 1, 1.06, 0.94],
[23, 1, 7, 3, 0, 0, 1, 1, 21, 138, 1, 1.06, 0.94],
[24, 2, 13, 0, 0, 0, 1, 0.992, 20.89, 138, 1, 1.06, 0.94],
[25, 2, 0, 0, 0, 0, 1, 1.05, 27.93, 138, 1, 1.06, 0.94],
[26, 2, 0, 0, 0, 0, 1, 1.015, 29.71, 345, 1, 1.06, 0.94],
[27, 2, 71, 13, 0, 0, 1, 0.968, 15.35, 138, 1, 1.06, 0.94],
[28, 1, 17, 7, 0, 0, 1, 0.962, 13.62, 138, 1, 1.06, 0.94],
[29, 1, 24, 4, 0, 0, 1, 0.963, 12.63, 138, 1, 1.06, 0.94],
[30, 1, 0, 0, 0, 0, 1, 0.968, 18.79, 345, 1, 1.06, 0.94],
[31, 2, 43, 27, 0, 0, 1, 0.967, 12.75, 138, 1, 1.06, 0.94],
[32, 2, 59, 23, 0, 0, 1, 0.964, 14.8, 138, 1, 1.06, 0.94],
[33, 1, 23, 9, 0, 0, 1, 0.972, 10.63, 138, 1, 1.06, 0.94],
[34, 2, 59, 26, 0, 14, 1, 0.986, 11.3, 138, 1, 1.06, 0.94],
[35, 1, 33, 9, 0, 0, 1, 0.981, 10.87, 138, 1, 1.06, 0.94],
[36, 2, 31, 17, 0, 0, 1, 0.98, 10.87, 138, 1, 1.06, 0.94],
[37, 1, 0, 0, 0, -25, 1, 0.992, 11.77, 138, 1, 1.06, 0.94],
[38, 1, 0, 0, 0, 0, 1, 0.962, 16.91, 345, 1, 1.06, 0.94],
[39, 1, 27, 11, 0, 0, 1, 0.97, 8.41, 138, 1, 1.06, 0.94],
[40, 2, 66, 23, 0, 0, 1, 0.97, 7.35, 138, 1, 1.06, 0.94],
[41, 1, 37, 10, 0, 0, 1, 0.967, 6.92, 138, 1, 1.06, 0.94],
[42, 2, 96, 23, 0, 0, 1, 0.985, 8.53, 138, 1, 1.06, 0.94],
[43, 1, 18, 7, 0, 0, 1, 0.978, 11.28, 138, 1, 1.06, 0.94],
[44, 1, 16, 8, 0, 10, 1, 0.985, 13.82, 138, 1, 1.06, 0.94],
[45, 1, 53, 22, 0, 10, 1, 0.987, 15.67, 138, 1, 1.06, 0.94],
[46, 2, 28, 10, 0, 10, 1, 1.005, 18.49, 138, 1, 1.06, 0.94],
[47, 1, 34, 0, 0, 0, 1, 1.017, 20.73, 138, 1, 1.06, 0.94],
[48, 1, 20, 11, 0, 15, 1, 1.021, 19.93, 138, 1, 1.06, 0.94],
[49, 2, 87, 30, 0, 0, 1, 1.025, 20.94, 138, 1, 1.06, 0.94],
[50, 1, 17, 4, 0, 0, 1, 1.001, 18.9, 138, 1, 1.06, 0.94],
[51, 1, 17, 8, 0, 0, 1, 0.967, 16.28, 138, 1, 1.06, 0.94],
[52, 1, 18, 5, 0, 0, 1, 0.957, 15.32, 138, 1, 1.06, 0.94],
[53, 1, 23, 11, 0, 0, 1, 0.946, 14.35, 138, 1, 1.06, 0.94],
[54, 2, 113, 32, 0, 0, 1, 0.955, 15.26, 138, 1, 1.06, 0.94],
[55, 2, 63, 22, 0, 0, 1, 0.952, 14.97, 138, 1, 1.06, 0.94],
[56, 2, 84, 18, 0, 0, 1, 0.954, 15.16, 138, 1, 1.06, 0.94],
[57, 1, 12, 3, 0, 0, 1, 0.971, 16.36, 138, 1, 1.06, 0.94],
[58, 1, 12, 3, 0, 0, 1, 0.959, 15.51, 138, 1, 1.06, 0.94],
[59, 2, 277, 113, 0, 0, 1, 0.985, 19.37, 138, 1, 1.06, 0.94],
[60, 1, 78, 3, 0, 0, 1, 0.993, 23.15, 138, 1, 1.06, 0.94],
[61, 2, 0, 0, 0, 0, 1, 0.995, 24.04, 138, 1, 1.06, 0.94],
[62, 2, 77, 14, 0, 0, 1, 0.998, 23.43, 138, 1, 1.06, 0.94],
[63, 1, 0, 0, 0, 0, 1, 0.969, 22.75, 345, 1, 1.06, 0.94],
[64, 1, 0, 0, 0, 0, 1, 0.984, 24.52, 345, 1, 1.06, 0.94],
[65, 2, 0, 0, 0, 0, 1, 1.005, 27.65, 345, 1, 1.06, 0.94],
[66, 2, 39, 18, 0, 0, 1, 1.05, 27.48, 138, 1, 1.06, 0.94],
[67, 1, 28, 7, 0, 0, 1, 1.02, 24.84, 138, 1, 1.06, 0.94],
[68, 1, 0, 0, 0, 0, 1, 1.003, 27.55, 345, 1, 1.06, 0.94],
[69, 3, 0, 0, 0, 0, 1, 1.035, 30, 138, 1, 1.06, 0.94],
[70, 2, 66, 20, 0, 0, 1, 0.984, 22.58, 138, 1, 1.06, 0.94],
[71, 1, 0, 0, 0, 0, 1, 0.987, 22.15, 138, 1, 1.06, 0.94],
[72, 2, 12, 0, 0, 0, 1, 0.98, 20.98, 138, 1, 1.06, 0.94],
[73, 2, 6, 0, 0, 0, 1, 0.991, 21.94, 138, 1, 1.06, 0.94],
[74, 2, 68, 27, 0, 12, 1, 0.958, 21.64, 138, 1, 1.06, 0.94],
[75, 1, 47, 11, 0, 0, 1, 0.967, 22.91, 138, 1, 1.06, 0.94],
[76, 2, 68, 36, 0, 0, 1, 0.943, 21.77, 138, 1, 1.06, 0.94],
[77, 2, 61, 28, 0, 0, 1, 1.006, 26.72, 138, 1, 1.06, 0.94],
[78, 1, 71, 26, 0, 0, 1, 1.003, 26.42, 138, 1, 1.06, 0.94],
[79, 1, 39, 32, 0, 20, 1, 1.009, 26.72, 138, 1, 1.06, 0.94],
[80, 2, 130, 26, 0, 0, 1, 1.04, 28.96, 138, 1, 1.06, 0.94],
[81, 1, 0, 0, 0, 0, 1, 0.997, 28.1, 345, 1, 1.06, 0.94],
[82, 1, 54, 27, 0, 20, 1, 0.989, 27.24, 138, 1, 1.06, 0.94],
[83, 1, 20, 10, 0, 10, 1, 0.985, 28.42, 138, 1, 1.06, 0.94],
[84, 1, 11, 7, 0, 0, 1, 0.98, 30.95, 138, 1, 1.06, 0.94],
[85, 2, 24, 15, 0, 0, 1, 0.985, 32.51, 138, 1, 1.06, 0.94],
[86, 1, 21, 10, 0, 0, 1, 0.987, 31.14, 138, 1, 1.06, 0.94],
[87, 2, 0, 0, 0, 0, 1, 1.015, 31.4, 161, 1, 1.06, 0.94],
[88, 1, 48, 10, 0, 0, 1, 0.987, 35.64, 138, 1, 1.06, 0.94],
[89, 2, 0, 0, 0, 0, 1, 1.005, 39.69, 138, 1, 1.06, 0.94],
[90, 2, 163, 42, 0, 0, 1, 0.985, 33.29, 138, 1, 1.06, 0.94],
[91, 2, 10, 0, 0, 0, 1, 0.98, 33.31, 138, 1, 1.06, 0.94],
[92, 2, 65, 10, 0, 0, 1, 0.993, 33.8, 138, 1, 1.06, 0.94],
[93, 1, 12, 7, 0, 0, 1, 0.987, 30.79, 138, 1, 1.06, 0.94],
[94, 1, 30, 16, 0, 0, 1, 0.991, 28.64, 138, 1, 1.06, 0.94],
[95, 1, 42, 31, 0, 0, 1, 0.981, 27.67, 138, 1, 1.06, 0.94],
[96, 1, 38, 15, 0, 0, 1, 0.993, 27.51, 138, 1, 1.06, 0.94],
[97, 1, 15, 9, 0, 0, 1, 1.011, 27.88, 138, 1, 1.06, 0.94],
[98, 1, 34, 8, 0, 0, 1, 1.024, 27.4, 138, 1, 1.06, 0.94],
[99, 2, 42, 0, 0, 0, 1, 1.01, 27.04, 138, 1, 1.06, 0.94],
[100, 2, 37, 18, 0, 0, 1, 1.017, 28.03, 138, 1, 1.06, 0.94],
[101, 1, 22, 15, 0, 0, 1, 0.993, 29.61, 138, 1, 1.06, 0.94],
[102, 1, 5, 3, 0, 0, 1, 0.991, 32.3, 138, 1, 1.06, 0.94],
[103, 2, 23, 16, 0, 0, 1, 1.001, 24.44, 138, 1, 1.06, 0.94],
[104, 2, 38, 25, 0, 0, 1, 0.971, 21.69, 138, 1, 1.06, 0.94],
[105, 2, 31, 26, 0, 20, 1, 0.965, 20.57, 138, 1, 1.06, 0.94],
[106, 1, 43, 16, 0, 0, 1, 0.962, 20.32, 138, 1, 1.06, 0.94],
[107, 2, 50, 12, 0, 6, 1, 0.952, 17.53, 138, 1, 1.06, 0.94],
[108, 1, 2, 1, 0, 0, 1, 0.967, 19.38, 138, 1, 1.06, 0.94],
[109, 1, 8, 3, 0, 0, 1, 0.967, 18.93, 138, 1, 1.06, 0.94],
[110, 2, 39, 30, 0, 6, 1, 0.973, 18.09, 138, 1, 1.06, 0.94],
[111, 2, 0, 0, 0, 0, 1, 0.98, 19.74, 138, 1, 1.06, 0.94],
[112, 2, 68, 13, 0, 0, 1, 0.975, 14.99, 138, 1, 1.06, 0.94],
[113, 2, 6, 0, 0, 0, 1, 0.993, 13.74, 138, 1, 1.06, 0.94],
[114, 1, 8, 3, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
[115, 1, 22, 7, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
[116, 2, 184, 0, 0, 0, 1, 1.005, 27.12, 138, 1, 1.06, 0.94],
[117, 1, 20, 8, 0, 0, 1, 0.974, 10.67, 138, 1, 1.06, 0.94],
[118, 1, 33, 15, 0, 0, 1, 0.949, 21.92, 138, 1, 1.06, 0.94]
])
## generator data
# bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
# Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
ppc["gen"] = array([
[1, 0, 0, 15, -5, 0.955, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4, 0, 0, 300, -300, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[6, 0, 0, 50, -13, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[8, 0, 0, 300, -300, 1.015, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 450, 0, 200, -147, 1.05, 100, 1, 550, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 85, 0, 120, -35, 0.99, 100, 1, 185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[15, 0, 0, 30, -10, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[18, 0, 0, 50, -16, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[19, 0, 0, 24, -8, 0.962, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[24, 0, 0, 300, -300, 0.992, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[25, 220, 0, 140, -47, 1.05, 100, 1, 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[26, 314, 0, 1000, -1000, 1.015, 100, 1, 414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[27, 0, 0, 300, -300, 0.968, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[31, 7, 0, 300, -300, 0.967, 100, 1, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[32, 0, 0, 42, -14, 0.963, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[34, 0, 0, 24, -8, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[36, 0, 0, 24, -8, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[40, 0, 0, 300, -300, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[42, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[46, 19, 0, 100, -100, 1.005, 100, 1, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49, 204, 0, 210, -85, 1.025, 100, 1, 304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[54, 48, 0, 300, -300, 0.955, 100, 1, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[55, 0, 0, 23, -8, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[56, 0, 0, 15, -8, 0.954, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[59, 155, 0, 180, -60, 0.985, 100, 1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[61, 160, 0, 300, -100, 0.995, 100, 1, 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[62, 0, 0, 20, -20, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[65, 391, 0, 200, -67, 1.005, 100, 1, 491, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[66, 392, 0, 200, -67, 1.05, 100, 1, 492, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[69, 516.4, 0, 300, -300, 1.035, 100, 1, 805.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[70, 0, 0, 32, -10, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[72, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[73, 0, 0, 100, -100, 0.991, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[74, 0, 0, 9, -6, 0.958, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[76, 0, 0, 23, -8, 0.943, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[77, 0, 0, 70, -20, 1.006, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[80, 477, 0, 280, -165, 1.04, 100, 1, 577, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[85, 0, 0, 23, -8, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[87, 4, 0, 1000, -100, 1.015, 100, 1, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[89, 607, 0, 300, -210, 1.005, 100, 1, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[90, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[91, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[92, 0, 0, 9, -3, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[99, 0, 0, 100, -100, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[100, 252, 0, 155, -50, 1.017, 100, 1, 352, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[103, 40, 0, 40, -15, 1.01, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[104, 0, 0, 23, -8, 0.971, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[105, 0, 0, 23, -8, 0.965, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[107, 0, 0, 200, -200, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[110, 0, 0, 23, -8, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[111, 36, 0, 1000, -100, 0.98, 100, 1, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[112, 0, 0, 1000, -100, 0.975, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[113, 0, 0, 200, -100, 0.993, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[116, 0, 0, 1000, -1000, 1.005, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
## branch data
# fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
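# e.g. the row [1, 2, 0.0303, 0.0999, 0.0254, ...] below is the in-service line
# from bus 1 to bus 2 with r=0.0303 p.u., x=0.0999 p.u. and total charging b=0.0254 p.u.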
ppc["branch"] = array([
[1, 2, 0.0303, 0.0999, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
[1, 3, 0.0129, 0.0424, 0.01082, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 5, 0.00176, 0.00798, 0.0021, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 5, 0.0241, 0.108, 0.0284, 9900, 0, 0, 0, 0, 1, -360, 360],
[5, 6, 0.0119, 0.054, 0.01426, 9900, 0, 0, 0, 0, 1, -360, 360],
[6, 7, 0.00459, 0.0208, 0.0055, 9900, 0, 0, 0, 0, 1, -360, 360],
[8, 9, 0.00244, 0.0305, 1.162, 9900, 0, 0, 0, 0, 1, -360, 360],
[8, 5, 0, 0.0267, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
[9, 10, 0.00258, 0.0322, 1.23, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 11, 0.0209, 0.0688, 0.01748, 9900, 0, 0, 0, 0, 1, -360, 360],
[5, 11, 0.0203, 0.0682, 0.01738, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 12, 0.00595, 0.0196, 0.00502, 9900, 0, 0, 0, 0, 1, -360, 360],
[2, 12, 0.0187, 0.0616, 0.01572, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 12, 0.0484, 0.16, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[7, 12, 0.00862, 0.034, 0.00874, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 13, 0.02225, 0.0731, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 14, 0.0215, 0.0707, 0.01816, 9900, 0, 0, 0, 0, 1, -360, 360],
[13, 15, 0.0744, 0.2444, 0.06268, 9900, 0, 0, 0, 0, 1, -360, 360],
[14, 15, 0.0595, 0.195, 0.0502, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 16, 0.0212, 0.0834, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 17, 0.0132, 0.0437, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
[16, 17, 0.0454, 0.1801, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 18, 0.0123, 0.0505, 0.01298, 9900, 0, 0, 0, 0, 1, -360, 360],
[18, 19, 0.01119, 0.0493, 0.01142, 9900, 0, 0, 0, 0, 1, -360, 360],
[19, 20, 0.0252, 0.117, 0.0298, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 19, 0.012, 0.0394, 0.0101, 9900, 0, 0, 0, 0, 1, -360, 360],
[20, 21, 0.0183, 0.0849, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[21, 22, 0.0209, 0.097, 0.0246, 9900, 0, 0, 0, 0, 1, -360, 360],
[22, 23, 0.0342, 0.159, 0.0404, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 24, 0.0135, 0.0492, 0.0498, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 25, 0.0156, 0.08, 0.0864, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 25, 0, 0.0382, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[25, 27, 0.0318, 0.163, 0.1764, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 28, 0.01913, 0.0855, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[28, 29, 0.0237, 0.0943, 0.0238, 9900, 0, 0, 0, 0, 1, -360, 360],
[30, 17, 0, 0.0388, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[8, 30, 0.00431, 0.0504, 0.514, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 30, 0.00799, 0.086, 0.908, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 31, 0.0474, 0.1563, 0.0399, 9900, 0, 0, 0, 0, 1, -360, 360],
[29, 31, 0.0108, 0.0331, 0.0083, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 32, 0.0317, 0.1153, 0.1173, 9900, 0, 0, 0, 0, 1, -360, 360],
[31, 32, 0.0298, 0.0985, 0.0251, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 32, 0.0229, 0.0755, 0.01926, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 33, 0.038, 0.1244, 0.03194, 9900, 0, 0, 0, 0, 1, -360, 360],
[19, 34, 0.0752, 0.247, 0.0632, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 36, 0.00224, 0.0102, 0.00268, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 37, 0.011, 0.0497, 0.01318, 9900, 0, 0, 0, 0, 1, -360, 360],
[33, 37, 0.0415, 0.142, 0.0366, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 36, 0.00871, 0.0268, 0.00568, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 37, 0.00256, 0.0094, 0.00984, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 37, 0, 0.0375, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[37, 39, 0.0321, 0.106, 0.027, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 40, 0.0593, 0.168, 0.042, 9900, 0, 0, 0, 0, 1, -360, 360],
[30, 38, 0.00464, 0.054, 0.422, 9900, 0, 0, 0, 0, 1, -360, 360],
[39, 40, 0.0184, 0.0605, 0.01552, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 41, 0.0145, 0.0487, 0.01222, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 42, 0.0555, 0.183, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
[41, 42, 0.041, 0.135, 0.0344, 9900, 0, 0, 0, 0, 1, -360, 360],
[43, 44, 0.0608, 0.2454, 0.06068, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 43, 0.0413, 0.1681, 0.04226, 9900, 0, 0, 0, 0, 1, -360, 360],
[44, 45, 0.0224, 0.0901, 0.0224, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 46, 0.04, 0.1356, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
[46, 47, 0.038, 0.127, 0.0316, 9900, 0, 0, 0, 0, 1, -360, 360],
[46, 48, 0.0601, 0.189, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 49, 0.0191, 0.0625, 0.01604, 9900, 0, 0, 0, 0, 1, -360, 360],
[42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
[42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 49, 0.0684, 0.186, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
[48, 49, 0.0179, 0.0505, 0.01258, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 50, 0.0267, 0.0752, 0.01874, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 51, 0.0486, 0.137, 0.0342, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 52, 0.0203, 0.0588, 0.01396, 9900, 0, 0, 0, 0, 1, -360, 360],
[52, 53, 0.0405, 0.1635, 0.04058, 9900, 0, 0, 0, 0, 1, -360, 360],
[53, 54, 0.0263, 0.122, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 54, 0.073, 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 54, 0.0869, 0.291, 0.073, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 55, 0.0169, 0.0707, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 56, 0.00275, 0.00955, 0.00732, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 56, 0.00488, 0.0151, 0.00374, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 57, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 60, 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 61, 0.0328, 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 61, 0.00264, 0.0135, 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 62, 0.0123, 0.0561, 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360],
[61, 62, 0.00824, 0.0376, 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360],
[63, 59, 0, 0.0386, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
[38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 65, 0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 66, 0.0482, 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 67, 0.0258, 0.117, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 66, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[69, 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 70, 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 71, 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 72, 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 72, 0.0446, 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 73, 0.00866, 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 74, 0.0401, 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 75, 0.0428, 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 75, 0.0405, 0.122, 0.124, 9900, 0, 0, 0, 0, 1, -360, 360],
[74, 75, 0.0123, 0.0406, 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 77, 0.0444, 0.148, 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 77, 0.0309, 0.101, 0.1038, 9900, 0, 0, 0, 0, 1, -360, 360],
[75, 77, 0.0601, 0.1999, 0.04978, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, 0, 0, 0, 1, -360, 360],
[78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -360, 360],
[79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 81, 0.00175, 0.0202, 0.808, 9900, 0, 0, 0, 0, 1, -360, 360],
[81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[77, 82, 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 83, 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 84, 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 85, 0.043, 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360],
[84, 85, 0.0302, 0.0641, 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 86, 0.035, 0.123, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[86, 87, 0.02828, 0.2074, 0.0445, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
[88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360],
[90, 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360],
[91, 92, 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 93, 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 94, 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[93, 94, 0.0223, 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 95, 0.0132, 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 96, 0.0356, 0.182, 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 96, 0.0162, 0.053, 0.0544, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 96, 0.0269, 0.0869, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],
[95, 96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360],
[96, 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360],
[98, 100, 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 100, 0.018, 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 101, 0.0277, 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 102, 0.0123, 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360],
[101, 102, 0.0246, 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 103, 0.016, 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 104, 0.0451, 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 104, 0.0466, 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 105, 0.0535, 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 106, 0.0605, 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[104, 105, 0.00994, 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 106, 0.014, 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 108, 0.0261, 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360],
[106, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[108, 109, 0.0105, 0.0288, 0.0076, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 110, 0.03906, 0.1813, 0.0461, 9900, 0, 0, 0, 0, 1, -360, 360],
[109, 110, 0.0278, 0.0762, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
[110, 111, 0.022, 0.0755, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
[110, 112, 0.0247, 0.064, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 113, 0.00913, 0.0301, 0.00768, 9900, 0, 0, 0, 0, 1, -360, 360],
[32, 113, 0.0615, 0.203, 0.0518, 9900, 0, 0, 0, 0, 1, -360, 360],
[32, 114, 0.0135, 0.0612, 0.01628, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 115, 0.0164, 0.0741, 0.01972, 9900, 0, 0, 0, 0, 1, -360, 360],
[114, 115, 0.0023, 0.0104, 0.00276, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 116, 0.00034, 0.00405, 0.164, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 117, 0.0329, 0.14, 0.0358, 9900, 0, 0, 0, 0, 1, -360, 360],
[75, 118, 0.0145, 0.0481, 0.01198, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 118, 0.0164, 0.0544, 0.01356, 9900, 0, 0, 0, 0, 1, -360, 360]
])
##----- OPF Data -----##
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
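# e.g. the row [2, 0, 0, 3, 0.01, 40, 0] below is polynomial model 2 with n=3
# coefficients: cost(Pg) = 0.01*Pg^2 + 40*Pg + 0  ($/hr)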
ppc["gencost"] = array([
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0222222, 20, 0],
[2, 0, 0, 3, 0.117647, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0454545, 20, 0],
[2, 0, 0, 3, 0.0318471, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 1.42857, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.526316, 20, 0],
[2, 0, 0, 3, 0.0490196, 20, 0],
[2, 0, 0, 3, 0.208333, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0645161, 20, 0],
[2, 0, 0, 3, 0.0625, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0255754, 20, 0],
[2, 0, 0, 3, 0.0255102, 20, 0],
[2, 0, 0, 3, 0.0193648, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0209644, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 2.5, 20, 0],
[2, 0, 0, 3, 0.0164745, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0396825, 20, 0],
[2, 0, 0, 3, 0.25, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.277778, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0]
])
return ppc
|
python
|
#!/usr/bin/env python
"""Tests the tdb data store - in memory implementation."""
import shutil
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import tdb_data_store
# pylint: mode=test
class TDBTestMixin(object):
def InitDatastore(self):
self.token = access_control.ACLToken(username="test",
reason="Running tests")
config_lib.CONFIG.Set("Datastore.location", "%s/tdb_test/" % self.temp_dir)
self.DestroyDatastore()
data_store.DB = tdb_data_store.TDBDataStore()
data_store.DB.security_manager = test_lib.MockSecurityManager()
def testCorrectDataStore(self):
self.assertTrue(isinstance(data_store.DB, tdb_data_store.TDBDataStore))
def DestroyDatastore(self):
try:
shutil.rmtree(config_lib.CONFIG.Get("Datastore.location"))
except (OSError, IOError):
pass
class TDBDataStoreTest(TDBTestMixin, data_store_test._DataStoreTest):
"""Test the tdb data store."""
class TDBDataStoreBenchmarks(TDBTestMixin,
data_store_test.DataStoreBenchmarks):
"""Benchmark the TDB data store abstraction."""
class TDBDataStoreCSVBenchmarks(TDBTestMixin,
data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the TDB data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
python
|
from radiacode.bytes_buffer import BytesBuffer
from radiacode.radiacode import spectrum_channel_to_energy, RadiaCode
from radiacode.types import *
|
python
|
class GameActuator:
filename = None
mode = None
|
python
|
import codecs
import re
import sys
def warn(s):
sys.stderr.write(s)
sys.stderr.flush()
class CfgParserError(Exception):
pass
_name_pat = re.compile("[a-z][a-z0-9]*", re.UNICODE)
class CfgParser:
"""Important note: parser related methods and attributes are capitalized.
You can access (get and set) actual configuration values using
lower case letters.
@param ancestor: use this parameter to specify default config values on the same level. E.g. you can merge
two different config files by giving an ancestor. If the actual config file does not have a value
for a given key, then its ancestor will be queried.
"""
def __init__(self, ancestor=None):
self.Values = {}
self.Ancestor = ancestor
self.Fpaths = []
self.Fpath = None
self.Lineno = -1
def __str__(self):
return "CfgParser(%s)" % self.Fpaths
def ParseFile(self, fpath, encoding="UTF-8"):
"""Note: we use capital letters here so that we do not collide with keys."""
self.Fpath = fpath
self.Fpaths.append(fpath)
        try:
            with codecs.open(fpath, "r", encoding=encoding) as fin:
                self.Lineno = 0
                for line in fin:
                    self.Lineno += 1
                    stripped = line.strip()
                    if stripped and not stripped.startswith("#"):
                        pos = stripped.find("=")
                        if pos < 0:
                            raise CfgParserError("%s: invalid syntax at line %d" % (
                                fpath, self.Lineno))
                        # index into the stripped line so leading whitespace
                        # cannot shift the key/value split
                        key = stripped[:pos].strip()
                        value = stripped[pos + 1:].strip()  # strip() also drops the trailing newline
                        if not key:
                            raise CfgParserError("%s: empty key at line %d" % (
                                fpath, self.Lineno))
                        names = key.split('.')
                        self.SetValue(names, value)
        finally:
            self.Lineno = -1
        return self
def SetValue(self, names, value):
if isinstance(names, str):
self.SetValue(names.split("."), value)
else:
key = []
for name in names:
key.append(self.CheckName(name))
key = tuple(key)
self.Values[key] = self.CheckValue(value)
def GetValue(self, names):
if isinstance(names, str):
return self.GetValue(names.split("."))
else:
key = tuple(names)
if key in self.Values:
return self.Values[key]
elif self.Ancestor:
return self.Ancestor.GetValue(key)
else:
raise AttributeError("no such config key: %s" % ".".join(key))
def CheckName(self, name):
if name == "value":
raise CfgParserError("%s: reserved key 'value' at line %d" % (
self.Fpath, self.Lineno))
if not _name_pat.match(name):
raise CfgParserError("%s: invalid key at line %d" % (
self.Fpath, self.Lineno))
return str(name)
def CheckValue(self, value):
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def __getattr__(self, key):
return CfgResolver(self, [key])
class CfgResolver:
"""Resolver allows attribute-style access."""
def __init__(self, cfgparser, namepath):
self._cfgparser = cfgparser
self._namepath = tuple(namepath)
def __getattr__(self, name):
if name == "value":
return self.GetValue()
else:
return CfgResolver(self._cfgparser, list(self._namepath) + [name])
def __setattr__(self, name, value):
if name in ["_cfgparser", "_namepath"]:
self.__dict__[name] = value
elif name == "value":
self.SetValue(value)
else:
raise AttributeError("Cannot set any attribute except 'value'.")
def GetValue(self):
return self._cfgparser.GetValue(self._namepath)
def SetValue(self, value):
self._cfgparser.SetValue(self._namepath, value)
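# --- Usage sketch (illustrative; the file name and keys below are assumptions) ---
if __name__ == "__main__":
    import os
    import tempfile
    # Write a tiny config file, parse it, then read a value back both ways.
    with tempfile.NamedTemporaryFile("w", suffix=".cfg", delete=False) as tmp:
        tmp.write("server.host = localhost\nserver.port = 8080\n")
        path = tmp.name
    cfg = CfgParser().ParseFile(path)
    print(cfg.GetValue("server.port"))   # -> 8080 (int, via CheckValue)
    print(cfg.server.port.value)         # -> 8080 (attribute-style access)
    cfg.server.host.value = "0.0.0.0"    # attribute-style assignment
    os.unlink(path)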
|
python
|
from setuptools import setup, find_packages
# read the contents of your README file
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name='jkx',
version='1.0.4',
license='MIT',
author="Andrew Heaney",
author_email='[email protected]',
long_description=long_description,
long_description_content_type='text/markdown',
entry_points={
'console_scripts': [
'jkx=jkx.main:start'
]
},
packages=['jkx'],
url='https://github.com/AndrewHeaney/json-key-explorer',
keywords='json',
install_requires=[
'inquirer',
],
)
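# Local install / release sketch (assumes the standard "build" and "twine" tools):
#   pip install .                            # install from source
#   python -m build && twine upload dist/*   # build and publish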
|
python
|
# Create an algorithm that reads a number and shows its double,
# its triple and its square root
n = float(input('Enter a number: '))
print('Its double is {}'.format(n * 2))
print('Its triple is {}'.format(n * 3))
print('Its square root is: {}'.format(n**(1/2)))
|
python
|
# -*- coding: utf-8 -*-
"""
flask_babelplus.domain
~~~~~~~~~~~~~~~~~~~~~~
Localization domain.
:copyright: (c) 2013 by Armin Ronacher, Daniel Neuhäuser and contributors.
:license: BSD, see LICENSE for more details.
"""
import os
from babel import support
from .utils import get_state, get_locale
from .speaklater import LazyString
class Domain(object):
"""Localization domain. By default it will look for tranlations in the
Flask application directory and "messages" domain - all message
catalogs should be called ``messages.mo``.
"""
def __init__(self, dirname=None, domain='messages'):
self.dirname = dirname
self.domain = domain
self.cache = dict()
def as_default(self):
"""Set this domain as the default one for the current request"""
get_state().domain = self
def get_translations_cache(self):
"""Returns a dictionary-like object for translation caching"""
return self.cache
def get_translations_path(self, app):
"""Returns the translations directory path. Override if you want
to implement custom behavior.
"""
return self.dirname or os.path.join(app.root_path, 'translations')
def get_translations(self):
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
state = get_state(silent=True)
if state is None:
return support.NullTranslations()
locale = get_locale()
cache = self.get_translations_cache()
translations = cache.get(str(locale))
if translations is None:
dirname = self.get_translations_path(state.app)
translations = support.Translations.load(
dirname,
locale,
domain=self.domain
)
self.cache[str(locale)] = translations
return translations
def gettext(self, string, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
::
gettext(u'Hello World!')
gettext(u'Hello %(name)s!', name='World')
"""
t = self.get_translations()
if variables:
return t.ugettext(string) % variables
return t.ugettext(string)
def ngettext(self, singular, plural, num, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
The `num` parameter is used to dispatch between singular and various
plural forms of the message. It is available in the format string
as ``%(num)d`` or ``%(num)s``. The source language should be
English or a similar language which only has one plural form.
::
ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
"""
variables.setdefault('num', num)
t = self.get_translations()
return t.ungettext(singular, plural, num) % variables
def pgettext(self, context, string, **variables):
"""Like :func:`gettext` but with a context.
Gettext uses the ``msgctxt`` notation to distinguish different
contexts for the same ``msgid``
For example::
pgettext(u'Button label', 'Log in')
Learn more about contexts here:
https://www.gnu.org/software/gettext/manual/html_node/Contexts.html
.. versionadded:: 0.7
"""
t = self.get_translations()
if variables:
return t.upgettext(context, string) % variables
return t.upgettext(context, string)
def npgettext(self, context, singular, plural, num, **variables):
"""Like :func:`ngettext` but with a context.
.. versionadded:: 0.7
"""
variables.setdefault('num', num)
t = self.get_translations()
return t.unpgettext(context, singular, plural, num) % variables
def lazy_gettext(self, string, **variables):
"""Like :func:`gettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
Example::
hello = lazy_gettext(u'Hello World')
@app.route('/')
def index():
return unicode(hello)
"""
return LazyString(self.gettext, string, **variables)
def lazy_ngettext(self, singular, plural, num, **variables):
"""Like :func:`ngettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
Example::
apples = lazy_ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
@app.route('/')
def index():
return unicode(apples)
"""
return LazyString(self.ngettext, singular, plural, num, **variables)
def lazy_pgettext(self, context, string, **variables):
"""Like :func:`pgettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
.. versionadded:: 0.7
"""
return LazyString(self.pgettext, context, string, **variables)
# This is the domain that will be used if there is no request context
# and thus no app.
# It will also use this domain if the app isn't initialized for babel.
# Note that if there is no request context, then the standard
# Domain will use NullTranslations.
domain = Domain()
def get_domain():
"""Return the correct translation domain that is used for this request.
This will return the default domain
e.g. "messages" in <approot>/translations" if none is set for this
request.
"""
state = get_state(silent=True)
if state is None:
return domain
return state.domain
# Create shortcuts for the default Flask domain
def gettext(*args, **kwargs):
return get_domain().gettext(*args, **kwargs)
_ = gettext # noqa
def ngettext(*args, **kwargs):
return get_domain().ngettext(*args, **kwargs)
def pgettext(*args, **kwargs):
return get_domain().pgettext(*args, **kwargs)
def npgettext(*args, **kwargs):
return get_domain().npgettext(*args, **kwargs)
def lazy_gettext(*args, **kwargs):
return LazyString(gettext, *args, **kwargs)
def lazy_ngettext(*args, **kwargs):
return LazyString(ngettext, *args, **kwargs)
def lazy_pgettext(*args, **kwargs):
return LazyString(pgettext, *args, **kwargs)
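# --- Usage sketch (illustrative; the directory and strings are assumptions) ---
# my_domain = Domain(dirname="translations", domain="myapp")
# my_domain.as_default()  # make it the default for the current request
# greeting = gettext(u'Hello %(name)s!', name='World')
# label = lazy_gettext(u'Log in')  # translated only when rendered as a string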
|
python
|
from __future__ import print_function
from botocore.exceptions import ClientError
import json
import datetime
import boto3
import os
def handler(event, context):
print("log -- Event: %s " % json.dumps(event))
response = "Error auto-remediating the finding."
try:
# Set Clients
ec2 = boto3.client('ec2')
# Current Time
time = datetime.datetime.utcnow().isoformat()
# Send Response Email
response = "GuardDuty Remediation"
sns = boto3.client('sns')
sns.publish(
TopicArn='guardduty_response',
Message=response
)
except ClientError as e:
print(e)
print("log -- Response: %s " % response)
return response
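# Local smoke-test sketch (illustrative; the event payload is an assumption and
# publishing requires valid AWS credentials):
# if __name__ == "__main__":
#     handler({"detail": {"type": "Recon:EC2/PortProbe"}}, None)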
|
python
|
import pytest
from whatlies.language import BytePairLang
@pytest.fixture()
def lang():
return BytePairLang("en", vs=1000, dim=25, cache_dir="tests/cache")
def test_single_token_words(lang):
assert lang["red"].vector.shape == (25,)
assert len(lang[["red", "blue"]]) == 2
def test_similar_retreival(lang):
assert len(lang.score_similar("hi", 10)) == 10
assert len(lang.embset_similar("hi", 10)) == 10
@pytest.mark.parametrize("item", [2, 0.12341])
def test_raise_error(lang, item):
with pytest.raises(ValueError):
_ = lang[item]
|
python
|
class Solution:
@staticmethod
def addBinary(a: str, b: str) -> str:
length = max(len(a), len(b))
answer = ''
rem = 0
answer, rem = Solution.calculate(a.zfill(length), answer, b.zfill(length), length, rem)
if rem != 0:
answer = '1' + answer
return answer.zfill(length)
@staticmethod
def calculate(a, answer, b, length, rem):
for i in range(length - 1, -1, -1):
r = rem
r += 1 if a[i] == '1' else 0
r += 1 if b[i] == '1' else 0
answer = ('1' if r % 2 == 1 else '0') + answer
rem = 0 if r < 2 else 1
return answer, rem
def print_hi(name):
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
if __name__ == '__main__':
print_hi('PyCharm')
print(Solution.addBinary("1"))
|
python
|
import sys
sys.path.append('/root/csdc3/src/sensors')
import unittest
import time
from sensor_manager import SensorManager
from sensor_constants import *
class Tests(unittest.TestCase):
def setUp(self):
pass
def test_ds1624(self):
ds1624 = [TEMP_PAYLOAD_A, TEMP_BAT_1]
for sensor in ds1624:
SensorManager.init_temp_sensor(sensor)
value = SensorManager.read_temp_sensor(sensor)
self.assertNotEqual(value, -1)
def test_ds18b20(self):
ds18b20 = [PANEL0, PANEL1]
for sensor in ds18b20:
value = SensorManager.get_panel_data(sensor)
self.assertNotEqual(value, -1)
def test_gpio(self):
for i in range(5):
SensorManager.gpio_output(PAYLOAD_HTR_A_GPIO, ON)
time.sleep(0.2)
retval = SensorManager.gpio_output(PAYLOAD_HTR_A_GPIO, OFF)
time.sleep(0.2)
self.assertEqual(True, retval)
def test_read_mag(self):
"""
SensorManager.init_magnetometer()
for i in range(5):
x, y, z = SensorManager.read_magnetometer()
print(x, y, z)
time.sleep(1)
self.assertNotEqual(-1, x)
self.assertNotEqual(-1, y)
self.assertNotEqual(-1, z)
"""
self.assertEqual(1, 1)
def test_read_power(self):
"""
SensorManager.init_power_sensor()
for i in range(5):
current, shunt, bus, power = read_power_sensor()
time.sleep(1)
SensorManager.stop_power_sensor()
"""
self.assertEqual(1, 1)
def test_power_init(self):
"""
SensorManager.mux_select(POWER_0)
SensorManager.init_power_sensor(POWER_0)
addr = SensorEntropy.addr(POWER_0)
adc_reg = SensorEntropy.reg(POWER_0)
bus = SensorManager.bus
calibration = bus.read_byte_data(addr, power_reg['REG_CALIBRATION'])
config = bus.read_byte_data(addr, power_reg['REG_CONFIG'])
self.assertEqual(calibration, 0x1000)
self.assertEqual(config, 0x00)
"""
pass
if __name__ == "__main__":
unittest.main()
|
python
|
import sys
sys.dont_write_bytecode = True
import json
from PyQt5.QtWidgets import QApplication
from models.Authenticate import Dialog
from models.Base import MainWindow
if __name__ == "__main__":
try:
cfg_file = open("config.json","r")
config = json.loads(cfg_file.read())
ip = config['server_ip']
port = config['server_port']
    except (KeyError, FileNotFoundError, json.decoder.JSONDecodeError):
cfg_file = open("config.json","w")
demo = {"server_ip" : "127.0.0.1", "server_port" : 65432}
cfg_file.write(json.dumps(demo))
ip = demo['server_ip']
port = demo['server_port']
app = QApplication(sys.argv)
app.aboutToQuit.connect(lambda : sys.exit())
main = MainWindow()
dial = Dialog(main,(ip,port))
dial.exec()
sys.exit(app.exec())
|
python
|
from logging import getLogger
from threading import Thread
from time import sleep
from signalrc.ws_transport import WebSocketsTransport
logger = getLogger('signalr.client')
class SignalRClient:
def __init__(self, url, hub, session=None):
self.url = url
self._invokes_counter = -1
self.token = None
self.id = None
self.invokes_data = {}
self.received = EventHook()
self.error = EventHook()
self.starting = EventHook()
self.stopping = EventHook()
self.exception = EventHook()
self.is_open = False
self._transport = WebSocketsTransport(self.url, session)
self._message_listener = None
self.started = False
self.hub_name = hub
self.received.add_hooks(self.handle_hub_message, self.handle_error)
self._hub_handlers = {}
def handle_hub_message(self, data):
if 'R' in data and not isinstance(data['R'], bool):
if 'R' in self._hub_handlers:
self._hub_handlers['R'].trigger_hooks({'R': data['R']})
messages = data['M'] if 'M' in data and len(data['M']) > 0 else {}
for inner_data in messages:
method = inner_data['M']
if method in self._hub_handlers:
arguments = inner_data['A']
self._hub_handlers[method].trigger_hooks(*arguments)
def handle_error(self, data):
if 'E' in data:
invoke_index = int(data.get('I', -1))
self.error.trigger_hooks({'error': data['E'],
'call_arguments': self.invokes_data.get(invoke_index)})
def start(self):
logger.info('Starting connection')
self.starting.trigger_hooks()
negotiate_data = self._transport.negotiate(self.hub_name)
self.token = negotiate_data['ConnectionToken']
self.id = negotiate_data['ConnectionId']
self._transport.init_connection(self.token, self.hub_name)
self.is_open = True
self._message_listener = Thread(target=self.wrapped_listener)
self._message_listener.start()
self.started = True
def wrapped_listener(self):
while self.is_open:
try:
data = self._transport.receive()
self.received.trigger_hooks(data)
except Exception as error:
logger.exception('Failed to receive the data via transport')
try:
self.exception.trigger_hooks(error)
finally:
self.is_open = False
def close(self):
logger.info('Closing connection')
if self.is_open:
self.is_open = False
self._message_listener.join()
self._transport.close()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def run_while_open(self):
try:
while self.is_open:
sleep(0.01)
except KeyboardInterrupt:
self.close()
self.stopping.trigger_hooks()
raise
def invoke(self, method, *data):
self._invokes_counter += 1
self._transport.send({'H': self.hub_name, 'M': method, 'A': data,
'I': self._invokes_counter})
self.invokes_data[self._invokes_counter] = {'hub_name': self.hub_name, 'method': method,
'data': data}
def subscribe_to_event(self, event_id, handler):
if event_id not in self._hub_handlers:
self._hub_handlers[event_id] = EventHook()
self._hub_handlers[event_id].add_hooks(handler)
class EventHook:
def __init__(self):
self._handlers = []
def add_hooks(self, *handlers):
self._handlers.extend(handlers)
return self
def trigger_hooks(self, *args, **kwargs):
for handler in self._handlers:
handler(*args, **kwargs)
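# --- Usage sketch (illustrative; the URL, hub and method names are assumptions) ---
if __name__ == "__main__":
    def on_message(*args):
        print("received:", args)

    with SignalRClient("https://example.com/signalr", "chatHub") as client:
        client.subscribe_to_event("broadcastMessage", on_message)
        client.invoke("send", "hello")
        client.run_while_open()  # blocks until the connection closes or Ctrl+C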
|
python
|
import shutil
from fastapi import APIRouter, File, HTTPException, UploadFile
from models.migration_models import ChowdownURL
from services.migrations.chowdown import chowdown_migrate
from services.migrations.nextcloud import migrate as nextcloud_migrate
from app_config import MIGRATION_DIR
from utils.snackbar import SnackResponse
router = APIRouter(tags=["Migration"])
# Chowdown
@router.post("/api/migration/chowdown/repo/")
def import_chowdown_recipes(repo: ChowdownURL):
""" Import Chowsdown Recipes from Repo URL """
try:
report = chowdow_migrate(repo.url)
return SnackResponse.success(
"Recipes Imported from Git Repo, see report for failures.",
additional_data=report,
)
    except Exception:
        raise HTTPException(
status_code=400,
detail=SnackResponse.error(
"Unable to Migrate Recipes. See Log for Details"
),
)
# Nextcloud
@router.get("/api/migration/nextcloud/available/")
def get_available_nextcloud_imports():
    """ Returns a list of available directories that can be imported into Mealie """
available = []
    for entry in MIGRATION_DIR.iterdir():
        if entry.is_dir():
            available.append(entry.stem)
        elif entry.suffix == ".zip":
            available.append(entry.name)
return available
@router.post("/api/migration/nextcloud/{selection}/import/")
def import_nextcloud_directory(selection: str):
""" Imports all the recipes in a given directory """
return nextcloud_migrate(selection)
@router.delete("/api/migration/{file_folder_name}/delete/")
def delete_migration_data(file_folder_name: str):
""" Removes migration data from the file system """
remove_path = MIGRATION_DIR.joinpath(file_folder_name)
if remove_path.is_file():
remove_path.unlink()
elif remove_path.is_dir():
shutil.rmtree(remove_path)
else:
        return SnackResponse.error("File/Folder not found.")
    return SnackResponse.info(f"Migration data removed: {remove_path.absolute()}")
@router.post("/api/migration/upload/")
def upload_nextcloud_zipfile(archive: UploadFile = File(...)):
""" Upload a .zip File to later be imported into Mealie """
dest = MIGRATION_DIR.joinpath(archive.filename)
with dest.open("wb") as buffer:
shutil.copyfileobj(archive.file, buffer)
    if dest.is_file():
return SnackResponse.success("Migration data uploaded")
else:
return SnackResponse.error("Failure uploading file")
|
python
|
"""Top-level package for django-extra-field-validation."""
__author__ = """Tonye Jack"""
__email__ = "[email protected]"
__version__ = "1.1.1"
from .field_validation import FieldValidationMixin
__all__ = ["FieldValidationMixin"]
|
python
|
#!/usr/bin/env python
import random
import rospy
from std_msgs.msg import UInt32
if __name__ == '__main__':
random.seed()
rospy.init_node('random')
pub = rospy.Publisher('rand_int', UInt32, queue_size = 1)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
pub.publish(random.randint(0, 1000))
rate.sleep()
|
python
|
# Generated by Django 3.0.5 on 2021-04-23 10:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('cse', '0011_delete_dev'),
]
operations = [
migrations.CreateModel(
name='semester',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sem', models.CharField(blank=True, max_length=200, null=True)),
('subjects', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=200, null=True)),
('book', models.TextField(blank=True, null=True)),
('other', models.TextField(blank=True, null=True)),
],
),
]
|
python
|
# Generated by Django 2.1 on 2018-08-18 02:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
('inventories', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='ItemInventory',
new_name='Inventory',
),
]
|
python
|
# -*- coding: utf-8 -*-
"""
The view ports widget
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import logging
from PySide2 import QtWidgets
from . import rendererSubWindow
class ViewPortsWidget(QtWidgets.QWidget):
"""
Class for holding view ports (renderer windows)
"""
def __init__(self, parent=None):
super(ViewPortsWidget, self).__init__(parent)
self._logger = logging.getLogger(__name__)
self._viewPorts = []
self._layout = QtWidgets.QGridLayout(self)
self._mainWindow = parent
def numViewPortsChanged(self, num):
"""Add/remove view ports."""
currentNum = len(self._viewPorts)
if num == currentNum:
self._logger.debug("No change in number of view ports ({0})".format(num))
else:
if num > currentNum:
self._logger.debug("Adding more view ports ({0} was {1})".format(num, currentNum))
for i in range(currentNum, num):
row = i // 2
col = i % 2
self._logger.debug("Adding view port with index {0} ({1}, {2})".format(i, row, col))
rw = rendererSubWindow.RendererWindow(self._mainWindow, i, parent=self)
self._viewPorts.append(rw)
self._layout.addWidget(rw, row, col)
else:
self._logger.debug("Removing view ports ({0} was {1})".format(num, currentNum))
while len(self._viewPorts) > num:
rw = self._viewPorts.pop()
self._layout.removeWidget(rw)
rw.deleteLater()
# for rw in self._viewPorts:
# rw.outputDialog.imageTab.imageSequenceTab.refreshLinkedRenderers()
def getViewPorts(self):
"""Return the list of current view ports."""
return self._viewPorts
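# --- Usage sketch (illustrative; assumes a main window the renderer windows expect) ---
# widget = ViewPortsWidget(parent=mainWindow)
# widget.numViewPortsChanged(4)  # lays four renderer windows out on a 2x2 grid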
|
python
|
from .views import SearchContact, markSpam, detailView
from django.urls import path
urlpatterns = [
path('Search/', SearchContact.as_view()),
path('mark/<int:id>', markSpam, ),
path('Detail/<int:id>', detailView),
]
|
python
|
from . import data
from . import datasets
from . import layers
from . import losses
from . import metrics
from . import models
from . import optimizers
from . import utils
|
python
|
"""!
@brief Unit-test runner for core wrapper.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
from pyclustering.tests.suite_holder import suite_holder
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.core.tests import ut_package as core_package_unit_tests
import os
import warnings
from pyclustering.core.definitions import PATH_PYCLUSTERING_CCORE_LIBRARY
from pyclustering.core.wrapper import ccore_library
class remove_library(object):
"""!
@brief Decorator for tests where ccore library should be removed.
"""
def __init__(self, call_object):
self.call_object = call_object
def __call__(self, *args):
test_result = True
try:
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY, PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted")
warnings.filterwarnings("ignore", category=ResourceWarning)
ccore_library.initialize()
self.call_object(args)
except os.error:
warnings.warn("Test skipped: no rights to rename C/C++ pyclustering library for testing.")
return
        except Exception:
            test_result = False
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted", PATH_PYCLUSTERING_CCORE_LIBRARY)
ccore_library.initialize()
warnings.filterwarnings("default", category=ResourceWarning)
if test_result is False:
raise AssertionError("Test failed")
class corrupt_library(object):
"""!
@brief Decorator for tests where ccore library should be corrupted.
"""
def __init__(self, call_object):
self.call_object = call_object
def __create_corrupted_library(self, filepath):
with open(filepath, 'wb') as binary_file_descriptor:
binary_file_descriptor.write(bytes("corrupted binary library", 'UTF-8'))
def __remove_corrupted_library(self, filepath):
os.remove(filepath)
def __call__(self, *args):
try:
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY, PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted")
except os.error:
warnings.warn("Test skipped: no rights to rename C/C++ pyclustering library for testing.")
return
self.__create_corrupted_library(PATH_PYCLUSTERING_CCORE_LIBRARY)
warnings.filterwarnings("ignore", category=ResourceWarning)
ccore_library.initialize()
self.call_object(args)
self.__remove_corrupted_library(PATH_PYCLUSTERING_CCORE_LIBRARY)
os.rename(PATH_PYCLUSTERING_CCORE_LIBRARY + "_corrupted", PATH_PYCLUSTERING_CCORE_LIBRARY)
ccore_library.initialize()
warnings.filterwarnings("default", category=ResourceWarning)
class core_tests(suite_holder):
def __init__(self):
super().__init__()
core_tests.fill_suite(self.get_suite())
@staticmethod
def fill_suite(core_suite):
core_suite.addTests(unittest.TestLoader().loadTestsFromModule(core_package_unit_tests))
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#===============================================================================#
#title :MangaPark.py #
#description :contains the MangaPark class #
#author :August B. Sandoval (asandova) #
#date :2020-3-2 #
#version :0.3 #
#usage :defines the MangaPark class #
#notes : #
#python_version :3.6.9 #
#===============================================================================#
from .Chapter import Chapter
from .TitleSource import TitleSource
from .Stream import Stream
from bs4 import BeautifulSoup
import requests, re, json, os
class MangaPark_Source(TitleSource):
Versions = {
"Duck" : 4,
4 : "Duck",
"Rock" : 6,
6 : "Rock",
"Fox" : 1,
1 : "Fox",
"Panda" : 3,
3 : "Panda"
}
def __init__(self):
TitleSource.__init__(self)
self.site_url = "https://www.mangapark.net"
self.site_domain = "https://www.mangapark.net"
def from_dict(self, dictionary):
self.site_url = dictionary["Site URL"]
self.site_domain = dictionary["Site Domain"]
self.manga_extention = dictionary["Manga Extention"]
self.Title = dictionary["Title"]
self.directory = self.Title.replace(' ', '_')
self.summary = dictionary["Summary"]
self.authors = dictionary["Author(s)"]
self.artists = dictionary["Artist(s)"]
self.genres = dictionary["Genre(s)"]
self.cover_location = dictionary["Cover Location"]
for s in dictionary["Manga Stream(s)"]:
stream = Stream()
stream.from_dict( s )
self.streams.append( stream )
def to_dict(self):
dic = {}
dic["Site URL"] = self.site_url
dic["Site Domain"] = self.site_domain
dic["Manga Extention"] = self.manga_extention
dic["Title"] = self.Title
dic["Summary"] = self.summary
dic["Author(s)"] = self.authors
dic["Artist(s)"] = self.artists
dic["Genre(s)"] = self.genres
dic["Cover Location"] = self.cover_location
dic["Manga Stream(s)"] = []
for s in self.streams:
dic["Manga Stream(s)"].append( s.to_dict() )
return dic
def Download_Manga(self, location="",keep=False):
save_location = self.save_location
if location != "":
            save_location = location
for s in self.streams:
for c in s.chapters:
if keep == True:
                    if self.keep.get(s.name) is None:
                        self.keep[s.name] = []
                        self.keep[s.name].append(c.get_chapter_number())
                    else:
                        if self.keep[s.name].count(c.get_chapter_number()) == 0:
                            self.keep[s.name].append(c.get_chapter_number())
#title = self.Title.replace(" ", '_')
stream_name = s.name.replace(' ', '_')
c.download_chapter( save_location +'/'+self.directory+'/'+ stream_name)
def Download_Manga_stream(self, stream_id, location="",Keep=False):
save_location = self.save_location
if location != "":
            save_location = location
for s in self.streams:
if s.id == stream_id:
for c in s.chapters:
if Keep == True:
                        if self.keep.get(s.name) is None:
                            self.keep[s.name] = []
                            self.keep[s.name].append(c.get_chapter_number())
                        else:
                            if self.keep[s.name].count(c.get_chapter_number()) == 0:
                                self.keep[s.name].append(c.get_chapter_number())
#title = self.Title.replace(" ", '_')
                    stream_name = s.name.replace(' ', '_')
c.download_chapter( save_location +'/'+self.directory+'/'+ stream_name)
return
def Download_Manga_Chapter(self, stream_id, chapter_number, location="", KillDownload=[False]):
save_location = self.save_location
if location != "":
            save_location = location
for s in self.streams:
if s.id == stream_id:
for k in s.chapters.keys():
if s.chapters[k].get_chapter_number() == chapter_number:
#title = self.Title.replace(" ", '_')
stream = self.get_stream_with_id(stream_id)
stream_name = stream.name.replace(' ', '_')
code = s.chapters[k].download_chapter(save_location +'/'+self.directory+'/'+ stream_name,KillDownload)
return code
return -1
return -2
def _extract_cover(self):
cover_data = self.site_html.find('div', class_="w-100 cover")
if os.path.exists(self.save_location+'/'+self.directory) == False:
os.mkdir(self.save_location+'/'+self.directory)
cover_image_link = cover_data.img["src"]
cover = requests.get("https:"+ cover_image_link)
ext_loc = 0
for i in range(0,len(cover_image_link)):
if cover_image_link[i] == '.':
ext_loc = i
extention = cover_image_link[ext_loc:]
        if not cover.ok:
print("Failed to download cover")
return
self.cover_location = self.save_location+'/'+self.directory+"/cover"+extention
        with open(self.cover_location, 'wb') as f:
            f.write(cover.content)
def _extract_title(self):
self.Title = self.site_html.find('div', class_="pb-1 mb-2 line-b-f hd").h2.a.text
self.directory = self.Title.replace(' ', '_')
def _extract_summary(self):
s = self.site_html.find('p', class_='summary').text
self.summary = s
def _extract_managa_info(self):
table = self.site_html.find('table', class_="attr")
Author_data = table.find('th', text="Author(s)").parent
Artist_data = table.find('th', text="Artist(s)").parent
Genre_data = table.find('th', text="Genre(s)").parent
for a in Author_data.find_all('a', target='_blank'):
self.authors.append( a.text )
for a in Artist_data.find_all('a', target="_blank"):
self.artists.append( a.text )
for g in Genre_data.find_all('a', target='_blank'):
if g.b != None:
self.genres.append(g.b.text)
else:
self.genres.append(g.text)
def _extract_streams(self):
stream_list = self.site_html.find('div', class_='book-list-1')
streams = stream_list.find_all('div', class_='mt-3 stream')
streams += stream_list.find_all('div', class_='mt-3 stream collapsed')
for s in streams:
stream_id_str = s['id'].split('_')
stream_id = int(stream_id_str[-1])
version_tag = "ml-1 stream-text-" + str(stream_id)
version_name = s.find('span', class_=version_tag).text
manga_stream = Stream(version_name, stream_id)
chapters = s.find_all('a', class_="ml-1 visited ch")
for c in chapters:
link = c.parent.parent
link = link.find('a', text="all")["href"]
number_str = c.text
number_str_elements = re.compile("[vV]ol(ume)*[.]*[ ]*[0-9]+[ ]").split(number_str)
#print(number_str_elements)
number_start = -1
number_end = -1
#print(number_str_elements[-1])
for num in range(0, len(number_str_elements[-1])):
if number_start == -1 and number_str_elements[-1][num].isnumeric():
number_start = num
elif number_start != -1 and number_str_elements[-1][num].isnumeric() == False:
                        if num + 1 < len(number_str_elements[-1]) and number_str_elements[-1][num + 1].isnumeric():
continue
else:
number_end = num
#print(number_end)
break
#print(number_str_elements)
#print(f"start Number: {number_start}\tend Number: {number_end}")
if number_end != -1:
number = float(number_str_elements[-1][number_start:number_end])
elif number_end == -1 and number_start == -1:
print("encountered non-numbered chapter")
continue
else:
number = float(number_str_elements[-1][number_start:])
number_str_elements = number_str_elements[-1].split(': ')
name = ""
if len( number_str_elements) > 1:
name = number_str_elements[-1]
else:
#if stream_id == 4:
#print(c.parent.parent.prettify())
Title_tag = c.parent.parent.find('div', class_="d-none d-md-flex align-items-center ml-0 ml-md-1 txt")
if Title_tag != None:
#print(Title_tag.text)
name = Title_tag.text
start = 0
                        for ch in name:
                            if ch.isalpha():
                                break
                            start += 1
name = name[start:]
#print(name)
else:
name = ""
if len(name) > 0:
end = len(name)-1
for i in range( len(name)-1, -1,-1 ):
#print(name[i])
if name[i] != ' ':
end = i+1
break
name = name[0:end]
chap = Chapter(name, number)
chap.set_link( self.site_domain + link)
#print(f"adding chapter {chap.get_full_title()}")
manga_stream.add_chapter(chap)
#print("adding stream " + manga_stream.name)
self.add_stream(manga_stream)
print("extraction of streams: Complete")
def __str__(self):
s = "----------Manga Park----------\n"
s += "Title: " + self.Title + "\n"
s += "Author(s): "
for a in self.authors:
s += a + " | "
s += "\nArtist(s): "
for a in self.artists:
s += a + ' | '
s+= "\nGenre(s): "
for g in self.genres:
s += g + ' | '
s += "\nSummary: "+ self.summary + "\n"
for stream in self.streams:
s += str(stream) + "\n"
return s
"""
if __name__ == "__main__":
#test = MangaPark_Source()
test2 = MangaPark_Source()
test2.set_default_save_location('./Manga')
#test.request_manga("https://mangapark.net/manga/ryoumin-0-nin-start-no-henkyou-ryoushusama-fuurou")
test2.request_manga("https://mangapark.net/manga/tensei-shitara-ken-deshita")
test2.extract_manga()
with open('test.json', 'w') as f:
f.write( json.dumps( test2.to_dict(),indent=1 ) )
test2.Download_Manga_Chapter(stream_id=MangaPark_Source.Versions["Fox"],chapter_number=1 , location="./Manga")
"""
|
python
|
__all__ = ['HttpCacheControlMixin']
class HttpCacheControlMixin:
http_cache_control_max_age = None
def get_http_cache_control_max_age(self):
return self.http_cache_control_max_age
def dispatch(self, *args, **kwargs):
response = super().dispatch(*args, **kwargs)
if response.status_code in [200, 304]:
max_age = self.get_http_cache_control_max_age()
if max_age:
response['Cache-Control'] = 'max-age=%s' % max_age
return response
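# --- Usage sketch (illustrative; the view and max-age value are assumptions) ---
# from django.views.generic import TemplateView
#
# class HomeView(HttpCacheControlMixin, TemplateView):
#     template_name = "home.html"
#     http_cache_control_max_age = 3600  # cache 200/304 responses for one hour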
|
python
|
from __future__ import annotations
# python
import logging
import os
import random
import importlib
import json
import datetime
from halo_app.classes import AbsBaseClass
from halo_app.app.context import HaloContext, InitCtxFactory
from halo_app.infra.providers.providers import get_provider,ONPREM
from halo_app.app.response import HaloResponseFactory, AbsHaloResponse
from halo_app.entrypoints.client_type import ClientType
from halo_app.infra.providers.util import ProviderUtil
from .notification import Notification, ValidError
from .request import AbsHaloRequest
from .result import Result
from ..error import Error
from ..reflect import Reflect
from ..settingsx import settingsx
settings = settingsx()
logger = logging.getLogger(__name__)
def strx(str1):
"""
:param str1:
:return:
"""
if str1:
try:
return str1.encode('utf-8').strip()
except AttributeError as e:
return str(str1)
except Exception as e:
return str(str1)
return ''
class Util(AbsBaseClass):
@classmethod
def init_halo_context(cls,env:dict=None):
if settings.HALO_CONTEXT_CLASS:
context = Reflect.instantiate(settings.HALO_CONTEXT_CLASS,HaloContext,env)
else:
context = InitCtxFactory.get_initial_context(env)
return context
@classmethod
def get_client_type(cls)->ClientType:
if settings.HALO_CLIENT_CLASS:
client_type_ins = Reflect.instantiate(settings.HALO_CLIENT_CLASS,ClientType)
else:
client_type_ins = ClientType()
return client_type_ins
@classmethod
def get_response_factory(cls)->HaloResponseFactory:
if settings.HALO_RESPONSE_FACTORY_CLASS:
response_factory_ins = Reflect.instantiate(settings.HALO_RESPONSE_FACTORY_CLASS, HaloResponseFactory)
else:
response_factory_ins = HaloResponseFactory()
return response_factory_ins
@staticmethod
def create_result_response(halo_request:AbsHaloRequest, result:Result)->AbsHaloResponse:
# for result - OK or FAIL
response_factory = Util.get_response_factory()
success = result.success
if success:
data = result.payload
else:
data = result.error
return response_factory.get_halo_response(halo_request,success, data)
@staticmethod
def create_notification_response(halo_request:AbsHaloRequest, notification:Notification) -> AbsHaloResponse:
# for validation errors
response_factory = Util.get_response_factory()
success = not notification.hasErrors()
return response_factory.get_halo_response(halo_request, success, notification.errors)
@staticmethod
def create_payload_response(halo_request: AbsHaloRequest,data) -> AbsHaloResponse:
# for query result
response_factory = Util.get_response_factory()
return response_factory.get_halo_response(halo_request, True, data)
@staticmethod
def create_response(halo_request,success, data=None) -> AbsHaloResponse:
# for async command
response_factory = Util.get_response_factory()
return response_factory.get_halo_response(halo_request, success, data)
@staticmethod
def create_exception_response(halo_request: AbsHaloRequest, e: Exception) -> AbsHaloResponse:
# for exception
response_factory = Util.get_response_factory()
success = False
data = Error("exception thrown!",e)
return response_factory.get_halo_response(halo_request, success, data)
@classmethod
def get_timeout(cls, halo_context:HaloContext):
"""
:param request:
:return:
"""
if "timeout" in halo_context.keys():
timeout = halo_context.get("timeout")
if timeout:
return timeout
return settings.SERVICE_CONNECT_TIMEOUT_IN_SC
@classmethod
def get_halo_timeout1(cls, halo_request):
"""
:param request:
:return:
"""
if "timeout" in halo_request.context.keys():
timeout = halo_request.context.get("timeout")
if timeout:
return timeout
return settings.SERVICE_CONNECT_TIMEOUT_IN_SC
"""
env = {HaloContext.items[HaloContext.USER_AGENT]: x_user_agent,
HaloContext.items[HaloContext.REQUEST]: request_id,
HaloContext.items[HaloContext.CORRELATION]: x_correlation_id,
HaloContext.items[HaloContext.DEBUG_LOG]: dlog}
if api_key:
env[HaloContext.items[HaloContext.API_KEY]] = api_key
"""
@staticmethod
def get_func_name():
"""
:return:
"""
provider = get_provider()
if provider.PROVIDER_NAME != ONPREM:
return provider.get_func_name()
return settings.FUNC_NAME
@staticmethod
def get_func_ver():
"""
:return:
"""
provider = get_provider()
if provider.PROVIDER_NAME != ONPREM:
return provider.get_func_ver()
return settings.FUNC_VER
@classmethod
def get_system_debug_enabled(cls):
"""
:return:
"""
# check if env var for sampled debug logs is on and activate for percentage in settings (5%)
if ('DEBUG_LOG' in os.environ and os.environ['DEBUG_LOG'] == 'true') or (ProviderUtil.get_debug_param() == 'true'):
rand = random.random()
if settings.LOG_SAMPLE_RATE > rand:
return 'true'
return 'false'
@classmethod
def isDebugEnabled(cls, halo_context):
"""
:param req_context:
:param request:
:return:
"""
# disable debug logging by default, but allow override via env variables
# or if enabled via forwarded request context or if debug flag is on
if halo_context.get(
HaloContext.DEBUG_LOG) == 'true' or cls.get_system_debug_enabled() == 'true':
return True
return False
@staticmethod
def json_error_response(halo_context, clazz, err: Error):
"""
:param halo_context:
:param clazz:
:param err:
:return:
"""
module = importlib.import_module(clazz)
my_class = getattr(module, 'ErrorMessages')
msgs = my_class()
e = err.cause
error_code, message = msgs.get_code(e)
error_detail = type(e).__name__  # use the name so the payload stays JSON-serializable
e_msg = err.message
if hasattr(e, 'detail'):
error_detail = e.detail
elif hasattr(e, 'original_exception'):
error_detail = Util.get_detail(e.original_exception)
else:
if hasattr(e, 'message'):
e_msg = e.message
else:
e_msg = str(e)
if e_msg is not None and e_msg != 'None' and e_msg != "":
error_detail = e_msg
#@todo check when to use data
error_data = {}
if hasattr(e, 'view'):
error_data = json.dumps(e.data)
payload = {"error":
{"error_code": error_code, "error_message": message, "error_detail": error_detail,"timestamp": datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)"),
"view": error_data, "trace_id": halo_context.get(HaloContext.items[HaloContext.CORRELATION])}
}
if Util.isDebugEnabled(halo_context) and hasattr(e, 'stack'):
payload["stack"] = json.dumps(e.stack)
payload["context"] = json.dumps(halo_context.table)
return payload
@staticmethod
def json_exception_response(halo_context, clazz, e):
"""
:param halo_context:
:param clazz:
:param e:
:return:
"""
module = importlib.import_module(clazz)
my_class = getattr(module, 'ErrorMessages')
msgs = my_class()
error_code, message = msgs.get_code(e)
error_detail = type(e).__name__  # use the name so the payload stays JSON-serializable
e_msg = ""
if hasattr(e, 'detail'):
error_detail = e.detail
elif hasattr(e, 'original_exception'):
error_detail = Util.get_detail(e.original_exception)
else:
if hasattr(e, 'message'):
e_msg = e.message
else:
e_msg = str(e)
if e_msg is not None and e_msg != 'None' and e_msg != "":
error_detail = e_msg
#@todo check when to use data
error_data = {}
if hasattr(e, 'view'):
error_data = json.dumps(e.data)
payload = {"error":
{"error_code": error_code, "error_message": message, "error_detail": error_detail,"timestamp": datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)"),
"view": error_data, "trace_id": halo_context.get(HaloContext.items[HaloContext.CORRELATION])}
}
if Util.isDebugEnabled(halo_context) and hasattr(e, 'stack'):
payload["stack"] = json.dumps(e.stack)
payload["context"] = json.dumps(halo_context.table)
return payload
@staticmethod
def get_detail(e):
detail = None
if e.original_exception:
detail = Util.get_detail(e.original_exception)
if detail:
return str(e)+':'+detail
return str(e)
@staticmethod
def json_notification_response(halo_context, errors: [ValidError]):
"""
:param halo_context:
:param errors:
:return:
"""
default_message = 'A Validation error occurred!'
#@todo set validation error code
error_code = "validation"
payload = {
"error_code": error_code,
"error_message": default_message,
"timestamp": datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)"),
"trace_id": halo_context.get(HaloContext.items[HaloContext.CORRELATION]),
"errors": [],
}
for error in errors:
payload['errors'].append({"name": error.name,"error": error.message})
if Util.isDebugEnabled(halo_context):
payload["context"] = json.dumps(halo_context.table)
return payload
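# Illustrative only: the shape of the payload json_error_response builds above
# (field names come from the code; the values here are invented):
# {"error": {"error_code": "E100", "error_message": "...", "error_detail": "...",
# "timestamp": "01-Jan-2024 (12:00:00.000000)", "view": "{}", "trace_id": "abc-123"}}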
|
python
|
"""A helper rule for testing detect_root function."""
load("@rules_foreign_cc//tools/build_defs:detect_root.bzl", "detect_root")
def _impl(ctx):
detected_root = detect_root(ctx.attr.srcs)
out = ctx.actions.declare_file(ctx.attr.out)
ctx.actions.write(
output = out,
content = detected_root,
)
return [DefaultInfo(files = depset([out]))]
detect_root_test_rule = rule(
implementation = _impl,
attrs = {
"srcs": attr.label(mandatory = True),
"out": attr.string(mandatory = True),
},
)
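# Hypothetical BUILD usage of the rule above (target names are illustrative):
# detect_root_test_rule(
#     name = "detect_root_test",
#     srcs = ":sources_filegroup",
#     out = "detected_root.txt",
# )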
|
python
|
import sys
import json
from .kafka import Consumer
from .postgres import PGClient
from .model import URLStatus
if __name__ == '__main__':
try:
with PGClient() as pg_client, Consumer() as kafka_consumer:
# TODO: change to subscript
# TODO: try https://github.com/aio-libs/aiokafka
while True:
msg = kafka_consumer.consume()
if msg:
print(msg.decode('utf-8'))
url_status = URLStatus(**json.loads(msg.decode('utf-8')))
# print(url_status)
pg_client.insert(url_status)
except KeyboardInterrupt:
print('Ctrl+C to exit...')
sys.exit()
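# Assumed message shape (illustrative): each Kafka message is a JSON object
# whose keys match URLStatus's constructor, e.g. {"url": "...", "status": 200}.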
|
python
|
import os
import neat
import pygame
from bird import Bird
from pipe import Pipe
from base import Base
from background import Background
class Game:
WIN_WIDTH = 500
WIN_HEIGHT = 800
def __init__(self):
self.isRunning = True
self.score = 0
self.birds = []
self.nets = []
self.ge = []
self.base = Base(730)
self.pipes = []
self.background = Background()
pygame.font.init()
self.font = pygame.font.Font(pygame.font.get_default_font(), 50)
self.win = pygame.display.set_mode((self.WIN_WIDTH, self.WIN_HEIGHT))
def draw_score(self):
text = self.font.render(f"Score: {self.score}", 1, (255, 255, 255))
self.win.blit(text, (self.WIN_WIDTH - 10 - text.get_width(), 10))
def draw_game(self):
self.background.draw(self.win)
for bird in self.birds:
bird.draw(self.win)
for pipe in self.pipes:
pipe.draw(self.win)
self.base.draw(self.win)
self.draw_score()
pygame.display.update()
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.isRunning = False
def kill_bird(self, bird_idx):
# drop the genome together with its bird and network so the three
# parallel lists stay index-aligned for update_score/command_birds
self.ge[bird_idx].fitness -= 1
self.birds.pop(bird_idx)
self.nets.pop(bird_idx)
self.ge.pop(bird_idx)
def update_score(self):
self.score += 1
for bird_idx, bird in enumerate(self.birds):
self.ge[bird_idx].fitness += 5
def move_pipes(self):
pipes_to_remove = []
add_pipe = False
for pipe in self.pipes:
is_pipe_off_screen = pipe.x + pipe.PIPE_BOTTOM.get_width() < 0
# iterate over a reversed snapshot so kill_bird's pops do not shift
# the indexes of birds still to be visited
for bird_idx, bird in reversed(list(enumerate(self.birds))):
is_bird_pass_pipe = pipe.x + pipe.PIPE_BOTTOM.get_width() < bird.x
if pipe.collide(bird):
self.kill_bird(bird_idx)
if not pipe.passed and is_bird_pass_pipe:
pipe.passed = True
add_pipe = True
if is_pipe_off_screen:
pipes_to_remove.append(pipe)
pipe.move()
self.pipes = [
pipe for pipe in self.pipes if pipe not in pipes_to_remove]
if add_pipe:
self.update_score()
self.add_pipe()
def check_bird_hit_limits(self, bird_idx, bird):
is_bird_touching_ground = bird.y + bird.img.get_height() >= 730
is_bird_above_screen = bird.y + bird.img.get_height() < 0
if is_bird_touching_ground or is_bird_above_screen:
self.kill_bird(bird_idx)
def move_birds(self):
for bird_idx, bird in reversed(list(enumerate(self.birds))):  # reversed snapshot: safe removal during iteration
bird.move()
self.check_bird_hit_limits(bird_idx, bird)
def add_pipe(self):
self.pipes.append(Pipe(550))
def get_next_pipe(self):
for pipe in self.pipes:
if not pipe.passed:
return pipe
def command_birds(self):
pipe = self.get_next_pipe()
if not pipe:
return
for bird_idx, bird in enumerate(self.birds):
self.ge[bird_idx].fitness += 0.1
neat = self.nets[bird_idx]
dist_bird_pipe = {
"top_pipe": abs(bird.y - pipe.height),
"bottom_pipe": abs(bird.y - pipe.bottom)
}
output = neat.activate((bird.y, dist_bird_pipe["top_pipe"],
dist_bird_pipe["bottom_pipe"]))
if (output[0] > 0.5):
bird.jump()
def is_birds_alive(self):
return len(self.birds) > 0
def reset(self):
self.pipes = []
self.score = 0
self.add_pipe()
def gameloop(self, genomes, config):
for genome_id, genome in genomes:
net = neat.nn.FeedForwardNetwork.create(genome, config)
self.nets.append(net)
self.birds.append(Bird(230, 350))
genome.fitness = 0
self.ge.append(genome)
clock = pygame.time.Clock()
while self.isRunning and self.is_birds_alive():
clock.tick(30)
self.handle_events()
self.background.move()
self.move_birds()
self.command_birds()
self.base.move()
self.move_pipes()
self.draw_game()
self.reset()
# pygame.quit()
# quit()
def run(fitness, config_path):
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
population = neat.Population(config)
population.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
population.add_reporter(stats)
winner = population.run(fitness, 50)
if __name__ == "__main__":
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'neat.config')
run(Game().gameloop, config_path)
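# The 'neat.config' file loaded above must provide the standard neat-python
# sections; a minimal (assumed) fragment for orientation:
# [NEAT]
# fitness_criterion   = max
# fitness_threshold   = 100
# pop_size            = 50
# reset_on_extinction = False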
|
python
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("ANALYSIS")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:clustering.root')
)
process.pfClusterAnalyzer = cms.EDAnalyzer("PFClusterAnalyzer",
PFClusters = cms.InputTag("particleFlowClusterECAL"),
verbose = cms.untracked.bool(True),
printBlocks = cms.untracked.bool(False)
)
process.p = cms.Path(process.pfClusterAnalyzer)
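# Run this configuration with the standard CMSSW driver: cmsRun <this_file>.py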
|
python
|
#!/usr/bin/env python
# David Prihoda
# Calculate coverage of BGCs by a DataFrame of BGC Candidates
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import argparse
def get_single_contig_coverage(a_cands, b_cands):
"""
Get coverage of each BGC candidate in a_cands by BGC candidates in b_cands,
where all candidates come from the same contig.
:param a_cands: Reference DataFrame of BGC candidates (from a single contig)
:param b_cands: Compared DataFrame of BGC candidates (from a single contig)
:return: row of each BGC candidate in a_cands with 'coverage' column that defines
fractional coverage by overlapping BGC candidates in b_cands
"""
if b_cands is None:
remaining_cands = []
else:
remaining_cands = list(b_cands.reset_index(drop=True).iterrows())
# Create binary mask based on longest candidate length
max_len = int((a_cands['nucl_end'] - a_cands['nucl_start'] + 1).max())
mask = np.zeros(max_len)
# For each A candidate
coverages = []
for c, cand in a_cands.iterrows():
# For each suitable candidate from other model
cand_start = int(cand['nucl_start']) - 1
cand_end = int(cand['nucl_end'])
cand_len = cand_end - cand_start
#print('Cand {}: {}-{} (len {})'.format(c, cand_start, cand_end, cand_len))
any_exact = False
max_covered = 0
for i, other in remaining_cands:
other_start = int(other['nucl_start']) - 1
other_end = int(other['nucl_end'])
other_len = other_end - other_start
# No overlap anymore
if other_start > cand_end:
continue
# No overlap yet
if other_end < cand_start:
# Discard all previous candidates up to current one
continue
# Exact match
if other_start == cand_start and other_end == cand_end:
any_exact = True
# Start and end coordinates relative from cand_start
overlap_start = max(other_start, cand_start) - cand_start
overlap_end = min(other_end, cand_end) - cand_start
overlap_length = overlap_end - overlap_start
mask[overlap_start:overlap_end] = 1
max_covered = max(max_covered, overlap_length / other_len)
num_covered = sum(mask[:cand_len])
mask[:cand_len] = 0
#print('overlap {}/{} = {}'.format(num_covered, cand_len, num_covered / cand_len))
coverage = pd.Series(
[num_covered / cand_len, any_exact, max_covered],
['coverage', 'any_exact', 'max_covered']
).append(cand)
if 'model' in coverage:
del coverage['model']
coverages.append(coverage)
return coverages
def get_coverage(a_cands, b_cands):
"""
Get coverage of each BGC candidate in a_cands by BGC candidates in b_cands,
where each candidate can be found in a different contig as indicated by the 'contig_id' column.
:param a_cands: Reference DataFrame of BGC candidates
:param b_cands: Compared DataFrame of BGC candidates
:return: row of each BGC candidate in a_cands with 'coverage' column that defines
fractional coverage by overlapping BGC candidates in b_cands
"""
a_grouped = a_cands.groupby('contig_id')
b_grouped = b_cands.groupby('contig_id')
coverages = []
# Get coverage separately for all contigs
for contig_id in a_grouped.groups:
#print(contig_id)
a_contig_cands = a_grouped.get_group(contig_id)
b_contig_cands = b_grouped.get_group(contig_id) if contig_id in b_grouped.groups else None
coverages += get_single_contig_coverage(a_contig_cands, b_contig_cands)
return pd.DataFrame(coverages)
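# Worked example (illustrative numbers): a reference candidate spanning
# nucleotides 1-100 overlapped by a compared candidate spanning 51-150 gets
# coverage 50/100 = 0.5, any_exact False, and max_covered 50/100 = 0.5.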
def plot_coverage_hist(coverage, title, label, **kwargs):
"""
Plot histogram of coverage by model
:param coverage: DataFrame with BGC candidates and their 'coverage' column and 'model' column
:param title: Plot title
:param label: Plot x-axis label
:param kwargs: Arguments to pass to the histogram plot function
"""
cols = len(coverage['model'].unique())
axes = coverage[['coverage', 'model']].hist(by='model', bins=25, figsize=(cols * 3, 2.7), layout=(1, cols),
sharey=True, **kwargs)
axes[0].set_ylabel('# BGCs')
plt.suptitle(title)
plt.tight_layout()
plt.subplots_adjust(top=0.77)
for ax in axes:
ax.set_xlim(0, 1)
ax.set_xlabel(label)
ax.set_xticklabels(['{:.0f}%'.format(x * 100) for x in ax.get_xticks()])
def plot_coverage_boxplot(coverage, title, label, **kwargs):
"""
Plot boxplot of coverage by model
:param coverage: DataFrame with BGC candidates and their 'coverage' column and 'model' column
:param title: Plot title
:param label: Plot x-axis label
:param kwargs: Arguments to pass to the boxplot function
"""
cols = len(coverage['model'].unique())
ax = coverage[['coverage', 'model']].boxplot(by='model', figsize=(cols * 0.7+1, 2.7), **kwargs)
plt.suptitle(title)
plt.tight_layout()
plt.xticks(rotation=90)
plt.subplots_adjust(top=0.80)
ax.set_ylabel(label)
ax.set_yticklabels(['{:.0f}%'.format(x * 100) for x in ax.get_yticks()])
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", required=True,
help="Target model candidate csv file path.", metavar="FILE")
parser.add_argument("-o", "--output", dest="output", required=True,
help="Output file path.", metavar="FILE")
parser.add_argument(dest='candidates', nargs='+',
help="Paths to other models' candidate files.", metavar="FILE")
options = parser.parse_args()
target_cands = pd.read_csv(options.input)
other_cands: pd.DataFrame = pd.concat([pd.read_csv(path) for path in options.candidates])
coverage = get_coverage(target_cands, other_cands)
coverage.to_csv(options.output, index=False)
print('Saved {} candidates to: {}'.format(len(coverage), options.output))
|
python
|
import tensorflow as tf
class Generator(object):
def __init__(self, n_node, node_emd_init, config):
self.n_node = n_node
self.node_emd_init = node_emd_init
self.motif_size = config.motif_size
self.max_value = config.max_value
with tf.compat.v1.variable_scope('generator'):
self.embedding_matrix = tf.compat.v1.get_variable(name="embedding",
shape=self.node_emd_init.shape,
initializer=tf.constant_initializer(self.node_emd_init),
trainable=True)
self.motifs = tf.compat.v1.placeholder(tf.int32, shape=[None, config.motif_size])
self.reward = tf.compat.v1.placeholder(tf.float32, shape=[None])
self.node_embedding = tf.nn.embedding_lookup(self.embedding_matrix, self.motifs) # Batch * motif_size * embedding_size
self.score = tf.reduce_sum(tf.reduce_prod(self.node_embedding, axis=1), axis=1)
self.p = 1 - tf.exp(-self.score)
self.p = tf.clip_by_value(self.p, 1e-5, 1)
self.loss = -tf.reduce_mean((self.p) * (self.reward))
# The graph above is built with tf.compat.v1 placeholders, so use the v1
# optimizer API as well; tf.optimizers.Adam.minimize() needs an explicit
# var_list/tape and cannot drive a placeholder graph.
optimizer = tf.compat.v1.train.AdamOptimizer(config.lr_gen)
self.g_updates = optimizer.minimize(self.loss)
self.clip_op = tf.compat.v1.assign(self.embedding_matrix, tf.clip_by_value(self.embedding_matrix, 0, self.max_value))
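# Typical (assumed) graph-mode training step for the ops defined above:
# with tf.compat.v1.Session() as sess:
#     sess.run(tf.compat.v1.global_variables_initializer())
#     sess.run([g.g_updates, g.loss], feed_dict={g.motifs: batch, g.reward: rewards})
#     sess.run(g.clip_op)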
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.conf import settings
FRAME_FORMATTER = getattr(settings, 'FRAME_FORMATTER', None)
FRAME_SEPARATOR = getattr(settings, 'FRAME_SEPARATOR', None)
if FRAME_FORMATTER is None:
raise ValueError('Improperly Configured FRAME_FORMATTER')
if FRAME_SEPARATOR is None:
raise ValueError('Improperly Configured FRAME_SEPARATOR')
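# A minimal settings fragment this module expects (values are illustrative):
# FRAME_FORMATTER = 'myapp.formatters.default_frame_formatter'
# FRAME_SEPARATOR = '\n'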
|
python
|
from __future__ import division
from utils.utils import *
from utils.datasets import *
from utils.parse_config import *
from models.darknet import *
from models.yolo_nano_helper import YoloNano
from torch.nn.parallel import DataParallel
import os
import sys
import time
import datetime
import argparse
import tqdm
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import cv2
import os.path as osp
VIDEO_SIZE={
"vid_65132":(1920,1080),
"vid_64708": (1920, 1080),
"multi_person":(1920,1080)
}
def str_id(cnt):
# zero-pad the frame counter to 8 digits, e.g. str_id(42) -> "00000042"
return str(cnt).zfill(8)
@torch.no_grad()
def inference(model, path, conf_thres, nms_thres, img_size, batch_size,data_type,video_id):
model.eval()
# Get dataloader
dataset = InferenceDataset(path, img_size=img_size, augment=False, multiscale=False,data_type =data_type)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn
)
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4v matches the .mp4 container below
video_writer = cv2.VideoWriter(osp.join("./result", "{}.mp4".format(video_id)), fourcc, 30.0,
VIDEO_SIZE[video_id])
for batch_i, (img_id, imgs, pads) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
# Extract labels
imgs = Variable(imgs.type(Tensor), requires_grad=False)
outputs = model(imgs)
outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)
for id,output,pad in zip(img_id,outputs,pads):
img =cv2.imread(id)
h,w,c = img.shape
square_edge = max(h,w)
ratio = square_edge/imgs.shape[-1]
if output is None:
video_writer.write(img)
continue
output = output.detach().cpu().numpy()[:]
output[:,:4]*=ratio
output[:,0]-= pad[0]
output[:,1]-= pad[2]
output[:,2]-= pad[1]
output[:,3]-= pad[3]
for out in output:
category = int(out[-1])
if category>0:
continue
out = out[:4].astype(int).tolist()  # np.int was removed in NumPy 1.24
img = cv2.rectangle(img,tuple(out[:2]),tuple(out[2:4]),(0,0,255),3)
video_writer.write(img)
# Concatenate sample statistics
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=8, help="size of each image batch")
parser.add_argument("--model_def", type=str, default="config/yolov3.cfg", help="path to model definition file")
parser.add_argument("--data_config", type=str, default="config/coco.data", help="path to data config file")
parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file")
parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
parser.add_argument("--conf_thres", type=float, default=0.7, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.5, help="iou thresshold for non-maximum suppression")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
parser.add_argument("--data_type", type=str, default="coco_test", help="Dataset type")
parser.add_argument("--video_id", type=str, default="vid_65132", help=" video id info")
opt = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_config = parse_data_config(opt.data_config)
valid_path = data_config["valid"]
class_names = load_classes(data_config["names"])
# Initiate model
if "yolov3" in opt.model_def:
model = Darknet(opt.model_def).to(device)
model.apply(weights_init_normal)
else:
kargs = get_nano_info(opt.model_def)
model = YoloNano(**kargs).to(device)
model.apply(weights_init_normal)
model = DataParallel(model)
if opt.weights_path.endswith(".weights"):
# Load darknet weights
model.load_darknet_weights(opt.weights_path)
else:
# Load checkpoint weights
model.load_state_dict(torch.load(opt.weights_path))
inference(
model,
path=valid_path,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres,
img_size=opt.img_size,
batch_size=1,
data_type = opt.data_type,
video_id = opt.video_id
)
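# Example invocation, assuming this file is saved as inference.py (all paths
# are illustrative):
# python inference.py --model_def config/yolov3.cfg \
#     --weights_path weights/yolov3.weights --data_config config/coco.data \
#     --data_type coco_test --video_id vid_65132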
|
python
|
"""A module to generate OpenAPI and JSONSchemas."""
import json
import os
from pkg_resources import get_distribution
from pydantic_openapi_helper.core import get_openapi
from pydantic_openapi_helper.inheritance import class_mapper
from queenbee.repository import RepositoryIndex
from queenbee.job import Job, JobStatus
from queenbee.recipe import Recipe, RecipeInterface
from queenbee.plugin import Plugin
folder = os.path.join(os.path.dirname(__file__), 'docs/_static/schemas')
if not os.path.isdir(folder):
os.mkdir(folder)
VERSION = '.'.join(get_distribution('queenbee').version.split('.')[:3])
info = {
"description": "",
"version": VERSION,
"title": "",
"contact": {
"name": "Ladybug Tools",
"email": "[email protected]",
"url": "https://github.com/ladybug-tools/queenbee"
},
"x-logo": {
"url": "https://www.ladybug.tools/assets/img/honeybee.png",
"altText": "Queenbee logo"
},
"license": {
"name": "MIT",
"url": "https://github.com/ladybug-tools/queenbee-schema/blob/master/LICENSE"
}
}
with open(os.path.join(folder, 'job-openapi.json'), 'w') as out_file:
json.dump(
get_openapi(
base_object=[Job], title='Queenbee Job Schema',
description='Schema documentation for Queenbee Jobs',
version=VERSION
),
out_file,
indent=2
)
with open(os.path.join(folder, 'plugin-openapi.json'), 'w') as out_file:
json.dump(
get_openapi(
base_object=[Plugin], title='Queenbee Plugin Schema',
description='Schema documentation for Queenbee Plugins',
version=VERSION
),
out_file,
indent=2
)
with open(os.path.join(folder, 'recipe-openapi.json'), 'w') as out_file:
json.dump(
get_openapi(
base_object=[Recipe], title='Queenbee Recipe Schema',
description='Schema documentation for Queenbee Recipes',
version=VERSION
),
out_file,
indent=2
)
with open(os.path.join(folder, 'repository-openapi.json'), 'w') as out_file:
json.dump(
get_openapi(
base_object=[RepositoryIndex], title='Queenbee Repository Schema',
description='Schema documentation for Queenbee Recipes',
version=VERSION
),
out_file,
indent=2
)
with open(os.path.join(folder, 'job-schema.json'), 'w') as out_file:
out_file.write(Job.schema_json())
with open(os.path.join(folder, 'plugin-schema.json'), 'w') as out_file:
out_file.write(Plugin.schema_json())
with open(os.path.join(folder, 'recipe-schema.json'), 'w') as out_file:
out_file.write(Recipe.schema_json())
with open(os.path.join(folder, 'repository-schema.json'), 'w') as out_file:
out_file.write(RepositoryIndex.schema_json())
# write openapi with inheritance and mapper json files
# these files are mainly used for creating .NET SDK
external_docs = {
"description": "OpenAPI Specification with Inheritance",
"url": "./queenbee_inheritance.json"
}
models = [Recipe, Plugin, Job, RepositoryIndex, RecipeInterface, JobStatus]
openapi = get_openapi(
models,
title='Queenbee Schema',
description='Documentation for Queenbee schema.',
version=VERSION, info=info,
external_docs=external_docs
)
with open(os.path.join(folder, 'queenbee.json'), 'w') as out_file:
json.dump(openapi, out_file, indent=2)
# with inheritance
openapi = get_openapi(
models,
title='Queenbee Schema with Inheritance',
description='Documentation for Queenbee schema.',
version=VERSION, info=info,
inheritance=True,
external_docs=external_docs
)
with open(os.path.join(folder, 'queenbee_inheritance.json'), 'w') as out_file:
json.dump(openapi, out_file, indent=2)
# add the mapper file
with open(os.path.join(folder, 'queenbee_mapper.json'), 'w') as out_file:
json.dump(
class_mapper(
models,
['queenbee', 'queenbee.interface']
),
out_file, indent=2
)
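# Assumed workflow: run this module from the repository root to regenerate
# every JSON file under docs/_static/schemas in one pass.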
|
python
|
import unittest
from time import sleep
from random import randint
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import json
# from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
#
#
# class MyListener(AbstractEventListener):
# def before_find(self, by, value, driver):
# print(by, value)
# def after_find(self, by, value, driver):
# print(by, value, "found")
# def on_exception(self, exception, driver):
# print(exception)
class CreateQuizzes(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path="../browsers/chromedriver")
# self.driver = EventFiringWebDriver(webdriver.Chrome(executable_path="../browsers/chromedriver"), MyListener())
#self.driver = webdriver.Firefox(executable_path="../browsers/geckodriver")
self.wait = WebDriverWait(self.driver, 20)
def test_create_quizzes(self):
""" Verify that user with Teachers role can Create Quiz with 3 Textual, 3 Single-Choice,
3 Multiple-Choice questions 75% passing rate."""
driver = self.driver
wait = self.wait
# Test data:
number = randint(100,1000)
quiz_name = "QA BASIC {}".format(number)
textual_question_1 = "What is Software Testing?"
textual_question_2 = "What is Software Quality Assurance?"
textual_question_3 = "Explain SDLC methodology?"
# 1. Log in with the Teacher role
email_teacher = '[email protected]'
password_teacher = 'internship'
login_url = "http://local.school.portnov.com:4520/#/login"
driver.get(login_url)
driver.find_element_by_id("mat-input-0").send_keys(email_teacher)
driver.find_element_by_id("mat-input-1").send_keys(password_teacher)
driver.find_element_by_css_selector("button[type='submit']").click()
wait.until(EC.presence_of_element_located((By.XPATH, "// div[@class = 'info']/p[contains(text(),'TEACHER')]")))
sleep(1)
wait.until(EC.visibility_of_element_located((By.XPATH, "// div[@class = 'info']/p[contains(text(),'TEACHER')]")))
driver.find_element(By.PARTIAL_LINK_TEXT, "Quizzes").click()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "a[href='#/quiz-builder']")))
driver.find_element(By.PARTIAL_LINK_TEXT,"Create New Quiz").click()
driver.find_element(By.TAG_NAME,"input").send_keys(quiz_name)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Textual')]").click()
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.mat-input-infix.mat-form-field-infix textarea").send_keys(textual_question_1)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Textual')]").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']").send_keys(textual_question_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Textual')]").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']").send_keys(textual_question_3)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
# Single choice questions:
single_choice_1 = "What is a Defect?"
single_choice_1_opt_1 = "Any flaw or imperfection in a software work product"
single_choice_1_opt_2 = "without any issues"
single_choice_2 = "What is Priority?"
single_choice_2_opt_1 = "It indicates the importance or urgency of fixing a defect"
single_choice_2_opt_2 = "anytime can fix this bug. No time limit"
single_choice_3 = "What is the difference between static testing?"
single_choice_3_opt_1 = "without code executing the program is called as Static Testing."
single_choice_3_opt_2 = "with code"
driver.find_element(By.XPATH, "//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Single-Choice')]").click()
sleep(1)
driver.find_element(By.XPATH, "//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']").send_keys(single_choice_1)
sleep(1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(single_choice_1)).send_keys(single_choice_1_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../..//mat-radio-button".format(single_choice_1)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(single_choice_1)).send_keys(single_choice_1_opt_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.left.wide mat-slider")))
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Single-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(single_choice_2)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(single_choice_2)))).send_keys(single_choice_2_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../..//mat-radio-button".format(single_choice_2)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(single_choice_2)).send_keys(single_choice_2_opt_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.left.wide mat-slider")))
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Single-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(single_choice_3)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(single_choice_3)))).send_keys(single_choice_3_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../..//mat-radio-button".format(single_choice_3)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(single_choice_3)).send_keys(single_choice_3_opt_2)
# Multiple choice questions:
multiple_choice_1 = "What is a Bug?"
multiple_choice_1_opt_1 = "Mismatch between actual and intended behaviors of the software"
multiple_choice_1_opt_2 = "Some small insect that flies around"
multiple_choice_2 = "Are Java and Javascript same languages?"
multiple_choice_2_opt_1 = "Yes"
multiple_choice_2_opt_2 = "No"
multiple_choice_3 = "What is a prime objective of a bug tracking database?"
multiple_choice_3_opt_1 = "Tracking the bugs"
multiple_choice_3_opt_2 = "To get a bug fixed"
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Multiple-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(multiple_choice_1)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(multiple_choice_1)))).send_keys(multiple_choice_1_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']/../../../../../mat-checkbox".format(multiple_choice_1)).click()
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(multiple_choice_1)).send_keys(multiple_choice_1_opt_2)
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Multiple-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(multiple_choice_2)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(multiple_choice_2)))).send_keys(multiple_choice_2_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(multiple_choice_2)).send_keys(multiple_choice_2_opt_2)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']/../../../../../mat-checkbox".format(multiple_choice_2)).click()
sleep(1)
driver.find_element(By.CSS_SELECTOR, "div.controls.ng-star-inserted>button").click()
sleep(1)
driver.find_element(By.XPATH, "//*[contains(text(), 'new empty question')]/../../..//div[contains(text(), 'Multiple-Choice')]").click()
wait.until(EC.visibility_of_element_located((By.XPATH,"//*[contains(text(), 'new empty question')]/../../..//textarea[@placeholder='Question *']"))).send_keys(multiple_choice_3)
wait.until(EC.visibility_of_element_located((By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 1*']".format(multiple_choice_3)))).send_keys(multiple_choice_3_opt_1)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']".format(multiple_choice_3)).send_keys(multiple_choice_3_opt_2)
driver.find_element(By.XPATH, "//*[contains(text(), '{}')]/../../..//*[@placeholder='Option 2*']/../../../../../mat-checkbox".format(multiple_choice_3)).click()
driver.find_element(By.XPATH, "//button/*[contains(text(),'Save')]").click()
quiz_locator = "//ac-quizzes-list//div[@class = 'quizzes']//*[contains(text(),'{}')]".format(quiz_name)
wait.until(EC.visibility_of_element_located((By.XPATH, quiz_locator)))
element = driver.find_element_by_xpath(quiz_locator)
driver.execute_script("arguments[0].scrollIntoView();", element)
element.click()
driver.get_screenshot_as_file('{} created.png'.format(quiz_name))
driver.find_element_by_xpath("//div[@class='mat-list-item-content']//h5[contains(text(),'Log Out')]").click()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".mat-button.mat-warn")))
driver.find_element_by_css_selector(".mat-button.mat-warn").click()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"button[type='submit']")))
#Sign in with role TEACHER
email = '[email protected]'
password = 'internship'
url = "http://local.school.portnov.com:4520/api/v1/sign-in"
payload = {
'email': email,
'password': password
}
headers = {
'content-type': "application/json",
'Connection': "keep-alive"
}
response = requests.post(url, data=json.dumps(payload), headers=headers)
parsed_json = json.loads(response.text)
token = parsed_json["token"]
url = "http://local.school.portnov.com:4520/api/v1/quizzes"
headers = {
'Authorization': "Bearer {}".format(token)
}
r = requests.get(url, headers=headers)
parsed_json = json.loads(r.text)
quiz_id = None
for i in parsed_json:
if i["name"] == quiz_name:
quiz_id = i["id"]
break
url = "http://local.school.portnov.com:4520/api/v1/quiz/{}".format(quiz_id)
r = requests.delete(url, headers=headers)
print(r.status_code)
self.assertTrue(r.status_code == 200)
print("Quiz: {} with id {} was permanently deleted".format(quiz_name, quiz_id))
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/bin/env python
import sys
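# Assumed input format (inferred from the parsing below): tab-separated
# key/value lines where the key's 4th comma-field starts with a date and the
# value's comma-fields 11, 12 and 14 are fare components, field 15 is tolls.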
for line in sys.stdin:
# extract data
key, val = line.strip().split('\t', 1)
s_val = val.split(',')
# day
day = key.split(',')[3][:10]
# revenue
try:
revenue = float(s_val[11]) + float(s_val[12]) + float(s_val[14])
except ValueError:
continue
# tolls
try:
tolls = float(s_val[15])
except ValueError:
continue
# print
print('%s\t%s,%s' % (day, revenue, tolls))
'''
cd ~/hw1/Task2-d
rm -rf TotalRevenueSamp.out
hfs -rm -r TotalRevenueSamp.out
hjs -D mapreduce.job.reduces=0 \
-file ~/hw1/Task2-d/src/ \
-mapper src/mapper.sh \
-input /user/wl2154/TripFareJoinSamp.txt \
-output /user/wl2154/TotalRevenueSamp.out
hfs -get TotalRevenueSamp.out
hfs -getmerge TotalRevenueSamp.out TotalRevenueSamp.txt
cat TotalRevenueSamp.txt
'''
|
python
|
from typing import Generic, Optional, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
def __init__(self, item: T):
self.item = item
self.next: Optional["Node[T]"] = None
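# Minimal usage sketch (illustrative): build and walk a two-node chain.
if __name__ == "__main__":
    head = Node(1)
    head.next = Node(2)
    node = head
    while node is not None:
        print(node.item)
        node = node.next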
|
python
|
import re
from File import *
from Base import *
from subprocess import call
class Animation_Html():
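##!
##! NOTE (inferred from usage below): this mixin assumes the composing class
##! provides self.Path, self.FileName, self.Curve_Parms_Path, self.Name,
##! self.HTML_Root, self.CGI_Root and self.Iteration_Files.
##!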
##!
##! Animation main HTML path
##!
def Animation_HTML_Path(self):
return "/".join( [ self.Path,self.FileName,self.Curve_Parms_Path ] )
##!
##! Animation main HTML file name
##!
def Animation_HTML_FileName(self):
return "/".join( [self.Animation_HTML_Path(),self.Name+".html"] )
##!
##! HTML head section
##!
def Animation_HTML_Doc_Head(self):
return (
"<!DOCTYPE html>\n"+
self.XML_Tag_Start("HTML")+
self.Animation_HTML_Head()
)
##!
##! Write animation main HTML file
##!
def Animation_HTML_Write(self):
html=self.Animation_HTML_Doc_Head()
html=html+self.Animation_HTML_Body()
html=html+self.XML_Tag_End("HTML")
outfile=self.Animation_HTML_FileName()
self.File_Path_Create(outfile)
res=self.File_Write(outfile,[html])
print(outfile + ":", res, "bytes")
##!
##! Write animation css
##!
def Animation_HTML_CSS(self):
return self.XML_TagIt(
"LINK",
{
"rel": "stylesheet",
"href": self.HTML_Root+"/W3.css",
}
)+"\n"+self.XML_TagIt(
"LINK",
{
"rel": "stylesheet",
"href": self.HTML_Root+"/Poops.css",
}
)
##!
##! Write animation script section
##!
def Animation_HTML_Script(self):
return self.XML_Tags_NL(
"SCRIPT",
self.File_Read("poops.js")
)
##!
##! Write animation head section
##!
def Animation_HTML_Head(self):
return self.XML_Tags_NL(
"HEAD",
self.Animation_HTML_Title()+self.Animation_HTML_CSS()
)
##!
##! Write animation title section
##!
def Animation_HTML_Title(self):
return self.XML_Tags_NL(
"TITLE",
"TITLE"
)
##!
##! Write animation body section
##!
def Animation_HTML_Body(self):
return self.XML_Tags_NL(
"BODY",
self.Animation_HTML_Animation_Element()
)
##!
##! Write animation SVG animation element.
##!
def Animation_HTML_Animation_Element(self):
imgs=[""]
n=0
for svgfile in (self.Iteration_Files):
imgs.append(
self.Animation_HTML_Body_File_IMG(svgfile,n)
)
n+=1
return self.XML_Tags(
"DIV",
"\n".join(imgs)+"\n",
{
"class": "w3-content w3-section",
}
)+"\n"+self.Animation_HTML_Script()
##!
##! Generate animated image tag.
##!
def Animation_HTML_Body_File_IMG(self,svgfile,n=0):
href=self.CGI_Root+"?"+svgfile
return self.XML_Tag(
"IMG",
{
"src": href,
"width": "800px",
"class": "mySlides",
}
)
|
python
|
from django.contrib.postgres.fields import ArrayField
from django.db import models
from osf.models import Node
from osf.models import OSFUser
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.validators import validate_subscription_type
from osf.utils.fields import NonNaiveDateTimeField
from website.notifications.constants import NOTIFICATION_TYPES
class NotificationSubscription(BaseModel):
primary_identifier_name = '_id'
_id = models.CharField(max_length=50, db_index=True, unique=True) # pxyz_wiki_updated, uabc_comment_replies
event_name = models.CharField(max_length=50) # wiki_updated, comment_replies
user = models.ForeignKey('OSFUser', related_name='notification_subscriptions',
null=True, blank=True, on_delete=models.CASCADE)
node = models.ForeignKey('Node', related_name='notification_subscriptions',
null=True, blank=True, on_delete=models.CASCADE)
# Notification types
none = models.ManyToManyField('OSFUser', related_name='+') # reverse relationships
email_digest = models.ManyToManyField('OSFUser', related_name='+') # for these
email_transactional = models.ManyToManyField('OSFUser', related_name='+') # are pointless
@classmethod
def load(cls, q):
# modm doesn't throw exceptions when loading things that don't exist
try:
return cls.objects.get(_id=q)
except cls.DoesNotExist:
return None
@property
def owner(self):
# ~100k have owner==user
if self.user is not None:
return self.user
# ~8k have owner=Node
elif self.node is not None:
return self.node
@owner.setter
def owner(self, value):
if isinstance(value, OSFUser):
self.user = value
elif isinstance(value, Node):
self.node = value
def add_user_to_subscription(self, user, notification_type, save=True):
for nt in NOTIFICATION_TYPES:
if getattr(self, nt).filter(id=user.id).exists():
if nt != notification_type:
getattr(self, nt).remove(user)
else:
if nt == notification_type:
getattr(self, nt).add(user)
if notification_type != 'none' and isinstance(self.owner, Node) and self.owner.parent_node:
user_subs = self.owner.parent_node.child_node_subscriptions
if self.owner._id not in user_subs.setdefault(user._id, []):
user_subs[user._id].append(self.owner._id)
self.owner.parent_node.save()
if save:
self.save()
def remove_user_from_subscription(self, user, save=True):
for notification_type in NOTIFICATION_TYPES:
try:
getattr(self, notification_type, []).remove(user)
except ValueError:
pass
if isinstance(self.owner, Node) and self.owner.parent_node:
try:
self.owner.parent_node.child_node_subscriptions.get(user._id, []).remove(self.owner._id)
self.owner.parent_node.save()
except ValueError:
pass
if save:
self.save()
class NotificationDigest(ObjectIDMixin, BaseModel):
user = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=models.CASCADE)
timestamp = NonNaiveDateTimeField()
send_type = models.CharField(max_length=50, db_index=True, validators=[validate_subscription_type, ])
event = models.CharField(max_length=50)
message = models.TextField()
# TODO: Could this be a m2m with or without an order field?
node_lineage = ArrayField(models.CharField(max_length=5))
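# Illustrative usage of the subscription model above (identifiers assumed):
# sub = NotificationSubscription.load('uabc_comment_replies')
# sub.add_user_to_subscription(user, 'email_transactional')
# # moves `user` onto the transactional channel and off the other two m2m fields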
|
python
|
import logging
log = logging.getLogger(__name__)
from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE
import os, errno
CACHE_FAILS = (NO_VALUE,)
# referenced in AttributeSafeObject.__getattr__ below but never defined in
# the original module; default to quiet behavior
DEBUG_ATTRIB_SAFE = False
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
class CachedData(object):
keys = None
request = None
dbSession = None
query_args = None
regions_manager = None
keyed_multiples = None
class LazyloadedFunction(object):
"""a deferred function"""
def __init__(
self,
object,
object_attribute,
cache_function,
*cache_function_args,
**cache_function_kwargs
):
self.object = object
self.object_attribute = object_attribute
self.cache_function = cache_function
self.cache_function_args = cache_function_args
self.cache_function_kwargs = cache_function_kwargs
try:
self.__doc__ = cache_function.__doc__
except: # pragma: no cover
pass
def execute(self):
val = self.cache_function(
*self.cache_function_args, **self.cache_function_kwargs
)
return val
class ObjectifiedDict(dict):
"""Dict that allows for .dotted access"""
def __getitem__(self, attr):
if attr in self:
item = dict.__getitem__(self, attr)
if isinstance(item, LazyloadedFunction):
item = item.execute()
dict.__setitem__(self, attr, item)
return item
def __getattr__(self, attr):
if attr in self:
if isinstance(self[attr], LazyloadedFunction):
value = self[attr].execute()
self[attr] = value
return self[attr]
return self.__getattribute__(attr)
def _lazyload(self, attr, function, *args, **kwargs):
self[attr] = LazyloadedFunction(self, attr, function, *args, **kwargs)
def _expand(self):
for k, v in list(self.items()):  # snapshot; Python 3 has no iteritems
if isinstance(v, LazyloadedFunction):
v = v.execute()
dict.__setitem__(self, k, v)
def _cacheable(self, exclude=None):
copied = self.copy()
for k, v in list(copied.items()):  # snapshot so the deletion below is safe
if isinstance(v, LazyloadedFunction):
del copied[k]
if exclude:
for k in exclude:
if k in copied:
del copied[k]
return copied
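# Illustrative usage of the lazy-loading pattern above (loader name assumed):
# data = ObjectifiedDict(id=1)
# data._lazyload('profile', load_profile_from_db, 1)
# data.profile  # load_profile_from_db(1) runs on first access, then is cached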
class AttributeSafeObject(object):
"""
Object with lax attribute access (returns an empty string ('') when the
attribute does not exist; good for templating). Based on Pylons.
"""
def __init__(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
def __getattr__(self, name):
try:
## note that we're using the object class directly
return object.__getattribute__(self, name)
except AttributeError:
if name[:2] == "__":
raise
if DEBUG_ATTRIB_SAFE:
log.debug(
"No attribute `%s` found in AttributeSafeObject instance,"
"returning empty string",
name,
)
return ""
def keys(self):
return self.__dict__.keys()
class AttributeSafeObject_set(AttributeSafeObject):
"""An AttributeSafeObject that sets & gets `set({})` on misses"""
def __getattr__(self, k):
try:
return object.__getattribute__(self, k)
except AttributeError:
if k[:2] == "__":
raise
setattr(self, k, set())
return object.__getattribute__(self, k)
class AttributeSafeObject_dict(AttributeSafeObject):
"""An AttributeSafeObject that sets & gets dict `{}` on misses"""
def __getattr__(self, k):
try:
return object.__getattribute__(self, k)
except AttributeError:
if k[:2] == "__":
raise
setattr(self, k, {})
return object.__getattribute__(self, k)
class AttributeSafeObject_dict_ids(AttributeSafeObject_dict):
"""An AttributeSafeObject_dict_ids used to manage ids"""
def add_unknown(self, key, items_to_update, v=None):
store = getattr(self, key)
for k in items_to_update:
if k not in store:
store[k] = v
def update(self, key, items_to_update, v=None):
store = getattr(self, key)
for k in items_to_update:
store[k] = v
def get_true(self, key):
store = getattr(self, key)
rval = [k for k in store.keys() if store[k]]
return rval
def get_false(self, key):
store = getattr(self, key)
rval = [k for k in store.keys() if not store[k]]
return rval
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8
import sys
import os
import math
import statistics
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime
import collections
import errno
import gzip
from ruamel_yaml import YAML
PATHS = {
"local": {
"sourcepath" : "./asciigrids_debug/",
"outputpath" : ".",
"png-out" : "png_debug/" , # path to png images
"pdf-out" : "pdf-out_debug/" , # path to pdf package
},
"test": {
"sourcepath" : "./asciigrid/",
"outputpath" : "./testout/",
"png-out" : "png2/" , # path to png images
"pdf-out" : "pdf-out2/" , # path to pdf package
},
"cluster": {
"sourcepath" : "/source/",
"outputpath" : "/out/",
"png-out" : "png/" , # path to png images
"pdf-out" : "pdf-out/" , # path to pdf package
}
}
USER = "local"
NONEVALUE = -9999
def build() :
"main"
pathId = USER
sourceFolder = ""
outputFolder = ""
if len(sys.argv) > 1 and __name__ == "__main__":
for arg in sys.argv[1:]:
k, v = arg.split("=")
if k == "path":
pathId = v
if k == "source" :
sourceFolder = v
if k == "out" :
outputFolder = v
if not sourceFolder :
sourceFolder = PATHS[pathId]["sourcepath"]
if not outputFolder :
outputFolder = PATHS[pathId]["outputpath"]
pngFolder = os.path.join(outputFolder, PATHS[pathId]["png-out"])
pdfFolder = os.path.join(outputFolder,PATHS[pathId]["pdf-out"])
for root, dirs, files in os.walk(sourceFolder):
if len(files) > 0 :
print("root", root)
print("dirs", dirs)
scenario = os.path.basename(root)
pdfpath = os.path.join(pdfFolder, "scenario_{0}.pdf".format(scenario))
makeDir(pdfpath)
pdf = PdfPages(pdfpath)
files.sort()
for file in files:
if not file.endswith(".meta"):
print("file", file)
pngfilename = file[:-3]+"png"
metafilename = file+".meta"
isGZ = file.endswith(".gz")
if isGZ :
pngfilename = file[:-6]+"png"
metafilename = file[:-2]+"meta"
filepath = os.path.join(root, file)
metapath = os.path.join(root, metafilename)
out_path = os.path.join(pngFolder, scenario, pngfilename)
createImgFromMeta( filepath, metapath, out_path, pdf=pdf)
pdf.close()
def createImgFromMeta(ascii_path, meta_path, out_path, pdf=None) :
if ascii_path.endswith(".gz") :
# Read in ascii header data
with gzip.open(ascii_path, 'rt') as source:
ascii_header = source.readlines()[:6]
else :
# Read in ascii header data
with open(ascii_path, 'r') as source:
ascii_header = source.readlines()[:6]
# Read the ASCII raster header
ascii_header = [item.strip().split()[-1] for item in ascii_header]
ascii_cols = int(ascii_header[0])
ascii_rows = int(ascii_header[1])
ascii_xll = float(ascii_header[2])
ascii_yll = float(ascii_header[3])
ascii_cs = float(ascii_header[4])
ascii_nodata = float(ascii_header[5])
title=""
label=""
colormap = 'viridis'
cMap = None
cbarLabel = None
factor = 0.001
ticklist = None
maxValue = ascii_nodata
maxLoaded = False
minValue = ascii_nodata
minLoaded = False
with open(meta_path, 'rt') as meta:
# documents = yaml.load(meta, Loader=yaml.FullLoader)
yaml=YAML(typ='safe') # default, if not specified, is 'rt' (round-trip)
documents = yaml.load(meta)
#documents = yaml.full_load(meta)
for item, doc in documents.items():
print(item, ":", doc)
if item == "title" :
title = doc
elif item == "labeltext" :
label = doc
elif item == "factor" :
factor = float(doc)
elif item == "maxValue" :
maxValue = float(doc)
maxLoaded = True
elif item == "minValue" :
minValue = float(doc)
minLoaded = True
elif item == "colormap" :
colormap = doc
elif item == "colorlist" :
cMap = doc
elif item == "cbarLabel" :
cbarLabel = doc
elif item == "ticklist" :
ticklist = list()
for i in doc :
ticklist.append(float(i))
# Read in the ascii data array
ascii_data_array = np.loadtxt(ascii_path, dtype=float, skiprows=6)  # np.float was removed in NumPy 1.24
# Set the nodata values to nan
ascii_data_array[ascii_data_array == ascii_nodata] = np.nan
# data is stored as an integer but scaled by a factor
ascii_data_array *= factor
maxValue *= factor
minValue *= factor
image_extent = [
ascii_xll, ascii_xll + ascii_cols * ascii_cs,
ascii_yll, ascii_yll + ascii_rows * ascii_cs]
# Plot data array
fig, ax = plt.subplots()
ax.set_title(title)
# Get the img object in order to pass it to the colorbar function
# Collect the optional color limits once instead of duplicating the imshow
# call eight times; this also fixes the original bug where a lone minValue
# was passed as vmax instead of vmin.
imshow_kwargs = {"extent": image_extent, "interpolation": 'none'}
imshow_kwargs["cmap"] = ListedColormap(cMap) if cMap else colormap
if minLoaded:
imshow_kwargs["vmin"] = minValue
if maxLoaded:
imshow_kwargs["vmax"] = maxValue
img_plot = ax.imshow(ascii_data_array, **imshow_kwargs)
if ticklist :
# Place a colorbar next to the map
cbar = plt.colorbar(img_plot, ticks=ticklist, orientation='vertical', shrink=0.5, aspect=14)
else :
# Place a colorbar next to the map
cbar = plt.colorbar(img_plot, orientation='vertical', shrink=0.5, aspect=14)
cbar.set_label(label)
if cbarLabel :
cbar.ax.set_yticklabels(cbarLabel)
ax.grid(True, alpha=0.5)
# save image and pdf
makeDir(out_path)
if pdf :
pdf.savefig()
plt.savefig(out_path, dpi=150)
plt.close(fig)
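# Example .meta file consumed by createImgFromMeta (keys match the parsing
# loop above, values are illustrative):
# title: Yield 2050
# labeltext: t/ha
# factor: 0.001
# maxValue: 12000
# colormap: viridis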
def makeDir(out_path) :
if not os.path.exists(os.path.dirname(out_path)):
try:
os.makedirs(os.path.dirname(out_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
if __name__ == "__main__":
build()
|
python
|
r"""
``cotk.metrics`` provides classes and functions evaluating results of models. It provides
a fair metric for every model.
"""
import random
import multiprocessing
from multiprocessing import Pool
import numpy as np
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
from .._utils.unordered_hash import UnorderedSha256
class MetricBase:
'''Base class for metrics.
'''
def __init__(self):
pass
class _PrecisionRecallMetric(MetricBase):
'''Base class for precision recall metrics. This is an abstract class.
Arguments:
dataloader (:class:`cotk.GenerationBase`): A language generation dataloader.
reference_allvocabs_key (str): Reference sentences are passed to :func:`forward` by
``data[reference_allvocabs_key]``. Default: ``resp_allvocabs``.
gen_key (str): Sentences generated by the model are passed to :func:`forward` by
``data[gen_key]``. Default: ``gen``.
Attributes:
res_prefix (str): Prefix added to the front of each key
in the result dict of :func:`close`.
'''
def __init__(self, dataloader, reference_allvocabs_key='resp_allvocabs', gen_key='gen'):
super().__init__()
self.dataloader = dataloader
self.reference_allvocabs_key = reference_allvocabs_key
self.gen_key = gen_key
self.prec_list = []
self.rec_list = []
self.res_prefix = ""
def score(self, gen, reference):
r'''This function is called by :func:`forward`.
Arguments:
* gen (list): list of generated word ids
* reference (list): list of word ids of a reference
Returns:
(scalar): score in [0, 1]
'''
raise NotImplementedError( \
"This function should be implemented by subclasses.")
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[reference_allvocabs_key] (list of list of list): Reference sentences.
Does not contain start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Outermost list: batch_size
Innermost list: number of words, allow different sizes
Second innermost list: number of sentences, allow different sizes
data[gen_prob_key] (list of list of list): Sentence generations model outputs
similar to data[reference_allvocabs_key]
'''
references = data[self.reference_allvocabs_key]
gens = data[self.gen_key]
if len(references) != len(gens):
raise ValueError("Batch num is not matched.")
for reference, gen in zip(references, gens):
# pylint: disable=no-member
matrix = np.zeros((len(reference), len(gen)), dtype=np.float32)
for i, single_ref in enumerate(reference):
for j, single_gen in enumerate(gen):
matrix[i][j] = self.score(single_gen, single_ref)
self.prec_list.append(float(np.sum(np.max(matrix, 0))) / len(gen))
self.rec_list.append(float(np.sum(np.max(matrix, 1))) / len(reference))  # recall is averaged over reference sentences, not the batch
def close(self):
'''Return a dict which contains:
* **precision**: average precision
* **recall**: average recall
'''
return {'{} precision'.format(self.res_prefix): np.average(self.prec_list), \
'{} recall'.format(self.res_prefix): np.average(self.rec_list)}
class BleuPrecisionRecallMetric(_PrecisionRecallMetric):
'''Metric for calculating sentence BLEU precision and recall
Arguments:
* ngram (int): Specifies BLEU-ngram
'''
def __init__(self, dataloader, ngram, reference_allvocabs_key='resp_allvocabs', gen_key='gen'):
super().__init__(dataloader, reference_allvocabs_key, gen_key)
if ngram not in range(1, 5):
raise ValueError("ngram should belong to [1, 4]")
self.ngram = ngram
self.weights = [1 / ngram] * ngram
self.res_prefix = 'BLEU-{}'.format(ngram)
def score(self, gen, reference):
r'''Score_fn of BLEU-ngram precision and recall
Returns:
(scalar): sentence BLEU score in [0, 1]
'''
return sentence_bleu([reference], gen, self.weights, SmoothingFunction().method1)
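# Illustrative usage (the dataloader argument is whatever GenerationBase
# instance the project provides; the word ids below are invented):
# metric = BleuPrecisionRecallMetric(dataloader, ngram=2)
# metric.forward({'resp_allvocabs': [[[2, 5, 6]]], 'gen': [[[2, 5, 6]]]})
# metric.close()  # -> {'BLEU-2 precision': ..., 'BLEU-2 recall': ...}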
class EmbSimilarityPrecisionRecallMetric(_PrecisionRecallMetric):
'''Metric for calculating cosine similarity precision and recall
Arguments:
* embed (:class:`numpy.ndarray`): A 2-d padded array of word embeddings
* mode (str): Specifies the operation that computes the bag-of-word representation.
Must be 'avg' or 'extrema':
'avg': element-wise average word embeddings
'extrema': element-wise maximum word embeddings
'''
def __init__(self, dataloader, embed, mode, \
reference_allvocabs_key='resp_allvocabs', gen_key='gen'):
super().__init__(dataloader, reference_allvocabs_key, gen_key)
if not isinstance(embed, np.ndarray) or len(np.shape(embed)) != 2:
raise ValueError("invalid type or shape or embed.")
if mode not in ['avg', 'extrema']:
raise ValueError("mode should be 'avg' or 'extrema'.")
if len(embed) != self.dataloader.vocab_size:
raise ValueError("embed size not equal to vocab size.")
self.embed = embed
self.mode = mode
self.res_prefix = '{}-bow'.format(mode)
def score(self, gen, reference):
r'''Score_fn of cosine similarity precision and recall
Returns:
(scalar): cosine similarity between two sentence embeddings, rescaled to [0, 1]
'''
gen_vec = []
ref_vec = []
for i in gen:
if i < 0:
raise ValueError("gen index out of range.")
elif i >= self.dataloader.vocab_size:
gen_vec.append(self.embed[self.dataloader.unk_id])
else:
gen_vec.append(self.embed[i])
for i in reference:
if i < 0:
raise ValueError("reference index out of range.")
elif i >= self.dataloader.vocab_size:
ref_vec.append(self.embed[self.dataloader.unk_id])
else:
ref_vec.append(self.embed[i])
if self.mode == 'avg':
gen_embed = np.average(gen_vec, 0)
ref_embed = np.average(ref_vec, 0)
else:
gen_embed = np.max(gen_vec, 0)
ref_embed = np.max(ref_vec, 0)
cos = np.sum(gen_embed * ref_embed) / \
np.sqrt(np.sum(gen_embed * gen_embed) * np.sum(ref_embed * ref_embed))
norm = (cos + 1) / 2
return norm
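# A worked example of the normalization above (hedged, illustrative numbers):
# with gen_embed = [1, 0] and ref_embed = [0, 1], cos = 0 and the returned
# score is (0 + 1) / 2 = 0.5; identical embeddings give cos = 1 and score 1.0,
# so the result always lies in [0, 1].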
class PerplexityMetric(MetricBase):
'''Metric for calculating perplexity.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
reference_allvocabs_key (str): Reference sentences with all vocabs
are passed to :func:`forward` by ``data[reference_allvocabs_key]``.
Default: ``resp_allvocabs``.
reference_len_key (str): Length of reference sentences are passed to :func:`forward`
by ``data[reference_len_key]``. Default: ``resp_length``.
gen_log_prob_key (str): Sentence generations model outputs of **log softmax** probability
are passed to :func:`forward` by ``data[gen_log_prob_key]``. Default: ``gen_log_prob``.
invalid_vocab (bool): whether gen_log_prob contains invalid vocab. Default: False
full_check (bool): whether perform full checks on `gen_log_prob` to make sure the sum
of probability is 1. Otherwise, a random check will be performed for efficiency.
Default: False
'''
def __init__(self, dataloader, \
reference_allvocabs_key="resp_allvocabs", \
reference_len_key="resp_length", \
gen_log_prob_key="gen_log_prob", \
invalid_vocab=False, \
full_check=False \
):
super().__init__()
self.dataloader = dataloader
self.reference_allvocabs_key = reference_allvocabs_key
self.reference_len_key = reference_len_key
self.gen_log_prob_key = gen_log_prob_key
self.word_loss = 0
self.length_sum = 0
self.invalid_vocab = invalid_vocab
self.full_check = full_check
def forward(self, data):
'''Processing a batch of data. Smoothing will be performed for invalid vocabs.
Unknown vocabs will be ignored.
TODO:
Find a place to explain valid vocabs, invalid vocabs, and unknown vocabs.
Arguments:
data (dict): A dict at least contains the following keys.
data[reference_allvocabs_key] (list or :class:`numpy.array`): Reference sentences
with all vocabs. Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_sentence_length]`
data[reference_len_key] (list): Length of reference sentences. Contains start token (eg:``<go>``)
and end token (eg:``<eos>``). Size: `[batch_size]`
data[gen_log_prob_key] (list or :class:`numpy.array`): Sentence generations model outputs of
**log softmax** probability. Contains end token (eg:``<eos>``), but without start token
(eg: ``<go>``). The 2nd dimension can be jagged.
Size: `[batch_size, gen_sentence_length, vocab_size]` for ``invalid_vocab = False``.
`[batch_size, gen_sentence_length, all_vocab_size]` for ``invalid_vocab = True``.
Warning:
``data[gen_log_prob_key]`` must be processed after log_softmax. That means,
``np.sum(np.exp(gen_log_prob), -1)`` equals ``np.ones((batch_size, gen_sentence_length))``
'''
resp_allvocabs = data[self.reference_allvocabs_key]
resp_length = data[self.reference_len_key]
gen_log_prob = data[self.gen_log_prob_key]
if len(resp_allvocabs) != len(resp_length) or len(resp_allvocabs) != len(gen_log_prob):
raise ValueError("Batch num is not matched.")
# perform random check to assert the probability is valid
checkid = random.randint(0, len(resp_length)-1)
if resp_length[checkid] < 2:
raise ValueError("resp_length must no less than 2, because <go> and <eos> are always included.")
checkrow = random.randint(0, resp_length[checkid]-2)
if not np.isclose(np.sum(np.exp(gen_log_prob[checkid][checkrow])), 1):
print("gen_log_prob[%d][%d] exp sum is equal to %f." % (checkid, checkrow, \
np.sum(np.exp(gen_log_prob[checkid][checkrow]))))
raise ValueError("data[gen_log_prob_key] must be processed after log_softmax.")
if not isinstance(resp_allvocabs, np.ndarray):
resp_allvocabs = np.array(resp_allvocabs)
if not isinstance(gen_log_prob, np.ndarray):
gen_log_prob = np.array(gen_log_prob)
invalid_vocab_num = self.dataloader.all_vocab_size - self.dataloader.vocab_size
#resp = resp_allvocabs.copy()
#resp[resp >= self.dataloader.vocab_size] = self.dataloader.unk_id
for i, single_length in enumerate(resp_length):
# perform full check to assert the probability is valid
if self.full_check:
expsum = np.sum(np.exp(gen_log_prob[i][:single_length-1]), -1)
if not np.allclose(expsum, [1] * (single_length - 1)):
raise ValueError("data[gen_log_prob_key] must be processed after log_softmax.")
resp_now = np.array(resp_allvocabs[i][1:single_length])
gen_log_prob_now = np.array(gen_log_prob[i])
if not self.invalid_vocab:
if gen_log_prob_now.shape[1] != self.dataloader.vocab_size:
raise ValueError("The third dimension gen_log_prob should be equals to vocab_size when \
invalid_vocab = False, \
but %d != %d" % (gen_log_prob_now.shape[1], self.dataloader.vocab_size))
else:
if gen_log_prob_now.shape[1] != self.dataloader.all_vocab_size:
raise ValueError("The third dimension gen_log_prob should be equals to all_vocab_size \
when invalid_vocab = True, \
but %d != %d" % (gen_log_prob_now.shape[1], self.dataloader.vocab_size))
# calc normal vocab
normal_idx = np.where(np.logical_and(resp_now != self.dataloader.unk_id, \
resp_now < self.dataloader.vocab_size))
self.word_loss += -np.sum(gen_log_prob_now[normal_idx, resp_now[normal_idx]])
self.length_sum += np.array(normal_idx).shape[1]
# calc invalid vocab
invalid_idx = np.where(resp_now >= self.dataloader.vocab_size)
invalid_log_prob = gen_log_prob_now[\
invalid_idx, [self.dataloader.unk_id] * len(invalid_idx) \
] - np.log(invalid_vocab_num)
if self.invalid_vocab:
extra_invalid_log_prob = gen_log_prob_now[invalid_idx, resp_now[invalid_idx]]
self.word_loss += -np.sum(np.log( \
np.exp(invalid_log_prob) + np.exp(extra_invalid_log_prob) \
))
else:
self.word_loss += -np.sum(invalid_log_prob)
self.length_sum += np.array(invalid_idx).shape[1]
def close(self):
'''Return a dict which contains:
* **perplexity**: perplexity value
'''
return {"perplexity": np.exp(self.word_loss / self.length_sum)}
class MultiTurnPerplexityMetric(MetricBase):
'''Metric for calculating multi-turn perplexity.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
reference_allvocabs_key (str): Reference sentences with all vocabs
are passed to :func:`forward` by ``data[reference_allvocabs_key]``.
Default: ``sent_allvocabs``.
reference_len_key (str): Length of reference sentences are passed to :func:`forward`
by ``data[reference_len_key]``. Default: ``sent_length``.
gen_log_prob_key (str): Sentence generations model outputs of **log softmax** probability
are passed to :func:`forward` by ``data[gen_log_prob_key]``. Default: ``gen_log_prob``.
invalid_vocab (bool): whether gen_log_prob contains invalid vocab. Default: False
full_check (bool): whether perform full checks on `gen_log_prob` to make sure the sum
of probability is 1. Otherwise, a random check will be performed for efficiency.
Default: False
'''
def __init__(self, dataloader, reference_allvocabs_key="sent_allvocabs", \
reference_len_key="sent_length", \
gen_log_prob_key="gen_log_prob", \
invalid_vocab=False, \
full_check=False \
):
super().__init__()
self.dataloader = dataloader
self.reference_allvocabs_key = reference_allvocabs_key
self.reference_len_key = reference_len_key
self.gen_log_prob_key = gen_log_prob_key
self.invalid_vocab = invalid_vocab
self.sub_metric = PerplexityMetric(dataloader, \
reference_allvocabs_key="sent_allvocabs", \
reference_len_key="sent_length", \
gen_log_prob_key="gen_log_prob", \
invalid_vocab=invalid_vocab, \
full_check=full_check)
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[reference_allvocabs_key] (list or :class:`numpy.array`): Reference sentences
with all vocabs.
Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_turn_length, max_sentence_length]`
data[reference_len_key] (list of list): Length of reference sentences. Contains
start token (eg:``<go>``) and end token (eg:``<eos>``). It must NOT be padded,
which means the inner lists may have different length.
Length of outer list: `batch_size`
data[gen_log_prob_key] (list or :class:`numpy.array`): Sentence generations model outputs of
**log softmax** probability. Contains end token (eg:``<eos>``), but without start token
(eg: ``<go>``). The 2nd / 3rd dimension can be jagged or padded.
Size: `[batch_size, max_turn_length, gen_sentence_length, vocab_size]`.
Warning:
``data[gen_log_prob_key]`` must be processed after log_softmax. That means,
``np.sum(np.exp(gen_log_prob), -1)`` equals ``np.ones((batch_size, gen_sentence_length))``
'''
reference_allvocabs = data[self.reference_allvocabs_key]
length = data[self.reference_len_key]
gen_log_prob = data[self.gen_log_prob_key]
if len(length) != len(reference_allvocabs) or len(length) != len(gen_log_prob):
raise ValueError("Batch num is not matched.")
for i, sent_length in enumerate(length):
# Pass turn as batch for sub_metric, the result will be same.
turn_length = len(sent_length)
if len(reference_allvocabs[i]) < turn_length or len(gen_log_prob[i]) < turn_length:
raise ValueError("Turn num is not matched.")
self.sub_metric.forward({"sent_allvocabs": reference_allvocabs[i][:turn_length], \
"sent_length": sent_length, \
"gen_log_prob": gen_log_prob[i][:turn_length]})
def close(self):
'''Return a dict which contains:
* **perplexity**: perplexity value
'''
return self.sub_metric.close()
class BleuCorpusMetric(MetricBase):
'''Metric for calculating BLEU.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
reference_allvocabs_key (str): Reference sentences with all vocabs
are passed to :func:.forward by ``data[reference_allvocabs_key]``.
Default: ``resp_allvocabs``.
gen_key (str): Sentences generated by model are passed to :func:.forward by
``data[gen_key]``. Default: ``gen``.
'''
def __init__(self, dataloader, reference_allvocabs_key="resp_allvocabs", gen_key="gen"):
super().__init__()
self.dataloader = dataloader
self.reference_allvocabs_key = reference_allvocabs_key
self.gen_key = gen_key
self.refs = []
self.hyps = []
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[reference_allvocabs_key] (list or :class:`numpy.array` of `int`):
reference_allvocabs sentences.
Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_sentence_length]`
data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
Size: `[batch_size, gen_sentence_length]`.
'''
gen = data[self.gen_key]
resp = data[self.reference_allvocabs_key]
if len(resp) != len(gen):
raise ValueError("Batch num is not matched.")
for gen_sen, resp_sen in zip(gen, resp):
self.hyps.append(self.dataloader.trim_index(gen_sen))
self.refs.append([self.dataloader.trim_index(resp_sen[1:])])
def close(self):
'''Return a dict which contains:
* **bleu**: bleu value.
'''
try:
return {"bleu": \
corpus_bleu(self.refs, self.hyps, smoothing_function=SmoothingFunction().method7)}
except ZeroDivisionError as _:
raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
class SelfBleuCorpusMetric(MetricBase):
'''Metric for calculating Self-BLEU.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
gen_key (str): Sentences generated by model are passed to :func:.forward by
``data[gen_key]``. Default: ``gen``.
sample (int): Number of samples sampled from the generated sentences. Default: 1000.
'''
def __init__(self, dataloader, gen_key="gen", sample=1000):
super().__init__()
self.dataloader = dataloader
self.gen_key = gen_key
self.sample = sample
self.refs = []
self.hyps = []
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
Size: `[batch_size, gen_sentence_length]`.
'''
gen = data[self.gen_key]
for gen_sen in gen:
self.hyps.append(self.dataloader.trim_index(gen_sen))
def run_f(self, ele):
'''Auxiliary function which returns:
* **sentence-self-bleu**: sentence-self-bleu value.
'''
return sentence_bleu(ele[0], ele[1], smoothing_function=SmoothingFunction().method1)
def close(self):
'''Return a dict which contains:
* **self-bleu**: self-bleu value.
'''
if self.sample > len(self.hyps):
self.sample = len(self.hyps)
random.shuffle(self.hyps)
ref = self.hyps[:self.sample]
try:
bleu_irl = []
if self.sample >= 1000:
pool = Pool(multiprocessing.cpu_count())
bleu_irl = pool.map(self.run_f, [(ref[:i]+ref[i+1:self.sample], ref[i]) \
for i in range(self.sample)])
pool.close()
pool.join()
elif self.sample > 1:
for i in range(self.sample):
bleu_irl.append(self.run_f((ref[:i]+ref[i+1:], ref[i])))
return {"self-bleu" : 1.0 * sum(bleu_irl) / len(bleu_irl)}
except ZeroDivisionError as _:
raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
class FwBwBleuCorpusMetric(MetricBase):
'''Metric for calculating forward and backward (fw/bw) BLEU.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
reference_test_key (str): Reference sentences with all vocabs in test data
are passed to :func:.forward by ``data[reference_test_key]``.
gen_key (str): Sentences generated by model are passed to :func:.forward by
``data[gen_key]``. Default: ``gen``.
sample (int): Number of samples sampled from the generated sentences. Default: 1000.
'''
def __init__(self, dataloader, \
reference_test_key, \
gen_key="gen", \
sample=1000):
super().__init__()
self.dataloader = dataloader
self.reference_test_key = reference_test_key
self.gen_key = gen_key
self.sample = sample
self.refs = []
self.hyps = []
resp = self.dataloader.data["test"][self.reference_test_key]
for resp_sen in resp:
self.refs.append(self.dataloader.trim_index(resp_sen[1:]))
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
Size: `[batch_size, gen_sentence_length]`.
'''
gen = data[self.gen_key]
for gen_sen in gen:
self.hyps.append(self.dataloader.trim_index(gen_sen))
def run_f(self, ele):
'''Auxiliary function which returns:
* **sentence-self-bleu**: sentence-self-bleu value.
'''
return sentence_bleu(ele[0], ele[1], ele[2], smoothing_function=SmoothingFunction().method1)
def close(self):
'''Return a dict which contains:
* **fwbwbleu**: fw/bw bleu value.
'''
# clip sample to the smaller corpus, since both self.hyps and self.refs are indexed below
sample_num = min(len(self.hyps), len(self.refs))
if self.sample > sample_num:
self.sample = sample_num
random.shuffle(self.hyps)
random.shuffle(self.refs)
try:
result = {}
for ngram in range(2, 5):
weight = tuple((1. / ngram for _ in range(ngram)))
if self.sample >= 1000:
pool = Pool(multiprocessing.cpu_count())
bleu_irl_fw = pool.map(self.run_f, \
[(self.refs, self.hyps[i], weight) for i in range(self.sample)])
bleu_irl_bw = pool.map(self.run_f, \
[(self.hyps, self.refs[i], weight) for i in range(self.sample)])
pool.close()
pool.join()
else:
bleu_irl_fw, bleu_irl_bw = [], []
for i in range(self.sample):
bleu_irl_fw.append(self.run_f((self.refs, self.hyps[i], weight)))
bleu_irl_bw.append(self.run_f((self.hyps, self.refs[i], weight)))
fw_bleu = (1.0 * sum(bleu_irl_fw) / len(bleu_irl_fw))
bw_bleu = (1.0 * sum(bleu_irl_bw) / len(bleu_irl_bw))
result["fw-bleu-%d"%ngram] = fw_bleu
result["bw-bleu-%d"%ngram] = bw_bleu
result["fw-bw-bleu-%d"%ngram] = 2.0 * bw_bleu * fw_bleu / (fw_bleu + bw_bleu)
return result
except ZeroDivisionError as _:
raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
class MultiTurnBleuCorpusMetric(MetricBase):
'''Metric for calculating multi-turn BLEU.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
reference_allvocabs_key (str): Reference sentences with all vocabs are passed to
:func:`forward` by ``data[reference_allvocabs_key]``.
Default: ``reference_allvocabs``.
gen_key (str): Sentences generated by model are passed to :func:.forward by
``data[gen_key]``. Default: ``gen``.
turn_len_key (str): Turn length are passed to :func:.forward by
``data[turn_len_key]``. Default: ``turn_length``.
'''
def __init__(self, dataloader, reference_allvocabs_key="reference_allvocabs", \
gen_key="gen", \
turn_len_key="turn_length" \
):
super().__init__()
self.dataloader = dataloader
self.reference_allvocabs_key = reference_allvocabs_key
self.turn_len_key = turn_len_key
self.gen_key = gen_key
self.refs = []
self.hyps = []
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[reference_allvocabs_key] (list or :class:`numpy.array`):
Reference sentences with all vocabs.
Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_turn_length, max_sentence_length]`
data[gen_key] (list or :class:`numpy.array`): 3-d array of int.
Sentences generated by model.
Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
The 2nd / 3rd dimension can be jagged.
Size: `[batch_size, max_turn_length, gen_sentence_length]`.
data[turn_len_key] (list or :class:`numpy.array`): Length of turns in each sample.
Size: `[batch_size]`
'''
reference_allvocabs = data[self.reference_allvocabs_key]
length = data[self.turn_len_key]
gen = data[self.gen_key]
if len(length) != len(reference_allvocabs) or len(length) != len(gen):
raise ValueError("Batch num is not matched.")
for i, turn_length in enumerate(length):
gen_session = gen[i]
ref_session = reference_allvocabs[i]
for j in range(turn_length):
self.hyps.append(self.dataloader.trim_index(gen_session[j]))
self.refs.append([self.dataloader.trim_index(ref_session[j])[1:]])
def close(self):
'''Return a dict which contains:
* **bleu**: bleu value.
'''
try:
return {"bleu": \
corpus_bleu(self.refs, self.hyps, smoothing_function=SmoothingFunction().method7)}
except ZeroDivisionError as _:
raise ZeroDivisionError("Bleu smoothing divided by zero. This is a known bug of corpus_bleu, \
usually caused when there is only one sample and the sample length is 1.")
class SingleTurnDialogRecorder(MetricBase):
'''A metric-like class for recording generated sentences and references.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
post_allvocabs_key (str): Dialog post are passed to :func:`forward`
by ``data[post_allvocabs_key]``.
Default: ``post``.
resp_allvocabs_key (str): Dialog responses are passed to :func:`forward`
by ``data[resp_allvocabs_key]``.
Default: ``resp``.
gen_key (str): Sentence generated by model are passed to :func:`forward` by
``data[gen_key]``. Default: ``gen``.
'''
def __init__(self, dataloader, post_allvocabs_key="post_allvocabs", \
resp_allvocabs_key="resp_allvocabs", gen_key="gen"):
super().__init__()
self.dataloader = dataloader
self.post_allvocabs_key = post_allvocabs_key
self.resp_allvocabs_key = resp_allvocabs_key
self.gen_key = gen_key
self.post_list = []
self.resp_list = []
self.gen_list = []
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[post_allvocabs_key] (list or :class:`numpy.array` of `int`):
Dialog posts with all vocabs.
Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_sentence_length]`
data[resp_allvocabs_key] (list or :class:`numpy.array` of `int`):
Dialog responses with all vocabs.
Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_sentence_length]`
data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
Size: `[batch_size, gen_sentence_length]`.
'''
post_allvocabs = data[self.post_allvocabs_key]
resp_allvocabs = data[self.resp_allvocabs_key]
gen = data[self.gen_key]
if len(post_allvocabs) != len(resp_allvocabs) or len(resp_allvocabs) != len(gen):
raise ValueError("Batch num is not matched.")
for i, post_sen in enumerate(post_allvocabs):
self.post_list.append(self.dataloader.index_to_sen(post_sen[1:]))
self.resp_list.append(self.dataloader.index_to_sen(resp_allvocabs[i][1:]))
self.gen_list.append(self.dataloader.index_to_sen(gen[i]))
def close(self):
'''Return a dict which contains:
* **post**: a list of post sentences.
* **resp**: a list of response sentences.
* **gen**: a list of generated sentences.
'''
return {"post": self.post_list, "resp": self.resp_list, "gen": self.gen_list}
class MultiTurnDialogRecorder(MetricBase):
'''A metric-like class for recording generated sentences and references.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
context_allvocabs_key (str): Dialog context are passed to :func:`forward` by
``data[context_key]``. Default: ``context_allvocabs``.
reference_allvocabs_key (str): Dialog references with all vocabs
are passed to :func:`forward` by ``data[reference_allvocabs_key]``.
Default: ``reference_allvocabs``.
gen_key (str): Sentences generated by model are passed to :func:`forward` by
``data[gen_key]``. Default: ``gen``.
turn_len_key (str): Turn length are passed to :func:.forward by
``data[turn_len_key]``. Default: ``turn_length``.
'''
def __init__(self, dataloader, context_allvocabs_key="context_allvocabs", \
reference_allvocabs_key="reference_allvocabs", gen_key="gen", \
turn_len_key="turn_length"):
super().__init__()
self.dataloader = dataloader
self.context_allvocabs_key = context_allvocabs_key
self.reference_allvocabs_key = reference_allvocabs_key
self.gen_key = gen_key
self.turn_len_key = turn_len_key
self.context_list = []
self.reference_list = []
self.gen_list = []
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[context_allvocabs_key] (list or :class:`numpy.array` of `int`): Dialog post.
A 3-d padded array containing id of words.
Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_turn_length, max_sentence_length]`
data[reference_allvocabs_key] (list or :class:`numpy.array` of `int`):
Dialog responses with all vocabs. A 3-d padded array containing id of words.
Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``).
Size: `[batch_size, max_turn_length, max_sentence_length]`
data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
A 3-d padded array containing id of words.
Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
Size: `[batch_size, max_turn_length, gen_sentence_length]`.
data[turn_len_key] (list or :class:`numpy.array`): Length of turns in each sample.
Size: `[batch_size]`
'''
context_allvocabs = data[self.context_allvocabs_key]
reference_allvocabs = data[self.reference_allvocabs_key]
gen = data[self.gen_key]
turn_length = data[self.turn_len_key]
if len(gen) != len(reference_allvocabs):
raise ValueError("Batch num is not matched.")
for i, context_sen in enumerate(context_allvocabs):
self.context_list.append(self.dataloader.multi_turn_index_to_sen( \
np.array(context_sen), ignore_first_token=True))
self.reference_list.append(self.dataloader.multi_turn_index_to_sen( \
np.array(reference_allvocabs[i]), turn_length=turn_length[i], ignore_first_token=True))
self.gen_list.append(self.dataloader.multi_turn_index_to_sen( \
np.array(gen[i]), turn_length=turn_length[i]))
if len(self.reference_list[-1]) != len(self.gen_list[-1]):
raise ValueError("Reference turn num %d != gen turn num %d." % \
(len(self.reference_list[-1]), len(self.gen_list[-1])))
def close(self):
'''Return a dict which contains:
* **context**: a list of post sentences.
* **reference**: a list of response sentences.
* **gen**: a list of generated sentences.
'''
return {"context": self.context_list, "reference": self.reference_list, "gen": self.gen_list}
class LanguageGenerationRecorder(MetricBase):
'''A metric-like class for recording generated sentences.
Arguments:
dataloader (:class:cotk.GenerationBase): A language generation dataloader.
gen_key (str): Sentences generated by model are passed to :func:`forward` by
``data[gen_key]``. Default: ``gen``.
'''
def __init__(self, dataloader, gen_key="gen"):
super().__init__()
self.dataloader = dataloader
self.gen_key = gen_key
self.gen_list = []
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains the following keys.
data[gen_key] (list or :class:`numpy.array` of `int`): Sentences generated by model.
Contains end token (eg: ``<eos>``), but without start token (eg: ``<go>``).
Size: `[batch_size, gen_sentence_length]`.
'''
gen = data[self.gen_key]
for sen in gen:
self.gen_list.append(self.dataloader.index_to_sen(sen))
def close(self):
'''Return a dict which contains:
* **gen**: a list of generated sentences.
'''
return {"gen": self.gen_list}
class HashValueRecorder(MetricBase):
'''A metric-like class for recording hash values.
'''
def __init__(self, hash_key="hashvalue"):
super().__init__()
self._hash_key = hash_key
self.unordered_hash = None
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains hashvalue.
'''
if "hashvalue" in data:
if self.unordered_hash is None:
self.unordered_hash = UnorderedSha256()
self.unordered_hash.update_hash(data["hashvalue"])
def close(self):
'''Return a dict which contains the items which all the
metric components returned.
'''
if self.unordered_hash:
return {self._hash_key: self.unordered_hash.digest()}
else:
return {}
class MetricChain(MetricBase):
'''A metric-like class for stacked metrics. You can use this class
to combine multiple metrics into one.
Examples:
>>> metric = MetricChain()
>>> metric.add_metric(BleuCorpusMetric(dataloader))
>>> metric.add_metric(SingleTurnDialogRecorder(dataloader))
'''
def __init__(self):
super().__init__()
self.metric_list = []
def add_metric(self, metric):
'''Add metric for processing.
Arguments:
metric (MetricBase): a metric class
'''
if not isinstance(metric, MetricBase):
raise TypeError("Metric must be a subclass of MetricBase")
self.metric_list.append(metric)
def forward(self, data):
'''Processing a batch of data.
Arguments:
data (dict): A dict at least contains keys which all the
metric components need.
'''
for metric in self.metric_list:
metric.forward(data)
def close(self):
'''Return a dict which contains the items which all the
metric components returned.
'''
ret_dict = {}
for metric in self.metric_list:
ret_dict.update(metric.close())
return ret_dict
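# A usage sketch for MetricChain (hedged; `dataloader` and `data` are
# placeholder names). One forward()/close() call drives every component:
#
# metric = MetricChain()
# metric.add_metric(BleuCorpusMetric(dataloader))
# metric.add_metric(SingleTurnDialogRecorder(dataloader))
# metric.forward(data)  # data must contain the keys every component needs
# result = metric.close()  # merged dict of all components' results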
|
python
|
from re import search
from requests import get, post
from requests.exceptions import ConnectionError, MissingSchema, ReadTimeout
from sqlalchemy import Boolean, case, ForeignKey, Integer
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import true
from eNMS import app
from eNMS.database import db
from eNMS.models import models
from eNMS.models.base import AbstractBase
@db.set_custom_properties
class Task(AbstractBase):
__tablename__ = type = "task"
id = db.Column(Integer, primary_key=True)
name = db.Column(db.SmallString, unique=True)
description = db.Column(db.SmallString)
scheduling_mode = db.Column(db.SmallString, default="standard")
frequency = db.Column(Integer)
frequency_unit = db.Column(db.SmallString, default="seconds")
start_date = db.Column(db.SmallString)
end_date = db.Column(db.SmallString)
crontab_expression = db.Column(db.SmallString)
is_active = db.Column(Boolean, default=False)
initial_payload = db.Column(db.Dict)
devices = relationship(
"Device", secondary=db.task_device_table, back_populates="tasks"
)
pools = relationship("Pool", secondary=db.task_pool_table, back_populates="tasks")
service_id = db.Column(Integer, ForeignKey("service.id"))
service = relationship("Service", back_populates="tasks")
service_name = association_proxy("service", "name")
model_properties = ["next_run_time", "time_before_next_run", "status"]
def __init__(self, **kwargs):
super().update(**kwargs)
def update(self, **kwargs):
super().update(**kwargs)
if self.is_active:
self.schedule()
def delete(self):
post(f"{app.scheduler_address}/delete_job", json=self.id)
@hybrid_property
def status(self):
return "Active" if self.is_active else "Inactive"
@status.expression
def status(cls): # noqa: N805
return case([(cls.is_active, "Active")], else_="Inactive")
@classmethod
def rbac_filter(cls, query, mode, user):
public_tasks = query.join(cls.service).filter(
models["service"].public == true()
)
user_access_tasks = (
query.join(cls.service)
.join(models["access"], models["service"].access)
.join(models["user"], models["access"].users)
.filter(models["user"].name == user.name)
)
user_group_access_tasks = (
query.join(cls.service)
.join(models["access"], models["service"].access)
.join(models["group"], models["access"].groups)
.join(models["user"], models["group"].users)
.filter(models["user"].name == user.name)
)
return public_tasks.union(user_access_tasks, user_group_access_tasks)
@property
def next_run_time(self):
try:
return get(
f"{app.scheduler_address}/next_runtime/{self.id}", timeout=0.01
).json()
except (ConnectionError, MissingSchema, ReadTimeout):
return "Scheduler Unreachable"
@property
def time_before_next_run(self):
try:
return get(
f"{app.scheduler_address}/time_left/{self.id}", timeout=0.01
).json()
except (ConnectionError, MissingSchema, ReadTimeout):
return "Scheduler Unreachable"
def schedule(self, mode="schedule"):
try:
result = post(
f"{app.scheduler_address}/schedule",
json={"mode": mode, "task": self.get_properties()},
).json()
except ConnectionError:
return {"alert": "Scheduler Unreachable: the task cannot be scheduled."}
self.is_active = result.get("active", False)
return result
@db.set_custom_properties
class Event(AbstractBase):
__tablename__ = type = "event"
id = db.Column(Integer, primary_key=True)
name = db.Column(db.SmallString, unique=True)
log_source = db.Column(db.SmallString)
log_source_regex = db.Column(Boolean, default=False)
log_content = db.Column(db.SmallString)
log_content_regex = db.Column(Boolean, default=False)
service_id = db.Column(Integer, ForeignKey("service.id"))
service = relationship("Service", back_populates="events")
service_name = association_proxy("service", "name")
def match_log(self, source, content):
source_match = (
search(self.log_source, source)
if self.log_source_regex
else self.log_source in source
)
content_match = (
search(self.log_content, content)
if self.log_content_regex
else self.log_content in content
)
if source_match and content_match:
self.service.run()
|
python
|
# Generated by Django 2.1 on 2020-08-04 10:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('encounterapp', '0032_auto_20200801_1914'),
('encounterapp', '0032_auto_20200801_1758'),
]
operations = [
]
|
python
|
import os
#BASE_DIR = os.path.abspath('.')
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) #os.path.abspath('.')
ROUGE_DIR = os.path.join(BASE_DIR,'summariser','rouge','ROUGE-RELEASE-1.5.5/') #do not delete the '/' at the end
PROCESSED_PATH = os.path.join(BASE_DIR,'data','summaries_processed_data')
SUMMARY_DB_DIR = os.path.join(BASE_DIR,'data','sampled_summaries')
DOC_SEQUENCE_PATH = os.path.join(BASE_DIR,'summariser','utils','DocsSequence.txt')
LANGUAGE = 'english'
|
python
|
""""Utilities for Diffie-Hellman key exchange."""
from __future__ import unicode_literals
import base64
import warnings
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric.dh import DHParameterNumbers, DHPublicNumbers
from openid import cryptutil
from openid.constants import DEFAULT_DH_GENERATOR, DEFAULT_DH_MODULUS
from openid.oidutil import toBase64
def _xor(a_b):
# Python 2 only
a, b = a_b
return chr(ord(a) ^ ord(b))
def strxor(x, y):
if len(x) != len(y):
raise ValueError('Inputs to strxor must have the same length')
if six.PY2:
return b"".join(_xor((a, b)) for a, b in zip(x, y))
else:
assert six.PY3
return bytes((a ^ b) for a, b in zip(x, y))
class DiffieHellman(object):
"""Utility for Diffie-Hellman key exchange."""
def __init__(self, modulus, generator):
"""Create a new instance.
@type modulus: six.text_type, Union[six.integer_types] are deprecated
@type generator: six.text_type, Union[six.integer_types] are deprecated
"""
if isinstance(modulus, six.integer_types):
warnings.warn("Modulus should be passed as base64 encoded string.")
else:
modulus = cryptutil.base64ToLong(modulus)
if isinstance(generator, six.integer_types):
warnings.warn("Generator should be passed as base64 encoded string.")
else:
generator = cryptutil.base64ToLong(generator)
self.parameter_numbers = DHParameterNumbers(modulus, generator)
parameters = self.parameter_numbers.parameters(default_backend())
self.private_key = parameters.generate_private_key()
@classmethod
def fromDefaults(cls):
"""Create Diffie-Hellman with the default modulus and generator."""
return cls(DEFAULT_DH_MODULUS, DEFAULT_DH_GENERATOR)
@property
def modulus(self):
"""Return the prime modulus value.
@rtype: Union[six.integer_types]
"""
warnings.warn("Modulus property will return base64 encoded string.", DeprecationWarning)
return self.parameter_numbers.p
@property
def generator(self):
"""Return the generator value.
@rtype: Union[six.integer_types]
"""
warnings.warn("Generator property will return base64 encoded string.", DeprecationWarning)
return self.parameter_numbers.g
@property
def parameters(self):
"""Return base64 encoded modulus and generator.
@return: Tuple with modulus and generator
@rtype: Tuple[six.text_type, six.text_type]
"""
modulus = self.parameter_numbers.p
generator = self.parameter_numbers.g
return cryptutil.longToBase64(modulus), cryptutil.longToBase64(generator)
@property
def public(self):
"""Return the public key.
@rtype: Union[six.integer_types]
"""
warnings.warn("Attribute 'public' is deprecated. Use 'public_key' instead.", DeprecationWarning)
return self.private_key.public_key().public_numbers().y
@property
def public_key(self):
"""Return base64 encoded public key.
@rtype: six.text_type
"""
return cryptutil.longToBase64(self.private_key.public_key().public_numbers().y)
def usingDefaultValues(self):
return self.parameters == (DEFAULT_DH_MODULUS, DEFAULT_DH_GENERATOR)
def getSharedSecret(self, composite):
"""Return a shared secret.
@param composite: Public key of the other party.
@type composite: Union[six.integer_types]
@rtype: Union[six.integer_types]
"""
warnings.warn("Method 'getSharedSecret' is deprecated in favor of '_get_shared_secret'.", DeprecationWarning)
return cryptutil.bytes_to_int(self._get_shared_secret(composite))
def _get_shared_secret(self, public_key):
"""Return a shared secret.
@param public_key: Base64 encoded public key of the other party.
@type public_key: six.text_type
@rtype: six.binary_type
"""
public_numbers = DHPublicNumbers(cryptutil.base64ToLong(public_key), self.parameter_numbers)
return self.private_key.exchange(public_numbers.public_key(default_backend()))
def xorSecret(self, composite, secret, hash_func):
warnings.warn("Method 'xorSecret' is deprecated, use 'xor_secret' instead.", DeprecationWarning)
dh_shared = self._get_shared_secret(cryptutil.longToBase64(composite))
# The DH secret must be `btwoc` compatible.
# See http://openid.net/specs/openid-authentication-2_0.html#rfc.section.8.2.3 for details.
dh_shared = cryptutil.fix_btwoc(dh_shared)
hashed_dh_shared = hash_func(dh_shared)
return strxor(secret, hashed_dh_shared)
def xor_secret(self, public_key, secret, algorithm):
"""Return a base64 encoded XOR of a secret key and hash of a DH exchanged secret.
@param public_key: Base64 encoded public key of the other party.
@type public_key: six.text_type
@param secret: Base64 encoded secret
@type secret: six.text_type
@type algorithm: hashes.HashAlgorithm
@rtype: six.text_type
"""
dh_shared = self._get_shared_secret(public_key)
# The DH secret must be `btwoc` compatible.
# See http://openid.net/specs/openid-authentication-2_0.html#rfc.section.8.2.3 for details.
dh_shared = cryptutil.fix_btwoc(dh_shared)
digest = hashes.Hash(algorithm, backend=default_backend())
digest.update(dh_shared)
hashed_dh_shared = digest.finalize()
return toBase64(strxor(base64.b64decode(secret), hashed_dh_shared))
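# A usage sketch (hedged, illustrative only): two parties derive the same
# shared secret from each other's base64 encoded public keys, and xor_secret
# round-trips a 32-byte secret (matching the SHA-256 digest size).
if __name__ == '__main__':
    alice = DiffieHellman.fromDefaults()
    bob = DiffieHellman.fromDefaults()
    # Both sides compute the same shared secret from the peer's public key.
    assert alice._get_shared_secret(bob.public_key) == bob._get_shared_secret(alice.public_key)
    # Mask a base64 secret with the hashed shared secret, then unmask it.
    secret = base64.b64encode(b'\x00' * 32).decode('ascii')
    masked = alice.xor_secret(bob.public_key, secret, hashes.SHA256())
    assert bob.xor_secret(alice.public_key, masked, hashes.SHA256()) == secret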
|
python
|
import imp
import astropy.units as un
import astropy.coordinates as coord
import matplotlib.pyplot as plt
import gala.coordinates as gal_coord
from astropy.table import Table
from vector_plane_calculations import *
from velocity_transformations import *
imp.load_source('helper', '../tSNE_test/helper_functions.py')
from helper import move_to_dir
imp.load_source('gal_move', '../tSNE_test/convert_gal_movement.py')
from gal_move import gal_uvw
imp.load_source('veltrans', '../tSNE_test/velocity_transform.py')
from veltrans import *
# --------------------------------------------------------
# ---------------- FUNCTIONS -----------------------------
# --------------------------------------------------------
def _prepare_hist_data(d, bins, hist_range, norm=True):
heights, edges = np.histogram(d, bins=bins, range=hist_range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.max(heights)
return edges[:-1], heights, width
def _get_range(data, perc_cut=2.):
return (np.nanpercentile(data, perc_cut), np.nanpercentile(data, 100-perc_cut))
# return (np.nanmin(data), np.nanmax(data))
def plot_hist(obs, obs_f, galx, galx_f, path=None, title='', hist_bins = 100):
hist_range = _get_range(obs[obs_f])
# zgal_range = _get_range(galaxia_sub['pz'])
plt.title(title)
h_edg, h_hei, h_wid = _prepare_hist_data(obs[obs_f], hist_bins, hist_range, norm=True)
plt.bar(h_edg, h_hei, width=h_wid, color='green', alpha=0.2)
h_edg, h_hei, h_wid = _prepare_hist_data(galx[galx_f], hist_bins, hist_range, norm=True)
plt.bar(h_edg, h_hei, width=h_wid, color='blue', alpha=0.2)
plt.show()
plt.close()
# --------------------------------------------------------
# ---------------- CONSTANTS AND SETTINGS ----------------
# --------------------------------------------------------
# GALAH
# simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/GALAH/'
# simulation_ebf = 'galaxy_galah_complete.ebf'
# simulation_ebf = 'galaxy_galah_fields.ebf'
# RAVE
simulation_dir = '/home/klemen/GALAH_data/Galaxia_simulation/RAVE/'
simulation_ebf = 'galaxy_rave_complete.ebf'
simulation_fits = simulation_ebf.split('.')[0]+'.fits'
obs_file_fits = 'RAVE_GALAH_TGAS_stack.fits'
# analysis constants
l_center = 310.
b_center = -70.
r_center = 10.
# --------------------------------------------------------
# ---------------- INPUT DATA HANDLING -------------------
# --------------------------------------------------------
print('Reading data')
galaxia_data = Table.read(simulation_dir + simulation_fits)
obs_data = Table.read(obs_file_fits)
obs_data = obs_data.filled()
# compute observation galactic coordinates
l_b_obs = coord.ICRS(ra=obs_data['ra_gaia']*un.deg, dec=obs_data['dec_gaia']*un.deg).transform_to(coord.Galactic)
obs_data['l'] = l_b_obs.l.value
obs_data['b'] = l_b_obs.b.value
# create a subset of data
lb_center = coord.Galactic(l=l_center*un.deg, b=b_center*un.deg)
xyz_vel_stream = compute_xyz_vel(np.deg2rad(lb_center.l.value), np.deg2rad(lb_center.b.value), 10)
galaxia_sub = galaxia_data[coord.Galactic(l=galaxia_data['glon']*un.deg, b=galaxia_data['glat']*un.deg).separation(lb_center) < r_center*un.deg]
obs_sub = obs_data[coord.Galactic(l=obs_data['l']*un.deg, b=obs_data['b']*un.deg).separation(lb_center) < r_center*un.deg]
print('Galaxia stars: ' + str(len(galaxia_sub)))
print('Observation stars: ' + str(len(obs_sub)))
galaxia_sub['px'] *= 1e3 # kpc to pc conversion
galaxia_sub['py'] *= 1e3
galaxia_sub['pz'] *= 1e3
# galaxia_sub['vx'] *= -1. # it has different orientation than our coordinate system
# compute galactic velocities and positions for the obs stars
obs_gal_coord = coord.Galactic(l=obs_sub['l']*un.deg, b=obs_sub['b']*un.deg, distance=1e3/obs_sub['parallax'].data*un.pc)
obs_gal_xyz = obs_gal_coord.cartesian
obs_sub['x_gal'] = obs_gal_xyz.x.value
obs_sub['y_gal'] = obs_gal_xyz.y.value
obs_sub['z_gal'] = obs_gal_xyz.z.value
plot_hist(obs_sub, 'x_gal', galaxia_sub, 'px', path=None, title='')
plot_hist(obs_sub, 'y_gal', galaxia_sub, 'py', path=None, title='')
plot_hist(obs_sub, 'z_gal', galaxia_sub, 'pz', path=None, title='')
# convert velocities from ra/dec/pmra/pmdec to more consistent units
u_gal, v_gal, w_gal = gal_uvw(obs_sub['ra_gaia'], obs_sub['dec_gaia'], obs_sub['pmra'], obs_sub['pmdec'], obs_sub['RV'],
plx=obs_sub['parallax'])
obs_sub['u_gal'] = u_gal * -1.
obs_sub['v_gal'] = v_gal
obs_sub['w_gal'] = w_gal
ra_dec_pm = np.vstack((obs_sub['pmra'], obs_sub['pmdec'])) * un.mas/un.yr
l_b_pm = gal_coord.pm_icrs_to_gal(coord.ICRS(ra=obs_sub['ra_gaia']*un.deg, dec=obs_sub['dec_gaia']*un.deg), ra_dec_pm)
obs_sub['pml'] = l_b_pm[0].value
obs_sub['pmb'] = l_b_pm[1].value
xyz_vel = motion_to_cartesic(np.array(obs_sub['l']), np.array(obs_sub['b']),
np.array(obs_sub['pml']), np.array(obs_sub['pmb']),
np.array(obs_sub['RV']), plx=np.array(obs_sub['parallax']))
obs_sub['vx_gal'] = xyz_vel[0]
obs_sub['vy_gal'] = xyz_vel[1]
obs_sub['vz_gal'] = xyz_vel[2]
# plot_hist(obs_sub, 'u_gal', obs_sub, 'vx_gal', path=None, title='')
# plot_hist(obs_sub, 'v_gal', obs_sub, 'vy_gal', path=None, title='')
# plot_hist(obs_sub, 'w_gal', obs_sub, 'vz_gal', path=None, title='')
plot_hist(obs_sub, 'u_gal', galaxia_sub, 'vx', path=None, title='')
plot_hist(obs_sub, 'v_gal', galaxia_sub, 'vy', path=None, title='')
plot_hist(obs_sub, 'w_gal', galaxia_sub, 'vz', path=None, title='')
xyz_pos_stars = np.vstack((obs_sub['x_gal'],obs_sub['y_gal'],obs_sub['z_gal'])).T
xyz_vel_stars = np.vstack((obs_sub['u_gal'],obs_sub['v_gal'],obs_sub['w_gal'])).T
print(xyz_pos_stars)
print(xyz_vel_stars)
print(xyz_vel_stream)
obs_plane_intersects_3D = stream_plane_vector_intersect(xyz_pos_stars, xyz_vel_stars, xyz_vel_stream)
obs_plane_intersects_2D = intersects_to_2dplane(obs_plane_intersects_3D, xyz_vel_stream)
xyz_pos_stars = np.vstack((galaxia_sub['px'],galaxia_sub['py'],galaxia_sub['pz'])).T
xyz_vel_stars = np.vstack((galaxia_sub['vx'],galaxia_sub['vy'],galaxia_sub['vz'])).T
galaxia_plane_intersects_3D = stream_plane_vector_intersect(xyz_pos_stars, xyz_vel_stars, xyz_vel_stream)
galaxia_plane_intersects_2D = intersects_to_2dplane(galaxia_plane_intersects_3D, xyz_vel_stream)
plot_lim = (-1000, 1000)
# Create a plot
fig, ax = plt.subplots(1, 1)
ax.scatter(obs_plane_intersects_2D[:, 0], obs_plane_intersects_2D[:, 1], lw=0, c='red', s=2, alpha=1.)
ax.scatter(galaxia_plane_intersects_2D[:, 0], galaxia_plane_intersects_2D[:, 1], lw=0, c='blue', s=2, alpha=1.)
ax.scatter(0, 0, lw=0, c='black', s=10, marker='*') # solar position
ax.set(xlabel='X stream plane', ylabel='Y stream plane', xlim=plot_lim, ylim=plot_lim)
fig.tight_layout()
plt.show()
plt.close()
|
python
|
# import needed packages
import os, json
# global registry mapping command names to handler file paths
commands = {}
#import modules
modules = os.listdir(path='modules')
print('Importing modules')
count_mod = 0
count_ok_mod = 0
for module in modules:
try:
with open('modules/' + module + '/index.json') as read_modules:
mod_data = json.load(read_modules)
print('Importing ' + module + '... OK')
for com in mod_data['functions']:
commands[com] = module + '/' + mod_data['functions'][com]
count_ok_mod += 1
except Exception:
print('Importing ' + module + '... ERROR')
count_mod += 1
print("{} of {} modules loaded".format(count_ok_mod, count_mod))
|
python
|
from django.urls import path, include
from management import views
from rest_framework_simplejwt import views as jwt_views
urlpatterns = [
# Used to signup as a teacher or a student
path('signup/', views.SignUpView.as_view(), name = 'signup'),
# Used to obtain refresh and access token
path('login/access/', views.MyTokenObtainPairView.as_view(), name = 'access-token'),
# Used to obtain access token from refresh token
path('login/refresh/', jwt_views.TokenRefreshView.as_view(), name='token-refresh'),
# Used to reset password if forgotten
path('login/changepassword/', views.ChangePasswordView.as_view(), name='reset-password')
]
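# A usage sketch of the token flow above (hedged; field names follow the
# rest_framework_simplejwt defaults and the custom views may differ):
# POST /login/access/ with {"username": ..., "password": ...}
#   -> {"refresh": "<token>", "access": "<token>"}
# POST /login/refresh/ with {"refresh": "<token>"} -> {"access": "<token>"}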
|
python
|
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso, Ridge
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
model = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
# sample n_samples in uniform distribution between [-1.2,2]
x = np.array(np.linspace(-1.2, 2, n_samples))
clean_y = model(x)
noise_data = np.random.normal(loc=0, scale=noise, size=len(clean_y))
dirty_y = clean_y + noise_data
x.flatten()
# split into training and testing portions (2/3 for training, 1/3 for testing)
test_x_clean, test_y_clean, train_x_clean, train_y_clean = Q_1_plot_data(clean_y, dirty_y, x, noise)
# Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,9
train_errors_clean, validate_errors_clean = [], []
if noise == 0:
train_errors_clean, validate_errors_clean = Q_2_poly_over_clean(train_x_clean, train_y_clean)
# split into training and testing portions (2/3 for training, 1/3 for testing)
test_x_dirty, test_y_dirty, train_errors_dirty, train_x_dirty, train_y_dirty, validate_errors_dirty = Q_2_poly_over_dirty(
dirty_y, x, noise)
# best degree is
test_results_over_best_fit(test_x_clean, test_x_dirty, test_y_clean, test_y_dirty, train_errors_clean,
train_errors_dirty, train_x_clean, train_x_dirty, train_y_clean, train_y_dirty,
validate_errors_clean, validate_errors_dirty, noise)
def Practical_part_1():
select_polynomial_degree()
print()
select_polynomial_degree(100, 0)
print()
select_polynomial_degree(1500, 10)
def test_results_over_best_fit(test_x_clean, test_x_dirty, test_y_clean, test_y_dirty, train_errors_clean,
train_errors_dirty, train_x_clean, train_x_dirty, train_y_clean, train_y_dirty,
validate_errors_clean, validate_errors_dirty, noise=5):
print(train_errors_dirty)
print(validate_errors_dirty)
best_degree_dirty = np.argmin(validate_errors_dirty)
best_degree_clean = 0
if validate_errors_clean or noise == 0:
best_degree_clean = np.argmin(validate_errors_clean)
print(f"Best degree for dirty noise: {noise} data is {best_degree_dirty}, {best_degree_clean} for clean data")
# fit a polynoimal model with the best degree and plot the mean square error results
poly_dirty = PolynomialFitting(best_degree_dirty)
poly_dirty.fit(train_x_dirty, train_y_dirty)
# predict the test data
test_y_pred_dirty = poly_dirty.predict(test_x_dirty)
# present the error results
print(f"Mean square error for dirty noise: {noise} data is {mean_square_error(test_y_pred_dirty, test_y_dirty)} noise level is {noise}")
print(f"Mean square error for training data: {noise} data is {mean_square_error(poly_dirty.predict(train_x_dirty), train_y_dirty)} noise level is {noise}")
def Q_2_poly_over_dirty(dirty_y, x, noise=5):
train_x_dirty, train_y_dirty, test_x_dirty, test_y_dirty = split_train_test(x, dirty_y, 0.667)
train_x_dirty = train_x_dirty.flatten()
test_x_dirty = test_x_dirty.flatten()
train_errors_dirty = []
validate_errors_dirty = []
for degree in range(0, 10):
# Create a polynomial fitting object
train_error, validate_error = cross_validate(PolynomialFitting(degree), train_x_dirty, train_y_dirty,
mean_square_error, cv=5)
train_errors_dirty.append(train_error)
validate_errors_dirty.append(validate_error)
# plot the training and validation errors
fig = go.Figure()
fig.add_trace(go.Scatter(x=np.arange(0, 10), y=train_errors_dirty, mode='lines+markers', name='Training error'))
fig.add_trace(
go.Scatter(x=np.arange(0, 10), y=validate_errors_dirty, mode='lines+markers', name='Validation error'))
# add axes titles and graph title
fig.update_layout(title_text=f'Polynomial fitting over dirty data with noise level {noise}', xaxis_title_text='Degree',
yaxis_title_text='Error')
fig.show()
return test_x_dirty, test_y_dirty, train_errors_dirty, train_x_dirty, train_y_dirty, validate_errors_dirty
def Q_2_poly_over_clean(train_x_clean, train_y_clean):
train_errors_clean = []
validate_errors_clean = []
for degree in range(0, 10):
# Create a polynomial fitting object
train_error, validate_error = cross_validate(PolynomialFitting(degree), train_x_clean, train_y_clean,
mean_square_error, cv=5)
train_errors_clean.append(train_error)
validate_errors_clean.append(validate_error)
# plot the training and validation errors
fig = go.Figure()
fig.add_trace(go.Scatter(x=np.arange(0, 10), y=train_errors_clean, mode='lines+markers', name='Training error'))
fig.add_trace(
go.Scatter(x=np.arange(0, 10), y=validate_errors_clean, mode='lines+markers', name='Validation error'))
# add axes titles and graph title
fig.update_layout(title_text='Polynomial fitting over clean data', xaxis_title_text='Degree',
yaxis_title_text='Error')
fig.show()
return train_errors_clean, validate_errors_clean
def Q_1_plot_data(clean_y, dirty_y, x, noise=0):
train_x_clean, train_y_clean, test_x_clean, test_y_clean = split_train_test(x, clean_y, 0.667)
train_x_dirty, train_y_dirty, test_x_dirty, test_y_dirty = split_train_test(x, dirty_y, 0.667)
train_x_clean = train_x_clean.flatten()
test_x_clean = test_x_clean.flatten()
train_x_dirty = train_x_dirty.flatten()
test_x_dirty = test_x_dirty.flatten()
x.flatten()
fig = go.Figure()
fig.add_trace(
go.Scatter(x=train_x_dirty, y=train_y_dirty, mode='markers', name='Training data', marker_color='blue'))
fig.add_trace(go.Scatter(x=test_x_dirty, y=test_y_dirty, mode='markers', name='Test data', marker_color='red'))
fig.add_trace(go.Scatter(x=x, y=clean_y, mode='markers', name='Noiseless model', marker_color='green'))
fig.update_layout(title=f'Training and test data with noise level of {noise}', xaxis_title='x', yaxis_title='y')
fig.show()
return test_x_clean, test_y_clean, train_x_clean, train_y_clean
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
"""
Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
values for Ridge and Lasso regressions
Parameters
----------
n_samples: int, default=50
Number of samples to generate
n_evaluations: int, default = 500
Number of regularization parameter values to evaluate for each of the algorithms
"""
# Question 6 - Load diabetes dataset and split into training and testing portions
X_test, X_train, l1_ratios, y_test, y_train = load_data(n_evaluations, n_samples)
# use cross_validate to evaluate the performance of Ridge and Lasso regression for each regularization parameter
# Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
train_errors_lasso, train_errors_ridge, validate_errors_lasso, validate_errors_ridge = hyper_parameters_eval(
X_train, l1_ratios, y_train)
# Question 8 - Plot the training and validation errors for Ridge and Lasso regressions for each regularization parameter
plot_lasso_ridge_errors(l1_ratios, train_errors_lasso, train_errors_ridge, validate_errors_lasso,
validate_errors_ridge)
compare_regression_with_best_lambda(X_test, X_train, l1_ratios, validate_errors_lasso, validate_errors_ridge,
y_test, y_train)
def compare_regression_with_best_lambda(X_test, X_train, l1_ratios, validate_errors_lasso, validate_errors_ridge,
y_test, y_train):
print(f'Best lambda for Ridge: {l1_ratios[np.argmin(validate_errors_ridge)]}')
print(f'Best lambda for Lasso: {l1_ratios[np.argmin(validate_errors_lasso)]}')
# fit the model with the best regularization parameter over the training data for both Ridge and Lasso and Linear Regression
ridge_model = Ridge(alpha=l1_ratios[np.argmin(validate_errors_ridge)])
ridge_model.fit(X_train, y_train)
lasso_model = Lasso(alpha=l1_ratios[np.argmin(validate_errors_lasso)])
lasso_model.fit(X_train, y_train)
linear_model = LinearRegression()
linear_model.fit(X_train, y_train)
# print the mean squared error for the Ridge, Lasso and Linear Regression models
print(f'Mean squared error for Ridge: {mean_square_error(y_test, ridge_model.predict(X_test))}')
print(f'Mean squared error for Lasso: {mean_square_error(y_test, lasso_model.predict(X_test))}')
print(f'Mean squared error for Linear Regression: {mean_square_error(y_test, linear_model.predict(X_test))}')
def plot_lasso_ridge_errors(l1_ratios, train_errors_lasso, train_errors_ridge, validate_errors_lasso,
validate_errors_ridge):
fig = go.Figure()
fig.add_trace(go.Scatter(x=l1_ratios, y=train_errors_ridge, mode='lines+markers', name='Training error - Ridge'))
fig.add_trace(
go.Scatter(x=l1_ratios, y=validate_errors_ridge, mode='lines+markers', name='Validation error - Ridge'))
fig.add_trace(go.Scatter(x=l1_ratios, y=train_errors_lasso, mode='lines+markers', name='Training error - Lasso'))
fig.add_trace(
go.Scatter(x=l1_ratios, y=validate_errors_lasso, mode='lines+markers', name='Validation error- Lasso'))
# add axes titles and graph title
fig.update_layout(title_text='Training and validation errors for Ridge and Lasso regressions',
xaxis_title_text='Lambda',
yaxis_title_text='Error')
fig.show()
def hyper_parameters_eval(X_train, l1_ratios, y_train):
train_errors_ridge, validate_errors_ridge = [], []
train_errors_lasso, validate_errors_lasso = [], []
for lam in l1_ratios:
# Question 6 - Use cross_validate to evaluate the performance of Ridge and Lasso regression for each regularization parameter
ridge_model = RidgeRegression(lam=lam, include_intercept=True)
train_error_ridge, validate_error_ridge = cross_validate(ridge_model, X_train, y_train, mean_square_error, cv=5)
train_errors_ridge.append(train_error_ridge)
validate_errors_ridge.append(validate_error_ridge)
lasso_model = Lasso(alpha=lam)
train_error_lasso, validate_error_lasso = cross_validate(lasso_model, X_train, y_train, mean_square_error, cv=5)
train_errors_lasso.append(train_error_lasso)
validate_errors_lasso.append(validate_error_lasso)
return train_errors_lasso, train_errors_ridge, validate_errors_lasso, validate_errors_ridge
def load_data(n_evaluations, n_samples):
X, y = datasets.load_diabetes(return_X_y=True)
# choose the first n_samples samples as training data
X_train, y_train = X[:n_samples], y[:n_samples]
# choose the remaining samples as testing data
X_test, y_test = X[n_samples:], y[n_samples:]
# Question 6 - Create a list of regularization parameter values to evaluate
l1_ratios = np.linspace(0.001, 1, 500)
return X_test, X_train, l1_ratios, y_test, y_train
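# A minimal sketch of the cross_validate contract assumed above (hedged; the
# real implementation lives in IMLearn.model_selection): split the training
# data into cv folds, fit on cv-1 folds, and average the train/validation
# scores across folds.
def _cross_validate_sketch(estimator, X, y, scoring, cv=5):
    folds_X = np.array_split(X, cv)
    folds_y = np.array_split(y, cv)
    train_scores, validation_scores = [], []
    for i in range(cv):
        # leave fold i out for validation
        train_X = np.concatenate(folds_X[:i] + folds_X[i + 1:])
        train_y = np.concatenate(folds_y[:i] + folds_y[i + 1:])
        estimator.fit(train_X, train_y)
        train_scores.append(scoring(train_y, estimator.predict(train_X)))
        validation_scores.append(scoring(folds_y[i], estimator.predict(folds_X[i])))
    return np.mean(train_scores), np.mean(validation_scores)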
if __name__ == '__main__':
np.random.seed(0)
Practical_part_1()
select_regularization_parameter()
|
python
|